From 8aff29509f1f6cfc73973b8da3e55ed559ae1112 Mon Sep 17 00:00:00 2001 From: Isaku Yamahata Date: Tue, 1 Jul 2014 17:11:09 +0900 Subject: [PATCH] import neutron master of cba140daccd7c4f715263cda422d5cec27af069d Merge Neutron master branch to tacker master branch with modification of tox.ini and .gitreview. This patch imports the following change set of Neutron. > commit cba140daccd7c4f715263cda422d5cec27af069d > Merge: 63d8237 6bed4a0 > Author: Jenkins > Date: Sun Jun 22 16:02:56 2014 +0000 > > Merge "Adding static routes data for members" Change-Id: I5a0f522bc20530c46e35dc9e03fe72d72ad04577 --- .coveragerc | 7 + .gitignore | 28 + .mailmap | 11 + .pylintrc | 42 + .testr.conf | 4 + HACKING.rst | 28 + LICENSE | 176 + MANIFEST.in | 14 + README.rst | 25 + TESTING.rst | 180 + babel.cfg | 2 + bin/neutron-rootwrap | 21 + bin/neutron-rootwrap-xen-dom0 | 141 + doc/Makefile | 96 + doc/pom.xml | 135 + doc/source/conf.py | 250 + doc/source/devref/advanced_services.rst | 7 + doc/source/devref/api_extensions.rst | 18 + doc/source/devref/api_layer.rst | 57 + doc/source/devref/common.rst | 25 + doc/source/devref/db_layer.rst | 2 + doc/source/devref/development.environment.rst | 49 + doc/source/devref/fwaas.rst | 30 + doc/source/devref/index.rst | 65 + doc/source/devref/l2_agents.rst | 7 + doc/source/devref/layer3.rst | 199 + doc/source/devref/lbaas.rst | 32 + doc/source/devref/linuxbridge_agent.rst | 2 + doc/source/devref/openvswitch_agent.rst | 21 + doc/source/devref/plugin-api.rst | 12 + doc/source/devref/rpc_api.rst | 2 + doc/source/devref/security_group_api.rst | 50 + doc/source/devref/vpnaas.rst | 21 + doc/source/docbkx/docbkx-example/README | 14 + doc/source/docbkx/docbkx-example/pom.xml | 38 + .../docbkx-example/src/docbkx/example.xml | 318 + .../src/docbkx/figures/example.sdx | 79 + .../src/docbkx/figures/example.svg | 523 + doc/source/docbkx/quantum-api-1.0/common.ent | 112 + .../quantum-api-1.0/figures/Arrow_east.svg | 70 + .../figures/Check_mark_23x20_02.svg | 60 + 
.../docbkx/quantum-api-1.0/js/shjs/sh_java.js | 337 + .../quantum-api-1.0/js/shjs/sh_javascript.js | 347 + .../docbkx/quantum-api-1.0/js/shjs/sh_main.js | 538 + .../docbkx/quantum-api-1.0/js/shjs/sh_xml.js | 115 + .../js/trc/schema/controller.js | 184 + .../js/trc/schema/layoutManager.js | 137 + .../js/trc/schema/sampleManager.js | 342 + .../docbkx/quantum-api-1.0/js/trc/util.js | 564 + .../quantum-api-1.0/quantum-api-guide.xml | 2011 ++ .../samples/att-get-res-none.json | 3 + .../samples/att-get-res-none.xml | 1 + .../quantum-api-1.0/samples/att-get-res.json | 6 + .../quantum-api-1.0/samples/att-get-res.xml | 1 + .../quantum-api-1.0/samples/att-put-req.json | 6 + .../quantum-api-1.0/samples/att-put-req.xml | 2 + .../quantum-api-1.0/samples/extensions.json | 19 + .../quantum-api-1.0/samples/extensions.xml | 21 + .../docbkx/quantum-api-1.0/samples/fault.json | 7 + .../docbkx/quantum-api-1.0/samples/fault.xml | 8 + .../samples/network-get-detail-res.json | 22 + .../samples/network-get-detail-res.xml | 14 + .../samples/network-get-res.json | 7 + .../samples/network-get-res.xml | 3 + .../samples/network-post-req.json | 6 + .../samples/network-post-req.xml | 2 + .../samples/network-post-res.json | 6 + .../samples/network-post-res.xml | 2 + .../samples/networks-get-detail-res.json | 13 + .../samples/networks-get-detail-res.xml | 8 + .../samples/networks-get-res.json | 11 + .../samples/networks-get-res.xml | 4 + .../samples/networks-post-req.json | 5 + .../samples/networks-post-req.xml | 2 + .../samples/networks-post-res.json | 5 + .../samples/networks-post-res.xml | 2 + .../quantum-api-1.0/samples/notfound.json | 7 + .../quantum-api-1.0/samples/notfound.xml | 7 + .../samples/notimplemented.json | 7 + .../samples/notimplemented.xml | 5 + .../samples/port-get-detail-res.json | 12 + .../samples/port-get-detail-res.xml | 6 + .../quantum-api-1.0/samples/port-get-res.json | 8 + .../quantum-api-1.0/samples/port-get-res.xml | 3 + .../samples/port-post-req.json | 6 + 
.../quantum-api-1.0/samples/port-post-req.xml | 2 + .../samples/port-post-res.json | 6 + .../quantum-api-1.0/samples/port-post-res.xml | 2 + .../samples/ports-get-detail-res.json | 12 + .../samples/ports-get-detail-res.xml | 8 + .../samples/ports-get-res.json | 11 + .../quantum-api-1.0/samples/ports-get-res.xml | 6 + .../quantum-api-1.0/samples/private.json | 9 + .../quantum-api-1.0/samples/private.xml | 6 + .../quantum-api-1.0/samples/public.json | 11 + .../docbkx/quantum-api-1.0/samples/public.xml | 8 + .../quantum-api-1.0/samples/versions-atom.xml | 22 + .../quantum-api-1.0/samples/versions.json | 24 + .../quantum-api-1.0/samples/versions.xml | 12 + .../docbkx/quantum-api-1.0/style/schema.css | 82 + .../quantum-api-1.0/style/shjs/sh_acid.css | 151 + .../style/shjs/sh_darkblue.css | 151 + .../quantum-api-1.0/style/shjs/sh_emacs.css | 139 + .../quantum-api-1.0/style/shjs/sh_night.css | 151 + .../quantum-api-1.0/style/shjs/sh_pablo.css | 151 + .../quantum-api-1.0/style/shjs/sh_print.css | 145 + .../quantum-api-1.0/style/shjs/sh_style.css | 66 + .../style/shjs/sh_whitengrey.css | 139 + .../docbkx/quantum-api-1.0/xsd/.htaccess | 6 + .../docbkx/quantum-api-1.0/xsd/actions.xsd | 439 + .../quantum-api-1.0/xsd/affinity-id.xjb | 11 + .../quantum-api-1.0/xsd/affinity-id.xsd | 39 + .../docbkx/quantum-api-1.0/xsd/api-common.xjb | 11 + .../docbkx/quantum-api-1.0/xsd/api-common.xsd | 66 + doc/source/docbkx/quantum-api-1.0/xsd/api.xjb | 21 + doc/source/docbkx/quantum-api-1.0/xsd/api.xsd | 103 + .../docbkx/quantum-api-1.0/xsd/atom.xjb | 11 + .../docbkx/quantum-api-1.0/xsd/atom/atom.xsd | 105 + .../docbkx/quantum-api-1.0/xsd/atom/xml.xsd | 294 + .../docbkx/quantum-api-1.0/xsd/backup.xsd | 378 + .../docbkx/quantum-api-1.0/xsd/common.xsd | 156 + .../quantum-api-1.0/xsd/ext/rax-dme/api.xsd | 38 + .../xsd/ext/rax-dme/rax-dme.xsd | 25 + .../docbkx/quantum-api-1.0/xsd/extensions.xsd | 203 + .../docbkx/quantum-api-1.0/xsd/faults.xsd | 532 + .../docbkx/quantum-api-1.0/xsd/flavor.xsd | 
244 + .../docbkx/quantum-api-1.0/xsd/image.xsd | 443 + .../docbkx/quantum-api-1.0/xsd/ipgroup.xsd | 245 + .../docbkx/quantum-api-1.0/xsd/limits.xsd | 315 + .../docbkx/quantum-api-1.0/xsd/metadata.xsd | 89 + .../docbkx/quantum-api-1.0/xsd/server.xsd | 1013 + .../docbkx/quantum-api-1.0/xsd/shareip.xjb | 11 + .../docbkx/quantum-api-1.0/xsd/shareip.xsd | 83 + .../docbkx/quantum-api-1.0/xsd/txt.htaccess | 4 + .../docbkx/quantum-api-1.0/xsd/version.xsd | 355 + .../docbkx/quantum-api-1.0/xslt/schema.xsl | 1342 ++ doc/source/index.rst | 60 + doc/source/man/neutron-server.rst | 75 + etc/api-paste.ini | 30 + etc/dhcp_agent.ini | 88 + etc/fwaas_driver.ini | 3 + etc/init.d/neutron-server | 68 + etc/l3_agent.ini | 79 + etc/lbaas_agent.ini | 42 + etc/metadata_agent.ini | 59 + etc/metering_agent.ini | 15 + etc/neutron.conf | 479 + etc/neutron/plugins/bigswitch/restproxy.ini | 114 + .../plugins/bigswitch/ssl/ca_certs/README | 3 + .../plugins/bigswitch/ssl/host_certs/README | 6 + etc/neutron/plugins/brocade/brocade.ini | 29 + etc/neutron/plugins/cisco/cisco_plugins.ini | 138 + etc/neutron/plugins/cisco/cisco_vpn_agent.ini | 22 + etc/neutron/plugins/embrane/heleos_conf.ini | 41 + .../plugins/hyperv/hyperv_neutron_plugin.ini | 63 + .../plugins/ibm/sdnve_neutron_plugin.ini | 50 + .../plugins/linuxbridge/linuxbridge_conf.ini | 78 + etc/neutron/plugins/metaplugin/metaplugin.ini | 31 + etc/neutron/plugins/midonet/midonet.ini | 19 + etc/neutron/plugins/ml2/ml2_conf.ini | 62 + etc/neutron/plugins/ml2/ml2_conf_arista.ini | 45 + etc/neutron/plugins/ml2/ml2_conf_brocade.ini | 13 + etc/neutron/plugins/ml2/ml2_conf_cisco.ini | 94 + etc/neutron/plugins/ml2/ml2_conf_fslsdn.ini | 52 + etc/neutron/plugins/ml2/ml2_conf_mlnx.ini | 6 + etc/neutron/plugins/ml2/ml2_conf_ncs.ini | 28 + etc/neutron/plugins/ml2/ml2_conf_odl.ini | 30 + etc/neutron/plugins/ml2/ml2_conf_ofa.ini | 13 + etc/neutron/plugins/mlnx/mlnx_conf.ini | 79 + etc/neutron/plugins/nec/nec.ini | 60 + 
etc/neutron/plugins/nuage/nuage_plugin.ini | 10 + .../plugins/oneconvergence/nvsdplugin.ini | 35 + .../openvswitch/ovs_neutron_plugin.ini | 179 + etc/neutron/plugins/plumgrid/plumgrid.ini | 14 + etc/neutron/plugins/ryu/ryu.ini | 44 + etc/neutron/plugins/vmware/nsx.ini | 202 + etc/neutron/rootwrap.d/debug.filters | 14 + etc/neutron/rootwrap.d/dhcp.filters | 38 + .../rootwrap.d/iptables-firewall.filters | 21 + etc/neutron/rootwrap.d/l3.filters | 41 + etc/neutron/rootwrap.d/lbaas-haproxy.filters | 26 + .../rootwrap.d/linuxbridge-plugin.filters | 19 + etc/neutron/rootwrap.d/nec-plugin.filters | 12 + .../rootwrap.d/openvswitch-plugin.filters | 22 + etc/neutron/rootwrap.d/ryu-plugin.filters | 21 + etc/neutron/rootwrap.d/vpnaas.filters | 13 + etc/policy.json | 136 + etc/rootwrap.conf | 34 + etc/services.conf | 40 + etc/vpn_agent.ini | 14 + neutron/__init__.py | 21 + neutron/agent/__init__.py | 16 + neutron/agent/common/__init__.py | 16 + neutron/agent/common/config.py | 123 + neutron/agent/dhcp_agent.py | 622 + neutron/agent/firewall.py | 138 + neutron/agent/l2population_rpc.py | 56 + neutron/agent/l3_agent.py | 990 + neutron/agent/linux/__init__.py | 16 + neutron/agent/linux/async_process.py | 223 + neutron/agent/linux/daemon.py | 151 + neutron/agent/linux/dhcp.py | 908 + neutron/agent/linux/external_process.py | 104 + neutron/agent/linux/interface.py | 450 + neutron/agent/linux/ip_lib.py | 569 + neutron/agent/linux/iptables_firewall.py | 383 + neutron/agent/linux/iptables_manager.py | 668 + neutron/agent/linux/ovs_lib.py | 564 + neutron/agent/linux/ovsdb_monitor.py | 107 + neutron/agent/linux/polling.py | 114 + neutron/agent/linux/utils.py | 130 + neutron/agent/metadata/__init__.py | 17 + neutron/agent/metadata/agent.py | 392 + neutron/agent/metadata/namespace_proxy.py | 184 + neutron/agent/netns_cleanup_util.py | 176 + neutron/agent/ovs_cleanup_util.py | 112 + neutron/agent/rpc.py | 112 + neutron/agent/securitygroups_rpc.py | 303 + neutron/api/__init__.py | 0 
neutron/api/api_common.py | 327 + neutron/api/extensions.py | 684 + neutron/api/rpc/__init__.py | 0 neutron/api/rpc/agentnotifiers/__init__.py | 0 .../rpc/agentnotifiers/dhcp_rpc_agent_api.py | 177 + .../rpc/agentnotifiers/l3_rpc_agent_api.py | 121 + .../agentnotifiers/metering_rpc_agent_api.py | 99 + neutron/api/v2/__init__.py | 0 neutron/api/v2/attributes.py | 774 + neutron/api/v2/base.py | 679 + neutron/api/v2/resource.py | 172 + neutron/api/v2/resource_helper.py | 93 + neutron/api/v2/router.py | 115 + neutron/api/versions.py | 69 + neutron/api/views/__init__.py | 0 neutron/api/views/versions.py | 60 + neutron/auth.py | 73 + neutron/cmd/__init__.py | 16 + neutron/cmd/sanity/__init__.py | 0 neutron/cmd/sanity/checks.py | 38 + neutron/cmd/sanity_check.py | 93 + neutron/cmd/usage_audit.py | 50 + neutron/common/__init__.py | 0 neutron/common/config.py | 189 + neutron/common/constants.py | 121 + neutron/common/exceptions.py | 321 + neutron/common/ipv6_utils.py | 39 + neutron/common/log.py | 35 + neutron/common/rpc.py | 136 + neutron/common/rpc_compat.py | 165 + neutron/common/test_lib.py | 44 + neutron/common/topics.py | 57 + neutron/common/utils.py | 301 + neutron/context.py | 176 + neutron/db/__init__.py | 0 neutron/db/agents_db.py | 219 + neutron/db/agentschedulers_db.py | 226 + neutron/db/allowedaddresspairs_db.py | 147 + neutron/db/api.py | 89 + neutron/db/db_base_plugin_v2.py | 1625 ++ neutron/db/dhcp_rpc_base.py | 287 + neutron/db/external_net_db.py | 163 + neutron/db/extradhcpopt_db.py | 127 + neutron/db/extraroute_db.py | 185 + neutron/db/firewall/__init__.py | 16 + neutron/db/firewall/firewall_db.py | 481 + neutron/db/l3_agentschedulers_db.py | 291 + neutron/db/l3_db.py | 1039 + neutron/db/l3_gwmode_db.py | 73 + neutron/db/l3_rpc_base.py | 128 + neutron/db/loadbalancer/__init__.py | 15 + neutron/db/loadbalancer/loadbalancer_db.py | 802 + neutron/db/metering/__init__.py | 15 + neutron/db/metering/metering_db.py | 239 + neutron/db/metering/metering_rpc.py | 
55 + neutron/db/migration/README | 92 + neutron/db/migration/__init__.py | 53 + neutron/db/migration/alembic.ini | 52 + .../migration/alembic_migrations/__init__.py | 0 .../alembic_migrations/common_ext_ops.py | 83 + .../db/migration/alembic_migrations/env.py | 106 + .../alembic_migrations/script.py.mako | 52 + .../versions/1064e98b7917_nec_pf_port_del.py | 63 + .../versions/10cd28e692e9_nuage_extraroute.py | 68 + .../versions/1149d7de0cfa_port_security.py | 84 + .../117643811bca_nec_delete_ofc_mapping.py | 208 + .../11c6e18605c8_pool_monitor_status_.py | 62 + .../versions/128e042a2b68_ext_gw_mode.py | 71 + .../1341ed32cc1e_nvp_netbinding_update.py | 70 + .../versions/13de305df56e_add_nec_pf_name.py | 55 + .../1421183d533f_nsx_dhcp_metadata.py | 74 + .../versions/14f24494ca31_arista_ml2.py | 78 + .../157a5d299379_ml2_binding_profile.py | 55 + .../176a85fc7d79_add_portbindings_db.py | 66 + .../versions/19180cf98af6_nsx_gw_devices.py | 103 + .../1b2580001654_nsx_sec_group_mappin.py | 61 + .../1b693c095aa3_quota_ext_db_grizzly.py | 64 + .../1b837a7125a9_cisco_apic_driver.py | 74 + .../1c33fa3cd1a1_extra_route_config.py | 82 + .../versions/1d76643bcec4_nvp_netbinding.py | 67 + ...5dd1d09b22_set_not_null_fields_lb_stats.py | 66 + .../1efb85914233_allowedaddresspairs.py | 67 + ...c149aca4_agents_unique_by_type_and_host.py | 73 + .../2032abe8edac_lbaas_add_status_des.py | 57 + .../20ae61555e95_ml2_gre_type_driver.py | 66 + .../2447ad0e9585_add_ipv6_mode_props.py | 81 + .../versions/24c7ea5160d7_cisco_csr_vpnaas.py | 60 + .../versions/2528ceb28230_nec_pf_netid_fix.py | 61 + .../263772d65691_cisco_db_cleanup_2.py | 66 + .../versions/27cc183af192_ml2_vnic_type.py | 54 + .../27ef74513d33_quota_in_plumgrid_pl.py | 65 + .../versions/2a3bae1ceb8_nec_port_binding.py | 65 + .../2a6d0b51f4bb_cisco_plugin_cleanup.py | 88 + .../versions/2c4af419145b_l3_support.py | 56 + .../versions/2db5203cb7a9_nuage_floatingip.py | 83 + .../2eeaf963a447_floatingip_status.py | 81 + 
.../versions/32a65f71af51_ml2_portbinding.py | 70 + .../32b517556ec9_remove_tunnelip_mode.py | 58 + .../338d7508968c_vpnaas_peer_address_.py | 55 + ...et_length_of_description_field_metering.py | 58 + .../33dd0a9fa487_embrane_lbaas_driver.py | 61 + ...35c7c198ddea_lbaas_healthmon_del_status.py | 58 + .../versions/363468ac592c_nvp_network_gw.py | 100 + .../versions/38335592a0dc_nvp_portmap.py | 62 + .../38fc1f6789f8_cisco_n1kv_overlay.py | 57 + .../39cf3f799352_fwaas_havana_2_model.py | 109 + .../3a520dd165d0_cisco_nexus_multi_switch.py | 59 + .../3b54bf9e29f7_nec_plugin_sharednet.py | 84 + .../3c6e57a23db4_add_multiprovider.py | 103 + .../3cabb850f4a5_table_to_track_port_.py | 63 + .../versions/3cb5d900c5de_security_groups.py | 103 + .../versions/3cbf70257c28_nvp_mac_learning.py | 63 + .../versions/3d2585038b95_vmware_nsx.py | 65 + .../3d3cb89d84ee_nsx_switch_mappings.py | 61 + .../versions/3d6fae8b70b0_nvp_lbaas_plugin.py | 82 + .../versions/3ed8f075e38a_nvp_fwaas_plugin.py | 60 + .../versions/40b0aff0302e_mlnx_initial.py | 194 + .../versions/40dffbf4b549_nvp_dist_router.py | 63 + .../versions/45680af419f9_nvp_qos.py | 94 + .../versions/4692d074d587_agent_scheduler.py | 89 + .../46a0efbd8f0_cisco_n1kv_multisegm.py | 80 + .../477a4488d3f4_ml2_vxlan_type_driver.py | 69 + .../versions/48b6f43f7471_service_type.py | 76 + .../492a106273f8_brocade_ml2_mech_dri.py | 70 + .../49332180ca96_ryu_plugin_update.py | 59 + .../49f5e553f61f_ml2_security_groups.py | 95 + .../versions/4a666eb208c2_service_router.py | 70 + .../4ca36cfc898c_nsx_router_mappings.py | 64 + .../4eca4a84f08a_remove_ml2_cisco_cred_db.py | 59 + .../50d5ba354c23_ml2_binding_vif_details.py | 99 + .../versions/50e86cb2637a_nsx_mappings.py | 82 + .../511471cc46b_agent_ext_model_supp.py | 84 + .../51b4de912379_cisco_nexus_ml2_mech.py | 68 + .../52c5e4a18807_lbaas_pool_scheduler.py | 63 + .../52ff27f7567a_support_for_vpnaas.py | 183 + ...1e1_nec_rename_quantum_id_to_neutron_id.py | 65 + 
.../53bbd27ec841_extra_dhcp_opts_supp.py | 66 + .../versions/54c2c487e913_lbaas.py | 163 + .../54f7549a0e5f_set_not_null_peer_address.py | 54 + .../557edfc53098_new_service_types.py | 81 + .../versions/569e98a8132b_metering.py | 77 + .../5918cbddab04_add_tables_for_route.py | 71 + .../versions/5a875d0e5c_ryu.py | 74 + .../5ac1c354a051_n1kv_segment_alloc.py | 83 + .../versions/5ac71e65402c_ml2_initial.py | 84 + ...afba73813_ovs_tunnelendpoints_id_unique.py | 64 + .../66a59a7f516_nec_openflow_router.py | 68 + .../6be312499f9_set_not_null_vlan_id_cisco.py | 54 + .../81c553f3776c_bsn_consistencyhashes.py | 56 + .../86cf4d88bd3_remove_bigswitch_por.py | 59 + .../versions/8f682276ee4_ryu_plugin_quota.py | 61 + .../alembic_migrations/versions/HEAD | 1 + .../alembic_migrations/versions/README | 5 + .../abc88c33f74f_lb_stats_needs_bigint.py | 67 + ...65aa907aec_set_length_of_protocol_field.py | 52 + .../b7a8863760e_rm_cisco_vlan_bindin.py | 60 + .../versions/c88b6b5fea3_cisco_n1kv_tables.py | 150 + ...871c0d5_set_admin_state_up_not_null_ml2.py | 54 + .../e197124d4b9_add_unique_constrain.py | 65 + .../e6b16a30d97_cisco_provider_nets.py | 62 + .../versions/e766b19a3bb_nuage_initial.py | 120 + .../versions/ed93525fd003_bigswitch_quota.py | 64 + .../f44ab9871cd6_bsn_security_groups.py | 95 + .../versions/f489cf14a79c_lbaas_havana.py | 162 + .../versions/f9263d6df56_remove_dhcp_lease.py | 46 + .../versions/fcac4c42e2cc_bsn_addresspairs.py | 58 + .../versions/folsom_initial.py | 563 + .../versions/grizzly_release.py | 42 + .../versions/havana_release.py | 42 + .../versions/icehouse_release.py | 42 + neutron/db/migration/cli.py | 171 + neutron/db/migration/migrate_to_ml2.py | 462 + neutron/db/model_base.py | 52 + neutron/db/models_v2.py | 204 + neutron/db/portbindings_base.py | 41 + neutron/db/portbindings_db.py | 121 + neutron/db/portsecurity_db.py | 185 + neutron/db/quota_db.py | 179 + neutron/db/routedserviceinsertion_db.py | 106 + neutron/db/routerservicetype_db.py | 57 + 
neutron/db/securitygroups_db.py | 564 + neutron/db/securitygroups_rpc_base.py | 374 + neutron/db/servicetype_db.py | 99 + neutron/db/sqlalchemyutils.py | 107 + neutron/db/vpn/__init__.py | 18 + neutron/db/vpn/vpn_db.py | 691 + neutron/debug/README | 38 + neutron/debug/__init__.py | 16 + neutron/debug/commands.py | 157 + neutron/debug/debug_agent.py | 198 + neutron/debug/shell.py | 90 + neutron/extensions/__init__.py | 0 neutron/extensions/agent.py | 163 + neutron/extensions/allowedaddresspairs.py | 116 + neutron/extensions/dhcpagentscheduler.py | 152 + neutron/extensions/external_net.py | 68 + neutron/extensions/extra_dhcp_opt.py | 91 + neutron/extensions/extraroute.py | 74 + neutron/extensions/firewall.py | 431 + neutron/extensions/flavor.py | 67 + neutron/extensions/l3.py | 254 + neutron/extensions/l3_ext_gw_mode.py | 66 + neutron/extensions/l3agentscheduler.py | 194 + neutron/extensions/lbaas_agentscheduler.py | 137 + neutron/extensions/loadbalancer.py | 506 + neutron/extensions/metering.py | 190 + neutron/extensions/multiprovidernet.py | 114 + neutron/extensions/portbindings.py | 133 + neutron/extensions/portsecurity.py | 78 + neutron/extensions/providernet.py | 95 + neutron/extensions/quotasv2.py | 152 + neutron/extensions/routedserviceinsertion.py | 71 + neutron/extensions/routerservicetype.py | 55 + neutron/extensions/securitygroup.py | 354 + neutron/extensions/servicetype.py | 91 + neutron/extensions/vpnaas.py | 482 + neutron/hacking/__init__.py | 0 neutron/hacking/checks.py | 50 + neutron/hooks.py | 29 + .../de/LC_MESSAGES/neutron-log-error.po | 170 + .../locale/de/LC_MESSAGES/neutron-log-info.po | 131 + .../de/LC_MESSAGES/neutron-log-warning.po | 57 + .../en_AU/LC_MESSAGES/neutron-log-error.po | 163 + .../en_AU/LC_MESSAGES/neutron-log-info.po | 128 + .../en_GB/LC_MESSAGES/neutron-log-error.po | 163 + .../en_GB/LC_MESSAGES/neutron-log-info.po | 128 + neutron/locale/en_US/LC_MESSAGES/neutron.po | 16163 ++++++++++++++++ 
.../es/LC_MESSAGES/neutron-log-error.po | 170 + .../locale/es/LC_MESSAGES/neutron-log-info.po | 128 + .../fr/LC_MESSAGES/neutron-log-critical.po | 23 + .../fr/LC_MESSAGES/neutron-log-error.po | 171 + .../locale/fr/LC_MESSAGES/neutron-log-info.po | 128 + .../locale/it/LC_MESSAGES/neutron-log-info.po | 128 + .../ja/LC_MESSAGES/neutron-log-error.po | 170 + .../locale/ja/LC_MESSAGES/neutron-log-info.po | 128 + .../ko_KR/LC_MESSAGES/neutron-log-error.po | 165 + .../ko_KR/LC_MESSAGES/neutron-log-info.po | 128 + neutron/locale/neutron-log-critical.pot | 19 + neutron/locale/neutron-log-error.pot | 158 + neutron/locale/neutron-log-info.pot | 127 + neutron/locale/neutron-log-warning.pot | 53 + neutron/locale/neutron.pot | 16162 +++++++++++++++ .../pt_BR/LC_MESSAGES/neutron-log-error.po | 168 + .../pt_BR/LC_MESSAGES/neutron-log-info.po | 128 + .../zh_CN/LC_MESSAGES/neutron-log-error.po | 162 + .../zh_CN/LC_MESSAGES/neutron-log-info.po | 128 + .../zh_TW/LC_MESSAGES/neutron-log-info.po | 128 + neutron/manager.py | 225 + neutron/neutron_plugin_base_v2.py | 352 + neutron/notifiers/__init__.py | 0 neutron/notifiers/nova.py | 249 + neutron/openstack/__init__.py | 0 neutron/openstack/common/__init__.py | 17 + neutron/openstack/common/cache/__init__.py | 0 .../common/cache/_backends/__init__.py | 0 .../common/cache/_backends/memory.py | 165 + neutron/openstack/common/cache/backends.py | 263 + neutron/openstack/common/cache/cache.py | 78 + neutron/openstack/common/context.py | 83 + neutron/openstack/common/db/__init__.py | 0 neutron/openstack/common/db/api.py | 162 + neutron/openstack/common/db/exception.py | 56 + neutron/openstack/common/db/options.py | 171 + .../common/db/sqlalchemy/__init__.py | 0 .../openstack/common/db/sqlalchemy/models.py | 119 + .../common/db/sqlalchemy/provision.py | 157 + .../openstack/common/db/sqlalchemy/session.py | 904 + .../common/db/sqlalchemy/test_base.py | 153 + .../openstack/common/db/sqlalchemy/utils.py | 647 + 
neutron/openstack/common/eventlet_backdoor.py | 144 + neutron/openstack/common/excutils.py | 113 + neutron/openstack/common/fileutils.py | 137 + neutron/openstack/common/fixture/__init__.py | 0 neutron/openstack/common/fixture/config.py | 45 + neutron/openstack/common/fixture/lockutils.py | 51 + neutron/openstack/common/fixture/mockpatch.py | 49 + .../openstack/common/fixture/moxstubout.py | 32 + neutron/openstack/common/gettextutils.py | 498 + neutron/openstack/common/importutils.py | 66 + neutron/openstack/common/jsonutils.py | 186 + neutron/openstack/common/local.py | 45 + neutron/openstack/common/lockutils.py | 303 + neutron/openstack/common/log.py | 626 + neutron/openstack/common/log_handler.py | 30 + neutron/openstack/common/loopingcall.py | 145 + .../openstack/common/middleware/__init__.py | 0 neutron/openstack/common/middleware/audit.py | 44 + neutron/openstack/common/middleware/base.py | 56 + .../common/middleware/catch_errors.py | 43 + .../common/middleware/correlation_id.py | 28 + neutron/openstack/common/middleware/debug.py | 60 + .../openstack/common/middleware/notifier.py | 126 + .../openstack/common/middleware/request_id.py | 41 + .../openstack/common/middleware/sizelimit.py | 81 + neutron/openstack/common/network_utils.py | 89 + neutron/openstack/common/periodic_task.py | 183 + neutron/openstack/common/policy.py | 780 + neutron/openstack/common/processutils.py | 248 + neutron/openstack/common/service.py | 512 + neutron/openstack/common/sslutils.py | 98 + neutron/openstack/common/strutils.py | 239 + neutron/openstack/common/systemd.py | 104 + neutron/openstack/common/threadgroup.py | 129 + neutron/openstack/common/timeutils.py | 210 + neutron/openstack/common/uuidutils.py | 37 + neutron/openstack/common/versionutils.py | 148 + neutron/plugins/__init__.py | 0 neutron/plugins/bigswitch/README | 14 + neutron/plugins/bigswitch/__init__.py | 16 + neutron/plugins/bigswitch/agent/__init__.py | 0 .../bigswitch/agent/restproxy_agent.py | 181 + 
neutron/plugins/bigswitch/config.py | 123 + neutron/plugins/bigswitch/db/__init__.py | 18 + .../plugins/bigswitch/db/consistency_db.py | 56 + .../plugins/bigswitch/db/porttracker_db.py | 53 + .../plugins/bigswitch/extensions/__init__.py | 18 + .../bigswitch/extensions/routerrule.py | 144 + neutron/plugins/bigswitch/plugin.py | 1115 ++ neutron/plugins/bigswitch/routerrule_db.py | 148 + neutron/plugins/bigswitch/servermanager.py | 595 + neutron/plugins/bigswitch/tests/__init__.py | 16 + .../plugins/bigswitch/tests/test_server.py | 188 + neutron/plugins/bigswitch/vcsversion.py | 27 + neutron/plugins/bigswitch/version.py | 53 + neutron/plugins/brocade/NeutronPlugin.py | 497 + neutron/plugins/brocade/README.md | 112 + neutron/plugins/brocade/__init__.py | 16 + neutron/plugins/brocade/db/__init__.py | 16 + neutron/plugins/brocade/db/models.py | 151 + neutron/plugins/brocade/nos/__init__.py | 16 + neutron/plugins/brocade/nos/fake_nosdriver.py | 117 + neutron/plugins/brocade/nos/nctemplates.py | 204 + neutron/plugins/brocade/nos/nosdriver.py | 233 + neutron/plugins/brocade/tests/README | 24 + neutron/plugins/brocade/tests/noscli.py | 93 + neutron/plugins/brocade/tests/nostest.py | 48 + neutron/plugins/brocade/vlanbm.py | 60 + neutron/plugins/cisco/README | 7 + neutron/plugins/cisco/__init__.py | 18 + neutron/plugins/cisco/common/__init__.py | 17 + .../plugins/cisco/common/cisco_constants.py | 111 + .../cisco/common/cisco_credentials_v2.py | 61 + .../plugins/cisco/common/cisco_exceptions.py | 236 + neutron/plugins/cisco/common/cisco_faults.py | 138 + neutron/plugins/cisco/common/config.py | 151 + neutron/plugins/cisco/db/__init__.py | 18 + neutron/plugins/cisco/db/n1kv_db_v2.py | 1621 ++ neutron/plugins/cisco/db/n1kv_models_v2.py | 185 + neutron/plugins/cisco/db/network_db_v2.py | 290 + neutron/plugins/cisco/db/network_models_v2.py | 56 + neutron/plugins/cisco/db/nexus_db_v2.py | 154 + neutron/plugins/cisco/db/nexus_models_v2.py | 46 + 
neutron/plugins/cisco/extensions/__init__.py | 16 + .../cisco/extensions/_credential_view.py | 52 + neutron/plugins/cisco/extensions/_qos_view.py | 52 + .../plugins/cisco/extensions/credential.py | 84 + neutron/plugins/cisco/extensions/n1kv.py | 106 + .../cisco/extensions/network_profile.py | 103 + .../cisco/extensions/policy_profile.py | 85 + neutron/plugins/cisco/extensions/qos.py | 156 + neutron/plugins/cisco/l2device_plugin_base.py | 175 + neutron/plugins/cisco/models/__init__.py | 17 + .../plugins/cisco/models/virt_phy_sw_v2.py | 553 + neutron/plugins/cisco/n1kv/__init__.py | 18 + neutron/plugins/cisco/n1kv/n1kv_client.py | 541 + .../plugins/cisco/n1kv/n1kv_neutron_plugin.py | 1438 ++ neutron/plugins/cisco/network_plugin.py | 176 + neutron/plugins/cisco/nexus/__init__.py | 21 + .../nexus/cisco_nexus_network_driver_v2.py | 196 + .../cisco/nexus/cisco_nexus_plugin_v2.py | 347 + .../cisco/nexus/cisco_nexus_snippets.py | 180 + neutron/plugins/cisco/test/__init__.py | 0 neutron/plugins/cisco/test/nexus/__init__.py | 19 + .../cisco/test/nexus/fake_nexus_driver.py | 101 + neutron/plugins/common/__init__.py | 16 + neutron/plugins/common/constants.py | 85 + neutron/plugins/common/utils.py | 69 + neutron/plugins/embrane/README | 9 + neutron/plugins/embrane/__init__.py | 18 + neutron/plugins/embrane/agent/__init__.py | 18 + neutron/plugins/embrane/agent/dispatcher.py | 134 + .../embrane/agent/operations/__init__.py | 18 + .../agent/operations/router_operations.py | 156 + neutron/plugins/embrane/base_plugin.py | 375 + neutron/plugins/embrane/common/__init__.py | 18 + neutron/plugins/embrane/common/config.py | 49 + neutron/plugins/embrane/common/constants.py | 72 + neutron/plugins/embrane/common/contexts.py | 40 + neutron/plugins/embrane/common/exceptions.py | 28 + neutron/plugins/embrane/common/operation.py | 51 + neutron/plugins/embrane/common/utils.py | 73 + neutron/plugins/embrane/l2base/__init__.py | 18 + .../plugins/embrane/l2base/fake/__init__.py | 18 + 
.../embrane/l2base/fake/fake_l2_plugin.py | 24 + .../embrane/l2base/fake/fakeplugin_support.py | 45 + .../embrane/l2base/openvswitch/__init__.py | 18 + .../l2base/openvswitch/openvswitch_support.py | 58 + .../plugins/embrane/l2base/support_base.py | 50 + .../embrane/l2base/support_exceptions.py | 25 + neutron/plugins/embrane/plugins/__init__.py | 18 + .../embrane/plugins/embrane_fake_plugin.py | 34 + .../embrane/plugins/embrane_ovs_plugin.py | 38 + neutron/plugins/hyperv/__init__.py | 16 + neutron/plugins/hyperv/agent/__init__.py | 16 + .../hyperv/agent/hyperv_neutron_agent.py | 475 + .../hyperv/agent/security_groups_driver.py | 146 + neutron/plugins/hyperv/agent/utils.py | 256 + neutron/plugins/hyperv/agent/utilsfactory.py | 72 + neutron/plugins/hyperv/agent/utilsv2.py | 439 + neutron/plugins/hyperv/agent_notifier_api.py | 80 + neutron/plugins/hyperv/common/__init__.py | 16 + neutron/plugins/hyperv/common/constants.py | 23 + neutron/plugins/hyperv/db.py | 219 + .../plugins/hyperv/hyperv_neutron_plugin.py | 333 + neutron/plugins/hyperv/model.py | 55 + neutron/plugins/hyperv/rpc_callbacks.py | 94 + neutron/plugins/ibm/README | 6 + neutron/plugins/ibm/__init__.py | 0 neutron/plugins/ibm/agent/__init__.py | 0 .../plugins/ibm/agent/sdnve_neutron_agent.py | 270 + neutron/plugins/ibm/common/__init__.py | 0 neutron/plugins/ibm/common/config.py | 74 + neutron/plugins/ibm/common/constants.py | 32 + neutron/plugins/ibm/common/exceptions.py | 28 + neutron/plugins/ibm/sdnve_api.py | 388 + neutron/plugins/ibm/sdnve_api_fake.py | 64 + neutron/plugins/ibm/sdnve_neutron_plugin.py | 666 + neutron/plugins/linuxbridge/README | 169 + neutron/plugins/linuxbridge/__init__.py | 0 neutron/plugins/linuxbridge/agent/__init__.py | 0 .../agent/linuxbridge_neutron_agent.py | 1026 + .../plugins/linuxbridge/common/__init__.py | 17 + neutron/plugins/linuxbridge/common/config.py | 78 + .../plugins/linuxbridge/common/constants.py | 42 + neutron/plugins/linuxbridge/db/__init__.py | 18 + 
.../plugins/linuxbridge/db/l2network_db_v2.py | 238 + .../linuxbridge/db/l2network_models_v2.py | 59 + .../plugins/linuxbridge/lb_neutron_plugin.py | 530 + neutron/plugins/metaplugin/README | 92 + neutron/plugins/metaplugin/__init__.py | 16 + neutron/plugins/metaplugin/common/__init__.py | 16 + neutron/plugins/metaplugin/common/config.py | 80 + neutron/plugins/metaplugin/meta_db_v2.py | 52 + neutron/plugins/metaplugin/meta_models_v2.py | 43 + .../plugins/metaplugin/meta_neutron_plugin.py | 419 + .../metaplugin/proxy_neutron_plugin.py | 136 + neutron/plugins/midonet/__init__.py | 17 + neutron/plugins/midonet/agent/__init__.py | 16 + .../plugins/midonet/agent/midonet_driver.py | 52 + neutron/plugins/midonet/common/__init__.py | 16 + neutron/plugins/midonet/common/config.py | 46 + neutron/plugins/midonet/common/net_util.py | 68 + neutron/plugins/midonet/midonet_lib.py | 696 + neutron/plugins/midonet/plugin.py | 1258 ++ neutron/plugins/ml2/README | 53 + neutron/plugins/ml2/__init__.py | 14 + neutron/plugins/ml2/common/__init__.py | 14 + neutron/plugins/ml2/common/exceptions.py | 23 + neutron/plugins/ml2/config.py | 36 + neutron/plugins/ml2/db.py | 136 + neutron/plugins/ml2/driver_api.py | 597 + neutron/plugins/ml2/driver_context.py | 135 + neutron/plugins/ml2/drivers/README.fslsdn | 102 + neutron/plugins/ml2/drivers/README.odl | 41 + neutron/plugins/ml2/drivers/__init__.py | 14 + neutron/plugins/ml2/drivers/brocade/README.md | 60 + .../plugins/ml2/drivers/brocade/__init__.py | 0 .../ml2/drivers/brocade/db/__init__.py | 0 .../plugins/ml2/drivers/brocade/db/models.py | 139 + .../ml2/drivers/brocade/mechanism_brocade.py | 385 + .../ml2/drivers/brocade/nos/__init__.py | 0 .../ml2/drivers/brocade/nos/nctemplates.py | 197 + .../ml2/drivers/brocade/nos/nosdriver.py | 236 + neutron/plugins/ml2/drivers/cisco/__init__.py | 14 + .../ml2/drivers/cisco/apic/__init__.py | 0 .../ml2/drivers/cisco/apic/apic_client.py | 416 + .../ml2/drivers/cisco/apic/apic_manager.py | 559 + 
.../ml2/drivers/cisco/apic/apic_model.py | 177 + .../plugins/ml2/drivers/cisco/apic/config.py | 82 + .../ml2/drivers/cisco/apic/exceptions.py | 59 + .../ml2/drivers/cisco/apic/mechanism_apic.py | 150 + .../plugins/ml2/drivers/cisco/nexus/README | 19 + .../ml2/drivers/cisco/nexus/__init__.py | 0 .../plugins/ml2/drivers/cisco/nexus/config.py | 65 + .../ml2/drivers/cisco/nexus/constants.py | 24 + .../ml2/drivers/cisco/nexus/exceptions.py | 84 + .../drivers/cisco/nexus/mech_cisco_nexus.py | 219 + .../ml2/drivers/cisco/nexus/nexus_db_v2.py | 143 + .../drivers/cisco/nexus/nexus_models_v2.py | 45 + .../cisco/nexus/nexus_network_driver.py | 171 + .../ml2/drivers/cisco/nexus/nexus_snippets.py | 200 + neutron/plugins/ml2/drivers/l2pop/README | 41 + neutron/plugins/ml2/drivers/l2pop/__init__.py | 18 + neutron/plugins/ml2/drivers/l2pop/config.py | 29 + .../plugins/ml2/drivers/l2pop/constants.py | 23 + neutron/plugins/ml2/drivers/l2pop/db.py | 83 + .../plugins/ml2/drivers/l2pop/mech_driver.py | 248 + neutron/plugins/ml2/drivers/l2pop/rpc.py | 86 + neutron/plugins/ml2/drivers/mech_agent.py | 149 + .../plugins/ml2/drivers/mech_arista/README | 9 + .../ml2/drivers/mech_arista/__init__.py | 14 + .../plugins/ml2/drivers/mech_arista/config.py | 70 + neutron/plugins/ml2/drivers/mech_arista/db.py | 402 + .../ml2/drivers/mech_arista/exceptions.py | 27 + .../drivers/mech_arista/mechanism_arista.py | 1014 + .../ml2/drivers/mech_bigswitch/__init__.py | 0 .../ml2/drivers/mech_bigswitch/driver.py | 130 + neutron/plugins/ml2/drivers/mech_hyperv.py | 57 + .../plugins/ml2/drivers/mech_linuxbridge.py | 57 + neutron/plugins/ml2/drivers/mech_ofagent.py | 61 + .../plugins/ml2/drivers/mech_openvswitch.py | 58 + .../plugins/ml2/drivers/mechanism_fslsdn.py | 288 + neutron/plugins/ml2/drivers/mechanism_ncs.py | 182 + neutron/plugins/ml2/drivers/mechanism_odl.py | 374 + neutron/plugins/ml2/drivers/mlnx/__init__.py | 0 neutron/plugins/ml2/drivers/mlnx/config.py | 32 + 
neutron/plugins/ml2/drivers/mlnx/mech_mlnx.py | 91 + neutron/plugins/ml2/drivers/type_flat.py | 131 + neutron/plugins/ml2/drivers/type_gre.py | 190 + neutron/plugins/ml2/drivers/type_local.py | 59 + neutron/plugins/ml2/drivers/type_tunnel.py | 132 + neutron/plugins/ml2/drivers/type_vlan.py | 267 + neutron/plugins/ml2/drivers/type_vxlan.py | 203 + neutron/plugins/ml2/managers.py | 480 + neutron/plugins/ml2/models.py | 76 + neutron/plugins/ml2/plugin.py | 791 + neutron/plugins/ml2/rpc.py | 239 + neutron/plugins/mlnx/README | 8 + neutron/plugins/mlnx/__init__.py | 16 + neutron/plugins/mlnx/agent/__init__.py | 16 + .../mlnx/agent/eswitch_neutron_agent.py | 438 + neutron/plugins/mlnx/agent/utils.py | 144 + neutron/plugins/mlnx/agent_notify_api.py | 67 + neutron/plugins/mlnx/common/__init__.py | 16 + neutron/plugins/mlnx/common/comm_utils.py | 66 + neutron/plugins/mlnx/common/config.py | 80 + neutron/plugins/mlnx/common/constants.py | 28 + neutron/plugins/mlnx/common/exceptions.py | 30 + neutron/plugins/mlnx/db/__init__.py | 16 + neutron/plugins/mlnx/db/mlnx_db_v2.py | 257 + neutron/plugins/mlnx/db/mlnx_models_v2.py | 86 + neutron/plugins/mlnx/mlnx_plugin.py | 512 + neutron/plugins/mlnx/rpc_callbacks.py | 119 + neutron/plugins/nec/README | 13 + neutron/plugins/nec/__init__.py | 15 + neutron/plugins/nec/agent/__init__.py | 15 + .../plugins/nec/agent/nec_neutron_agent.py | 252 + neutron/plugins/nec/common/__init__.py | 15 + neutron/plugins/nec/common/config.py | 84 + neutron/plugins/nec/common/constants.py | 24 + neutron/plugins/nec/common/exceptions.py | 85 + neutron/plugins/nec/common/ofc_client.py | 158 + neutron/plugins/nec/common/utils.py | 24 + neutron/plugins/nec/db/__init__.py | 15 + neutron/plugins/nec/db/api.py | 186 + neutron/plugins/nec/db/models.py | 71 + neutron/plugins/nec/db/packetfilter.py | 220 + neutron/plugins/nec/db/router.py | 92 + neutron/plugins/nec/drivers/__init__.py | 40 + neutron/plugins/nec/drivers/pfc.py | 374 + 
neutron/plugins/nec/drivers/trema.py | 250 + neutron/plugins/nec/extensions/__init__.py | 15 + .../plugins/nec/extensions/packetfilter.py | 208 + .../plugins/nec/extensions/router_provider.py | 60 + neutron/plugins/nec/nec_plugin.py | 781 + neutron/plugins/nec/nec_router.py | 358 + neutron/plugins/nec/ofc_driver_base.py | 105 + neutron/plugins/nec/ofc_manager.py | 201 + neutron/plugins/nec/packet_filter.py | 258 + neutron/plugins/nec/router_drivers.py | 224 + neutron/plugins/nuage/__init__.py | 0 neutron/plugins/nuage/common/__init__.py | 0 neutron/plugins/nuage/common/config.py | 47 + neutron/plugins/nuage/common/constants.py | 28 + neutron/plugins/nuage/common/exceptions.py | 24 + neutron/plugins/nuage/extensions/__init__.py | 0 .../plugins/nuage/extensions/netpartition.py | 107 + .../plugins/nuage/extensions/nuage_router.py | 73 + .../plugins/nuage/extensions/nuage_subnet.py | 59 + neutron/plugins/nuage/nuage_models.py | 102 + neutron/plugins/nuage/nuagedb.py | 202 + neutron/plugins/nuage/plugin.py | 1006 + neutron/plugins/ofagent/README | 21 + neutron/plugins/ofagent/__init__.py | 0 neutron/plugins/ofagent/agent/__init__.py | 0 .../ofagent/agent/ofa_neutron_agent.py | 1418 ++ neutron/plugins/ofagent/common/__init__.py | 0 neutron/plugins/ofagent/common/config.py | 33 + neutron/plugins/oneconvergence/README | 32 + neutron/plugins/oneconvergence/__init__.py | 0 .../plugins/oneconvergence/agent/__init__.py | 0 .../agent/nvsd_neutron_agent.py | 176 + .../plugins/oneconvergence/lib/__init__.py | 0 neutron/plugins/oneconvergence/lib/config.py | 57 + .../plugins/oneconvergence/lib/exception.py | 55 + neutron/plugins/oneconvergence/lib/nvsd_db.py | 45 + neutron/plugins/oneconvergence/lib/nvsdlib.py | 352 + .../oneconvergence/lib/plugin_helper.py | 186 + neutron/plugins/oneconvergence/plugin.py | 440 + neutron/plugins/openvswitch/README | 6 + neutron/plugins/openvswitch/__init__.py | 0 neutron/plugins/openvswitch/agent/__init__.py | 0 
.../openvswitch/agent/ovs_neutron_agent.py | 1517 ++ .../plugins/openvswitch/agent/xenapi/README | 16 + .../agent/xenapi/contrib/build-rpm.sh | 34 + .../SPECS/openstack-quantum-xen-plugins.spec | 30 + .../agent/xenapi/etc/xapi.d/plugins/netwrap | 72 + .../plugins/openvswitch/common/__init__.py | 15 + neutron/plugins/openvswitch/common/config.py | 94 + .../plugins/openvswitch/common/constants.py | 54 + neutron/plugins/openvswitch/ovs_db_v2.py | 396 + neutron/plugins/openvswitch/ovs_models_v2.py | 107 + .../plugins/openvswitch/ovs_neutron_plugin.py | 623 + neutron/plugins/plumgrid/README | 8 + neutron/plugins/plumgrid/__init__.py | 17 + neutron/plugins/plumgrid/common/__init__.py | 17 + neutron/plugins/plumgrid/common/exceptions.py | 30 + neutron/plugins/plumgrid/drivers/__init__.py | 16 + .../plugins/plumgrid/drivers/fake_plumlib.py | 99 + neutron/plugins/plumgrid/drivers/plumlib.py | 100 + .../plumgrid/plumgrid_plugin/__init__.py | 17 + .../plumgrid/plumgrid_plugin/plugin_ver.py | 19 + .../plumgrid_plugin/plumgrid_plugin.py | 604 + neutron/plugins/ryu/README | 22 + neutron/plugins/ryu/__init__.py | 0 neutron/plugins/ryu/agent/__init__.py | 0 .../plugins/ryu/agent/ryu_neutron_agent.py | 314 + neutron/plugins/ryu/common/__init__.py | 15 + neutron/plugins/ryu/common/config.py | 52 + neutron/plugins/ryu/db/__init__.py | 0 neutron/plugins/ryu/db/api_v2.py | 215 + neutron/plugins/ryu/db/models_v2.py | 41 + neutron/plugins/ryu/ryu_neutron_plugin.py | 269 + neutron/plugins/vmware/__init__.py | 3 + neutron/plugins/vmware/api_client/__init__.py | 29 + neutron/plugins/vmware/api_client/base.py | 249 + neutron/plugins/vmware/api_client/client.py | 143 + .../vmware/api_client/eventlet_client.py | 155 + .../vmware/api_client/eventlet_request.py | 240 + .../plugins/vmware/api_client/exception.py | 121 + neutron/plugins/vmware/api_client/request.py | 287 + neutron/plugins/vmware/api_client/version.py | 43 + neutron/plugins/vmware/check_nsx_config.py | 163 + 
neutron/plugins/vmware/common/__init__.py | 0 neutron/plugins/vmware/common/config.py | 198 + neutron/plugins/vmware/common/exceptions.py | 126 + neutron/plugins/vmware/common/nsx_utils.py | 249 + .../plugins/vmware/common/securitygroups.py | 134 + neutron/plugins/vmware/common/sync.py | 669 + neutron/plugins/vmware/common/utils.py | 69 + neutron/plugins/vmware/dbexts/__init__.py | 0 neutron/plugins/vmware/dbexts/db.py | 193 + .../vmware/dbexts/distributedrouter.py | 28 + neutron/plugins/vmware/dbexts/lsn_db.py | 131 + neutron/plugins/vmware/dbexts/maclearning.py | 78 + neutron/plugins/vmware/dbexts/models.py | 135 + neutron/plugins/vmware/dbexts/networkgw_db.py | 499 + neutron/plugins/vmware/dbexts/nsxrouter.py | 66 + neutron/plugins/vmware/dbexts/qos_db.py | 297 + .../plugins/vmware/dbexts/servicerouter.py | 27 + neutron/plugins/vmware/dbexts/vcns_db.py | 202 + neutron/plugins/vmware/dbexts/vcns_models.py | 90 + neutron/plugins/vmware/dhcp_meta/__init__.py | 16 + neutron/plugins/vmware/dhcp_meta/combined.py | 95 + neutron/plugins/vmware/dhcp_meta/constants.py | 28 + .../plugins/vmware/dhcp_meta/lsnmanager.py | 462 + neutron/plugins/vmware/dhcp_meta/migration.py | 180 + neutron/plugins/vmware/dhcp_meta/nsx.py | 321 + neutron/plugins/vmware/dhcp_meta/rpc.py | 222 + neutron/plugins/vmware/dhcpmeta_modes.py | 163 + neutron/plugins/vmware/extensions/__init__.py | 0 .../vmware/extensions/distributedrouter.py | 70 + neutron/plugins/vmware/extensions/lsn.py | 82 + .../plugins/vmware/extensions/maclearning.py | 61 + .../plugins/vmware/extensions/networkgw.py | 251 + neutron/plugins/vmware/extensions/nvp_qos.py | 40 + neutron/plugins/vmware/extensions/qos.py | 223 + .../vmware/extensions/servicerouter.py | 59 + neutron/plugins/vmware/nsx_cluster.py | 97 + neutron/plugins/vmware/nsxlib/__init__.py | 141 + neutron/plugins/vmware/nsxlib/l2gateway.py | 211 + neutron/plugins/vmware/nsxlib/lsn.py | 270 + neutron/plugins/vmware/nsxlib/queue.py | 71 + 
neutron/plugins/vmware/nsxlib/router.py | 689 + neutron/plugins/vmware/nsxlib/secgroup.py | 141 + neutron/plugins/vmware/nsxlib/switch.py | 397 + neutron/plugins/vmware/nsxlib/versioning.py | 66 + neutron/plugins/vmware/plugin.py | 22 + neutron/plugins/vmware/plugins/__init__.py | 0 neutron/plugins/vmware/plugins/base.py | 2528 +++ neutron/plugins/vmware/plugins/service.py | 1812 ++ neutron/plugins/vmware/shell/__init__.py | 41 + neutron/plugins/vmware/shell/commands.py | 67 + neutron/plugins/vmware/vshield/__init__.py | 16 + .../vmware/vshield/common/VcnsApiClient.py | 80 + .../plugins/vmware/vshield/common/__init__.py | 0 .../vmware/vshield/common/constants.py | 45 + .../vmware/vshield/common/exceptions.py | 70 + .../vmware/vshield/edge_appliance_driver.py | 667 + .../vmware/vshield/edge_firewall_driver.py | 354 + .../vmware/vshield/edge_ipsecvpn_driver.py | 150 + .../vshield/edge_loadbalancer_driver.py | 403 + .../plugins/vmware/vshield/tasks/__init__.py | 0 .../plugins/vmware/vshield/tasks/constants.py | 44 + neutron/plugins/vmware/vshield/tasks/tasks.py | 397 + neutron/plugins/vmware/vshield/vcns.py | 304 + neutron/plugins/vmware/vshield/vcns_driver.py | 53 + neutron/policy.py | 416 + neutron/quota.py | 334 + neutron/scheduler/__init__.py | 16 + neutron/scheduler/dhcp_agent_scheduler.py | 132 + neutron/scheduler/l3_agent_scheduler.py | 194 + neutron/server/__init__.py | 70 + neutron/service.py | 299 + neutron/services/__init__.py | 16 + neutron/services/firewall/__init__.py | 16 + neutron/services/firewall/agents/__init__.py | 16 + .../firewall/agents/firewall_agent_api.py | 85 + .../firewall/agents/l3reference/__init__.py | 16 + .../agents/l3reference/firewall_l3_agent.py | 295 + .../firewall/agents/varmour/__init__.py | 16 + .../firewall/agents/varmour/varmour_api.py | 147 + .../firewall/agents/varmour/varmour_router.py | 351 + .../firewall/agents/varmour/varmour_utils.py | 74 + neutron/services/firewall/drivers/__init__.py | 16 + 
.../services/firewall/drivers/fwaas_base.py | 100 + .../firewall/drivers/linux/__init__.py | 16 + .../firewall/drivers/linux/iptables_fwaas.py | 275 + .../firewall/drivers/varmour/__init__.py | 16 + .../firewall/drivers/varmour/varmour_fwaas.py | 207 + neutron/services/firewall/fwaas_plugin.py | 299 + neutron/services/l3_router/README | 30 + neutron/services/l3_router/__init__.py | 16 + neutron/services/l3_router/l3_apic.py | 135 + .../services/l3_router/l3_router_plugin.py | 98 + neutron/services/loadbalancer/__init__.py | 16 + .../services/loadbalancer/agent/__init__.py | 0 neutron/services/loadbalancer/agent/agent.py | 72 + .../services/loadbalancer/agent/agent_api.py | 100 + .../loadbalancer/agent/agent_device_driver.py | 98 + .../loadbalancer/agent/agent_manager.py | 338 + .../services/loadbalancer/agent_scheduler.py | 130 + neutron/services/loadbalancer/constants.py | 47 + .../services/loadbalancer/drivers/__init__.py | 17 + .../loadbalancer/drivers/abstract_driver.py | 130 + .../loadbalancer/drivers/common/__init__.py | 0 .../drivers/common/agent_driver_base.py | 445 + .../loadbalancer/drivers/embrane/README | 9 + .../loadbalancer/drivers/embrane/__init__.py | 0 .../drivers/embrane/agent/__init__.py | 0 .../drivers/embrane/agent/dispatcher.py | 108 + .../drivers/embrane/agent/lb_operations.py | 179 + .../loadbalancer/drivers/embrane/config.py | 53 + .../loadbalancer/drivers/embrane/constants.py | 74 + .../loadbalancer/drivers/embrane/db.py | 56 + .../loadbalancer/drivers/embrane/driver.py | 342 + .../loadbalancer/drivers/embrane/models.py | 30 + .../loadbalancer/drivers/embrane/poller.py | 71 + .../loadbalancer/drivers/haproxy/__init__.py | 17 + .../loadbalancer/drivers/haproxy/cfg.py | 238 + .../drivers/haproxy/namespace_driver.py | 396 + .../drivers/haproxy/plugin_driver.py | 23 + .../drivers/netscaler/__init__.py | 0 .../drivers/netscaler/ncc_client.py | 182 + .../drivers/netscaler/netscaler_driver.py | 489 + .../loadbalancer/drivers/radware/__init__.py | 
17 + .../loadbalancer/drivers/radware/driver.py | 1097 ++ .../drivers/radware/exceptions.py | 44 + neutron/services/loadbalancer/plugin.py | 326 + neutron/services/metering/__init__.py | 15 + neutron/services/metering/agents/__init__.py | 15 + .../metering/agents/metering_agent.py | 297 + neutron/services/metering/drivers/__init__.py | 15 + .../metering/drivers/abstract_driver.py | 51 + .../metering/drivers/iptables/__init__.py | 15 + .../drivers/iptables/iptables_driver.py | 284 + .../metering/drivers/noop/__init__.py | 15 + .../metering/drivers/noop/noop_driver.py | 45 + neutron/services/metering/metering_plugin.py | 74 + neutron/services/provider_configuration.py | 162 + neutron/services/service_base.py | 103 + neutron/services/vpn/__init__.py | 18 + neutron/services/vpn/agent.py | 148 + neutron/services/vpn/common/__init__.py | 16 + neutron/services/vpn/common/topics.py | 22 + .../services/vpn/device_drivers/__init__.py | 38 + .../device_drivers/cisco_csr_rest_client.py | 258 + .../vpn/device_drivers/cisco_ipsec.py | 858 + neutron/services/vpn/device_drivers/ipsec.py | 713 + .../template/openswan/ipsec.conf.template | 64 + .../template/openswan/ipsec.secret.template | 3 + neutron/services/vpn/plugin.py | 107 + .../services/vpn/service_drivers/__init__.py | 92 + .../vpn/service_drivers/cisco_csr_db.py | 239 + .../vpn/service_drivers/cisco_ipsec.py | 245 + neutron/services/vpn/service_drivers/ipsec.py | 156 + neutron/tests/__init__.py | 16 + neutron/tests/base.py | 216 + neutron/tests/etc/api-paste.ini.test | 8 + neutron/tests/etc/neutron.conf.test | 27 + .../tests/etc/rootwrap.d/neutron.test.filters | 12 + neutron/tests/fake_notifier.py | 50 + neutron/tests/functional/__init__.py | 15 + neutron/tests/functional/agent/__init__.py | 15 + .../tests/functional/agent/linux/__init__.py | 15 + neutron/tests/functional/agent/linux/base.py | 74 + .../agent/linux/test_async_process.py | 71 + .../agent/linux/test_ovsdb_monitor.py | 108 + 
neutron/tests/functional/sanity/__init__.py | 0 .../functional/sanity/test_ovs_sanity.py | 46 + neutron/tests/post_mortem_debug.py | 106 + neutron/tests/tools.py | 47 + neutron/tests/unit/__init__.py | 26 + .../unit/_test_extension_portbindings.py | 377 + neutron/tests/unit/_test_rootwrap_exec.py | 85 + neutron/tests/unit/agent/__init__.py | 15 + neutron/tests/unit/agent/linux/__init__.py | 15 + .../unit/agent/linux/test_async_process.py | 251 + .../tests/unit/agent/linux/test_ovs_lib.py | 967 + .../unit/agent/linux/test_ovsdb_monitor.py | 105 + .../tests/unit/agent/linux/test_polling.py | 116 + neutron/tests/unit/api/__init__.py | 0 neutron/tests/unit/api/rpc/__init__.py | 0 .../unit/api/rpc/agentnotifiers/__init__.py | 0 .../agentnotifiers/test_dhcp_rpc_agent_api.py | 154 + neutron/tests/unit/bigswitch/__init__.py | 16 + .../unit/bigswitch/etc/restproxy.ini.test | 44 + .../unit/bigswitch/etc/ssl/ca_certs/README | 2 + .../unit/bigswitch/etc/ssl/combined/README | 2 + .../unit/bigswitch/etc/ssl/host_certs/README | 2 + neutron/tests/unit/bigswitch/fake_server.py | 185 + .../unit/bigswitch/test_agent_scheduler.py | 33 + neutron/tests/unit/bigswitch/test_base.py | 74 + .../tests/unit/bigswitch/test_capabilities.py | 84 + .../unit/bigswitch/test_restproxy_agent.py | 188 + .../unit/bigswitch/test_restproxy_plugin.py | 316 + .../tests/unit/bigswitch/test_router_db.py | 554 + .../unit/bigswitch/test_security_groups.py | 47 + .../unit/bigswitch/test_servermanager.py | 467 + neutron/tests/unit/bigswitch/test_ssl.py | 250 + neutron/tests/unit/brocade/__init__.py | 17 + neutron/tests/unit/brocade/test_brocade_db.py | 100 + .../tests/unit/brocade/test_brocade_plugin.py | 74 + .../tests/unit/brocade/test_brocade_vlan.py | 73 + neutron/tests/unit/cisco/__init__.py | 16 + neutron/tests/unit/cisco/n1kv/__init__.py | 18 + neutron/tests/unit/cisco/n1kv/fake_client.py | 119 + neutron/tests/unit/cisco/n1kv/test_n1kv_db.py | 870 + .../tests/unit/cisco/n1kv/test_n1kv_plugin.py | 709 + 
neutron/tests/unit/cisco/test_config.py | 72 + neutron/tests/unit/cisco/test_network_db.py | 291 + .../tests/unit/cisco/test_network_plugin.py | 1186 ++ neutron/tests/unit/cisco/test_nexus_db.py | 239 + neutron/tests/unit/cisco/test_nexus_plugin.py | 301 + neutron/tests/unit/cisco/test_plugin_model.py | 63 + neutron/tests/unit/database_stubs.py | 188 + neutron/tests/unit/db/__init__.py | 15 + neutron/tests/unit/db/firewall/__init__.py | 15 + .../unit/db/firewall/test_db_firewall.py | 1055 + .../tests/unit/db/loadbalancer/__init__.py | 15 + .../db/loadbalancer/test_db_loadbalancer.py | 1572 ++ neutron/tests/unit/db/metering/__init__.py | 15 + .../unit/db/metering/test_db_metering.py | 291 + neutron/tests/unit/db/test_agent_db.py | 86 + neutron/tests/unit/db/test_quota_db.py | 143 + neutron/tests/unit/db/vpn/__init__.py | 17 + neutron/tests/unit/db/vpn/test_db_vpnaas.py | 1670 ++ neutron/tests/unit/dummy_plugin.py | 139 + neutron/tests/unit/embrane/__init__.py | 18 + .../unit/embrane/test_embrane_defaults.py | 31 + .../unit/embrane/test_embrane_l3_plugin.py | 41 + .../embrane/test_embrane_neutron_plugin.py | 82 + neutron/tests/unit/extension_stubs.py | 77 + neutron/tests/unit/extensions/__init__.py | 15 + .../unit/extensions/extendedattribute.py | 58 + .../unit/extensions/extensionattribute.py | 110 + neutron/tests/unit/extensions/foxinsocks.py | 110 + neutron/tests/unit/extensions/v2attributes.py | 48 + neutron/tests/unit/hyperv/__init__.py | 16 + .../unit/hyperv/test_hyperv_neutron_agent.py | 221 + .../unit/hyperv/test_hyperv_neutron_plugin.py | 69 + .../tests/unit/hyperv/test_hyperv_rpcapi.py | 125 + .../test_hyperv_security_groups_driver.py | 189 + .../unit/hyperv/test_hyperv_utilsfactory.py | 54 + .../tests/unit/hyperv/test_hyperv_utilsv2.py | 519 + neutron/tests/unit/ibm/__init__.py | 0 neutron/tests/unit/ibm/test_sdnve_agent.py | 118 + neutron/tests/unit/ibm/test_sdnve_api.py | 145 + neutron/tests/unit/ibm/test_sdnve_plugin.py | 126 + 
neutron/tests/unit/linuxbridge/__init__.py | 16 + .../unit/linuxbridge/test_agent_scheduler.py | 34 + .../tests/unit/linuxbridge/test_defaults.py | 42 + neutron/tests/unit/linuxbridge/test_lb_db.py | 172 + .../unit/linuxbridge/test_lb_neutron_agent.py | 1054 + .../linuxbridge/test_lb_security_group.py | 99 + .../linuxbridge/test_linuxbridge_plugin.py | 132 + neutron/tests/unit/linuxbridge/test_rpcapi.py | 132 + neutron/tests/unit/metaplugin/__init__.py | 16 + neutron/tests/unit/metaplugin/fake_plugin.py | 79 + neutron/tests/unit/metaplugin/test_basic.py | 78 + .../tests/unit/metaplugin/test_metaplugin.py | 404 + neutron/tests/unit/midonet/__init__.py | 17 + .../tests/unit/midonet/etc/midonet.ini.test | 16 + neutron/tests/unit/midonet/mock_lib.py | 265 + .../tests/unit/midonet/test_midonet_driver.py | 55 + .../tests/unit/midonet/test_midonet_lib.py | 189 + .../tests/unit/midonet/test_midonet_plugin.py | 218 + neutron/tests/unit/ml2/__init__.py | 14 + neutron/tests/unit/ml2/_test_mech_agent.py | 218 + neutron/tests/unit/ml2/drivers/__init__.py | 14 + .../unit/ml2/drivers/brocade/__init__.py | 0 .../brocade/test_brocade_mechanism_driver.py | 69 + .../tests/unit/ml2/drivers/cisco/__init__.py | 0 .../unit/ml2/drivers/cisco/apic/__init__.py | 0 .../cisco/apic/test_cisco_apic_client.py | 272 + .../cisco/apic/test_cisco_apic_common.py | 225 + .../cisco/apic/test_cisco_apic_manager.py | 698 + .../apic/test_cisco_apic_mechanism_driver.py | 226 + .../unit/ml2/drivers/cisco/nexus/__init__.py | 0 .../drivers/cisco/nexus/test_cisco_config.py | 71 + .../drivers/cisco/nexus/test_cisco_mech.py | 715 + .../drivers/cisco/nexus/test_cisco_nexus.py | 201 + .../cisco/nexus/test_cisco_nexus_db.py | 206 + .../unit/ml2/drivers/mechanism_bulkless.py | 23 + .../unit/ml2/drivers/mechanism_logger.py | 120 + .../tests/unit/ml2/drivers/mechanism_test.py | 171 + .../drivers/test_arista_mechanism_driver.py | 726 + .../unit/ml2/drivers/test_bigswitch_mech.py | 144 + 
.../unit/ml2/drivers/test_l2population.py | 724 + .../tests/unit/ml2/drivers/test_mech_mlnx.py | 139 + .../unit/ml2/drivers/test_ofagent_mech.py | 74 + .../tests/unit/ml2/test_agent_scheduler.py | 36 + neutron/tests/unit/ml2/test_mech_hyperv.py | 65 + .../tests/unit/ml2/test_mech_linuxbridge.py | 74 + .../tests/unit/ml2/test_mech_openvswitch.py | 74 + .../tests/unit/ml2/test_mechanism_fslsdn.py | 293 + neutron/tests/unit/ml2/test_mechanism_ncs.py | 50 + neutron/tests/unit/ml2/test_mechanism_odl.py | 117 + neutron/tests/unit/ml2/test_ml2_plugin.py | 477 + neutron/tests/unit/ml2/test_port_binding.py | 136 + neutron/tests/unit/ml2/test_rpcapi.py | 109 + neutron/tests/unit/ml2/test_security_group.py | 106 + neutron/tests/unit/ml2/test_type_flat.py | 98 + neutron/tests/unit/ml2/test_type_gre.py | 208 + neutron/tests/unit/ml2/test_type_local.py | 56 + neutron/tests/unit/ml2/test_type_vlan.py | 187 + neutron/tests/unit/ml2/test_type_vxlan.py | 227 + neutron/tests/unit/mlnx/__init__.py | 16 + .../tests/unit/mlnx/test_agent_scheduler.py | 34 + neutron/tests/unit/mlnx/test_defaults.py | 39 + .../tests/unit/mlnx/test_mlnx_comm_utils.py | 139 + neutron/tests/unit/mlnx/test_mlnx_db.py | 181 + .../unit/mlnx/test_mlnx_neutron_agent.py | 156 + neutron/tests/unit/mlnx/test_mlnx_plugin.py | 116 + .../unit/mlnx/test_mlnx_plugin_config.py | 89 + .../unit/mlnx/test_mlnx_security_group.py | 100 + neutron/tests/unit/mlnx/test_rpcapi.py | 155 + neutron/tests/unit/nec/__init__.py | 15 + neutron/tests/unit/nec/fake_ofc_manager.py | 106 + neutron/tests/unit/nec/stub_ofc_driver.py | 293 + .../tests/unit/nec/test_agent_scheduler.py | 118 + neutron/tests/unit/nec/test_config.py | 44 + neutron/tests/unit/nec/test_db.py | 176 + neutron/tests/unit/nec/test_nec_agent.py | 366 + neutron/tests/unit/nec/test_nec_plugin.py | 930 + neutron/tests/unit/nec/test_ofc_client.py | 179 + neutron/tests/unit/nec/test_ofc_manager.py | 297 + neutron/tests/unit/nec/test_packet_filter.py | 714 + 
neutron/tests/unit/nec/test_pfc_driver.py | 705 + neutron/tests/unit/nec/test_portbindings.py | 350 + neutron/tests/unit/nec/test_router.py | 45 + neutron/tests/unit/nec/test_security_group.py | 101 + neutron/tests/unit/nec/test_trema_driver.py | 353 + neutron/tests/unit/nec/test_utils.py | 31 + neutron/tests/unit/notifiers/__init__.py | 0 .../unit/notifiers/test_notifiers_nova.py | 305 + neutron/tests/unit/nuage/__init__.py | 0 neutron/tests/unit/nuage/fake_nuageclient.py | 115 + neutron/tests/unit/nuage/test_netpartition.py | 100 + neutron/tests/unit/nuage/test_nuage_plugin.py | 283 + neutron/tests/unit/ofagent/__init__.py | 0 neutron/tests/unit/ofagent/fake_oflib.py | 113 + .../tests/unit/ofagent/test_ofa_defaults.py | 25 + .../unit/ofagent/test_ofa_neutron_agent.py | 1023 + neutron/tests/unit/oneconvergence/__init__.py | 0 .../unit/oneconvergence/test_nvsd_agent.py | 177 + .../unit/oneconvergence/test_nvsd_plugin.py | 152 + .../tests/unit/oneconvergence/test_nvsdlib.py | 261 + .../unit/oneconvergence/test_plugin_helper.py | 60 + .../oneconvergence/test_security_group.py | 157 + neutron/tests/unit/openvswitch/__init__.py | 16 + .../unit/openvswitch/test_agent_scheduler.py | 1245 ++ .../openvswitch/test_openvswitch_plugin.py | 88 + neutron/tests/unit/openvswitch/test_ovs_db.py | 322 + .../unit/openvswitch/test_ovs_defaults.py | 35 + .../openvswitch/test_ovs_neutron_agent.py | 954 + .../tests/unit/openvswitch/test_ovs_rpcapi.py | 123 + .../openvswitch/test_ovs_security_group.py | 104 + .../tests/unit/openvswitch/test_ovs_tunnel.py | 603 + neutron/tests/unit/plumgrid/__init__.py | 17 + .../unit/plumgrid/test_plumgrid_plugin.py | 171 + neutron/tests/unit/ryu/__init__.py | 16 + neutron/tests/unit/ryu/fake_ryu.py | 42 + neutron/tests/unit/ryu/test_defaults.py | 33 + neutron/tests/unit/ryu/test_ryu_agent.py | 651 + neutron/tests/unit/ryu/test_ryu_db.py | 57 + neutron/tests/unit/ryu/test_ryu_plugin.py | 51 + .../tests/unit/ryu/test_ryu_security_group.py | 92 + 
neutron/tests/unit/services/__init__.py | 17 + .../tests/unit/services/firewall/__init__.py | 15 + .../unit/services/firewall/agents/__init__.py | 15 + .../firewall/agents/l3reference/__init__.py | 15 + .../l3reference/test_firewall_l3_agent.py | 391 + .../agents/test_firewall_agent_api.py | 105 + .../firewall/agents/varmour/__init__.py | 16 + .../agents/varmour/test_varmour_router.py | 322 + .../services/firewall/drivers/__init__.py | 15 + .../firewall/drivers/linux/__init__.py | 15 + .../drivers/linux/test_iptables_fwaas.py | 218 + .../firewall/drivers/varmour/__init__.py | 16 + .../drivers/varmour/test_varmour_fwaas.py | 290 + .../services/firewall/test_fwaas_plugin.py | 401 + .../tests/unit/services/l3_router/__init__.py | 0 .../services/l3_router/test_l3_apic_plugin.py | 134 + .../unit/services/loadbalancer/__init__.py | 17 + .../services/loadbalancer/agent/__init__.py | 0 .../services/loadbalancer/agent/test_agent.py | 51 + .../loadbalancer/agent/test_agent_manager.py | 371 + .../services/loadbalancer/agent/test_api.py | 166 + .../services/loadbalancer/drivers/__init__.py | 17 + .../loadbalancer/drivers/embrane/__init__.py | 0 .../drivers/embrane/test_embrane_defaults.py | 30 + .../drivers/embrane/test_plugin_driver.py | 93 + .../loadbalancer/drivers/haproxy/__init__.py | 17 + .../loadbalancer/drivers/haproxy/test_cfg.py | 228 + .../drivers/haproxy/test_namespace_driver.py | 550 + .../drivers/netscaler/__init__.py | 0 .../drivers/netscaler/test_ncc_client.py | 204 + .../netscaler/test_netscaler_driver.py | 802 + .../loadbalancer/drivers/radware/__init__.py | 15 + .../drivers/radware/test_plugin_driver.py | 961 + .../drivers/test_agent_driver_base.py | 753 + .../loadbalancer/test_agent_scheduler.py | 222 + .../loadbalancer/test_loadbalancer_plugin.py | 464 + .../test_loadbalancer_quota_ext.py | 168 + .../tests/unit/services/metering/__init__.py | 15 + .../services/metering/drivers/__init__.py | 15 + .../metering/drivers/test_iptables_driver.py | 408 + 
.../services/metering/test_metering_agent.py | 160 + .../services/metering/test_metering_plugin.py | 448 + neutron/tests/unit/services/vpn/__init__.py | 17 + .../services/vpn/device_drivers/__init__.py | 16 + .../vpn/device_drivers/cisco_csr_mock.py | 579 + .../device_drivers/notest_cisco_csr_rest.py | 1346 ++ .../vpn/device_drivers/test_cisco_ipsec.py | 1709 ++ .../services/vpn/device_drivers/test_ipsec.py | 258 + .../services/vpn/service_drivers/__init__.py | 16 + .../vpn/service_drivers/test_cisco_ipsec.py | 365 + .../vpn/service_drivers/test_ipsec.py | 91 + .../tests/unit/services/vpn/test_vpn_agent.py | 196 + .../services/vpn/test_vpnaas_driver_plugin.py | 160 + .../services/vpn/test_vpnaas_extension.py | 530 + neutron/tests/unit/test_agent_config.py | 44 + neutron/tests/unit/test_agent_ext_plugin.py | 259 + neutron/tests/unit/test_agent_linux_utils.py | 162 + .../tests/unit/test_agent_netns_cleanup.py | 258 + neutron/tests/unit/test_agent_ovs_cleanup.py | 93 + neutron/tests/unit/test_agent_rpc.py | 113 + neutron/tests/unit/test_api_api_common.py | 99 + neutron/tests/unit/test_api_v2.py | 1541 ++ neutron/tests/unit/test_api_v2_extension.py | 125 + neutron/tests/unit/test_api_v2_resource.py | 372 + neutron/tests/unit/test_attributes.py | 800 + neutron/tests/unit/test_auth.py | 99 + neutron/tests/unit/test_common_log.py | 70 + neutron/tests/unit/test_common_utils.py | 383 + neutron/tests/unit/test_config.py | 55 + neutron/tests/unit/test_db_migration.py | 184 + neutron/tests/unit/test_db_plugin.py | 3982 ++++ neutron/tests/unit/test_db_rpc_base.py | 233 + neutron/tests/unit/test_debug_commands.py | 363 + neutron/tests/unit/test_dhcp_agent.py | 1466 ++ neutron/tests/unit/test_dhcp_scheduler.py | 90 + .../test_extension_allowedaddresspairs.py | 262 + .../tests/unit/test_extension_ext_gw_mode.py | 421 + neutron/tests/unit/test_extension_ext_net.py | 176 + .../unit/test_extension_extended_attribute.py | 156 + .../unit/test_extension_extradhcpopts.py | 266 + 
.../tests/unit/test_extension_extraroute.py | 500 + neutron/tests/unit/test_extension_firewall.py | 495 + neutron/tests/unit/test_extension_pnet.py | 161 + .../tests/unit/test_extension_portsecurity.py | 392 + .../unit/test_extension_security_group.py | 1431 ++ neutron/tests/unit/test_extensions.py | 685 + neutron/tests/unit/test_hacking.py | 43 + neutron/tests/unit/test_iptables_firewall.py | 1225 ++ neutron/tests/unit/test_iptables_manager.py | 705 + neutron/tests/unit/test_ipv6.py | 50 + neutron/tests/unit/test_l3_agent.py | 1379 ++ neutron/tests/unit/test_l3_plugin.py | 2070 ++ neutron/tests/unit/test_l3_schedulers.py | 206 + neutron/tests/unit/test_linux_daemon.py | 211 + neutron/tests/unit/test_linux_dhcp.py | 1252 ++ .../tests/unit/test_linux_external_process.py | 202 + neutron/tests/unit/test_linux_interface.py | 620 + neutron/tests/unit/test_linux_ip_lib.py | 860 + neutron/tests/unit/test_metadata_agent.py | 581 + .../unit/test_metadata_namespace_proxy.py | 353 + neutron/tests/unit/test_neutron_context.py | 136 + neutron/tests/unit/test_neutron_manager.py | 146 + neutron/tests/unit/test_policy.py | 553 + neutron/tests/unit/test_post_mortem_debug.py | 101 + .../tests/unit/test_provider_configuration.py | 201 + neutron/tests/unit/test_quota_ext.py | 432 + .../tests/unit/test_routerserviceinsertion.py | 490 + .../tests/unit/test_security_groups_rpc.py | 2047 ++ neutron/tests/unit/test_servicetype.py | 241 + neutron/tests/unit/test_wsgi.py | 1136 ++ neutron/tests/unit/testlib_api.py | 84 + neutron/tests/unit/vmware/__init__.py | 53 + .../tests/unit/vmware/apiclient/__init__.py | 0 neutron/tests/unit/vmware/apiclient/fake.py | 660 + .../unit/vmware/apiclient/test_api_common.py | 35 + .../apiclient/test_api_eventlet_request.py | 331 + neutron/tests/unit/vmware/db/__init__.py | 0 neutron/tests/unit/vmware/db/test_lsn_db.py | 103 + neutron/tests/unit/vmware/db/test_nsx_db.py | 86 + .../unit/vmware/etc/fake_get_gwservice.json | 15 + 
.../unit/vmware/etc/fake_get_lqueue.json | 11 + .../unit/vmware/etc/fake_get_lrouter.json | 29 + .../vmware/etc/fake_get_lrouter_lport.json | 12 + .../etc/fake_get_lrouter_lport_att.json | 11 + .../unit/vmware/etc/fake_get_lrouter_nat.json | 6 + .../unit/vmware/etc/fake_get_lswitch.json | 12 + .../vmware/etc/fake_get_lswitch_lport.json | 28 + .../etc/fake_get_lswitch_lport_att.json | 7 + .../etc/fake_get_lswitch_lport_status.json | 23 + .../vmware/etc/fake_get_security_profile.json | 10 + .../unit/vmware/etc/fake_post_gwservice.json | 13 + .../unit/vmware/etc/fake_post_lqueue.json | 11 + .../unit/vmware/etc/fake_post_lrouter.json | 23 + .../vmware/etc/fake_post_lrouter_lport.json | 10 + .../vmware/etc/fake_post_lrouter_nat.json | 6 + .../unit/vmware/etc/fake_post_lswitch.json | 12 + .../vmware/etc/fake_post_lswitch_lport.json | 17 + .../etc/fake_post_security_profile.json | 10 + .../etc/fake_put_lrouter_lport_att.json | 12 + .../etc/fake_put_lswitch_lport_att.json | 11 + .../tests/unit/vmware/etc/neutron.conf.test | 26 + .../unit/vmware/etc/nsx.ini.agentless.test | 17 + .../tests/unit/vmware/etc/nsx.ini.basic.test | 5 + .../unit/vmware/etc/nsx.ini.combined.test | 17 + .../tests/unit/vmware/etc/nsx.ini.full.test | 13 + neutron/tests/unit/vmware/etc/nsx.ini.test | 7 + .../tests/unit/vmware/etc/nvp.ini.full.test | 13 + neutron/tests/unit/vmware/etc/vcns.ini.test | 9 + .../tests/unit/vmware/extensions/__init__.py | 0 .../vmware/extensions/test_addresspairs.py | 22 + .../vmware/extensions/test_maclearning.py | 139 + .../unit/vmware/extensions/test_networkgw.py | 1074 + .../vmware/extensions/test_portsecurity.py | 47 + .../vmware/extensions/test_providernet.py | 163 + .../unit/vmware/extensions/test_qosqueues.py | 273 + neutron/tests/unit/vmware/nsxlib/__init__.py | 0 neutron/tests/unit/vmware/nsxlib/base.py | 88 + .../unit/vmware/nsxlib/test_l2gateway.py | 296 + neutron/tests/unit/vmware/nsxlib/test_lsn.py | 370 + .../tests/unit/vmware/nsxlib/test_queue.py | 69 + 
.../tests/unit/vmware/nsxlib/test_router.py | 922 + .../tests/unit/vmware/nsxlib/test_secgroup.py | 140 + .../tests/unit/vmware/nsxlib/test_switch.py | 314 + .../unit/vmware/nsxlib/test_versioning.py | 58 + .../tests/unit/vmware/test_agent_scheduler.py | 65 + neutron/tests/unit/vmware/test_dhcpmeta.py | 1429 ++ neutron/tests/unit/vmware/test_nsx_opts.py | 253 + neutron/tests/unit/vmware/test_nsx_plugin.py | 1181 ++ neutron/tests/unit/vmware/test_nsx_sync.py | 712 + neutron/tests/unit/vmware/test_nsx_utils.py | 325 + neutron/tests/unit/vmware/vshield/__init__.py | 0 .../tests/unit/vmware/vshield/fake_vcns.py | 600 + .../unit/vmware/vshield/test_edge_router.py | 308 + .../vmware/vshield/test_firewall_driver.py | 375 + .../unit/vmware/vshield/test_fwaas_plugin.py | 697 + .../unit/vmware/vshield/test_lbaas_plugin.py | 532 + .../vshield/test_loadbalancer_driver.py | 340 + .../unit/vmware/vshield/test_vcns_driver.py | 587 + .../unit/vmware/vshield/test_vpnaas_plugin.py | 417 + neutron/tests/var/ca.crt | 35 + neutron/tests/var/certandkey.pem | 81 + neutron/tests/var/certificate.crt | 30 + neutron/tests/var/privatekey.key | 51 + neutron/version.py | 19 + neutron/wsgi.py | 1303 ++ openstack-common.conf | 35 + requirements.txt | 29 + run_tests.sh | 226 + setup.cfg | 201 + setup.py | 30 + test-requirements.txt | 18 + tools/check_i18n.py | 155 + tools/check_i18n_test_case.txt | 67 + tools/clean.sh | 5 + tools/i18n_cfg.py | 97 + tools/install_venv.py | 74 + tools/install_venv_common.py | 174 + tools/with_venv.sh | 21 + tox.ini | 73 + 1426 files changed, 278589 insertions(+) create mode 100644 .coveragerc create mode 100644 .gitignore create mode 100644 .mailmap create mode 100644 .pylintrc create mode 100644 .testr.conf create mode 100644 HACKING.rst create mode 100644 LICENSE create mode 100644 MANIFEST.in create mode 100644 README.rst create mode 100644 TESTING.rst create mode 100644 babel.cfg create mode 100755 bin/neutron-rootwrap create mode 100755 
bin/neutron-rootwrap-xen-dom0 create mode 100644 doc/Makefile create mode 100644 doc/pom.xml create mode 100644 doc/source/conf.py create mode 100644 doc/source/devref/advanced_services.rst create mode 100644 doc/source/devref/api_extensions.rst create mode 100644 doc/source/devref/api_layer.rst create mode 100644 doc/source/devref/common.rst create mode 100644 doc/source/devref/db_layer.rst create mode 100644 doc/source/devref/development.environment.rst create mode 100644 doc/source/devref/fwaas.rst create mode 100644 doc/source/devref/index.rst create mode 100644 doc/source/devref/l2_agents.rst create mode 100644 doc/source/devref/layer3.rst create mode 100644 doc/source/devref/lbaas.rst create mode 100644 doc/source/devref/linuxbridge_agent.rst create mode 100644 doc/source/devref/openvswitch_agent.rst create mode 100644 doc/source/devref/plugin-api.rst create mode 100644 doc/source/devref/rpc_api.rst create mode 100644 doc/source/devref/security_group_api.rst create mode 100644 doc/source/devref/vpnaas.rst create mode 100644 doc/source/docbkx/docbkx-example/README create mode 100644 doc/source/docbkx/docbkx-example/pom.xml create mode 100644 doc/source/docbkx/docbkx-example/src/docbkx/example.xml create mode 100644 doc/source/docbkx/docbkx-example/src/docbkx/figures/example.sdx create mode 100644 doc/source/docbkx/docbkx-example/src/docbkx/figures/example.svg create mode 100644 doc/source/docbkx/quantum-api-1.0/common.ent create mode 100644 doc/source/docbkx/quantum-api-1.0/figures/Arrow_east.svg create mode 100644 doc/source/docbkx/quantum-api-1.0/figures/Check_mark_23x20_02.svg create mode 100644 doc/source/docbkx/quantum-api-1.0/js/shjs/sh_java.js create mode 100644 doc/source/docbkx/quantum-api-1.0/js/shjs/sh_javascript.js create mode 100644 doc/source/docbkx/quantum-api-1.0/js/shjs/sh_main.js create mode 100644 doc/source/docbkx/quantum-api-1.0/js/shjs/sh_xml.js create mode 100644 doc/source/docbkx/quantum-api-1.0/js/trc/schema/controller.js create mode 
100644 doc/source/docbkx/quantum-api-1.0/js/trc/schema/layoutManager.js create mode 100644 doc/source/docbkx/quantum-api-1.0/js/trc/schema/sampleManager.js create mode 100644 doc/source/docbkx/quantum-api-1.0/js/trc/util.js create mode 100644 doc/source/docbkx/quantum-api-1.0/quantum-api-guide.xml create mode 100644 doc/source/docbkx/quantum-api-1.0/samples/att-get-res-none.json create mode 100644 doc/source/docbkx/quantum-api-1.0/samples/att-get-res-none.xml create mode 100644 doc/source/docbkx/quantum-api-1.0/samples/att-get-res.json create mode 100644 doc/source/docbkx/quantum-api-1.0/samples/att-get-res.xml create mode 100644 doc/source/docbkx/quantum-api-1.0/samples/att-put-req.json create mode 100644 doc/source/docbkx/quantum-api-1.0/samples/att-put-req.xml create mode 100644 doc/source/docbkx/quantum-api-1.0/samples/extensions.json create mode 100644 doc/source/docbkx/quantum-api-1.0/samples/extensions.xml create mode 100644 doc/source/docbkx/quantum-api-1.0/samples/fault.json create mode 100644 doc/source/docbkx/quantum-api-1.0/samples/fault.xml create mode 100644 doc/source/docbkx/quantum-api-1.0/samples/network-get-detail-res.json create mode 100644 doc/source/docbkx/quantum-api-1.0/samples/network-get-detail-res.xml create mode 100644 doc/source/docbkx/quantum-api-1.0/samples/network-get-res.json create mode 100644 doc/source/docbkx/quantum-api-1.0/samples/network-get-res.xml create mode 100644 doc/source/docbkx/quantum-api-1.0/samples/network-post-req.json create mode 100644 doc/source/docbkx/quantum-api-1.0/samples/network-post-req.xml create mode 100644 doc/source/docbkx/quantum-api-1.0/samples/network-post-res.json create mode 100644 doc/source/docbkx/quantum-api-1.0/samples/network-post-res.xml create mode 100644 doc/source/docbkx/quantum-api-1.0/samples/networks-get-detail-res.json create mode 100644 doc/source/docbkx/quantum-api-1.0/samples/networks-get-detail-res.xml create mode 100644 
doc/source/docbkx/quantum-api-1.0/samples/networks-get-res.json create mode 100644 doc/source/docbkx/quantum-api-1.0/samples/networks-get-res.xml create mode 100644 doc/source/docbkx/quantum-api-1.0/samples/networks-post-req.json create mode 100644 doc/source/docbkx/quantum-api-1.0/samples/networks-post-req.xml create mode 100644 doc/source/docbkx/quantum-api-1.0/samples/networks-post-res.json create mode 100644 doc/source/docbkx/quantum-api-1.0/samples/networks-post-res.xml create mode 100644 doc/source/docbkx/quantum-api-1.0/samples/notfound.json create mode 100644 doc/source/docbkx/quantum-api-1.0/samples/notfound.xml create mode 100644 doc/source/docbkx/quantum-api-1.0/samples/notimplemented.json create mode 100644 doc/source/docbkx/quantum-api-1.0/samples/notimplemented.xml create mode 100644 doc/source/docbkx/quantum-api-1.0/samples/port-get-detail-res.json create mode 100644 doc/source/docbkx/quantum-api-1.0/samples/port-get-detail-res.xml create mode 100644 doc/source/docbkx/quantum-api-1.0/samples/port-get-res.json create mode 100644 doc/source/docbkx/quantum-api-1.0/samples/port-get-res.xml create mode 100644 doc/source/docbkx/quantum-api-1.0/samples/port-post-req.json create mode 100644 doc/source/docbkx/quantum-api-1.0/samples/port-post-req.xml create mode 100644 doc/source/docbkx/quantum-api-1.0/samples/port-post-res.json create mode 100644 doc/source/docbkx/quantum-api-1.0/samples/port-post-res.xml create mode 100644 doc/source/docbkx/quantum-api-1.0/samples/ports-get-detail-res.json create mode 100644 doc/source/docbkx/quantum-api-1.0/samples/ports-get-detail-res.xml create mode 100644 doc/source/docbkx/quantum-api-1.0/samples/ports-get-res.json create mode 100644 doc/source/docbkx/quantum-api-1.0/samples/ports-get-res.xml create mode 100644 doc/source/docbkx/quantum-api-1.0/samples/private.json create mode 100644 doc/source/docbkx/quantum-api-1.0/samples/private.xml create mode 100644 doc/source/docbkx/quantum-api-1.0/samples/public.json create mode 
100644 doc/source/docbkx/quantum-api-1.0/samples/public.xml create mode 100644 doc/source/docbkx/quantum-api-1.0/samples/versions-atom.xml create mode 100644 doc/source/docbkx/quantum-api-1.0/samples/versions.json create mode 100644 doc/source/docbkx/quantum-api-1.0/samples/versions.xml create mode 100644 doc/source/docbkx/quantum-api-1.0/style/schema.css create mode 100644 doc/source/docbkx/quantum-api-1.0/style/shjs/sh_acid.css create mode 100644 doc/source/docbkx/quantum-api-1.0/style/shjs/sh_darkblue.css create mode 100644 doc/source/docbkx/quantum-api-1.0/style/shjs/sh_emacs.css create mode 100644 doc/source/docbkx/quantum-api-1.0/style/shjs/sh_night.css create mode 100644 doc/source/docbkx/quantum-api-1.0/style/shjs/sh_pablo.css create mode 100644 doc/source/docbkx/quantum-api-1.0/style/shjs/sh_print.css create mode 100644 doc/source/docbkx/quantum-api-1.0/style/shjs/sh_style.css create mode 100644 doc/source/docbkx/quantum-api-1.0/style/shjs/sh_whitengrey.css create mode 100644 doc/source/docbkx/quantum-api-1.0/xsd/.htaccess create mode 100644 doc/source/docbkx/quantum-api-1.0/xsd/actions.xsd create mode 100644 doc/source/docbkx/quantum-api-1.0/xsd/affinity-id.xjb create mode 100644 doc/source/docbkx/quantum-api-1.0/xsd/affinity-id.xsd create mode 100644 doc/source/docbkx/quantum-api-1.0/xsd/api-common.xjb create mode 100644 doc/source/docbkx/quantum-api-1.0/xsd/api-common.xsd create mode 100644 doc/source/docbkx/quantum-api-1.0/xsd/api.xjb create mode 100644 doc/source/docbkx/quantum-api-1.0/xsd/api.xsd create mode 100644 doc/source/docbkx/quantum-api-1.0/xsd/atom.xjb create mode 100644 doc/source/docbkx/quantum-api-1.0/xsd/atom/atom.xsd create mode 100644 doc/source/docbkx/quantum-api-1.0/xsd/atom/xml.xsd create mode 100644 doc/source/docbkx/quantum-api-1.0/xsd/backup.xsd create mode 100644 doc/source/docbkx/quantum-api-1.0/xsd/common.xsd create mode 100644 doc/source/docbkx/quantum-api-1.0/xsd/ext/rax-dme/api.xsd create mode 100644 
doc/source/docbkx/quantum-api-1.0/xsd/ext/rax-dme/rax-dme.xsd create mode 100644 doc/source/docbkx/quantum-api-1.0/xsd/extensions.xsd create mode 100644 doc/source/docbkx/quantum-api-1.0/xsd/faults.xsd create mode 100644 doc/source/docbkx/quantum-api-1.0/xsd/flavor.xsd create mode 100644 doc/source/docbkx/quantum-api-1.0/xsd/image.xsd create mode 100644 doc/source/docbkx/quantum-api-1.0/xsd/ipgroup.xsd create mode 100644 doc/source/docbkx/quantum-api-1.0/xsd/limits.xsd create mode 100644 doc/source/docbkx/quantum-api-1.0/xsd/metadata.xsd create mode 100644 doc/source/docbkx/quantum-api-1.0/xsd/server.xsd create mode 100644 doc/source/docbkx/quantum-api-1.0/xsd/shareip.xjb create mode 100644 doc/source/docbkx/quantum-api-1.0/xsd/shareip.xsd create mode 100644 doc/source/docbkx/quantum-api-1.0/xsd/txt.htaccess create mode 100644 doc/source/docbkx/quantum-api-1.0/xsd/version.xsd create mode 100644 doc/source/docbkx/quantum-api-1.0/xslt/schema.xsl create mode 100644 doc/source/index.rst create mode 100644 doc/source/man/neutron-server.rst create mode 100644 etc/api-paste.ini create mode 100644 etc/dhcp_agent.ini create mode 100644 etc/fwaas_driver.ini create mode 100755 etc/init.d/neutron-server create mode 100644 etc/l3_agent.ini create mode 100644 etc/lbaas_agent.ini create mode 100644 etc/metadata_agent.ini create mode 100644 etc/metering_agent.ini create mode 100644 etc/neutron.conf create mode 100644 etc/neutron/plugins/bigswitch/restproxy.ini create mode 100644 etc/neutron/plugins/bigswitch/ssl/ca_certs/README create mode 100644 etc/neutron/plugins/bigswitch/ssl/host_certs/README create mode 100644 etc/neutron/plugins/brocade/brocade.ini create mode 100644 etc/neutron/plugins/cisco/cisco_plugins.ini create mode 100644 etc/neutron/plugins/cisco/cisco_vpn_agent.ini create mode 100644 etc/neutron/plugins/embrane/heleos_conf.ini create mode 100644 etc/neutron/plugins/hyperv/hyperv_neutron_plugin.ini create mode 100644 etc/neutron/plugins/ibm/sdnve_neutron_plugin.ini 
create mode 100644 etc/neutron/plugins/linuxbridge/linuxbridge_conf.ini create mode 100644 etc/neutron/plugins/metaplugin/metaplugin.ini create mode 100644 etc/neutron/plugins/midonet/midonet.ini create mode 100644 etc/neutron/plugins/ml2/ml2_conf.ini create mode 100644 etc/neutron/plugins/ml2/ml2_conf_arista.ini create mode 100644 etc/neutron/plugins/ml2/ml2_conf_brocade.ini create mode 100644 etc/neutron/plugins/ml2/ml2_conf_cisco.ini create mode 100644 etc/neutron/plugins/ml2/ml2_conf_fslsdn.ini create mode 100644 etc/neutron/plugins/ml2/ml2_conf_mlnx.ini create mode 100644 etc/neutron/plugins/ml2/ml2_conf_ncs.ini create mode 100644 etc/neutron/plugins/ml2/ml2_conf_odl.ini create mode 100644 etc/neutron/plugins/ml2/ml2_conf_ofa.ini create mode 100644 etc/neutron/plugins/mlnx/mlnx_conf.ini create mode 100644 etc/neutron/plugins/nec/nec.ini create mode 100644 etc/neutron/plugins/nuage/nuage_plugin.ini create mode 100644 etc/neutron/plugins/oneconvergence/nvsdplugin.ini create mode 100644 etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini create mode 100644 etc/neutron/plugins/plumgrid/plumgrid.ini create mode 100644 etc/neutron/plugins/ryu/ryu.ini create mode 100644 etc/neutron/plugins/vmware/nsx.ini create mode 100644 etc/neutron/rootwrap.d/debug.filters create mode 100644 etc/neutron/rootwrap.d/dhcp.filters create mode 100644 etc/neutron/rootwrap.d/iptables-firewall.filters create mode 100644 etc/neutron/rootwrap.d/l3.filters create mode 100644 etc/neutron/rootwrap.d/lbaas-haproxy.filters create mode 100644 etc/neutron/rootwrap.d/linuxbridge-plugin.filters create mode 100644 etc/neutron/rootwrap.d/nec-plugin.filters create mode 100644 etc/neutron/rootwrap.d/openvswitch-plugin.filters create mode 100644 etc/neutron/rootwrap.d/ryu-plugin.filters create mode 100644 etc/neutron/rootwrap.d/vpnaas.filters create mode 100644 etc/policy.json create mode 100644 etc/rootwrap.conf create mode 100644 etc/services.conf create mode 100644 etc/vpn_agent.ini create mode 
100644 neutron/__init__.py create mode 100644 neutron/agent/__init__.py create mode 100644 neutron/agent/common/__init__.py create mode 100644 neutron/agent/common/config.py create mode 100644 neutron/agent/dhcp_agent.py create mode 100644 neutron/agent/firewall.py create mode 100644 neutron/agent/l2population_rpc.py create mode 100644 neutron/agent/l3_agent.py create mode 100644 neutron/agent/linux/__init__.py create mode 100644 neutron/agent/linux/async_process.py create mode 100644 neutron/agent/linux/daemon.py create mode 100644 neutron/agent/linux/dhcp.py create mode 100644 neutron/agent/linux/external_process.py create mode 100644 neutron/agent/linux/interface.py create mode 100644 neutron/agent/linux/ip_lib.py create mode 100644 neutron/agent/linux/iptables_firewall.py create mode 100644 neutron/agent/linux/iptables_manager.py create mode 100644 neutron/agent/linux/ovs_lib.py create mode 100644 neutron/agent/linux/ovsdb_monitor.py create mode 100644 neutron/agent/linux/polling.py create mode 100644 neutron/agent/linux/utils.py create mode 100644 neutron/agent/metadata/__init__.py create mode 100644 neutron/agent/metadata/agent.py create mode 100644 neutron/agent/metadata/namespace_proxy.py create mode 100644 neutron/agent/netns_cleanup_util.py create mode 100644 neutron/agent/ovs_cleanup_util.py create mode 100644 neutron/agent/rpc.py create mode 100644 neutron/agent/securitygroups_rpc.py create mode 100644 neutron/api/__init__.py create mode 100644 neutron/api/api_common.py create mode 100644 neutron/api/extensions.py create mode 100644 neutron/api/rpc/__init__.py create mode 100644 neutron/api/rpc/agentnotifiers/__init__.py create mode 100644 neutron/api/rpc/agentnotifiers/dhcp_rpc_agent_api.py create mode 100644 neutron/api/rpc/agentnotifiers/l3_rpc_agent_api.py create mode 100644 neutron/api/rpc/agentnotifiers/metering_rpc_agent_api.py create mode 100644 neutron/api/v2/__init__.py create mode 100644 neutron/api/v2/attributes.py create mode 100644 
neutron/api/v2/base.py create mode 100644 neutron/api/v2/resource.py create mode 100644 neutron/api/v2/resource_helper.py create mode 100644 neutron/api/v2/router.py create mode 100644 neutron/api/versions.py create mode 100644 neutron/api/views/__init__.py create mode 100644 neutron/api/views/versions.py create mode 100644 neutron/auth.py create mode 100644 neutron/cmd/__init__.py create mode 100644 neutron/cmd/sanity/__init__.py create mode 100644 neutron/cmd/sanity/checks.py create mode 100644 neutron/cmd/sanity_check.py create mode 100644 neutron/cmd/usage_audit.py create mode 100644 neutron/common/__init__.py create mode 100644 neutron/common/config.py create mode 100644 neutron/common/constants.py create mode 100644 neutron/common/exceptions.py create mode 100644 neutron/common/ipv6_utils.py create mode 100644 neutron/common/log.py create mode 100644 neutron/common/rpc.py create mode 100644 neutron/common/rpc_compat.py create mode 100644 neutron/common/test_lib.py create mode 100644 neutron/common/topics.py create mode 100644 neutron/common/utils.py create mode 100644 neutron/context.py create mode 100644 neutron/db/__init__.py create mode 100644 neutron/db/agents_db.py create mode 100644 neutron/db/agentschedulers_db.py create mode 100644 neutron/db/allowedaddresspairs_db.py create mode 100644 neutron/db/api.py create mode 100644 neutron/db/db_base_plugin_v2.py create mode 100644 neutron/db/dhcp_rpc_base.py create mode 100644 neutron/db/external_net_db.py create mode 100644 neutron/db/extradhcpopt_db.py create mode 100644 neutron/db/extraroute_db.py create mode 100644 neutron/db/firewall/__init__.py create mode 100644 neutron/db/firewall/firewall_db.py create mode 100644 neutron/db/l3_agentschedulers_db.py create mode 100644 neutron/db/l3_db.py create mode 100644 neutron/db/l3_gwmode_db.py create mode 100644 neutron/db/l3_rpc_base.py create mode 100644 neutron/db/loadbalancer/__init__.py create mode 100644 neutron/db/loadbalancer/loadbalancer_db.py create 
mode 100644 neutron/db/metering/__init__.py create mode 100644 neutron/db/metering/metering_db.py create mode 100644 neutron/db/metering/metering_rpc.py create mode 100644 neutron/db/migration/README create mode 100644 neutron/db/migration/__init__.py create mode 100644 neutron/db/migration/alembic.ini create mode 100644 neutron/db/migration/alembic_migrations/__init__.py create mode 100644 neutron/db/migration/alembic_migrations/common_ext_ops.py create mode 100644 neutron/db/migration/alembic_migrations/env.py create mode 100644 neutron/db/migration/alembic_migrations/script.py.mako create mode 100644 neutron/db/migration/alembic_migrations/versions/1064e98b7917_nec_pf_port_del.py create mode 100644 neutron/db/migration/alembic_migrations/versions/10cd28e692e9_nuage_extraroute.py create mode 100644 neutron/db/migration/alembic_migrations/versions/1149d7de0cfa_port_security.py create mode 100644 neutron/db/migration/alembic_migrations/versions/117643811bca_nec_delete_ofc_mapping.py create mode 100644 neutron/db/migration/alembic_migrations/versions/11c6e18605c8_pool_monitor_status_.py create mode 100644 neutron/db/migration/alembic_migrations/versions/128e042a2b68_ext_gw_mode.py create mode 100644 neutron/db/migration/alembic_migrations/versions/1341ed32cc1e_nvp_netbinding_update.py create mode 100644 neutron/db/migration/alembic_migrations/versions/13de305df56e_add_nec_pf_name.py create mode 100644 neutron/db/migration/alembic_migrations/versions/1421183d533f_nsx_dhcp_metadata.py create mode 100644 neutron/db/migration/alembic_migrations/versions/14f24494ca31_arista_ml2.py create mode 100644 neutron/db/migration/alembic_migrations/versions/157a5d299379_ml2_binding_profile.py create mode 100644 neutron/db/migration/alembic_migrations/versions/176a85fc7d79_add_portbindings_db.py create mode 100644 neutron/db/migration/alembic_migrations/versions/19180cf98af6_nsx_gw_devices.py create mode 100644 
neutron/db/migration/alembic_migrations/versions/1b2580001654_nsx_sec_group_mappin.py create mode 100644 neutron/db/migration/alembic_migrations/versions/1b693c095aa3_quota_ext_db_grizzly.py create mode 100644 neutron/db/migration/alembic_migrations/versions/1b837a7125a9_cisco_apic_driver.py create mode 100644 neutron/db/migration/alembic_migrations/versions/1c33fa3cd1a1_extra_route_config.py create mode 100644 neutron/db/migration/alembic_migrations/versions/1d76643bcec4_nvp_netbinding.py create mode 100644 neutron/db/migration/alembic_migrations/versions/1e5dd1d09b22_set_not_null_fields_lb_stats.py create mode 100644 neutron/db/migration/alembic_migrations/versions/1efb85914233_allowedaddresspairs.py create mode 100644 neutron/db/migration/alembic_migrations/versions/1fcfc149aca4_agents_unique_by_type_and_host.py create mode 100644 neutron/db/migration/alembic_migrations/versions/2032abe8edac_lbaas_add_status_des.py create mode 100644 neutron/db/migration/alembic_migrations/versions/20ae61555e95_ml2_gre_type_driver.py create mode 100644 neutron/db/migration/alembic_migrations/versions/2447ad0e9585_add_ipv6_mode_props.py create mode 100644 neutron/db/migration/alembic_migrations/versions/24c7ea5160d7_cisco_csr_vpnaas.py create mode 100644 neutron/db/migration/alembic_migrations/versions/2528ceb28230_nec_pf_netid_fix.py create mode 100644 neutron/db/migration/alembic_migrations/versions/263772d65691_cisco_db_cleanup_2.py create mode 100644 neutron/db/migration/alembic_migrations/versions/27cc183af192_ml2_vnic_type.py create mode 100644 neutron/db/migration/alembic_migrations/versions/27ef74513d33_quota_in_plumgrid_pl.py create mode 100644 neutron/db/migration/alembic_migrations/versions/2a3bae1ceb8_nec_port_binding.py create mode 100644 neutron/db/migration/alembic_migrations/versions/2a6d0b51f4bb_cisco_plugin_cleanup.py create mode 100644 neutron/db/migration/alembic_migrations/versions/2c4af419145b_l3_support.py create mode 100644 
neutron/db/migration/alembic_migrations/versions/2db5203cb7a9_nuage_floatingip.py create mode 100644 neutron/db/migration/alembic_migrations/versions/2eeaf963a447_floatingip_status.py create mode 100644 neutron/db/migration/alembic_migrations/versions/32a65f71af51_ml2_portbinding.py create mode 100644 neutron/db/migration/alembic_migrations/versions/32b517556ec9_remove_tunnelip_mode.py create mode 100644 neutron/db/migration/alembic_migrations/versions/338d7508968c_vpnaas_peer_address_.py create mode 100644 neutron/db/migration/alembic_migrations/versions/33c3db036fe4_set_length_of_description_field_metering.py create mode 100644 neutron/db/migration/alembic_migrations/versions/33dd0a9fa487_embrane_lbaas_driver.py create mode 100644 neutron/db/migration/alembic_migrations/versions/35c7c198ddea_lbaas_healthmon_del_status.py create mode 100644 neutron/db/migration/alembic_migrations/versions/363468ac592c_nvp_network_gw.py create mode 100644 neutron/db/migration/alembic_migrations/versions/38335592a0dc_nvp_portmap.py create mode 100644 neutron/db/migration/alembic_migrations/versions/38fc1f6789f8_cisco_n1kv_overlay.py create mode 100644 neutron/db/migration/alembic_migrations/versions/39cf3f799352_fwaas_havana_2_model.py create mode 100644 neutron/db/migration/alembic_migrations/versions/3a520dd165d0_cisco_nexus_multi_switch.py create mode 100644 neutron/db/migration/alembic_migrations/versions/3b54bf9e29f7_nec_plugin_sharednet.py create mode 100644 neutron/db/migration/alembic_migrations/versions/3c6e57a23db4_add_multiprovider.py create mode 100644 neutron/db/migration/alembic_migrations/versions/3cabb850f4a5_table_to_track_port_.py create mode 100644 neutron/db/migration/alembic_migrations/versions/3cb5d900c5de_security_groups.py create mode 100644 neutron/db/migration/alembic_migrations/versions/3cbf70257c28_nvp_mac_learning.py create mode 100644 neutron/db/migration/alembic_migrations/versions/3d2585038b95_vmware_nsx.py create mode 100644 
neutron/db/migration/alembic_migrations/versions/3d3cb89d84ee_nsx_switch_mappings.py create mode 100644 neutron/db/migration/alembic_migrations/versions/3d6fae8b70b0_nvp_lbaas_plugin.py create mode 100755 neutron/db/migration/alembic_migrations/versions/3ed8f075e38a_nvp_fwaas_plugin.py create mode 100644 neutron/db/migration/alembic_migrations/versions/40b0aff0302e_mlnx_initial.py create mode 100644 neutron/db/migration/alembic_migrations/versions/40dffbf4b549_nvp_dist_router.py create mode 100644 neutron/db/migration/alembic_migrations/versions/45680af419f9_nvp_qos.py create mode 100644 neutron/db/migration/alembic_migrations/versions/4692d074d587_agent_scheduler.py create mode 100644 neutron/db/migration/alembic_migrations/versions/46a0efbd8f0_cisco_n1kv_multisegm.py create mode 100644 neutron/db/migration/alembic_migrations/versions/477a4488d3f4_ml2_vxlan_type_driver.py create mode 100644 neutron/db/migration/alembic_migrations/versions/48b6f43f7471_service_type.py create mode 100644 neutron/db/migration/alembic_migrations/versions/492a106273f8_brocade_ml2_mech_dri.py create mode 100644 neutron/db/migration/alembic_migrations/versions/49332180ca96_ryu_plugin_update.py create mode 100644 neutron/db/migration/alembic_migrations/versions/49f5e553f61f_ml2_security_groups.py create mode 100644 neutron/db/migration/alembic_migrations/versions/4a666eb208c2_service_router.py create mode 100644 neutron/db/migration/alembic_migrations/versions/4ca36cfc898c_nsx_router_mappings.py create mode 100644 neutron/db/migration/alembic_migrations/versions/4eca4a84f08a_remove_ml2_cisco_cred_db.py create mode 100644 neutron/db/migration/alembic_migrations/versions/50d5ba354c23_ml2_binding_vif_details.py create mode 100644 neutron/db/migration/alembic_migrations/versions/50e86cb2637a_nsx_mappings.py create mode 100644 neutron/db/migration/alembic_migrations/versions/511471cc46b_agent_ext_model_supp.py create mode 100755 
neutron/db/migration/alembic_migrations/versions/51b4de912379_cisco_nexus_ml2_mech.py create mode 100644 neutron/db/migration/alembic_migrations/versions/52c5e4a18807_lbaas_pool_scheduler.py create mode 100644 neutron/db/migration/alembic_migrations/versions/52ff27f7567a_support_for_vpnaas.py create mode 100644 neutron/db/migration/alembic_migrations/versions/538732fa21e1_nec_rename_quantum_id_to_neutron_id.py create mode 100644 neutron/db/migration/alembic_migrations/versions/53bbd27ec841_extra_dhcp_opts_supp.py create mode 100644 neutron/db/migration/alembic_migrations/versions/54c2c487e913_lbaas.py create mode 100644 neutron/db/migration/alembic_migrations/versions/54f7549a0e5f_set_not_null_peer_address.py create mode 100644 neutron/db/migration/alembic_migrations/versions/557edfc53098_new_service_types.py create mode 100644 neutron/db/migration/alembic_migrations/versions/569e98a8132b_metering.py create mode 100644 neutron/db/migration/alembic_migrations/versions/5918cbddab04_add_tables_for_route.py create mode 100644 neutron/db/migration/alembic_migrations/versions/5a875d0e5c_ryu.py create mode 100644 neutron/db/migration/alembic_migrations/versions/5ac1c354a051_n1kv_segment_alloc.py create mode 100644 neutron/db/migration/alembic_migrations/versions/5ac71e65402c_ml2_initial.py create mode 100644 neutron/db/migration/alembic_migrations/versions/63afba73813_ovs_tunnelendpoints_id_unique.py create mode 100644 neutron/db/migration/alembic_migrations/versions/66a59a7f516_nec_openflow_router.py create mode 100644 neutron/db/migration/alembic_migrations/versions/6be312499f9_set_not_null_vlan_id_cisco.py create mode 100644 neutron/db/migration/alembic_migrations/versions/81c553f3776c_bsn_consistencyhashes.py create mode 100644 neutron/db/migration/alembic_migrations/versions/86cf4d88bd3_remove_bigswitch_por.py create mode 100644 neutron/db/migration/alembic_migrations/versions/8f682276ee4_ryu_plugin_quota.py create mode 100644 
neutron/db/migration/alembic_migrations/versions/HEAD create mode 100644 neutron/db/migration/alembic_migrations/versions/README create mode 100644 neutron/db/migration/alembic_migrations/versions/abc88c33f74f_lb_stats_needs_bigint.py create mode 100644 neutron/db/migration/alembic_migrations/versions/b65aa907aec_set_length_of_protocol_field.py create mode 100644 neutron/db/migration/alembic_migrations/versions/b7a8863760e_rm_cisco_vlan_bindin.py create mode 100644 neutron/db/migration/alembic_migrations/versions/c88b6b5fea3_cisco_n1kv_tables.py create mode 100644 neutron/db/migration/alembic_migrations/versions/d06e871c0d5_set_admin_state_up_not_null_ml2.py create mode 100644 neutron/db/migration/alembic_migrations/versions/e197124d4b9_add_unique_constrain.py create mode 100644 neutron/db/migration/alembic_migrations/versions/e6b16a30d97_cisco_provider_nets.py create mode 100644 neutron/db/migration/alembic_migrations/versions/e766b19a3bb_nuage_initial.py create mode 100644 neutron/db/migration/alembic_migrations/versions/ed93525fd003_bigswitch_quota.py create mode 100644 neutron/db/migration/alembic_migrations/versions/f44ab9871cd6_bsn_security_groups.py create mode 100644 neutron/db/migration/alembic_migrations/versions/f489cf14a79c_lbaas_havana.py create mode 100644 neutron/db/migration/alembic_migrations/versions/f9263d6df56_remove_dhcp_lease.py create mode 100644 neutron/db/migration/alembic_migrations/versions/fcac4c42e2cc_bsn_addresspairs.py create mode 100644 neutron/db/migration/alembic_migrations/versions/folsom_initial.py create mode 100644 neutron/db/migration/alembic_migrations/versions/grizzly_release.py create mode 100644 neutron/db/migration/alembic_migrations/versions/havana_release.py create mode 100644 neutron/db/migration/alembic_migrations/versions/icehouse_release.py create mode 100644 neutron/db/migration/cli.py create mode 100755 neutron/db/migration/migrate_to_ml2.py create mode 100644 neutron/db/model_base.py create mode 100644 
neutron/db/models_v2.py create mode 100644 neutron/db/portbindings_base.py create mode 100644 neutron/db/portbindings_db.py create mode 100644 neutron/db/portsecurity_db.py create mode 100644 neutron/db/quota_db.py create mode 100644 neutron/db/routedserviceinsertion_db.py create mode 100644 neutron/db/routerservicetype_db.py create mode 100644 neutron/db/securitygroups_db.py create mode 100644 neutron/db/securitygroups_rpc_base.py create mode 100644 neutron/db/servicetype_db.py create mode 100644 neutron/db/sqlalchemyutils.py create mode 100644 neutron/db/vpn/__init__.py create mode 100644 neutron/db/vpn/vpn_db.py create mode 100644 neutron/debug/README create mode 100644 neutron/debug/__init__.py create mode 100644 neutron/debug/commands.py create mode 100644 neutron/debug/debug_agent.py create mode 100644 neutron/debug/shell.py create mode 100644 neutron/extensions/__init__.py create mode 100644 neutron/extensions/agent.py create mode 100644 neutron/extensions/allowedaddresspairs.py create mode 100644 neutron/extensions/dhcpagentscheduler.py create mode 100644 neutron/extensions/external_net.py create mode 100644 neutron/extensions/extra_dhcp_opt.py create mode 100644 neutron/extensions/extraroute.py create mode 100644 neutron/extensions/firewall.py create mode 100644 neutron/extensions/flavor.py create mode 100644 neutron/extensions/l3.py create mode 100644 neutron/extensions/l3_ext_gw_mode.py create mode 100644 neutron/extensions/l3agentscheduler.py create mode 100644 neutron/extensions/lbaas_agentscheduler.py create mode 100644 neutron/extensions/loadbalancer.py create mode 100644 neutron/extensions/metering.py create mode 100644 neutron/extensions/multiprovidernet.py create mode 100644 neutron/extensions/portbindings.py create mode 100644 neutron/extensions/portsecurity.py create mode 100644 neutron/extensions/providernet.py create mode 100644 neutron/extensions/quotasv2.py create mode 100644 neutron/extensions/routedserviceinsertion.py create mode 100644 
neutron/extensions/routerservicetype.py create mode 100644 neutron/extensions/securitygroup.py create mode 100644 neutron/extensions/servicetype.py create mode 100644 neutron/extensions/vpnaas.py create mode 100644 neutron/hacking/__init__.py create mode 100644 neutron/hacking/checks.py create mode 100644 neutron/hooks.py create mode 100644 neutron/locale/de/LC_MESSAGES/neutron-log-error.po create mode 100644 neutron/locale/de/LC_MESSAGES/neutron-log-info.po create mode 100644 neutron/locale/de/LC_MESSAGES/neutron-log-warning.po create mode 100644 neutron/locale/en_AU/LC_MESSAGES/neutron-log-error.po create mode 100644 neutron/locale/en_AU/LC_MESSAGES/neutron-log-info.po create mode 100644 neutron/locale/en_GB/LC_MESSAGES/neutron-log-error.po create mode 100644 neutron/locale/en_GB/LC_MESSAGES/neutron-log-info.po create mode 100644 neutron/locale/en_US/LC_MESSAGES/neutron.po create mode 100644 neutron/locale/es/LC_MESSAGES/neutron-log-error.po create mode 100644 neutron/locale/es/LC_MESSAGES/neutron-log-info.po create mode 100644 neutron/locale/fr/LC_MESSAGES/neutron-log-critical.po create mode 100644 neutron/locale/fr/LC_MESSAGES/neutron-log-error.po create mode 100644 neutron/locale/fr/LC_MESSAGES/neutron-log-info.po create mode 100644 neutron/locale/it/LC_MESSAGES/neutron-log-info.po create mode 100644 neutron/locale/ja/LC_MESSAGES/neutron-log-error.po create mode 100644 neutron/locale/ja/LC_MESSAGES/neutron-log-info.po create mode 100644 neutron/locale/ko_KR/LC_MESSAGES/neutron-log-error.po create mode 100644 neutron/locale/ko_KR/LC_MESSAGES/neutron-log-info.po create mode 100644 neutron/locale/neutron-log-critical.pot create mode 100644 neutron/locale/neutron-log-error.pot create mode 100644 neutron/locale/neutron-log-info.pot create mode 100644 neutron/locale/neutron-log-warning.pot create mode 100644 neutron/locale/neutron.pot create mode 100644 neutron/locale/pt_BR/LC_MESSAGES/neutron-log-error.po create mode 100644 
neutron/locale/pt_BR/LC_MESSAGES/neutron-log-info.po create mode 100644 neutron/locale/zh_CN/LC_MESSAGES/neutron-log-error.po create mode 100644 neutron/locale/zh_CN/LC_MESSAGES/neutron-log-info.po create mode 100644 neutron/locale/zh_TW/LC_MESSAGES/neutron-log-info.po create mode 100644 neutron/manager.py create mode 100644 neutron/neutron_plugin_base_v2.py create mode 100644 neutron/notifiers/__init__.py create mode 100644 neutron/notifiers/nova.py create mode 100644 neutron/openstack/__init__.py create mode 100644 neutron/openstack/common/__init__.py create mode 100644 neutron/openstack/common/cache/__init__.py create mode 100644 neutron/openstack/common/cache/_backends/__init__.py create mode 100644 neutron/openstack/common/cache/_backends/memory.py create mode 100644 neutron/openstack/common/cache/backends.py create mode 100644 neutron/openstack/common/cache/cache.py create mode 100644 neutron/openstack/common/context.py create mode 100644 neutron/openstack/common/db/__init__.py create mode 100644 neutron/openstack/common/db/api.py create mode 100644 neutron/openstack/common/db/exception.py create mode 100644 neutron/openstack/common/db/options.py create mode 100644 neutron/openstack/common/db/sqlalchemy/__init__.py create mode 100644 neutron/openstack/common/db/sqlalchemy/models.py create mode 100644 neutron/openstack/common/db/sqlalchemy/provision.py create mode 100644 neutron/openstack/common/db/sqlalchemy/session.py create mode 100644 neutron/openstack/common/db/sqlalchemy/test_base.py create mode 100644 neutron/openstack/common/db/sqlalchemy/utils.py create mode 100644 neutron/openstack/common/eventlet_backdoor.py create mode 100644 neutron/openstack/common/excutils.py create mode 100644 neutron/openstack/common/fileutils.py create mode 100644 neutron/openstack/common/fixture/__init__.py create mode 100644 neutron/openstack/common/fixture/config.py create mode 100644 neutron/openstack/common/fixture/lockutils.py create mode 100644 
neutron/openstack/common/fixture/mockpatch.py create mode 100644 neutron/openstack/common/fixture/moxstubout.py create mode 100644 neutron/openstack/common/gettextutils.py create mode 100644 neutron/openstack/common/importutils.py create mode 100644 neutron/openstack/common/jsonutils.py create mode 100644 neutron/openstack/common/local.py create mode 100644 neutron/openstack/common/lockutils.py create mode 100644 neutron/openstack/common/log.py create mode 100644 neutron/openstack/common/log_handler.py create mode 100644 neutron/openstack/common/loopingcall.py create mode 100644 neutron/openstack/common/middleware/__init__.py create mode 100644 neutron/openstack/common/middleware/audit.py create mode 100644 neutron/openstack/common/middleware/base.py create mode 100644 neutron/openstack/common/middleware/catch_errors.py create mode 100644 neutron/openstack/common/middleware/correlation_id.py create mode 100644 neutron/openstack/common/middleware/debug.py create mode 100644 neutron/openstack/common/middleware/notifier.py create mode 100644 neutron/openstack/common/middleware/request_id.py create mode 100644 neutron/openstack/common/middleware/sizelimit.py create mode 100644 neutron/openstack/common/network_utils.py create mode 100644 neutron/openstack/common/periodic_task.py create mode 100644 neutron/openstack/common/policy.py create mode 100644 neutron/openstack/common/processutils.py create mode 100644 neutron/openstack/common/service.py create mode 100644 neutron/openstack/common/sslutils.py create mode 100644 neutron/openstack/common/strutils.py create mode 100644 neutron/openstack/common/systemd.py create mode 100644 neutron/openstack/common/threadgroup.py create mode 100644 neutron/openstack/common/timeutils.py create mode 100644 neutron/openstack/common/uuidutils.py create mode 100644 neutron/openstack/common/versionutils.py create mode 100644 neutron/plugins/__init__.py create mode 100644 neutron/plugins/bigswitch/README create mode 100644 
neutron/plugins/bigswitch/__init__.py create mode 100644 neutron/plugins/bigswitch/agent/__init__.py create mode 100644 neutron/plugins/bigswitch/agent/restproxy_agent.py create mode 100644 neutron/plugins/bigswitch/config.py create mode 100644 neutron/plugins/bigswitch/db/__init__.py create mode 100644 neutron/plugins/bigswitch/db/consistency_db.py create mode 100644 neutron/plugins/bigswitch/db/porttracker_db.py create mode 100644 neutron/plugins/bigswitch/extensions/__init__.py create mode 100644 neutron/plugins/bigswitch/extensions/routerrule.py create mode 100644 neutron/plugins/bigswitch/plugin.py create mode 100644 neutron/plugins/bigswitch/routerrule_db.py create mode 100644 neutron/plugins/bigswitch/servermanager.py create mode 100644 neutron/plugins/bigswitch/tests/__init__.py create mode 100755 neutron/plugins/bigswitch/tests/test_server.py create mode 100644 neutron/plugins/bigswitch/vcsversion.py create mode 100755 neutron/plugins/bigswitch/version.py create mode 100644 neutron/plugins/brocade/NeutronPlugin.py create mode 100644 neutron/plugins/brocade/README.md create mode 100644 neutron/plugins/brocade/__init__.py create mode 100644 neutron/plugins/brocade/db/__init__.py create mode 100644 neutron/plugins/brocade/db/models.py create mode 100644 neutron/plugins/brocade/nos/__init__.py create mode 100644 neutron/plugins/brocade/nos/fake_nosdriver.py create mode 100644 neutron/plugins/brocade/nos/nctemplates.py create mode 100644 neutron/plugins/brocade/nos/nosdriver.py create mode 100644 neutron/plugins/brocade/tests/README create mode 100644 neutron/plugins/brocade/tests/noscli.py create mode 100644 neutron/plugins/brocade/tests/nostest.py create mode 100644 neutron/plugins/brocade/vlanbm.py create mode 100644 neutron/plugins/cisco/README create mode 100644 neutron/plugins/cisco/__init__.py create mode 100644 neutron/plugins/cisco/common/__init__.py create mode 100644 neutron/plugins/cisco/common/cisco_constants.py create mode 100644 
neutron/plugins/cisco/common/cisco_credentials_v2.py create mode 100644 neutron/plugins/cisco/common/cisco_exceptions.py create mode 100644 neutron/plugins/cisco/common/cisco_faults.py create mode 100644 neutron/plugins/cisco/common/config.py create mode 100644 neutron/plugins/cisco/db/__init__.py create mode 100644 neutron/plugins/cisco/db/n1kv_db_v2.py create mode 100644 neutron/plugins/cisco/db/n1kv_models_v2.py create mode 100644 neutron/plugins/cisco/db/network_db_v2.py create mode 100644 neutron/plugins/cisco/db/network_models_v2.py create mode 100644 neutron/plugins/cisco/db/nexus_db_v2.py create mode 100644 neutron/plugins/cisco/db/nexus_models_v2.py create mode 100644 neutron/plugins/cisco/extensions/__init__.py create mode 100644 neutron/plugins/cisco/extensions/_credential_view.py create mode 100644 neutron/plugins/cisco/extensions/_qos_view.py create mode 100644 neutron/plugins/cisco/extensions/credential.py create mode 100644 neutron/plugins/cisco/extensions/n1kv.py create mode 100644 neutron/plugins/cisco/extensions/network_profile.py create mode 100644 neutron/plugins/cisco/extensions/policy_profile.py create mode 100644 neutron/plugins/cisco/extensions/qos.py create mode 100644 neutron/plugins/cisco/l2device_plugin_base.py create mode 100644 neutron/plugins/cisco/models/__init__.py create mode 100644 neutron/plugins/cisco/models/virt_phy_sw_v2.py create mode 100644 neutron/plugins/cisco/n1kv/__init__.py create mode 100644 neutron/plugins/cisco/n1kv/n1kv_client.py create mode 100644 neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py create mode 100644 neutron/plugins/cisco/network_plugin.py create mode 100644 neutron/plugins/cisco/nexus/__init__.py create mode 100644 neutron/plugins/cisco/nexus/cisco_nexus_network_driver_v2.py create mode 100644 neutron/plugins/cisco/nexus/cisco_nexus_plugin_v2.py create mode 100644 neutron/plugins/cisco/nexus/cisco_nexus_snippets.py create mode 100644 neutron/plugins/cisco/test/__init__.py create mode 100644 
neutron/plugins/cisco/test/nexus/__init__.py create mode 100644 neutron/plugins/cisco/test/nexus/fake_nexus_driver.py create mode 100644 neutron/plugins/common/__init__.py create mode 100644 neutron/plugins/common/constants.py create mode 100644 neutron/plugins/common/utils.py create mode 100644 neutron/plugins/embrane/README create mode 100644 neutron/plugins/embrane/__init__.py create mode 100644 neutron/plugins/embrane/agent/__init__.py create mode 100644 neutron/plugins/embrane/agent/dispatcher.py create mode 100644 neutron/plugins/embrane/agent/operations/__init__.py create mode 100644 neutron/plugins/embrane/agent/operations/router_operations.py create mode 100644 neutron/plugins/embrane/base_plugin.py create mode 100644 neutron/plugins/embrane/common/__init__.py create mode 100644 neutron/plugins/embrane/common/config.py create mode 100644 neutron/plugins/embrane/common/constants.py create mode 100644 neutron/plugins/embrane/common/contexts.py create mode 100644 neutron/plugins/embrane/common/exceptions.py create mode 100644 neutron/plugins/embrane/common/operation.py create mode 100644 neutron/plugins/embrane/common/utils.py create mode 100644 neutron/plugins/embrane/l2base/__init__.py create mode 100644 neutron/plugins/embrane/l2base/fake/__init__.py create mode 100644 neutron/plugins/embrane/l2base/fake/fake_l2_plugin.py create mode 100644 neutron/plugins/embrane/l2base/fake/fakeplugin_support.py create mode 100644 neutron/plugins/embrane/l2base/openvswitch/__init__.py create mode 100644 neutron/plugins/embrane/l2base/openvswitch/openvswitch_support.py create mode 100644 neutron/plugins/embrane/l2base/support_base.py create mode 100644 neutron/plugins/embrane/l2base/support_exceptions.py create mode 100644 neutron/plugins/embrane/plugins/__init__.py create mode 100644 neutron/plugins/embrane/plugins/embrane_fake_plugin.py create mode 100644 neutron/plugins/embrane/plugins/embrane_ovs_plugin.py create mode 100644 neutron/plugins/hyperv/__init__.py create 
mode 100644 neutron/plugins/hyperv/agent/__init__.py create mode 100644 neutron/plugins/hyperv/agent/hyperv_neutron_agent.py create mode 100644 neutron/plugins/hyperv/agent/security_groups_driver.py create mode 100644 neutron/plugins/hyperv/agent/utils.py create mode 100644 neutron/plugins/hyperv/agent/utilsfactory.py create mode 100644 neutron/plugins/hyperv/agent/utilsv2.py create mode 100644 neutron/plugins/hyperv/agent_notifier_api.py create mode 100644 neutron/plugins/hyperv/common/__init__.py create mode 100644 neutron/plugins/hyperv/common/constants.py create mode 100644 neutron/plugins/hyperv/db.py create mode 100644 neutron/plugins/hyperv/hyperv_neutron_plugin.py create mode 100644 neutron/plugins/hyperv/model.py create mode 100644 neutron/plugins/hyperv/rpc_callbacks.py create mode 100644 neutron/plugins/ibm/README create mode 100644 neutron/plugins/ibm/__init__.py create mode 100644 neutron/plugins/ibm/agent/__init__.py create mode 100644 neutron/plugins/ibm/agent/sdnve_neutron_agent.py create mode 100644 neutron/plugins/ibm/common/__init__.py create mode 100644 neutron/plugins/ibm/common/config.py create mode 100644 neutron/plugins/ibm/common/constants.py create mode 100644 neutron/plugins/ibm/common/exceptions.py create mode 100644 neutron/plugins/ibm/sdnve_api.py create mode 100644 neutron/plugins/ibm/sdnve_api_fake.py create mode 100644 neutron/plugins/ibm/sdnve_neutron_plugin.py create mode 100644 neutron/plugins/linuxbridge/README create mode 100644 neutron/plugins/linuxbridge/__init__.py create mode 100644 neutron/plugins/linuxbridge/agent/__init__.py create mode 100755 neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py create mode 100644 neutron/plugins/linuxbridge/common/__init__.py create mode 100644 neutron/plugins/linuxbridge/common/config.py create mode 100644 neutron/plugins/linuxbridge/common/constants.py create mode 100644 neutron/plugins/linuxbridge/db/__init__.py create mode 100644 
neutron/plugins/linuxbridge/db/l2network_db_v2.py create mode 100644 neutron/plugins/linuxbridge/db/l2network_models_v2.py create mode 100644 neutron/plugins/linuxbridge/lb_neutron_plugin.py create mode 100644 neutron/plugins/metaplugin/README create mode 100644 neutron/plugins/metaplugin/__init__.py create mode 100644 neutron/plugins/metaplugin/common/__init__.py create mode 100644 neutron/plugins/metaplugin/common/config.py create mode 100644 neutron/plugins/metaplugin/meta_db_v2.py create mode 100644 neutron/plugins/metaplugin/meta_models_v2.py create mode 100644 neutron/plugins/metaplugin/meta_neutron_plugin.py create mode 100644 neutron/plugins/metaplugin/proxy_neutron_plugin.py create mode 100644 neutron/plugins/midonet/__init__.py create mode 100644 neutron/plugins/midonet/agent/__init__.py create mode 100644 neutron/plugins/midonet/agent/midonet_driver.py create mode 100644 neutron/plugins/midonet/common/__init__.py create mode 100644 neutron/plugins/midonet/common/config.py create mode 100644 neutron/plugins/midonet/common/net_util.py create mode 100644 neutron/plugins/midonet/midonet_lib.py create mode 100644 neutron/plugins/midonet/plugin.py create mode 100644 neutron/plugins/ml2/README create mode 100644 neutron/plugins/ml2/__init__.py create mode 100644 neutron/plugins/ml2/common/__init__.py create mode 100644 neutron/plugins/ml2/common/exceptions.py create mode 100644 neutron/plugins/ml2/config.py create mode 100644 neutron/plugins/ml2/db.py create mode 100644 neutron/plugins/ml2/driver_api.py create mode 100644 neutron/plugins/ml2/driver_context.py create mode 100644 neutron/plugins/ml2/drivers/README.fslsdn create mode 100644 neutron/plugins/ml2/drivers/README.odl create mode 100644 neutron/plugins/ml2/drivers/__init__.py create mode 100644 neutron/plugins/ml2/drivers/brocade/README.md create mode 100644 neutron/plugins/ml2/drivers/brocade/__init__.py create mode 100644 neutron/plugins/ml2/drivers/brocade/db/__init__.py create mode 100644 
neutron/plugins/ml2/drivers/brocade/db/models.py create mode 100644 neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py create mode 100644 neutron/plugins/ml2/drivers/brocade/nos/__init__.py create mode 100644 neutron/plugins/ml2/drivers/brocade/nos/nctemplates.py create mode 100644 neutron/plugins/ml2/drivers/brocade/nos/nosdriver.py create mode 100644 neutron/plugins/ml2/drivers/cisco/__init__.py create mode 100644 neutron/plugins/ml2/drivers/cisco/apic/__init__.py create mode 100644 neutron/plugins/ml2/drivers/cisco/apic/apic_client.py create mode 100644 neutron/plugins/ml2/drivers/cisco/apic/apic_manager.py create mode 100644 neutron/plugins/ml2/drivers/cisco/apic/apic_model.py create mode 100644 neutron/plugins/ml2/drivers/cisco/apic/config.py create mode 100644 neutron/plugins/ml2/drivers/cisco/apic/exceptions.py create mode 100644 neutron/plugins/ml2/drivers/cisco/apic/mechanism_apic.py create mode 100644 neutron/plugins/ml2/drivers/cisco/nexus/README create mode 100644 neutron/plugins/ml2/drivers/cisco/nexus/__init__.py create mode 100644 neutron/plugins/ml2/drivers/cisco/nexus/config.py create mode 100644 neutron/plugins/ml2/drivers/cisco/nexus/constants.py create mode 100644 neutron/plugins/ml2/drivers/cisco/nexus/exceptions.py create mode 100644 neutron/plugins/ml2/drivers/cisco/nexus/mech_cisco_nexus.py create mode 100644 neutron/plugins/ml2/drivers/cisco/nexus/nexus_db_v2.py create mode 100644 neutron/plugins/ml2/drivers/cisco/nexus/nexus_models_v2.py create mode 100644 neutron/plugins/ml2/drivers/cisco/nexus/nexus_network_driver.py create mode 100644 neutron/plugins/ml2/drivers/cisco/nexus/nexus_snippets.py create mode 100644 neutron/plugins/ml2/drivers/l2pop/README create mode 100644 neutron/plugins/ml2/drivers/l2pop/__init__.py create mode 100644 neutron/plugins/ml2/drivers/l2pop/config.py create mode 100644 neutron/plugins/ml2/drivers/l2pop/constants.py create mode 100644 neutron/plugins/ml2/drivers/l2pop/db.py create mode 100644 
neutron/plugins/ml2/drivers/l2pop/mech_driver.py create mode 100644 neutron/plugins/ml2/drivers/l2pop/rpc.py create mode 100644 neutron/plugins/ml2/drivers/mech_agent.py create mode 100644 neutron/plugins/ml2/drivers/mech_arista/README create mode 100644 neutron/plugins/ml2/drivers/mech_arista/__init__.py create mode 100644 neutron/plugins/ml2/drivers/mech_arista/config.py create mode 100644 neutron/plugins/ml2/drivers/mech_arista/db.py create mode 100644 neutron/plugins/ml2/drivers/mech_arista/exceptions.py create mode 100644 neutron/plugins/ml2/drivers/mech_arista/mechanism_arista.py create mode 100644 neutron/plugins/ml2/drivers/mech_bigswitch/__init__.py create mode 100644 neutron/plugins/ml2/drivers/mech_bigswitch/driver.py create mode 100644 neutron/plugins/ml2/drivers/mech_hyperv.py create mode 100644 neutron/plugins/ml2/drivers/mech_linuxbridge.py create mode 100644 neutron/plugins/ml2/drivers/mech_ofagent.py create mode 100644 neutron/plugins/ml2/drivers/mech_openvswitch.py create mode 100755 neutron/plugins/ml2/drivers/mechanism_fslsdn.py create mode 100644 neutron/plugins/ml2/drivers/mechanism_ncs.py create mode 100644 neutron/plugins/ml2/drivers/mechanism_odl.py create mode 100644 neutron/plugins/ml2/drivers/mlnx/__init__.py create mode 100644 neutron/plugins/ml2/drivers/mlnx/config.py create mode 100644 neutron/plugins/ml2/drivers/mlnx/mech_mlnx.py create mode 100644 neutron/plugins/ml2/drivers/type_flat.py create mode 100644 neutron/plugins/ml2/drivers/type_gre.py create mode 100644 neutron/plugins/ml2/drivers/type_local.py create mode 100644 neutron/plugins/ml2/drivers/type_tunnel.py create mode 100644 neutron/plugins/ml2/drivers/type_vlan.py create mode 100644 neutron/plugins/ml2/drivers/type_vxlan.py create mode 100644 neutron/plugins/ml2/managers.py create mode 100644 neutron/plugins/ml2/models.py create mode 100644 neutron/plugins/ml2/plugin.py create mode 100644 neutron/plugins/ml2/rpc.py create mode 100644 neutron/plugins/mlnx/README create 
mode 100644 neutron/plugins/mlnx/__init__.py create mode 100644 neutron/plugins/mlnx/agent/__init__.py create mode 100644 neutron/plugins/mlnx/agent/eswitch_neutron_agent.py create mode 100644 neutron/plugins/mlnx/agent/utils.py create mode 100644 neutron/plugins/mlnx/agent_notify_api.py create mode 100644 neutron/plugins/mlnx/common/__init__.py create mode 100644 neutron/plugins/mlnx/common/comm_utils.py create mode 100644 neutron/plugins/mlnx/common/config.py create mode 100644 neutron/plugins/mlnx/common/constants.py create mode 100644 neutron/plugins/mlnx/common/exceptions.py create mode 100644 neutron/plugins/mlnx/db/__init__.py create mode 100644 neutron/plugins/mlnx/db/mlnx_db_v2.py create mode 100644 neutron/plugins/mlnx/db/mlnx_models_v2.py create mode 100644 neutron/plugins/mlnx/mlnx_plugin.py create mode 100644 neutron/plugins/mlnx/rpc_callbacks.py create mode 100644 neutron/plugins/nec/README create mode 100644 neutron/plugins/nec/__init__.py create mode 100644 neutron/plugins/nec/agent/__init__.py create mode 100755 neutron/plugins/nec/agent/nec_neutron_agent.py create mode 100644 neutron/plugins/nec/common/__init__.py create mode 100644 neutron/plugins/nec/common/config.py create mode 100644 neutron/plugins/nec/common/constants.py create mode 100644 neutron/plugins/nec/common/exceptions.py create mode 100644 neutron/plugins/nec/common/ofc_client.py create mode 100644 neutron/plugins/nec/common/utils.py create mode 100644 neutron/plugins/nec/db/__init__.py create mode 100644 neutron/plugins/nec/db/api.py create mode 100644 neutron/plugins/nec/db/models.py create mode 100644 neutron/plugins/nec/db/packetfilter.py create mode 100644 neutron/plugins/nec/db/router.py create mode 100644 neutron/plugins/nec/drivers/__init__.py create mode 100644 neutron/plugins/nec/drivers/pfc.py create mode 100644 neutron/plugins/nec/drivers/trema.py create mode 100644 neutron/plugins/nec/extensions/__init__.py create mode 100644 
neutron/plugins/nec/extensions/packetfilter.py create mode 100644 neutron/plugins/nec/extensions/router_provider.py create mode 100644 neutron/plugins/nec/nec_plugin.py create mode 100644 neutron/plugins/nec/nec_router.py create mode 100644 neutron/plugins/nec/ofc_driver_base.py create mode 100644 neutron/plugins/nec/ofc_manager.py create mode 100644 neutron/plugins/nec/packet_filter.py create mode 100644 neutron/plugins/nec/router_drivers.py create mode 100644 neutron/plugins/nuage/__init__.py create mode 100644 neutron/plugins/nuage/common/__init__.py create mode 100644 neutron/plugins/nuage/common/config.py create mode 100644 neutron/plugins/nuage/common/constants.py create mode 100644 neutron/plugins/nuage/common/exceptions.py create mode 100644 neutron/plugins/nuage/extensions/__init__.py create mode 100644 neutron/plugins/nuage/extensions/netpartition.py create mode 100644 neutron/plugins/nuage/extensions/nuage_router.py create mode 100644 neutron/plugins/nuage/extensions/nuage_subnet.py create mode 100644 neutron/plugins/nuage/nuage_models.py create mode 100644 neutron/plugins/nuage/nuagedb.py create mode 100644 neutron/plugins/nuage/plugin.py create mode 100644 neutron/plugins/ofagent/README create mode 100644 neutron/plugins/ofagent/__init__.py create mode 100644 neutron/plugins/ofagent/agent/__init__.py create mode 100644 neutron/plugins/ofagent/agent/ofa_neutron_agent.py create mode 100644 neutron/plugins/ofagent/common/__init__.py create mode 100644 neutron/plugins/ofagent/common/config.py create mode 100644 neutron/plugins/oneconvergence/README create mode 100644 neutron/plugins/oneconvergence/__init__.py create mode 100644 neutron/plugins/oneconvergence/agent/__init__.py create mode 100644 neutron/plugins/oneconvergence/agent/nvsd_neutron_agent.py create mode 100644 neutron/plugins/oneconvergence/lib/__init__.py create mode 100644 neutron/plugins/oneconvergence/lib/config.py create mode 100644 neutron/plugins/oneconvergence/lib/exception.py create 
mode 100644 neutron/plugins/oneconvergence/lib/nvsd_db.py create mode 100644 neutron/plugins/oneconvergence/lib/nvsdlib.py create mode 100644 neutron/plugins/oneconvergence/lib/plugin_helper.py create mode 100644 neutron/plugins/oneconvergence/plugin.py create mode 100644 neutron/plugins/openvswitch/README create mode 100644 neutron/plugins/openvswitch/__init__.py create mode 100644 neutron/plugins/openvswitch/agent/__init__.py create mode 100644 neutron/plugins/openvswitch/agent/ovs_neutron_agent.py create mode 100644 neutron/plugins/openvswitch/agent/xenapi/README create mode 100755 neutron/plugins/openvswitch/agent/xenapi/contrib/build-rpm.sh create mode 100644 neutron/plugins/openvswitch/agent/xenapi/contrib/rpmbuild/SPECS/openstack-quantum-xen-plugins.spec create mode 100644 neutron/plugins/openvswitch/agent/xenapi/etc/xapi.d/plugins/netwrap create mode 100644 neutron/plugins/openvswitch/common/__init__.py create mode 100644 neutron/plugins/openvswitch/common/config.py create mode 100644 neutron/plugins/openvswitch/common/constants.py create mode 100644 neutron/plugins/openvswitch/ovs_db_v2.py create mode 100644 neutron/plugins/openvswitch/ovs_models_v2.py create mode 100644 neutron/plugins/openvswitch/ovs_neutron_plugin.py create mode 100644 neutron/plugins/plumgrid/README create mode 100644 neutron/plugins/plumgrid/__init__.py create mode 100644 neutron/plugins/plumgrid/common/__init__.py create mode 100644 neutron/plugins/plumgrid/common/exceptions.py create mode 100644 neutron/plugins/plumgrid/drivers/__init__.py create mode 100644 neutron/plugins/plumgrid/drivers/fake_plumlib.py create mode 100644 neutron/plugins/plumgrid/drivers/plumlib.py create mode 100644 neutron/plugins/plumgrid/plumgrid_plugin/__init__.py create mode 100644 neutron/plugins/plumgrid/plumgrid_plugin/plugin_ver.py create mode 100644 neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py create mode 100644 neutron/plugins/ryu/README create mode 100644 
neutron/plugins/ryu/__init__.py create mode 100644 neutron/plugins/ryu/agent/__init__.py create mode 100755 neutron/plugins/ryu/agent/ryu_neutron_agent.py create mode 100644 neutron/plugins/ryu/common/__init__.py create mode 100644 neutron/plugins/ryu/common/config.py create mode 100644 neutron/plugins/ryu/db/__init__.py create mode 100644 neutron/plugins/ryu/db/api_v2.py create mode 100644 neutron/plugins/ryu/db/models_v2.py create mode 100644 neutron/plugins/ryu/ryu_neutron_plugin.py create mode 100644 neutron/plugins/vmware/__init__.py create mode 100644 neutron/plugins/vmware/api_client/__init__.py create mode 100644 neutron/plugins/vmware/api_client/base.py create mode 100644 neutron/plugins/vmware/api_client/client.py create mode 100644 neutron/plugins/vmware/api_client/eventlet_client.py create mode 100644 neutron/plugins/vmware/api_client/eventlet_request.py create mode 100644 neutron/plugins/vmware/api_client/exception.py create mode 100644 neutron/plugins/vmware/api_client/request.py create mode 100644 neutron/plugins/vmware/api_client/version.py create mode 100644 neutron/plugins/vmware/check_nsx_config.py create mode 100644 neutron/plugins/vmware/common/__init__.py create mode 100644 neutron/plugins/vmware/common/config.py create mode 100644 neutron/plugins/vmware/common/exceptions.py create mode 100644 neutron/plugins/vmware/common/nsx_utils.py create mode 100644 neutron/plugins/vmware/common/securitygroups.py create mode 100644 neutron/plugins/vmware/common/sync.py create mode 100644 neutron/plugins/vmware/common/utils.py create mode 100644 neutron/plugins/vmware/dbexts/__init__.py create mode 100644 neutron/plugins/vmware/dbexts/db.py create mode 100644 neutron/plugins/vmware/dbexts/distributedrouter.py create mode 100644 neutron/plugins/vmware/dbexts/lsn_db.py create mode 100644 neutron/plugins/vmware/dbexts/maclearning.py create mode 100644 neutron/plugins/vmware/dbexts/models.py create mode 100644 neutron/plugins/vmware/dbexts/networkgw_db.py 
create mode 100644 neutron/plugins/vmware/dbexts/nsxrouter.py create mode 100644 neutron/plugins/vmware/dbexts/qos_db.py create mode 100644 neutron/plugins/vmware/dbexts/servicerouter.py create mode 100644 neutron/plugins/vmware/dbexts/vcns_db.py create mode 100644 neutron/plugins/vmware/dbexts/vcns_models.py create mode 100644 neutron/plugins/vmware/dhcp_meta/__init__.py create mode 100644 neutron/plugins/vmware/dhcp_meta/combined.py create mode 100644 neutron/plugins/vmware/dhcp_meta/constants.py create mode 100644 neutron/plugins/vmware/dhcp_meta/lsnmanager.py create mode 100644 neutron/plugins/vmware/dhcp_meta/migration.py create mode 100644 neutron/plugins/vmware/dhcp_meta/nsx.py create mode 100644 neutron/plugins/vmware/dhcp_meta/rpc.py create mode 100644 neutron/plugins/vmware/dhcpmeta_modes.py create mode 100644 neutron/plugins/vmware/extensions/__init__.py create mode 100644 neutron/plugins/vmware/extensions/distributedrouter.py create mode 100644 neutron/plugins/vmware/extensions/lsn.py create mode 100644 neutron/plugins/vmware/extensions/maclearning.py create mode 100644 neutron/plugins/vmware/extensions/networkgw.py create mode 100644 neutron/plugins/vmware/extensions/nvp_qos.py create mode 100644 neutron/plugins/vmware/extensions/qos.py create mode 100644 neutron/plugins/vmware/extensions/servicerouter.py create mode 100644 neutron/plugins/vmware/nsx_cluster.py create mode 100644 neutron/plugins/vmware/nsxlib/__init__.py create mode 100644 neutron/plugins/vmware/nsxlib/l2gateway.py create mode 100644 neutron/plugins/vmware/nsxlib/lsn.py create mode 100644 neutron/plugins/vmware/nsxlib/queue.py create mode 100644 neutron/plugins/vmware/nsxlib/router.py create mode 100644 neutron/plugins/vmware/nsxlib/secgroup.py create mode 100644 neutron/plugins/vmware/nsxlib/switch.py create mode 100644 neutron/plugins/vmware/nsxlib/versioning.py create mode 100644 neutron/plugins/vmware/plugin.py create mode 100644 neutron/plugins/vmware/plugins/__init__.py create 
mode 100644 neutron/plugins/vmware/plugins/base.py create mode 100644 neutron/plugins/vmware/plugins/service.py create mode 100644 neutron/plugins/vmware/shell/__init__.py create mode 100644 neutron/plugins/vmware/shell/commands.py create mode 100644 neutron/plugins/vmware/vshield/__init__.py create mode 100644 neutron/plugins/vmware/vshield/common/VcnsApiClient.py create mode 100644 neutron/plugins/vmware/vshield/common/__init__.py create mode 100644 neutron/plugins/vmware/vshield/common/constants.py create mode 100644 neutron/plugins/vmware/vshield/common/exceptions.py create mode 100644 neutron/plugins/vmware/vshield/edge_appliance_driver.py create mode 100644 neutron/plugins/vmware/vshield/edge_firewall_driver.py create mode 100644 neutron/plugins/vmware/vshield/edge_ipsecvpn_driver.py create mode 100644 neutron/plugins/vmware/vshield/edge_loadbalancer_driver.py create mode 100644 neutron/plugins/vmware/vshield/tasks/__init__.py create mode 100644 neutron/plugins/vmware/vshield/tasks/constants.py create mode 100644 neutron/plugins/vmware/vshield/tasks/tasks.py create mode 100644 neutron/plugins/vmware/vshield/vcns.py create mode 100644 neutron/plugins/vmware/vshield/vcns_driver.py create mode 100644 neutron/policy.py create mode 100644 neutron/quota.py create mode 100644 neutron/scheduler/__init__.py create mode 100644 neutron/scheduler/dhcp_agent_scheduler.py create mode 100644 neutron/scheduler/l3_agent_scheduler.py create mode 100755 neutron/server/__init__.py create mode 100644 neutron/service.py create mode 100644 neutron/services/__init__.py create mode 100644 neutron/services/firewall/__init__.py create mode 100644 neutron/services/firewall/agents/__init__.py create mode 100644 neutron/services/firewall/agents/firewall_agent_api.py create mode 100644 neutron/services/firewall/agents/l3reference/__init__.py create mode 100644 neutron/services/firewall/agents/l3reference/firewall_l3_agent.py create mode 100755 
neutron/services/firewall/agents/varmour/__init__.py create mode 100755 neutron/services/firewall/agents/varmour/varmour_api.py create mode 100755 neutron/services/firewall/agents/varmour/varmour_router.py create mode 100755 neutron/services/firewall/agents/varmour/varmour_utils.py create mode 100644 neutron/services/firewall/drivers/__init__.py create mode 100644 neutron/services/firewall/drivers/fwaas_base.py create mode 100644 neutron/services/firewall/drivers/linux/__init__.py create mode 100644 neutron/services/firewall/drivers/linux/iptables_fwaas.py create mode 100755 neutron/services/firewall/drivers/varmour/__init__.py create mode 100755 neutron/services/firewall/drivers/varmour/varmour_fwaas.py create mode 100644 neutron/services/firewall/fwaas_plugin.py create mode 100644 neutron/services/l3_router/README create mode 100644 neutron/services/l3_router/__init__.py create mode 100644 neutron/services/l3_router/l3_apic.py create mode 100644 neutron/services/l3_router/l3_router_plugin.py create mode 100644 neutron/services/loadbalancer/__init__.py create mode 100644 neutron/services/loadbalancer/agent/__init__.py create mode 100644 neutron/services/loadbalancer/agent/agent.py create mode 100644 neutron/services/loadbalancer/agent/agent_api.py create mode 100644 neutron/services/loadbalancer/agent/agent_device_driver.py create mode 100644 neutron/services/loadbalancer/agent/agent_manager.py create mode 100644 neutron/services/loadbalancer/agent_scheduler.py create mode 100644 neutron/services/loadbalancer/constants.py create mode 100644 neutron/services/loadbalancer/drivers/__init__.py create mode 100644 neutron/services/loadbalancer/drivers/abstract_driver.py create mode 100644 neutron/services/loadbalancer/drivers/common/__init__.py create mode 100644 neutron/services/loadbalancer/drivers/common/agent_driver_base.py create mode 100644 neutron/services/loadbalancer/drivers/embrane/README create mode 100644 
neutron/services/loadbalancer/drivers/embrane/__init__.py create mode 100644 neutron/services/loadbalancer/drivers/embrane/agent/__init__.py create mode 100644 neutron/services/loadbalancer/drivers/embrane/agent/dispatcher.py create mode 100644 neutron/services/loadbalancer/drivers/embrane/agent/lb_operations.py create mode 100644 neutron/services/loadbalancer/drivers/embrane/config.py create mode 100644 neutron/services/loadbalancer/drivers/embrane/constants.py create mode 100644 neutron/services/loadbalancer/drivers/embrane/db.py create mode 100644 neutron/services/loadbalancer/drivers/embrane/driver.py create mode 100644 neutron/services/loadbalancer/drivers/embrane/models.py create mode 100644 neutron/services/loadbalancer/drivers/embrane/poller.py create mode 100644 neutron/services/loadbalancer/drivers/haproxy/__init__.py create mode 100644 neutron/services/loadbalancer/drivers/haproxy/cfg.py create mode 100644 neutron/services/loadbalancer/drivers/haproxy/namespace_driver.py create mode 100644 neutron/services/loadbalancer/drivers/haproxy/plugin_driver.py create mode 100644 neutron/services/loadbalancer/drivers/netscaler/__init__.py create mode 100644 neutron/services/loadbalancer/drivers/netscaler/ncc_client.py create mode 100644 neutron/services/loadbalancer/drivers/netscaler/netscaler_driver.py create mode 100644 neutron/services/loadbalancer/drivers/radware/__init__.py create mode 100644 neutron/services/loadbalancer/drivers/radware/driver.py create mode 100644 neutron/services/loadbalancer/drivers/radware/exceptions.py create mode 100644 neutron/services/loadbalancer/plugin.py create mode 100644 neutron/services/metering/__init__.py create mode 100644 neutron/services/metering/agents/__init__.py create mode 100644 neutron/services/metering/agents/metering_agent.py create mode 100644 neutron/services/metering/drivers/__init__.py create mode 100644 neutron/services/metering/drivers/abstract_driver.py create mode 100644 
neutron/services/metering/drivers/iptables/__init__.py create mode 100644 neutron/services/metering/drivers/iptables/iptables_driver.py create mode 100644 neutron/services/metering/drivers/noop/__init__.py create mode 100644 neutron/services/metering/drivers/noop/noop_driver.py create mode 100644 neutron/services/metering/metering_plugin.py create mode 100644 neutron/services/provider_configuration.py create mode 100644 neutron/services/service_base.py create mode 100644 neutron/services/vpn/__init__.py create mode 100644 neutron/services/vpn/agent.py create mode 100644 neutron/services/vpn/common/__init__.py create mode 100644 neutron/services/vpn/common/topics.py create mode 100644 neutron/services/vpn/device_drivers/__init__.py create mode 100644 neutron/services/vpn/device_drivers/cisco_csr_rest_client.py create mode 100644 neutron/services/vpn/device_drivers/cisco_ipsec.py create mode 100644 neutron/services/vpn/device_drivers/ipsec.py create mode 100644 neutron/services/vpn/device_drivers/template/openswan/ipsec.conf.template create mode 100644 neutron/services/vpn/device_drivers/template/openswan/ipsec.secret.template create mode 100644 neutron/services/vpn/plugin.py create mode 100644 neutron/services/vpn/service_drivers/__init__.py create mode 100644 neutron/services/vpn/service_drivers/cisco_csr_db.py create mode 100644 neutron/services/vpn/service_drivers/cisco_ipsec.py create mode 100644 neutron/services/vpn/service_drivers/ipsec.py create mode 100644 neutron/tests/__init__.py create mode 100644 neutron/tests/base.py create mode 100644 neutron/tests/etc/api-paste.ini.test create mode 100644 neutron/tests/etc/neutron.conf.test create mode 100644 neutron/tests/etc/rootwrap.d/neutron.test.filters create mode 100644 neutron/tests/fake_notifier.py create mode 100644 neutron/tests/functional/__init__.py create mode 100644 neutron/tests/functional/agent/__init__.py create mode 100644 neutron/tests/functional/agent/linux/__init__.py create mode 100644 
neutron/tests/functional/agent/linux/base.py create mode 100644 neutron/tests/functional/agent/linux/test_async_process.py create mode 100644 neutron/tests/functional/agent/linux/test_ovsdb_monitor.py create mode 100644 neutron/tests/functional/sanity/__init__.py create mode 100644 neutron/tests/functional/sanity/test_ovs_sanity.py create mode 100644 neutron/tests/post_mortem_debug.py create mode 100644 neutron/tests/tools.py create mode 100644 neutron/tests/unit/__init__.py create mode 100644 neutron/tests/unit/_test_extension_portbindings.py create mode 100644 neutron/tests/unit/_test_rootwrap_exec.py create mode 100644 neutron/tests/unit/agent/__init__.py create mode 100644 neutron/tests/unit/agent/linux/__init__.py create mode 100644 neutron/tests/unit/agent/linux/test_async_process.py create mode 100644 neutron/tests/unit/agent/linux/test_ovs_lib.py create mode 100644 neutron/tests/unit/agent/linux/test_ovsdb_monitor.py create mode 100644 neutron/tests/unit/agent/linux/test_polling.py create mode 100644 neutron/tests/unit/api/__init__.py create mode 100644 neutron/tests/unit/api/rpc/__init__.py create mode 100644 neutron/tests/unit/api/rpc/agentnotifiers/__init__.py create mode 100644 neutron/tests/unit/api/rpc/agentnotifiers/test_dhcp_rpc_agent_api.py create mode 100644 neutron/tests/unit/bigswitch/__init__.py create mode 100644 neutron/tests/unit/bigswitch/etc/restproxy.ini.test create mode 100644 neutron/tests/unit/bigswitch/etc/ssl/ca_certs/README create mode 100644 neutron/tests/unit/bigswitch/etc/ssl/combined/README create mode 100644 neutron/tests/unit/bigswitch/etc/ssl/host_certs/README create mode 100644 neutron/tests/unit/bigswitch/fake_server.py create mode 100644 neutron/tests/unit/bigswitch/test_agent_scheduler.py create mode 100644 neutron/tests/unit/bigswitch/test_base.py create mode 100644 neutron/tests/unit/bigswitch/test_capabilities.py create mode 100644 neutron/tests/unit/bigswitch/test_restproxy_agent.py create mode 100644 
neutron/tests/unit/bigswitch/test_restproxy_plugin.py create mode 100644 neutron/tests/unit/bigswitch/test_router_db.py create mode 100644 neutron/tests/unit/bigswitch/test_security_groups.py create mode 100644 neutron/tests/unit/bigswitch/test_servermanager.py create mode 100644 neutron/tests/unit/bigswitch/test_ssl.py create mode 100644 neutron/tests/unit/brocade/__init__.py create mode 100644 neutron/tests/unit/brocade/test_brocade_db.py create mode 100644 neutron/tests/unit/brocade/test_brocade_plugin.py create mode 100644 neutron/tests/unit/brocade/test_brocade_vlan.py create mode 100644 neutron/tests/unit/cisco/__init__.py create mode 100644 neutron/tests/unit/cisco/n1kv/__init__.py create mode 100755 neutron/tests/unit/cisco/n1kv/fake_client.py create mode 100644 neutron/tests/unit/cisco/n1kv/test_n1kv_db.py create mode 100644 neutron/tests/unit/cisco/n1kv/test_n1kv_plugin.py create mode 100644 neutron/tests/unit/cisco/test_config.py create mode 100644 neutron/tests/unit/cisco/test_network_db.py create mode 100644 neutron/tests/unit/cisco/test_network_plugin.py create mode 100644 neutron/tests/unit/cisco/test_nexus_db.py create mode 100644 neutron/tests/unit/cisco/test_nexus_plugin.py create mode 100755 neutron/tests/unit/cisco/test_plugin_model.py create mode 100644 neutron/tests/unit/database_stubs.py create mode 100644 neutron/tests/unit/db/__init__.py create mode 100644 neutron/tests/unit/db/firewall/__init__.py create mode 100644 neutron/tests/unit/db/firewall/test_db_firewall.py create mode 100644 neutron/tests/unit/db/loadbalancer/__init__.py create mode 100644 neutron/tests/unit/db/loadbalancer/test_db_loadbalancer.py create mode 100644 neutron/tests/unit/db/metering/__init__.py create mode 100644 neutron/tests/unit/db/metering/test_db_metering.py create mode 100644 neutron/tests/unit/db/test_agent_db.py create mode 100644 neutron/tests/unit/db/test_quota_db.py create mode 100644 neutron/tests/unit/db/vpn/__init__.py create mode 100644 
neutron/tests/unit/db/vpn/test_db_vpnaas.py create mode 100644 neutron/tests/unit/dummy_plugin.py create mode 100644 neutron/tests/unit/embrane/__init__.py create mode 100644 neutron/tests/unit/embrane/test_embrane_defaults.py create mode 100644 neutron/tests/unit/embrane/test_embrane_l3_plugin.py create mode 100644 neutron/tests/unit/embrane/test_embrane_neutron_plugin.py create mode 100644 neutron/tests/unit/extension_stubs.py create mode 100644 neutron/tests/unit/extensions/__init__.py create mode 100644 neutron/tests/unit/extensions/extendedattribute.py create mode 100644 neutron/tests/unit/extensions/extensionattribute.py create mode 100644 neutron/tests/unit/extensions/foxinsocks.py create mode 100644 neutron/tests/unit/extensions/v2attributes.py create mode 100644 neutron/tests/unit/hyperv/__init__.py create mode 100644 neutron/tests/unit/hyperv/test_hyperv_neutron_agent.py create mode 100644 neutron/tests/unit/hyperv/test_hyperv_neutron_plugin.py create mode 100644 neutron/tests/unit/hyperv/test_hyperv_rpcapi.py create mode 100644 neutron/tests/unit/hyperv/test_hyperv_security_groups_driver.py create mode 100644 neutron/tests/unit/hyperv/test_hyperv_utilsfactory.py create mode 100644 neutron/tests/unit/hyperv/test_hyperv_utilsv2.py create mode 100644 neutron/tests/unit/ibm/__init__.py create mode 100644 neutron/tests/unit/ibm/test_sdnve_agent.py create mode 100644 neutron/tests/unit/ibm/test_sdnve_api.py create mode 100644 neutron/tests/unit/ibm/test_sdnve_plugin.py create mode 100644 neutron/tests/unit/linuxbridge/__init__.py create mode 100644 neutron/tests/unit/linuxbridge/test_agent_scheduler.py create mode 100644 neutron/tests/unit/linuxbridge/test_defaults.py create mode 100644 neutron/tests/unit/linuxbridge/test_lb_db.py create mode 100644 neutron/tests/unit/linuxbridge/test_lb_neutron_agent.py create mode 100644 neutron/tests/unit/linuxbridge/test_lb_security_group.py create mode 100644 neutron/tests/unit/linuxbridge/test_linuxbridge_plugin.py 
create mode 100644 neutron/tests/unit/linuxbridge/test_rpcapi.py create mode 100644 neutron/tests/unit/metaplugin/__init__.py create mode 100644 neutron/tests/unit/metaplugin/fake_plugin.py create mode 100644 neutron/tests/unit/metaplugin/test_basic.py create mode 100644 neutron/tests/unit/metaplugin/test_metaplugin.py create mode 100644 neutron/tests/unit/midonet/__init__.py create mode 100644 neutron/tests/unit/midonet/etc/midonet.ini.test create mode 100644 neutron/tests/unit/midonet/mock_lib.py create mode 100644 neutron/tests/unit/midonet/test_midonet_driver.py create mode 100644 neutron/tests/unit/midonet/test_midonet_lib.py create mode 100644 neutron/tests/unit/midonet/test_midonet_plugin.py create mode 100644 neutron/tests/unit/ml2/__init__.py create mode 100644 neutron/tests/unit/ml2/_test_mech_agent.py create mode 100644 neutron/tests/unit/ml2/drivers/__init__.py create mode 100644 neutron/tests/unit/ml2/drivers/brocade/__init__.py create mode 100644 neutron/tests/unit/ml2/drivers/brocade/test_brocade_mechanism_driver.py create mode 100644 neutron/tests/unit/ml2/drivers/cisco/__init__.py create mode 100644 neutron/tests/unit/ml2/drivers/cisco/apic/__init__.py create mode 100644 neutron/tests/unit/ml2/drivers/cisco/apic/test_cisco_apic_client.py create mode 100644 neutron/tests/unit/ml2/drivers/cisco/apic/test_cisco_apic_common.py create mode 100644 neutron/tests/unit/ml2/drivers/cisco/apic/test_cisco_apic_manager.py create mode 100644 neutron/tests/unit/ml2/drivers/cisco/apic/test_cisco_apic_mechanism_driver.py create mode 100644 neutron/tests/unit/ml2/drivers/cisco/nexus/__init__.py create mode 100644 neutron/tests/unit/ml2/drivers/cisco/nexus/test_cisco_config.py create mode 100644 neutron/tests/unit/ml2/drivers/cisco/nexus/test_cisco_mech.py create mode 100644 neutron/tests/unit/ml2/drivers/cisco/nexus/test_cisco_nexus.py create mode 100644 neutron/tests/unit/ml2/drivers/cisco/nexus/test_cisco_nexus_db.py create mode 100644 
neutron/tests/unit/ml2/drivers/mechanism_bulkless.py create mode 100644 neutron/tests/unit/ml2/drivers/mechanism_logger.py create mode 100644 neutron/tests/unit/ml2/drivers/mechanism_test.py create mode 100644 neutron/tests/unit/ml2/drivers/test_arista_mechanism_driver.py create mode 100644 neutron/tests/unit/ml2/drivers/test_bigswitch_mech.py create mode 100644 neutron/tests/unit/ml2/drivers/test_l2population.py create mode 100644 neutron/tests/unit/ml2/drivers/test_mech_mlnx.py create mode 100644 neutron/tests/unit/ml2/drivers/test_ofagent_mech.py create mode 100644 neutron/tests/unit/ml2/test_agent_scheduler.py create mode 100644 neutron/tests/unit/ml2/test_mech_hyperv.py create mode 100644 neutron/tests/unit/ml2/test_mech_linuxbridge.py create mode 100644 neutron/tests/unit/ml2/test_mech_openvswitch.py create mode 100644 neutron/tests/unit/ml2/test_mechanism_fslsdn.py create mode 100644 neutron/tests/unit/ml2/test_mechanism_ncs.py create mode 100644 neutron/tests/unit/ml2/test_mechanism_odl.py create mode 100644 neutron/tests/unit/ml2/test_ml2_plugin.py create mode 100644 neutron/tests/unit/ml2/test_port_binding.py create mode 100644 neutron/tests/unit/ml2/test_rpcapi.py create mode 100644 neutron/tests/unit/ml2/test_security_group.py create mode 100644 neutron/tests/unit/ml2/test_type_flat.py create mode 100644 neutron/tests/unit/ml2/test_type_gre.py create mode 100644 neutron/tests/unit/ml2/test_type_local.py create mode 100644 neutron/tests/unit/ml2/test_type_vlan.py create mode 100644 neutron/tests/unit/ml2/test_type_vxlan.py create mode 100644 neutron/tests/unit/mlnx/__init__.py create mode 100644 neutron/tests/unit/mlnx/test_agent_scheduler.py create mode 100644 neutron/tests/unit/mlnx/test_defaults.py create mode 100644 neutron/tests/unit/mlnx/test_mlnx_comm_utils.py create mode 100644 neutron/tests/unit/mlnx/test_mlnx_db.py create mode 100644 neutron/tests/unit/mlnx/test_mlnx_neutron_agent.py create mode 100644 
neutron/tests/unit/mlnx/test_mlnx_plugin.py create mode 100644 neutron/tests/unit/mlnx/test_mlnx_plugin_config.py create mode 100644 neutron/tests/unit/mlnx/test_mlnx_security_group.py create mode 100644 neutron/tests/unit/mlnx/test_rpcapi.py create mode 100644 neutron/tests/unit/nec/__init__.py create mode 100644 neutron/tests/unit/nec/fake_ofc_manager.py create mode 100644 neutron/tests/unit/nec/stub_ofc_driver.py create mode 100644 neutron/tests/unit/nec/test_agent_scheduler.py create mode 100644 neutron/tests/unit/nec/test_config.py create mode 100644 neutron/tests/unit/nec/test_db.py create mode 100644 neutron/tests/unit/nec/test_nec_agent.py create mode 100644 neutron/tests/unit/nec/test_nec_plugin.py create mode 100644 neutron/tests/unit/nec/test_ofc_client.py create mode 100644 neutron/tests/unit/nec/test_ofc_manager.py create mode 100644 neutron/tests/unit/nec/test_packet_filter.py create mode 100644 neutron/tests/unit/nec/test_pfc_driver.py create mode 100644 neutron/tests/unit/nec/test_portbindings.py create mode 100644 neutron/tests/unit/nec/test_router.py create mode 100644 neutron/tests/unit/nec/test_security_group.py create mode 100644 neutron/tests/unit/nec/test_trema_driver.py create mode 100644 neutron/tests/unit/nec/test_utils.py create mode 100644 neutron/tests/unit/notifiers/__init__.py create mode 100644 neutron/tests/unit/notifiers/test_notifiers_nova.py create mode 100644 neutron/tests/unit/nuage/__init__.py create mode 100644 neutron/tests/unit/nuage/fake_nuageclient.py create mode 100644 neutron/tests/unit/nuage/test_netpartition.py create mode 100644 neutron/tests/unit/nuage/test_nuage_plugin.py create mode 100644 neutron/tests/unit/ofagent/__init__.py create mode 100644 neutron/tests/unit/ofagent/fake_oflib.py create mode 100644 neutron/tests/unit/ofagent/test_ofa_defaults.py create mode 100644 neutron/tests/unit/ofagent/test_ofa_neutron_agent.py create mode 100644 neutron/tests/unit/oneconvergence/__init__.py create mode 100644 
neutron/tests/unit/oneconvergence/test_nvsd_agent.py create mode 100644 neutron/tests/unit/oneconvergence/test_nvsd_plugin.py create mode 100644 neutron/tests/unit/oneconvergence/test_nvsdlib.py create mode 100644 neutron/tests/unit/oneconvergence/test_plugin_helper.py create mode 100644 neutron/tests/unit/oneconvergence/test_security_group.py create mode 100644 neutron/tests/unit/openvswitch/__init__.py create mode 100644 neutron/tests/unit/openvswitch/test_agent_scheduler.py create mode 100644 neutron/tests/unit/openvswitch/test_openvswitch_plugin.py create mode 100644 neutron/tests/unit/openvswitch/test_ovs_db.py create mode 100644 neutron/tests/unit/openvswitch/test_ovs_defaults.py create mode 100644 neutron/tests/unit/openvswitch/test_ovs_neutron_agent.py create mode 100644 neutron/tests/unit/openvswitch/test_ovs_rpcapi.py create mode 100644 neutron/tests/unit/openvswitch/test_ovs_security_group.py create mode 100644 neutron/tests/unit/openvswitch/test_ovs_tunnel.py create mode 100644 neutron/tests/unit/plumgrid/__init__.py create mode 100644 neutron/tests/unit/plumgrid/test_plumgrid_plugin.py create mode 100644 neutron/tests/unit/ryu/__init__.py create mode 100644 neutron/tests/unit/ryu/fake_ryu.py create mode 100644 neutron/tests/unit/ryu/test_defaults.py create mode 100644 neutron/tests/unit/ryu/test_ryu_agent.py create mode 100644 neutron/tests/unit/ryu/test_ryu_db.py create mode 100644 neutron/tests/unit/ryu/test_ryu_plugin.py create mode 100644 neutron/tests/unit/ryu/test_ryu_security_group.py create mode 100644 neutron/tests/unit/services/__init__.py create mode 100644 neutron/tests/unit/services/firewall/__init__.py create mode 100644 neutron/tests/unit/services/firewall/agents/__init__.py create mode 100644 neutron/tests/unit/services/firewall/agents/l3reference/__init__.py create mode 100644 neutron/tests/unit/services/firewall/agents/l3reference/test_firewall_l3_agent.py create mode 100644 
neutron/tests/unit/services/firewall/agents/test_firewall_agent_api.py create mode 100755 neutron/tests/unit/services/firewall/agents/varmour/__init__.py create mode 100644 neutron/tests/unit/services/firewall/agents/varmour/test_varmour_router.py create mode 100644 neutron/tests/unit/services/firewall/drivers/__init__.py create mode 100644 neutron/tests/unit/services/firewall/drivers/linux/__init__.py create mode 100644 neutron/tests/unit/services/firewall/drivers/linux/test_iptables_fwaas.py create mode 100755 neutron/tests/unit/services/firewall/drivers/varmour/__init__.py create mode 100644 neutron/tests/unit/services/firewall/drivers/varmour/test_varmour_fwaas.py create mode 100644 neutron/tests/unit/services/firewall/test_fwaas_plugin.py create mode 100644 neutron/tests/unit/services/l3_router/__init__.py create mode 100644 neutron/tests/unit/services/l3_router/test_l3_apic_plugin.py create mode 100644 neutron/tests/unit/services/loadbalancer/__init__.py create mode 100644 neutron/tests/unit/services/loadbalancer/agent/__init__.py create mode 100644 neutron/tests/unit/services/loadbalancer/agent/test_agent.py create mode 100644 neutron/tests/unit/services/loadbalancer/agent/test_agent_manager.py create mode 100644 neutron/tests/unit/services/loadbalancer/agent/test_api.py create mode 100644 neutron/tests/unit/services/loadbalancer/drivers/__init__.py create mode 100644 neutron/tests/unit/services/loadbalancer/drivers/embrane/__init__.py create mode 100644 neutron/tests/unit/services/loadbalancer/drivers/embrane/test_embrane_defaults.py create mode 100644 neutron/tests/unit/services/loadbalancer/drivers/embrane/test_plugin_driver.py create mode 100644 neutron/tests/unit/services/loadbalancer/drivers/haproxy/__init__.py create mode 100644 neutron/tests/unit/services/loadbalancer/drivers/haproxy/test_cfg.py create mode 100644 neutron/tests/unit/services/loadbalancer/drivers/haproxy/test_namespace_driver.py create mode 100644 
neutron/tests/unit/services/loadbalancer/drivers/netscaler/__init__.py create mode 100644 neutron/tests/unit/services/loadbalancer/drivers/netscaler/test_ncc_client.py create mode 100644 neutron/tests/unit/services/loadbalancer/drivers/netscaler/test_netscaler_driver.py create mode 100644 neutron/tests/unit/services/loadbalancer/drivers/radware/__init__.py create mode 100644 neutron/tests/unit/services/loadbalancer/drivers/radware/test_plugin_driver.py create mode 100644 neutron/tests/unit/services/loadbalancer/drivers/test_agent_driver_base.py create mode 100644 neutron/tests/unit/services/loadbalancer/test_agent_scheduler.py create mode 100644 neutron/tests/unit/services/loadbalancer/test_loadbalancer_plugin.py create mode 100644 neutron/tests/unit/services/loadbalancer/test_loadbalancer_quota_ext.py create mode 100644 neutron/tests/unit/services/metering/__init__.py create mode 100644 neutron/tests/unit/services/metering/drivers/__init__.py create mode 100644 neutron/tests/unit/services/metering/drivers/test_iptables_driver.py create mode 100644 neutron/tests/unit/services/metering/test_metering_agent.py create mode 100644 neutron/tests/unit/services/metering/test_metering_plugin.py create mode 100644 neutron/tests/unit/services/vpn/__init__.py create mode 100644 neutron/tests/unit/services/vpn/device_drivers/__init__.py create mode 100644 neutron/tests/unit/services/vpn/device_drivers/cisco_csr_mock.py create mode 100644 neutron/tests/unit/services/vpn/device_drivers/notest_cisco_csr_rest.py create mode 100644 neutron/tests/unit/services/vpn/device_drivers/test_cisco_ipsec.py create mode 100644 neutron/tests/unit/services/vpn/device_drivers/test_ipsec.py create mode 100644 neutron/tests/unit/services/vpn/service_drivers/__init__.py create mode 100644 neutron/tests/unit/services/vpn/service_drivers/test_cisco_ipsec.py create mode 100644 neutron/tests/unit/services/vpn/service_drivers/test_ipsec.py create mode 100644 
neutron/tests/unit/services/vpn/test_vpn_agent.py create mode 100644 neutron/tests/unit/services/vpn/test_vpnaas_driver_plugin.py create mode 100644 neutron/tests/unit/services/vpn/test_vpnaas_extension.py create mode 100644 neutron/tests/unit/test_agent_config.py create mode 100644 neutron/tests/unit/test_agent_ext_plugin.py create mode 100644 neutron/tests/unit/test_agent_linux_utils.py create mode 100644 neutron/tests/unit/test_agent_netns_cleanup.py create mode 100644 neutron/tests/unit/test_agent_ovs_cleanup.py create mode 100644 neutron/tests/unit/test_agent_rpc.py create mode 100644 neutron/tests/unit/test_api_api_common.py create mode 100644 neutron/tests/unit/test_api_v2.py create mode 100644 neutron/tests/unit/test_api_v2_extension.py create mode 100644 neutron/tests/unit/test_api_v2_resource.py create mode 100644 neutron/tests/unit/test_attributes.py create mode 100644 neutron/tests/unit/test_auth.py create mode 100644 neutron/tests/unit/test_common_log.py create mode 100644 neutron/tests/unit/test_common_utils.py create mode 100644 neutron/tests/unit/test_config.py create mode 100644 neutron/tests/unit/test_db_migration.py create mode 100644 neutron/tests/unit/test_db_plugin.py create mode 100644 neutron/tests/unit/test_db_rpc_base.py create mode 100644 neutron/tests/unit/test_debug_commands.py create mode 100644 neutron/tests/unit/test_dhcp_agent.py create mode 100644 neutron/tests/unit/test_dhcp_scheduler.py create mode 100644 neutron/tests/unit/test_extension_allowedaddresspairs.py create mode 100644 neutron/tests/unit/test_extension_ext_gw_mode.py create mode 100644 neutron/tests/unit/test_extension_ext_net.py create mode 100644 neutron/tests/unit/test_extension_extended_attribute.py create mode 100644 neutron/tests/unit/test_extension_extradhcpopts.py create mode 100644 neutron/tests/unit/test_extension_extraroute.py create mode 100644 neutron/tests/unit/test_extension_firewall.py create mode 100644 neutron/tests/unit/test_extension_pnet.py create 
mode 100644 neutron/tests/unit/test_extension_portsecurity.py create mode 100644 neutron/tests/unit/test_extension_security_group.py create mode 100644 neutron/tests/unit/test_extensions.py create mode 100644 neutron/tests/unit/test_hacking.py create mode 100644 neutron/tests/unit/test_iptables_firewall.py create mode 100644 neutron/tests/unit/test_iptables_manager.py create mode 100644 neutron/tests/unit/test_ipv6.py create mode 100644 neutron/tests/unit/test_l3_agent.py create mode 100644 neutron/tests/unit/test_l3_plugin.py create mode 100644 neutron/tests/unit/test_l3_schedulers.py create mode 100644 neutron/tests/unit/test_linux_daemon.py create mode 100644 neutron/tests/unit/test_linux_dhcp.py create mode 100644 neutron/tests/unit/test_linux_external_process.py create mode 100644 neutron/tests/unit/test_linux_interface.py create mode 100644 neutron/tests/unit/test_linux_ip_lib.py create mode 100644 neutron/tests/unit/test_metadata_agent.py create mode 100644 neutron/tests/unit/test_metadata_namespace_proxy.py create mode 100644 neutron/tests/unit/test_neutron_context.py create mode 100644 neutron/tests/unit/test_neutron_manager.py create mode 100644 neutron/tests/unit/test_policy.py create mode 100644 neutron/tests/unit/test_post_mortem_debug.py create mode 100644 neutron/tests/unit/test_provider_configuration.py create mode 100644 neutron/tests/unit/test_quota_ext.py create mode 100644 neutron/tests/unit/test_routerserviceinsertion.py create mode 100644 neutron/tests/unit/test_security_groups_rpc.py create mode 100644 neutron/tests/unit/test_servicetype.py create mode 100644 neutron/tests/unit/test_wsgi.py create mode 100644 neutron/tests/unit/testlib_api.py create mode 100644 neutron/tests/unit/vmware/__init__.py create mode 100644 neutron/tests/unit/vmware/apiclient/__init__.py create mode 100644 neutron/tests/unit/vmware/apiclient/fake.py create mode 100644 neutron/tests/unit/vmware/apiclient/test_api_common.py create mode 100644 
neutron/tests/unit/vmware/apiclient/test_api_eventlet_request.py create mode 100644 neutron/tests/unit/vmware/db/__init__.py create mode 100644 neutron/tests/unit/vmware/db/test_lsn_db.py create mode 100644 neutron/tests/unit/vmware/db/test_nsx_db.py create mode 100644 neutron/tests/unit/vmware/etc/fake_get_gwservice.json create mode 100644 neutron/tests/unit/vmware/etc/fake_get_lqueue.json create mode 100644 neutron/tests/unit/vmware/etc/fake_get_lrouter.json create mode 100644 neutron/tests/unit/vmware/etc/fake_get_lrouter_lport.json create mode 100644 neutron/tests/unit/vmware/etc/fake_get_lrouter_lport_att.json create mode 100644 neutron/tests/unit/vmware/etc/fake_get_lrouter_nat.json create mode 100644 neutron/tests/unit/vmware/etc/fake_get_lswitch.json create mode 100644 neutron/tests/unit/vmware/etc/fake_get_lswitch_lport.json create mode 100644 neutron/tests/unit/vmware/etc/fake_get_lswitch_lport_att.json create mode 100644 neutron/tests/unit/vmware/etc/fake_get_lswitch_lport_status.json create mode 100644 neutron/tests/unit/vmware/etc/fake_get_security_profile.json create mode 100644 neutron/tests/unit/vmware/etc/fake_post_gwservice.json create mode 100644 neutron/tests/unit/vmware/etc/fake_post_lqueue.json create mode 100644 neutron/tests/unit/vmware/etc/fake_post_lrouter.json create mode 100644 neutron/tests/unit/vmware/etc/fake_post_lrouter_lport.json create mode 100644 neutron/tests/unit/vmware/etc/fake_post_lrouter_nat.json create mode 100644 neutron/tests/unit/vmware/etc/fake_post_lswitch.json create mode 100644 neutron/tests/unit/vmware/etc/fake_post_lswitch_lport.json create mode 100644 neutron/tests/unit/vmware/etc/fake_post_security_profile.json create mode 100644 neutron/tests/unit/vmware/etc/fake_put_lrouter_lport_att.json create mode 100644 neutron/tests/unit/vmware/etc/fake_put_lswitch_lport_att.json create mode 100644 neutron/tests/unit/vmware/etc/neutron.conf.test create mode 100644 neutron/tests/unit/vmware/etc/nsx.ini.agentless.test 
create mode 100644 neutron/tests/unit/vmware/etc/nsx.ini.basic.test create mode 100644 neutron/tests/unit/vmware/etc/nsx.ini.combined.test create mode 100644 neutron/tests/unit/vmware/etc/nsx.ini.full.test create mode 100644 neutron/tests/unit/vmware/etc/nsx.ini.test create mode 100644 neutron/tests/unit/vmware/etc/nvp.ini.full.test create mode 100644 neutron/tests/unit/vmware/etc/vcns.ini.test create mode 100644 neutron/tests/unit/vmware/extensions/__init__.py create mode 100644 neutron/tests/unit/vmware/extensions/test_addresspairs.py create mode 100644 neutron/tests/unit/vmware/extensions/test_maclearning.py create mode 100644 neutron/tests/unit/vmware/extensions/test_networkgw.py create mode 100644 neutron/tests/unit/vmware/extensions/test_portsecurity.py create mode 100644 neutron/tests/unit/vmware/extensions/test_providernet.py create mode 100644 neutron/tests/unit/vmware/extensions/test_qosqueues.py create mode 100644 neutron/tests/unit/vmware/nsxlib/__init__.py create mode 100644 neutron/tests/unit/vmware/nsxlib/base.py create mode 100644 neutron/tests/unit/vmware/nsxlib/test_l2gateway.py create mode 100644 neutron/tests/unit/vmware/nsxlib/test_lsn.py create mode 100644 neutron/tests/unit/vmware/nsxlib/test_queue.py create mode 100644 neutron/tests/unit/vmware/nsxlib/test_router.py create mode 100644 neutron/tests/unit/vmware/nsxlib/test_secgroup.py create mode 100644 neutron/tests/unit/vmware/nsxlib/test_switch.py create mode 100644 neutron/tests/unit/vmware/nsxlib/test_versioning.py create mode 100644 neutron/tests/unit/vmware/test_agent_scheduler.py create mode 100644 neutron/tests/unit/vmware/test_dhcpmeta.py create mode 100644 neutron/tests/unit/vmware/test_nsx_opts.py create mode 100644 neutron/tests/unit/vmware/test_nsx_plugin.py create mode 100644 neutron/tests/unit/vmware/test_nsx_sync.py create mode 100644 neutron/tests/unit/vmware/test_nsx_utils.py create mode 100644 neutron/tests/unit/vmware/vshield/__init__.py create mode 100644 
neutron/tests/unit/vmware/vshield/fake_vcns.py create mode 100644 neutron/tests/unit/vmware/vshield/test_edge_router.py create mode 100644 neutron/tests/unit/vmware/vshield/test_firewall_driver.py create mode 100644 neutron/tests/unit/vmware/vshield/test_fwaas_plugin.py create mode 100644 neutron/tests/unit/vmware/vshield/test_lbaas_plugin.py create mode 100644 neutron/tests/unit/vmware/vshield/test_loadbalancer_driver.py create mode 100644 neutron/tests/unit/vmware/vshield/test_vcns_driver.py create mode 100644 neutron/tests/unit/vmware/vshield/test_vpnaas_plugin.py create mode 100644 neutron/tests/var/ca.crt create mode 100644 neutron/tests/var/certandkey.pem create mode 100644 neutron/tests/var/certificate.crt create mode 100644 neutron/tests/var/privatekey.key create mode 100644 neutron/version.py create mode 100644 neutron/wsgi.py create mode 100644 openstack-common.conf create mode 100644 requirements.txt create mode 100755 run_tests.sh create mode 100644 setup.cfg create mode 100644 setup.py create mode 100644 test-requirements.txt create mode 100644 tools/check_i18n.py create mode 100644 tools/check_i18n_test_case.txt create mode 100755 tools/clean.sh create mode 100644 tools/i18n_cfg.py create mode 100644 tools/install_venv.py create mode 100644 tools/install_venv_common.py create mode 100755 tools/with_venv.sh create mode 100644 tox.ini diff --git a/.coveragerc b/.coveragerc new file mode 100644 index 000000000..5b10a1115 --- /dev/null +++ b/.coveragerc @@ -0,0 +1,7 @@ +[run] +branch = True +source = neutron +omit = neutron/tests/*,neutron/plugins/cisco/test/*,neutron/openstack/* + +[report] +ignore-errors = True diff --git a/.gitignore b/.gitignore new file mode 100644 index 000000000..19526c2b1 --- /dev/null +++ b/.gitignore @@ -0,0 +1,28 @@ +AUTHORS +build/* +build-stamp +ChangeLog +cover/ +.coverage +covhtml/ +dist/ +doc/build +*.DS_Store +*.pyc +neutron.egg-info/ +neutron/vcsversion.py +neutron/versioninfo +pbr*.egg/ +quantum.egg-info/ 
+quantum/vcsversion.py +quantum/versioninfo +run_tests.err.log +run_tests.log +setuptools*.egg/ +subunit.log +.testrepository +.tox/ +.venv/ +*.mo +*.sw? +*~ diff --git a/.mailmap b/.mailmap new file mode 100644 index 000000000..f3e7e5e1a --- /dev/null +++ b/.mailmap @@ -0,0 +1,11 @@ +# Format is: +# +# +lawrancejing +Jiajun Liu +Zhongyue Luo +Kun Huang +Zhenguo Niu +Isaku Yamahata +Isaku Yamahata +Morgan Fainberg diff --git a/.pylintrc b/.pylintrc new file mode 100644 index 000000000..87fbcd3b3 --- /dev/null +++ b/.pylintrc @@ -0,0 +1,42 @@ +# The format of this file isn't really documented; just use --generate-rcfile +[MASTER] +# Add to the black list. It should be a base name, not a +# path. You may set this option multiple times. +ignore=test + +[Messages Control] +# NOTE(justinsb): We might want to have a 2nd strict pylintrc in future +# C0111: Don't require docstrings on every method +# W0511: TODOs in code comments are fine. +# W0142: *args and **kwargs are fine. +# W0622: Redefining id is fine. +disable=C0111,W0511,W0142,W0622 + +[Basic] +# Variable names can be 1 to 31 characters long, with lowercase and underscores +variable-rgx=[a-z_][a-z0-9_]{0,30}$ + +# Argument names can be 2 to 31 characters long, with lowercase and underscores +argument-rgx=[a-z_][a-z0-9_]{1,30}$ + +# Method names should be at least 3 characters long +# and be lowecased with underscores +method-rgx=([a-z_][a-z0-9_]{2,50}|setUp|tearDown)$ + +# Module names matching neutron-* are ok (files in bin/) +module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+)|(neutron-[a-z0-9_-]+))$ + +# Don't require docstrings on tests. +no-docstring-rgx=((__.*__)|([tT]est.*)|setUp|tearDown)$ + +[Design] +max-public-methods=100 +min-public-methods=0 +max-args=6 + +[Variables] + +# List of additional names supposed to be defined in builtins. Remember that +# you should avoid to define new builtins when possible. 
+# _ is used by our localization +additional-builtins=_ diff --git a/.testr.conf b/.testr.conf new file mode 100644 index 000000000..c180b0319 --- /dev/null +++ b/.testr.conf @@ -0,0 +1,4 @@ +[DEFAULT] +test_command=OS_STDOUT_CAPTURE=1 OS_STDERR_CAPTURE=1 OS_LOG_CAPTURE=1 ${PYTHON:-python} -m subunit.run discover -t ./ ${OS_TEST_PATH:-./neutron/tests/unit} $LISTOPT $IDOPTION +test_id_option=--load-list $IDFILE +test_list_option=--list diff --git a/HACKING.rst b/HACKING.rst new file mode 100644 index 000000000..84f6f3e1c --- /dev/null +++ b/HACKING.rst @@ -0,0 +1,28 @@ +Neutron Style Commandments +======================= + +- Step 1: Read the OpenStack Style Commandments + http://docs.openstack.org/developer/hacking/ +- Step 2: Read on + +Neutron Specific Commandments +-------------------------- + +- [N320] Validate that LOG messages, except debug ones, have translations + +Creating Unit Tests +------------------- +For every new feature, unit tests should be created that both test and +(implicitly) document the usage of said feature. If submitting a patch for a +bug that had no unit test, a new passing unit test should be added. If a +submitted bug fix does have a unit test, be sure to add a new one that fails +without the patch and passes with the patch. + +All unittest classes must ultimately inherit from testtools.TestCase. In the +Neutron test suite, this should be done by inheriting from +neutron.tests.base.BaseTestCase. + +All setUp and tearDown methods must upcall using the super() method. +tearDown methods should be avoided and addCleanup calls should be preferred. +Never manually create tempfiles. Always use the tempfile fixtures from +the fixture library to ensure that they are cleaned up. 
diff --git a/LICENSE b/LICENSE new file mode 100644 index 000000000..68c771a09 --- /dev/null +++ b/LICENSE @@ -0,0 +1,176 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. 
Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative 
Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ diff --git a/MANIFEST.in b/MANIFEST.in new file mode 100644 index 000000000..4e527c7fa --- /dev/null +++ b/MANIFEST.in @@ -0,0 +1,14 @@ +include AUTHORS +include README.rst +include ChangeLog +include LICENSE +include neutron/db/migration/README +include neutron/db/migration/alembic.ini +include neutron/db/migration/alembic_migrations/script.py.mako +include neutron/db/migration/alembic_migrations/versions/README +recursive-include neutron/locale * + +exclude .gitignore +exclude .gitreview + +global-exclude *.pyc diff --git a/README.rst b/README.rst new file mode 100644 index 000000000..57a5930b0 --- /dev/null +++ b/README.rst @@ -0,0 +1,25 @@ +# -- Welcome! + + You have come across a cloud computing network fabric controller. It has + identified itself as "Neutron." It aims to tame your (cloud) networking! + +# -- External Resources: + + The homepage for Neutron is: http://launchpad.net/neutron . Use this + site for asking for help, and filing bugs. Code is available on github at + . + + The latest and most in-depth documentation on how to use Neutron is + available at: . This includes: + + Neutron Administrator Guide + http://docs.openstack.org/trunk/openstack-network/admin/content/ + + Neutron API Reference: + http://docs.openstack.org/api/openstack-network/2.0/content/ + + The start of some developer documentation is available at: + http://wiki.openstack.org/NeutronDevelopment + + For help using or hacking on Neutron, you can send mail to + . diff --git a/TESTING.rst b/TESTING.rst new file mode 100644 index 000000000..0d6814e8e --- /dev/null +++ b/TESTING.rst @@ -0,0 +1,180 @@ +Testing Neutron +============================================================= + +Overview +-------- + +The unit tests are meant to cover as much code as possible and should +be executed without the service running. They are designed to test +the various pieces of the neutron tree to make sure any new changes +don't break existing functionality. 
+ +The functional tests are intended to validate actual system +interaction. Mocks should be used sparingly, if at all. Care +should be taken to ensure that existing system resources are not +modified and that resources created in tests are properly cleaned +up. + +Development process +------------------- + +It is expected that any new changes that are proposed for merge +come with tests for that feature or code area. Ideally any bugs +fixes that are submitted also have tests to prove that they stay +fixed! In addition, before proposing for merge, all of the +current tests should be passing. + +Virtual environments +~~~~~~~~~~~~~~~~~~~~ + +Testing OpenStack projects, including Neutron, is made easier with `DevStack `_. + +Create a machine (such as a VM or Vagrant box) running a distribution supported +by DevStack and install DevStack there. For example, there is a Vagrant script +for DevStack at https://github.com/bcwaldon/vagrant_devstack. + + .. note:: + + If you prefer not to use DevStack, you can still check out source code on your local + machine and develop from there. + + +Running unit tests +------------------ + +There are three mechanisms for running tests: run_tests.sh, tox, +and nose. Before submitting a patch for review you should always +ensure all test pass; a tox run is triggered by the jenkins gate +executed on gerrit for each patch pushed for review. + +With these mechanisms you can either run the tests in the standard +environment or create a virtual environment to run them in. + +By default after running all of the tests, any pep8 errors +found in the tree will be reported. 
+ + +With `run_tests.sh` +~~~~~~~~~~~~~~~~~~~ + +You can use the `run_tests.sh` script in the root source directory to execute +tests in a virtualenv:: + + ./run_tests -V + + +With `nose` +~~~~~~~~~~~ + +You can use `nose`_ to run individual tests, as well as use for debugging +portions of your code:: + + source .venv/bin/activate + pip install nose + nosetests + +There are disadvantages to running Nose - the tests are run sequentially, so +race condition bugs will not be triggered, and the full test suite will +take significantly longer than tox & testr. The upside is that testr has +some rough edges when it comes to diagnosing errors and failures, and there is +no easy way to set a breakpoint in the Neutron code, and enter an +interactive debugging session while using testr. + +.. _nose: https://nose.readthedocs.org/en/latest/index.html + +With `tox` +~~~~~~~~~~ + +Neutron, like other OpenStack projects, uses `tox`_ for managing the virtual +environments for running test cases. It uses `Testr`_ for managing the running +of the test cases. + +Tox handles the creation of a series of `virtualenvs`_ that target specific +versions of Python (2.6, 2.7, 3.3, etc). + +Testr handles the parallel execution of series of test cases as well as +the tracking of long-running tests and other things. + +Running unit tests is as easy as executing this in the root directory of the +Neutron source code:: + + tox + +For more information on the standard Tox-based test infrastructure used by +OpenStack and how to do some common test/debugging procedures with Testr, +see this wiki page: + + https://wiki.openstack.org/wiki/Testr + +.. _Testr: https://wiki.openstack.org/wiki/Testr +.. _tox: http://tox.readthedocs.org/en/latest/ +.. _virtualenvs: https://pypi.python.org/pypi/virtualenv + + +Running individual tests +~~~~~~~~~~~~~~~~~~~~~~~~ + +For running individual test modules or cases, you just need to pass +the dot-separated path to the module you want as an argument to it. 
+ +For executing a specific test case, specify the name of the test case +class separating it from the module path with a colon. + +For example, the following would run only the JSONV2TestCase tests from +neutron/tests/unit/test_api_v2.py:: + + $ ./run_tests.sh neutron.tests.unit.test_api_v2:JSONV2TestCase + +or:: + + $ ./tox neutron.tests.unit.test_api_v2:JSONV2TestCase + +Adding more tests +~~~~~~~~~~~~~~~~~ + +Neutron has a fast growing code base and there is plenty of areas that +need to be covered by unit and functional tests. + +To get a grasp of the areas where tests are needed, you can check +current coverage by running:: + + $ ./run_tests.sh -c + +Debugging +--------- + +By default, calls to pdb.set_trace() will be ignored when tests +are run. For pdb statements to work, invoke run_tests as follows:: + + $ ./run_tests.sh -d [test module path] + +It's possible to debug tests in a tox environment:: + + $ tox -e venv -- python -m testtools.run [test module path] + +Tox-created virtual environments (venv's) can also be activated +after a tox run and reused for debugging:: + + $ tox -e venv + $ . .tox/venv/bin/activate + $ python -m testtools.run [test module path] + +Tox packages and installs the neutron source tree in a given venv +on every invocation, but if modifications need to be made between +invocation (e.g. adding more pdb statements), it is recommended +that the source tree be installed in the venv in editable mode:: + + # run this only after activating the venv + $ pip install --editable . + +Editable mode ensures that changes made to the source tree are +automatically reflected in the venv, and that such changes are not +overwritten during the next tox run. 
+ +Post-mortem debugging +~~~~~~~~~~~~~~~~~~~~~ + +Setting OS_POST_MORTEM_DEBUG=1 in the shell environment will ensure +that pdb.post_mortem() will be invoked on test failure:: + + $ OS_POST_MORTEM_DEBUG=1 ./run_tests.sh -d [test module path] diff --git a/babel.cfg b/babel.cfg new file mode 100644 index 000000000..15cd6cb76 --- /dev/null +++ b/babel.cfg @@ -0,0 +1,2 @@ +[python: **.py] + diff --git a/bin/neutron-rootwrap b/bin/neutron-rootwrap new file mode 100755 index 000000000..284037846 --- /dev/null +++ b/bin/neutron-rootwrap @@ -0,0 +1,21 @@ +#!/usr/bin/env python +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2012 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo.rootwrap import cmd + +cmd.main() diff --git a/bin/neutron-rootwrap-xen-dom0 b/bin/neutron-rootwrap-xen-dom0 new file mode 100755 index 000000000..3f4251a4e --- /dev/null +++ b/bin/neutron-rootwrap-xen-dom0 @@ -0,0 +1,141 @@ +#!/usr/bin/env python + +# Copyright (c) 2012 Openstack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Neutron root wrapper for dom0. + +Executes networking commands in dom0. The XenAPI plugin is +responsible determining whether a command is safe to execute. + +""" +from __future__ import print_function + +import ConfigParser +import json +import os +import select +import sys +import traceback + +import XenAPI + + +RC_UNAUTHORIZED = 99 +RC_NOCOMMAND = 98 +RC_BADCONFIG = 97 +RC_XENAPI_ERROR = 96 + + +def parse_args(): + # Split arguments, require at least a command + exec_name = sys.argv.pop(0) + # argv[0] required; path to conf file + if len(sys.argv) < 2: + print("%s: No command specified" % exec_name) + sys.exit(RC_NOCOMMAND) + + config_file = sys.argv.pop(0) + user_args = sys.argv[:] + + return exec_name, config_file, user_args + + +def _xenapi_section_name(config): + sections = [sect for sect in config.sections() if sect.lower() == "xenapi"] + if len(sections) == 1: + return sections[0] + + print("Multiple [xenapi] sections or no [xenapi] section found!") + sys.exit(RC_BADCONFIG) + + +def load_configuration(exec_name, config_file): + config = ConfigParser.RawConfigParser() + config.read(config_file) + try: + exec_dirs = config.get("DEFAULT", "exec_dirs").split(",") + filters_path = config.get("DEFAULT", "filters_path").split(",") + section = _xenapi_section_name(config) + url = config.get(section, "xenapi_connection_url") + username = config.get(section, "xenapi_connection_username") + password = config.get(section, "xenapi_connection_password") + except ConfigParser.Error: + print("%s: Incorrect configuration file: %s" % (exec_name, config_file)) 
+ sys.exit(RC_BADCONFIG) + if not url or not password: + msg = ("%s: Must specify xenapi_connection_url, " + "xenapi_connection_username (optionally), and " + "xenapi_connection_password in %s") % (exec_name, config_file) + print(msg) + sys.exit(RC_BADCONFIG) + return dict( + filters_path=filters_path, + url=url, + username=username, + password=password, + exec_dirs=exec_dirs, + ) + + +def filter_command(exec_name, filters_path, user_args, exec_dirs): + # Add ../ to sys.path to allow running from branch + possible_topdir = os.path.normpath(os.path.join(os.path.abspath(exec_name), + os.pardir, os.pardir)) + if os.path.exists(os.path.join(possible_topdir, "neutron", "__init__.py")): + sys.path.insert(0, possible_topdir) + + from oslo.rootwrap import wrapper + + # Execute command if it matches any of the loaded filters + filters = wrapper.load_filters(filters_path) + filter_match = wrapper.match_filter( + filters, user_args, exec_dirs=exec_dirs) + if not filter_match: + print("Unauthorized command: %s" % ' '.join(user_args)) + sys.exit(RC_UNAUTHORIZED) + + +def run_command(url, username, password, user_args, cmd_input): + try: + session = XenAPI.Session(url) + session.login_with_password(username, password) + host = session.xenapi.session.get_this_host(session.handle) + result = session.xenapi.host.call_plugin( + host, 'netwrap', 'run_command', + {'cmd': json.dumps(user_args), 'cmd_input': json.dumps(cmd_input)}) + return json.loads(result) + except Exception as e: + traceback.print_exc() + sys.exit(RC_XENAPI_ERROR) + + +def main(): + exec_name, config_file, user_args = parse_args() + config = load_configuration(exec_name, config_file) + filter_command(exec_name, config['filters_path'], user_args, config['exec_dirs']) + + # If data is available on the standard input, we need to pass it to the + # command executed in dom0 + cmd_input = None + if select.select([sys.stdin,],[],[],0.0)[0]: + cmd_input = "".join(sys.stdin) + + return run_command(config['url'], 
config['username'], config['password'], + user_args, cmd_input) + + +if __name__ == '__main__': + print(main()) diff --git a/doc/Makefile b/doc/Makefile new file mode 100644 index 000000000..b63e30032 --- /dev/null +++ b/doc/Makefile @@ -0,0 +1,96 @@ +# Makefile for Sphinx documentation +# + +# You can set these variables from the command line. +SPHINXOPTS = +SPHINXBUILD = sphinx-build +SPHINXSOURCE = source +PAPER = +BUILDDIR = build + +# Internal variables. +PAPEROPT_a4 = -D latex_paper_size=a4 +PAPEROPT_letter = -D latex_paper_size=letter +ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) $(SPHINXSOURCE) + +.PHONY: help clean html dirhtml pickle json htmlhelp qthelp latex changes linkcheck doctest + +.DEFAULT_GOAL = html + +help: + @echo "Please use \`make ' where is one of" + @echo " html to make standalone HTML files" + @echo " dirhtml to make HTML files named index.html in directories" + @echo " pickle to make pickle files" + @echo " json to make JSON files" + @echo " htmlhelp to make HTML files and a HTML help project" + @echo " qthelp to make HTML files and a qthelp project" + @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" + @echo " changes to make an overview of all changed/added/deprecated items" + @echo " linkcheck to check all external links for integrity" + @echo " doctest to run all doctests embedded in the documentation (if enabled)" + +clean: + -rm -rf $(BUILDDIR)/* + if [ -f .autogenerated ] ; then \ + cat .autogenerated | xargs rm ; \ + rm .autogenerated ; \ + fi + +html: + $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html + @echo + @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." + +dirhtml: + $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml + @echo + @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." + +pickle: + $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle + @echo + @echo "Build finished; now you can process the pickle files." 
+ +json: + $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json + @echo + @echo "Build finished; now you can process the JSON files." + +htmlhelp: + $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp + @echo + @echo "Build finished; now you can run HTML Help Workshop with the" \ + ".hhp project file in $(BUILDDIR)/htmlhelp." + +qthelp: + $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp + @echo + @echo "Build finished; now you can run "qcollectiongenerator" with the" \ + ".qhcp project file in $(BUILDDIR)/qthelp, like this:" + @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/nova.qhcp" + @echo "To view the help file:" + @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/nova.qhc" + +latex: + $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex + @echo + @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." + @echo "Run \`make all-pdf' or \`make all-ps' in that directory to" \ + "run these through (pdf)latex." + +changes: + $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes + @echo + @echo "The overview file is in $(BUILDDIR)/changes." + +linkcheck: + $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck + @echo + @echo "Link check complete; look for any errors in the above output " \ + "or in $(BUILDDIR)/linkcheck/output.txt." + +doctest: + $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest + @echo "Testing of doctests in the sources finished, look at the " \ + "results in $(BUILDDIR)/doctest/output.txt." 
diff --git a/doc/pom.xml b/doc/pom.xml new file mode 100644 index 000000000..6fc579f80 --- /dev/null +++ b/doc/pom.xml @@ -0,0 +1,135 @@ + + + + 4.0.0 + + org.openstack.docs + openstack-guide + 1.0.0-SNAPSHOT + jar + OpenStack Guides + + + + + + Rackspace Research Repositories + + true + + + + rackspace-research + Rackspace Research Repository + http://maven.research.rackspacecloud.com/content/groups/public/ + + + + + rackspace-research + Rackspace Research Repository + http://maven.research.rackspacecloud.com/content/groups/public/ + + + + + + + + + target/docbkx/pdf + + **/*.fo + + + + + + + com.rackspace.cloud.api + clouddocs-maven-plugin + 1.0.5-SNAPSHOT + + + goal1 + + generate-pdf + + generate-sources + + false + + + + goal2 + + generate-webhelp + + generate-sources + + + 0 + openstackdocs + 1 + UA-17511903-6 + + appendix toc,title + article/appendix nop + article toc,title + book title,figure,table,example,equation + chapter toc,title + part toc,title + preface toc,title + qandadiv toc + qandaset toc + reference toc,title + set toc,title + + + 0 + 0 + + + + + + + + + + + + + + + + + + + + + + + + + + true + source/docbkx + + neutron-api-1.0/neutron-api-guide.xml + + reviewer + openstack + + + + + + diff --git a/doc/source/conf.py b/doc/source/conf.py new file mode 100644 index 000000000..b2f619a46 --- /dev/null +++ b/doc/source/conf.py @@ -0,0 +1,250 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2010 OpenStack Foundation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +# +# Keystone documentation build configuration file, created by +# sphinx-quickstart on Tue May 18 13:50:15 2010. +# +# This file is execfile()'d with the current directory set to it's containing +# dir. +# +# Note that not all possible configuration values are present in this +# autogenerated file. +# +# All configuration values have a default; values that are commented out +# serve to show the default. + +import os +import sys + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +BASE_DIR = os.path.dirname(os.path.abspath(__file__)) +NEUTRON_DIR = os.path.abspath(os.path.join(BASE_DIR, "..", "..")) +sys.path.insert(0, NEUTRON_DIR) + +# -- General configuration --------------------------------------------------- + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. +extensions = ['sphinx.ext.autodoc', + 'sphinx.ext.coverage', + 'sphinx.ext.ifconfig', + 'sphinx.ext.intersphinx', + 'sphinx.ext.pngmath', + 'sphinx.ext.graphviz', + 'sphinx.ext.todo', + 'oslosphinx'] + +todo_include_todos = True + +# Add any paths that contain templates here, relative to this directory. +templates_path = [] +if os.getenv('HUDSON_PUBLISH_DOCS'): + templates_path = ['_ga', '_templates'] +else: + templates_path = ['_templates'] + +# The suffix of source filenames. +source_suffix = '.rst' + +# The encoding of source files. +#source_encoding = 'utf-8' + +# The master toctree document. +master_doc = 'index' + +# General information about the project. +project = u'Neutron' +copyright = u'2011-present, OpenStack Foundation.' 
+ +# The version info for the project you're documenting, acts as replacement for +# |version| and |release|, also used in various other places throughout the +# built documents. +# +# Version info +from neutron.version import version_info as neutron_version +release = neutron_version.release_string() +# The short X.Y version. +version = neutron_version.version_string() + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. +#language = None + +# There are two options for replacing |today|: either, you set today to some +# non-false value, then it is used: +#today = '' +# Else, today_fmt is used as the format for a strftime call. +#today_fmt = '%B %d, %Y' + +# List of documents that shouldn't be included in the build. +# unused_docs = [] + +# List of directories, relative to source directory, that shouldn't be searched +# for source files. +exclude_trees = [] + +# The reST default role (for this markup: `text`) to use for all documents. +#default_role = None + +# If true, '()' will be appended to :func: etc. cross-reference text. +#add_function_parentheses = True + +# If true, the current module name will be prepended to all description +# unit titles (such as .. function::). +#add_module_names = True + +# If true, sectionauthor and moduleauthor directives will be shown in the +# output. They are ignored by default. +show_authors = True + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = 'sphinx' + +# A list of ignored prefixes for module index sorting. +modindex_common_prefix = ['neutron.'] + +# -- Options for man page output -------------------------------------------- + +# Grouping the document tree for man pages. 
+# List of tuples 'sourcefile', 'target', u'title', u'Authors name', 'manual' + +man_pages = [ + ('man/neutron-server', 'neutron-server', u'Neutron Server', + [u'OpenStack'], 1) +] + + +# -- Options for HTML output ------------------------------------------------- + +# The theme to use for HTML and HTML Help pages. Major themes that come with +# Sphinx are currently 'default' and 'sphinxdoc'. +# html_theme_path = ["."] +# html_theme = '_theme' + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. +#html_theme_options = {} + +# Add any paths that contain custom themes here, relative to this directory. +#html_theme_path = ['_theme'] + +# The name for this set of Sphinx documents. If None, it defaults to +# " v documentation". +#html_title = None + +# A shorter title for the navigation bar. Default is the same as html_title. +#html_short_title = None + +# The name of an image file (relative to this directory) to place at the top +# of the sidebar. +#html_logo = None + +# The name of an image file (within the static path) to use as favicon of the +# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 +# pixels large. +#html_favicon = None + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +# html_static_path = ['_static'] + +# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, +# using the given strftime format. +#html_last_updated_fmt = '%b %d, %Y' +git_cmd = "git log --pretty=format:'%ad, commit %h' --date=local -n1" +html_last_updated_fmt = os.popen(git_cmd).read() + +# If true, SmartyPants will be used to convert quotes and dashes to +# typographically correct entities. 
+#html_use_smartypants = True + +# Custom sidebar templates, maps document names to template names. +#html_sidebars = {} + +# Additional templates that should be rendered to pages, maps page names to +# template names. +#html_additional_pages = {} + +# If false, no module index is generated. +#html_use_modindex = True + +# If false, no index is generated. +#html_use_index = True + +# If true, the index is split into individual pages for each letter. +#html_split_index = False + +# If true, links to the reST sources are added to the pages. +#html_show_sourcelink = True + +# If true, an OpenSearch description file will be output, and all pages will +# contain a tag referring to it. The value of this option must be the +# base URL from which the finished HTML is served. +#html_use_opensearch = '' + +# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). +#html_file_suffix = '' + +# Output file base name for HTML help builder. +htmlhelp_basename = 'neutrondoc' + + +# -- Options for LaTeX output ------------------------------------------------ + +# The paper size ('letter' or 'a4'). +#latex_paper_size = 'letter' + +# The font size ('10pt', '11pt' or '12pt'). +#latex_font_size = '10pt' + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, author, +# documentclass [howto/manual]). +latex_documents = [ + ('index', 'Neutron.tex', u'Neutron Documentation', + u'Neutron development team', 'manual'), +] + +# The name of an image file (relative to this directory) to place at the top of +# the title page. +#latex_logo = None + +# For "manual" documents, if this is true, then toplevel headings are parts, +# not chapters. +#latex_use_parts = False + +# Additional stuff for the LaTeX preamble. +#latex_preamble = '' + +# Documents to append as an appendix to all manuals. +#latex_appendices = [] + +# If false, no module index is generated. 
+#latex_use_modindex = True + +# Example configuration for intersphinx: refer to the Python standard library. +intersphinx_mapping = {'python': ('http://docs.python.org/', None), + 'nova': ('http://nova.openstack.org', None), + 'swift': ('http://swift.openstack.org', None), + 'glance': ('http://glance.openstack.org', None), + 'horizon': ('http://horizon.openstack.org', None), + 'keystone': ('http://keystone.openstack.org', None), + } diff --git a/doc/source/devref/advanced_services.rst b/doc/source/devref/advanced_services.rst new file mode 100644 index 000000000..2e877128a --- /dev/null +++ b/doc/source/devref/advanced_services.rst @@ -0,0 +1,7 @@ +Advanced Services +================= + +.. toctree:: + fwaas + lbaas + vpnaas diff --git a/doc/source/devref/api_extensions.rst b/doc/source/devref/api_extensions.rst new file mode 100644 index 000000000..2c8b3f64e --- /dev/null +++ b/doc/source/devref/api_extensions.rst @@ -0,0 +1,18 @@ +============== +API Extensions +============== + +API extensions is the standard way of introducing new functionality +to the Neutron project, it allows plugins to +determine if they wish to support the functionality or not. + +Examples +======== + +The easiest way to demonstrate how an API extension is written, is +by studying an existing API extension and explaining the different layers. + +.. toctree:: + :maxdepth: 1 + + security_group_api diff --git a/doc/source/devref/api_layer.rst b/doc/source/devref/api_layer.rst new file mode 100644 index 000000000..a3e5e7f69 --- /dev/null +++ b/doc/source/devref/api_layer.rst @@ -0,0 +1,57 @@ +Neutron WSGI/HTTP API layer +=========================== + +This section will cover the internals of Neutron's HTTP API, and the classes +in Neutron that can be used to create Extensions to the Neutron API. 
+ +Python web applications interface with webservers through the Python Web +Server Gateway Interface (WSGI) - defined in `PEP 333 `_ + +Startup +------- + +Neutron's WSGI server is started from the `server module `_ +and the entry point `serve_wsgi` is called to build an instance of the +`NeutronApiService`_, which is then returned to the server module, +which spawns a `Eventlet`_ `GreenPool`_ that will run the WSGI +application and respond to requests from clients. + + +.. _NeutronApiService: http://git.openstack.org/cgit/openstack/neutron/tree/neutron/service.py + +.. _Eventlet: http://eventlet.net/ + +.. _GreenPool: http://eventlet.net/doc/modules/greenpool.html + +WSGI Application +---------------- + +During the building of the NeutronApiService, the `_run_wsgi` function +creates a WSGI application using the `load_paste_app` function inside +`config.py`_ - which parses `api-paste.ini`_ - in order to create a WSGI app +using `Paste`_'s `deploy`_. + +The api-paste.ini file defines the WSGI applications and routes - using the +`Paste INI file format`_. + +The INI file directs paste to instantiate the `APIRouter`_ class of +Neutron, which contains several methods that map Neutron resources (such as +Ports, Networks, Subnets) to URLs, and the controller for each resource. + + +.. _config.py: http://git.openstack.org/cgit/openstack/neutron/tree/neutron/common/config.py + +.. _api-paste.ini: http://git.openstack.org/cgit/openstack/neutron/tree/etc/api-paste.ini + +.. _APIRouter: http://git.openstack.org/cgit/openstack/neutron/tree/neutron/api/v2/router.py + +.. _Paste: http://pythonpaste.org/ + +.. _Deploy: http://pythonpaste.org/deploy/ + +.. 
_Paste INI file format: http://pythonpaste.org/deploy/#applications + +Further reading +--------------- + +`Yong Sheng Gong: Deep Dive into Neutron `_ diff --git a/doc/source/devref/common.rst b/doc/source/devref/common.rst new file mode 100644 index 000000000..537d4c291 --- /dev/null +++ b/doc/source/devref/common.rst @@ -0,0 +1,25 @@ +.. + Copyright 2010-2011 United States Government as represented by the + Administrator of the National Aeronautics and Space Administration. + All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); you may + not use this file except in compliance with the License. You may obtain + a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + License for the specific language governing permissions and limitations + under the License. + +Open Stack Common +================= + +A number of modules used are from the openstack-common project. +The imported files are in 'neutron/openstack-common.conf'. +More information can be found at `OpenStack Common`_. + +.. _`OpenStack Common`: https://launchpad.net/openstack-common diff --git a/doc/source/devref/db_layer.rst b/doc/source/devref/db_layer.rst new file mode 100644 index 000000000..54eff65b7 --- /dev/null +++ b/doc/source/devref/db_layer.rst @@ -0,0 +1,2 @@ +Neutron Database Layer +====================== diff --git a/doc/source/devref/development.environment.rst b/doc/source/devref/development.environment.rst new file mode 100644 index 000000000..dc4be0838 --- /dev/null +++ b/doc/source/devref/development.environment.rst @@ -0,0 +1,49 @@ +.. + Copyright 2010-2013 United States Government as represented by the + Administrator of the National Aeronautics and Space Administration. + All Rights Reserved. 
+ + Licensed under the Apache License, Version 2.0 (the "License"); you may + not use this file except in compliance with the License. You may obtain + a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + License for the specific language governing permissions and limitations + under the License. + +Setting Up a Development Environment +==================================== + +This page describes how to setup a working Python development +environment that can be used in developing Neutron on Ubuntu, Fedora or +Mac OS X. These instructions assume you're already familiar with +Git and Gerrit, which is a code repository mirror and code review toolset +, however if you aren't please see `this Git tutorial`_ for an introduction +to using Git and `this wiki page`_ for a tutorial on using Gerrit and Git for +code contribution to Openstack projects. + +.. _this Git tutorial: http://git-scm.com/book/en/Getting-Started +.. _this wiki page: https://wiki.openstack.org/wiki/Gerrit_Workflow + +Following these instructions will allow you to run the Neutron unit +tests. If you want to be able to run Neutron in a full OpenStack environment, +you can use the excellent `DevStack`_ project to do so. There is a wiki page +that describes `setting up Neutron using DevStack`_. + +.. _DevStack: https://github.com/openstack-dev/devstack +.. _setting up Neutron using Devstack: https://wiki.openstack.org/wiki/NeutronDevstack + +Getting the code +---------------- + +Grab the code from GitHub:: + + git clone git://git.openstack.org/openstack/neutron.git + cd neutron + + +.. 
include:: ../../../TESTING.rst diff --git a/doc/source/devref/fwaas.rst b/doc/source/devref/fwaas.rst new file mode 100644 index 000000000..7b7680c6f --- /dev/null +++ b/doc/source/devref/fwaas.rst @@ -0,0 +1,30 @@ +Firewall as a Service +===================== + +`Design Document`_ + +.. _Design Document: https://docs.google.com/document/d/1PJaKvsX2MzMRlLGfR0fBkrMraHYF0flvl0sqyZ704tA/edit#heading=h.aed6tiupj0qk + +Plugin +------ +.. automodule:: neutron.services.firewall.fwaas_plugin + +.. autoclass:: FirewallPlugin + :members: + +Database layer +-------------- + +.. automodule:: neutron.db.firewall.firewall_db + +.. autoclass:: Firewall_db_mixin + :members: + + +Driver layer +------------ + +.. automodule:: neutron.services.firewall.drivers.fwaas_base + +.. autoclass:: FwaasDriverBase + :members: diff --git a/doc/source/devref/index.rst b/doc/source/devref/index.rst new file mode 100644 index 000000000..46d2edd0e --- /dev/null +++ b/doc/source/devref/index.rst @@ -0,0 +1,65 @@ +.. + Copyright 2010-2011 United States Government as represented by the + Administrator of the National Aeronautics and Space Administration. + All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); you may + not use this file except in compliance with the License. You may obtain + a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + License for the specific language governing permissions and limitations + under the License. + +Developer Guide +=============== + +In the Developer Guide, you will find information on Neutron's lower level +programming APIs. There are sections that cover the core pieces of Neutron, +including its database, message queue, and scheduler components. 
There are +also subsections that describe specific plugins inside Neutron. + + +Programming HowTos and Tutorials +-------------------------------- +.. toctree:: + :maxdepth: 3 + + development.environment + + +Neutron Internals +----------------- +.. toctree:: + :maxdepth: 3 + + api_layer + api_extensions + plugin-api + db_layer + rpc_api + layer3 + l2_agents + advanced_services + + +Module Reference +---------------- +.. toctree:: + :maxdepth: 3 + +.. todo:: + + Add in all the big modules as automodule indexes. + + +Indices and tables +------------------ + +* :ref:`genindex` +* :ref:`modindex` +* :ref:`search` diff --git a/doc/source/devref/l2_agents.rst b/doc/source/devref/l2_agents.rst new file mode 100644 index 000000000..83786dabe --- /dev/null +++ b/doc/source/devref/l2_agents.rst @@ -0,0 +1,7 @@ +L2 Agent Networking +------------------- +.. toctree:: + :maxdepth: 3 + + openvswitch_agent + linuxbridge_agent diff --git a/doc/source/devref/layer3.rst b/doc/source/devref/layer3.rst new file mode 100644 index 000000000..571f2a09a --- /dev/null +++ b/doc/source/devref/layer3.rst @@ -0,0 +1,199 @@ +Layer 3 Networking in Neutron - via Layer 3 agent & OpenVSwitch +=============================================================== + +This page discusses the usage of Neutron with Layer 3 functionality enabled. 
+ +Neutron logical network setup +----------------------------- +:: + + vagrant@precise64:~/devstack$ neutron net-list + +--------------------------------------+---------+--------------------------------------------------+ + | id | name | subnets | + +--------------------------------------+---------+--------------------------------------------------+ + | 84b6b0cc-503d-448a-962f-43def05e85be | public | 3a56da7c-2f6e-41af-890a-b324d7bc374d | + | a4b4518c-800d-4357-9193-57dbb42ac5ee | private | 1a2d26fb-b733-4ab3-992e-88554a87afa6 10.0.0.0/24 | + +--------------------------------------+---------+--------------------------------------------------+ + vagrant@precise64:~/devstack$ neutron subnet-list + +--------------------------------------+------+-------------+--------------------------------------------+ + | id | name | cidr | allocation_pools | + +--------------------------------------+------+-------------+--------------------------------------------+ + | 1a2d26fb-b733-4ab3-992e-88554a87afa6 | | 10.0.0.0/24 | {"start": "10.0.0.2", "end": "10.0.0.254"} | + +--------------------------------------+------+-------------+--------------------------------------------+ + vagrant@precise64:~/devstack$ neutron port-list + +--------------------------------------+------+-------------------+---------------------------------------------------------------------------------+ + | id | name | mac_address | fixed_ips | + +--------------------------------------+------+-------------------+---------------------------------------------------------------------------------+ + | 0ba8700e-da06-4318-8fe9-00676dd994b8 | | fa:16:3e:78:43:5b | {"subnet_id": "1a2d26fb-b733-4ab3-992e-88554a87afa6", "ip_address": "10.0.0.1"} | + | b2044570-ad52-4f31-a2c3-5d767dc9a8a7 | | fa:16:3e:5b:cf:4c | {"subnet_id": "1a2d26fb-b733-4ab3-992e-88554a87afa6", "ip_address": "10.0.0.3"} | + | bb60d1bb-0cab-41cb-9678-30d2b2fdb169 | | fa:16:3e:af:a9:bd | {"subnet_id": "1a2d26fb-b733-4ab3-992e-88554a87afa6", "ip_address": 
"10.0.0.2"} | + +--------------------------------------+------+-------------------+---------------------------------------------------------------------------------+ + + vagrant@precise64:~/devstack$ neutron subnet-show 1a2d26fb-b733-4ab3-992e-88554a87afa6 + +------------------+--------------------------------------------+ + | Field | Value | + +------------------+--------------------------------------------+ + | allocation_pools | {"start": "10.0.0.2", "end": "10.0.0.254"} | + | cidr | 10.0.0.0/24 | + | dns_nameservers | | + | enable_dhcp | True | + | gateway_ip | 10.0.0.1 | + | host_routes | | + | id | 1a2d26fb-b733-4ab3-992e-88554a87afa6 | + | ip_version | 4 | + | name | | + | network_id | a4b4518c-800d-4357-9193-57dbb42ac5ee | + | tenant_id | 3368290ab10f417390acbb754160dbb2 | + +------------------+--------------------------------------------+ + + +Neutron logical router setup +---------------------------- + +* http://docs.openstack.org/admin-guide-cloud/content/ch_networking.html#under_the_hood_openvswitch_scenario1_network + + +:: + + vagrant@precise64:~/devstack$ neutron router-list + +--------------------------------------+---------+--------------------------------------------------------+ + | id | name | external_gateway_info | + +--------------------------------------+---------+--------------------------------------------------------+ + | 569469c7-a2a5-4d32-9cdd-f0b18a13f45e | router1 | {"network_id": "84b6b0cc-503d-448a-962f-43def05e85be"} | + +--------------------------------------+---------+--------------------------------------------------------+ + vagrant@precise64:~/devstack$ neutron router-show router1 + +-----------------------+--------------------------------------------------------+ + | Field | Value | + +-----------------------+--------------------------------------------------------+ + | admin_state_up | True | + | external_gateway_info | {"network_id": "84b6b0cc-503d-448a-962f-43def05e85be"} | + | id | 569469c7-a2a5-4d32-9cdd-f0b18a13f45e | + 
| name | router1 | + | routes | | + | status | ACTIVE | + | tenant_id | 3368290ab10f417390acbb754160dbb2 | + +-----------------------+--------------------------------------------------------+ + vagrant@precise64:~/devstack$ neutron router-port-list router1 + +--------------------------------------+------+-------------------+---------------------------------------------------------------------------------+ + | id | name | mac_address | fixed_ips | + +--------------------------------------+------+-------------------+---------------------------------------------------------------------------------+ + | 0ba8700e-da06-4318-8fe9-00676dd994b8 | | fa:16:3e:78:43:5b | {"subnet_id": "1a2d26fb-b733-4ab3-992e-88554a87afa6", "ip_address": "10.0.0.1"} | + +--------------------------------------+------+-------------------+---------------------------------------------------------------------------------+ + +Neutron Routers are realized in OpenVSwitch +------------------------------------------- + +.. image:: http://docs.openstack.org/admin-guide-cloud/content/figures/10/a/common/figures/under-the-hood-scenario-1-ovs-network.png + + +"router1" in the Neutron logical network is realized through a port ("qr-0ba8700e-da") in OpenVSwitch - attached to "br-int":: + + vagrant@precise64:~/devstack$ sudo ovs-vsctl show + b9b27fc3-5057-47e7-ba64-0b6afe70a398 + Bridge br-int + Port "qr-0ba8700e-da" + tag: 1 + Interface "qr-0ba8700e-da" + type: internal + Port br-int + Interface br-int + type: internal + Port int-br-ex + Interface int-br-ex + Port "tapbb60d1bb-0c" + tag: 1 + Interface "tapbb60d1bb-0c" + type: internal + Port "qvob2044570-ad" + tag: 1 + Interface "qvob2044570-ad" + Port "int-br-eth1" + Interface "int-br-eth1" + Bridge "br-eth1" + Port "phy-br-eth1" + Interface "phy-br-eth1" + Port "br-eth1" + Interface "br-eth1" + type: internal + Bridge br-ex + Port phy-br-ex + Interface phy-br-ex + Port "qg-0143bce1-08" + Interface "qg-0143bce1-08" + type: internal + Port br-ex + Interface 
br-ex + type: internal + ovs_version: "1.4.0+build0" + + + vagrant@precise64:~/devstack$ brctl show + bridge name bridge id STP enabled interfaces + br-eth1 0000.e2e7fc5ccb4d no + br-ex 0000.82ee46beaf4d no phy-br-ex + qg-39efb3f9-f0 + qg-77e0666b-cd + br-int 0000.5e46cb509849 no int-br-ex + qr-54c9cd83-43 + qvo199abeb2-63 + qvo1abbbb60-b8 + tap74b45335-cc + qbr199abeb2-63 8000.ba06e5f8675c no qvb199abeb2-63 + tap199abeb2-63 + qbr1abbbb60-b8 8000.46a87ed4fb66 no qvb1abbbb60-b8 + tap1abbbb60-b8 + virbr0 8000.000000000000 yes + +Finding the router in ip/ipconfig +--------------------------------- + +* http://docs.openstack.org/admin-guide-cloud/content/ch_networking.html + + The neutron-l3-agent uses the Linux IP stack and iptables to perform L3 forwarding and NAT. + In order to support multiple routers with potentially overlapping IP addresses, neutron-l3-agent + defaults to using Linux network namespaces to provide isolated forwarding contexts. As a result, + the IP addresses of routers will not be visible simply by running "ip addr list" or "ifconfig" on + the node. Similarly, you will not be able to directly ping fixed IPs. + + To do either of these things, you must run the command within a particular router's network + namespace. The namespace will have the name "qrouter-. + +.. 
image:: http://docs.openstack.org/admin-guide-cloud/content/figures/10/a/common/figures/under-the-hood-scenario-1-ovs-netns.png + +For example:: + + vagrant@precise64:~$ neutron router-list + +--------------------------------------+---------+--------------------------------------------------------+ + | id | name | external_gateway_info | + +--------------------------------------+---------+--------------------------------------------------------+ + | ad948c6e-afb6-422a-9a7b-0fc44cbb3910 | router1 | {"network_id": "e6634fef-03fa-482a-9fa7-e0304ce5c995"} | + +--------------------------------------+---------+--------------------------------------------------------+ + vagrant@precise64:~/devstack$ sudo ip netns exec qrouter-ad948c6e-afb6-422a-9a7b-0fc44cbb3910 ip addr list + 18: lo: mtu 16436 qdisc noqueue state UNKNOWN + link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 + inet 127.0.0.1/8 scope host lo + inet6 ::1/128 scope host + valid_lft forever preferred_lft forever + 19: qr-54c9cd83-43: mtu 1500 qdisc noqueue state UNKNOWN + link/ether fa:16:3e:dd:c1:8f brd ff:ff:ff:ff:ff:ff + inet 10.0.0.1/24 brd 10.0.0.255 scope global qr-54c9cd83-43 + inet6 fe80::f816:3eff:fedd:c18f/64 scope link + valid_lft forever preferred_lft forever + 20: qg-77e0666b-cd: mtu 1500 qdisc noqueue state UNKNOWN + link/ether fa:16:3e:1f:d3:ec brd ff:ff:ff:ff:ff:ff + inet 192.168.27.130/28 brd 192.168.27.143 scope global qg-77e0666b-cd + inet6 fe80::f816:3eff:fe1f:d3ec/64 scope link + valid_lft forever preferred_lft forever + + +Provider Networking +------------------- + +Neutron can also be configured to create `provider networks `_ + +Further Reading +--------------- +* `Packet Pushers - Neutron Network Implementation on Linux `_ +* `OpenStack Cloud Administrator Guide `_ +* `Neutron - Layer 3 API extension usage guide `_ +* `Darragh O'Reilly - The Quantum L3 router and floating IPs `_ diff --git a/doc/source/devref/lbaas.rst b/doc/source/devref/lbaas.rst new file mode 100644 index 
000000000..86f408718 --- /dev/null +++ b/doc/source/devref/lbaas.rst @@ -0,0 +1,32 @@ +Loadbalancer as a Service +========================= + + +https://wiki.openstack.org/wiki/Neutron/LBaaS/Architecture + +https://wiki.openstack.org/wiki/Neutron/LBaaS/API_1.0 + + +Plugin +------ +.. automodule:: neutron.services.loadbalancer.plugin + +.. autoclass:: LoadBalancerPlugin + :members: + +Database layer +-------------- + +.. automodule:: neutron.db.loadbalancer.loadbalancer_db + +.. autoclass:: LoadBalancerPluginDb + :members: + + +Driver layer +------------ + +.. automodule:: neutron.services.loadbalancer.drivers.abstract_driver + +.. autoclass:: LoadBalancerAbstractDriver + :members: diff --git a/doc/source/devref/linuxbridge_agent.rst b/doc/source/devref/linuxbridge_agent.rst new file mode 100644 index 000000000..2c7b81d4f --- /dev/null +++ b/doc/source/devref/linuxbridge_agent.rst @@ -0,0 +1,2 @@ +L2 Networking with Linux Bridge +------------------------------- diff --git a/doc/source/devref/openvswitch_agent.rst b/doc/source/devref/openvswitch_agent.rst new file mode 100644 index 000000000..1c441e381 --- /dev/null +++ b/doc/source/devref/openvswitch_agent.rst @@ -0,0 +1,21 @@ +==================== +OpenVSwitch L2 Agent +==================== + +This Agent uses the `OpenVSwitch`_ virtual switch to create L2 +connectivity for instances, along with bridges created in conjunction +with OpenStack Nova for filtering. + +ovs-neutron-agent can be configured to use two different networking technologies to create tenant isolation, either GRE tunnels or VLAN tags. + +VLAN Tags +--------- + +.. image:: http://docs.openstack.org/admin-guide-cloud/content/figures/10/a/common/figures/under-the-hood-scenario-1-ovs-compute.png + +.. 
_OpenVSwitch: http://openvswitch.org + +Further Reading +--------------- + +* `Darragh O'Reilly - The Open vSwitch plugin with VLANs `_ diff --git a/doc/source/devref/plugin-api.rst b/doc/source/devref/plugin-api.rst new file mode 100644 index 000000000..bec544b0e --- /dev/null +++ b/doc/source/devref/plugin-api.rst @@ -0,0 +1,12 @@ +Neutron Plugin Architecture +=========================== + +`Salvatore Orlando: How to write a Neutron Plugin (if you really need to) `_ + +Plugin API +---------- + +.. automodule:: neutron.neutron_plugin_base_v2 + +.. autoclass:: NeutronPluginBaseV2 + :members: diff --git a/doc/source/devref/rpc_api.rst b/doc/source/devref/rpc_api.rst new file mode 100644 index 000000000..77c851103 --- /dev/null +++ b/doc/source/devref/rpc_api.rst @@ -0,0 +1,2 @@ +Neutron RPC API Layer +===================== diff --git a/doc/source/devref/security_group_api.rst b/doc/source/devref/security_group_api.rst new file mode 100644 index 000000000..ad990d9e4 --- /dev/null +++ b/doc/source/devref/security_group_api.rst @@ -0,0 +1,50 @@ +Guided Tour: The Neutron Security Group API +=========================================== + +https://wiki.openstack.org/wiki/Neutron/SecurityGroups + + +API Extension +------------- + +The API extension is the 'front' end portion of the code, which handles defining a `REST-ful API`_, which is used by tenants. + + +.. _`REST-ful API`: https://github.com/openstack/neutron/blob/master/neutron/extensions/securitygroup.py + + +Database API +------------ + +The Security Group API extension adds a number of `methods to the database layer`_ of Neutron + +.. _`methods to the database layer`: https://github.com/openstack/neutron/blob/master/neutron/db/securitygroups_db.py + +Agent RPC +--------- + +This portion of the code handles processing requests from tenants, after they have been stored in the database. It involves messaging all the L2 agents +running on the compute nodes, and modifying the IPTables rules on each hypervisor. 
+ + +* `Plugin RPC classes `_ + + * `SecurityGroupServerRpcCallbackMixin `_ - defines the RPC API that the plugin uses to communicate with the agents running on the compute nodes + * SecurityGroupServerRpcMixin - Defines the API methods used to fetch data from the database, in order to return responses to agents via the RPC API + +* `Agent RPC classes `_ + + * The SecurityGroupServerRpcApiMixin defines the API methods that can be called by agents, back to the plugin that runs on the Neutron controller + * The SecurityGroupAgentRpcCallbackMixin defines methods that a plugin uses to call back to an agent after performing an action called by an agent. + + +IPTables Driver +--------------- + +* ``prepare_port_filter`` takes a ``port`` argument, which is a ``dictionary`` object that contains information about the port - including the ``security_group_rules`` + +* ``prepare_port_filter`` `appends the port to an internal dictionary `_, ``filtered_ports`` which is used to track the internal state. + +* Each security group has a `chain `_ in Iptables. + +* The ``IptablesFirewallDriver`` has a method to `convert security group rules into iptables statements `_ diff --git a/doc/source/devref/vpnaas.rst b/doc/source/devref/vpnaas.rst new file mode 100644 index 000000000..e2d2f1b6d --- /dev/null +++ b/doc/source/devref/vpnaas.rst @@ -0,0 +1,21 @@ +VPN as a Service +===================== + +`API Specification`_ + +.. _API Specification: http://docs.openstack.org/api/openstack-network/2.0/content/vpnaas_ext.html + +Plugin +------ +.. automodule:: neutron.services.vpn.plugin + +.. autoclass:: VPNPlugin + :members: + +Database layer +-------------- + +.. automodule:: neutron.db.vpn.vpn_db + +.. 
autoclass:: VPNPluginDb + :members: diff --git a/doc/source/docbkx/docbkx-example/README b/doc/source/docbkx/docbkx-example/README new file mode 100644 index 000000000..e1545671b --- /dev/null +++ b/doc/source/docbkx/docbkx-example/README @@ -0,0 +1,14 @@ +README + +This docbkx-example folder is provided for those who want to use the maven mojo supplied with the project to build their own documents to PDF and HTML (webhelp) format. It's intended to be a template and model. + +You can edit the src/docbkx/example.xml file using vi, emacs, or another DocBook editor. At Rackspace we use Oxygen. Both Oxygen and XML Mind offer free licenses to those working on open source project documentation. + +To build the output, install Apache Maven (https://maven.apache.org/) and then run: + +mvn clean generate-sources + +in the directory containing the pom.xml file. + +Feel free to ask questions of the openstack-docs team at https://launchpad.net/~openstack-doc. + diff --git a/doc/source/docbkx/docbkx-example/pom.xml b/doc/source/docbkx/docbkx-example/pom.xml new file mode 100644 index 000000000..f281971a5 --- /dev/null +++ b/doc/source/docbkx/docbkx-example/pom.xml @@ -0,0 +1,38 @@ + + 4.0.0 + my-groupid + my-guide + 1.0.0-SNAPSHOT + jar + OpenStack stand alone documentation examples + + + + + com.agilejava.docbkx + docbkx-maven-plugin + + + + generate-pdf + generate-webhelp + + generate-sources + + + + true + 100 + + + + + + + + + + + + + diff --git a/doc/source/docbkx/docbkx-example/src/docbkx/example.xml b/doc/source/docbkx/docbkx-example/src/docbkx/example.xml new file mode 100644 index 000000000..96f1c64c1 --- /dev/null +++ b/doc/source/docbkx/docbkx-example/src/docbkx/example.xml @@ -0,0 +1,318 @@ + + Maven Example Documentation + + + + + + + + Badges! We don't need any stinking badges! + + + + 2011 + Timothy D. Witham + + Example v0.1 + Product Name Doesn't Exist - it's an example!™ + 2011-01-01 + + + Copyright details are filled in by the template. 
Change the value of the role + attribute on the legalnotice element to change the license. + + + + This document is intended for individuals who wish to produce documentation using Maven and having + the same "feel" as the documentation that is produced by the mainline OpenStack projects. + + + + this is a placeholder for the front cover + + + this is a placeholder for the back cover + + + + Overview + Welcome to the getting started with Maven documentation. Congratulations, you have + successfully downloaded and built the example. + + For more details on the Product Name service, please refer to http://www.rackspacecloud.com/cloud_hosting_products/product name + + We welcome feedback, comments, and bug reports at support@rackspacecloud.com. +
+ Intended Audience + This guide is intended for individuals who want to develop standalone documentation + to use within an OpenStack deployment. Using this tool chain will give you the look and + feel of the mainline OpenStack documentation. + 
+
+ Document Change History + This version of the Maven Getting Started Guide replaces and obsoletes all previous versions. The + most recent changes are described in the table below: + + + + Revision Date + Summary of Changes + + + + + July. 14, 2011 + + + + Initial document creation. + + + + + + +
+
+ Additional Resources + + + + + Openstack - Cloud Software + + + + + + + Docbook Main Web Site + + + + + + + Docbook Quick Reference + + + + +
+
+ + Concepts + + Need to put something here. + + + + How do I? + +
+ Notes and including images + So I want an note and an image in this section ... + + This is an example of a note. + + + Here's a sample figure in svg and png formats: +
+ Sample Image + + + + + + + + +
+
+
+ Multiple Related Documents + + What you need to do in order to have multiple documents fit within the + build structure. + +
+
+ Using multiple files for a document + + What you need to do in order to have a single document that is made up of multiple + files. + +
+
+ Who, What, Where, When and Why of pom.xml + + You will have noticed the pom.xml file at the root directory. + This file is used to set the project parameters for the documentation, including + what type of documentation to produce and any post processing that needs to happen. + If you want to know more about + + pom.xml - need a link + + then follow the link. + + For the pom.xml file that was included in this distribution we will + parse the individual lines and explain the meaning. + + + + 
+ <project> + + What is all of this stuff and why is it important? + +
+
+ <modelVersion> + + What goes in here and why? + +
+
+ <groupId> + + What goes in here and why? + +
+
+ <artifactId> + + What goes in here and why? + +
+
+ <version> + + What goes in here and why? + +
+
+ <packaging> + + What goes in here and why? + +
+
+ <name> + + Name of your document. + +
+
+ <build> + + Make some documents. + +
+ <plugin(s)> + + What does this do and why? + +
+ <groupId> + + What goes in here and why? + +
+
+ <artifactId> + + What goes in here and why? + +
+
+ <execution(s)> + + What goes in here and why? + +
+ <goal(s)> + + Different types of goals and why you use them. + +
+
+ <phase> + + What does this section do? What phases can you specify. + +
+
+
+ <configuration> + + What does this section do? + +
+ <xincludeSupported> + + What does this do and why? + +
+
+ <chunkSectionDepth> + + What does this do and why? + +
+
+ <postprocess> + + What does this section do? What are possible pieces? + +
+ <copy> + + What does this section do? What are possible pieces? + +
+ <fileset> + + What does this section do? What are possible pieces? + +
+ <include> + + What does this section do? What are possible pieces? + +
+
+
+
+
+
+
+
+
+ Who, What, Where, When and Why of build.xml + + You will have noticed the build.xml file at the root directory. + This file is used to set the project parameters for the documentation, including + what type of documentation to produce and any post processing that needs to happen. + If you want to know more about + + build.xml - need a link + + then follow the link. + 
+
+ + Troubleshooting + Sometimes things go wrong... + +
diff --git a/doc/source/docbkx/docbkx-example/src/docbkx/figures/example.sdx b/doc/source/docbkx/docbkx-example/src/docbkx/figures/example.sdx new file mode 100644 index 000000000..3f2d86366 --- /dev/null +++ b/doc/source/docbkx/docbkx-example/src/docbkx/figures/example.sdx @@ -0,0 +1,79 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/doc/source/docbkx/docbkx-example/src/docbkx/figures/example.svg b/doc/source/docbkx/docbkx-example/src/docbkx/figures/example.svg new file mode 100644 index 000000000..58b98232d --- /dev/null +++ b/doc/source/docbkx/docbkx-example/src/docbkx/figures/example.svg @@ -0,0 +1,523 @@ + + + + +Creator: Quick Sequence Diagram Editor Producer: org.freehep.graphicsio.svg.SVGGraphics2D Revision: 12753 Source: Date: Monday, May 2, 2011 2:44:33 PM CDT + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/doc/source/docbkx/quantum-api-1.0/common.ent b/doc/source/docbkx/quantum-api-1.0/common.ent new file mode 100644 
index 000000000..19acc7e9f --- /dev/null +++ b/doc/source/docbkx/quantum-api-1.0/common.ent @@ -0,0 +1,112 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + '> + + + + + + '> + + + + + + '> + + + + + + '> + + + + + + '> + + + + + + + + +
+ ]]> + + + + + + + + +

+ Full file path. +

+
+
+
+ +
+
+ + + + + + + + + + + + + + + + +

+ A collection of addresses. +

+
+
+
+ + +
+ + + +

+ An id of an address list. This is typically a name + used to identify a network. +

+
+
+
+ +
+ + + + + + + +

+ An IP address. +

+
+
+
+ + + +

+ The IP Address version can be 4 or 6. The version + attribute is optional; if it is left off, the type of + address will be determined from its address + format. If it is specified it should + match the address format. +

+

+ The OpenStack compute API will always fill in the + version number as a convenience to the client. +

+
+
+
+ +
+ + + + + +

+ An extensible server status type allows all of the + strings defined in ServerStatus or an alias prefixed + status. +

+
+
+ +
+ + + + + + +

+ The server is ready to use. +

+
+
+
+ + + +

+ The server is in an inactive (suspended) state. +

+
+
+
+ + + +

+ The server has been deleted. +

+
+
+
+ + + +

+ The server is being resized. +

+
+
+
+ + + +

+ The server is waiting for the resize operation to be + confirmed so that the original server may be removed. +

+
+
+
+ + + +

+ The requested operation failed, the server is in an + error state. +

+
+
+
+ + + +

+ The server is being built. +

+
+
+
+ + + +

+ The server password is being changed. +

+
+
+
+ + + +

+ The server is being rebuilt. +

+
+
+
+ + + +

+ The server is going through a SOFT reboot. +

+
+
+
+ + + +

+ The server is going through a HARD reboot. +

+
+
+
+ + + +

+ The server is in an unknown state. +

+
+
+
+
+
+ + + + + + + + + + + + + + + + + + +

+ Denotes IPv4. +

+
+
+
+ + + +

+ Denotes IPv6. +

+
+
+
+
+
+ diff --git a/doc/source/docbkx/quantum-api-1.0/xsd/shareip.xjb b/doc/source/docbkx/quantum-api-1.0/xsd/shareip.xjb new file mode 100644 index 000000000..239caec7e --- /dev/null +++ b/doc/source/docbkx/quantum-api-1.0/xsd/shareip.xjb @@ -0,0 +1,11 @@ + + + + + + + diff --git a/doc/source/docbkx/quantum-api-1.0/xsd/shareip.xsd b/doc/source/docbkx/quantum-api-1.0/xsd/shareip.xsd new file mode 100644 index 000000000..b59836741 --- /dev/null +++ b/doc/source/docbkx/quantum-api-1.0/xsd/shareip.xsd @@ -0,0 +1,83 @@ + + + + + + + + +

+ The element defines request to share a public IP address. +

+
+ + + + + + + + + + +
+
+ + + + +

+ This type is used to represent a request to share an IP + address. +

+
+
+ + + + + + +

+ The + shared IP group + + used to + share the address. +

+
+
+
+ + + +

+ If true, the server is configured with the new address + though the address may not be enabled. +

+
+
+
+ +
+
\ No newline at end of file diff --git a/doc/source/docbkx/quantum-api-1.0/xsd/txt.htaccess b/doc/source/docbkx/quantum-api-1.0/xsd/txt.htaccess new file mode 100644 index 000000000..8aa2a2878 --- /dev/null +++ b/doc/source/docbkx/quantum-api-1.0/xsd/txt.htaccess @@ -0,0 +1,4 @@ +DirectoryIndex api.xsd +AddType application/xml wadl +AddType application/xml xsd +AddType application/xml xslt diff --git a/doc/source/docbkx/quantum-api-1.0/xsd/version.xsd b/doc/source/docbkx/quantum-api-1.0/xsd/version.xsd new file mode 100644 index 000000000..c89c2b2f5 --- /dev/null +++ b/doc/source/docbkx/quantum-api-1.0/xsd/version.xsd @@ -0,0 +1,355 @@ + + + + + + + + + + Version Types + + + +

+ This schema file defines all types related to versioning. +

+
+
+ + + + + + + + +

+ This element is returned when the version of the + resource cannot be determined. The element + provides a list of choices for the resource. +

+
+ + + + + + + + + + +
+
+ + + + + +

+ Provides a list of supported versions. +

+
+ + + + + + + + + + + + + +
+
+ + + +

+ This element provides detailed meta information + regarding the status of the current API version. + This is the XSD 1.0 compatible element definition. +

+
+
+
+ + + + +

+ This element provides detailed meta information + regarding the status of the current API + version. The description should include a pointer + to both a human readable and a machine processable + description of the API service. +

+
+ + + + + + + + + + + + + +
+
+ + + + + + + The VersionStatus type describes a service's operational status. + + + + + + + + + + This is a new service the API. Thi API + contract may be set, but the implementaiton + may not be 100% complient with it. Developers + are encouraged to begin testing aganst an + ALPHA version to provide feedback. + + + + + + + + + A status of BETA indicates that this + version is a candidate for the next major + release and may feature functionality not + available in the current + version. Developers are encouraged to test + and begin the migration processes to a + BETA version. Note that a BETA version is + undergoing testing, it has not been + officially released, and my not be stable. + + + + + + + + + The API version is stable and has been + tested. Developers are encouraged to + develop against this API version. The + current released version of the API will + always be marked as CURRENT. + + + + + + + + + A status of DEPRECATED indicates that a + newer version of the API is + available. Application developers are + discouraged from using this version and + should instead develop against the latest + current version of the API. + + + + + + + + + + + + A version choice list outlines a collection of + resources at various versions. + + + + + + + + + + + + + In version lists, every single version must + contain at least one self link. + + + + + + + + + + + + + + When used as a root element, a version choice + must contain at least one describedby link. + + + + + + + + + + + + + A version choice contains relevant information + about an available service that a user can then + use to target a specific version of the service. + + + + + + + + + + + + + + + The ID of a version choice represents the service version's unique + identifier. This ID is guaranteed to be unique only among the + service version choices outlined in the VersionChoiceList. + + + + + + + + + + A version choice's status describes the current operational state of + the given service version. 
The operational status is captured in a + simple type enumeration called VersionStatus. + + + + + + + + + + A version choice's updated attribute describes + the time when the version was updated. The + time should be updated anytime + anything in the + version has changed: documentation, + extensions, bug fixes. + + + + + + + + + + + + A MediaTypeList outlines a collection of valid media types for a given + service version. + + + + + + + + + + + + + + + + A MediaType describes what content types the service version understands. + + + + + + + + + + + The base of a given media type describes the + simple MIME type that then a more complicated + media type can be derived from. These types + are basic and provide no namespace or version + specific data are are only provided as a + convenience. + + + + + + + + + + The type attribute of a MediaType describes + the MIME specific identifier of the media type + in question. + + + + + + +
diff --git a/doc/source/docbkx/quantum-api-1.0/xslt/schema.xsl b/doc/source/docbkx/quantum-api-1.0/xslt/schema.xsl new file mode 100644 index 000000000..d8bc7fe16 --- /dev/null +++ b/doc/source/docbkx/quantum-api-1.0/xslt/schema.xsl @@ -0,0 +1,1342 @@ + + + + + + + + + + + + + + + + + .. + + + + + + XML Schema Documentation + application/xhtml+xml + http://www.w3.org/2001/XMLSchema + http://web4.w3.org/TR/2001/REC-xmlschema-2-20010502/# + + " + ' + + + + + + + + + + + + + + + + + + + + element_ + attrib_ + attgrp_ + grp_ + type_ + + + + http://yui.yahooapis.com/2.7.0/build/ + + + + + + + + + + + + + + + + + stylesheet + text/css + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + <xslt:value-of select="xsd:annotation/xsd:appinfo/xsdxt:title"/> + + + <xslt:value-of select="$defaultTitle"/> + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+
Loading...
+
+
+
+ + + +

+
+ +

+
+
+ + + + + + + + + + + +
+ + + + + + + + + +

Namespaces

+ + + +
+

+ Your browser does not seem to have support for + namespace nodes in XPath. If you're a Firefox + user, please consider voting to get this issue + resolved: + + https://bugzilla.mozilla.org/show_bug.cgi?id=94270 + +

+
+
+ + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ + + + + trc.schema.controller.links[' + + ']=[ + + + + + + + + + + + + + + + + + + + + + + + + + + , + + + ]; + + + + + + trc.schema.controller.index = + + + + + + index + + + Index Schema Document + + + ; + + + + + + + trc.schema.controller.links[' + + ']=[ + + + + # + + + + + + + + See definition of + + + + + , + + + ]; + + + + + + + + { href : + + + + + + , name : + + + + + + , title : + + + + + + } + + + + + + + + +

Imports

+ + + + + + + + + +
+ + +
+
+ + + Visit + + +
+
+ +
+
+
+
+ + +

Includes

+ + + + + + + + +
+
+
+ + + Visit + + +
+
+ +
+
+
+
+ + +

Elements

+ + + + + + + + +
+ + + + + + +
+
+ + + trc.schema.sampleManager.showSample( + + + + ); + + + + + + + + + + + + + + + + + + + + +
+
+ + + + Sample +
+ +
+ +
+
+
+ + + +
+ + + + + + + Loading... + + + + + + +
+
+ + + + + + +

Complex Types

+ + + + + + +
+ + +

Simple Types

+ + + + + + +
+ + + + + + # + + + + + + + + + + + + + + + + + + +

+ +

+ + + + + +
+ extends: + + + + , + + +
+
+ +
+ restricts: + + + + , + + +
+
+
+
+ + + +
+ + + + + + + + + + SubAttributes + + + Attributes + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + SubDocumentation + + + Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ Sequence + + +
+
+ + + +
+
+
+ +
+ + + +
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + <?> (Any Element) + + + + + + + + @? (Any Attribute) + + + + + +
+ restriction +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
enum values
+ + + + + + + + +
+ + + + + +
+
+ + + (id = + + ) + +
+ +
+ +
+
+
+ +
+ + + + + + + + (id = + + ) + + + (fixed) + + + + + + + + + + + + +
+ +
+ +
+ +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + < + + > + + + + + + @ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
diff --git a/doc/source/index.rst b/doc/source/index.rst new file mode 100644 index 000000000..51c63d277 --- /dev/null +++ b/doc/source/index.rst @@ -0,0 +1,60 @@ +.. + Copyright 2011-2013 OpenStack Foundation + All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); you may + not use this file except in compliance with the License. You may obtain + a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + License for the specific language governing permissions and limitations + under the License. + +Welcome to Neutron's developer documentation! +============================================= + +Neutron is an OpenStack project to provide "network connectivity as a service" +between interface devices (e.g., vNICs) managed by other OpenStack services +(e.g., nova). It implements the `Neutron API`_. + +.. _`Neutron API`: http://docs.openstack.org/api/openstack-network/2.0/content/ + +This document describes Neutron for contributors of the project, and assumes +that you are already familiar with Neutron from an `end-user perspective`_. + +.. _`end-user perspective`: http://docs.openstack.org/trunk/openstack-network/admin/content/index.html + +This documentation is generated by the Sphinx toolkit and lives in the source +tree. Additional documentation on Neutron and other components of OpenStack +can be found on the `OpenStack wiki`_ and the `Neutron section of the wiki`. +The `Neutron Development wiki`_ is also a good resource for new contributors. + +.. _`OpenStack wiki`: http://wiki.openstack.org +.. _`Neutron section of the wiki`: http://wiki.openstack.org/Neutron +.. _`Neutron Development wiki`: http://wiki.openstack.org/NeutronDevelopment + +Enjoy! + +Developer Docs +============== + +.. 
toctree:: + :maxdepth: 1 + + devref/index + +API Extensions +============== + +Go to http://api.openstack.org for information about OpenStack Network API extensions. + +Man Pages +--------- + +.. toctree:: + + man/neutron-server diff --git a/doc/source/man/neutron-server.rst b/doc/source/man/neutron-server.rst new file mode 100644 index 000000000..ea6c4cbbb --- /dev/null +++ b/doc/source/man/neutron-server.rst @@ -0,0 +1,75 @@ +============== +neutron-server +============== + +-------------- +Neutron Server +-------------- + +:Author: openstack@lists.openstack.org +:Date: 2012-04-05 +:Copyright: OpenStack Foundation +:Version: 2012.1 +:Manual section: 1 +:Manual group: cloud computing + +SYNOPSIS +======== + + neutron-server [options] + +DESCRIPTION +=========== + +neutron-server provides a webserver that exposes the Neutron API, and +passes all webservice calls to the Neutron plugin for processing. + +OPTIONS +======= + + --version show program's version number and exit + -h, --help show this help message and exit + -v, --verbose Print more verbose output + -d, --debug Print debugging output + --config-file=PATH Path to the config file to use, for example, + /etc/neutron/neutron.conf. When not specified + (the default), we generally look at the first argument + specified to be a config file, and if that is also + missing, we search standard directories for a config + file. (/etc/neutron/, + /usr/lib/pythonX/site-packages/neutron/) + + Logging Options: + The following configuration options are specific to logging + functionality for this program. + + --log-config=PATH If this option is specified, the logging configuration + file specified is used and overrides any other logging + options specified. Please see the Python logging + module documentation for details on logging + configuration files. + --log-date-format=FORMAT + Format string for %(asctime)s in log records. Default: + %Y-%m-%d %H:%M:%S + --use-syslog Output logs to syslog. 
+ --log-file=PATH (Optional) Name of log file to output to. If not set, + logging will go to stdout. + --log-dir=LOG_DIR (Optional) The directory to keep log files in (will be + prepended to --logfile) + +FILES +======== + +plugins.ini file contains the plugin information +neutron.conf file contains configuration information in the form of python-gflags. + +SEE ALSO +======== + +* `OpenStack Neutron `__ + +BUGS +==== + +* Neutron is sourced in Launchpad so you can view current bugs at `OpenStack Bugs `__ + diff --git a/etc/api-paste.ini b/etc/api-paste.ini new file mode 100644 index 000000000..be8aae17f --- /dev/null +++ b/etc/api-paste.ini @@ -0,0 +1,30 @@ +[composite:neutron] +use = egg:Paste#urlmap +/: neutronversions +/v2.0: neutronapi_v2_0 + +[composite:neutronapi_v2_0] +use = call:neutron.auth:pipeline_factory +noauth = request_id catch_errors extensions neutronapiapp_v2_0 +keystone = request_id catch_errors authtoken keystonecontext extensions neutronapiapp_v2_0 + +[filter:request_id] +paste.filter_factory = neutron.openstack.common.middleware.request_id:RequestIdMiddleware.factory + +[filter:catch_errors] +paste.filter_factory = neutron.openstack.common.middleware.catch_errors:CatchErrorsMiddleware.factory + +[filter:keystonecontext] +paste.filter_factory = neutron.auth:NeutronKeystoneContext.factory + +[filter:authtoken] +paste.filter_factory = keystoneclient.middleware.auth_token:filter_factory + +[filter:extensions] +paste.filter_factory = neutron.api.extensions:plugin_aware_extension_middleware_factory + +[app:neutronversions] +paste.app_factory = neutron.api.versions:Versions.factory + +[app:neutronapiapp_v2_0] +paste.app_factory = neutron.api.v2.router:APIRouter.factory diff --git a/etc/dhcp_agent.ini b/etc/dhcp_agent.ini new file mode 100644 index 000000000..9836d3500 --- /dev/null +++ b/etc/dhcp_agent.ini @@ -0,0 +1,88 @@ +[DEFAULT] +# Show debugging output in log (sets DEBUG log level output) +# debug = False + +# The DHCP agent will resync its 
state with Neutron to recover from any +# transient notification or rpc errors. The interval is number of +# seconds between attempts. +# resync_interval = 5 + +# The DHCP agent requires an interface driver be set. Choose the one that best +# matches your plugin. +# interface_driver = + +# Example of interface_driver option for OVS based plugins(OVS, Ryu, NEC, NVP, +# BigSwitch/Floodlight) +# interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver + +# Name of Open vSwitch bridge to use +# ovs_integration_bridge = br-int + +# Use veth for an OVS interface or not. +# Support kernels with limited namespace support +# (e.g. RHEL 6.5) so long as ovs_use_veth is set to True. +# ovs_use_veth = False + +# Example of interface_driver option for LinuxBridge +# interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver + +# The agent can use other DHCP drivers. Dnsmasq is the simplest and requires +# no additional setup of the DHCP server. +# dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq + +# Allow overlapping IP (Must have kernel build with CONFIG_NET_NS=y and +# iproute2 package that supports namespaces). +# use_namespaces = True + +# The DHCP server can assist with providing metadata support on isolated +# networks. Setting this value to True will cause the DHCP server to append +# specific host routes to the DHCP request. The metadata service will only +# be activated when the subnet does not contain any router port. The guest +# instance must be configured to request host routes via DHCP (Option 121). +# enable_isolated_metadata = False + +# Allows for serving metadata requests coming from a dedicated metadata +# access network whose cidr is 169.254.169.254/16 (or larger prefix), and +# is connected to a Neutron router from which the VMs send metadata +# request. In this case DHCP Option 121 will not be injected in VMs, as +# they will be able to reach 169.254.169.254 through a router. 
+# This option requires enable_isolated_metadata = True +# enable_metadata_network = False + +# Number of threads to use during sync process. Should not exceed connection +# pool size configured on server. +# num_sync_threads = 4 + +# Location to store DHCP server config files +# dhcp_confs = $state_path/dhcp + +# Domain to use for building the hostnames +# dhcp_domain = openstacklocal + +# Override the default dnsmasq settings with this file +# dnsmasq_config_file = + +# Comma-separated list of DNS servers which will be used by dnsmasq +# as forwarders. +# dnsmasq_dns_servers = + +# Limit number of leases to prevent a denial-of-service. +# dnsmasq_lease_max = 16777216 + +# Location to DHCP lease relay UNIX domain socket +# dhcp_lease_relay_socket = $state_path/dhcp/lease_relay + +# Location of Metadata Proxy UNIX domain socket +# metadata_proxy_socket = $state_path/metadata_proxy + +# dhcp_delete_namespaces, which is false by default, can be set to True if +# namespaces can be deleted cleanly on the host running the dhcp agent. +# Do not enable this until you understand the problem with the Linux iproute +# utility mentioned in https://bugs.launchpad.net/neutron/+bug/1052535 and +# you are sure that your version of iproute does not suffer from the problem. +# If True, namespaces will be deleted when a dhcp server is disabled. +# dhcp_delete_namespaces = False + +# Timeout for ovs-vsctl commands. +# If the timeout expires, ovs commands will fail with ALARMCLOCK error. +# ovs_vsctl_timeout = 10 diff --git a/etc/fwaas_driver.ini b/etc/fwaas_driver.ini new file mode 100644 index 000000000..41f761abf --- /dev/null +++ b/etc/fwaas_driver.ini @@ -0,0 +1,3 @@ +[fwaas] +#driver = neutron.services.firewall.drivers.linux.iptables_fwaas.IptablesFwaasDriver +#enabled = True diff --git a/etc/init.d/neutron-server b/etc/init.d/neutron-server new file mode 100755 index 000000000..98e5da610 --- /dev/null +++ b/etc/init.d/neutron-server @@ -0,0 +1,68 @@ +#! 
/bin/sh +### BEGIN INIT INFO +# Provides: neutron-server +# Required-Start: $remote_fs $syslog +# Required-Stop: $remote_fs $syslog +# Default-Start: 2 3 4 5 +# Default-Stop: 0 1 6 +# Short-Description: neutron-server +# Description: Provides the Neutron networking service +### END INIT INFO + +set -e + +PIDFILE=/var/run/neutron/neutron-server.pid +LOGFILE=/var/log/neutron/neutron-server.log + +DAEMON=/usr/bin/neutron-server +DAEMON_ARGS="--log-file=$LOGFILE" +DAEMON_DIR=/var/run + +ENABLED=true + +if test -f /etc/default/neutron-server; then + . /etc/default/neutron-server +fi + +mkdir -p /var/run/neutron +mkdir -p /var/log/neutron + +. /lib/lsb/init-functions + +export PATH="${PATH:+$PATH:}/usr/sbin:/sbin" +export TMPDIR=/var/lib/neutron/tmp + +if [ ! -x ${DAEMON} ] ; then + exit 0 +fi + +case "$1" in + start) + test "$ENABLED" = "true" || exit 0 + log_daemon_msg "Starting neutron server" "neutron-server" + start-stop-daemon -Sbmv --pidfile $PIDFILE --chdir $DAEMON_DIR --exec $DAEMON -- $DAEMON_ARGS + log_end_msg $? + ;; + stop) + test "$ENABLED" = "true" || exit 0 + log_daemon_msg "Stopping neutron server" "neutron-server" + start-stop-daemon --stop --oknodo --pidfile ${PIDFILE} + log_end_msg $? + ;; + restart|force-reload) + test "$ENABLED" = "true" || exit 1 + $0 stop + sleep 1 + $0 start + ;; + status) + test "$ENABLED" = "true" || exit 0 + status_of_proc -p $PIDFILE $DAEMON neutron-server && exit 0 || exit $? + ;; + *) + log_action_msg "Usage: /etc/init.d/neutron-server {start|stop|restart|force-reload|status}" + exit 1 + ;; +esac + +exit 0 diff --git a/etc/l3_agent.ini b/etc/l3_agent.ini new file mode 100644 index 000000000..e6903988d --- /dev/null +++ b/etc/l3_agent.ini @@ -0,0 +1,79 @@ +[DEFAULT] +# Show debugging output in log (sets DEBUG log level output) +# debug = False + +# L3 requires that an interface driver be set. Choose the one that best +# matches your plugin. 
+# interface_driver = + +# Example of interface_driver option for OVS based plugins (OVS, Ryu, NEC) +# that supports L3 agent +# interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver + +# Use veth for an OVS interface or not. +# Support kernels with limited namespace support +# (e.g. RHEL 6.5) so long as ovs_use_veth is set to True. +# ovs_use_veth = False + +# Example of interface_driver option for LinuxBridge +# interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver + +# Allow overlapping IP (Must have kernel build with CONFIG_NET_NS=y and +# iproute2 package that supports namespaces). +# use_namespaces = True + +# If use_namespaces is set as False then the agent can only configure one router. + +# This is done by setting the specific router_id. +# router_id = + +# When external_network_bridge is set, each L3 agent can be associated +# with no more than one external network. This value should be set to the UUID +# of that external network. To allow L3 agent support multiple external +# networks, both the external_network_bridge and gateway_external_network_id +# must be left empty. +# gateway_external_network_id = + +# Indicates that this L3 agent should also handle routers that do not have +# an external network gateway configured. This option should be True only +# for a single agent in a Neutron deployment, and may be False for all agents +# if all routers must have an external network gateway +# handle_internal_only_routers = True + +# Name of bridge used for external network traffic. This should be set to +# empty value for the linux bridge. when this parameter is set, each L3 agent +# can be associated with no more than one external network. +# external_network_bridge = br-ex + +# TCP Port used by Neutron metadata server +# metadata_port = 9697 + +# Send this many gratuitous ARPs for HA setup. Set it below or equal to 0 +# to disable this feature. 
+# send_arp_for_ha = 3 + +# seconds between re-sync routers' data if needed +# periodic_interval = 40 + +# seconds to start to sync routers' data after +# starting agent +# periodic_fuzzy_delay = 5 + +# enable_metadata_proxy, which is true by default, can be set to False +# if the Nova metadata server is not available +# enable_metadata_proxy = True + +# Location of Metadata Proxy UNIX domain socket +# metadata_proxy_socket = $state_path/metadata_proxy + +# router_delete_namespaces, which is false by default, can be set to True if +# namespaces can be deleted cleanly on the host running the L3 agent. +# Do not enable this until you understand the problem with the Linux iproute +# utility mentioned in https://bugs.launchpad.net/neutron/+bug/1052535 and +# you are sure that your version of iproute does not suffer from the problem. +# If True, namespaces will be deleted when a router is destroyed. +# router_delete_namespaces = False + +# Timeout for ovs-vsctl commands. +# If the timeout expires, ovs commands will fail with ALARMCLOCK error. +# ovs_vsctl_timeout = 10 diff --git a/etc/lbaas_agent.ini b/etc/lbaas_agent.ini new file mode 100644 index 000000000..68a2759e6 --- /dev/null +++ b/etc/lbaas_agent.ini @@ -0,0 +1,42 @@ +[DEFAULT] +# Show debugging output in log (sets DEBUG log level output). +# debug = False + +# The LBaaS agent will resync its state with Neutron to recover from any +# transient notification or rpc errors. The interval is number of +# seconds between attempts. +# periodic_interval = 10 + +# LBaas requires an interface driver be set. Choose the one that best +# matches your plugin. +# interface_driver = + +# Example of interface_driver option for OVS based plugins (OVS, Ryu, NEC, NVP, +# BigSwitch/Floodlight) +# interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver + +# Use veth for an OVS interface or not. +# Support kernels with limited namespace support +# (e.g. RHEL 6.5) so long as ovs_use_veth is set to True. 
+# ovs_use_veth = False + +# Example of interface_driver option for LinuxBridge +# interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver + +# The agent requires drivers to manage the loadbalancer. HAProxy is the opensource version. +# Multiple device drivers reflecting different service providers could be specified: +# device_driver = path.to.provider1.driver.Driver +# device_driver = path.to.provider2.driver.Driver +# Default is: +# device_driver = neutron.services.loadbalancer.drivers.haproxy.namespace_driver.HaproxyNSDriver + +[haproxy] +# Location to store config and state files +# loadbalancer_state_path = $state_path/lbaas + +# The user group +# user_group = nogroup + +# When delete and re-add the same vip, send this many gratuitous ARPs to flush +# the ARP cache in the Router. Set it below or equal to 0 to disable this feature. +# send_gratuitous_arp = 3 diff --git a/etc/metadata_agent.ini b/etc/metadata_agent.ini new file mode 100644 index 000000000..84442ea1a --- /dev/null +++ b/etc/metadata_agent.ini @@ -0,0 +1,59 @@ +[DEFAULT] +# Show debugging output in log (sets DEBUG log level output) +# debug = True + +# The Neutron user information for accessing the Neutron API. 
+auth_url = http://localhost:5000/v2.0 +auth_region = RegionOne +# Turn off verification of the certificate for ssl +# auth_insecure = False +# Certificate Authority public key (CA cert) file for ssl +# auth_ca_cert = +admin_tenant_name = %SERVICE_TENANT_NAME% +admin_user = %SERVICE_USER% +admin_password = %SERVICE_PASSWORD% + +# Network service endpoint type to pull from the keystone catalog +# endpoint_type = adminURL + +# IP address used by Nova metadata server +# nova_metadata_ip = 127.0.0.1 + +# TCP Port used by Nova metadata server +# nova_metadata_port = 8775 + +# Which protocol to use for requests to Nova metadata server, http or https +# nova_metadata_protocol = http + +# Whether insecure SSL connection should be accepted for Nova metadata server +# requests +# nova_metadata_insecure = False + +# Client certificate for nova api, needed when nova api requires client +# certificates +# nova_client_cert = + +# Private key for nova client certificate +# nova_client_priv_key = + +# When proxying metadata requests, Neutron signs the Instance-ID header with a +# shared secret to prevent spoofing. You may select any string for a secret, +# but it must match here and in the configuration used by the Nova Metadata +# Server. NOTE: Nova uses a different key: neutron_metadata_proxy_shared_secret +# metadata_proxy_shared_secret = + +# Location of Metadata Proxy UNIX domain socket +# metadata_proxy_socket = $state_path/metadata_proxy + +# Number of separate worker processes for metadata server. Defaults to +# half the number of CPU cores +# metadata_workers = + +# Number of backlog requests to configure the metadata server socket with +# metadata_backlog = 4096 + +# URL to connect to the cache backend. +# default_ttl=0 parameter will cause cache entries to never expire. +# Otherwise default_ttl specifies time in seconds a cache entry is valid for. +# No cache is used in case no value is passed. 
+# cache_url = memory://?default_ttl=5 diff --git a/etc/metering_agent.ini b/etc/metering_agent.ini new file mode 100644 index 000000000..e6ab52209 --- /dev/null +++ b/etc/metering_agent.ini @@ -0,0 +1,15 @@ +[DEFAULT] +# Show debugging output in log (sets DEBUG log level output) +# debug = True + +# driver = neutron.services.metering.drivers.iptables.iptables_driver.IptablesMeteringDriver + +# Interval between two metering measures +# measure_interval = 30 + +# Interval between two metering reports +# report_interval = 300 + +# interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver + +# use_namespaces = True diff --git a/etc/neutron.conf b/etc/neutron.conf new file mode 100644 index 000000000..31977a874 --- /dev/null +++ b/etc/neutron.conf @@ -0,0 +1,479 @@ +[DEFAULT] +# Print more verbose output (set logging level to INFO instead of default WARNING level). +# verbose = False + +# Print debugging output (set logging level to DEBUG instead of default WARNING level). +# debug = False + +# Where to store Neutron state files. This directory must be writable by the +# user executing the agent. +# state_path = /var/lib/neutron + +# Where to store lock files +lock_path = $state_path/lock + +# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s +# log_date_format = %Y-%m-%d %H:%M:%S + +# use_syslog -> syslog +# log_file and log_dir -> log_dir/log_file +# (not log_file) and log_dir -> log_dir/{binary_name}.log +# use_stderr -> stderr +# (not user_stderr) and (not log_file) -> stdout +# publish_errors -> notification system + +# use_syslog = False +# syslog_log_facility = LOG_USER + +# use_stderr = True +# log_file = +# log_dir = + +# publish_errors = False + +# Address to bind the API server to +# bind_host = 0.0.0.0 + +# Port the bind the API server to +# bind_port = 9696 + +# Path to the extensions. Note that this can be a colon-separated list of +# paths. 
For example: +# api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions +# The __path__ of neutron.extensions is appended to this, so if your +# extensions are in there you don't need to specify them here +# api_extensions_path = + +# (StrOpt) Neutron core plugin entrypoint to be loaded from the +# neutron.core_plugins namespace. See setup.cfg for the entrypoint names of the +# plugins included in the neutron source distribution. For compatibility with +# previous versions, the class name of a plugin can be specified instead of its +# entrypoint name. +# +# core_plugin = +# Example: core_plugin = ml2 + +# (ListOpt) List of service plugin entrypoints to be loaded from the +# neutron.service_plugins namespace. See setup.cfg for the entrypoint names of +# the plugins included in the neutron source distribution. For compatibility +# with previous versions, the class name of a plugin can be specified instead +# of its entrypoint name. +# +# service_plugins = +# Example: service_plugins = router,firewall,lbaas,vpnaas,metering + +# Paste configuration file +# api_paste_config = api-paste.ini + +# The strategy to be used for auth. +# Supported values are 'keystone'(default), 'noauth'. +# auth_strategy = keystone + +# Base MAC address. The first 3 octets will remain unchanged. If the +# 4h octet is not 00, it will also be used. The others will be +# randomly generated. +# 3 octet +# base_mac = fa:16:3e:00:00:00 +# 4 octet +# base_mac = fa:16:3e:4f:00:00 + +# Maximum amount of retries to generate a unique MAC address +# mac_generation_retries = 16 + +# DHCP Lease duration (in seconds). Use -1 to +# tell dnsmasq to use infinite lease times. 
+# dhcp_lease_duration = 86400 + +# Allow sending resource operation notification to DHCP agent +# dhcp_agent_notification = True + +# Enable or disable bulk create/update/delete operations +# allow_bulk = True +# Enable or disable pagination +# allow_pagination = False +# Enable or disable sorting +# allow_sorting = False +# Enable or disable overlapping IPs for subnets +# Attention: the following parameter MUST be set to False if Neutron is +# being used in conjunction with nova security groups +# allow_overlapping_ips = False +# Ensure that configured gateway is on subnet +# force_gateway_on_subnet = False + + +# RPC configuration options. Defined in rpc __init__ +# The messaging module to use, defaults to kombu. +# rpc_backend = neutron.openstack.common.rpc.impl_kombu +# Size of RPC thread pool +# rpc_thread_pool_size = 64 +# Size of RPC connection pool +# rpc_conn_pool_size = 30 +# Seconds to wait for a response from call or multicall +# rpc_response_timeout = 60 +# Seconds to wait before a cast expires (TTL). Only supported by impl_zmq. +# rpc_cast_timeout = 30 +# Modules of exceptions that are permitted to be recreated +# upon receiving exception data from an rpc call. 
+# allowed_rpc_exception_modules = neutron.openstack.common.exception, nova.exception +# AMQP exchange to connect to if using RabbitMQ or QPID +# control_exchange = neutron + +# If passed, use a fake RabbitMQ provider +# fake_rabbit = False + +# Configuration options if sending notifications via kombu rpc (these are +# the defaults) +# SSL version to use (valid only if SSL enabled) +# kombu_ssl_version = +# SSL key file (valid only if SSL enabled) +# kombu_ssl_keyfile = +# SSL cert file (valid only if SSL enabled) +# kombu_ssl_certfile = +# SSL certification authority file (valid only if SSL enabled) +# kombu_ssl_ca_certs = +# IP address of the RabbitMQ installation +# rabbit_host = localhost +# Password of the RabbitMQ server +# rabbit_password = guest +# Port where RabbitMQ server is running/listening +# rabbit_port = 5672 +# RabbitMQ single or HA cluster (host:port pairs i.e: host1:5672, host2:5672) +# rabbit_hosts is defaulted to '$rabbit_host:$rabbit_port' +# rabbit_hosts = localhost:5672 +# User ID used for RabbitMQ connections +# rabbit_userid = guest +# Location of a virtual RabbitMQ installation. +# rabbit_virtual_host = / +# Maximum retries with trying to connect to RabbitMQ +# (the default of 0 implies an infinite retry count) +# rabbit_max_retries = 0 +# RabbitMQ connection retry interval +# rabbit_retry_interval = 1 +# Use HA queues in RabbitMQ (x-ha-policy: all). You need to +# wipe RabbitMQ database when changing this option. 
(boolean value) +# rabbit_ha_queues = false + +# QPID +# rpc_backend=neutron.openstack.common.rpc.impl_qpid +# Qpid broker hostname +# qpid_hostname = localhost +# Qpid broker port +# qpid_port = 5672 +# Qpid single or HA cluster (host:port pairs i.e: host1:5672, host2:5672) +# qpid_hosts is defaulted to '$qpid_hostname:$qpid_port' +# qpid_hosts = localhost:5672 +# Username for qpid connection +# qpid_username = '' +# Password for qpid connection +# qpid_password = '' +# Space separated list of SASL mechanisms to use for auth +# qpid_sasl_mechanisms = '' +# Seconds between connection keepalive heartbeats +# qpid_heartbeat = 60 +# Transport to use, either 'tcp' or 'ssl' +# qpid_protocol = tcp +# Disable Nagle algorithm +# qpid_tcp_nodelay = True + +# ZMQ +# rpc_backend=neutron.openstack.common.rpc.impl_zmq +# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP. +# The "host" option should point or resolve to this address. +# rpc_zmq_bind_address = * + +# ============ Notification System Options ===================== + +# Notifications can be sent when network/subnet/port are created, updated or deleted. +# There are three methods of sending notifications: logging (via the +# log_file directive), rpc (via a message queue) and +# noop (no notifications sent, the default) + +# Notification_driver can be defined multiple times +# Do nothing driver +# notification_driver = neutron.openstack.common.notifier.no_op_notifier +# Logging driver +# notification_driver = neutron.openstack.common.notifier.log_notifier +# RPC driver. +notification_driver = neutron.openstack.common.notifier.rpc_notifier + +# default_notification_level is used to form actual topic name(s) or to set logging level +# default_notification_level = INFO + +# default_publisher_id is a part of the notification payload +# host = myhost.com +# default_publisher_id = $host + +# Defined in rpc_notifier, can be comma separated values. 
+# The actual topic names will be %s.%(default_notification_level)s +# notification_topics = notifications + +# Default maximum number of items returned in a single response, +# value == infinite and value < 0 means no max limit, and value must +# be greater than 0. If the number of items requested is greater than +# pagination_max_limit, server will just return pagination_max_limit +# of number of items. +# pagination_max_limit = -1 + +# Maximum number of DNS nameservers per subnet +# max_dns_nameservers = 5 + +# Maximum number of host routes per subnet +# max_subnet_host_routes = 20 + +# Maximum number of fixed ips per port +# max_fixed_ips_per_port = 5 + +# =========== items for agent management extension ============= +# Seconds to regard the agent as down; should be at least twice +# report_interval, to be sure the agent is down for good +# agent_down_time = 75 +# =========== end of items for agent management extension ===== + +# =========== items for agent scheduler extension ============= +# Driver to use for scheduling network to DHCP agent +# network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.ChanceScheduler +# Driver to use for scheduling router to a default L3 agent +# router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.ChanceScheduler +# Driver to use for scheduling a loadbalancer pool to an lbaas agent +# loadbalancer_pool_scheduler_driver = neutron.services.loadbalancer.agent_scheduler.ChanceScheduler + +# Allow auto scheduling networks to DHCP agent. It will schedule non-hosted +# networks to first DHCP agent which sends get_active_networks message to +# neutron server +# network_auto_schedule = True + +# Allow auto scheduling routers to L3 agent. It will schedule non-hosted +# routers to first L3 agent which sends sync_routers message to neutron server +# router_auto_schedule = True + +# Number of DHCP agents scheduled to host a network. This enables redundant +# DHCP agents for configured networks. 
+# dhcp_agents_per_network = 1 + +# =========== end of items for agent scheduler extension ===== + +# =========== WSGI parameters related to the API server ============== +# Number of separate worker processes to spawn. The default, 0, runs the +# worker thread in the current process. Greater than 0 launches that number of +# child processes as workers. The parent process manages them. +# api_workers = 0 + +# Number of separate RPC worker processes to spawn. The default, 0, runs the +# worker thread in the current process. Greater than 0 launches that number of +# child processes as RPC workers. The parent process manages them. +# This feature is experimental until issues are addressed and testing has been +# enabled for various plugins for compatibility. +# rpc_workers = 0 + +# Sets the value of TCP_KEEPIDLE in seconds to use for each server socket when +# starting API server. Not supported on OS X. +# tcp_keepidle = 600 + +# Number of seconds to keep retrying to listen +# retry_until_window = 30 + +# Number of backlog requests to configure the socket with. +# backlog = 4096 + +# Max header line to accommodate large tokens +# max_header_line = 16384 + +# Enable SSL on the API server +# use_ssl = False + +# Certificate file to use when starting API server securely +# ssl_cert_file = /path/to/certfile + +# Private key file to use when starting API server securely +# ssl_key_file = /path/to/keyfile + +# CA certificate file to use when starting API server securely to +# verify connecting clients. This is an optional parameter only required if +# API clients need to authenticate to the API server using SSL certificates +# signed by a trusted CA +# ssl_ca_file = /path/to/cafile +# ======== end of WSGI parameters related to the API server ========== + + +# ======== neutron nova interactions ========== +# Send notification to nova when port status is active. 
+# notify_nova_on_port_status_changes = True + +# Send notifications to nova when port data (fixed_ips/floatingips) change +# so nova can update it's cache. +# notify_nova_on_port_data_changes = True + +# URL for connection to nova (Only supports one nova region currently). +# nova_url = http://127.0.0.1:8774/v2 + +# Name of nova region to use. Useful if keystone manages more than one region +# nova_region_name = + +# Username for connection to nova in admin context +# nova_admin_username = + +# The uuid of the admin nova tenant +# nova_admin_tenant_id = + +# Password for connection to nova in admin context. +# nova_admin_password = + +# Authorization URL for connection to nova in admin context. +# nova_admin_auth_url = + +# CA file for novaclient to verify server certificates +# nova_ca_certificates_file = + +# Boolean to control ignoring SSL errors on the nova url +# nova_api_insecure = False + +# Number of seconds between sending events to nova if there are any events to send +# send_events_interval = 2 + +# ======== end of neutron nova interactions ========== + +[quotas] +# Default driver to use for quota checks +# quota_driver = neutron.db.quota_db.DbQuotaDriver + +# Resource name(s) that are supported in quota features +# quota_items = network,subnet,port + +# Default number of resource allowed per tenant. A negative value means +# unlimited. +# default_quota = -1 + +# Number of networks allowed per tenant. A negative value means unlimited. +# quota_network = 10 + +# Number of subnets allowed per tenant. A negative value means unlimited. +# quota_subnet = 10 + +# Number of ports allowed per tenant. A negative value means unlimited. +# quota_port = 50 + +# Number of security groups allowed per tenant. A negative value means +# unlimited. +# quota_security_group = 10 + +# Number of security group rules allowed per tenant. A negative value means +# unlimited. +# quota_security_group_rule = 100 + +# Number of vips allowed per tenant. 
A negative value means unlimited. +# quota_vip = 10 + +# Number of pools allowed per tenant. A negative value means unlimited. +# quota_pool = 10 + +# Number of pool members allowed per tenant. A negative value means unlimited. +# The default is unlimited because a member is not a real resource consumer +# on Openstack. However, on back-end, a member is a resource consumer +# and that is the reason why quota is possible. +# quota_member = -1 + +# Number of health monitors allowed per tenant. A negative value means +# unlimited. +# The default is unlimited because a health monitor is not a real resource +# consumer on Openstack. However, on back-end, a member is a resource consumer +# and that is the reason why quota is possible. +# quota_health_monitor = -1 + +# Number of routers allowed per tenant. A negative value means unlimited. +# quota_router = 10 + +# Number of floating IPs allowed per tenant. A negative value means unlimited. +# quota_floatingip = 50 + +[agent] +# Use "sudo neutron-rootwrap /etc/neutron/rootwrap.conf" to use the real +# root filter facility. +# Change to "sudo" to skip the filtering and just run the command directly +# root_helper = sudo + +# =========== items for agent management extension ============= +# seconds between nodes reporting state to server; should be less than +# agent_down_time, best if it is half or less than agent_down_time +# report_interval = 30 + +# =========== end of items for agent management extension ===== + +[keystone_authtoken] +auth_host = 127.0.0.1 +auth_port = 35357 +auth_protocol = http +admin_tenant_name = %SERVICE_TENANT_NAME% +admin_user = %SERVICE_USER% +admin_password = %SERVICE_PASSWORD% + +[database] +# This line MUST be changed to actually run the plugin. +# Example: +# connection = mysql://root:pass@127.0.0.1:3306/neutron +# Replace 127.0.0.1 above with the IP address of the database used by the +# main neutron server. (Leave it as is if the database runs on this host.) 
+# connection = sqlite:// +# NOTE: In deployment the [database] section and its connection attribute may +# be set in the corresponding core plugin '.ini' file. However, it is suggested +# to put the [database] section and its connection attribute in this +# configuration file. + +# Database engine for which script will be generated when using offline +# migration +# engine = + +# The SQLAlchemy connection string used to connect to the slave database +# slave_connection = + +# Database reconnection retry times - in event connectivity is lost +# set to -1 implies an infinite retry count +# max_retries = 10 + +# Database reconnection interval in seconds - if the initial connection to the +# database fails +# retry_interval = 10 + +# Minimum number of SQL connections to keep open in a pool +# min_pool_size = 1 + +# Maximum number of SQL connections to keep open in a pool +# max_pool_size = 10 + +# Timeout in seconds before idle sql connections are reaped +# idle_timeout = 3600 + +# If set, use this value for max_overflow with sqlalchemy +# max_overflow = 20 + +# Verbosity of SQL debugging information. 0=None, 100=Everything +# connection_debug = 0 + +# Add python stack traces to SQL as comment strings +# connection_trace = False + +# If set, use this value for pool_timeout with sqlalchemy +# pool_timeout = 10 + +[service_providers] +# Specify service providers (drivers) for advanced services like loadbalancer, VPN, Firewall. 
+# Must be in form: +# service_provider=<service_type>:<name>:<driver>[:default] +# List of allowed service types includes LOADBALANCER, FIREWALL, VPN +# Combination of <service type> and <name> must be unique; <driver> must also be unique +# This is multiline option, example for default provider: +# service_provider=LOADBALANCER:name:lbaas_plugin_driver_path:default +# example of non-default provider: +# service_provider=FIREWALL:name2:firewall_driver_path +# --- Reference implementations --- +service_provider=LOADBALANCER:Haproxy:neutron.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default +service_provider=VPN:openswan:neutron.services.vpn.service_drivers.ipsec.IPsecVPNDriver:default +# In order to activate Radware's lbaas driver you need to uncomment the next line. +# If you want to keep the HA Proxy as the default lbaas driver, remove the attribute default from the line below. +# Otherwise comment the HA Proxy line +# service_provider = LOADBALANCER:Radware:neutron.services.loadbalancer.drivers.radware.driver.LoadBalancerDriver:default +# uncomment the following line to make the 'netscaler' LBaaS provider available. +# service_provider=LOADBALANCER:NetScaler:neutron.services.loadbalancer.drivers.netscaler.netscaler_driver.NetScalerPluginDriver +# Uncomment the following line (and comment out the OpenSwan VPN line) to enable Cisco's VPN driver. +# service_provider=VPN:cisco:neutron.services.vpn.service_drivers.cisco_ipsec.CiscoCsrIPsecVPNDriver:default +# Uncomment the line below to use Embrane heleos as Load Balancer service provider. +# service_provider=LOADBALANCER:Embrane:neutron.services.loadbalancer.drivers.embrane.driver.EmbraneLbaas:default diff --git a/etc/neutron/plugins/bigswitch/restproxy.ini b/etc/neutron/plugins/bigswitch/restproxy.ini new file mode 100644 index 000000000..256f7855b --- /dev/null +++ b/etc/neutron/plugins/bigswitch/restproxy.ini @@ -0,0 +1,114 @@ +# Config file for neutron-proxy-plugin. 
+ +[restproxy] +# All configuration for this plugin is in section '[restproxy]' +# +# The following parameters are supported: +# servers : <host:port>[,<host:port>]* (Error if not set) +# server_auth : <username:password> (default: no auth) +# server_ssl : True | False (default: True) +# ssl_cert_directory : <path> (default: /etc/neutron/plugins/bigswitch/ssl) +# no_ssl_validation : True | False (default: False) +# ssl_sticky : True | False (default: True) +# sync_data : True | False (default: False) +# auto_sync_on_failure : True | False (default: True) +# consistency_interval : <integer> (default: 60 seconds) +# server_timeout : <integer> (default: 10 seconds) +# neutron_id : <string> (default: neutron-<hostname>) +# add_meta_server_route : True | False (default: True) +# thread_pool_size : <integer> (default: 4) + +# A comma separated list of BigSwitch or Floodlight servers and port numbers. The plugin proxies the requests to the BigSwitch/Floodlight server, which performs the networking configuration. Note that only one server is needed per deployment, but you may wish to deploy multiple servers to support failover. +servers=localhost:8080 + +# The username and password for authenticating against the BigSwitch or Floodlight controller. +# server_auth=username:password + +# Use SSL when connecting to the BigSwitch or Floodlight controller. +# server_ssl=True + +# Directory which contains the ca_certs and host_certs to be used to validate +# controller certificates. +# ssl_cert_directory=/etc/neutron/plugins/bigswitch/ssl/ + +# If a certificate does not exist for a controller, trust and store the first +# certificate received for that controller and use it to validate future +# connections to that controller. 
+# ssl_sticky=True + +# Do not validate the controller certificates for SSL +# Warning: This will not provide protection against man-in-the-middle attacks +# no_ssl_validation=False + +# Sync data on connect +# sync_data=False + +# If neutron fails to create a resource because the backend controller +# doesn't know of a dependency, automatically trigger a full data +# synchronization to the controller. +# auto_sync_on_failure=True + +# Time between verifications that the backend controller +# database is consistent with Neutron. (0 to disable) +# consistency_interval = 60 + +# Maximum number of seconds to wait for proxy request to connect and complete. +# server_timeout=10 + +# User defined identifier for this Neutron deployment +# neutron_id = + +# Flag to decide if a route to the metadata server should be injected into the VM +# add_meta_server_route = True + +# Number of threads to use to handle large volumes of port creation requests +# thread_pool_size = 4 + +[nova] +# Specify the VIF_TYPE that will be controlled on the Nova compute instances +# options: ivs or ovs +# default: ovs +# vif_type = ovs + +# Overrides for vif types based on nova compute node host IDs +# Comma separated list of host IDs to fix to a specific VIF type +# The VIF type is taken from the end of the configuration item +# node_override_vif_<vif_type> +# For example, the following would set the VIF type to IVS for +# host-id1 and host-id2 +# node_override_vif_ivs=host-id1,host-id2 + +[router] +# Specify the default router rules installed in newly created tenant routers +# Specify multiple times for multiple rules +# Format is <tenant>:<source>:<destination>:<action> +# Optionally, a comma-separated list of nexthops may be included after <action> +# Use an * to specify default for all tenants +# Default is any any allow for all tenants +# tenant_default_router_rule=*:any:any:permit + +# Maximum number of rules that a single router may have +# Default is 200 +# max_router_rules=200 + +[restproxyagent] + +# Specify the name of the bridge used on 
compute nodes +# for attachment. +# Default: br-int +# integration_bridge=br-int + +# Change the frequency of polling by the restproxy agent. +# Value is seconds +# Default: 5 +# polling_interval=5 + +# Virtual switch type on the compute node. +# Options: ovs or ivs +# Default: ovs +# virtual_switch_type = ovs + +[securitygroup] +# Controls if neutron security group is enabled or not. +# It should be false when you use nova security group. +# enable_security_group = True diff --git a/etc/neutron/plugins/bigswitch/ssl/ca_certs/README b/etc/neutron/plugins/bigswitch/ssl/ca_certs/README new file mode 100644 index 000000000..e7e47a27c --- /dev/null +++ b/etc/neutron/plugins/bigswitch/ssl/ca_certs/README @@ -0,0 +1,3 @@ +Certificates in this folder will be used to +verify signatures for any controllers the plugin +connects to. diff --git a/etc/neutron/plugins/bigswitch/ssl/host_certs/README b/etc/neutron/plugins/bigswitch/ssl/host_certs/README new file mode 100644 index 000000000..8f5f5e77c --- /dev/null +++ b/etc/neutron/plugins/bigswitch/ssl/host_certs/README @@ -0,0 +1,6 @@ +Certificates in this folder must match the name +of the controller they should be used to authenticate +with a .pem extension. + +For example, the certificate for the controller +"192.168.0.1" should be named "192.168.0.1.pem". 
diff --git a/etc/neutron/plugins/brocade/brocade.ini b/etc/neutron/plugins/brocade/brocade.ini new file mode 100644 index 000000000..916e9e5d2 --- /dev/null +++ b/etc/neutron/plugins/brocade/brocade.ini @@ -0,0 +1,29 @@ +[switch] +# username = The SSH username to use +# password = The SSH password to use +# address = The address of the host to SSH to +# ostype = Should be NOS, but is unused otherwise +# +# Example: +# username = admin +# password = password +# address = 10.24.84.38 +# ostype = NOS + +[physical_interface] +# physical_interface = The network interface to use when creating a port +# +# Example: +# physical_interface = physnet1 + +[vlans] +# network_vlan_ranges = :nnnn:mmmm +# +# Example: +# network_vlan_ranges = physnet1:1000:2999 + +[linux_bridge] +# physical_interface_mappings = : +# +# Example: +# physical_interface_mappings = physnet1:em1 diff --git a/etc/neutron/plugins/cisco/cisco_plugins.ini b/etc/neutron/plugins/cisco/cisco_plugins.ini new file mode 100644 index 000000000..13d81f182 --- /dev/null +++ b/etc/neutron/plugins/cisco/cisco_plugins.ini @@ -0,0 +1,138 @@ +[cisco_plugins] + +# (StrOpt) Period-separated module path to the plugin class to use for +# the Cisco Nexus switches. +# +# nexus_plugin = neutron.plugins.cisco.nexus.cisco_nexus_plugin_v2.NexusPlugin + +# (StrOpt) Period-separated module path to the plugin class to use for +# the virtual switches on compute nodes. +# +# vswitch_plugin = neutron.plugins.openvswitch.ovs_neutron_plugin.OVSNeutronPluginV2 + + +[cisco] + +# (StrOpt) A short prefix to prepend to the VLAN number when creating a +# VLAN interface. For example, if an interface is being created for +# VLAN 2001 it will be named 'q-2001' using the default prefix. +# +# vlan_name_prefix = q- +# Example: vlan_name_prefix = vnet- + +# (StrOpt) A short prefix to prepend to the VLAN number when creating a +# provider VLAN interface. 
For example, if an interface is being created +# for provider VLAN 3003 it will be named 'p-3003' using the default prefix. +# +# provider_vlan_name_prefix = p- +# Example: provider_vlan_name_prefix = PV- + +# (BoolOpt) A flag indicating whether Openstack networking should manage the +# creation and removal of VLAN interfaces for provider networks on the Nexus +# switches. If the flag is set to False then Openstack will not create or +# remove VLAN interfaces for provider networks, and the administrator needs +# to manage these interfaces manually or by external orchestration. +# +# provider_vlan_auto_create = True + +# (BoolOpt) A flag indicating whether Openstack networking should manage +# the adding and removing of provider VLANs from trunk ports on the Nexus +# switches. If the flag is set to False then Openstack will not add or +# remove provider VLANs from trunk ports, and the administrator needs to +# manage these operations manually or by external orchestration. +# +# provider_vlan_auto_trunk = True + +# (StrOpt) Period-separated module path to the model class to use for +# the Cisco neutron plugin. +# +# model_class = neutron.plugins.cisco.models.virt_phy_sw_v2.VirtualPhysicalSwitchModelV2 + +# (StrOpt) Period-separated module path to the driver class to use for +# the Cisco Nexus switches. +# +# If no value is configured, a fake driver will be used. +# nexus_driver = neutron.plugins.cisco.test.nexus.fake_nexus_driver.CiscoNEXUSFakeDriver +# With real hardware, use the CiscoNEXUSDriver class: +# nexus_driver = neutron.plugins.cisco.nexus.cisco_nexus_network_driver_v2.CiscoNEXUSDriver + +# (BoolOpt) A flag to enable Layer 3 support on the Nexus switches. +# Note: This feature is not supported on all models/versions of Cisco +# Nexus switches. To use this feature, all of the Nexus switches in the +# deployment must support it. +# nexus_l3_enable = False + +# (BoolOpt) A flag to enable round robin scheduling of routers for SVI. 
+# svi_round_robin = False + +# Cisco Nexus Switch configurations. +# Each switch to be managed by Openstack Neutron must be configured here. +# +# Cisco Nexus Switch Format. +# [NEXUS_SWITCH:] +# = (1) +# ssh_port= (2) +# username= (3) +# password= (4) +# +# (1) For each host connected to a port on the switch, specify the hostname +# and the Nexus physical port (interface) it is connected to. +# (2) The TCP port for connecting via SSH to manage the switch. This is +# port number 22 unless the switch has been configured otherwise. +# (3) The username for logging into the switch to manage it. +# (4) The password for logging into the switch to manage it. +# +# Example: +# [NEXUS_SWITCH:1.1.1.1] +# compute1=1/1 +# compute2=1/2 +# ssh_port=22 +# username=admin +# password=mySecretPassword + +# +# N1KV Format. +# [N1KV:] +# username= +# password= +# +# Example: +# [N1KV:2.2.2.2] +# username=admin +# password=mySecretPassword + +[cisco_n1k] + +# (StrOpt) Specify the name of the integration bridge to which the VIFs are +# attached. +# +# integration_bridge = br-int + +# (StrOpt) Name of the policy profile to be associated with a port when no +# policy profile is specified during port creates. +# +# default_policy_profile = +# Example: default_policy_profile = service_profile + +# (StrOpt) Name of the policy profile to be associated with a port owned by +# network node (dhcp, router). +# +# network_node_policy_profile = +# Example: network_node_policy_profile = dhcp_pp + +# (StrOpt) Name of the network profile to be associated with a network when no +# network profile is specified during network creates. Admin should pre-create +# a network profile with this name. +# +# default_network_profile = +# Example: default_network_profile = network_pool + +# (IntOpt) Time in seconds for which the plugin polls the VSM for updates in +# policy profiles. +# +# poll_duration = +# Example: poll_duration = 180 + +# (IntOpt) Number of threads to use to make HTTP requests to the VSM. 
+# +# http_pool_size = 4 diff --git a/etc/neutron/plugins/cisco/cisco_vpn_agent.ini b/etc/neutron/plugins/cisco/cisco_vpn_agent.ini new file mode 100644 index 000000000..d15069b7c --- /dev/null +++ b/etc/neutron/plugins/cisco/cisco_vpn_agent.ini @@ -0,0 +1,22 @@ +[cisco_csr_ipsec] +# Status check interval in seconds, for VPNaaS IPSec connections used on CSR +# status_check_interval = 60 + +# Cisco CSR management port information for REST access used by VPNaaS +# TODO(pcm): Remove once CSR is integrated in as a Neutron router. +# +# Format is: +# [cisco_csr_rest:] +# rest_mgmt = +# tunnel_ip = +# username = +# password = +# timeout = +# +# where: +# public IP ----- Public IP address of router used with a VPN service (1:1 with CSR) +# tunnel IP ----- Public IP address of the CSR used for the IPSec tunnel +# mgmt port IP -- IP address of CSR for REST API access (not console port) +# user ---------- Username for REST management port access to Cisco CSR +# password ------ Password for REST management port access to Cisco CSR +# timeout ------- REST request timeout to Cisco CSR (optional) diff --git a/etc/neutron/plugins/embrane/heleos_conf.ini b/etc/neutron/plugins/embrane/heleos_conf.ini new file mode 100644 index 000000000..0ca9b46f8 --- /dev/null +++ b/etc/neutron/plugins/embrane/heleos_conf.ini @@ -0,0 +1,41 @@ +[heleos] +#configure the ESM management address +#in the first version of this plugin, only one ESM can be specified +#Example: +#esm_mgmt= + +#configure admin username and password +#admin_username= +#admin_password= + +#router image id +#Example: +#router_image=932ce713-e210-3d54-a0a5-518b0b5ee1b0 + +#mgmt shared security zone id +#defines the shared management security zone. Each tenant can have a private one configured through the ESM +#Example: +#mgmt_id=c0bc9b6c-f110-46cf-bb01-733bfe4b5a1a + +#in-band shared security zone id +#defines the shared in-band security zone. 
Each tenant can have a private one configured through the ESM +#Example: +#inband_id=a6b7999d-3806-4b04-81f6-e0c5c8271afc + +#oob-band shared security zone id +#defines the shared out-of-band security zone. Each tenant can have a private one configured through the ESM +#Example: +#oob_id=e7eda5cc-b977-46cb-9c14-cab43c1b7871 + +#dummy security zone id +#defines the dummy security zone ID. this security zone will be used by the DVAs with no neutron interfaces +#Example: +#dummy_utif_id=d9911310-25fc-4733-a2e0-c0eda024ef08 + +#resource pool id +#define the shared resource pool. Each tenant can have a private one configured through the ESM +#Example +#resource_pool_id= + +#define if the requests have to be executed asynchronously by the plugin or not +#async_requests= diff --git a/etc/neutron/plugins/hyperv/hyperv_neutron_plugin.ini b/etc/neutron/plugins/hyperv/hyperv_neutron_plugin.ini new file mode 100644 index 000000000..5eeec5706 --- /dev/null +++ b/etc/neutron/plugins/hyperv/hyperv_neutron_plugin.ini @@ -0,0 +1,63 @@ +[hyperv] +# (StrOpt) Type of network to allocate for tenant networks. The +# default value 'local' is useful only for single-box testing and +# provides no connectivity between hosts. You MUST either change this +# to 'vlan' and configure network_vlan_ranges below or to 'flat'. +# Set to 'none' to disable creation of tenant networks. +# +# tenant_network_type = local +# Example: tenant_network_type = vlan + +# (ListOpt) Comma-separated list of +# [::] tuples enumerating ranges +# of VLAN IDs on named physical networks that are available for +# allocation. All physical networks listed are available for flat and +# VLAN provider network creation. Specified ranges of VLAN IDs are +# available for tenant network allocation if tenant_network_type is +# 'vlan'. If empty, only gre and local networks may be created. 
+# +# network_vlan_ranges = +# Example: network_vlan_ranges = physnet1:1000:2999 + +[agent] +# Agent's polling interval in seconds +# polling_interval = 2 + +# (ListOpt) Comma separated list of : +# where the physical networks can be expressed with wildcards, +# e.g.: ."*:external". +# The referred external virtual switches need to be already present on +# the Hyper-V server. +# If a given physical network name will not match any value in the list +# the plugin will look for a virtual switch with the same name. +# +# physical_network_vswitch_mappings = *:external +# Example: physical_network_vswitch_mappings = net1:external1,net2:external2 + +# (StrOpt) Private virtual switch name used for local networking. +# +# local_network_vswitch = private +# Example: local_network_vswitch = custom_vswitch + +# (BoolOpt) Enables metrics collections for switch ports by using Hyper-V's +# metric APIs. Collected data can by retrieved by other apps and services, +# e.g.: Ceilometer. Requires Hyper-V / Windows Server 2012 and above. +# +# enable_metrics_collection = False + +#----------------------------------------------------------------------------- +# Sample Configurations. 
+#----------------------------------------------------------------------------- +# +# Neutron server: +# +# [HYPERV] +# tenant_network_type = vlan +# network_vlan_ranges = default:2000:3999 +# +# Agent running on Hyper-V node: +# +# [AGENT] +# polling_interval = 2 +# physical_network_vswitch_mappings = *:external +# local_network_vswitch = private diff --git a/etc/neutron/plugins/ibm/sdnve_neutron_plugin.ini b/etc/neutron/plugins/ibm/sdnve_neutron_plugin.ini new file mode 100644 index 000000000..0fab50706 --- /dev/null +++ b/etc/neutron/plugins/ibm/sdnve_neutron_plugin.ini @@ -0,0 +1,50 @@ +[sdnve] +# (ListOpt) The IP address of one (or more) SDN-VE controllers +# Default value is: controller_ips = 127.0.0.1 +# Example: controller_ips = 127.0.0.1,127.0.0.2 +# (StrOpt) The integration bridge for OF based implementation +# The default value for integration_bridge is None +# Example: integration_bridge = br-int +# (ListOpt) The interface mapping connecting the integration +# bridge to external network as a list of physical network names and +# interfaces: : +# Example: interface_mappings = default:eth2 +# (BoolOpt) Used to reset the integration bridge, if exists +# The default value for reset_bridge is True +# Example: reset_bridge = False +# (BoolOpt) Used to set the OVS controller as out-of-band +# The default value for out_of_band is True +# Example: out_of_band = False +# +# (BoolOpt) The fake controller for testing purposes +# Default value is: use_fake_controller = False +# (StrOpt) The port number for use with controller +# The default value for the port is 8443 +# Example: port = 8443 +# (StrOpt) The userid for use with controller +# The default value for the userid is admin +# Example: userid = sdnve_user +# (StrOpt) The password for use with controller +# The default value for the password is admin +# Example: password = sdnve_password +# +# (StrOpt) The default type of tenants (and associated resources) +# Available choices are: OVERLAY or OF +# The default 
value for tenant type is OVERLAY +# Example: default_tenant_type = OVERLAY +# (StrOpt) The string in tenant description that indicates +# Default value for OF tenants: of_signature = SDNVE-OF +# (StrOpt) The string in tenant description that indicates +# Default value for OVERLAY tenants: overlay_signature = SDNVE-OVERLAY + +[sdnve_agent] +# (IntOpt) Agent's polling interval in seconds +# polling_interval = 2 +# (StrOpt) What to use for root helper +# The default value: root_helper = 'sudo' +# (BoolOpt) Whether to use rpc or not +# The default value: rpc = True + +[securitygroup] +# The security group is not supported: +# firewall_driver = neutron.agent.firewall.NoopFirewallDriver diff --git a/etc/neutron/plugins/linuxbridge/linuxbridge_conf.ini b/etc/neutron/plugins/linuxbridge/linuxbridge_conf.ini new file mode 100644 index 000000000..94fe98036 --- /dev/null +++ b/etc/neutron/plugins/linuxbridge/linuxbridge_conf.ini @@ -0,0 +1,78 @@ +[vlans] +# (StrOpt) Type of network to allocate for tenant networks. The +# default value 'local' is useful only for single-box testing and +# provides no connectivity between hosts. You MUST change this to +# 'vlan' and configure network_vlan_ranges below in order for tenant +# networks to provide connectivity between hosts. Set to 'none' to +# disable creation of tenant networks. +# +# tenant_network_type = local +# Example: tenant_network_type = vlan + +# (ListOpt) Comma-separated list of +# [::] tuples enumerating ranges +# of VLAN IDs on named physical networks that are available for +# allocation. All physical networks listed are available for flat and +# VLAN provider network creation. Specified ranges of VLAN IDs are +# available for tenant network allocation if tenant_network_type is +# 'vlan'. If empty, only local networks may be created. 
+# +# network_vlan_ranges = +# Example: network_vlan_ranges = physnet1:1000:2999 + +[linux_bridge] +# (ListOpt) Comma-separated list of +# : tuples mapping physical +# network names to the agent's node-specific physical network +# interfaces to be used for flat and VLAN networks. All physical +# networks listed in network_vlan_ranges on the server should have +# mappings to appropriate interfaces on each agent. +# +# physical_interface_mappings = +# Example: physical_interface_mappings = physnet1:eth1 + +[vxlan] +# (BoolOpt) enable VXLAN on the agent +# VXLAN support can be enabled when agent is managed by ml2 plugin using +# linuxbridge mechanism driver. Useless if set while using linuxbridge plugin. +# enable_vxlan = False +# +# (IntOpt) use specific TTL for vxlan interface protocol packets +# ttl = +# +# (IntOpt) use specific TOS for vxlan interface protocol packets +# tos = +# +# (StrOpt) multicast group to use for broadcast emulation. +# This group must be the same on all the agents. +# vxlan_group = 224.0.0.1 +# +# (StrOpt) Local IP address to use for VXLAN endpoints (required) +# local_ip = +# +# (BoolOpt) Flag to enable l2population extension. This option should be used +# in conjunction with ml2 plugin l2population mechanism driver (in that case, +# both linuxbridge and l2population mechanism drivers should be loaded). +# It enables plugin to populate VXLAN forwarding table, in order to limit +# the use of broadcast emulation (multicast will be turned off if kernel and +# iproute2 supports unicast flooding - requires 3.11 kernel and iproute2 3.10) +# l2_population = False + +[agent] +# Agent's polling interval in seconds +# polling_interval = 2 + +# (BoolOpt) Enable server RPC compatibility with old (pre-havana) +# agents. 
+# +# rpc_support_old_agents = False +# Example: rpc_support_old_agents = True + +[securitygroup] +# Firewall driver for realizing neutron security group function +# firewall_driver = neutron.agent.firewall.NoopFirewallDriver +# Example: firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver + +# Controls if neutron security group is enabled or not. +# It should be false when you use nova security group. +# enable_security_group = True diff --git a/etc/neutron/plugins/metaplugin/metaplugin.ini b/etc/neutron/plugins/metaplugin/metaplugin.ini new file mode 100644 index 000000000..2b9bfa5ea --- /dev/null +++ b/etc/neutron/plugins/metaplugin/metaplugin.ini @@ -0,0 +1,31 @@ +# Config file for Metaplugin + +[meta] +# Comma separated list of flavor:neutron_plugin for plugins to load. +# Extension method is searched in the list order and the first one is used. +plugin_list = 'ml2:neutron.plugins.ml2.plugin.Ml2Plugin,nvp:neutron.plugins.vmware.plugin.NsxPluginV2' + +# Comma separated list of flavor:neutron_plugin for L3 service plugins +# to load. +# This is intended for specifying L2 plugins which support L3 functions. +# If you use a router service plugin, set this blank. +l3_plugin_list = + +# Default flavor to use, when flavor:network is not specified at network +# creation. +default_flavor = 'nvp' + +# Default L3 flavor to use, when flavor:router is not specified at router +# creation. +# Ignored if 'l3_plugin_list' is blank. +default_l3_flavor = + +# Comma separated list of supported extension aliases. +supported_extension_aliases = 'provider,binding,agent,dhcp_agent_scheduler' + +# Comma separated list of method:flavor to select specific plugin for a method. +# This has priority over method search order based on 'plugin_list'. +extension_map = 'get_port_stats:nvp' + +# Specifies flavor for plugin to handle 'q-plugin' RPC requests. 
+rpc_flavor = 'ml2' diff --git a/etc/neutron/plugins/midonet/midonet.ini b/etc/neutron/plugins/midonet/midonet.ini new file mode 100644 index 000000000..f2e940529 --- /dev/null +++ b/etc/neutron/plugins/midonet/midonet.ini @@ -0,0 +1,19 @@ + +[midonet] +# MidoNet API server URI +# midonet_uri = http://localhost:8080/midonet-api + +# MidoNet admin username +# username = admin + +# MidoNet admin password +# password = passw0rd + +# ID of the project that MidoNet admin user belongs to +# project_id = 77777777-7777-7777-7777-777777777777 + +# Virtual provider router ID +# provider_router_id = 00112233-0011-0011-0011-001122334455 + +# Path to midonet host uuid file +# midonet_host_uuid_path = /etc/midolman/host_uuid.properties diff --git a/etc/neutron/plugins/ml2/ml2_conf.ini b/etc/neutron/plugins/ml2/ml2_conf.ini new file mode 100644 index 000000000..54722df91 --- /dev/null +++ b/etc/neutron/plugins/ml2/ml2_conf.ini @@ -0,0 +1,62 @@ +[ml2] +# (ListOpt) List of network type driver entrypoints to be loaded from +# the neutron.ml2.type_drivers namespace. +# +# type_drivers = local,flat,vlan,gre,vxlan +# Example: type_drivers = flat,vlan,gre,vxlan + +# (ListOpt) Ordered list of network_types to allocate as tenant +# networks. The default value 'local' is useful for single-box testing +# but provides no connectivity between hosts. +# +# tenant_network_types = local +# Example: tenant_network_types = vlan,gre,vxlan + +# (ListOpt) Ordered list of networking mechanism driver entrypoints +# to be loaded from the neutron.ml2.mechanism_drivers namespace. +# mechanism_drivers = +# Example: mechanism_drivers = openvswitch,mlnx +# Example: mechanism_drivers = arista +# Example: mechanism_drivers = cisco,logger +# Example: mechanism_drivers = openvswitch,brocade +# Example: mechanism_drivers = linuxbridge,brocade + +[ml2_type_flat] +# (ListOpt) List of physical_network names with which flat networks +# can be created. 
Use * to allow flat networks with arbitrary +# physical_network names. +# +# flat_networks = +# Example:flat_networks = physnet1,physnet2 +# Example:flat_networks = * + +[ml2_type_vlan] +# (ListOpt) List of [::] tuples +# specifying physical_network names usable for VLAN provider and +# tenant networks, as well as ranges of VLAN tags on each +# physical_network available for allocation as tenant networks. +# +# network_vlan_ranges = +# Example: network_vlan_ranges = physnet1:1000:2999,physnet2 + +[ml2_type_gre] +# (ListOpt) Comma-separated list of : tuples enumerating ranges of GRE tunnel IDs that are available for tenant network allocation +# tunnel_id_ranges = + +[ml2_type_vxlan] +# (ListOpt) Comma-separated list of : tuples enumerating +# ranges of VXLAN VNI IDs that are available for tenant network allocation. +# +# vni_ranges = + +# (StrOpt) Multicast group for the VXLAN interface. When configured, will +# enable sending all broadcast traffic to this multicast group. When left +# unconfigured, will disable multicast VXLAN mode. +# +# vxlan_group = +# Example: vxlan_group = 239.1.1.1 + +[securitygroup] +# Controls if neutron security group is enabled or not. +# It should be false when you use nova security group. +# enable_security_group = True diff --git a/etc/neutron/plugins/ml2/ml2_conf_arista.ini b/etc/neutron/plugins/ml2/ml2_conf_arista.ini new file mode 100644 index 000000000..a4cfee0cd --- /dev/null +++ b/etc/neutron/plugins/ml2/ml2_conf_arista.ini @@ -0,0 +1,45 @@ +# Defines configuration options specific for Arista ML2 Mechanism driver + +[ml2_arista] +# (StrOpt) EOS IP address. This is required field. If not set, all +# communications to Arista EOS will fail +# +# eapi_host = +# Example: eapi_host = 192.168.0.1 +# +# (StrOpt) EOS command API username. This is required field. +# if not set, all communications to Arista EOS will fail. +# +# eapi_username = +# Example: arista_eapi_username = admin +# +# (StrOpt) EOS command API password. 
 This is required field. +# if not set, all communications to Arista EOS will fail. +# +# eapi_password = +# Example: eapi_password = my_password +# +# (StrOpt) Defines if hostnames are sent to Arista EOS as FQDNs +# ("node1.domain.com") or as short names ("node1"). This is +# optional. If not set, a value of "True" is assumed. +# +# use_fqdn = +# Example: use_fqdn = True +# +# (IntOpt) Sync interval in seconds between Neutron plugin and EOS. +# This field defines how often the synchronization is performed. +# This is an optional field. If not set, a value of 180 seconds +# is assumed. +# +# sync_interval = +# Example: sync_interval = 60 +# +# (StrOpt) Defines Region Name that is assigned to this OpenStack Controller. +# This is useful when multiple OpenStack/Neutron controllers are +# managing the same Arista HW clusters. Note that this name must +# match with the region name registered (or known) to keystone +# service. Authentication with Keystone is performed by EOS. +# This is optional. If not set, a value of "RegionOne" is assumed. +# +# region_name = +# Example: region_name = RegionOne diff --git a/etc/neutron/plugins/ml2/ml2_conf_brocade.ini b/etc/neutron/plugins/ml2/ml2_conf_brocade.ini new file mode 100644 index 000000000..66987e991 --- /dev/null +++ b/etc/neutron/plugins/ml2/ml2_conf_brocade.ini @@ -0,0 +1,13 @@ +[ml2_brocade] +# username = +# password = +# address = +# ostype = NOS +# physical_networks = physnet1,physnet2 +# +# Example: +# username = admin +# password = password +# address = 10.24.84.38 +# ostype = NOS +# physical_networks = physnet1,physnet2 diff --git a/etc/neutron/plugins/ml2/ml2_conf_cisco.ini b/etc/neutron/plugins/ml2/ml2_conf_cisco.ini new file mode 100644 index 000000000..95f963f83 --- /dev/null +++ b/etc/neutron/plugins/ml2/ml2_conf_cisco.ini @@ -0,0 +1,94 @@ +[ml2_cisco] + +# (StrOpt) A short prefix to prepend to the VLAN number when creating a +# VLAN interface. 
For example, if an interface is being created for +# VLAN 2001 it will be named 'q-2001' using the default prefix. +# +# vlan_name_prefix = q- +# Example: vlan_name_prefix = vnet- + +# (BoolOpt) A flag to enable round robin scheduling of routers for SVI. +# svi_round_robin = False + +# +# (StrOpt) The name of the physical_network managed via the Cisco Nexus Switch. +# This string value must be present in the ml2_conf.ini network_vlan_ranges +# variable. +# +# managed_physical_network = +# Example: managed_physical_network = physnet1 + +# Cisco Nexus Switch configurations. +# Each switch to be managed by Openstack Neutron must be configured here. +# +# Cisco Nexus Switch Format. +# [ml2_mech_cisco_nexus:] +# = (1) +# ssh_port= (2) +# username= (3) +# password= (4) +# +# (1) For each host connected to a port on the switch, specify the hostname +# and the Nexus physical port (interface) it is connected to. +# Valid intf_type's are 'ethernet' and 'port-channel'. +# The default setting for is 'ethernet' and need not be +# added to this setting. +# (2) The TCP port for connecting via SSH to manage the switch. This is +# port number 22 unless the switch has been configured otherwise. +# (3) The username for logging into the switch to manage it. +# (4) The password for logging into the switch to manage it. +# +# Example: +# [ml2_mech_cisco_nexus:1.1.1.1] +# compute1=1/1 +# compute2=ethernet:1/2 +# compute3=port-channel:1 +# ssh_port=22 +# username=admin +# password=mySecretPassword + +[ml2_cisco_apic] + +# Hostname for the APIC controller +# apic_host=1.1.1.1 + +# Username for the APIC controller +# apic_username=user + +# Password for the APIC controller +# apic_password=password + +# Port for the APIC Controller +# apic_port=80 + +# Names for APIC objects used by Neutron +# Note: When deploying multiple clouds against one APIC, +# these names must be unique between the clouds. 
+# apic_vmm_domain=openstack +# apic_vlan_ns_name=openstack_ns +# apic_node_profile=openstack_profile +# apic_entity_profile=openstack_entity +# apic_function_profile=openstack_function + +# The following flag will cause all the node profiles on the APIC to +# be cleared when neutron-server starts. This is typically used only +# for test environments that require clean-slate startup conditions. +# apic_clear_node_profiles=False + +# Specify your network topology. +# This section indicates how your compute nodes are connected to the fabric's +# switches and ports. The format is as follows: +# +# [switch:] +# ,= +# +# You can have multiple sections, one for each switch in your fabric that is +# participating in Openstack. e.g. +# +# [switch:17] +# ubuntu,ubuntu1=1/10 +# ubuntu2,ubuntu3=1/11 +# +# [switch:18] +# ubuntu5,ubuntu6=1/1 +# ubuntu7,ubuntu8=1/2 diff --git a/etc/neutron/plugins/ml2/ml2_conf_fslsdn.ini b/etc/neutron/plugins/ml2/ml2_conf_fslsdn.ini new file mode 100644 index 000000000..6ee4a4e00 --- /dev/null +++ b/etc/neutron/plugins/ml2/ml2_conf_fslsdn.ini @@ -0,0 +1,52 @@ +# Defines Configuration options for FSL SDN OS Mechanism Driver +# Cloud Resource Discovery (CRD) authorization credentials +[ml2_fslsdn] +#(StrOpt) User name for authentication to CRD. +# e.g.: user12 +# +# crd_user_name = + +#(StrOpt) Password for authentication to CRD. +# e.g.: secret +# +# crd_password = + +#(StrOpt) Tenant name for CRD service. +# e.g.: service +# +# crd_tenant_name = + +#(StrOpt) CRD auth URL. +# e.g.: http://127.0.0.1:5000/v2.0/ +# +# crd_auth_url = + +#(StrOpt) URL for connecting to CRD Service. 
+# e.g.: http://127.0.0.1:9797 +# +# crd_url= + +#(IntOpt) Timeout value for connecting to CRD service +# in seconds, e.g.: 30 +# +# crd_url_timeout= + +#(StrOpt) Region name for connecting to CRD in +# admin context, e.g.: RegionOne +# +# crd_region_name= + +#(BoolOpt)If set, ignore any SSL validation issues (boolean value) +# e.g.: False +# +# crd_api_insecure= + +#(StrOpt)Authorization strategy for connecting to CRD in admin +# context, e.g.: keystone +# +# crd_auth_strategy= + +#(StrOpt)Location of CA certificates file to use for CRD client +# requests. +# +# crd_ca_certificates_file= diff --git a/etc/neutron/plugins/ml2/ml2_conf_mlnx.ini b/etc/neutron/plugins/ml2/ml2_conf_mlnx.ini new file mode 100644 index 000000000..01b0797cf --- /dev/null +++ b/etc/neutron/plugins/ml2/ml2_conf_mlnx.ini @@ -0,0 +1,6 @@ +[eswitch] +# (StrOpt) Type of Network Interface to allocate for VM: +# mlnx_direct or hostdev according to libvirt terminology +# vnic_type = mlnx_direct +# (BoolOpt) Enable server compatibility with old nova +# apply_profile_patch = False diff --git a/etc/neutron/plugins/ml2/ml2_conf_ncs.ini b/etc/neutron/plugins/ml2/ml2_conf_ncs.ini new file mode 100644 index 000000000..dbbfcbd28 --- /dev/null +++ b/etc/neutron/plugins/ml2/ml2_conf_ncs.ini @@ -0,0 +1,28 @@ +# Defines configuration options specific to the Tail-f NCS Mechanism Driver + +[ml2_ncs] +# (StrOpt) Tail-f NCS HTTP endpoint for REST access to the OpenStack +# subtree. +# If this is not set then no HTTP requests will be made. +# +# url = +# Example: url = http://ncs/api/running/services/openstack + +# (StrOpt) Username for HTTP basic authentication to NCS. +# This is an optional parameter. If unspecified then no authentication is used. +# +# username = +# Example: username = admin + +# (StrOpt) Password for HTTP basic authentication to NCS. +# This is an optional parameter. If unspecified then no authentication is used. 
+# +# password = +# Example: password = admin + +# (IntOpt) Timeout in seconds to wait for NCS HTTP request completion. +# This is an optional parameter, default value is 10 seconds. +# +# timeout = +# Example: timeout = 15 + diff --git a/etc/neutron/plugins/ml2/ml2_conf_odl.ini b/etc/neutron/plugins/ml2/ml2_conf_odl.ini new file mode 100644 index 000000000..9e88c1bbf --- /dev/null +++ b/etc/neutron/plugins/ml2/ml2_conf_odl.ini @@ -0,0 +1,30 @@ +# Configuration for the OpenDaylight MechanismDriver + +[ml2_odl] +# (StrOpt) OpenDaylight REST URL +# If this is not set then no HTTP requests will be made. +# +# url = +# Example: url = http://192.168.56.1:8080/controller/nb/v2/neutron + +# (StrOpt) Username for HTTP basic authentication to ODL. +# +# username = +# Example: username = admin + +# (StrOpt) Password for HTTP basic authentication to ODL. +# +# password = +# Example: password = admin + +# (IntOpt) Timeout in seconds to wait for ODL HTTP request completion. +# This is an optional parameter, default value is 10 seconds. +# +# timeout = 10 +# Example: timeout = 15 + +# (IntOpt) Timeout in minutes to wait for a Tomcat session timeout. +# This is an optional parameter, default value is 30 minutes. +# +# session_timeout = 30 +# Example: session_timeout = 60 diff --git a/etc/neutron/plugins/ml2/ml2_conf_ofa.ini b/etc/neutron/plugins/ml2/ml2_conf_ofa.ini new file mode 100644 index 000000000..4a94b9870 --- /dev/null +++ b/etc/neutron/plugins/ml2/ml2_conf_ofa.ini @@ -0,0 +1,13 @@ +# Defines configuration options specific to the OpenFlow Agent Mechanism Driver + +[ovs] +# Please refer to configuration options to the OpenvSwitch + +[agent] +# (IntOpt) Number of seconds to retry acquiring an Open vSwitch datapath. +# This is an optional parameter, default value is 60 seconds. +# +# get_datapath_retry_times = +# Example: get_datapath_retry_times = 30 + +# Please refer to configuration options to the OpenvSwitch else the above. 
diff --git a/etc/neutron/plugins/mlnx/mlnx_conf.ini b/etc/neutron/plugins/mlnx/mlnx_conf.ini new file mode 100644 index 000000000..b12251116 --- /dev/null +++ b/etc/neutron/plugins/mlnx/mlnx_conf.ini @@ -0,0 +1,79 @@ +[mlnx] +# (StrOpt) Type of network to allocate for tenant networks. The +# default value is 'vlan' You MUST configure network_vlan_ranges below +# in order for tenant networks to provide connectivity between hosts. +# Set to 'none' to disable creation of tenant networks. +# +# tenant_network_type = vlan +# Example: tenant_network_type = vlan + +# (ListOpt) Comma-separated list of +# [::] tuples enumerating ranges +# of VLAN IDs on named physical networks that are available for +# allocation. All physical networks listed are available for flat and +# VLAN provider network creation. Specified ranges of VLAN IDs are +# available for tenant network allocation if tenant_network_type is +# 'vlan'. If empty, only local networks may be created. +# +# network_vlan_ranges = +# Example: network_vlan_ranges = default:1:100 + +# (ListOpt) Comma-separated list of +# : tuples mapping physical +# network names to physical network types. All physical +# networks listed in network_vlan_ranges should have +# mappings to appropriate physical network type. +# Type of the physical network can be either eth (Ethernet) or +# ib (InfiniBand). If empty, physical network eth type is assumed. +# +# physical_network_type_mappings = +# Example: physical_network_type_mappings = default:eth + +# (StrOpt) Type of the physical network, can be either 'eth' or 'ib' +# The default value is 'eth' +# physical_network_type = eth + +[eswitch] +# (ListOpt) Comma-separated list of +# : tuples mapping physical +# network names to the agent's node-specific physical network +# interfaces to be used for flat and VLAN networks. All physical +# networks listed in network_vlan_ranges on the server should have +# mappings to appropriate interfaces on each agent. 
+# +# physical_interface_mappings = +# Example: physical_interface_mappings = default:eth2 + +# (StrOpt) Type of Network Interface to allocate for VM: +# direct or hostdev according to libvirt terminology +# vnic_type = mlnx_direct + +# (StrOpt) Eswitch daemon end point connection url +# daemon_endpoint = 'tcp://127.0.0.1:60001' + +# The number of milliseconds the agent will wait for +# response on request to daemon +# request_timeout = 3000 + +# The number of retries the agent will send request +# to daemon before giving up +# retries = 3 + +# The backoff rate multiplier for waiting period between retries +# on request to daemon, i.e. value of 2 will double +# the request timeout each retry +# backoff_rate = 2 + +[agent] +# Agent's polling interval in seconds +# polling_interval = 2 + +# (BoolOpt) Enable server RPC compatibility with old (pre-havana) +# agents. +# +# rpc_support_old_agents = False + +[securitygroup] +# Controls if neutron security group is enabled or not. +# It should be false when you use nova security group. +# enable_security_group = True diff --git a/etc/neutron/plugins/nec/nec.ini b/etc/neutron/plugins/nec/nec.ini new file mode 100644 index 000000000..aa4171da7 --- /dev/null +++ b/etc/neutron/plugins/nec/nec.ini @@ -0,0 +1,60 @@ +# Sample Configurations + +[ovs] +# Do not change this parameter unless you have a good reason to. +# This is the name of the OVS integration bridge. There is one per hypervisor. +# The integration bridge acts as a virtual "patch port". All VM VIFs are +# attached to this bridge and then "patched" according to their network +# connectivity. +# integration_bridge = br-int + +[agent] +# Agent's polling interval in seconds +# polling_interval = 2 + +[securitygroup] +# Firewall driver for realizing neutron security group function +firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver + +# Controls if neutron security group is enabled or not. 
+# It should be false when you use nova security group. +# enable_security_group = True + +[ofc] +# Specify OpenFlow Controller Host, Port and Driver to connect. +# host = 127.0.0.1 +# port = 8888 + +# Base URL of OpenFlow Controller REST API. +# It is prepended to a path of each API request. +# path_prefix = + +# Drivers are in neutron/plugins/nec/drivers/ . +# driver = trema + +# PacketFilter is available when it's enabled in this configuration +# and supported by the driver. +# enable_packet_filter = true + +# Use SSL to connect +# use_ssl = false + +# Key file +# key_file = + +# Certificate file +# cert_file = + +# Disable SSL certificate verification +# insecure_ssl = false + +# Maximum attempts per OFC API request. NEC plugin retries +# API request to OFC when OFC returns ServiceUnavailable (503). +# The value must be greater than 0. +# api_max_attempts = 3 + +[provider] +# Default router provider to use. +# default_router_provider = l3-agent +# List of enabled router providers. +# router_providers = l3-agent,openflow diff --git a/etc/neutron/plugins/nuage/nuage_plugin.ini b/etc/neutron/plugins/nuage/nuage_plugin.ini new file mode 100644 index 000000000..994d1206c --- /dev/null +++ b/etc/neutron/plugins/nuage/nuage_plugin.ini @@ -0,0 +1,10 @@ +# Please fill in the correct data for all the keys below and uncomment key-value pairs +[restproxy] +#default_net_partition_name = +#auth_resource = /auth +#server = ip:port +#organization = org +#serverauth = uname:pass +#serverssl = True +#base_uri = /base + diff --git a/etc/neutron/plugins/oneconvergence/nvsdplugin.ini b/etc/neutron/plugins/oneconvergence/nvsdplugin.ini new file mode 100644 index 000000000..a1c05d971 --- /dev/null +++ b/etc/neutron/plugins/oneconvergence/nvsdplugin.ini @@ -0,0 +1,35 @@ +[nvsd] +# Configure the NVSD controller. The plugin proxies the api calls using +# to NVSD controller which implements the required functionality. 
+ +# IP address of NVSD controller api server +# nvsd_ip = + +# Port number of NVSD controller api server +# nvsd_port = 8082 + +# Authentication credentials to access the api server +# nvsd_user = +# nvsd_passwd = + +# API request timeout in seconds +# request_timeout = + +# Maximum number of retry attempts to login to the NVSD controller +# Specify 0 to retry until success (default) +# nvsd_retries = 0 + +[securitygroup] +# Specify firewall_driver option, if neutron security groups are disabled, +# then NoopFirewallDriver otherwise OVSHybridIptablesFirewallDriver. +# firewall_driver = neutron.agent.firewall.NoopFirewallDriver + +# Controls if neutron security group is enabled or not. +# It should be false when you use nova security group. +# enable_security_group = True + +[agent] +# root_helper = sudo /usr/local/bin/neutron-rootwrap /etc/neutron/rootwrap.conf + +[database] +# connection = mysql://root:@127.0.0.1/?charset=utf8 diff --git a/etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini b/etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini new file mode 100644 index 000000000..4beee58fa --- /dev/null +++ b/etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini @@ -0,0 +1,179 @@ +[ovs] +# (StrOpt) Type of network to allocate for tenant networks. The +# default value 'local' is useful only for single-box testing and +# provides no connectivity between hosts. You MUST either change this +# to 'vlan' and configure network_vlan_ranges below or change this to +# 'gre' or 'vxlan' and configure tunnel_id_ranges below in order for +# tenant networks to provide connectivity between hosts. Set to 'none' +# to disable creation of tenant networks. +# +# tenant_network_type = local +# Example: tenant_network_type = gre +# Example: tenant_network_type = vxlan + +# (ListOpt) Comma-separated list of +# [::] tuples enumerating ranges +# of VLAN IDs on named physical networks that are available for +# allocation. 
All physical networks listed are available for flat and +# VLAN provider network creation. Specified ranges of VLAN IDs are +# available for tenant network allocation if tenant_network_type is +# 'vlan'. If empty, only gre, vxlan and local networks may be created. +# +# network_vlan_ranges = +# Example: network_vlan_ranges = physnet1:1000:2999 + +# (BoolOpt) Set to True in the server and the agents to enable support +# for GRE or VXLAN networks. Requires kernel support for OVS patch ports and +# GRE or VXLAN tunneling. +# +# WARNING: This option will be deprecated in the Icehouse release, at which +# point setting tunnel_type below will be required to enable +# tunneling. +# +# enable_tunneling = False + +# (StrOpt) The type of tunnel network, if any, supported by the plugin. If +# this is set, it will cause tunneling to be enabled. If this is not set and +# the option enable_tunneling is set, this will default to 'gre'. +# +# tunnel_type = +# Example: tunnel_type = gre +# Example: tunnel_type = vxlan + +# (ListOpt) Comma-separated list of : tuples +# enumerating ranges of GRE or VXLAN tunnel IDs that are available for +# tenant network allocation if tenant_network_type is 'gre' or 'vxlan'. +# +# tunnel_id_ranges = +# Example: tunnel_id_ranges = 1:1000 + +# Do not change this parameter unless you have a good reason to. +# This is the name of the OVS integration bridge. There is one per hypervisor. +# The integration bridge acts as a virtual "patch bay". All VM VIFs are +# attached to this bridge and then "patched" according to their network +# connectivity. +# +# integration_bridge = br-int + +# Only used for the agent if tunnel_id_ranges (above) is not empty for +# the server. In most cases, the default value should be fine. 
+# +# tunnel_bridge = br-tun + +# Peer patch port in integration bridge for tunnel bridge +# int_peer_patch_port = patch-tun + +# Peer patch port in tunnel bridge for integration bridge +# tun_peer_patch_port = patch-int + +# Uncomment this line for the agent if tunnel_id_ranges (above) is not +# empty for the server. Set local-ip to be the local IP address of +# this hypervisor. +# +# local_ip = + +# (ListOpt) Comma-separated list of : tuples +# mapping physical network names to the agent's node-specific OVS +# bridge names to be used for flat and VLAN networks. The length of +# bridge names should be no more than 11. Each bridge must +# exist, and should have a physical network interface configured as a +# port. All physical networks listed in network_vlan_ranges on the +# server should have mappings to appropriate bridges on each agent. +# +# bridge_mappings = +# Example: bridge_mappings = physnet1:br-eth1 + +[agent] +# Agent's polling interval in seconds +# polling_interval = 2 + +# Minimize polling by monitoring ovsdb for interface changes +# minimize_polling = True + +# When minimize_polling = True, the number of seconds to wait before +# respawning the ovsdb monitor after losing communication with it +# ovsdb_monitor_respawn_interval = 30 + +# (ListOpt) The types of tenant network tunnels supported by the agent. +# Setting this will enable tunneling support in the agent. This can be set to +# either 'gre' or 'vxlan'. If this is unset, it will default to [] and +# disable tunneling support in the agent. When running the agent with the OVS +# plugin, this value must be the same as "tunnel_type" in the "[ovs]" section. +# When running the agent with ML2, you can specify as many values here as +# your compute hosts supports. +# +# tunnel_types = +# Example: tunnel_types = gre +# Example: tunnel_types = vxlan +# Example: tunnel_types = vxlan, gre + +# (IntOpt) The port number to utilize if tunnel_types includes 'vxlan'. 
By +# default, this will make use of the Open vSwitch default value of '4789' if +# not specified. +# +# vxlan_udp_port = +# Example: vxlan_udp_port = 8472 + +# (IntOpt) This is the MTU size of veth interfaces. +# Do not change unless you have a good reason to. +# The default MTU size of veth interfaces is 1500. +# veth_mtu = +# Example: veth_mtu = 1504 + +# (BoolOpt) Flag to enable l2-population extension. This option should only be +# used in conjunction with ml2 plugin and l2population mechanism driver. It'll +# enable plugin to populate remote ports macs and IPs (using fdb_add/remove +# RPC callbacks instead of tunnel_sync/update) on OVS agents in order to +# optimize tunnel management. +# +# l2_population = False + +# Enable local ARP responder. Requires OVS 2.1. This is only used by the l2 +# population ML2 MechanismDriver. +# +# arp_responder = False + +# (BoolOpt) Set or un-set the don't fragment (DF) bit on outgoing IP packet +# carrying GRE/VXLAN tunnel. The default value is True. +# +# dont_fragment = True + +[securitygroup] +# Firewall driver for realizing neutron security group function. +# firewall_driver = neutron.agent.firewall.NoopFirewallDriver +# Example: firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver + +# Controls if neutron security group is enabled or not. +# It should be false when you use nova security group. +# enable_security_group = True + +#----------------------------------------------------------------------------- +# Sample Configurations. +#----------------------------------------------------------------------------- +# +# 1. With VLANs on eth1. +# [ovs] +# network_vlan_ranges = default:2000:3999 +# tunnel_id_ranges = +# integration_bridge = br-int +# bridge_mappings = default:br-eth1 +# +# 2. With GRE tunneling. +# [ovs] +# network_vlan_ranges = +# tunnel_id_ranges = 1:1000 +# integration_bridge = br-int +# tunnel_bridge = br-tun +# local_ip = 10.0.0.3 +# +# 3. With VXLAN tunneling. 
+# [ovs] +# network_vlan_ranges = +# tenant_network_type = vxlan +# tunnel_type = vxlan +# tunnel_id_ranges = 1:1000 +# integration_bridge = br-int +# tunnel_bridge = br-tun +# local_ip = 10.0.0.3 +# [agent] +# tunnel_types = vxlan diff --git a/etc/neutron/plugins/plumgrid/plumgrid.ini b/etc/neutron/plugins/plumgrid/plumgrid.ini new file mode 100644 index 000000000..bfe8062ae --- /dev/null +++ b/etc/neutron/plugins/plumgrid/plumgrid.ini @@ -0,0 +1,14 @@ +# Config file for Neutron PLUMgrid Plugin + +[plumgriddirector] +# This line should be pointing to the PLUMgrid Director, +# for the PLUMgrid platform. +# director_server= +# director_server_port= +# Authentication parameters for the Director. +# These are the admin credentials to manage and control +# the PLUMgrid Director server. +# username= +# password= +# servertimeout=5 +# driver= diff --git a/etc/neutron/plugins/ryu/ryu.ini b/etc/neutron/plugins/ryu/ryu.ini new file mode 100644 index 000000000..9d9cfa258 --- /dev/null +++ b/etc/neutron/plugins/ryu/ryu.ini @@ -0,0 +1,44 @@ +[ovs] +# integration_bridge = br-int + +# openflow_rest_api = : +# openflow_rest_api = 127.0.0.1:8080 + +# tunnel key range: 0 < tunnel_key_min < tunnel_key_max +# VLAN: 12bits, GRE, VXLAN: 24bits +# tunnel_key_min = 1 +# tunnel_key_max = 0xffffff + +# tunnel_ip = +# tunnel_interface = interface for tunneling +# when tunnel_ip is NOT specified, ip address is read +# from this interface +# tunnel_ip = +# tunnel_interface = +tunnel_interface = eth0 + +# ovsdb_port = port number on which ovsdb is listening +# ryu-agent uses this parameter to setup ovsdb. +# ovs-vsctl set-manager ptcp: +# See set-manager section of man ovs-vsctl for details. +# currently ptcp is only supported. 
+# ovsdb_ip = +# ovsdb_interface = interface for ovsdb +# when ovsdb_addr NOT specified, ip address is gotten +# from this interface +# ovsdb_port = 6634 +# ovsdb_ip = +# ovsdb_interface = +ovsdb_interface = eth0 + +[securitygroup] +# Firewall driver for realizing neutron security group function +# firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver + +# Controls if neutron security group is enabled or not. +# It should be false when you use nova security group. +# enable_security_group = True + +[agent] +# Agent's polling interval in seconds +# polling_interval = 2 diff --git a/etc/neutron/plugins/vmware/nsx.ini b/etc/neutron/plugins/vmware/nsx.ini new file mode 100644 index 000000000..6ce36c04b --- /dev/null +++ b/etc/neutron/plugins/vmware/nsx.ini @@ -0,0 +1,202 @@ +[DEFAULT] +# User name for NSX controller +# nsx_user = admin + +# Password for NSX controller +# nsx_password = admin + +# Total time limit for a cluster request +# (including retries across different controllers) +# req_timeout = 30 + +# Time before aborting a request on an unresponsive controller +# http_timeout = 30 + +# Maximum number of times a particular request should be retried +# retries = 2 + +# Maximum number of times a redirect response should be followed +# redirects = 2 + +# Comma-separated list of NSX controller endpoints (:). When port +# is omitted, 443 is assumed. This option MUST be specified, e.g.: +# nsx_controllers = xx.yy.zz.ww:443, aa.bb.cc.dd, ee.ff.gg.hh.ee:80 + +# UUID of the pre-existing default NSX Transport zone to be used for creating +# tunneled isolated "Neutron" networks. This option MUST be specified, e.g.: +# default_tz_uuid = 1e8e52cf-fa7f-46b0-a14a-f99835a9cb53 + +# (Optional) UUID for the default l3 gateway service to use with this cluster. +# To be specified if planning to use logical routers with external gateways. 
+# default_l3_gw_service_uuid = + +# (Optional) UUID for the default l2 gateway service to use with this cluster. +# To be specified for providing a predefined gateway tenant for connecting their networks. +# default_l2_gw_service_uuid = + +# (Optional) UUID for the default service cluster. A service cluster is introduced to +# represent a group of gateways and it is needed in order to use Logical Services like +# dhcp and metadata in the logical space. NOTE: If agent_mode is set to 'agentless' this +# config parameter *MUST BE* set to a valid pre-existent service cluster uuid. +# default_service_cluster_uuid = + +# Name of the default interface name to be used on network-gateway. This value +# will be used for any device associated with a network gateway for which an +# interface name was not specified +# default_interface_name = breth0 + +[quotas] +# number of network gateways allowed per tenant, -1 means unlimited +# quota_network_gateway = 5 + +[vcns] +# URL for VCNS manager +# manager_uri = https://management_ip + +# User name for VCNS manager +# user = admin + +# Password for VCNS manager +# password = default + +# (Optional) Datacenter ID for Edge deployment +# datacenter_moid = + +# (Optional) Deployment Container ID for NSX Edge deployment +# If not specified, either a default global container will be used, or +# the resource pool and datastore specified below will be used +# deployment_container_id = + +# (Optional) Resource pool ID for NSX Edge deployment +# resource_pool_id = + +# (Optional) Datastore ID for NSX Edge deployment +# datastore_id = + +# (Required) UUID of logic switch for physical network connectivity +# external_network = + +# (Optional) Asynchronous task status check interval +# default is 2000 (millisecond) +# task_status_check_interval = 2000 + +[nsx] +# Maximum number of ports for each bridged logical switch +# The recommended value for this parameter varies with NSX version +# Please use: +# NSX 2.x -> 64 +# NSX 3.0, 3.1 -> 5000 +# 
NSX 3.2 -> 10000 +# max_lp_per_bridged_ls = 5000 + +# Maximum number of ports for each overlay (stt, gre) logical switch +# max_lp_per_overlay_ls = 256 + +# Number of connections to each controller node. +# default is 10 +# concurrent_connections = 10 + +# Number of seconds a generation id should be valid for (default -1 meaning do not time out) +# nsx_gen_timeout = -1 + +# Acceptable values for 'metadata_mode' are: +# - 'access_network': this enables a dedicated connection to the metadata +# proxy for metadata server access via Neutron router. +# - 'dhcp_host_route': this enables host route injection via the dhcp agent. +# This option is only useful if running on a host that does not support +# namespaces otherwise access_network should be used. +# metadata_mode = access_network + +# The default network transport type to use (stt, gre, bridge, ipsec_gre, or ipsec_stt) +# default_transport_type = stt + +# Specifies in which mode the plugin needs to operate in order to provide DHCP and +# metadata proxy services to tenant instances. If 'agent' is chosen (default) +# the NSX plugin relies on external RPC agents (i.e. dhcp and metadata agents) to +# provide such services. In this mode, the plugin supports API extensions 'agent' +# and 'dhcp_agent_scheduler'. If 'agentless' is chosen (experimental in Icehouse), +# the plugin will use NSX logical services for DHCP and metadata proxy. This +# simplifies the deployment model for Neutron, in that the plugin no longer requires +# the RPC agents to operate. When 'agentless' is chosen, the config option metadata_mode +# becomes ineffective. The 'agentless' mode is supported from NSX 4.2 or above. +# Furthermore, a 'combined' mode is also provided and is used to support existing +# deployments that want to adopt the agentless mode going forward. 
With this mode, +# existing networks keep being served by the existing infrastructure (thus preserving +# backward compatibility), whereas new networks will be served by the new infrastructure. +# Migration tools are provided to 'move' one network from one model to another; with +# agent_mode set to 'combined', option 'network_auto_schedule' in neutron.conf is +# ignored, as new networks will no longer be scheduled to existing dhcp agents. +# agent_mode = agent + +# Specifies which mode packet replication should be done in. If set to service +# a service node is required in order to perform packet replication. This can +# also be set to source if one wants replication to be performed locally (NOTE: +# usually only useful for testing if one does not want to deploy a service node). +# replication_mode = service + +[nsx_sync] +# Interval in seconds between runs of the status synchronization task. +# The plugin will aim at resynchronizing operational status for all +# resources in this interval, and it should be therefore large enough +# to ensure the task is feasible. Otherwise the plugin will be +# constantly synchronizing resource status, ie: a new task is started +# as soon as the previous is completed. +# If this value is set to 0, the state synchronization thread for this +# Neutron instance will be disabled. +# state_sync_interval = 10 + +# Random additional delay between two runs of the state synchronization task. +# An additional wait time between 0 and max_random_sync_delay seconds +# will be added on top of state_sync_interval. +# max_random_sync_delay = 0 + +# Minimum delay, in seconds, between two status synchronization requests for NSX. +# Depending on chunk size, controller load, and other factors, state +# synchronization requests might be pretty heavy. This means the +# controller might take time to respond, and its load might be quite +# increased by them. This parameter allows to specify a minimum +# interval between two subsequent requests. 
+# The value for this parameter must never exceed state_sync_interval. +# If this does, an error will be raised at startup. +# min_sync_req_delay = 1 + +# Minimum number of resources to be retrieved from NSX in a single status +# synchronization request. +# The actual size of the chunk will increase if the number of resources is such +# that using the minimum chunk size will cause the interval between two +# requests to be less than min_sync_req_delay +# min_chunk_size = 500 + +# Enable this option to allow punctual state synchronization on show +# operations. In this way, show operations will always fetch the operational +# status of the resource from the NSX backend, and this might have +# a considerable impact on overall performance. +# always_read_status = False + +[nsx_lsn] +# Pull LSN information from NSX in case it is missing from the local +# data store. This is useful to rebuild the local store in case of +# server recovery +# sync_on_missing_data = False + +[nsx_dhcp] +# (Optional) Comma separated list of additional dns servers. Default is an empty list +# extra_domain_name_servers = + +# Domain to use for building the hostnames +# domain_name = openstacklocal + +# Default DHCP lease time +# default_lease_time = 43200 + +[nsx_metadata] +# IP address used by Metadata server +# metadata_server_address = 127.0.0.1 + +# TCP Port used by Metadata server +# metadata_server_port = 8775 + +# When proxying metadata requests, Neutron signs the Instance-ID header with a +# shared secret to prevent spoofing. 
You may select any string for a secret, +# but it MUST match with the configuration used by the Metadata server +# metadata_shared_secret = diff --git a/etc/neutron/rootwrap.d/debug.filters b/etc/neutron/rootwrap.d/debug.filters new file mode 100644 index 000000000..b61d96017 --- /dev/null +++ b/etc/neutron/rootwrap.d/debug.filters @@ -0,0 +1,14 @@ +# neutron-rootwrap command filters for nodes on which neutron is +# expected to control network +# +# This file should be owned by (and only-writeable by) the root user + +# format seems to be +# cmd-name: filter-name, raw-command, user, args + +[Filters] + +# This is needed because we should ping +# from inside a namespace which requires root +ping: RegExpFilter, ping, root, ping, -w, \d+, -c, \d+, [0-9\.]+ +ping6: RegExpFilter, ping6, root, ping6, -w, \d+, -c, \d+, [0-9A-Fa-f:]+ diff --git a/etc/neutron/rootwrap.d/dhcp.filters b/etc/neutron/rootwrap.d/dhcp.filters new file mode 100644 index 000000000..88d61e8e3 --- /dev/null +++ b/etc/neutron/rootwrap.d/dhcp.filters @@ -0,0 +1,38 @@ +# neutron-rootwrap command filters for nodes on which neutron is +# expected to control network +# +# This file should be owned by (and only-writeable by) the root user + +# format seems to be +# cmd-name: filter-name, raw-command, user, args + +[Filters] + +# dhcp-agent +dnsmasq: EnvFilter, dnsmasq, root, NEUTRON_NETWORK_ID= +# dhcp-agent uses kill as well, that's handled by the generic KillFilter +# it looks like these are the only signals needed, per +# neutron/agent/linux/dhcp.py +kill_dnsmasq: KillFilter, root, /sbin/dnsmasq, -9, -HUP +kill_dnsmasq_usr: KillFilter, root, /usr/sbin/dnsmasq, -9, -HUP + +ovs-vsctl: CommandFilter, ovs-vsctl, root +ivs-ctl: CommandFilter, ivs-ctl, root +mm-ctl: CommandFilter, mm-ctl, root +dhcp_release: CommandFilter, dhcp_release, root + +# metadata proxy +metadata_proxy: CommandFilter, neutron-ns-metadata-proxy, root +metadata_proxy_quantum: CommandFilter, quantum-ns-metadata-proxy, root +# If installed 
from source (say, by devstack), the prefix will be +# /usr/local instead of /usr/bin. +metadata_proxy_local: CommandFilter, /usr/local/bin/neutron-ns-metadata-proxy, root +metadata_proxy_local_quantum: CommandFilter, /usr/local/bin/quantum-ns-metadata-proxy, root +# RHEL invocation of the metadata proxy will report /usr/bin/python +kill_metadata: KillFilter, root, /usr/bin/python, -9 +kill_metadata7: KillFilter, root, /usr/bin/python2.7, -9 +kill_metadata6: KillFilter, root, /usr/bin/python2.6, -9 + +# ip_lib +ip: IpFilter, ip, root +ip_exec: IpNetnsExecFilter, ip, root diff --git a/etc/neutron/rootwrap.d/iptables-firewall.filters b/etc/neutron/rootwrap.d/iptables-firewall.filters new file mode 100644 index 000000000..b8a6ab5b3 --- /dev/null +++ b/etc/neutron/rootwrap.d/iptables-firewall.filters @@ -0,0 +1,21 @@ +# neutron-rootwrap command filters for nodes on which neutron is +# expected to control network +# +# This file should be owned by (and only-writeable by) the root user + +# format seems to be +# cmd-name: filter-name, raw-command, user, args + +[Filters] + +# neutron/agent/linux/iptables_manager.py +# "iptables-save", ... +iptables-save: CommandFilter, iptables-save, root +iptables-restore: CommandFilter, iptables-restore, root +ip6tables-save: CommandFilter, ip6tables-save, root +ip6tables-restore: CommandFilter, ip6tables-restore, root + +# neutron/agent/linux/iptables_manager.py +# "iptables", "-A", ... 
+iptables: CommandFilter, iptables, root +ip6tables: CommandFilter, ip6tables, root diff --git a/etc/neutron/rootwrap.d/l3.filters b/etc/neutron/rootwrap.d/l3.filters new file mode 100644 index 000000000..2031d779e --- /dev/null +++ b/etc/neutron/rootwrap.d/l3.filters @@ -0,0 +1,41 @@ +# neutron-rootwrap command filters for nodes on which neutron is +# expected to control network +# +# This file should be owned by (and only-writeable by) the root user + +# format seems to be +# cmd-name: filter-name, raw-command, user, args + +[Filters] + +# arping +arping: CommandFilter, arping, root + +# l3_agent +sysctl: CommandFilter, sysctl, root +route: CommandFilter, route, root + +# metadata proxy +metadata_proxy: CommandFilter, neutron-ns-metadata-proxy, root +metadata_proxy_quantum: CommandFilter, quantum-ns-metadata-proxy, root +# If installed from source (say, by devstack), the prefix will be +# /usr/local instead of /usr/bin. +metadata_proxy_local: CommandFilter, /usr/local/bin/neutron-ns-metadata-proxy, root +metadata_proxy_local_quantum: CommandFilter, /usr/local/bin/quantum-ns-metadata-proxy, root +# RHEL invocation of the metadata proxy will report /usr/bin/python +kill_metadata: KillFilter, root, /usr/bin/python, -9 +kill_metadata7: KillFilter, root, /usr/bin/python2.7, -9 +kill_metadata6: KillFilter, root, /usr/bin/python2.6, -9 + +# ip_lib +ip: IpFilter, ip, root +ip_exec: IpNetnsExecFilter, ip, root + +# ovs_lib (if OVSInterfaceDriver is used) +ovs-vsctl: CommandFilter, ovs-vsctl, root + +# iptables_manager +iptables-save: CommandFilter, iptables-save, root +iptables-restore: CommandFilter, iptables-restore, root +ip6tables-save: CommandFilter, ip6tables-save, root +ip6tables-restore: CommandFilter, ip6tables-restore, root diff --git a/etc/neutron/rootwrap.d/lbaas-haproxy.filters b/etc/neutron/rootwrap.d/lbaas-haproxy.filters new file mode 100644 index 000000000..b4e1ecba2 --- /dev/null +++ b/etc/neutron/rootwrap.d/lbaas-haproxy.filters @@ -0,0 +1,26 @@ +# 
neutron-rootwrap command filters for nodes on which neutron is +# expected to control network +# +# This file should be owned by (and only-writeable by) the root user + +# format seems to be +# cmd-name: filter-name, raw-command, user, args + +[Filters] + +# haproxy +haproxy: CommandFilter, haproxy, root + +# lbaas-agent uses kill as well, that's handled by the generic KillFilter +kill_haproxy_usr: KillFilter, root, /usr/sbin/haproxy, -9, -HUP + +ovs-vsctl: CommandFilter, ovs-vsctl, root +mm-ctl: CommandFilter, mm-ctl, root + +# ip_lib +ip: IpFilter, ip, root +ip_exec: IpNetnsExecFilter, ip, root +route: CommandFilter, route, root + +# arping +arping: CommandFilter, arping, root diff --git a/etc/neutron/rootwrap.d/linuxbridge-plugin.filters b/etc/neutron/rootwrap.d/linuxbridge-plugin.filters new file mode 100644 index 000000000..03df39592 --- /dev/null +++ b/etc/neutron/rootwrap.d/linuxbridge-plugin.filters @@ -0,0 +1,19 @@ +# neutron-rootwrap command filters for nodes on which neutron is +# expected to control network +# +# This file should be owned by (and only-writeable by) the root user + +# format seems to be +# cmd-name: filter-name, raw-command, user, args + +[Filters] + +# linuxbridge-agent +# unclear whether both variants are necessary, but I'm transliterating +# from the old mechanism +brctl: CommandFilter, brctl, root +bridge: CommandFilter, bridge, root + +# ip_lib +ip: IpFilter, ip, root +ip_exec: IpNetnsExecFilter, ip, root diff --git a/etc/neutron/rootwrap.d/nec-plugin.filters b/etc/neutron/rootwrap.d/nec-plugin.filters new file mode 100644 index 000000000..89c4cfe35 --- /dev/null +++ b/etc/neutron/rootwrap.d/nec-plugin.filters @@ -0,0 +1,12 @@ +# neutron-rootwrap command filters for nodes on which neutron is +# expected to control network +# +# This file should be owned by (and only-writeable by) the root user + +# format seems to be +# cmd-name: filter-name, raw-command, user, args + +[Filters] + +# nec_neutron_agent +ovs-vsctl: CommandFilter, 
ovs-vsctl, root diff --git a/etc/neutron/rootwrap.d/openvswitch-plugin.filters b/etc/neutron/rootwrap.d/openvswitch-plugin.filters new file mode 100644 index 000000000..b63a83b94 --- /dev/null +++ b/etc/neutron/rootwrap.d/openvswitch-plugin.filters @@ -0,0 +1,22 @@ +# neutron-rootwrap command filters for nodes on which neutron is +# expected to control network +# +# This file should be owned by (and only-writeable by) the root user + +# format seems to be +# cmd-name: filter-name, raw-command, user, args + +[Filters] + +# openvswitch-agent +# unclear whether both variants are necessary, but I'm transliterating +# from the old mechanism +ovs-vsctl: CommandFilter, ovs-vsctl, root +ovs-ofctl: CommandFilter, ovs-ofctl, root +kill_ovsdb_client: KillFilter, root, /usr/bin/ovsdb-client, -9 +ovsdb-client: CommandFilter, ovsdb-client, root +xe: CommandFilter, xe, root + +# ip_lib +ip: IpFilter, ip, root +ip_exec: IpNetnsExecFilter, ip, root diff --git a/etc/neutron/rootwrap.d/ryu-plugin.filters b/etc/neutron/rootwrap.d/ryu-plugin.filters new file mode 100644 index 000000000..0a70b8bc9 --- /dev/null +++ b/etc/neutron/rootwrap.d/ryu-plugin.filters @@ -0,0 +1,21 @@ +# neutron-rootwrap command filters for nodes on which neutron is +# expected to control network +# +# This file should be owned by (and only-writeable by) the root user + +# format seems to be +# cmd-name: filter-name, raw-command, user, args + +[Filters] + +# ryu-agent +# unclear whether both variants are necessary, but I'm transliterating +# from the old mechanism + +# neutron/plugins/ryu/agent/ryu_neutron_agent.py: +# "ovs-vsctl", "--timeout=2", ... +ovs-vsctl: CommandFilter, ovs-vsctl, root + +# neutron/plugins/ryu/agent/ryu_neutron_agent.py: +# "xe", "vif-param-get", ... 
+xe: CommandFilter, xe, root diff --git a/etc/neutron/rootwrap.d/vpnaas.filters b/etc/neutron/rootwrap.d/vpnaas.filters new file mode 100644 index 000000000..7848136b9 --- /dev/null +++ b/etc/neutron/rootwrap.d/vpnaas.filters @@ -0,0 +1,13 @@ +# neutron-rootwrap command filters for nodes on which neutron is +# expected to control network +# +# This file should be owned by (and only-writeable by) the root user + +# format seems to be +# cmd-name: filter-name, raw-command, user, args + +[Filters] + +ip: IpFilter, ip, root +ip_exec: IpNetnsExecFilter, ip, root +openswan: CommandFilter, ipsec, root diff --git a/etc/policy.json b/etc/policy.json new file mode 100644 index 000000000..369e0a80d --- /dev/null +++ b/etc/policy.json @@ -0,0 +1,136 @@ +{ + "context_is_admin": "role:admin", + "admin_or_owner": "rule:context_is_admin or tenant_id:%(tenant_id)s", + "admin_or_network_owner": "rule:context_is_admin or tenant_id:%(network:tenant_id)s", + "admin_only": "rule:context_is_admin", + "regular_user": "", + "shared": "field:networks:shared=True", + "shared_firewalls": "field:firewalls:shared=True", + "external": "field:networks:router:external=True", + "default": "rule:admin_or_owner", + + "subnets:private:read": "rule:admin_or_owner", + "subnets:private:write": "rule:admin_or_owner", + "subnets:shared:read": "rule:regular_user", + "subnets:shared:write": "rule:admin_only", + + "create_subnet": "rule:admin_or_network_owner", + "get_subnet": "rule:admin_or_owner or rule:shared", + "update_subnet": "rule:admin_or_network_owner", + "delete_subnet": "rule:admin_or_network_owner", + + "create_network": "", + "get_network": "rule:admin_or_owner or rule:shared or rule:external", + "get_network:router:external": "rule:regular_user", + "get_network:segments": "rule:admin_only", + "get_network:provider:network_type": "rule:admin_only", + "get_network:provider:physical_network": "rule:admin_only", + "get_network:provider:segmentation_id": "rule:admin_only", + "get_network:queue_id": 
"rule:admin_only", + "create_network:shared": "rule:admin_only", + "create_network:router:external": "rule:admin_only", + "create_network:segments": "rule:admin_only", + "create_network:provider:network_type": "rule:admin_only", + "create_network:provider:physical_network": "rule:admin_only", + "create_network:provider:segmentation_id": "rule:admin_only", + "update_network": "rule:admin_or_owner", + "update_network:segments": "rule:admin_only", + "update_network:shared": "rule:admin_only", + "update_network:provider:network_type": "rule:admin_only", + "update_network:provider:physical_network": "rule:admin_only", + "update_network:provider:segmentation_id": "rule:admin_only", + "delete_network": "rule:admin_or_owner", + + "create_port": "", + "create_port:mac_address": "rule:admin_or_network_owner", + "create_port:fixed_ips": "rule:admin_or_network_owner", + "create_port:port_security_enabled": "rule:admin_or_network_owner", + "create_port:binding:host_id": "rule:admin_only", + "create_port:binding:profile": "rule:admin_only", + "create_port:mac_learning_enabled": "rule:admin_or_network_owner", + "get_port": "rule:admin_or_owner", + "get_port:queue_id": "rule:admin_only", + "get_port:binding:vif_type": "rule:admin_only", + "get_port:binding:vif_details": "rule:admin_only", + "get_port:binding:host_id": "rule:admin_only", + "get_port:binding:profile": "rule:admin_only", + "update_port": "rule:admin_or_owner", + "update_port:fixed_ips": "rule:admin_or_network_owner", + "update_port:port_security_enabled": "rule:admin_or_network_owner", + "update_port:binding:host_id": "rule:admin_only", + "update_port:binding:profile": "rule:admin_only", + "update_port:mac_learning_enabled": "rule:admin_or_network_owner", + "delete_port": "rule:admin_or_owner", + + "create_router:external_gateway_info:enable_snat": "rule:admin_only", + "update_router:external_gateway_info:enable_snat": "rule:admin_only", + + "create_firewall": "", + "get_firewall": "rule:admin_or_owner", + 
"create_firewall:shared": "rule:admin_only", + "get_firewall:shared": "rule:admin_only", + "update_firewall": "rule:admin_or_owner", + "update_firewall:shared": "rule:admin_only", + "delete_firewall": "rule:admin_or_owner", + + "create_firewall_policy": "", + "get_firewall_policy": "rule:admin_or_owner or rule:shared_firewalls", + "create_firewall_policy:shared": "rule:admin_or_owner", + "update_firewall_policy": "rule:admin_or_owner", + "delete_firewall_policy": "rule:admin_or_owner", + + "create_firewall_rule": "", + "get_firewall_rule": "rule:admin_or_owner or rule:shared_firewalls", + "update_firewall_rule": "rule:admin_or_owner", + "delete_firewall_rule": "rule:admin_or_owner", + + "create_qos_queue": "rule:admin_only", + "get_qos_queue": "rule:admin_only", + + "update_agent": "rule:admin_only", + "delete_agent": "rule:admin_only", + "get_agent": "rule:admin_only", + + "create_dhcp-network": "rule:admin_only", + "delete_dhcp-network": "rule:admin_only", + "get_dhcp-networks": "rule:admin_only", + "create_l3-router": "rule:admin_only", + "delete_l3-router": "rule:admin_only", + "get_l3-routers": "rule:admin_only", + "get_dhcp-agents": "rule:admin_only", + "get_l3-agents": "rule:admin_only", + "get_loadbalancer-agent": "rule:admin_only", + "get_loadbalancer-pools": "rule:admin_only", + + "create_router": "rule:regular_user", + "get_router": "rule:admin_or_owner", + "update_router:add_router_interface": "rule:admin_or_owner", + "update_router:remove_router_interface": "rule:admin_or_owner", + "delete_router": "rule:admin_or_owner", + + "create_floatingip": "rule:regular_user", + "update_floatingip": "rule:admin_or_owner", + "delete_floatingip": "rule:admin_or_owner", + "get_floatingip": "rule:admin_or_owner", + + "create_network_profile": "rule:admin_only", + "update_network_profile": "rule:admin_only", + "delete_network_profile": "rule:admin_only", + "get_network_profiles": "", + "get_network_profile": "", + "update_policy_profiles": "rule:admin_only", + 
"get_policy_profiles": "", + "get_policy_profile": "", + + "create_metering_label": "rule:admin_only", + "delete_metering_label": "rule:admin_only", + "get_metering_label": "rule:admin_only", + + "create_metering_label_rule": "rule:admin_only", + "delete_metering_label_rule": "rule:admin_only", + "get_metering_label_rule": "rule:admin_only", + + "get_service_provider": "rule:regular_user", + "get_lsn": "rule:admin_only", + "create_lsn": "rule:admin_only" +} diff --git a/etc/rootwrap.conf b/etc/rootwrap.conf new file mode 100644 index 000000000..dee1dd94b --- /dev/null +++ b/etc/rootwrap.conf @@ -0,0 +1,34 @@ +# Configuration for neutron-rootwrap +# This file should be owned by (and only-writeable by) the root user + +[DEFAULT] +# List of directories to load filter definitions from (separated by ','). +# These directories MUST all be only writeable by root ! +filters_path=/etc/neutron/rootwrap.d,/usr/share/neutron/rootwrap + +# List of directories to search executables in, in case filters do not +# explicitely specify a full path (separated by ',') +# If not specified, defaults to system PATH environment variable. +# These directories MUST all be only writeable by root ! +exec_dirs=/sbin,/usr/sbin,/bin,/usr/bin + +# Enable logging to syslog +# Default value is False +use_syslog=False + +# Which syslog facility to use. +# Valid values include auth, authpriv, syslog, local0, local1... +# Default value is 'syslog' +syslog_log_facility=syslog + +# Which messages to log. +# INFO means log all usage +# ERROR means only log unsuccessful attempts +syslog_log_level=ERROR + +[xenapi] +# XenAPI configuration is only required by the L2 agent if it is to +# target a XenServer/XCP compute host's dom0. 
+xenapi_connection_url= +xenapi_connection_username=root +xenapi_connection_password= diff --git a/etc/services.conf b/etc/services.conf new file mode 100644 index 000000000..f8a609005 --- /dev/null +++ b/etc/services.conf @@ -0,0 +1,40 @@ +[radware] +#vdirect_address = 0.0.0.0 +#ha_secondary_address= +#vdirect_user = vDirect +#vdirect_password = radware +#service_ha_pair = False +#service_throughput = 1000 +#service_ssl_throughput = 200 +#service_compression_throughput = 100 +#service_cache = 20 +#service_adc_type = VA +#service_adc_version= +#service_session_mirroring_enabled = False +#service_isl_vlan = -1 +#service_resource_pool_ids = [] +#actions_to_skip = 'setup_l2_l3' +#l4_action_name = 'BaseCreate' +#l2_l3_workflow_name = openstack_l2_l3 +#l4_workflow_name = openstack_l4 +#l2_l3_ctor_params = service: _REPLACE_, ha_network_name: HA-Network, ha_ip_pool_name: default, allocate_ha_vrrp: True, allocate_ha_ips: True +#l2_l3_setup_params = data_port: 1, data_ip_address: 192.168.200.99, data_ip_mask: 255.255.255.0, gateway: 192.168.200.1, ha_port: 2 + +[netscaler_driver] +#netscaler_ncc_uri = https://ncc_server.acme.org/ncc/v1/api +#netscaler_ncc_username = admin +#netscaler_ncc_password = secret + +[heleoslb] +#esm_mgmt = +#admin_username = +#admin_password = +#lb_image = +#inband_id = +#oob_id = +#mgmt_id = +#dummy_utif_id = +#resource_pool_id = +#async_requests = +#lb_flavor = small +#sync_interval = 60 diff --git a/etc/vpn_agent.ini b/etc/vpn_agent.ini new file mode 100644 index 000000000..c3089df95 --- /dev/null +++ b/etc/vpn_agent.ini @@ -0,0 +1,14 @@ +[DEFAULT] +# VPN-Agent configuration file +# Note vpn-agent inherits l3-agent, so you can use configs on l3-agent also + +[vpnagent] +# vpn device drivers which vpn agent will use +# If we want to use multiple drivers, we need to define this option multiple times. 
+# vpn_device_driver=neutron.services.vpn.device_drivers.ipsec.OpenSwanDriver +# vpn_device_driver=neutron.services.vpn.device_drivers.cisco_ipsec.CiscoCsrIPsecDriver +# vpn_device_driver=another_driver + +[ipsec] +# Status check interval +# ipsec_status_check_interval=60 diff --git a/neutron/__init__.py b/neutron/__init__.py new file mode 100644 index 000000000..b2c81bde7 --- /dev/null +++ b/neutron/__init__.py @@ -0,0 +1,21 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import gettext + + +gettext.install('neutron', unicode=1) diff --git a/neutron/agent/__init__.py b/neutron/agent/__init__.py new file mode 100644 index 000000000..0b3d2db5e --- /dev/null +++ b/neutron/agent/__init__.py @@ -0,0 +1,16 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
diff --git a/neutron/agent/common/__init__.py b/neutron/agent/common/__init__.py new file mode 100644 index 000000000..0b3d2db5e --- /dev/null +++ b/neutron/agent/common/__init__.py @@ -0,0 +1,16 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/neutron/agent/common/config.py b/neutron/agent/common/config.py new file mode 100644 index 000000000..5f83517d5 --- /dev/null +++ b/neutron/agent/common/config.py @@ -0,0 +1,123 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
import os

from oslo.config import cfg

from neutron.common import config
from neutron.openstack.common import log as logging


LOG = logging.getLogger(__name__)


# Option definitions shared by several agents; each agent registers only
# the groups it needs through the register_* helpers below.
ROOT_HELPER_OPTS = [
    cfg.StrOpt('root_helper', default='sudo',
               help=_('Root helper application.')),
]

AGENT_STATE_OPTS = [
    cfg.FloatOpt('report_interval', default=30,
                 help=_('Seconds between nodes reporting state to server; '
                        'should be less than agent_down_time, best if it '
                        'is half or less than agent_down_time.')),
]

INTERFACE_DRIVER_OPTS = [
    cfg.StrOpt('interface_driver',
               help=_("The driver used to manage the virtual interface.")),
]

USE_NAMESPACES_OPTS = [
    cfg.BoolOpt('use_namespaces', default=True,
                help=_("Allow overlapping IP.")),
]


def get_log_args(conf, log_file_name):
    """Translate the agent's logging configuration into CLI flags.

    Builds the command-line arguments handed to a spawned helper process
    (e.g. neutron-ns-metadata-proxy) so it logs the same way as the agent
    itself; *log_file_name* is the file name used when file logging is on.
    """
    args = []
    if conf.debug:
        args.append('--debug')
    if conf.verbose:
        args.append('--verbose')
    if conf.log_dir or conf.log_file:
        # File logging takes precedence over syslog; derive the log
        # directory from whichever of log_dir/log_file is configured.
        args.append('--log-file=%s' % log_file_name)
        if conf.log_dir and conf.log_file:
            log_dir = os.path.dirname(
                os.path.join(conf.log_dir, conf.log_file))
        elif conf.log_dir:
            log_dir = conf.log_dir
        elif conf.log_file:
            log_dir = os.path.dirname(conf.log_file)
        else:
            log_dir = None
        if log_dir:
            args.append('--log-dir=%s' % log_dir)
    else:
        if conf.use_syslog:
            args.append('--use-syslog')
        if conf.syslog_log_facility:
            args.append(
                '--syslog-log-facility=%s' % conf.syslog_log_facility)
    return args


def register_root_helper(conf):
    """Register root_helper in both DEFAULT and the [AGENT] section.

    The DEFAULT registration exists only for backward compatibility with
    old configuration files; see get_root_helper().
    """
    conf.register_opts(ROOT_HELPER_OPTS)
    conf.register_opts(ROOT_HELPER_OPTS, 'AGENT')


def register_agent_state_opts_helper(conf):
    """Register the state-report options under [AGENT]."""
    conf.register_opts(AGENT_STATE_OPTS, 'AGENT')


def register_interface_driver_opts_helper(conf):
    """Register the interface_driver option in DEFAULT."""
    conf.register_opts(INTERFACE_DRIVER_OPTS)


def register_use_namespaces_opts_helper(conf):
    """Register the use_namespaces option in DEFAULT."""
    conf.register_opts(USE_NAMESPACES_OPTS)


def get_root_helper(conf):
    """Return the configured root helper command.

    Prefers [AGENT] root_helper; falls back to the deprecated DEFAULT
    location (logging a deprecation warning), and finally to 'sudo'.
    """
    agent_helper = conf.AGENT.root_helper
    if agent_helper != 'sudo':
        return agent_helper

    legacy_helper = conf.root_helper
    if legacy_helper != 'sudo':
        LOG.deprecated(_('DEFAULT.root_helper is deprecated! Please move '
                         'root_helper configuration to [AGENT] section.'))
        return legacy_helper

    return 'sudo'


def setup_conf():
    """Create a fresh ConfigOpts pre-loaded with the state_path option."""
    bind_opts = [
        cfg.StrOpt('state_path',
                   default='/var/lib/neutron',
                   help=_('Top-level directory for maintaining dhcp state')),
    ]

    conf = cfg.ConfigOpts()
    conf.register_opts(bind_opts)
    return conf


# Re-exported here for convenience so agents can call config.setup_logging().
setup_logging = config.setup_logging
+ +import os +import sys + +import eventlet +eventlet.monkey_patch() + +import netaddr +from oslo.config import cfg + +from neutron.agent.common import config +from neutron.agent.linux import dhcp +from neutron.agent.linux import external_process +from neutron.agent.linux import interface +from neutron.agent.linux import ovs_lib # noqa +from neutron.agent import rpc as agent_rpc +from neutron.common import config as common_config +from neutron.common import constants +from neutron.common import exceptions +from neutron.common import rpc_compat +from neutron.common import topics +from neutron.common import utils +from neutron import context +from neutron import manager +from neutron.openstack.common import importutils +from neutron.openstack.common import log as logging +from neutron.openstack.common import loopingcall +from neutron.openstack.common import service +from neutron import service as neutron_service + +LOG = logging.getLogger(__name__) + + +class DhcpAgent(manager.Manager): + OPTS = [ + cfg.IntOpt('resync_interval', default=5, + help=_("Interval to resync.")), + cfg.StrOpt('dhcp_driver', + default='neutron.agent.linux.dhcp.Dnsmasq', + help=_("The driver used to manage the DHCP server.")), + cfg.BoolOpt('enable_isolated_metadata', default=False, + help=_("Support Metadata requests on isolated networks.")), + cfg.BoolOpt('enable_metadata_network', default=False, + help=_("Allows for serving metadata requests from a " + "dedicated network. 
Requires " + "enable_isolated_metadata = True")), + cfg.IntOpt('num_sync_threads', default=4, + help=_('Number of threads to use during sync process.')), + cfg.StrOpt('metadata_proxy_socket', + default='$state_path/metadata_proxy', + help=_('Location of Metadata Proxy UNIX domain ' + 'socket')), + ] + + def __init__(self, host=None): + super(DhcpAgent, self).__init__(host=host) + self.needs_resync_reasons = [] + self.conf = cfg.CONF + self.cache = NetworkCache() + self.root_helper = config.get_root_helper(self.conf) + self.dhcp_driver_cls = importutils.import_class(self.conf.dhcp_driver) + ctx = context.get_admin_context_without_session() + self.plugin_rpc = DhcpPluginApi(topics.PLUGIN, + ctx, self.conf.use_namespaces) + # create dhcp dir to store dhcp info + dhcp_dir = os.path.dirname("/%s/dhcp/" % self.conf.state_path) + if not os.path.isdir(dhcp_dir): + os.makedirs(dhcp_dir, 0o755) + self.dhcp_version = self.dhcp_driver_cls.check_version() + self._populate_networks_cache() + + def _populate_networks_cache(self): + """Populate the networks cache when the DHCP-agent starts.""" + try: + existing_networks = self.dhcp_driver_cls.existing_dhcp_networks( + self.conf, + self.root_helper + ) + for net_id in existing_networks: + net = dhcp.NetModel(self.conf.use_namespaces, + {"id": net_id, + "subnets": [], + "ports": []}) + self.cache.put(net) + except NotImplementedError: + # just go ahead with an empty networks cache + LOG.debug( + _("The '%s' DHCP-driver does not support retrieving of a " + "list of existing networks"), + self.conf.dhcp_driver + ) + + def after_start(self): + self.run() + LOG.info(_("DHCP agent started")) + + def run(self): + """Activate the DHCP agent.""" + self.sync_state() + self.periodic_resync() + + def call_driver(self, action, network, **action_kwargs): + """Invoke an action on a DHCP driver instance.""" + LOG.debug(_('Calling driver for network: %(net)s action: %(action)s'), + {'net': network.id, 'action': action}) + try: + # the Driver 
expects something that is duck typed similar to + # the base models. + driver = self.dhcp_driver_cls(self.conf, + network, + self.root_helper, + self.dhcp_version, + self.plugin_rpc) + + getattr(driver, action)(**action_kwargs) + return True + except exceptions.Conflict: + # No need to resync here, the agent will receive the event related + # to a status update for the network + LOG.warning(_('Unable to %(action)s dhcp for %(net_id)s: there is ' + 'a conflict with its current state; please check ' + 'that the network and/or its subnet(s) still exist.') + % {'net_id': network.id, 'action': action}) + except Exception as e: + self.schedule_resync(e) + if (isinstance(e, rpc_compat.RemoteError) + and e.exc_type == 'NetworkNotFound' + or isinstance(e, exceptions.NetworkNotFound)): + LOG.warning(_("Network %s has been deleted."), network.id) + else: + LOG.exception(_('Unable to %(action)s dhcp for %(net_id)s.') + % {'net_id': network.id, 'action': action}) + + def schedule_resync(self, reason): + """Schedule a resync for a given reason.""" + self.needs_resync_reasons.append(reason) + + @utils.synchronized('dhcp-agent') + def sync_state(self): + """Sync the local DHCP state with Neutron.""" + LOG.info(_('Synchronizing state')) + pool = eventlet.GreenPool(cfg.CONF.num_sync_threads) + known_network_ids = set(self.cache.get_network_ids()) + + try: + active_networks = self.plugin_rpc.get_active_networks_info() + active_network_ids = set(network.id for network in active_networks) + for deleted_id in known_network_ids - active_network_ids: + try: + self.disable_dhcp_helper(deleted_id) + except Exception as e: + self.schedule_resync(e) + LOG.exception(_('Unable to sync network state on deleted ' + 'network %s'), deleted_id) + + for network in active_networks: + pool.spawn(self.safe_configure_dhcp_for_network, network) + pool.waitall() + LOG.info(_('Synchronizing state complete')) + + except Exception as e: + self.schedule_resync(e) + LOG.exception(_('Unable to sync network 
state.')) + + def _periodic_resync_helper(self): + """Resync the dhcp state at the configured interval.""" + while True: + eventlet.sleep(self.conf.resync_interval) + if self.needs_resync_reasons: + # be careful to avoid a race with additions to list + # from other threads + reasons = self.needs_resync_reasons + self.needs_resync_reasons = [] + for r in reasons: + LOG.debug(_("resync: %(reason)s"), + {"reason": r}) + self.sync_state() + + def periodic_resync(self): + """Spawn a thread to periodically resync the dhcp state.""" + eventlet.spawn(self._periodic_resync_helper) + + def safe_get_network_info(self, network_id): + try: + network = self.plugin_rpc.get_network_info(network_id) + if not network: + LOG.warn(_('Network %s has been deleted.'), network_id) + return network + except Exception as e: + self.schedule_resync(e) + LOG.exception(_('Network %s info call failed.'), network_id) + + def enable_dhcp_helper(self, network_id): + """Enable DHCP for a network that meets enabling criteria.""" + network = self.safe_get_network_info(network_id) + if network: + self.configure_dhcp_for_network(network) + + def safe_configure_dhcp_for_network(self, network): + try: + self.configure_dhcp_for_network(network) + except (exceptions.NetworkNotFound, RuntimeError): + LOG.warn(_('Network %s may have been deleted and its resources ' + 'may have already been disposed.'), network.id) + + def configure_dhcp_for_network(self, network): + if not network.admin_state_up: + return + + for subnet in network.subnets: + if subnet.enable_dhcp: + if self.call_driver('enable', network): + if (self.conf.use_namespaces and + self.conf.enable_isolated_metadata): + self.enable_isolated_metadata_proxy(network) + self.cache.put(network) + break + + def disable_dhcp_helper(self, network_id): + """Disable DHCP for a network known to the agent.""" + network = self.cache.get_network_by_id(network_id) + if network: + if (self.conf.use_namespaces and + self.conf.enable_isolated_metadata): + 
self.disable_isolated_metadata_proxy(network) + if self.call_driver('disable', network): + self.cache.remove(network) + + def refresh_dhcp_helper(self, network_id): + """Refresh or disable DHCP for a network depending on the current state + of the network. + """ + old_network = self.cache.get_network_by_id(network_id) + if not old_network: + # DHCP current not running for network. + return self.enable_dhcp_helper(network_id) + + network = self.safe_get_network_info(network_id) + if not network: + return + + old_cidrs = set(s.cidr for s in old_network.subnets if s.enable_dhcp) + new_cidrs = set(s.cidr for s in network.subnets if s.enable_dhcp) + + if new_cidrs and old_cidrs == new_cidrs: + self.call_driver('reload_allocations', network) + self.cache.put(network) + elif new_cidrs: + if self.call_driver('restart', network): + self.cache.put(network) + else: + self.disable_dhcp_helper(network.id) + + @utils.synchronized('dhcp-agent') + def network_create_end(self, context, payload): + """Handle the network.create.end notification event.""" + network_id = payload['network']['id'] + self.enable_dhcp_helper(network_id) + + @utils.synchronized('dhcp-agent') + def network_update_end(self, context, payload): + """Handle the network.update.end notification event.""" + network_id = payload['network']['id'] + if payload['network']['admin_state_up']: + self.enable_dhcp_helper(network_id) + else: + self.disable_dhcp_helper(network_id) + + @utils.synchronized('dhcp-agent') + def network_delete_end(self, context, payload): + """Handle the network.delete.end notification event.""" + self.disable_dhcp_helper(payload['network_id']) + + @utils.synchronized('dhcp-agent') + def subnet_update_end(self, context, payload): + """Handle the subnet.update.end notification event.""" + network_id = payload['subnet']['network_id'] + self.refresh_dhcp_helper(network_id) + + # Use the update handler for the subnet create event. 
+ subnet_create_end = subnet_update_end + + @utils.synchronized('dhcp-agent') + def subnet_delete_end(self, context, payload): + """Handle the subnet.delete.end notification event.""" + subnet_id = payload['subnet_id'] + network = self.cache.get_network_by_subnet_id(subnet_id) + if network: + self.refresh_dhcp_helper(network.id) + + @utils.synchronized('dhcp-agent') + def port_update_end(self, context, payload): + """Handle the port.update.end notification event.""" + updated_port = dhcp.DictModel(payload['port']) + network = self.cache.get_network_by_id(updated_port.network_id) + if network: + self.cache.put_port(updated_port) + self.call_driver('reload_allocations', network) + + # Use the update handler for the port create event. + port_create_end = port_update_end + + @utils.synchronized('dhcp-agent') + def port_delete_end(self, context, payload): + """Handle the port.delete.end notification event.""" + port = self.cache.get_port_by_id(payload['port_id']) + if port: + network = self.cache.get_network_by_id(port.network_id) + self.cache.remove_port(port) + self.call_driver('reload_allocations', network) + + def enable_isolated_metadata_proxy(self, network): + + # The proxy might work for either a single network + # or all the networks connected via a router + # to the one passed as a parameter + neutron_lookup_param = '--network_id=%s' % network.id + meta_cidr = netaddr.IPNetwork(dhcp.METADATA_DEFAULT_CIDR) + has_metadata_subnet = any(netaddr.IPNetwork(s.cidr) in meta_cidr + for s in network.subnets) + if (self.conf.enable_metadata_network and has_metadata_subnet): + router_ports = [port for port in network.ports + if (port.device_owner == + constants.DEVICE_OWNER_ROUTER_INTF)] + if router_ports: + # Multiple router ports should not be allowed + if len(router_ports) > 1: + LOG.warning(_("%(port_num)d router ports found on the " + "metadata access network. 
Only the port " + "%(port_id)s, for router %(router_id)s " + "will be considered"), + {'port_num': len(router_ports), + 'port_id': router_ports[0].id, + 'router_id': router_ports[0].device_id}) + neutron_lookup_param = ('--router_id=%s' % + router_ports[0].device_id) + + def callback(pid_file): + metadata_proxy_socket = cfg.CONF.metadata_proxy_socket + proxy_cmd = ['neutron-ns-metadata-proxy', + '--pid_file=%s' % pid_file, + '--metadata_proxy_socket=%s' % metadata_proxy_socket, + neutron_lookup_param, + '--state_path=%s' % self.conf.state_path, + '--metadata_port=%d' % dhcp.METADATA_PORT] + proxy_cmd.extend(config.get_log_args( + cfg.CONF, 'neutron-ns-metadata-proxy-%s.log' % network.id)) + return proxy_cmd + + pm = external_process.ProcessManager( + self.conf, + network.id, + self.root_helper, + network.namespace) + pm.enable(callback) + + def disable_isolated_metadata_proxy(self, network): + pm = external_process.ProcessManager( + self.conf, + network.id, + self.root_helper, + network.namespace) + pm.disable() + + +class DhcpPluginApi(rpc_compat.RpcProxy): + """Agent side of the dhcp rpc API. + + API version history: + 1.0 - Initial version. + 1.1 - Added get_active_networks_info, create_dhcp_port, + and update_dhcp_port methods. 
+ + """ + + BASE_RPC_API_VERSION = '1.1' + + def __init__(self, topic, context, use_namespaces): + super(DhcpPluginApi, self).__init__( + topic=topic, default_version=self.BASE_RPC_API_VERSION) + self.context = context + self.host = cfg.CONF.host + self.use_namespaces = use_namespaces + + def get_active_networks_info(self): + """Make a remote process call to retrieve all network info.""" + networks = self.call(self.context, + self.make_msg('get_active_networks_info', + host=self.host), + topic=self.topic) + return [dhcp.NetModel(self.use_namespaces, n) for n in networks] + + def get_network_info(self, network_id): + """Make a remote process call to retrieve network info.""" + network = self.call(self.context, + self.make_msg('get_network_info', + network_id=network_id, + host=self.host), + topic=self.topic) + if network: + return dhcp.NetModel(self.use_namespaces, network) + + def get_dhcp_port(self, network_id, device_id): + """Make a remote process call to get the dhcp port.""" + port = self.call(self.context, + self.make_msg('get_dhcp_port', + network_id=network_id, + device_id=device_id, + host=self.host), + topic=self.topic) + if port: + return dhcp.DictModel(port) + + def create_dhcp_port(self, port): + """Make a remote process call to create the dhcp port.""" + port = self.call(self.context, + self.make_msg('create_dhcp_port', + port=port, + host=self.host), + topic=self.topic) + if port: + return dhcp.DictModel(port) + + def update_dhcp_port(self, port_id, port): + """Make a remote process call to update the dhcp port.""" + port = self.call(self.context, + self.make_msg('update_dhcp_port', + port_id=port_id, + port=port, + host=self.host), + topic=self.topic) + if port: + return dhcp.DictModel(port) + + def release_dhcp_port(self, network_id, device_id): + """Make a remote process call to release the dhcp port.""" + return self.call(self.context, + self.make_msg('release_dhcp_port', + network_id=network_id, + device_id=device_id, + host=self.host), + 
class NetworkCache(object):
    """Agent-side cache of the network state the DHCP agent manages.

    Holds one entry per network plus reverse lookup tables mapping subnet
    ids and port ids back to their owning network id. Network objects are
    expected to expose ``id``, ``subnets`` and ``ports`` attributes, and
    ports a ``network_id`` attribute (the dhcp.NetModel/DictModel shape).
    """

    def __init__(self):
        self.cache = {}          # network id -> network model
        self.subnet_lookup = {}  # subnet id -> owning network id
        self.port_lookup = {}    # port id -> owning network id

    def get_network_ids(self):
        """Return the ids of all cached networks as a snapshot list."""
        # list() decouples the result from the underlying dict so callers
        # may mutate the cache while iterating over the returned ids.
        return list(self.cache)

    def get_network_by_id(self, network_id):
        """Return the cached network with *network_id*, or None."""
        return self.cache.get(network_id)

    def get_network_by_subnet_id(self, subnet_id):
        """Return the network owning *subnet_id*, or None."""
        return self.cache.get(self.subnet_lookup.get(subnet_id))

    def get_network_by_port_id(self, port_id):
        """Return the network owning *port_id*, or None."""
        return self.cache.get(self.port_lookup.get(port_id))

    def put(self, network):
        """Insert or replace *network*, refreshing all lookup tables."""
        if network.id in self.cache:
            self.remove(self.cache[network.id])

        self.cache[network.id] = network

        for subnet in network.subnets:
            self.subnet_lookup[subnet.id] = network.id

        for port in network.ports:
            self.port_lookup[port.id] = network.id

    def remove(self, network):
        """Drop *network* and its subnet/port lookup entries.

        Raises KeyError if the network is not cached.
        """
        del self.cache[network.id]

        for subnet in network.subnets:
            del self.subnet_lookup[subnet.id]

        for port in network.ports:
            del self.port_lookup[port.id]

    def put_port(self, port):
        """Insert or replace *port* on its network.

        Assumes the port's network is already cached — TODO confirm all
        callers guard on get_network_by_id first.
        """
        network = self.get_network_by_id(port.network_id)
        for index in range(len(network.ports)):
            if network.ports[index].id == port.id:
                network.ports[index] = port
                break
        else:
            network.ports.append(port)

        self.port_lookup[port.id] = network.id

    def remove_port(self, port):
        """Remove *port* from its network and the port lookup table."""
        network = self.get_network_by_port_id(port.id)

        for index in range(len(network.ports)):
            # Match on id rather than on full object equality: a caller may
            # hold a stale copy of the port whose other fields have since
            # changed, and an equality test would then silently skip the
            # removal and leak the port_lookup entry.
            if network.ports[index].id == port.id:
                del network.ports[index]
                del self.port_lookup[port.id]
                break

    def get_port_by_id(self, port_id):
        """Return the cached port with *port_id*, or None."""
        network = self.get_network_by_port_id(port_id)
        if network:
            for port in network.ports:
                if port.id == port_id:
                    return port

    def get_state(self):
        """Return aggregate network/subnet/port counts for state reports."""
        net_ids = self.get_network_ids()
        num_nets = len(net_ids)
        num_subnets = 0
        num_ports = 0
        for net_id in net_ids:
            network = self.get_network_by_id(net_id)
            num_subnets += len(network.subnets)
            num_ports += len(network.ports)
        return {'networks': num_nets,
                'subnets': num_subnets,
                'ports': num_ports}
+ " State report for this agent will be disabled.")) + self.heartbeat.stop() + self.run() + return + except Exception: + LOG.exception(_("Failed reporting state!")) + return + if self.agent_state.pop('start_flag', None): + self.run() + + def agent_updated(self, context, payload): + """Handle the agent_updated notification event.""" + self.schedule_resync(_("Agent updated: %(payload)s") % + {"payload": payload}) + LOG.info(_("agent_updated by server side %s!"), payload) + + def after_start(self): + LOG.info(_("DHCP agent started")) + + +def register_options(): + cfg.CONF.register_opts(DhcpAgent.OPTS) + config.register_interface_driver_opts_helper(cfg.CONF) + config.register_use_namespaces_opts_helper(cfg.CONF) + config.register_agent_state_opts_helper(cfg.CONF) + config.register_root_helper(cfg.CONF) + cfg.CONF.register_opts(dhcp.OPTS) + cfg.CONF.register_opts(interface.OPTS) + + +def main(): + register_options() + common_config.init(sys.argv[1:]) + config.setup_logging(cfg.CONF) + server = neutron_service.Service.create( + binary='neutron-dhcp-agent', + topic=topics.DHCP_AGENT, + report_interval=cfg.CONF.AGENT.report_interval, + manager='neutron.agent.dhcp_agent.DhcpAgentWithStateReport') + service.launch(server).wait() diff --git a/neutron/agent/firewall.py b/neutron/agent/firewall.py new file mode 100644 index 000000000..6c9cd2502 --- /dev/null +++ b/neutron/agent/firewall.py @@ -0,0 +1,138 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2012, Nachi Ueno, NTT MCL, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
import abc
import contextlib

import six


@six.add_metaclass(abc.ABCMeta)
class FirewallDriver(object):
    """Firewall Driver base class.

    Defines methods that any driver providing security groups
    and provider firewall functionality should implement.
    Note the port attribute should have information of security group ids
    and security group rules.

    The dict of port should have
        device : interface name
        fixed_ips: ips of the device
        mac_address: mac_address of the device
        security_groups: [sgid, sgid]
        security_group_rules : [ rule, rule ]
        the rule must contain ethertype and direction
        the rule may contain security_group_id,
            protocol, port_min, port_max
            source_ip_prefix, source_port_min,
            source_port_max, dest_ip_prefix, and
            remote_group_id
    Note: source_group_ip in REST API should be converted by this rule
        if direction is ingress:
            remote_group_ip will be a source_ip_prefix
        if direction is egress:
            remote_group_ip will be a dest_ip_prefix
    Note: remote_group_id in REST API should be converted by this rule
        if direction is ingress:
            remote_group_id will be a list of source_ip_prefix
        if direction is egress:
            remote_group_id will be a list of dest_ip_prefix
        remote_group_id is also used to manage membership updates
    """

    def prepare_port_filter(self, port):
        """Prepare filters for the port.

        This method should be called before the port is created.
        """
        raise NotImplementedError()

    def apply_port_filter(self, port):
        """Apply port filter.

        Once this method returns, the port should be firewalled
        appropriately. This method should as far as possible be a
        no-op. It's vastly preferred to get everything set up in
        prepare_port_filter.
        """
        raise NotImplementedError()

    def update_port_filter(self, port):
        """Refresh security group rules from the data store.

        Called when a port gets added to or removed from the security
        group the port is a member of, or when the group gains or loses
        a rule.
        """
        raise NotImplementedError()

    def remove_port_filter(self, port):
        """Stop filtering port."""
        raise NotImplementedError()

    def filter_defer_apply_on(self):
        """Defer application of filtering rule."""
        pass

    def filter_defer_apply_off(self):
        """Turn off deferral of rules and apply the rules now."""
        pass

    @property
    def ports(self):
        """Returns filtered ports."""
        pass

    @contextlib.contextmanager
    def defer_apply(self):
        """Defer apply context.

        Rules changed inside this context are applied in one batch when
        the context exits (even on error, via the finally clause).
        """
        self.filter_defer_apply_on()
        try:
            yield
        finally:
            self.filter_defer_apply_off()


class NoopFirewallDriver(FirewallDriver):
    """Noop Firewall Driver.

    Firewall driver which does nothing.
    This driver is for disabling the firewall functionality.
    """

    def prepare_port_filter(self, port):
        pass

    def apply_port_filter(self, port):
        pass

    def update_port_filter(self, port):
        pass

    def remove_port_filter(self, port):
        pass

    def filter_defer_apply_on(self):
        pass

    def filter_defer_apply_off(self):
        pass

    @property
    def ports(self):
        # No ports are ever filtered by this driver.
        return {}
import abc

from oslo.config import cfg
import six

from neutron.common import log


@six.add_metaclass(abc.ABCMeta)
class L2populationRpcCallBackMixin(object):
    """Agent-side RPC callbacks for the L2 population mechanism.

    The server fans out fdb-entry add/remove/update notifications; each
    callback here filters on the target host and delegates to the
    driver-specific abstract fdb_* methods.
    """

    @log.log
    def add_fdb_entries(self, context, fdb_entries, host=None):
        # A missing host means the notification is fanned out to all agents.
        if not host or host == cfg.CONF.host:
            self.fdb_add(context, fdb_entries)

    @log.log
    def remove_fdb_entries(self, context, fdb_entries, host=None):
        # Same host filtering as add_fdb_entries.
        if not host or host == cfg.CONF.host:
            self.fdb_remove(context, fdb_entries)

    @log.log
    def update_fdb_entries(self, context, fdb_entries, host=None):
        # Same host filtering as add_fdb_entries.
        if not host or host == cfg.CONF.host:
            self.fdb_update(context, fdb_entries)

    @abc.abstractmethod
    def fdb_add(self, context, fdb_entries):
        """Apply the given fdb entries locally (driver specific)."""
        pass

    @abc.abstractmethod
    def fdb_remove(self, context, fdb_entries):
        """Remove the given fdb entries locally (driver specific)."""
        pass

    @abc.abstractmethod
    def fdb_update(self, context, fdb_entries):
        """Update the given fdb entries locally (driver specific)."""
        pass
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +import sys + +import eventlet +eventlet.monkey_patch() + +import netaddr +from oslo.config import cfg + +from neutron.agent.common import config +from neutron.agent.linux import external_process +from neutron.agent.linux import interface +from neutron.agent.linux import ip_lib +from neutron.agent.linux import iptables_manager +from neutron.agent.linux import ovs_lib # noqa +from neutron.agent import rpc as agent_rpc +from neutron.common import config as common_config +from neutron.common import constants as l3_constants +from neutron.common import rpc_compat +from neutron.common import topics +from neutron.common import utils as common_utils +from neutron import context +from neutron import manager +from neutron.openstack.common import excutils +from neutron.openstack.common import importutils +from neutron.openstack.common import lockutils +from neutron.openstack.common import log as logging +from neutron.openstack.common import loopingcall +from neutron.openstack.common import periodic_task +from neutron.openstack.common import processutils +from neutron.openstack.common import service +from neutron import service as neutron_service +from neutron.services.firewall.agents.l3reference import firewall_l3_agent + +LOG = logging.getLogger(__name__) +NS_PREFIX = 'qrouter-' +INTERNAL_DEV_PREFIX = 'qr-' +EXTERNAL_DEV_PREFIX = 'qg-' +RPC_LOOP_INTERVAL = 1 +FLOATING_IP_CIDR_SUFFIX = '/32' + + +class L3PluginApi(rpc_compat.RpcProxy): + """Agent side of the l3 agent RPC API. + + API version history: + 1.0 - Initial version. 
class RouterInfo(object):
    """Per-router state tracked by the L3 agent.

    Wraps the router dict received from the plugin together with the
    derived local state: namespace name, iptables manager, cached ports,
    floating IPs and the pending SNAT action.
    """

    def __init__(self, router_id, root_helper, use_namespaces, router):
        self.router_id = router_id
        self.ex_gw_port = None
        self._snat_enabled = None
        self._snat_action = None
        self.internal_ports = []
        self.floating_ips = set()
        self.root_helper = root_helper
        self.use_namespaces = use_namespaces
        # Assign through the property so the initial SNAT action is set up.
        self.router = router
        self.ns_name = NS_PREFIX + router_id if use_namespaces else None
        self.iptables_manager = iptables_manager.IptablesManager(
            root_helper=root_helper,
            #FIXME(danwent): use_ipv6=True,
            namespace=self.ns_name)
        self.routes = []

    @property
    def router(self):
        return self._router

    @router.setter
    def router(self, value):
        self._router = value
        if not value:
            return
        # Plugins that do not send enable_snat get SNAT enabled by default.
        self._snat_enabled = value.get('enable_snat', True)
        if value.get('gw_port'):
            # A gateway port is present: queue rule addition or removal
            # depending on whether SNAT is enabled.
            self._snat_action = ('add_rules' if self._snat_enabled
                                 else 'remove_rules')
        elif self.ex_gw_port:
            # The gateway port was removed; queue removal of stale rules.
            self._snat_action = 'remove_rules'

    def perform_snat_action(self, snat_callback, *args):
        """Run the queued SNAT action, if any, then clear it."""
        if not self._snat_action:
            return
        snat_callback(self, self._router.get('gw_port'),
                      *args, action=self._snat_action)
        self._snat_action = None
help=_("Delete namespace after removing a router.")), + cfg.StrOpt('metadata_proxy_socket', + default='$state_path/metadata_proxy', + help=_('Location of Metadata Proxy UNIX domain ' + 'socket')), + ] + + def __init__(self, host, conf=None): + if conf: + self.conf = conf + else: + self.conf = cfg.CONF + self.root_helper = config.get_root_helper(self.conf) + self.router_info = {} + + self._check_config_params() + + try: + self.driver = importutils.import_object( + self.conf.interface_driver, + self.conf + ) + except Exception: + msg = _("Error importing interface driver " + "'%s'") % self.conf.interface_driver + LOG.error(msg) + raise SystemExit(1) + + self.context = context.get_admin_context_without_session() + self.plugin_rpc = L3PluginApi(topics.L3PLUGIN, host) + self.fullsync = True + self.updated_routers = set() + self.removed_routers = set() + self.sync_progress = False + + self._clean_stale_namespaces = self.conf.use_namespaces + + self.rpc_loop = loopingcall.FixedIntervalLoopingCall( + self._rpc_loop) + self.rpc_loop.start(interval=RPC_LOOP_INTERVAL) + super(L3NATAgent, self).__init__(conf=self.conf) + + self.target_ex_net_id = None + + def _check_config_params(self): + """Check items in configuration files. + + Check for required and invalid configuration items. + The actual values are not verified for correctness. + """ + if not self.conf.interface_driver: + msg = _('An interface driver must be specified') + LOG.error(msg) + raise SystemExit(1) + + if not self.conf.use_namespaces and not self.conf.router_id: + msg = _('Router id is required if not using namespaces.') + LOG.error(msg) + raise SystemExit(1) + + def _cleanup_namespaces(self, routers): + """Destroy stale router namespaces on host when L3 agent restarts + + This routine is called when self._clean_stale_namespaces is True. + + The argument routers is the list of routers that are recorded in + the database as being hosted on this node. 
+ """ + try: + root_ip = ip_lib.IPWrapper(self.root_helper) + + host_namespaces = root_ip.get_namespaces(self.root_helper) + router_namespaces = set(ns for ns in host_namespaces + if ns.startswith(NS_PREFIX)) + ns_to_ignore = set(NS_PREFIX + r['id'] for r in routers) + ns_to_destroy = router_namespaces - ns_to_ignore + except RuntimeError: + LOG.exception(_('RuntimeError in obtaining router list ' + 'for namespace cleanup.')) + else: + self._destroy_stale_router_namespaces(ns_to_destroy) + + def _destroy_stale_router_namespaces(self, router_namespaces): + """Destroys the stale router namespaces + + The argumenet router_namespaces is a list of stale router namespaces + + As some stale router namespaces may not be able to be deleted, only + one attempt will be made to delete them. + """ + for ns in router_namespaces: + if self.conf.enable_metadata_proxy: + self._destroy_metadata_proxy(ns[len(NS_PREFIX):], ns) + + try: + self._destroy_router_namespace(ns) + except RuntimeError: + LOG.exception(_('Failed to destroy stale router namespace ' + '%s'), ns) + self._clean_stale_namespaces = False + + def _destroy_router_namespace(self, namespace): + ns_ip = ip_lib.IPWrapper(self.root_helper, namespace=namespace) + for d in ns_ip.get_devices(exclude_loopback=True): + if d.name.startswith(INTERNAL_DEV_PREFIX): + # device is on default bridge + self.driver.unplug(d.name, namespace=namespace, + prefix=INTERNAL_DEV_PREFIX) + elif d.name.startswith(EXTERNAL_DEV_PREFIX): + self.driver.unplug(d.name, + bridge=self.conf.external_network_bridge, + namespace=namespace, + prefix=EXTERNAL_DEV_PREFIX) + + if self.conf.router_delete_namespaces: + try: + ns_ip.netns.delete(namespace) + except RuntimeError: + msg = _('Failed trying to delete namespace: %s') + LOG.exception(msg % namespace) + + def _create_router_namespace(self, ri): + ip_wrapper_root = ip_lib.IPWrapper(self.root_helper) + ip_wrapper = ip_wrapper_root.ensure_namespace(ri.ns_name) + ip_wrapper.netns.execute(['sysctl', '-w', 
'net.ipv4.ip_forward=1']) + + def _fetch_external_net_id(self, force=False): + """Find UUID of single external network for this agent.""" + if self.conf.gateway_external_network_id: + return self.conf.gateway_external_network_id + + # L3 agent doesn't use external_network_bridge to handle external + # networks, so bridge_mappings with provider networks will be used + # and the L3 agent is able to handle any external networks. + if not self.conf.external_network_bridge: + return + + if not force and self.target_ex_net_id: + return self.target_ex_net_id + + try: + self.target_ex_net_id = self.plugin_rpc.get_external_network_id( + self.context) + return self.target_ex_net_id + except rpc_compat.RemoteError as e: + with excutils.save_and_reraise_exception() as ctx: + if e.exc_type == 'TooManyExternalNetworks': + ctx.reraise = False + msg = _( + "The 'gateway_external_network_id' option must be " + "configured for this agent as Neutron has more than " + "one external network.") + raise Exception(msg) + + def _router_added(self, router_id, router): + ri = RouterInfo(router_id, self.root_helper, + self.conf.use_namespaces, router) + self.router_info[router_id] = ri + if self.conf.use_namespaces: + self._create_router_namespace(ri) + for c, r in self.metadata_filter_rules(): + ri.iptables_manager.ipv4['filter'].add_rule(c, r) + for c, r in self.metadata_nat_rules(): + ri.iptables_manager.ipv4['nat'].add_rule(c, r) + ri.iptables_manager.apply() + super(L3NATAgent, self).process_router_add(ri) + if self.conf.enable_metadata_proxy: + self._spawn_metadata_proxy(ri.router_id, ri.ns_name) + + def _router_removed(self, router_id): + ri = self.router_info.get(router_id) + if ri is None: + LOG.warn(_("Info for router %s were not found. 
" + "Skipping router removal"), router_id) + return + ri.router['gw_port'] = None + ri.router[l3_constants.INTERFACE_KEY] = [] + ri.router[l3_constants.FLOATINGIP_KEY] = [] + self.process_router(ri) + for c, r in self.metadata_filter_rules(): + ri.iptables_manager.ipv4['filter'].remove_rule(c, r) + for c, r in self.metadata_nat_rules(): + ri.iptables_manager.ipv4['nat'].remove_rule(c, r) + ri.iptables_manager.apply() + if self.conf.enable_metadata_proxy: + self._destroy_metadata_proxy(ri.router_id, ri.ns_name) + del self.router_info[router_id] + self._destroy_router_namespace(ri.ns_name) + + def _spawn_metadata_proxy(self, router_id, ns_name): + def callback(pid_file): + metadata_proxy_socket = cfg.CONF.metadata_proxy_socket + proxy_cmd = ['neutron-ns-metadata-proxy', + '--pid_file=%s' % pid_file, + '--metadata_proxy_socket=%s' % metadata_proxy_socket, + '--router_id=%s' % router_id, + '--state_path=%s' % self.conf.state_path, + '--metadata_port=%s' % self.conf.metadata_port] + proxy_cmd.extend(config.get_log_args( + cfg.CONF, 'neutron-ns-metadata-proxy-%s.log' % + router_id)) + return proxy_cmd + + pm = external_process.ProcessManager( + self.conf, + router_id, + self.root_helper, + ns_name) + pm.enable(callback) + + def _destroy_metadata_proxy(self, router_id, ns_name): + pm = external_process.ProcessManager( + self.conf, + router_id, + self.root_helper, + ns_name) + pm.disable() + + def _set_subnet_info(self, port): + ips = port['fixed_ips'] + if not ips: + raise Exception(_("Router port %s has no IP address") % port['id']) + if len(ips) > 1: + LOG.error(_("Ignoring multiple IPs on router port %s"), + port['id']) + prefixlen = netaddr.IPNetwork(port['subnet']['cidr']).prefixlen + port['ip_cidr'] = "%s/%s" % (ips[0]['ip_address'], prefixlen) + + def _get_existing_devices(self, ri): + ip_wrapper = ip_lib.IPWrapper(root_helper=self.root_helper, + namespace=ri.ns_name) + ip_devs = ip_wrapper.get_devices(exclude_loopback=True) + return [ip_dev.name for ip_dev in 
ip_devs] + + def process_router(self, ri): + ri.iptables_manager.defer_apply_on() + ex_gw_port = self._get_ex_gw_port(ri) + internal_ports = ri.router.get(l3_constants.INTERFACE_KEY, []) + existing_port_ids = set([p['id'] for p in ri.internal_ports]) + current_port_ids = set([p['id'] for p in internal_ports + if p['admin_state_up']]) + new_ports = [p for p in internal_ports if + p['id'] in current_port_ids and + p['id'] not in existing_port_ids] + old_ports = [p for p in ri.internal_ports if + p['id'] not in current_port_ids] + for p in new_ports: + self._set_subnet_info(p) + self.internal_network_added(ri, p['network_id'], p['id'], + p['ip_cidr'], p['mac_address']) + ri.internal_ports.append(p) + + for p in old_ports: + self.internal_network_removed(ri, p['id'], p['ip_cidr']) + ri.internal_ports.remove(p) + + existing_devices = self._get_existing_devices(ri) + current_internal_devs = set([n for n in existing_devices + if n.startswith(INTERNAL_DEV_PREFIX)]) + current_port_devs = set([self.get_internal_device_name(id) for + id in current_port_ids]) + stale_devs = current_internal_devs - current_port_devs + for stale_dev in stale_devs: + LOG.debug(_('Deleting stale internal router device: %s'), + stale_dev) + self.driver.unplug(stale_dev, + namespace=ri.ns_name, + prefix=INTERNAL_DEV_PREFIX) + + # Get IPv4 only internal CIDRs + internal_cidrs = [p['ip_cidr'] for p in ri.internal_ports + if netaddr.IPNetwork(p['ip_cidr']).version == 4] + # TODO(salv-orlando): RouterInfo would be a better place for + # this logic too + ex_gw_port_id = (ex_gw_port and ex_gw_port['id'] or + ri.ex_gw_port and ri.ex_gw_port['id']) + + interface_name = None + if ex_gw_port_id: + interface_name = self.get_external_device_name(ex_gw_port_id) + if ex_gw_port and ex_gw_port != ri.ex_gw_port: + self._set_subnet_info(ex_gw_port) + self.external_gateway_added(ri, ex_gw_port, + interface_name, internal_cidrs) + elif not ex_gw_port and ri.ex_gw_port: + self.external_gateway_removed(ri, 
ri.ex_gw_port, + interface_name, internal_cidrs) + + stale_devs = [dev for dev in existing_devices + if dev.startswith(EXTERNAL_DEV_PREFIX) + and dev != interface_name] + for stale_dev in stale_devs: + LOG.debug(_('Deleting stale external router device: %s'), + stale_dev) + self.driver.unplug(stale_dev, + bridge=self.conf.external_network_bridge, + namespace=ri.ns_name, + prefix=EXTERNAL_DEV_PREFIX) + + # Process static routes for router + self.routes_updated(ri) + # Process SNAT rules for external gateway + ri.perform_snat_action(self._handle_router_snat_rules, + internal_cidrs, interface_name) + + # Process SNAT/DNAT rules for floating IPs + fip_statuses = {} + try: + if ex_gw_port: + existing_floating_ips = ri.floating_ips + self.process_router_floating_ip_nat_rules(ri) + ri.iptables_manager.defer_apply_off() + # Once NAT rules for floating IPs are safely in place + # configure their addresses on the external gateway port + fip_statuses = self.process_router_floating_ip_addresses( + ri, ex_gw_port) + except Exception: + # TODO(salv-orlando): Less broad catching + # All floating IPs must be put in error state + for fip in ri.router.get(l3_constants.FLOATINGIP_KEY, []): + fip_statuses[fip['id']] = l3_constants.FLOATINGIP_STATUS_ERROR + + if ex_gw_port: + # Identify floating IPs which were disabled + ri.floating_ips = set(fip_statuses.keys()) + for fip_id in existing_floating_ips - ri.floating_ips: + fip_statuses[fip_id] = l3_constants.FLOATINGIP_STATUS_DOWN + # Update floating IP status on the neutron server + self.plugin_rpc.update_floatingip_statuses( + self.context, ri.router_id, fip_statuses) + + # Update ex_gw_port and enable_snat on the router info cache + ri.ex_gw_port = ex_gw_port + ri.enable_snat = ri.router.get('enable_snat') + + def _handle_router_snat_rules(self, ri, ex_gw_port, internal_cidrs, + interface_name, action): + # Remove all the rules + # This is safe because if use_namespaces is set as False + # then the agent can only configure one router, 
otherwise + # each router's SNAT rules will be in their own namespace + ri.iptables_manager.ipv4['nat'].empty_chain('POSTROUTING') + ri.iptables_manager.ipv4['nat'].empty_chain('snat') + + # Add back the jump to float-snat + ri.iptables_manager.ipv4['nat'].add_rule('snat', '-j $float-snat') + + # And add them back if the action if add_rules + if action == 'add_rules' and ex_gw_port: + # ex_gw_port should not be None in this case + # NAT rules are added only if ex_gw_port has an IPv4 address + for ip_addr in ex_gw_port['fixed_ips']: + ex_gw_ip = ip_addr['ip_address'] + if netaddr.IPAddress(ex_gw_ip).version == 4: + rules = self.external_gateway_nat_rules(ex_gw_ip, + internal_cidrs, + interface_name) + for rule in rules: + ri.iptables_manager.ipv4['nat'].add_rule(*rule) + break + ri.iptables_manager.apply() + + def process_router_floating_ip_nat_rules(self, ri): + """Configure NAT rules for the router's floating IPs. + + Configures iptables rules for the floating ips of the given router + """ + # Clear out all iptables rules for floating ips + ri.iptables_manager.ipv4['nat'].clear_rules_by_tag('floating_ip') + + # Loop once to ensure that floating ips are configured. + for fip in ri.router.get(l3_constants.FLOATINGIP_KEY, []): + # Rebuild iptables rules for the floating ip. + fixed = fip['fixed_ip_address'] + fip_ip = fip['floating_ip_address'] + for chain, rule in self.floating_forward_rules(fip_ip, fixed): + ri.iptables_manager.ipv4['nat'].add_rule(chain, rule, + tag='floating_ip') + + ri.iptables_manager.apply() + + def process_router_floating_ip_addresses(self, ri, ex_gw_port): + """Configure IP addresses on router's external gateway interface. + + Ensures addresses for existing floating IPs and cleans up + those that should not longer be configured. 
+ """ + fip_statuses = {} + interface_name = self.get_external_device_name(ex_gw_port['id']) + device = ip_lib.IPDevice(interface_name, self.root_helper, + namespace=ri.ns_name) + existing_cidrs = set([addr['cidr'] for addr in device.addr.list()]) + new_cidrs = set() + + # Loop once to ensure that floating ips are configured. + for fip in ri.router.get(l3_constants.FLOATINGIP_KEY, []): + fip_ip = fip['floating_ip_address'] + ip_cidr = str(fip_ip) + FLOATING_IP_CIDR_SUFFIX + + new_cidrs.add(ip_cidr) + + if ip_cidr not in existing_cidrs: + net = netaddr.IPNetwork(ip_cidr) + try: + device.addr.add(net.version, ip_cidr, str(net.broadcast)) + except (processutils.UnknownArgumentError, + processutils.ProcessExecutionError): + # any exception occurred here should cause the floating IP + # to be set in error state + fip_statuses[fip['id']] = ( + l3_constants.FLOATINGIP_STATUS_ERROR) + LOG.warn(_("Unable to configure IP address for " + "floating IP: %s"), fip['id']) + continue + # As GARP is processed in a distinct thread the call below + # won't raise an exception to be handled. + self._send_gratuitous_arp_packet( + ri, interface_name, fip_ip) + fip_statuses[fip['id']] = ( + l3_constants.FLOATINGIP_STATUS_ACTIVE) + + # Clean up addresses that no longer belong on the gateway interface. 
+ for ip_cidr in existing_cidrs - new_cidrs: + if ip_cidr.endswith(FLOATING_IP_CIDR_SUFFIX): + net = netaddr.IPNetwork(ip_cidr) + device.addr.delete(net.version, ip_cidr) + return fip_statuses + + def _get_ex_gw_port(self, ri): + return ri.router.get('gw_port') + + def _arping(self, ri, interface_name, ip_address): + arping_cmd = ['arping', '-A', + '-I', interface_name, + '-c', self.conf.send_arp_for_ha, + ip_address] + try: + ip_wrapper = ip_lib.IPWrapper(self.root_helper, + namespace=ri.ns_name) + ip_wrapper.netns.execute(arping_cmd, check_exit_code=True) + except Exception as e: + LOG.error(_("Failed sending gratuitous ARP: %s"), str(e)) + + def _send_gratuitous_arp_packet(self, ri, interface_name, ip_address): + if self.conf.send_arp_for_ha > 0: + eventlet.spawn_n(self._arping, ri, interface_name, ip_address) + + def get_internal_device_name(self, port_id): + return (INTERNAL_DEV_PREFIX + port_id)[:self.driver.DEV_NAME_LEN] + + def get_external_device_name(self, port_id): + return (EXTERNAL_DEV_PREFIX + port_id)[:self.driver.DEV_NAME_LEN] + + def external_gateway_added(self, ri, ex_gw_port, + interface_name, internal_cidrs): + + self.driver.plug(ex_gw_port['network_id'], + ex_gw_port['id'], interface_name, + ex_gw_port['mac_address'], + bridge=self.conf.external_network_bridge, + namespace=ri.ns_name, + prefix=EXTERNAL_DEV_PREFIX) + + # Compute a list of addresses this router is supposed to have. + # This avoids unnecessarily removing those addresses and + # causing a momentarily network outage. 
+ floating_ips = ri.router.get(l3_constants.FLOATINGIP_KEY, []) + preserve_ips = [ip['floating_ip_address'] + FLOATING_IP_CIDR_SUFFIX + for ip in floating_ips] + + self.driver.init_l3(interface_name, [ex_gw_port['ip_cidr']], + namespace=ri.ns_name, + gateway=ex_gw_port['subnet'].get('gateway_ip'), + extra_subnets=ex_gw_port.get('extra_subnets', []), + preserve_ips=preserve_ips) + ip_address = ex_gw_port['ip_cidr'].split('/')[0] + self._send_gratuitous_arp_packet(ri, interface_name, ip_address) + + def external_gateway_removed(self, ri, ex_gw_port, + interface_name, internal_cidrs): + + self.driver.unplug(interface_name, + bridge=self.conf.external_network_bridge, + namespace=ri.ns_name, + prefix=EXTERNAL_DEV_PREFIX) + + def metadata_filter_rules(self): + rules = [] + if self.conf.enable_metadata_proxy: + rules.append(('INPUT', '-s 0.0.0.0/0 -d 127.0.0.1 ' + '-p tcp -m tcp --dport %s ' + '-j ACCEPT' % self.conf.metadata_port)) + return rules + + def metadata_nat_rules(self): + rules = [] + if self.conf.enable_metadata_proxy: + rules.append(('PREROUTING', '-s 0.0.0.0/0 -d 169.254.169.254/32 ' + '-p tcp -m tcp --dport 80 -j REDIRECT ' + '--to-port %s' % self.conf.metadata_port)) + return rules + + def external_gateway_nat_rules(self, ex_gw_ip, internal_cidrs, + interface_name): + rules = [('POSTROUTING', '! -i %(interface_name)s ' + '! -o %(interface_name)s -m conntrack ! 
' + '--ctstate DNAT -j ACCEPT' % + {'interface_name': interface_name})] + for cidr in internal_cidrs: + rules.extend(self.internal_network_nat_rules(ex_gw_ip, cidr)) + return rules + + def internal_network_added(self, ri, network_id, port_id, + internal_cidr, mac_address): + interface_name = self.get_internal_device_name(port_id) + if not ip_lib.device_exists(interface_name, + root_helper=self.root_helper, + namespace=ri.ns_name): + self.driver.plug(network_id, port_id, interface_name, mac_address, + namespace=ri.ns_name, + prefix=INTERNAL_DEV_PREFIX) + + self.driver.init_l3(interface_name, [internal_cidr], + namespace=ri.ns_name) + ip_address = internal_cidr.split('/')[0] + self._send_gratuitous_arp_packet(ri, interface_name, ip_address) + + def internal_network_removed(self, ri, port_id, internal_cidr): + interface_name = self.get_internal_device_name(port_id) + if ip_lib.device_exists(interface_name, + root_helper=self.root_helper, + namespace=ri.ns_name): + self.driver.unplug(interface_name, namespace=ri.ns_name, + prefix=INTERNAL_DEV_PREFIX) + + def internal_network_nat_rules(self, ex_gw_ip, internal_cidr): + rules = [('snat', '-s %s -j SNAT --to-source %s' % + (internal_cidr, ex_gw_ip))] + return rules + + def floating_forward_rules(self, floating_ip, fixed_ip): + return [('PREROUTING', '-d %s -j DNAT --to %s' % + (floating_ip, fixed_ip)), + ('OUTPUT', '-d %s -j DNAT --to %s' % + (floating_ip, fixed_ip)), + ('float-snat', '-s %s -j SNAT --to %s' % + (fixed_ip, floating_ip))] + + def router_deleted(self, context, router_id): + """Deal with router deletion RPC message.""" + LOG.debug(_('Got router deleted notification for %s'), router_id) + self.removed_routers.add(router_id) + + def routers_updated(self, context, routers): + """Deal with routers modification and creation RPC message.""" + LOG.debug(_('Got routers updated notification :%s'), routers) + if routers: + # This is needed for backward compatibility + if isinstance(routers[0], dict): + routers = 
[router['id'] for router in routers] + self.updated_routers.update(routers) + + def router_removed_from_agent(self, context, payload): + LOG.debug(_('Got router removed from agent :%r'), payload) + self.removed_routers.add(payload['router_id']) + + def router_added_to_agent(self, context, payload): + LOG.debug(_('Got router added to agent :%r'), payload) + self.routers_updated(context, payload) + + def _process_routers(self, routers, all_routers=False): + pool = eventlet.GreenPool() + if (self.conf.external_network_bridge and + not ip_lib.device_exists(self.conf.external_network_bridge)): + LOG.error(_("The external network bridge '%s' does not exist"), + self.conf.external_network_bridge) + return + + target_ex_net_id = self._fetch_external_net_id() + # if routers are all the routers we have (They are from router sync on + # starting or when error occurs during running), we seek the + # routers which should be removed. + # If routers are from server side notification, we seek them + # from subset of incoming routers and ones we have now. + if all_routers: + prev_router_ids = set(self.router_info) + else: + prev_router_ids = set(self.router_info) & set( + [router['id'] for router in routers]) + cur_router_ids = set() + for r in routers: + # If namespaces are disabled, only process the router associated + # with the configured agent id. + if (not self.conf.use_namespaces and + r['id'] != self.conf.router_id): + continue + ex_net_id = (r['external_gateway_info'] or {}).get('network_id') + if not ex_net_id and not self.conf.handle_internal_only_routers: + continue + if (target_ex_net_id and ex_net_id and + ex_net_id != target_ex_net_id): + # Double check that our single external_net_id has not changed + # by forcing a check by RPC. 
+ if (ex_net_id != self._fetch_external_net_id(force=True)): + continue + cur_router_ids.add(r['id']) + if r['id'] not in self.router_info: + self._router_added(r['id'], r) + ri = self.router_info[r['id']] + ri.router = r + pool.spawn_n(self.process_router, ri) + # identify and remove routers that no longer exist + for router_id in prev_router_ids - cur_router_ids: + pool.spawn_n(self._router_removed, router_id) + pool.waitall() + + @lockutils.synchronized('l3-agent', 'neutron-') + def _rpc_loop(self): + # _rpc_loop and _sync_routers_task will not be + # executed in the same time because of lock. + # so we can clear the value of updated_routers + # and removed_routers, but they can be updated by + # updated_routers and removed_routers rpc call + try: + LOG.debug(_("Starting RPC loop for %d updated routers"), + len(self.updated_routers)) + if self.updated_routers: + # We're capturing and clearing the list, and will + # process the "captured" updates in this loop, + # and any updates that happen due to a context switch + # will be picked up on the next pass. 
+ updated_routers = set(self.updated_routers) + self.updated_routers.clear() + router_ids = list(updated_routers) + routers = self.plugin_rpc.get_routers( + self.context, router_ids) + # routers with admin_state_up=false will not be in the fetched + fetched = set([r['id'] for r in routers]) + self.removed_routers.update(updated_routers - fetched) + + self._process_routers(routers) + self._process_router_delete() + LOG.debug(_("RPC loop successfully completed")) + except Exception: + LOG.exception(_("Failed synchronizing routers")) + self.fullsync = True + + def _process_router_delete(self): + current_removed_routers = list(self.removed_routers) + for router_id in current_removed_routers: + self._router_removed(router_id) + self.removed_routers.remove(router_id) + + def _router_ids(self): + if not self.conf.use_namespaces: + return [self.conf.router_id] + + @periodic_task.periodic_task + @lockutils.synchronized('l3-agent', 'neutron-') + def _sync_routers_task(self, context): + if self.services_sync: + super(L3NATAgent, self).process_services_sync(context) + LOG.debug(_("Starting _sync_routers_task - fullsync:%s"), + self.fullsync) + if not self.fullsync: + return + try: + router_ids = self._router_ids() + self.updated_routers.clear() + self.removed_routers.clear() + routers = self.plugin_rpc.get_routers( + context, router_ids) + + LOG.debug(_('Processing :%r'), routers) + self._process_routers(routers, all_routers=True) + self.fullsync = False + LOG.debug(_("_sync_routers_task successfully completed")) + except rpc_compat.RPCException: + LOG.exception(_("Failed synchronizing routers due to RPC error")) + self.fullsync = True + return + except Exception: + LOG.exception(_("Failed synchronizing routers")) + self.fullsync = True + + # Resync is not necessary for the cleanup of stale + # namespaces. 
+ if self._clean_stale_namespaces: + self._cleanup_namespaces(routers) + + def after_start(self): + LOG.info(_("L3 agent started")) + + def _update_routing_table(self, ri, operation, route): + cmd = ['ip', 'route', operation, 'to', route['destination'], + 'via', route['nexthop']] + ip_wrapper = ip_lib.IPWrapper(self.root_helper, + namespace=ri.ns_name) + ip_wrapper.netns.execute(cmd, check_exit_code=False) + + def routes_updated(self, ri): + new_routes = ri.router['routes'] + old_routes = ri.routes + adds, removes = common_utils.diff_list_of_dict(old_routes, + new_routes) + for route in adds: + LOG.debug(_("Added route entry is '%s'"), route) + # remove replaced route from deleted route + for del_route in removes: + if route['destination'] == del_route['destination']: + removes.remove(del_route) + #replace success even if there is no existing route + self._update_routing_table(ri, 'replace', route) + for route in removes: + LOG.debug(_("Removed route entry is '%s'"), route) + self._update_routing_table(ri, 'delete', route) + ri.routes = new_routes + + +class L3NATAgentWithStateReport(L3NATAgent): + + def __init__(self, host, conf=None): + super(L3NATAgentWithStateReport, self).__init__(host=host, conf=conf) + self.state_rpc = agent_rpc.PluginReportStateAPI(topics.PLUGIN) + self.agent_state = { + 'binary': 'neutron-l3-agent', + 'host': host, + 'topic': topics.L3_AGENT, + 'configurations': { + 'use_namespaces': self.conf.use_namespaces, + 'router_id': self.conf.router_id, + 'handle_internal_only_routers': + self.conf.handle_internal_only_routers, + 'external_network_bridge': self.conf.external_network_bridge, + 'gateway_external_network_id': + self.conf.gateway_external_network_id, + 'interface_driver': self.conf.interface_driver}, + 'start_flag': True, + 'agent_type': l3_constants.AGENT_TYPE_L3} + report_interval = cfg.CONF.AGENT.report_interval + self.use_call = True + if report_interval: + self.heartbeat = loopingcall.FixedIntervalLoopingCall( + 
self._report_state) + self.heartbeat.start(interval=report_interval) + + def _report_state(self): + LOG.debug(_("Report state task started")) + num_ex_gw_ports = 0 + num_interfaces = 0 + num_floating_ips = 0 + router_infos = self.router_info.values() + num_routers = len(router_infos) + for ri in router_infos: + ex_gw_port = self._get_ex_gw_port(ri) + if ex_gw_port: + num_ex_gw_ports += 1 + num_interfaces += len(ri.router.get(l3_constants.INTERFACE_KEY, + [])) + num_floating_ips += len(ri.router.get(l3_constants.FLOATINGIP_KEY, + [])) + configurations = self.agent_state['configurations'] + configurations['routers'] = num_routers + configurations['ex_gw_ports'] = num_ex_gw_ports + configurations['interfaces'] = num_interfaces + configurations['floating_ips'] = num_floating_ips + try: + self.state_rpc.report_state(self.context, self.agent_state, + self.use_call) + self.agent_state.pop('start_flag', None) + self.use_call = False + LOG.debug(_("Report state task successfully completed")) + except AttributeError: + # This means the server does not support report_state + LOG.warn(_("Neutron server does not support state report." 
+ " State report for this agent will be disabled.")) + self.heartbeat.stop() + return + except Exception: + LOG.exception(_("Failed reporting state!")) + + def agent_updated(self, context, payload): + """Handle the agent_updated notification event.""" + self.fullsync = True + LOG.info(_("agent_updated by server side %s!"), payload) + + +def main(manager='neutron.agent.l3_agent.L3NATAgentWithStateReport'): + conf = cfg.CONF + conf.register_opts(L3NATAgent.OPTS) + config.register_interface_driver_opts_helper(conf) + config.register_use_namespaces_opts_helper(conf) + config.register_agent_state_opts_helper(conf) + config.register_root_helper(conf) + conf.register_opts(interface.OPTS) + conf.register_opts(external_process.OPTS) + common_config.init(sys.argv[1:]) + config.setup_logging(conf) + server = neutron_service.Service.create( + binary='neutron-l3-agent', + topic=topics.L3_AGENT, + report_interval=cfg.CONF.AGENT.report_interval, + manager=manager) + service.launch(server).wait() diff --git a/neutron/agent/linux/__init__.py b/neutron/agent/linux/__init__.py new file mode 100644 index 000000000..0b3d2db5e --- /dev/null +++ b/neutron/agent/linux/__init__.py @@ -0,0 +1,16 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
class AsyncProcessException(Exception):
    """Raised on invalid start/stop transitions of an AsyncProcess."""
    pass


class AsyncProcess(object):
    """Manages an asynchronous process.

    This class spawns a new process via subprocess and uses
    greenthreads to read stderr and stdout asynchronously into queues
    that can be read via repeatedly calling iter_stdout() and
    iter_stderr().

    If respawn_interval is non-zero, any error in communicating with
    the managed process will result in the process and greenthreads
    being cleaned up and the process restarted after the specified
    interval.

    Example usage:

    >>> import time
    >>> proc = AsyncProcess(['ping'])
    >>> proc.start()
    >>> time.sleep(5)
    >>> proc.stop()
    >>> for line in proc.iter_stdout():
    ...     print line
    """

    def __init__(self, cmd, root_helper=None, respawn_interval=None):
        """Constructor.

        :param cmd: The list of command arguments to invoke.
        :param root_helper: Optional, utility to use when running shell cmds.
        :param respawn_interval: Optional, the interval in seconds to wait
                to respawn after unexpected process death. Respawn will
                only be attempted if a value of 0 or greater is provided.
        :raises ValueError: if respawn_interval is provided and negative.
        """
        self.cmd = cmd
        self.root_helper = root_helper
        if respawn_interval is not None and respawn_interval < 0:
            raise ValueError(_('respawn_interval must be >= 0 if provided.'))
        self.respawn_interval = respawn_interval
        self._process = None
        # _kill_event doubles as the "is started" flag: non-None while running.
        self._kill_event = None
        self._reset_queues()
        self._watchers = []

    def _reset_queues(self):
        # Fresh queues so output from a previous incarnation is not mixed
        # with output from a respawned process.
        self._stdout_lines = eventlet.queue.LightQueue()
        self._stderr_lines = eventlet.queue.LightQueue()

    def start(self):
        """Launch a process and monitor it asynchronously.

        :raises AsyncProcessException: if the process is already running.
        """
        if self._kill_event:
            raise AsyncProcessException(_('Process is already started'))
        else:
            LOG.debug(_('Launching async process [%s].'), self.cmd)
            self._spawn()

    def stop(self):
        """Halt the process and watcher threads.

        :raises AsyncProcessException: if the process is not running.
        """
        if self._kill_event:
            LOG.debug(_('Halting async process [%s].'), self.cmd)
            self._kill()
        else:
            raise AsyncProcessException(_('Process is not running.'))

    def _spawn(self):
        """Spawn a process and its watchers."""
        self._kill_event = eventlet.event.Event()
        self._process, cmd = utils.create_process(self.cmd,
                                                  root_helper=self.root_helper)
        self._watchers = []
        for reader in (self._read_stdout, self._read_stderr):
            # Pass the stop event directly to the greenthread to
            # ensure that assignment of a new event to the instance
            # attribute does not prevent the greenthread from using
            # the original event.
            watcher = eventlet.spawn(self._watch_process,
                                     reader,
                                     self._kill_event)
            self._watchers.append(watcher)

    def _kill(self, respawning=False):
        """Kill the process and the associated watcher greenthreads.

        :param respawning: Optional, whether respawn will be subsequently
               attempted.
        """
        # Halt the greenthreads
        self._kill_event.send()

        pid = self._get_pid_to_kill()
        if pid:
            self._kill_process(pid)

        if not respawning:
            # Clear the kill event to ensure the process can be
            # explicitly started again.
            self._kill_event = None

    def _get_pid_to_kill(self):
        """Return the pid of the leaf child process, or None if it is dead."""
        pid = self._process.pid
        # If root helper was used, two or more processes will be created:
        #
        #  - a root helper process (e.g. sudo myscript)
        #  - possibly a rootwrap script (e.g. neutron-rootwrap)
        #  - a child process (e.g. myscript)
        #
        # Killing the root helper process will leave the child process
        # running, re-parented to init, so the only way to ensure that both
        # die is to target the child process directly.
        if self.root_helper:
            try:
                pid = utils.find_child_pids(pid)[0]
            except IndexError:
                # Process is already dead
                return None
            while True:
                try:
                    # We shouldn't have more than one child per process
                    # so keep getting the children of the first one
                    pid = utils.find_child_pids(pid)[0]
                except IndexError:
                    # Last process in the tree, return it
                    break
        return pid

    def _kill_process(self, pid):
        """Send SIGKILL to pid; return False if the kill itself errored."""
        try:
            # A process started by a root helper will be running as
            # root and need to be killed via the same helper.
            utils.execute(['kill', '-9', pid], root_helper=self.root_helper)
        except Exception as ex:
            stale_pid = (isinstance(ex, RuntimeError) and
                         'No such process' in str(ex))
            if not stale_pid:
                LOG.exception(_('An error occurred while killing [%s].'),
                              self.cmd)
                return False
        return True

    def _handle_process_error(self):
        """Kill the async process and respawn if necessary."""
        LOG.debug(_('Halting async process [%s] in response to an error.'),
                  self.cmd)
        # Explicit None check: the original `self.respawn_interval >= 0`
        # relied on Python 2 ordering `None >= 0 == False` and would raise
        # TypeError on Python 3 when no interval was configured.
        respawning = (self.respawn_interval is not None and
                      self.respawn_interval >= 0)
        self._kill(respawning=respawning)
        if respawning:
            eventlet.sleep(self.respawn_interval)
            LOG.debug(_('Respawning async process [%s].'), self.cmd)
            self._spawn()

    def _watch_process(self, callback, kill_event):
        """Invoke callback until it fails or kill_event fires."""
        while not kill_event.ready():
            try:
                if not callback():
                    break
            except Exception:
                LOG.exception(_('An error occurred while communicating '
                                'with async process [%s].'), self.cmd)
                break
            # Ensure that watching a process with lots of output does
            # not block execution of other greenthreads.
            eventlet.sleep()
        # The kill event not being ready indicates that the loop was
        # broken out of due to an error in the watched process rather
        # than the loop condition being satisfied.
        if not kill_event.ready():
            self._handle_process_error()

    def _read(self, stream, queue):
        """Read one line from stream into queue; return it (or falsy on EOF)."""
        data = stream.readline()
        if data:
            data = data.strip()
            queue.put(data)
        return data

    def _read_stdout(self):
        return self._read(self._process.stdout, self._stdout_lines)

    def _read_stderr(self):
        return self._read(self._process.stderr, self._stderr_lines)

    def _iter_queue(self, queue):
        """Drain queue without blocking, yielding each buffered line."""
        while True:
            try:
                yield queue.get_nowait()
            except eventlet.queue.Empty:
                break

    def iter_stdout(self):
        return self._iter_queue(self._stdout_lines)

    def iter_stderr(self):
        return self._iter_queue(self._stderr_lines)
class Pidfile(object):
    """Hold an exclusive flock on a pidfile and read/write the pid in it.

    The lock is taken in __init__ (exiting the process on failure), so
    constructing a Pidfile doubles as a single-instance guard.
    """

    def __init__(self, pidfile, procname, uuid=None):
        self.pidfile = pidfile
        self.procname = procname
        self.uuid = uuid
        try:
            self.fd = os.open(pidfile, os.O_CREAT | os.O_RDWR)
            # Non-blocking exclusive lock: fails immediately if another
            # process already holds the pidfile.
            fcntl.flock(self.fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except IOError:
            LOG.exception(_("Error while handling pidfile: %s"), pidfile)
            sys.exit(1)

    def __str__(self):
        return self.pidfile

    def unlock(self):
        """Release the flock taken in __init__.

        :raises IOError: if the unlock fails.
        """
        # BUG FIX: fcntl.flock() returns None and reports failure by
        # raising; the original `if not not fcntl.flock(...)` therefore
        # could never raise the intended IOError.
        try:
            fcntl.flock(self.fd, fcntl.LOCK_UN)
        except IOError:
            raise IOError(_('Unable to unlock pid file'))

    def write(self, pid):
        """Replace the pidfile contents with the decimal pid."""
        # BUG FIX: rewind first; without it a second write() left the old
        # offset in place and produced a NUL-prefixed file.
        os.lseek(self.fd, 0, os.SEEK_SET)
        os.ftruncate(self.fd, 0)
        # Encode for Python 3 (os.write needs bytes); no-op on Python 2.
        os.write(self.fd, ("%d" % pid).encode('ascii'))
        os.fsync(self.fd)

    def read(self):
        """Return the pid stored in the file, or None if unparsable."""
        try:
            # BUG FIX: seek to the start before reading; the original read
            # at the current offset (EOF right after write()) and only
            # rewound afterwards, so it returned None.
            os.lseek(self.fd, 0, os.SEEK_SET)
            return int(os.read(self.fd, 128))
        except ValueError:
            return

    def is_running(self):
        """True if the recorded pid exists and its cmdline matches us."""
        pid = self.read()
        if not pid:
            return False

        cmdline = '/proc/%s/cmdline' % pid
        try:
            with open(cmdline, "r") as f:
                exec_out = f.readline()
            return self.procname in exec_out and (not self.uuid or
                                                  self.uuid in exec_out)
        except IOError:
            # No such /proc entry: the process is gone.
            return False


class Daemon(object):
    """A generic daemon class.

    Usage: subclass the Daemon class and override the run() method
    """
    def __init__(self, pidfile, stdin='/dev/null', stdout='/dev/null',
                 stderr='/dev/null', procname='python', uuid=None):
        self.stdin = stdin
        self.stdout = stdout
        self.stderr = stderr
        self.procname = procname
        # Taking the pidfile lock here also guards against double-start.
        self.pidfile = Pidfile(pidfile, procname, uuid)

    def _fork(self):
        """fork(); the parent exits, the child continues."""
        try:
            pid = os.fork()
            if pid > 0:
                sys.exit(0)
        except OSError:
            LOG.exception(_('Fork failed'))
            sys.exit(1)

    def daemonize(self):
        """Daemonize process by doing Stevens double fork."""
        # fork first time
        self._fork()

        # decouple from parent environment
        os.chdir("/")
        os.setsid()
        os.umask(0)

        # fork second time
        self._fork()

        # redirect standard file descriptors
        sys.stdout.flush()
        sys.stderr.flush()
        stdin = open(self.stdin, 'r')
        stdout = open(self.stdout, 'a+')
        # NOTE(review): buffering=0 on a text-mode file is Python-2-only;
        # Python 3 rejects unbuffered text I/O -- confirm before porting.
        stderr = open(self.stderr, 'a+', 0)
        os.dup2(stdin.fileno(), sys.stdin.fileno())
        os.dup2(stdout.fileno(), sys.stdout.fileno())
        os.dup2(stderr.fileno(), sys.stderr.fileno())

        # write pidfile
        atexit.register(self.delete_pid)
        signal.signal(signal.SIGTERM, self.handle_sigterm)
        self.pidfile.write(os.getpid())

    def delete_pid(self):
        os.remove(str(self.pidfile))

    def handle_sigterm(self, signum, frame):
        sys.exit(0)

    def start(self):
        """Start the daemon."""

        if self.pidfile.is_running():
            self.pidfile.unlock()
            message = _('Pidfile %s already exist. Daemon already running?')
            LOG.error(message, self.pidfile)
            sys.exit(1)

        # Start the daemon
        self.daemonize()
        self.run()

    def run(self):
        """Override this method when subclassing Daemon.

        start() will call this method after the process has daemonized.
        """
        pass
IPV4 = 4
IPV6 = 6
UDP = 'udp'
TCP = 'tcp'
DNS_PORT = 53
DHCPV4_PORT = 67
DHCPV6_PORT = 547
METADATA_DEFAULT_PREFIX = 16
METADATA_DEFAULT_IP = '169.254.169.254'
METADATA_DEFAULT_CIDR = '%s/%d' % (METADATA_DEFAULT_IP,
                                   METADATA_DEFAULT_PREFIX)
METADATA_PORT = 80
WIN2k3_STATIC_DNS = 249
NS_PREFIX = 'qdhcp-'


class DictModel(dict):
    """Convert dict into an object that provides attribute access to values.

    Nested dicts (including those inside list/tuple values) are recursively
    converted to DictModel so attribute access works at any depth.
    """

    def __init__(self, *args, **kwargs):
        """Convert dict values to DictModel values."""
        super(DictModel, self).__init__(*args, **kwargs)

        def needs_upgrade(item):
            """Check if `item` is a dict and needs to be changed to DictModel.
            """
            return isinstance(item, dict) and not isinstance(item, DictModel)

        def upgrade(item):
            """Upgrade item if it needs to be upgraded."""
            if needs_upgrade(item):
                return DictModel(item)
            else:
                return item

        # items() instead of iteritems(): equivalent here on Python 2 and
        # also valid on Python 3, where iteritems() does not exist. Only
        # existing keys are reassigned, so iterating while mutating is safe.
        for key, value in self.items():
            if isinstance(value, (list, tuple)):
                # Keep the same type but convert dicts to DictModels
                self[key] = type(value)(
                    (upgrade(item) for item in value)
                )
            elif needs_upgrade(value):
                # Change dict instance values to DictModel instance values
                self[key] = DictModel(value)

    def __getattr__(self, name):
        try:
            return self[name]
        except KeyError as e:
            # Surface missing keys as AttributeError so attribute-style
            # access behaves like a normal object.
            raise AttributeError(e)

    def __setattr__(self, name, value):
        self[name] = value

    def __delattr__(self, name):
        del self[name]


class NetModel(DictModel):
    """DictModel for a network, with an optional DHCP namespace name."""

    def __init__(self, use_namespaces, d):
        super(NetModel, self).__init__(d)

        # Namespace name is derived from the network id only when
        # namespaces are in use; otherwise it is None.
        self._ns_name = (use_namespaces and
                         "%s%s" % (NS_PREFIX, self.id) or None)

    @property
    def namespace(self):
        return self._ns_name
__init__(self, conf, network, root_helper='sudo', + version=None, plugin=None): + self.conf = conf + self.network = network + self.root_helper = root_helper + self.device_manager = DeviceManager(self.conf, + self.root_helper, plugin) + self.version = version + + @abc.abstractmethod + def enable(self): + """Enables DHCP for this network.""" + + @abc.abstractmethod + def disable(self, retain_port=False): + """Disable dhcp for this network.""" + + def restart(self): + """Restart the dhcp service for the network.""" + self.disable(retain_port=True) + self.enable() + + @abc.abstractproperty + def active(self): + """Boolean representing the running state of the DHCP server.""" + + @abc.abstractmethod + def reload_allocations(self): + """Force the DHCP server to reload the assignment database.""" + + @classmethod + def existing_dhcp_networks(cls, conf, root_helper): + """Return a list of existing networks ids that we have configs for.""" + + raise NotImplementedError + + @classmethod + def check_version(cls): + """Execute version checks on DHCP server.""" + + raise NotImplementedError + + +class DhcpLocalProcess(DhcpBase): + PORTS = [] + + def _enable_dhcp(self): + """check if there is a subnet within the network with dhcp enabled.""" + for subnet in self.network.subnets: + if subnet.enable_dhcp: + return True + return False + + def enable(self): + """Enables DHCP for this network by spawning a local process.""" + interface_name = self.device_manager.setup(self.network) + if self.active: + self.restart() + elif self._enable_dhcp(): + self.interface_name = interface_name + self.spawn_process() + + def disable(self, retain_port=False): + """Disable DHCP for this network by killing the local process.""" + pid = self.pid + + if pid: + if self.active: + cmd = ['kill', '-9', pid] + utils.execute(cmd, self.root_helper) + else: + LOG.debug(_('DHCP for %(net_id)s is stale, pid %(pid)d ' + 'does not exist, performing cleanup'), + {'net_id': self.network.id, 'pid': pid}) + if not 
retain_port: + self.device_manager.destroy(self.network, + self.interface_name) + else: + LOG.debug(_('No DHCP started for %s'), self.network.id) + + self._remove_config_files() + + if not retain_port: + if self.conf.dhcp_delete_namespaces and self.network.namespace: + ns_ip = ip_lib.IPWrapper(self.root_helper, + self.network.namespace) + try: + ns_ip.netns.delete(self.network.namespace) + except RuntimeError: + msg = _('Failed trying to delete namespace: %s') + LOG.exception(msg, self.network.namespace) + + def _remove_config_files(self): + confs_dir = os.path.abspath(os.path.normpath(self.conf.dhcp_confs)) + conf_dir = os.path.join(confs_dir, self.network.id) + shutil.rmtree(conf_dir, ignore_errors=True) + + def get_conf_file_name(self, kind, ensure_conf_dir=False): + """Returns the file name for a given kind of config file.""" + confs_dir = os.path.abspath(os.path.normpath(self.conf.dhcp_confs)) + conf_dir = os.path.join(confs_dir, self.network.id) + if ensure_conf_dir: + if not os.path.isdir(conf_dir): + os.makedirs(conf_dir, 0o755) + + return os.path.join(conf_dir, kind) + + def _get_value_from_conf_file(self, kind, converter=None): + """A helper function to read a value from one of the state files.""" + file_name = self.get_conf_file_name(kind) + msg = _('Error while reading %s') + + try: + with open(file_name, 'r') as f: + try: + return converter and converter(f.read()) or f.read() + except ValueError: + msg = _('Unable to convert value in %s') + except IOError: + msg = _('Unable to access %s') + + LOG.debug(msg % file_name) + return None + + @property + def pid(self): + """Last known pid for the DHCP process spawned for this network.""" + return self._get_value_from_conf_file('pid', int) + + @property + def active(self): + pid = self.pid + if pid is None: + return False + + cmdline = '/proc/%s/cmdline' % pid + try: + with open(cmdline, "r") as f: + return self.network.id in f.readline() + except IOError: + return False + + @property + def 
interface_name(self): + return self._get_value_from_conf_file('interface') + + @interface_name.setter + def interface_name(self, value): + interface_file_path = self.get_conf_file_name('interface', + ensure_conf_dir=True) + utils.replace_file(interface_file_path, value) + + @abc.abstractmethod + def spawn_process(self): + pass + + +class Dnsmasq(DhcpLocalProcess): + # The ports that need to be opened when security policies are active + # on the Neutron port used for DHCP. These are provided as a convenience + # for users of this class. + PORTS = {IPV4: [(UDP, DNS_PORT), (TCP, DNS_PORT), (UDP, DHCPV4_PORT)], + IPV6: [(UDP, DNS_PORT), (TCP, DNS_PORT), (UDP, DHCPV6_PORT)], + } + + _TAG_PREFIX = 'tag%d' + + NEUTRON_NETWORK_ID_KEY = 'NEUTRON_NETWORK_ID' + NEUTRON_RELAY_SOCKET_PATH_KEY = 'NEUTRON_RELAY_SOCKET_PATH' + MINIMUM_VERSION = 2.59 + + @classmethod + def check_version(cls): + ver = 0 + try: + cmd = ['dnsmasq', '--version'] + out = utils.execute(cmd) + ver = re.findall("\d+.\d+", out)[0] + is_valid_version = float(ver) >= cls.MINIMUM_VERSION + if not is_valid_version: + LOG.warning(_('FAILED VERSION REQUIREMENT FOR DNSMASQ. ' + 'DHCP AGENT MAY NOT RUN CORRECTLY! ' + 'Please ensure that its version is %s ' + 'or above!'), cls.MINIMUM_VERSION) + except (OSError, RuntimeError, IndexError, ValueError): + LOG.warning(_('Unable to determine dnsmasq version. 
' + 'Please ensure that its version is %s ' + 'or above!'), cls.MINIMUM_VERSION) + return float(ver) + + @classmethod + def existing_dhcp_networks(cls, conf, root_helper): + """Return a list of existing networks ids that we have configs for.""" + + confs_dir = os.path.abspath(os.path.normpath(conf.dhcp_confs)) + + return [ + c for c in os.listdir(confs_dir) + if uuidutils.is_uuid_like(c) + ] + + def spawn_process(self): + """Spawns a Dnsmasq process for the network.""" + env = { + self.NEUTRON_NETWORK_ID_KEY: self.network.id, + } + + cmd = [ + 'dnsmasq', + '--no-hosts', + '--no-resolv', + '--strict-order', + '--bind-interfaces', + '--interface=%s' % self.interface_name, + '--except-interface=lo', + '--pid-file=%s' % self.get_conf_file_name( + 'pid', ensure_conf_dir=True), + '--dhcp-hostsfile=%s' % self._output_hosts_file(), + '--addn-hosts=%s' % self._output_addn_hosts_file(), + '--dhcp-optsfile=%s' % self._output_opts_file(), + '--leasefile-ro', + ] + + possible_leases = 0 + for i, subnet in enumerate(self.network.subnets): + # if a subnet is specified to have dhcp disabled + if not subnet.enable_dhcp: + continue + if subnet.ip_version == 4: + mode = 'static' + else: + # TODO(mark): how do we indicate other options + # ra-only, slaac, ra-nameservers, and ra-stateless. + mode = 'static' + if self.version >= self.MINIMUM_VERSION: + set_tag = 'set:' + else: + set_tag = '' + + cidr = netaddr.IPNetwork(subnet.cidr) + + if self.conf.dhcp_lease_duration == -1: + lease = 'infinite' + else: + lease = '%ss' % self.conf.dhcp_lease_duration + + cmd.append('--dhcp-range=%s%s,%s,%s,%s' % + (set_tag, self._TAG_PREFIX % i, + cidr.network, mode, lease)) + + possible_leases += cidr.size + + # Cap the limit because creating lots of subnets can inflate + # this possible lease cap. 
+ cmd.append('--dhcp-lease-max=%d' % + min(possible_leases, self.conf.dnsmasq_lease_max)) + + cmd.append('--conf-file=%s' % self.conf.dnsmasq_config_file) + if self.conf.dnsmasq_dns_servers: + cmd.extend( + '--server=%s' % server + for server in self.conf.dnsmasq_dns_servers) + + if self.conf.dhcp_domain: + cmd.append('--domain=%s' % self.conf.dhcp_domain) + + ip_wrapper = ip_lib.IPWrapper(self.root_helper, + self.network.namespace) + ip_wrapper.netns.execute(cmd, addl_env=env) + + def _release_lease(self, mac_address, ip): + """Release a DHCP lease.""" + cmd = ['dhcp_release', self.interface_name, ip, mac_address] + ip_wrapper = ip_lib.IPWrapper(self.root_helper, + self.network.namespace) + ip_wrapper.netns.execute(cmd) + + def reload_allocations(self): + """Rebuild the dnsmasq config and signal the dnsmasq to reload.""" + + # If all subnets turn off dhcp, kill the process. + if not self._enable_dhcp(): + self.disable() + LOG.debug(_('Killing dhcpmasq for network since all subnets have ' + 'turned off DHCP: %s'), self.network.id) + return + + self._release_unused_leases() + self._output_hosts_file() + self._output_addn_hosts_file() + self._output_opts_file() + if self.active: + cmd = ['kill', '-HUP', self.pid] + utils.execute(cmd, self.root_helper) + else: + LOG.debug(_('Pid %d is stale, relaunching dnsmasq'), self.pid) + LOG.debug(_('Reloading allocations for network: %s'), self.network.id) + self.device_manager.update(self.network, self.interface_name) + + def _iter_hosts(self): + """Iterate over hosts. + + For each host on the network we yield a tuple containing: + ( + port, # a DictModel instance representing the port. + alloc, # a DictModel instance of the allocated ip and subnet. + host_name, # Host name. + name, # Host name and domain name in the format 'hostname.domain'. 
+ ) + """ + for port in self.network.ports: + for alloc in port.fixed_ips: + hostname = 'host-%s' % alloc.ip_address.replace( + '.', '-').replace(':', '-') + fqdn = '%s.%s' % (hostname, self.conf.dhcp_domain) + yield (port, alloc, hostname, fqdn) + + def _output_hosts_file(self): + """Writes a dnsmasq compatible dhcp hosts file. + + The generated file is sent to the --dhcp-hostsfile option of dnsmasq, + and lists the hosts on the network which should receive a dhcp lease. + Each line in this file is in the form:: + + 'mac_address,FQDN,ip_address' + + IMPORTANT NOTE: a dnsmasq instance does not resolve hosts defined in + this file if it did not give a lease to a host listed in it (e.g.: + multiple dnsmasq instances on the same network if this network is on + multiple network nodes). This file is only defining hosts which + should receive a dhcp lease, the hosts resolution in itself is + defined by the `_output_addn_hosts_file` method. + """ + buf = six.StringIO() + filename = self.get_conf_file_name('host') + + LOG.debug(_('Building host file: %s'), filename) + for (port, alloc, hostname, name) in self._iter_hosts(): + set_tag = '' + # (dzyu) Check if it is legal ipv6 address, if so, need wrap + # it with '[]' to let dnsmasq to distinguish MAC address from + # IPv6 address. 
+ ip_address = alloc.ip_address + if netaddr.valid_ipv6(ip_address): + ip_address = '[%s]' % ip_address + + LOG.debug(_('Adding %(mac)s : %(name)s : %(ip)s'), + {"mac": port.mac_address, "name": name, + "ip": ip_address}) + + if getattr(port, 'extra_dhcp_opts', False): + if self.version >= self.MINIMUM_VERSION: + set_tag = 'set:' + + buf.write('%s,%s,%s,%s%s\n' % + (port.mac_address, name, ip_address, + set_tag, port.id)) + else: + buf.write('%s,%s,%s\n' % + (port.mac_address, name, ip_address)) + + utils.replace_file(filename, buf.getvalue()) + LOG.debug(_('Done building host file %s'), filename) + return filename + + def _read_hosts_file_leases(self, filename): + leases = set() + if os.path.exists(filename): + with open(filename) as f: + for l in f.readlines(): + host = l.strip().split(',') + leases.add((host[2], host[0])) + return leases + + def _release_unused_leases(self): + filename = self.get_conf_file_name('host') + old_leases = self._read_hosts_file_leases(filename) + + new_leases = set() + for port in self.network.ports: + for alloc in port.fixed_ips: + new_leases.add((alloc.ip_address, port.mac_address)) + + for ip, mac in old_leases - new_leases: + self._release_lease(mac, ip) + + def _output_addn_hosts_file(self): + """Writes a dnsmasq compatible additional hosts file. + + The generated file is sent to the --addn-hosts option of dnsmasq, + and lists the hosts on the network which should be resolved even if + the dnsmaq instance did not give a lease to the host (see the + `_output_hosts_file` method). + Each line in this file is in the same form as a standard /etc/hosts + file. + """ + buf = six.StringIO() + for (port, alloc, hostname, fqdn) in self._iter_hosts(): + # It is compulsory to write the `fqdn` before the `hostname` in + # order to obtain it in PTR responses. 
+ buf.write('%s\t%s %s\n' % (alloc.ip_address, fqdn, hostname)) + addn_hosts = self.get_conf_file_name('addn_hosts') + utils.replace_file(addn_hosts, buf.getvalue()) + return addn_hosts + + def _output_opts_file(self): + """Write a dnsmasq compatible options file.""" + + if self.conf.enable_isolated_metadata: + subnet_to_interface_ip = self._make_subnet_interface_ip_map() + + options = [] + + dhcp_ips = collections.defaultdict(list) + subnet_idx_map = {} + for i, subnet in enumerate(self.network.subnets): + if not subnet.enable_dhcp: + continue + if subnet.dns_nameservers: + options.append( + self._format_option(i, 'dns-server', + ','.join(subnet.dns_nameservers))) + else: + # use the dnsmasq ip as nameservers only if there is no + # dns-server submitted by the server + subnet_idx_map[subnet.id] = i + + gateway = subnet.gateway_ip + host_routes = [] + for hr in subnet.host_routes: + if hr.destination == "0.0.0.0/0": + if not gateway: + gateway = hr.nexthop + else: + host_routes.append("%s,%s" % (hr.destination, hr.nexthop)) + + # Add host routes for isolated network segments + + if self._enable_metadata(subnet): + subnet_dhcp_ip = subnet_to_interface_ip[subnet.id] + host_routes.append( + '%s/32,%s' % (METADATA_DEFAULT_IP, subnet_dhcp_ip) + ) + + if host_routes: + if gateway and subnet.ip_version == 4: + host_routes.append("%s,%s" % ("0.0.0.0/0", gateway)) + options.append( + self._format_option(i, 'classless-static-route', + ','.join(host_routes))) + options.append( + self._format_option(i, WIN2k3_STATIC_DNS, + ','.join(host_routes))) + + if subnet.ip_version == 4: + if gateway: + options.append(self._format_option(i, 'router', gateway)) + else: + options.append(self._format_option(i, 'router')) + + for port in self.network.ports: + if getattr(port, 'extra_dhcp_opts', False): + options.extend( + self._format_option(port.id, opt.opt_name, opt.opt_value) + for opt in port.extra_dhcp_opts) + + # provides all dnsmasq ip as dns-server if there is more than + # one 
dnsmasq for a subnet and there is no dns-server submitted + # by the server + if port.device_owner == constants.DEVICE_OWNER_DHCP: + for ip in port.fixed_ips: + i = subnet_idx_map.get(ip.subnet_id) + if i is None: + continue + dhcp_ips[i].append(ip.ip_address) + + for i, ips in dhcp_ips.items(): + if len(ips) > 1: + options.append(self._format_option(i, + 'dns-server', + ','.join(ips))) + + name = self.get_conf_file_name('opts') + utils.replace_file(name, '\n'.join(options)) + return name + + def _make_subnet_interface_ip_map(self): + ip_dev = ip_lib.IPDevice( + self.interface_name, + self.root_helper, + self.network.namespace + ) + + subnet_lookup = dict( + (netaddr.IPNetwork(subnet.cidr), subnet.id) + for subnet in self.network.subnets + ) + + retval = {} + + for addr in ip_dev.addr.list(): + ip_net = netaddr.IPNetwork(addr['cidr']) + + if ip_net in subnet_lookup: + retval[subnet_lookup[ip_net]] = addr['cidr'].split('/')[0] + + return retval + + def _format_option(self, tag, option, *args): + """Format DHCP option by option name or code.""" + if self.version >= self.MINIMUM_VERSION: + set_tag = 'tag:' + else: + set_tag = '' + + option = str(option) + + if isinstance(tag, int): + tag = self._TAG_PREFIX % tag + + if not option.isdigit(): + option = 'option:%s' % option + + return ','.join((set_tag + tag, '%s' % option) + args) + + def _enable_metadata(self, subnet): + '''Determine if the metadata route will be pushed to hosts on subnet. + + If subnet has a Neutron router attached, we want the hosts to get + metadata from the router's proxy via their default route instead. 
+ ''' + if self.conf.enable_isolated_metadata and subnet.ip_version == 4: + if subnet.gateway_ip is None: + return True + else: + for port in self.network.ports: + if port.device_owner == constants.DEVICE_OWNER_ROUTER_INTF: + for alloc in port.fixed_ips: + if alloc.subnet_id == subnet.id: + return False + return True + else: + return False + + @classmethod + def lease_update(cls): + network_id = os.environ.get(cls.NEUTRON_NETWORK_ID_KEY) + dhcp_relay_socket = os.environ.get(cls.NEUTRON_RELAY_SOCKET_PATH_KEY) + + action = sys.argv[1] + if action not in ('add', 'del', 'old'): + sys.exit() + + mac_address = sys.argv[2] + ip_address = sys.argv[3] + + if action == 'del': + lease_remaining = 0 + else: + lease_remaining = int(os.environ.get('DNSMASQ_TIME_REMAINING', 0)) + + data = dict(network_id=network_id, mac_address=mac_address, + ip_address=ip_address, lease_remaining=lease_remaining) + + if os.path.exists(dhcp_relay_socket): + sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) + sock.connect(dhcp_relay_socket) + sock.send(jsonutils.dumps(data)) + sock.close() + + +class DeviceManager(object): + + def __init__(self, conf, root_helper, plugin): + self.conf = conf + self.root_helper = root_helper + self.plugin = plugin + if not conf.interface_driver: + msg = _('An interface driver must be specified') + LOG.error(msg) + raise SystemExit(1) + try: + self.driver = importutils.import_object( + conf.interface_driver, conf) + except Exception as e: + msg = (_("Error importing interface driver '%(driver)s': " + "%(inner)s") % {'driver': conf.interface_driver, + 'inner': e}) + LOG.error(msg) + raise SystemExit(1) + + def get_interface_name(self, network, port): + """Return interface(device) name for use by the DHCP process.""" + return self.driver.get_device_name(port) + + def get_device_id(self, network): + """Return a unique DHCP device ID for this host on the network.""" + # There could be more than one dhcp server per network, so create + # a device id that combines 
host and network ids + return commonutils.get_dhcp_agent_device_id(network.id, self.conf.host) + + def _set_default_route(self, network, device_name): + """Sets the default gateway for this dhcp namespace. + + This method is idempotent and will only adjust the route if adjusting + it would change it from what it already is. This makes it safe to call + and avoids unnecessary perturbation of the system. + """ + device = ip_lib.IPDevice(device_name, + self.root_helper, + network.namespace) + gateway = device.route.get_gateway() + if gateway: + gateway = gateway['gateway'] + + for subnet in network.subnets: + skip_subnet = ( + subnet.ip_version != 4 + or not subnet.enable_dhcp + or subnet.gateway_ip is None) + + if skip_subnet: + continue + + if gateway != subnet.gateway_ip: + m = _('Setting gateway for dhcp netns on net %(n)s to %(ip)s') + LOG.debug(m, {'n': network.id, 'ip': subnet.gateway_ip}) + + device.route.add_gateway(subnet.gateway_ip) + + return + + # No subnets on the network have a valid gateway. Clean it up to avoid + # confusion from seeing an invalid gateway here. 
+ if gateway is not None: + msg = _('Removing gateway for dhcp netns on net %s') + LOG.debug(msg, network.id) + + device.route.delete_gateway(gateway) + + def setup_dhcp_port(self, network): + """Create/update DHCP port for the host if needed and return port.""" + + device_id = self.get_device_id(network) + subnets = {} + dhcp_enabled_subnet_ids = [] + for subnet in network.subnets: + if subnet.enable_dhcp: + dhcp_enabled_subnet_ids.append(subnet.id) + subnets[subnet.id] = subnet + + dhcp_port = None + for port in network.ports: + port_device_id = getattr(port, 'device_id', None) + if port_device_id == device_id: + port_fixed_ips = [] + for fixed_ip in port.fixed_ips: + port_fixed_ips.append({'subnet_id': fixed_ip.subnet_id, + 'ip_address': fixed_ip.ip_address}) + if fixed_ip.subnet_id in dhcp_enabled_subnet_ids: + dhcp_enabled_subnet_ids.remove(fixed_ip.subnet_id) + + # If there are dhcp_enabled_subnet_ids here that means that + # we need to add those to the port and call update. + if dhcp_enabled_subnet_ids: + port_fixed_ips.extend( + [dict(subnet_id=s) for s in dhcp_enabled_subnet_ids]) + dhcp_port = self.plugin.update_dhcp_port( + port.id, {'port': {'network_id': network.id, + 'fixed_ips': port_fixed_ips}}) + if not dhcp_port: + raise exceptions.Conflict() + else: + dhcp_port = port + # break since we found port that matches device_id + break + + # check for a reserved DHCP port + if dhcp_port is None: + LOG.debug(_('DHCP port %(device_id)s on network %(network_id)s' + ' does not yet exist. Checking for a reserved port.'), + {'device_id': device_id, 'network_id': network.id}) + for port in network.ports: + port_device_id = getattr(port, 'device_id', None) + if port_device_id == constants.DEVICE_ID_RESERVED_DHCP_PORT: + dhcp_port = self.plugin.update_dhcp_port( + port.id, {'port': {'network_id': network.id, + 'device_id': device_id}}) + if dhcp_port: + break + + # DHCP port has not yet been created. 
+ if dhcp_port is None: + LOG.debug(_('DHCP port %(device_id)s on network %(network_id)s' + ' does not yet exist.'), {'device_id': device_id, + 'network_id': network.id}) + port_dict = dict( + name='', + admin_state_up=True, + device_id=device_id, + network_id=network.id, + tenant_id=network.tenant_id, + fixed_ips=[dict(subnet_id=s) for s in dhcp_enabled_subnet_ids]) + dhcp_port = self.plugin.create_dhcp_port({'port': port_dict}) + + if not dhcp_port: + raise exceptions.Conflict() + + # Convert subnet_id to subnet dict + fixed_ips = [dict(subnet_id=fixed_ip.subnet_id, + ip_address=fixed_ip.ip_address, + subnet=subnets[fixed_ip.subnet_id]) + for fixed_ip in dhcp_port.fixed_ips] + + ips = [DictModel(item) if isinstance(item, dict) else item + for item in fixed_ips] + dhcp_port.fixed_ips = ips + + return dhcp_port + + def setup(self, network): + """Create and initialize a device for network's DHCP on this host.""" + port = self.setup_dhcp_port(network) + interface_name = self.get_interface_name(network, port) + + if ip_lib.ensure_device_is_ready(interface_name, + self.root_helper, + network.namespace): + LOG.debug(_('Reusing existing device: %s.'), interface_name) + else: + self.driver.plug(network.id, + port.id, + interface_name, + port.mac_address, + namespace=network.namespace) + ip_cidrs = [] + for fixed_ip in port.fixed_ips: + subnet = fixed_ip.subnet + net = netaddr.IPNetwork(subnet.cidr) + ip_cidr = '%s/%s' % (fixed_ip.ip_address, net.prefixlen) + ip_cidrs.append(ip_cidr) + + if (self.conf.enable_isolated_metadata and + self.conf.use_namespaces): + ip_cidrs.append(METADATA_DEFAULT_CIDR) + + self.driver.init_l3(interface_name, ip_cidrs, + namespace=network.namespace) + + # ensure that the dhcp interface is first in the list + if network.namespace is None: + device = ip_lib.IPDevice(interface_name, + self.root_helper) + device.route.pullup_route(interface_name) + + if self.conf.use_namespaces: + self._set_default_route(network, interface_name) + + return 
interface_name + + def update(self, network, device_name): + """Update device settings for the network's DHCP on this host.""" + if self.conf.use_namespaces: + self._set_default_route(network, device_name) + + def destroy(self, network, device_name): + """Destroy the device used for the network's DHCP on this host.""" + self.driver.unplug(device_name, namespace=network.namespace) + + self.plugin.release_dhcp_port(network.id, + self.get_device_id(network)) diff --git a/neutron/agent/linux/external_process.py b/neutron/agent/linux/external_process.py new file mode 100644 index 000000000..d0e990880 --- /dev/null +++ b/neutron/agent/linux/external_process.py @@ -0,0 +1,104 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2012 New Dream Network, LLC (DreamHost) +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Mark McClain, DreamHost + +import os + +from oslo.config import cfg + +from neutron.agent.linux import ip_lib +from neutron.agent.linux import utils +from neutron.openstack.common import log as logging + +LOG = logging.getLogger(__name__) + +OPTS = [ + cfg.StrOpt('external_pids', + default='$state_path/external/pids', + help=_('Location to store child pid files')), +] + +cfg.CONF.register_opts(OPTS) + + +class ProcessManager(object): + """An external process manager for Neutron spawned processes. + + Note: The manager expects uuid to be in cmdline. 
+ """ + def __init__(self, conf, uuid, root_helper='sudo', namespace=None): + self.conf = conf + self.uuid = uuid + self.root_helper = root_helper + self.namespace = namespace + + def enable(self, cmd_callback): + if not self.active: + cmd = cmd_callback(self.get_pid_file_name(ensure_pids_dir=True)) + + ip_wrapper = ip_lib.IPWrapper(self.root_helper, self.namespace) + ip_wrapper.netns.execute(cmd) + + def disable(self): + pid = self.pid + + if self.active: + cmd = ['kill', '-9', pid] + utils.execute(cmd, self.root_helper) + elif pid: + LOG.debug(_('Process for %(uuid)s pid %(pid)d is stale, ignoring ' + 'command'), {'uuid': self.uuid, 'pid': pid}) + else: + LOG.debug(_('No process started for %s'), self.uuid) + + def get_pid_file_name(self, ensure_pids_dir=False): + """Returns the file name for a given kind of config file.""" + pids_dir = os.path.abspath(os.path.normpath(self.conf.external_pids)) + if ensure_pids_dir and not os.path.isdir(pids_dir): + os.makedirs(pids_dir, 0o755) + + return os.path.join(pids_dir, self.uuid + '.pid') + + @property + def pid(self): + """Last known pid for this external process spawned for this uuid.""" + file_name = self.get_pid_file_name() + msg = _('Error while reading %s') + + try: + with open(file_name, 'r') as f: + return int(f.read()) + except IOError: + msg = _('Unable to access %s') + except ValueError: + msg = _('Unable to convert value in %s') + + LOG.debug(msg, file_name) + return None + + @property + def active(self): + pid = self.pid + if pid is None: + return False + + cmdline = '/proc/%s/cmdline' % pid + try: + with open(cmdline, "r") as f: + return self.uuid in f.readline() + except IOError: + return False diff --git a/neutron/agent/linux/interface.py b/neutron/agent/linux/interface.py new file mode 100644 index 000000000..a31250ee7 --- /dev/null +++ b/neutron/agent/linux/interface.py @@ -0,0 +1,450 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 OpenStack Foundation +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import abc + +import netaddr +from oslo.config import cfg +import six + +from neutron.agent.common import config +from neutron.agent.linux import ip_lib +from neutron.agent.linux import ovs_lib +from neutron.agent.linux import utils +from neutron.common import exceptions +from neutron.extensions import flavor +from neutron.openstack.common import importutils +from neutron.openstack.common import log as logging + + +LOG = logging.getLogger(__name__) + +OPTS = [ + cfg.StrOpt('ovs_integration_bridge', + default='br-int', + help=_('Name of Open vSwitch bridge to use')), + cfg.BoolOpt('ovs_use_veth', + default=False, + help=_('Uses veth for an interface or not')), + cfg.IntOpt('network_device_mtu', + help=_('MTU setting for device.')), + cfg.StrOpt('meta_flavor_driver_mappings', + help=_('Mapping between flavor and LinuxInterfaceDriver')), + cfg.StrOpt('admin_user', + help=_("Admin username")), + cfg.StrOpt('admin_password', + help=_("Admin password"), + secret=True), + cfg.StrOpt('admin_tenant_name', + help=_("Admin tenant name")), + cfg.StrOpt('auth_url', + help=_("Authentication URL")), + cfg.StrOpt('auth_strategy', default='keystone', + help=_("The type of authentication to use")), + cfg.StrOpt('auth_region', + help=_("Authentication region")), +] + + +@six.add_metaclass(abc.ABCMeta) +class LinuxInterfaceDriver(object): + + # from linux IF_NAMESIZE + DEV_NAME_LEN = 14 + DEV_NAME_PREFIX = 'tap' + + def __init__(self, 
conf): + self.conf = conf + self.root_helper = config.get_root_helper(conf) + + def init_l3(self, device_name, ip_cidrs, namespace=None, + preserve_ips=[], gateway=None, extra_subnets=[]): + """Set the L3 settings for the interface using data from the port. + + ip_cidrs: list of 'X.X.X.X/YY' strings + preserve_ips: list of ip cidrs that should not be removed from device + """ + device = ip_lib.IPDevice(device_name, + self.root_helper, + namespace=namespace) + + previous = {} + for address in device.addr.list(scope='global', filters=['permanent']): + previous[address['cidr']] = address['ip_version'] + + # add new addresses + for ip_cidr in ip_cidrs: + + net = netaddr.IPNetwork(ip_cidr) + # Convert to compact IPv6 address because the return values of + # "ip addr list" are compact. + if net.version == 6: + ip_cidr = str(net) + if ip_cidr in previous: + del previous[ip_cidr] + continue + + device.addr.add(net.version, ip_cidr, str(net.broadcast)) + + # clean up any old addresses + for ip_cidr, ip_version in previous.items(): + if ip_cidr not in preserve_ips: + device.addr.delete(ip_version, ip_cidr) + + if gateway: + device.route.add_gateway(gateway) + + new_onlink_routes = set(s['cidr'] for s in extra_subnets) + existing_onlink_routes = set(device.route.list_onlink_routes()) + for route in new_onlink_routes - existing_onlink_routes: + device.route.add_onlink_route(route) + for route in existing_onlink_routes - new_onlink_routes: + device.route.delete_onlink_route(route) + + def check_bridge_exists(self, bridge): + if not ip_lib.device_exists(bridge): + raise exceptions.BridgeDoesNotExist(bridge=bridge) + + def get_device_name(self, port): + return (self.DEV_NAME_PREFIX + port.id)[:self.DEV_NAME_LEN] + + @abc.abstractmethod + def plug(self, network_id, port_id, device_name, mac_address, + bridge=None, namespace=None, prefix=None): + """Plug in the interface.""" + + @abc.abstractmethod + def unplug(self, device_name, bridge=None, namespace=None, prefix=None): + 
"""Unplug the interface.""" + + +class NullDriver(LinuxInterfaceDriver): + def plug(self, network_id, port_id, device_name, mac_address, + bridge=None, namespace=None, prefix=None): + pass + + def unplug(self, device_name, bridge=None, namespace=None, prefix=None): + pass + + +class OVSInterfaceDriver(LinuxInterfaceDriver): + """Driver for creating an internal interface on an OVS bridge.""" + + DEV_NAME_PREFIX = 'tap' + + def __init__(self, conf): + super(OVSInterfaceDriver, self).__init__(conf) + if self.conf.ovs_use_veth: + self.DEV_NAME_PREFIX = 'ns-' + + def _get_tap_name(self, dev_name, prefix=None): + if self.conf.ovs_use_veth: + dev_name = dev_name.replace(prefix or self.DEV_NAME_PREFIX, 'tap') + return dev_name + + def _ovs_add_port(self, bridge, device_name, port_id, mac_address, + internal=True): + cmd = ['ovs-vsctl', '--', '--if-exists', 'del-port', device_name, '--', + 'add-port', bridge, device_name] + if internal: + cmd += ['--', 'set', 'Interface', device_name, 'type=internal'] + cmd += ['--', 'set', 'Interface', device_name, + 'external-ids:iface-id=%s' % port_id, + '--', 'set', 'Interface', device_name, + 'external-ids:iface-status=active', + '--', 'set', 'Interface', device_name, + 'external-ids:attached-mac=%s' % mac_address] + utils.execute(cmd, self.root_helper) + + def plug(self, network_id, port_id, device_name, mac_address, + bridge=None, namespace=None, prefix=None): + """Plug in the interface.""" + if not bridge: + bridge = self.conf.ovs_integration_bridge + + if not ip_lib.device_exists(device_name, + self.root_helper, + namespace=namespace): + + self.check_bridge_exists(bridge) + + ip = ip_lib.IPWrapper(self.root_helper) + tap_name = self._get_tap_name(device_name, prefix) + + if self.conf.ovs_use_veth: + # Create ns_dev in a namespace if one is configured. 
+ root_dev, ns_dev = ip.add_veth(tap_name, + device_name, + namespace2=namespace) + else: + ns_dev = ip.device(device_name) + + internal = not self.conf.ovs_use_veth + self._ovs_add_port(bridge, tap_name, port_id, mac_address, + internal=internal) + + ns_dev.link.set_address(mac_address) + + if self.conf.network_device_mtu: + ns_dev.link.set_mtu(self.conf.network_device_mtu) + if self.conf.ovs_use_veth: + root_dev.link.set_mtu(self.conf.network_device_mtu) + + # Add an interface created by ovs to the namespace. + if not self.conf.ovs_use_veth and namespace: + namespace_obj = ip.ensure_namespace(namespace) + namespace_obj.add_device_to_namespace(ns_dev) + + ns_dev.link.set_up() + if self.conf.ovs_use_veth: + root_dev.link.set_up() + else: + LOG.info(_("Device %s already exists"), device_name) + + def unplug(self, device_name, bridge=None, namespace=None, prefix=None): + """Unplug the interface.""" + if not bridge: + bridge = self.conf.ovs_integration_bridge + + tap_name = self._get_tap_name(device_name, prefix) + self.check_bridge_exists(bridge) + ovs = ovs_lib.OVSBridge(bridge, self.root_helper) + + try: + ovs.delete_port(tap_name) + if self.conf.ovs_use_veth: + device = ip_lib.IPDevice(device_name, + self.root_helper, + namespace) + device.link.delete() + LOG.debug(_("Unplugged interface '%s'"), device_name) + except RuntimeError: + LOG.error(_("Failed unplugging interface '%s'"), + device_name) + + +class MidonetInterfaceDriver(LinuxInterfaceDriver): + + def plug(self, network_id, port_id, device_name, mac_address, + bridge=None, namespace=None, prefix=None): + """This method is called by the Dhcp agent or by the L3 agent + when a new network is created + """ + if not ip_lib.device_exists(device_name, + self.root_helper, + namespace=namespace): + ip = ip_lib.IPWrapper(self.root_helper) + tap_name = device_name.replace(prefix or 'tap', 'tap') + + # Create ns_dev in a namespace if one is configured. 
+ root_dev, ns_dev = ip.add_veth(tap_name, device_name, + namespace2=namespace) + + ns_dev.link.set_address(mac_address) + + # Add an interface created by ovs to the namespace. + namespace_obj = ip.ensure_namespace(namespace) + namespace_obj.add_device_to_namespace(ns_dev) + + ns_dev.link.set_up() + root_dev.link.set_up() + + cmd = ['mm-ctl', '--bind-port', port_id, device_name] + utils.execute(cmd, self.root_helper) + + else: + LOG.info(_("Device %s already exists"), device_name) + + def unplug(self, device_name, bridge=None, namespace=None, prefix=None): + # the port will be deleted by the dhcp agent that will call the plugin + device = ip_lib.IPDevice(device_name, + self.root_helper, + namespace) + try: + device.link.delete() + except RuntimeError: + LOG.error(_("Failed unplugging interface '%s'"), device_name) + LOG.debug(_("Unplugged interface '%s'"), device_name) + + ip_lib.IPWrapper( + self.root_helper, namespace).garbage_collect_namespace() + + +class IVSInterfaceDriver(LinuxInterfaceDriver): + """Driver for creating an internal interface on an IVS bridge.""" + + DEV_NAME_PREFIX = 'tap' + + def __init__(self, conf): + super(IVSInterfaceDriver, self).__init__(conf) + self.DEV_NAME_PREFIX = 'ns-' + + def _get_tap_name(self, dev_name, prefix=None): + dev_name = dev_name.replace(prefix or self.DEV_NAME_PREFIX, 'tap') + return dev_name + + def _ivs_add_port(self, device_name, port_id, mac_address): + cmd = ['ivs-ctl', 'add-port', device_name] + utils.execute(cmd, self.root_helper) + + def plug(self, network_id, port_id, device_name, mac_address, + bridge=None, namespace=None, prefix=None): + """Plug in the interface.""" + if not ip_lib.device_exists(device_name, + self.root_helper, + namespace=namespace): + + ip = ip_lib.IPWrapper(self.root_helper) + tap_name = self._get_tap_name(device_name, prefix) + + root_dev, ns_dev = ip.add_veth(tap_name, device_name) + + self._ivs_add_port(tap_name, port_id, mac_address) + + ns_dev = ip.device(device_name) + 
ns_dev.link.set_address(mac_address) + + if self.conf.network_device_mtu: + ns_dev.link.set_mtu(self.conf.network_device_mtu) + root_dev.link.set_mtu(self.conf.network_device_mtu) + + if namespace: + namespace_obj = ip.ensure_namespace(namespace) + namespace_obj.add_device_to_namespace(ns_dev) + + ns_dev.link.set_up() + root_dev.link.set_up() + else: + LOG.info(_("Device %s already exists"), device_name) + + def unplug(self, device_name, bridge=None, namespace=None, prefix=None): + """Unplug the interface.""" + tap_name = self._get_tap_name(device_name, prefix) + try: + cmd = ['ivs-ctl', 'del-port', tap_name] + utils.execute(cmd, self.root_helper) + device = ip_lib.IPDevice(device_name, + self.root_helper, + namespace) + device.link.delete() + LOG.debug(_("Unplugged interface '%s'"), device_name) + except RuntimeError: + LOG.error(_("Failed unplugging interface '%s'"), + device_name) + + +class BridgeInterfaceDriver(LinuxInterfaceDriver): + """Driver for creating bridge interfaces.""" + + DEV_NAME_PREFIX = 'ns-' + + def plug(self, network_id, port_id, device_name, mac_address, + bridge=None, namespace=None, prefix=None): + """Plugin the interface.""" + if not ip_lib.device_exists(device_name, + self.root_helper, + namespace=namespace): + ip = ip_lib.IPWrapper(self.root_helper) + + # Enable agent to define the prefix + if prefix: + tap_name = device_name.replace(prefix, 'tap') + else: + tap_name = device_name.replace(self.DEV_NAME_PREFIX, 'tap') + # Create ns_veth in a namespace if one is configured. 
+ root_veth, ns_veth = ip.add_veth(tap_name, device_name, + namespace2=namespace) + ns_veth.link.set_address(mac_address) + + if self.conf.network_device_mtu: + root_veth.link.set_mtu(self.conf.network_device_mtu) + ns_veth.link.set_mtu(self.conf.network_device_mtu) + + root_veth.link.set_up() + ns_veth.link.set_up() + + else: + LOG.info(_("Device %s already exists"), device_name) + + def unplug(self, device_name, bridge=None, namespace=None, prefix=None): + """Unplug the interface.""" + device = ip_lib.IPDevice(device_name, self.root_helper, namespace) + try: + device.link.delete() + LOG.debug(_("Unplugged interface '%s'"), device_name) + except RuntimeError: + LOG.error(_("Failed unplugging interface '%s'"), + device_name) + + +class MetaInterfaceDriver(LinuxInterfaceDriver): + def __init__(self, conf): + super(MetaInterfaceDriver, self).__init__(conf) + from neutronclient.v2_0 import client + self.neutron = client.Client( + username=self.conf.admin_user, + password=self.conf.admin_password, + tenant_name=self.conf.admin_tenant_name, + auth_url=self.conf.auth_url, + auth_strategy=self.conf.auth_strategy, + region_name=self.conf.auth_region + ) + self.flavor_driver_map = {} + for net_flavor, driver_name in [ + driver_set.split(':') + for driver_set in + self.conf.meta_flavor_driver_mappings.split(',')]: + self.flavor_driver_map[net_flavor] = self._load_driver(driver_name) + + def _get_flavor_by_network_id(self, network_id): + network = self.neutron.show_network(network_id) + return network['network'][flavor.FLAVOR_NETWORK] + + def _get_driver_by_network_id(self, network_id): + net_flavor = self._get_flavor_by_network_id(network_id) + return self.flavor_driver_map[net_flavor] + + def _set_device_plugin_tag(self, network_id, device_name, namespace=None): + plugin_tag = self._get_flavor_by_network_id(network_id) + device = ip_lib.IPDevice(device_name, self.conf.root_helper, namespace) + device.link.set_alias(plugin_tag) + + def _get_device_plugin_tag(self, 
device_name, namespace=None): + device = ip_lib.IPDevice(device_name, self.conf.root_helper, namespace) + return device.link.alias + + def get_device_name(self, port): + driver = self._get_driver_by_network_id(port.network_id) + return driver.get_device_name(port) + + def plug(self, network_id, port_id, device_name, mac_address, + bridge=None, namespace=None, prefix=None): + driver = self._get_driver_by_network_id(network_id) + ret = driver.plug(network_id, port_id, device_name, mac_address, + bridge=bridge, namespace=namespace, prefix=prefix) + self._set_device_plugin_tag(network_id, device_name, namespace) + return ret + + def unplug(self, device_name, bridge=None, namespace=None, prefix=None): + plugin_tag = self._get_device_plugin_tag(device_name, namespace) + driver = self.flavor_driver_map[plugin_tag] + return driver.unplug(device_name, bridge, namespace, prefix) + + def _load_driver(self, driver_provider): + LOG.debug(_("Driver location: %s"), driver_provider) + plugin_klass = importutils.import_class(driver_provider) + return plugin_klass(self.conf) diff --git a/neutron/agent/linux/ip_lib.py b/neutron/agent/linux/ip_lib.py new file mode 100644 index 000000000..c88c54c20 --- /dev/null +++ b/neutron/agent/linux/ip_lib.py @@ -0,0 +1,569 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# Copyright 2012 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import netaddr +from oslo.config import cfg + +from neutron.agent.linux import utils +from neutron.common import exceptions + + +OPTS = [ + cfg.BoolOpt('ip_lib_force_root', + default=False, + help=_('Force ip_lib calls to use the root helper')), +] + + +VETH_MAX_NAME_LENGTH = 15 +LOOPBACK_DEVNAME = 'lo' +# NOTE(ethuleau): depend of the version of iproute2, the vlan +# interface details vary. +VLAN_INTERFACE_DETAIL = ['vlan protocol 802.1q', + 'vlan protocol 802.1Q', + 'vlan id'] + + +class SubProcessBase(object): + def __init__(self, root_helper=None, namespace=None): + self.root_helper = root_helper + self.namespace = namespace + try: + self.force_root = cfg.CONF.ip_lib_force_root + except cfg.NoSuchOptError: + # Only callers that need to force use of the root helper + # need to register the option. + self.force_root = False + + def _run(self, options, command, args): + if self.namespace: + return self._as_root(options, command, args) + elif self.force_root: + # Force use of the root helper to ensure that commands + # will execute in dom0 when running under XenServer/XCP. 
+ return self._execute(options, command, args, self.root_helper) + else: + return self._execute(options, command, args) + + def _as_root(self, options, command, args, use_root_namespace=False): + if not self.root_helper: + raise exceptions.SudoRequired() + + namespace = self.namespace if not use_root_namespace else None + + return self._execute(options, + command, + args, + self.root_helper, + namespace) + + @classmethod + def _execute(cls, options, command, args, root_helper=None, + namespace=None): + opt_list = ['-%s' % o for o in options] + if namespace: + ip_cmd = ['ip', 'netns', 'exec', namespace, 'ip'] + else: + ip_cmd = ['ip'] + return utils.execute(ip_cmd + opt_list + [command] + list(args), + root_helper=root_helper) + + +class IPWrapper(SubProcessBase): + def __init__(self, root_helper=None, namespace=None): + super(IPWrapper, self).__init__(root_helper=root_helper, + namespace=namespace) + self.netns = IpNetnsCommand(self) + + def device(self, name): + return IPDevice(name, self.root_helper, self.namespace) + + def get_devices(self, exclude_loopback=False): + retval = [] + output = self._execute(['o', 'd'], 'link', ('list',), + self.root_helper, self.namespace) + for line in output.split('\n'): + if '<' not in line: + continue + tokens = line.split(' ', 2) + if len(tokens) == 3: + if any(v in tokens[2] for v in VLAN_INTERFACE_DETAIL): + delimiter = '@' + else: + delimiter = ':' + name = tokens[1].rpartition(delimiter)[0].strip() + + if exclude_loopback and name == LOOPBACK_DEVNAME: + continue + + retval.append(IPDevice(name, + self.root_helper, + self.namespace)) + return retval + + def add_tuntap(self, name, mode='tap'): + self._as_root('', 'tuntap', ('add', name, 'mode', mode)) + return IPDevice(name, self.root_helper, self.namespace) + + def add_veth(self, name1, name2, namespace2=None): + args = ['add', name1, 'type', 'veth', 'peer', 'name', name2] + + if namespace2 is None: + namespace2 = self.namespace + else: + self.ensure_namespace(namespace2) + 
args += ['netns', namespace2] + + self._as_root('', 'link', tuple(args)) + + return (IPDevice(name1, self.root_helper, self.namespace), + IPDevice(name2, self.root_helper, namespace2)) + + def ensure_namespace(self, name): + if not self.netns.exists(name): + ip = self.netns.add(name) + lo = ip.device(LOOPBACK_DEVNAME) + lo.link.set_up() + else: + ip = IPWrapper(self.root_helper, name) + return ip + + def namespace_is_empty(self): + return not self.get_devices(exclude_loopback=True) + + def garbage_collect_namespace(self): + """Conditionally destroy the namespace if it is empty.""" + if self.namespace and self.netns.exists(self.namespace): + if self.namespace_is_empty(): + self.netns.delete(self.namespace) + return True + return False + + def add_device_to_namespace(self, device): + if self.namespace: + device.link.set_netns(self.namespace) + + def add_vxlan(self, name, vni, group=None, dev=None, ttl=None, tos=None, + local=None, port=None, proxy=False): + cmd = ['add', name, 'type', 'vxlan', 'id', vni] + if group: + cmd.extend(['group', group]) + if dev: + cmd.extend(['dev', dev]) + if ttl: + cmd.extend(['ttl', ttl]) + if tos: + cmd.extend(['tos', tos]) + if local: + cmd.extend(['local', local]) + if proxy: + cmd.append('proxy') + # tuple: min,max + if port and len(port) == 2: + cmd.extend(['port', port[0], port[1]]) + elif port: + raise exceptions.NetworkVxlanPortRangeError(vxlan_range=port) + self._as_root('', 'link', cmd) + return (IPDevice(name, self.root_helper, self.namespace)) + + @classmethod + def get_namespaces(cls, root_helper): + output = cls._execute('', 'netns', ('list',), root_helper=root_helper) + return [l.strip() for l in output.split('\n')] + + +class IpRule(IPWrapper): + def add_rule_from(self, ip, table, rule_pr): + args = ['add', 'from', ip, 'lookup', table, 'priority', rule_pr] + ip = self._as_root('', 'rule', tuple(args)) + return ip + + def delete_rule_priority(self, rule_pr): + args = ['del', 'priority', rule_pr] + ip = self._as_root('', 
'rule', tuple(args)) + return ip + + +class IPDevice(SubProcessBase): + def __init__(self, name, root_helper=None, namespace=None): + super(IPDevice, self).__init__(root_helper=root_helper, + namespace=namespace) + self.name = name + self.link = IpLinkCommand(self) + self.addr = IpAddrCommand(self) + self.route = IpRouteCommand(self) + self.neigh = IpNeighCommand(self) + + def __eq__(self, other): + return (other is not None and self.name == other.name + and self.namespace == other.namespace) + + def __str__(self): + return self.name + + +class IpCommandBase(object): + COMMAND = '' + + def __init__(self, parent): + self._parent = parent + + def _run(self, *args, **kwargs): + return self._parent._run(kwargs.get('options', []), self.COMMAND, args) + + def _as_root(self, *args, **kwargs): + return self._parent._as_root(kwargs.get('options', []), + self.COMMAND, + args, + kwargs.get('use_root_namespace', False)) + + +class IpDeviceCommandBase(IpCommandBase): + @property + def name(self): + return self._parent.name + + +class IpLinkCommand(IpDeviceCommandBase): + COMMAND = 'link' + + def set_address(self, mac_address): + self._as_root('set', self.name, 'address', mac_address) + + def set_mtu(self, mtu_size): + self._as_root('set', self.name, 'mtu', mtu_size) + + def set_up(self): + self._as_root('set', self.name, 'up') + + def set_down(self): + self._as_root('set', self.name, 'down') + + def set_netns(self, namespace): + self._as_root('set', self.name, 'netns', namespace) + self._parent.namespace = namespace + + def set_name(self, name): + self._as_root('set', self.name, 'name', name) + self._parent.name = name + + def set_alias(self, alias_name): + self._as_root('set', self.name, 'alias', alias_name) + + def delete(self): + self._as_root('delete', self.name) + + @property + def address(self): + return self.attributes.get('link/ether') + + @property + def state(self): + return self.attributes.get('state') + + @property + def mtu(self): + return 
self.attributes.get('mtu') + + @property + def qdisc(self): + return self.attributes.get('qdisc') + + @property + def qlen(self): + return self.attributes.get('qlen') + + @property + def alias(self): + return self.attributes.get('alias') + + @property + def attributes(self): + return self._parse_line(self._run('show', self.name, options='o')) + + def _parse_line(self, value): + if not value: + return {} + + device_name, settings = value.replace("\\", '').split('>', 1) + tokens = settings.split() + keys = tokens[::2] + values = [int(v) if v.isdigit() else v for v in tokens[1::2]] + + retval = dict(zip(keys, values)) + return retval + + +class IpAddrCommand(IpDeviceCommandBase): + COMMAND = 'addr' + + def add(self, ip_version, cidr, broadcast, scope='global'): + self._as_root('add', + cidr, + 'brd', + broadcast, + 'scope', + scope, + 'dev', + self.name, + options=[ip_version]) + + def delete(self, ip_version, cidr): + self._as_root('del', + cidr, + 'dev', + self.name, + options=[ip_version]) + + def flush(self): + self._as_root('flush', self.name) + + def list(self, scope=None, to=None, filters=None): + if filters is None: + filters = [] + + retval = [] + + if scope: + filters += ['scope', scope] + if to: + filters += ['to', to] + + for line in self._run('show', self.name, *filters).split('\n'): + line = line.strip() + if not line.startswith('inet'): + continue + parts = line.split() + if parts[0] == 'inet6': + version = 6 + scope = parts[3] + broadcast = '::' + else: + version = 4 + if parts[2] == 'brd': + broadcast = parts[3] + scope = parts[5] + else: + # sometimes output of 'ip a' might look like: + # inet 192.168.100.100/24 scope global eth0 + # and broadcast needs to be calculated from CIDR + broadcast = str(netaddr.IPNetwork(parts[1]).broadcast) + scope = parts[3] + + retval.append(dict(cidr=parts[1], + broadcast=broadcast, + scope=scope, + ip_version=version, + dynamic=('dynamic' == parts[-1]))) + return retval + + +class IpRouteCommand(IpDeviceCommandBase): 
+ COMMAND = 'route' + + def add_gateway(self, gateway, metric=None, table=None): + args = ['replace', 'default', 'via', gateway] + if metric: + args += ['metric', metric] + args += ['dev', self.name] + if table: + args += ['table', table] + self._as_root(*args) + + def delete_gateway(self, gateway=None, table=None): + args = ['del', 'default'] + if gateway: + args += ['via', gateway] + args += ['dev', self.name] + if table: + args += ['table', table] + self._as_root(*args) + + def list_onlink_routes(self): + def iterate_routes(): + output = self._run('list', 'dev', self.name, 'scope', 'link') + for line in output.split('\n'): + line = line.strip() + if line and not line.count('src'): + yield line + + return [x for x in iterate_routes()] + + def add_onlink_route(self, cidr): + self._as_root('replace', cidr, 'dev', self.name, 'scope', 'link') + + def delete_onlink_route(self, cidr): + self._as_root('del', cidr, 'dev', self.name, 'scope', 'link') + + def get_gateway(self, scope=None, filters=None): + if filters is None: + filters = [] + + retval = None + + if scope: + filters += ['scope', scope] + + route_list_lines = self._run('list', 'dev', self.name, + *filters).split('\n') + default_route_line = next((x.strip() for x in + route_list_lines if + x.strip().startswith('default')), None) + if default_route_line: + gateway_index = 2 + parts = default_route_line.split() + retval = dict(gateway=parts[gateway_index]) + if 'metric' in parts: + metric_index = parts.index('metric') + 1 + retval.update(metric=int(parts[metric_index])) + + return retval + + def pullup_route(self, interface_name): + """Ensures that the route entry for the interface is before all + others on the same subnet. 
+ """ + device_list = [] + device_route_list_lines = self._run('list', 'proto', 'kernel', + 'dev', interface_name).split('\n') + for device_route_line in device_route_list_lines: + try: + subnet = device_route_line.split()[0] + except Exception: + continue + subnet_route_list_lines = self._run('list', 'proto', 'kernel', + 'match', subnet).split('\n') + for subnet_route_line in subnet_route_list_lines: + i = iter(subnet_route_line.split()) + while(i.next() != 'dev'): + pass + device = i.next() + try: + while(i.next() != 'src'): + pass + src = i.next() + except Exception: + src = '' + if device != interface_name: + device_list.append((device, src)) + else: + break + + for (device, src) in device_list: + self._as_root('del', subnet, 'dev', device) + if (src != ''): + self._as_root('append', subnet, 'proto', 'kernel', + 'src', src, 'dev', device) + else: + self._as_root('append', subnet, 'proto', 'kernel', + 'dev', device) + + def add_route(self, cidr, ip, table=None): + args = ['replace', cidr, 'via', ip, 'dev', self.name] + if table: + args += ['table', table] + self._as_root(*args) + + def delete_route(self, cidr, ip, table=None): + args = ['del', cidr, 'via', ip, 'dev', self.name] + if table: + args += ['table', table] + self._as_root(*args) + + +class IpNeighCommand(IpDeviceCommandBase): + COMMAND = 'neigh' + + def add(self, ip_version, ip_address, mac_address): + self._as_root('replace', + ip_address, + 'lladdr', + mac_address, + 'nud', + 'permanent', + 'dev', + self.name, + options=[ip_version]) + + def delete(self, ip_version, ip_address, mac_address): + self._as_root('del', + ip_address, + 'lladdr', + mac_address, + 'dev', + self.name, + options=[ip_version]) + + +class IpNetnsCommand(IpCommandBase): + COMMAND = 'netns' + + def add(self, name): + self._as_root('add', name, use_root_namespace=True) + return IPWrapper(self._parent.root_helper, name) + + def delete(self, name): + self._as_root('delete', name, use_root_namespace=True) + + def execute(self, cmds, 
addl_env={}, check_exit_code=True): + if not self._parent.root_helper: + raise exceptions.SudoRequired() + ns_params = [] + if self._parent.namespace: + ns_params = ['ip', 'netns', 'exec', self._parent.namespace] + + env_params = [] + if addl_env: + env_params = (['env'] + + ['%s=%s' % pair for pair in addl_env.items()]) + return utils.execute( + ns_params + env_params + list(cmds), + root_helper=self._parent.root_helper, + check_exit_code=check_exit_code) + + def exists(self, name): + output = self._parent._execute('o', 'netns', ['list']) + + for line in output.split('\n'): + if name == line.strip(): + return True + return False + + +def device_exists(device_name, root_helper=None, namespace=None): + try: + address = IPDevice(device_name, root_helper, namespace).link.address + except RuntimeError: + return False + return bool(address) + + +def ensure_device_is_ready(device_name, root_helper=None, namespace=None): + dev = IPDevice(device_name, root_helper, namespace) + try: + # Ensure the device is up, even if it is already up. If the device + # doesn't exist, a RuntimeError will be raised. + dev.link.set_up() + except RuntimeError: + return False + return True + + +def iproute_arg_supported(command, arg, root_helper=None): + command += ['help'] + stdout, stderr = utils.execute(command, root_helper=root_helper, + check_exit_code=False, return_stderr=True) + return any(arg in line for line in stderr.split('\n')) diff --git a/neutron/agent/linux/iptables_firewall.py b/neutron/agent/linux/iptables_firewall.py new file mode 100644 index 000000000..eecebaa6e --- /dev/null +++ b/neutron/agent/linux/iptables_firewall.py @@ -0,0 +1,383 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2012, Nachi Ueno, NTT MCL, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import netaddr +from oslo.config import cfg + +from neutron.agent import firewall +from neutron.agent.linux import iptables_manager +from neutron.common import constants +from neutron.openstack.common import log as logging + + +LOG = logging.getLogger(__name__) +SG_CHAIN = 'sg-chain' +INGRESS_DIRECTION = 'ingress' +EGRESS_DIRECTION = 'egress' +SPOOF_FILTER = 'spoof-filter' +CHAIN_NAME_PREFIX = {INGRESS_DIRECTION: 'i', + EGRESS_DIRECTION: 'o', + SPOOF_FILTER: 's'} +LINUX_DEV_LEN = 14 + + +class IptablesFirewallDriver(firewall.FirewallDriver): + """Driver which enforces security groups through iptables rules.""" + IPTABLES_DIRECTION = {INGRESS_DIRECTION: 'physdev-out', + EGRESS_DIRECTION: 'physdev-in'} + + def __init__(self): + self.iptables = iptables_manager.IptablesManager( + root_helper=cfg.CONF.AGENT.root_helper, + use_ipv6=True) + # list of port which has security group + self.filtered_ports = {} + self._add_fallback_chain_v4v6() + self._defer_apply = False + self._pre_defer_filtered_ports = None + + @property + def ports(self): + return self.filtered_ports + + def prepare_port_filter(self, port): + LOG.debug(_("Preparing device (%s) filter"), port['device']) + self._remove_chains() + self.filtered_ports[port['device']] = port + # each security group has it own chains + self._setup_chains() + self.iptables.apply() + + def update_port_filter(self, port): + LOG.debug(_("Updating device (%s) filter"), port['device']) + if port['device'] not in self.filtered_ports: + LOG.info(_('Attempted to update port filter which is not ' + 'filtered %s'), 
port['device']) + return + self._remove_chains() + self.filtered_ports[port['device']] = port + self._setup_chains() + self.iptables.apply() + + def remove_port_filter(self, port): + LOG.debug(_("Removing device (%s) filter"), port['device']) + if not self.filtered_ports.get(port['device']): + LOG.info(_('Attempted to remove port filter which is not ' + 'filtered %r'), port) + return + self._remove_chains() + self.filtered_ports.pop(port['device'], None) + self._setup_chains() + self.iptables.apply() + + def _setup_chains(self): + """Setup ingress and egress chain for a port.""" + if not self._defer_apply: + self._setup_chains_apply(self.filtered_ports) + + def _setup_chains_apply(self, ports): + self._add_chain_by_name_v4v6(SG_CHAIN) + for port in ports.values(): + self._setup_chain(port, INGRESS_DIRECTION) + self._setup_chain(port, EGRESS_DIRECTION) + self.iptables.ipv4['filter'].add_rule(SG_CHAIN, '-j ACCEPT') + self.iptables.ipv6['filter'].add_rule(SG_CHAIN, '-j ACCEPT') + + def _remove_chains(self): + """Remove ingress and egress chain for a port.""" + if not self._defer_apply: + self._remove_chains_apply(self.filtered_ports) + + def _remove_chains_apply(self, ports): + for port in ports.values(): + self._remove_chain(port, INGRESS_DIRECTION) + self._remove_chain(port, EGRESS_DIRECTION) + self._remove_chain(port, SPOOF_FILTER) + self._remove_chain_by_name_v4v6(SG_CHAIN) + + def _setup_chain(self, port, DIRECTION): + self._add_chain(port, DIRECTION) + self._add_rule_by_security_group(port, DIRECTION) + + def _remove_chain(self, port, DIRECTION): + chain_name = self._port_chain_name(port, DIRECTION) + self._remove_chain_by_name_v4v6(chain_name) + + def _add_fallback_chain_v4v6(self): + self.iptables.ipv4['filter'].add_chain('sg-fallback') + self.iptables.ipv4['filter'].add_rule('sg-fallback', '-j DROP') + self.iptables.ipv6['filter'].add_chain('sg-fallback') + self.iptables.ipv6['filter'].add_rule('sg-fallback', '-j DROP') + + def _add_chain_by_name_v4v6(self, 
chain_name): + self.iptables.ipv6['filter'].add_chain(chain_name) + self.iptables.ipv4['filter'].add_chain(chain_name) + + def _remove_chain_by_name_v4v6(self, chain_name): + self.iptables.ipv4['filter'].ensure_remove_chain(chain_name) + self.iptables.ipv6['filter'].ensure_remove_chain(chain_name) + + def _add_rule_to_chain_v4v6(self, chain_name, ipv4_rules, ipv6_rules): + for rule in ipv4_rules: + self.iptables.ipv4['filter'].add_rule(chain_name, rule) + + for rule in ipv6_rules: + self.iptables.ipv6['filter'].add_rule(chain_name, rule) + + def _get_device_name(self, port): + return port['device'] + + def _add_chain(self, port, direction): + chain_name = self._port_chain_name(port, direction) + self._add_chain_by_name_v4v6(chain_name) + + # Note(nati) jump to the security group chain (SG_CHAIN) + # This is needed because the packet may much two rule in port + # if the two port is in the same host + # We accept the packet at the end of SG_CHAIN. + + # jump to the security group chain + device = self._get_device_name(port) + jump_rule = ['-m physdev --%s %s --physdev-is-bridged ' + '-j $%s' % (self.IPTABLES_DIRECTION[direction], + device, + SG_CHAIN)] + self._add_rule_to_chain_v4v6('FORWARD', jump_rule, jump_rule) + + # jump to the chain based on the device + jump_rule = ['-m physdev --%s %s --physdev-is-bridged ' + '-j $%s' % (self.IPTABLES_DIRECTION[direction], + device, + chain_name)] + self._add_rule_to_chain_v4v6(SG_CHAIN, jump_rule, jump_rule) + + if direction == EGRESS_DIRECTION: + self._add_rule_to_chain_v4v6('INPUT', jump_rule, jump_rule) + + def _split_sgr_by_ethertype(self, security_group_rules): + ipv4_sg_rules = [] + ipv6_sg_rules = [] + for rule in security_group_rules: + if rule.get('ethertype') == constants.IPv4: + ipv4_sg_rules.append(rule) + elif rule.get('ethertype') == constants.IPv6: + if rule.get('protocol') == 'icmp': + rule['protocol'] = 'icmpv6' + ipv6_sg_rules.append(rule) + return ipv4_sg_rules, ipv6_sg_rules + + def 
_select_sgr_by_direction(self, port, direction): + return [rule + for rule in port.get('security_group_rules', []) + if rule['direction'] == direction] + + def _setup_spoof_filter_chain(self, port, table, mac_ip_pairs, rules): + if mac_ip_pairs: + chain_name = self._port_chain_name(port, SPOOF_FILTER) + table.add_chain(chain_name) + for mac, ip in mac_ip_pairs: + if ip is None: + # If fixed_ips is [] this rule will be added to the end + # of the list after the allowed_address_pair rules. + table.add_rule(chain_name, + '-m mac --mac-source %s -j RETURN' + % mac) + else: + table.add_rule(chain_name, + '-m mac --mac-source %s -s %s -j RETURN' + % (mac, ip)) + table.add_rule(chain_name, '-j DROP') + rules.append('-j $%s' % chain_name) + + def _build_ipv4v6_mac_ip_list(self, mac, ip_address, mac_ipv4_pairs, + mac_ipv6_pairs): + if netaddr.IPNetwork(ip_address).version == 4: + mac_ipv4_pairs.append((mac, ip_address)) + else: + mac_ipv6_pairs.append((mac, ip_address)) + + def _spoofing_rule(self, port, ipv4_rules, ipv6_rules): + #Note(nati) allow dhcp or RA packet + ipv4_rules += ['-p udp -m udp --sport 68 --dport 67 -j RETURN'] + ipv6_rules += ['-p icmpv6 -j RETURN'] + ipv6_rules += ['-p udp -m udp --sport 546 --dport 547 -j RETURN'] + mac_ipv4_pairs = [] + mac_ipv6_pairs = [] + + if isinstance(port.get('allowed_address_pairs'), list): + for address_pair in port['allowed_address_pairs']: + self._build_ipv4v6_mac_ip_list(address_pair['mac_address'], + address_pair['ip_address'], + mac_ipv4_pairs, + mac_ipv6_pairs) + + for ip in port['fixed_ips']: + self._build_ipv4v6_mac_ip_list(port['mac_address'], ip, + mac_ipv4_pairs, mac_ipv6_pairs) + if not port['fixed_ips']: + mac_ipv4_pairs.append((port['mac_address'], None)) + mac_ipv6_pairs.append((port['mac_address'], None)) + + self._setup_spoof_filter_chain(port, self.iptables.ipv4['filter'], + mac_ipv4_pairs, ipv4_rules) + self._setup_spoof_filter_chain(port, self.iptables.ipv6['filter'], + mac_ipv6_pairs, ipv6_rules) + + def 
_drop_dhcp_rule(self, ipv4_rules, ipv6_rules): + #Note(nati) Drop dhcp packet from VM + ipv4_rules += ['-p udp -m udp --sport 67 --dport 68 -j DROP'] + ipv6_rules += ['-p udp -m udp --sport 547 --dport 546 -j DROP'] + + def _accept_inbound_icmpv6(self): + # Allow multicast listener, neighbor solicitation and + # neighbor advertisement into the instance + icmpv6_rules = [] + for icmp6_type in constants.ICMPV6_ALLOWED_TYPES: + icmpv6_rules += ['-p icmpv6 --icmpv6-type %s -j RETURN' % + icmp6_type] + return icmpv6_rules + + def _add_rule_by_security_group(self, port, direction): + chain_name = self._port_chain_name(port, direction) + # select rules for current direction + security_group_rules = self._select_sgr_by_direction(port, direction) + # split groups by ip version + # for ipv4, iptables command is used + # for ipv6, iptables6 command is used + ipv4_sg_rules, ipv6_sg_rules = self._split_sgr_by_ethertype( + security_group_rules) + ipv4_iptables_rule = [] + ipv6_iptables_rule = [] + if direction == EGRESS_DIRECTION: + self._spoofing_rule(port, + ipv4_iptables_rule, + ipv6_iptables_rule) + self._drop_dhcp_rule(ipv4_iptables_rule, ipv6_iptables_rule) + if direction == INGRESS_DIRECTION: + ipv6_iptables_rule += self._accept_inbound_icmpv6() + ipv4_iptables_rule += self._convert_sgr_to_iptables_rules( + ipv4_sg_rules) + ipv6_iptables_rule += self._convert_sgr_to_iptables_rules( + ipv6_sg_rules) + self._add_rule_to_chain_v4v6(chain_name, + ipv4_iptables_rule, + ipv6_iptables_rule) + + def _convert_sgr_to_iptables_rules(self, security_group_rules): + iptables_rules = [] + self._drop_invalid_packets(iptables_rules) + self._allow_established(iptables_rules) + for rule in security_group_rules: + # These arguments MUST be in the format iptables-save will + # display them: source/dest, protocol, sport, dport, target + # Otherwise the iptables_manager code won't be able to find + # them to preserve their [packet:byte] counts. 
+ args = self._ip_prefix_arg('s', + rule.get('source_ip_prefix')) + args += self._ip_prefix_arg('d', + rule.get('dest_ip_prefix')) + args += self._protocol_arg(rule.get('protocol')) + args += self._port_arg('sport', + rule.get('protocol'), + rule.get('source_port_range_min'), + rule.get('source_port_range_max')) + args += self._port_arg('dport', + rule.get('protocol'), + rule.get('port_range_min'), + rule.get('port_range_max')) + args += ['-j RETURN'] + iptables_rules += [' '.join(args)] + + iptables_rules += ['-j $sg-fallback'] + + return iptables_rules + + def _drop_invalid_packets(self, iptables_rules): + # Always drop invalid packets + iptables_rules += ['-m state --state ' 'INVALID -j DROP'] + return iptables_rules + + def _allow_established(self, iptables_rules): + # Allow established connections + iptables_rules += ['-m state --state RELATED,ESTABLISHED -j RETURN'] + return iptables_rules + + def _protocol_arg(self, protocol): + if not protocol: + return [] + + iptables_rule = ['-p', protocol] + # iptables always adds '-m protocol' for udp and tcp + if protocol in ['udp', 'tcp']: + iptables_rule += ['-m', protocol] + return iptables_rule + + def _port_arg(self, direction, protocol, port_range_min, port_range_max): + if (protocol not in ['udp', 'tcp', 'icmp', 'icmpv6'] + or not port_range_min): + return [] + + if protocol in ['icmp', 'icmpv6']: + # Note(xuhanp): port_range_min/port_range_max represent + # icmp type/code when protocol is icmp or icmpv6 + # icmp code can be 0 so we cannot use "if port_range_max" here + if port_range_max is not None: + return ['--%s-type' % protocol, + '%s/%s' % (port_range_min, port_range_max)] + return ['--%s-type' % protocol, '%s' % port_range_min] + elif port_range_min == port_range_max: + return ['--%s' % direction, '%s' % (port_range_min,)] + else: + return ['-m', 'multiport', + '--%ss' % direction, + '%s:%s' % (port_range_min, port_range_max)] + + def _ip_prefix_arg(self, direction, ip_prefix): + #NOTE (nati) : 
source_group_id is converted to list of source_ + # ip_prefix in server side + if ip_prefix: + return ['-%s' % direction, ip_prefix] + return [] + + def _port_chain_name(self, port, direction): + return iptables_manager.get_chain_name( + '%s%s' % (CHAIN_NAME_PREFIX[direction], port['device'][3:])) + + def filter_defer_apply_on(self): + if not self._defer_apply: + self.iptables.defer_apply_on() + self._pre_defer_filtered_ports = dict(self.filtered_ports) + self._defer_apply = True + + def filter_defer_apply_off(self): + if self._defer_apply: + self._defer_apply = False + self._remove_chains_apply(self._pre_defer_filtered_ports) + self._pre_defer_filtered_ports = None + self._setup_chains_apply(self.filtered_ports) + self.iptables.defer_apply_off() + + +class OVSHybridIptablesFirewallDriver(IptablesFirewallDriver): + OVS_HYBRID_TAP_PREFIX = 'tap' + + def _port_chain_name(self, port, direction): + return iptables_manager.get_chain_name( + '%s%s' % (CHAIN_NAME_PREFIX[direction], port['device'])) + + def _get_device_name(self, port): + return (self.OVS_HYBRID_TAP_PREFIX + port['device'])[:LINUX_DEV_LEN] diff --git a/neutron/agent/linux/iptables_manager.py b/neutron/agent/linux/iptables_manager.py new file mode 100644 index 000000000..fb0a21ff5 --- /dev/null +++ b/neutron/agent/linux/iptables_manager.py @@ -0,0 +1,668 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 Locaweb. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# @author: Juliano Martinez, Locaweb. +# based on +# https://github.com/openstack/nova/blob/master/nova/network/linux_net.py + +"""Implements iptables rules using linux utilities.""" + +import inspect +import os +import re + +from neutron.agent.linux import utils as linux_utils +from neutron.common import utils +from neutron.openstack.common import excutils +from neutron.openstack.common import lockutils +from neutron.openstack.common import log as logging + +LOG = logging.getLogger(__name__) + + +# NOTE(vish): Iptables supports chain names of up to 28 characters, and we +# add up to 12 characters to binary_name which is used as a prefix, +# so we limit it to 16 characters. +# (max_chain_name_length - len('-POSTROUTING') == 16) +def get_binary_name(): + """Grab the name of the binary we're running in.""" + return os.path.basename(inspect.stack()[-1][1])[:16] + +binary_name = get_binary_name() + +# A length of a chain name must be less than or equal to 11 characters. +# - ( + '-') = 28-(16+1) = 11 +MAX_CHAIN_LEN_WRAP = 11 +MAX_CHAIN_LEN_NOWRAP = 28 + +# Number of iptables rules to print before and after a rule that causes a +# a failure during iptables-restore +IPTABLES_ERROR_LINES_OF_CONTEXT = 5 + + +def get_chain_name(chain_name, wrap=True): + if wrap: + return chain_name[:MAX_CHAIN_LEN_WRAP] + else: + return chain_name[:MAX_CHAIN_LEN_NOWRAP] + + +class IptablesRule(object): + """An iptables rule. + + You shouldn't need to use this class directly, it's only used by + IptablesManager. 
+ + """ + + def __init__(self, chain, rule, wrap=True, top=False, + binary_name=binary_name, tag=None): + self.chain = get_chain_name(chain, wrap) + self.rule = rule + self.wrap = wrap + self.top = top + self.wrap_name = binary_name[:16] + self.tag = tag + + def __eq__(self, other): + return ((self.chain == other.chain) and + (self.rule == other.rule) and + (self.top == other.top) and + (self.wrap == other.wrap)) + + def __ne__(self, other): + return not self == other + + def __str__(self): + if self.wrap: + chain = '%s-%s' % (self.wrap_name, self.chain) + else: + chain = self.chain + return '-A %s %s' % (chain, self.rule) + + +class IptablesTable(object): + """An iptables table.""" + + def __init__(self, binary_name=binary_name): + self.rules = [] + self.remove_rules = [] + self.chains = set() + self.unwrapped_chains = set() + self.remove_chains = set() + self.wrap_name = binary_name[:16] + + def add_chain(self, name, wrap=True): + """Adds a named chain to the table. + + The chain name is wrapped to be unique for the component creating + it, so different components of Nova can safely create identically + named chains without interfering with one another. + + At the moment, its wrapped name is -, + so if nova-compute creates a chain named 'OUTPUT', it'll actually + end up named 'nova-compute-OUTPUT'. + + """ + name = get_chain_name(name, wrap) + if wrap: + self.chains.add(name) + else: + self.unwrapped_chains.add(name) + + def _select_chain_set(self, wrap): + if wrap: + return self.chains + else: + return self.unwrapped_chains + + def ensure_remove_chain(self, name, wrap=True): + """Ensure the chain is removed. + + This removal "cascades". All rule in the chain are removed, as are + all rules in other chains that jump to it. + """ + name = get_chain_name(name, wrap) + chain_set = self._select_chain_set(wrap) + if name not in chain_set: + return + + self.remove_chain(name, wrap) + + def remove_chain(self, name, wrap=True): + """Remove named chain. 
+
+        This removal "cascades". All rule in the chain are removed, as are
+        all rules in other chains that jump to it.
+
+        If the chain is not found, this is merely logged.
+
+        """
+        name = get_chain_name(name, wrap)
+        chain_set = self._select_chain_set(wrap)
+
+        if name not in chain_set:
+            LOG.warn(_('Attempted to remove chain %s which does not exist'),
+                     name)
+            return
+
+        chain_set.remove(name)
+
+        if not wrap:
+            # non-wrapped chains and rules need to be dealt with specially,
+            # so we keep a list of them to be iterated over in apply(),
+            # where _modify_rules() weeds them out of the saved state
+            self.remove_chains.add(name)
+
+            # first, add rules to remove that have a matching chain name
+            self.remove_rules += [r for r in self.rules if r.chain == name]
+
+        # next, remove rules from list that have a matching chain name
+        self.rules = [r for r in self.rules if r.chain != name]
+
+        if not wrap:
+            jump_snippet = '-j %s' % name
+            # next, add rules to remove that have a matching jump chain
+            self.remove_rules += [r for r in self.rules
+                                  if jump_snippet in r.rule]
+        else:
+            jump_snippet = '-j %s-%s' % (self.wrap_name, name)
+
+        # finally, remove rules from list that have a matching jump chain
+        self.rules = [r for r in self.rules
+                      if jump_snippet not in r.rule]
+
+    def add_rule(self, chain, rule, wrap=True, top=False, tag=None):
+        """Add a rule to the table.
+
+        This is just like what you'd feed to iptables, just without
+        the '-A <chain name>' bit at the start.
+
+        However, if you need to jump to one of your wrapped chains,
+        prepend its name with a '$' which will ensure the wrapping
+        is applied correctly.
+ + """ + chain = get_chain_name(chain, wrap) + if wrap and chain not in self.chains: + raise LookupError(_('Unknown chain: %r') % chain) + + if '$' in rule: + rule = ' '.join( + self._wrap_target_chain(e, wrap) for e in rule.split(' ')) + + self.rules.append(IptablesRule(chain, rule, wrap, top, self.wrap_name, + tag)) + + def _wrap_target_chain(self, s, wrap): + if s.startswith('$'): + s = ('%s-%s' % (self.wrap_name, get_chain_name(s[1:], wrap))) + + return s + + def remove_rule(self, chain, rule, wrap=True, top=False): + """Remove a rule from a chain. + + Note: The rule must be exactly identical to the one that was added. + You cannot switch arguments around like you can with the iptables + CLI tool. + + """ + chain = get_chain_name(chain, wrap) + try: + if '$' in rule: + rule = ' '.join( + self._wrap_target_chain(e, wrap) for e in rule.split(' ')) + + self.rules.remove(IptablesRule(chain, rule, wrap, top, + self.wrap_name)) + if not wrap: + self.remove_rules.append(IptablesRule(chain, rule, wrap, top, + self.wrap_name)) + except ValueError: + LOG.warn(_('Tried to remove rule that was not there:' + ' %(chain)r %(rule)r %(wrap)r %(top)r'), + {'chain': chain, 'rule': rule, + 'top': top, 'wrap': wrap}) + + def empty_chain(self, chain, wrap=True): + """Remove all rules from a chain.""" + chain = get_chain_name(chain, wrap) + chained_rules = [rule for rule in self.rules + if rule.chain == chain and rule.wrap == wrap] + for rule in chained_rules: + self.rules.remove(rule) + + def clear_rules_by_tag(self, tag): + if not tag: + return + rules = [rule for rule in self.rules if rule.tag == tag] + for rule in rules: + self.rules.remove(rule) + + +class IptablesManager(object): + """Wrapper for iptables. + + See IptablesTable for some usage docs + + A number of chains are set up to begin with. + + First, neutron-filter-top. It's added at the top of FORWARD and OUTPUT. Its + name is not wrapped, so it's shared between the various nova workers. 
It's + intended for rules that need to live at the top of the FORWARD and OUTPUT + chains. It's in both the ipv4 and ipv6 set of tables. + + For ipv4 and ipv6, the built-in INPUT, OUTPUT, and FORWARD filter chains + are wrapped, meaning that the "real" INPUT chain has a rule that jumps to + the wrapped INPUT chain, etc. Additionally, there's a wrapped chain named + "local" which is jumped to from neutron-filter-top. + + For ipv4, the built-in PREROUTING, OUTPUT, and POSTROUTING nat chains are + wrapped in the same was as the built-in filter chains. Additionally, + there's a snat chain that is applied after the POSTROUTING chain. + + """ + + def __init__(self, _execute=None, state_less=False, + root_helper=None, use_ipv6=False, namespace=None, + binary_name=binary_name): + if _execute: + self.execute = _execute + else: + self.execute = linux_utils.execute + + self.use_ipv6 = use_ipv6 + self.root_helper = root_helper + self.namespace = namespace + self.iptables_apply_deferred = False + self.wrap_name = binary_name[:16] + + self.ipv4 = {'filter': IptablesTable(binary_name=self.wrap_name)} + self.ipv6 = {'filter': IptablesTable(binary_name=self.wrap_name)} + + # Add a neutron-filter-top chain. It's intended to be shared + # among the various nova components. It sits at the very top + # of FORWARD and OUTPUT. 
+ for tables in [self.ipv4, self.ipv6]: + tables['filter'].add_chain('neutron-filter-top', wrap=False) + tables['filter'].add_rule('FORWARD', '-j neutron-filter-top', + wrap=False, top=True) + tables['filter'].add_rule('OUTPUT', '-j neutron-filter-top', + wrap=False, top=True) + + tables['filter'].add_chain('local') + tables['filter'].add_rule('neutron-filter-top', '-j $local', + wrap=False) + + # Wrap the built-in chains + builtin_chains = {4: {'filter': ['INPUT', 'OUTPUT', 'FORWARD']}, + 6: {'filter': ['INPUT', 'OUTPUT', 'FORWARD']}} + + if not state_less: + self.ipv4.update( + {'nat': IptablesTable(binary_name=self.wrap_name)}) + builtin_chains[4].update({'nat': ['PREROUTING', + 'OUTPUT', 'POSTROUTING']}) + + for ip_version in builtin_chains: + if ip_version == 4: + tables = self.ipv4 + elif ip_version == 6: + tables = self.ipv6 + + for table, chains in builtin_chains[ip_version].iteritems(): + for chain in chains: + tables[table].add_chain(chain) + tables[table].add_rule(chain, '-j $%s' % + (chain), wrap=False) + + if not state_less: + # Add a neutron-postrouting-bottom chain. It's intended to be + # shared among the various nova components. We set it as the last + # chain of POSTROUTING chain. + self.ipv4['nat'].add_chain('neutron-postrouting-bottom', + wrap=False) + self.ipv4['nat'].add_rule('POSTROUTING', + '-j neutron-postrouting-bottom', + wrap=False) + + # We add a snat chain to the shared neutron-postrouting-bottom + # chain so that it's applied last. + self.ipv4['nat'].add_chain('snat') + self.ipv4['nat'].add_rule('neutron-postrouting-bottom', + '-j $snat', wrap=False) + + # And then we add a float-snat chain and jump to first thing in + # the snat chain. 
+ self.ipv4['nat'].add_chain('float-snat') + self.ipv4['nat'].add_rule('snat', '-j $float-snat') + + def defer_apply_on(self): + self.iptables_apply_deferred = True + + def defer_apply_off(self): + self.iptables_apply_deferred = False + self._apply() + + def apply(self): + if self.iptables_apply_deferred: + return + + self._apply() + + def _apply(self): + lock_name = 'iptables' + if self.namespace: + lock_name += '-' + self.namespace + + try: + with lockutils.lock(lock_name, utils.SYNCHRONIZED_PREFIX, True): + LOG.debug(_('Got semaphore / lock "%s"'), lock_name) + return self._apply_synchronized() + finally: + LOG.debug(_('Semaphore / lock released "%s"'), lock_name) + + def _apply_synchronized(self): + """Apply the current in-memory set of iptables rules. + + This will blow away any rules left over from previous runs of the + same component of Nova, and replace them with our current set of + rules. This happens atomically, thanks to iptables-restore. + + """ + s = [('iptables', self.ipv4)] + if self.use_ipv6: + s += [('ip6tables', self.ipv6)] + + for cmd, tables in s: + args = ['%s-save' % (cmd,), '-c'] + if self.namespace: + args = ['ip', 'netns', 'exec', self.namespace] + args + all_tables = self.execute(args, root_helper=self.root_helper) + all_lines = all_tables.split('\n') + for table_name, table in tables.iteritems(): + start, end = self._find_table(all_lines, table_name) + all_lines[start:end] = self._modify_rules( + all_lines[start:end], table, table_name) + + args = ['%s-restore' % (cmd,), '-c'] + if self.namespace: + args = ['ip', 'netns', 'exec', self.namespace] + args + try: + self.execute(args, process_input='\n'.join(all_lines), + root_helper=self.root_helper) + except RuntimeError as r_error: + with excutils.save_and_reraise_exception(): + try: + line_no = int(re.search( + 'iptables-restore: line ([0-9]+?) 
failed', + str(r_error)).group(1)) + context = IPTABLES_ERROR_LINES_OF_CONTEXT + log_start = max(0, line_no - context) + log_end = line_no + context + except AttributeError: + # line error wasn't found, print all lines instead + log_start = 0 + log_end = len(all_lines) + log_lines = ('%7d. %s' % (idx, l) + for idx, l in enumerate( + all_lines[log_start:log_end], + log_start + 1) + ) + LOG.error(_("IPTablesManager.apply failed to apply the " + "following set of iptables rules:\n%s"), + '\n'.join(log_lines)) + LOG.debug(_("IPTablesManager.apply completed with success")) + + def _find_table(self, lines, table_name): + if len(lines) < 3: + # length only <2 when fake iptables + return (0, 0) + try: + start = lines.index('*%s' % table_name) - 1 + except ValueError: + # Couldn't find table_name + LOG.debug(_('Unable to find table %s'), table_name) + return (0, 0) + end = lines[start:].index('COMMIT') + start + 2 + return (start, end) + + def _find_rules_index(self, lines): + seen_chains = False + rules_index = 0 + for rules_index, rule in enumerate(lines): + if not seen_chains: + if rule.startswith(':'): + seen_chains = True + else: + if not rule.startswith(':'): + break + + if not seen_chains: + rules_index = 2 + + return rules_index + + def _find_last_entry(self, filter_list, match_str): + # find a matching entry, starting from the bottom + for s in reversed(filter_list): + s = s.strip() + if match_str in s: + return s + + def _modify_rules(self, current_lines, table, table_name): + unwrapped_chains = table.unwrapped_chains + chains = table.chains + remove_chains = table.remove_chains + rules = table.rules + remove_rules = table.remove_rules + + if not current_lines: + fake_table = ['# Generated by iptables_manager', + '*' + table_name, 'COMMIT', + '# Completed by iptables_manager'] + current_lines = fake_table + + # Fill old_filter with any chains or rules we might have added, + # they could have a [packet:byte] count we want to preserve. 
+        # Fill new_filter with any chains or rules without our name in them.
+        old_filter, new_filter = [], []
+        for line in current_lines:
+            (old_filter if self.wrap_name in line else
+             new_filter).append(line.strip())
+
+        # Index at which our chains/rules get spliced back in, i.e. right
+        # after the ':CHAIN ...' declarations of this table.
+        rules_index = self._find_rules_index(new_filter)
+
+        all_chains = [':%s' % name for name in unwrapped_chains]
+        all_chains += [':%s-%s' % (self.wrap_name, name) for name in chains]
+
+        # Iterate through all the chains, trying to find an existing
+        # match.  An existing entry from the iptables-save output is
+        # preferred over the bare declaration because it carries the
+        # live [packet:byte] counters.
+        our_chains = []
+        for chain in all_chains:
+            chain_str = str(chain).strip()
+
+            old = self._find_last_entry(old_filter, chain_str)
+            if not old:
+                dup = self._find_last_entry(new_filter, chain_str)
+            new_filter = [s for s in new_filter if chain_str not in s.strip()]
+
+            # if no old or duplicates, use original chain
+            if old or dup:
+                chain_str = str(old or dup)
+            else:
+                # add-on the [packet:bytes]
+                chain_str += ' - [0:0]'
+
+            our_chains += [chain_str]
+
+        # Iterate through all the rules, trying to find an existing
+        # match.
+        our_rules = []
+        bot_rules = []
+        for rule in rules:
+            rule_str = str(rule).strip()
+            # Further down, we weed out duplicates from the bottom of the
+            # list, so here we remove the dupes ahead of time.
+
+            old = self._find_last_entry(old_filter, rule_str)
+            if not old:
+                dup = self._find_last_entry(new_filter, rule_str)
+            new_filter = [s for s in new_filter if rule_str not in s.strip()]
+
+            # if no old or duplicates, use original rule
+            if old or dup:
+                rule_str = str(old or dup)
+                # backup one index so we write the array correctly
+                if not old:
+                    rules_index -= 1
+            else:
+                # add-on the [packet:bytes]
+                rule_str = '[0:0] ' + rule_str
+
+            if rule.top:
+                # rule.top == True means we want this rule to be at the top.
+                our_rules += [rule_str]
+            else:
+                bot_rules += [rule_str]
+
+        our_rules += bot_rules
+
+        # Splice rules in first, then chains at the same index, so the
+        # chain declarations end up ahead of the rules in new_filter.
+        new_filter[rules_index:rules_index] = our_rules
+        new_filter[rules_index:rules_index] = our_chains
+
+        def _strip_packets_bytes(line):
+            # strip any [packet:byte] counts at start or end of lines
+            if line.startswith(':'):
+                # it's a chain, for example, ":neutron-billing - [0:0]"
+                line = line.split(':')[1]
+                line = line.split(' - [', 1)[0]
+            elif line.startswith('['):
+                # it's a rule, for example, "[0:0] -A neutron-billing..."
+                line = line.split('] ', 1)[1]
+            line = line.strip()
+            return line
+
+        seen_chains = set()
+
+        def _weed_out_duplicate_chains(line):
+            # ignore [packet:byte] counts at end of lines
+            if line.startswith(':'):
+                line = _strip_packets_bytes(line)
+                if line in seen_chains:
+                    return False
+                else:
+                    seen_chains.add(line)
+
+            # Leave it alone
+            return True
+
+        seen_rules = set()
+
+        def _weed_out_duplicate_rules(line):
+            if line.startswith('['):
+                line = _strip_packets_bytes(line)
+                if line in seen_rules:
+                    return False
+                else:
+                    seen_rules.add(line)
+
+            # Leave it alone
+            return True
+
+        def _weed_out_removes(line):
+            # We need to find exact matches here
+            if line.startswith(':'):
+                # it's a chain declaration
+                line = _strip_packets_bytes(line)
+                for chain in remove_chains:
+                    if chain == line:
+                        remove_chains.remove(chain)
+                        return False
+            elif line.startswith('['):
+                # it's a rule
+                line = _strip_packets_bytes(line)
+                for rule in remove_rules:
+                    rule_str = _strip_packets_bytes(str(rule))
+                    if rule_str == line:
+                        remove_rules.remove(rule)
+                        return False
+
+            # Leave it alone
+            return True
+
+        # We filter duplicates.  Go through the chains and rules, letting
+        # the *last* occurrence take precedence since it could have a
+        # non-zero [packet:byte] count we want to preserve.  We also filter
+        # out anything in the "remove" list.
+        new_filter.reverse()
+        new_filter = [line for line in new_filter
+                      if _weed_out_duplicate_chains(line) and
+                      _weed_out_duplicate_rules(line) and
+                      _weed_out_removes(line)]
+        new_filter.reverse()
+
+        # flush lists, just in case we didn't find something
+        remove_chains.clear()
+        # NOTE: 'for x in lst: lst.remove(x)' skips every other element
+        # because the list is mutated while it is being iterated; delete
+        # the full slice to reliably empty the list in place.
+        del remove_rules[:]
+
+        return new_filter
+
+    def _get_traffic_counters_cmd_tables(self, chain, wrap=True):
+        """Return (binary, table) pairs for every table containing chain."""
+        name = get_chain_name(chain, wrap)
+
+        cmd_tables = [('iptables', key) for key, table in self.ipv4.items()
+                      if name in table._select_chain_set(wrap)]
+
+        cmd_tables += [('ip6tables', key) for key, table in self.ipv6.items()
+                       if name in table._select_chain_set(wrap)]
+
+        return cmd_tables
+
+    def get_traffic_counters(self, chain, wrap=True, zero=False):
+        """Return the sum of the traffic counters of all rules of a chain."""
+        cmd_tables = self._get_traffic_counters_cmd_tables(chain, wrap)
+        if not cmd_tables:
+            LOG.warn(_('Attempted to get traffic counters of chain %s which '
+                       'does not exist'), chain)
+            return
+
+        name = get_chain_name(chain, wrap)
+        acc = {'pkts': 0, 'bytes': 0}
+
+        for cmd, table in cmd_tables:
+            # -v -x gives exact (non-abbreviated) packet/byte counts;
+            # -Z atomically zeroes the counters after listing them.
+            args = [cmd, '-t', table, '-L', name, '-n', '-v', '-x']
+            if zero:
+                args.append('-Z')
+            if self.namespace:
+                args = ['ip', 'netns', 'exec', self.namespace] + args
+            current_table = (self.execute(args,
+                             root_helper=self.root_helper))
+            current_lines = current_table.split('\n')
+
+            # Skip the two header lines emitted by iptables -L, then stop
+            # at the first blank or non-numeric row (end of rule listing).
+            for line in current_lines[2:]:
+                if not line:
+                    break
+                data = line.split()
+                if (len(data) < 2 or
+                        not data[0].isdigit() or
+                        not data[1].isdigit()):
+                    break
+
+                acc['pkts'] += int(data[0])
+                acc['bytes'] += int(data[1])
+
+        return acc
diff --git a/neutron/agent/linux/ovs_lib.py b/neutron/agent/linux/ovs_lib.py
new file mode 100644
index 000000000..4197b4ec8
--- /dev/null
+++ b/neutron/agent/linux/ovs_lib.py
@@ -0,0 +1,564 @@
+# Copyright 2011 VMware, Inc.
+# All Rights Reserved.
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo.config import cfg + +from neutron.agent.linux import ip_lib +from neutron.agent.linux import utils +from neutron.common import exceptions +from neutron.common import utils as common_utils +from neutron.openstack.common import excutils +from neutron.openstack.common import jsonutils +from neutron.openstack.common import log as logging +from neutron.plugins.common import constants as p_const +# TODO(JLH) Should we remove the explicit include of the ovs plugin here +from neutron.plugins.openvswitch.common import constants + +# Default timeout for ovs-vsctl command +DEFAULT_OVS_VSCTL_TIMEOUT = 10 +OPTS = [ + cfg.IntOpt('ovs_vsctl_timeout', + default=DEFAULT_OVS_VSCTL_TIMEOUT, + help=_('Timeout in seconds for ovs-vsctl commands')), +] +cfg.CONF.register_opts(OPTS) + +LOG = logging.getLogger(__name__) + + +class VifPort: + def __init__(self, port_name, ofport, vif_id, vif_mac, switch): + self.port_name = port_name + self.ofport = ofport + self.vif_id = vif_id + self.vif_mac = vif_mac + self.switch = switch + + def __str__(self): + return ("iface-id=" + self.vif_id + ", vif_mac=" + + self.vif_mac + ", port_name=" + self.port_name + + ", ofport=" + str(self.ofport) + ", bridge_name=" + + self.switch.br_name) + + +class BaseOVS(object): + + def __init__(self, root_helper): + self.root_helper = root_helper + self.vsctl_timeout = cfg.CONF.ovs_vsctl_timeout + + def run_vsctl(self, args, check_error=False): + full_args = 
["ovs-vsctl", "--timeout=%d" % self.vsctl_timeout] + args + try: + return utils.execute(full_args, root_helper=self.root_helper) + except Exception as e: + with excutils.save_and_reraise_exception() as ctxt: + LOG.error(_("Unable to execute %(cmd)s. " + "Exception: %(exception)s"), + {'cmd': full_args, 'exception': e}) + if not check_error: + ctxt.reraise = False + + def add_bridge(self, bridge_name): + self.run_vsctl(["--", "--may-exist", "add-br", bridge_name]) + return OVSBridge(bridge_name, self.root_helper) + + def delete_bridge(self, bridge_name): + self.run_vsctl(["--", "--if-exists", "del-br", bridge_name]) + + def bridge_exists(self, bridge_name): + try: + self.run_vsctl(['br-exists', bridge_name], check_error=True) + except RuntimeError as e: + with excutils.save_and_reraise_exception() as ctxt: + if 'Exit code: 2\n' in str(e): + ctxt.reraise = False + return False + return True + + def get_bridge_name_for_port_name(self, port_name): + try: + return self.run_vsctl(['port-to-br', port_name], check_error=True) + except RuntimeError as e: + with excutils.save_and_reraise_exception() as ctxt: + if 'Exit code: 1\n' in str(e): + ctxt.reraise = False + + def port_exists(self, port_name): + return bool(self.get_bridge_name_for_port_name(port_name)) + + +class OVSBridge(BaseOVS): + def __init__(self, br_name, root_helper): + super(OVSBridge, self).__init__(root_helper) + self.br_name = br_name + self.defer_apply_flows = False + self.deferred_flows = {'add': '', 'mod': '', 'del': ''} + + def set_controller(self, controller_names): + vsctl_command = ['--', 'set-controller', self.br_name] + vsctl_command.extend(controller_names) + self.run_vsctl(vsctl_command, check_error=True) + + def del_controller(self): + self.run_vsctl(['--', 'del-controller', self.br_name], + check_error=True) + + def get_controller(self): + res = self.run_vsctl(['--', 'get-controller', self.br_name], + check_error=True) + if res: + return res.strip().split('\n') + return res + + def 
set_secure_mode(self): + self.run_vsctl(['--', 'set-fail-mode', self.br_name, 'secure'], + check_error=True) + + def set_protocols(self, protocols): + self.run_vsctl(['--', 'set', 'bridge', self.br_name, + "protocols=%s" % protocols], + check_error=True) + + def create(self): + self.add_bridge(self.br_name) + + def destroy(self): + self.delete_bridge(self.br_name) + + def reset_bridge(self): + self.destroy() + self.create() + + def add_port(self, port_name): + self.run_vsctl(["--", "--may-exist", "add-port", self.br_name, + port_name]) + return self.get_port_ofport(port_name) + + def delete_port(self, port_name): + self.run_vsctl(["--", "--if-exists", "del-port", self.br_name, + port_name]) + + def set_db_attribute(self, table_name, record, column, value): + args = ["set", table_name, record, "%s=%s" % (column, value)] + self.run_vsctl(args) + + def clear_db_attribute(self, table_name, record, column): + args = ["clear", table_name, record, column] + self.run_vsctl(args) + + def run_ofctl(self, cmd, args, process_input=None): + full_args = ["ovs-ofctl", cmd, self.br_name] + args + try: + return utils.execute(full_args, root_helper=self.root_helper, + process_input=process_input) + except Exception as e: + LOG.error(_("Unable to execute %(cmd)s. 
Exception: %(exception)s"), + {'cmd': full_args, 'exception': e}) + + def count_flows(self): + flow_list = self.run_ofctl("dump-flows", []).split("\n")[1:] + return len(flow_list) - 1 + + def remove_all_flows(self): + self.run_ofctl("del-flows", []) + + def get_port_ofport(self, port_name): + ofport = self.db_get_val("Interface", port_name, "ofport") + # This can return a non-integer string, like '[]' so ensure a + # common failure case + try: + int(ofport) + return ofport + except ValueError: + return constants.INVALID_OFPORT + + def get_datapath_id(self): + return self.db_get_val('Bridge', + self.br_name, 'datapath_id').strip('"') + + def add_flow(self, **kwargs): + flow_str = _build_flow_expr_str(kwargs, 'add') + if self.defer_apply_flows: + self.deferred_flows['add'] += flow_str + '\n' + else: + self.run_ofctl("add-flow", [flow_str]) + + def mod_flow(self, **kwargs): + flow_str = _build_flow_expr_str(kwargs, 'mod') + if self.defer_apply_flows: + self.deferred_flows['mod'] += flow_str + '\n' + else: + self.run_ofctl("mod-flows", [flow_str]) + + def delete_flows(self, **kwargs): + flow_expr_str = _build_flow_expr_str(kwargs, 'del') + if self.defer_apply_flows: + self.deferred_flows['del'] += flow_expr_str + '\n' + else: + self.run_ofctl("del-flows", [flow_expr_str]) + + def dump_flows_for_table(self, table): + retval = None + flow_str = "table=%s" % table + flows = self.run_ofctl("dump-flows", [flow_str]) + if flows: + retval = '\n'.join(item for item in flows.splitlines() + if 'NXST' not in item) + return retval + + def defer_apply_on(self): + LOG.debug(_('defer_apply_on')) + self.defer_apply_flows = True + + def defer_apply_off(self): + LOG.debug(_('defer_apply_off')) + # Note(ethuleau): stash flows and disable deferred mode. Then apply + # flows from the stashed reference to be sure to not purge flows that + # were added between two ofctl commands. 
+ stashed_deferred_flows, self.deferred_flows = ( + self.deferred_flows, {'add': '', 'mod': '', 'del': ''} + ) + self.defer_apply_flows = False + for action, flows in stashed_deferred_flows.items(): + if flows: + LOG.debug(_('Applying following deferred flows ' + 'to bridge %s'), self.br_name) + for line in flows.splitlines(): + LOG.debug(_('%(action)s: %(flow)s'), + {'action': action, 'flow': line}) + self.run_ofctl('%s-flows' % action, ['-'], flows) + + def add_tunnel_port(self, port_name, remote_ip, local_ip, + tunnel_type=p_const.TYPE_GRE, + vxlan_udp_port=constants.VXLAN_UDP_PORT, + dont_fragment=True): + vsctl_command = ["--", "--may-exist", "add-port", self.br_name, + port_name] + vsctl_command.extend(["--", "set", "Interface", port_name, + "type=%s" % tunnel_type]) + if tunnel_type == p_const.TYPE_VXLAN: + # Only set the VXLAN UDP port if it's not the default + if vxlan_udp_port != constants.VXLAN_UDP_PORT: + vsctl_command.append("options:dst_port=%s" % vxlan_udp_port) + vsctl_command.append(("options:df_default=%s" % + bool(dont_fragment)).lower()) + vsctl_command.extend(["options:remote_ip=%s" % remote_ip, + "options:local_ip=%s" % local_ip, + "options:in_key=flow", + "options:out_key=flow"]) + self.run_vsctl(vsctl_command) + ofport = self.get_port_ofport(port_name) + if (tunnel_type == p_const.TYPE_VXLAN and + ofport == constants.INVALID_OFPORT): + LOG.error(_('Unable to create VXLAN tunnel port. 
Please ensure ' + 'that an openvswitch version that supports VXLAN is ' + 'installed.')) + return ofport + + def add_patch_port(self, local_name, remote_name): + self.run_vsctl(["add-port", self.br_name, local_name, + "--", "set", "Interface", local_name, + "type=patch", "options:peer=%s" % remote_name]) + return self.get_port_ofport(local_name) + + def db_get_map(self, table, record, column, check_error=False): + output = self.run_vsctl(["get", table, record, column], check_error) + if output: + output_str = output.rstrip("\n\r") + return self.db_str_to_map(output_str) + return {} + + def db_get_val(self, table, record, column, check_error=False): + output = self.run_vsctl(["get", table, record, column], check_error) + if output: + return output.rstrip("\n\r") + + def db_str_to_map(self, full_str): + list = full_str.strip("{}").split(", ") + ret = {} + for e in list: + if e.find("=") == -1: + continue + arr = e.split("=") + ret[arr[0]] = arr[1].strip("\"") + return ret + + def get_port_name_list(self): + res = self.run_vsctl(["list-ports", self.br_name], check_error=True) + if res: + return res.strip().split("\n") + return [] + + def get_port_stats(self, port_name): + return self.db_get_map("Interface", port_name, "statistics") + + def get_xapi_iface_id(self, xs_vif_uuid): + args = ["xe", "vif-param-get", "param-name=other-config", + "param-key=nicira-iface-id", "uuid=%s" % xs_vif_uuid] + try: + return utils.execute(args, root_helper=self.root_helper).strip() + except Exception as e: + with excutils.save_and_reraise_exception(): + LOG.error(_("Unable to execute %(cmd)s. 
" + "Exception: %(exception)s"), + {'cmd': args, 'exception': e}) + + # returns a VIF object for each VIF port + def get_vif_ports(self): + edge_ports = [] + port_names = self.get_port_name_list() + for name in port_names: + external_ids = self.db_get_map("Interface", name, "external_ids", + check_error=True) + ofport = self.db_get_val("Interface", name, "ofport", + check_error=True) + if "iface-id" in external_ids and "attached-mac" in external_ids: + p = VifPort(name, ofport, external_ids["iface-id"], + external_ids["attached-mac"], self) + edge_ports.append(p) + elif ("xs-vif-uuid" in external_ids and + "attached-mac" in external_ids): + # if this is a xenserver and iface-id is not automatically + # synced to OVS from XAPI, we grab it from XAPI directly + iface_id = self.get_xapi_iface_id(external_ids["xs-vif-uuid"]) + p = VifPort(name, ofport, iface_id, + external_ids["attached-mac"], self) + edge_ports.append(p) + + return edge_ports + + def get_vif_port_set(self): + port_names = self.get_port_name_list() + edge_ports = set() + args = ['--format=json', '--', '--columns=name,external_ids,ofport', + 'list', 'Interface'] + result = self.run_vsctl(args, check_error=True) + if not result: + return edge_ports + for row in jsonutils.loads(result)['data']: + name = row[0] + if name not in port_names: + continue + external_ids = dict(row[1][1]) + # Do not consider VIFs which aren't yet ready + # This can happen when ofport values are either [] or ["set", []] + # We will therefore consider only integer values for ofport + ofport = row[2] + try: + int_ofport = int(ofport) + except (ValueError, TypeError): + LOG.warn(_("Found not yet ready openvswitch port: %s"), row) + else: + if int_ofport > 0: + if ("iface-id" in external_ids and + "attached-mac" in external_ids): + edge_ports.add(external_ids['iface-id']) + elif ("xs-vif-uuid" in external_ids and + "attached-mac" in external_ids): + # if this is a xenserver and iface-id is not + # automatically synced to OVS from 
XAPI, we grab it + # from XAPI directly + iface_id = self.get_xapi_iface_id( + external_ids["xs-vif-uuid"]) + edge_ports.add(iface_id) + else: + LOG.warn(_("Found failed openvswitch port: %s"), row) + return edge_ports + + def get_port_tag_dict(self): + """Get a dict of port names and associated vlan tags. + + e.g. the returned dict is of the following form:: + + {u'int-br-eth2': [], + u'patch-tun': [], + u'qr-76d9e6b6-21': 1, + u'tapce5318ff-78': 1, + u'tape1400310-e6': 1} + + The TAG ID is only available in the "Port" table and is not available + in the "Interface" table queried by the get_vif_port_set() method. + + """ + port_names = self.get_port_name_list() + args = ['--format=json', '--', '--columns=name,tag', 'list', 'Port'] + result = self.run_vsctl(args, check_error=True) + port_tag_dict = {} + if not result: + return port_tag_dict + for name, tag in jsonutils.loads(result)['data']: + if name not in port_names: + continue + # 'tag' can be [u'set', []] or an integer + if isinstance(tag, list): + tag = tag[1] + port_tag_dict[name] = tag + return port_tag_dict + + def get_vif_port_by_id(self, port_id): + args = ['--format=json', '--', '--columns=external_ids,name,ofport', + 'find', 'Interface', + 'external_ids:iface-id="%s"' % port_id] + result = self.run_vsctl(args) + if not result: + return + json_result = jsonutils.loads(result) + try: + # Retrieve the indexes of the columns we're looking for + headings = json_result['headings'] + ext_ids_idx = headings.index('external_ids') + name_idx = headings.index('name') + ofport_idx = headings.index('ofport') + # If data attribute is missing or empty the line below will raise + # an exeception which will be captured in this block. 
+ # We won't deal with the possibility of ovs-vsctl return multiple + # rows since the interface identifier is unique + data = json_result['data'][0] + port_name = data[name_idx] + switch = get_bridge_for_iface(self.root_helper, port_name) + if switch != self.br_name: + LOG.info(_("Port: %(port_name)s is on %(switch)s," + " not on %(br_name)s"), {'port_name': port_name, + 'switch': switch, + 'br_name': self.br_name}) + return + ofport = data[ofport_idx] + # ofport must be integer otherwise return None + if not isinstance(ofport, int) or ofport == -1: + LOG.warn(_("ofport: %(ofport)s for VIF: %(vif)s is not a " + "positive integer"), {'ofport': ofport, + 'vif': port_id}) + return + # Find VIF's mac address in external ids + ext_id_dict = dict((item[0], item[1]) for item in + data[ext_ids_idx][1]) + vif_mac = ext_id_dict['attached-mac'] + return VifPort(port_name, ofport, port_id, vif_mac, self) + except Exception as e: + LOG.warn(_("Unable to parse interface details. Exception: %s"), e) + return + + def delete_ports(self, all_ports=False): + if all_ports: + port_names = self.get_port_name_list() + else: + port_names = (port.port_name for port in self.get_vif_ports()) + + for port_name in port_names: + self.delete_port(port_name) + + def get_local_port_mac(self): + """Retrieve the mac of the bridge's local port.""" + address = ip_lib.IPDevice(self.br_name, self.root_helper).link.address + if address: + return address + else: + msg = _('Unable to determine mac address for %s') % self.br_name + raise Exception(msg) + + def __enter__(self): + self.create() + return self + + def __exit__(self, exc_type, exc_value, exc_tb): + self.destroy() + + +def get_bridge_for_iface(root_helper, iface): + args = ["ovs-vsctl", "--timeout=%d" % cfg.CONF.ovs_vsctl_timeout, + "iface-to-br", iface] + try: + return utils.execute(args, root_helper=root_helper).strip() + except Exception: + LOG.exception(_("Interface %s not found."), iface) + return None + + +def get_bridges(root_helper): + 
args = ["ovs-vsctl", "--timeout=%d" % cfg.CONF.ovs_vsctl_timeout, + "list-br"] + try: + return utils.execute(args, root_helper=root_helper).strip().split("\n") + except Exception as e: + with excutils.save_and_reraise_exception(): + LOG.exception(_("Unable to retrieve bridges. Exception: %s"), e) + + +def get_bridge_external_bridge_id(root_helper, bridge): + args = ["ovs-vsctl", "--timeout=2", "br-get-external-id", + bridge, "bridge-id"] + try: + return utils.execute(args, root_helper=root_helper).strip() + except Exception: + LOG.exception(_("Bridge %s not found."), bridge) + return None + + +def _build_flow_expr_str(flow_dict, cmd): + flow_expr_arr = [] + actions = None + + if cmd == 'add': + flow_expr_arr.append("hard_timeout=%s" % + flow_dict.pop('hard_timeout', '0')) + flow_expr_arr.append("idle_timeout=%s" % + flow_dict.pop('idle_timeout', '0')) + flow_expr_arr.append("priority=%s" % + flow_dict.pop('priority', '1')) + elif 'priority' in flow_dict: + msg = _("Cannot match priority on flow deletion or modification") + raise exceptions.InvalidInput(error_message=msg) + + if cmd != 'del': + if "actions" not in flow_dict: + msg = _("Must specify one or more actions on flow addition" + " or modification") + raise exceptions.InvalidInput(error_message=msg) + actions = "actions=%s" % flow_dict.pop('actions') + + for key, value in flow_dict.iteritems(): + if key == 'proto': + flow_expr_arr.append(value) + else: + flow_expr_arr.append("%s=%s" % (key, str(value))) + + if actions: + flow_expr_arr.append(actions) + + return ','.join(flow_expr_arr) + + +def ofctl_arg_supported(root_helper, cmd, args): + '''Verify if ovs-ofctl binary supports command with specific args. + + :param root_helper: utility to use when running shell cmds. + :param cmd: ovs-vsctl command to use for test. + :param args: arguments to test with command. + :returns: a boolean if the args supported. 
+ ''' + supported = True + br_name = 'br-test-%s' % common_utils.get_random_string(6) + test_br = OVSBridge(br_name, root_helper) + test_br.reset_bridge() + + full_args = ["ovs-ofctl", cmd, test_br.br_name] + args + try: + utils.execute(full_args, root_helper=root_helper) + except Exception: + supported = False + + test_br.destroy() + return supported diff --git a/neutron/agent/linux/ovsdb_monitor.py b/neutron/agent/linux/ovsdb_monitor.py new file mode 100644 index 000000000..33f1c5e72 --- /dev/null +++ b/neutron/agent/linux/ovsdb_monitor.py @@ -0,0 +1,107 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+
+import eventlet
+
+from neutron.agent.linux import async_process
+from neutron.openstack.common import log as logging
+
+
+LOG = logging.getLogger(__name__)
+
+
+class OvsdbMonitor(async_process.AsyncProcess):
+    """Manages an invocation of 'ovsdb-client monitor'."""
+
+    def __init__(self, table_name, columns=None, format=None,
+                 root_helper=None, respawn_interval=None):
+
+        cmd = ['ovsdb-client', 'monitor', table_name]
+        if columns:
+            cmd.append(','.join(columns))
+        if format:
+            cmd.append('--format=%s' % format)
+        super(OvsdbMonitor, self).__init__(cmd,
+                                           root_helper=root_helper,
+                                           respawn_interval=respawn_interval)
+
+    def _read_stdout(self):
+        data = self._process.stdout.readline()
+        if not data:
+            return
+        self._stdout_lines.put(data)
+        LOG.debug(_('Output received from ovsdb monitor: %s') % data)
+        return data
+
+    def _read_stderr(self):
+        data = super(OvsdbMonitor, self)._read_stderr()
+        if data:
+            LOG.error(_('Error received from ovsdb monitor: %s') % data)
+            # Do not return value to ensure that stderr output will
+            # stop the monitor.
+
+
+class SimpleInterfaceMonitor(OvsdbMonitor):
+    """Monitors the Interface table of the local host's ovsdb for changes.
+
+    The has_updates() method indicates whether changes to the ovsdb
+    Interface table have been detected since the monitor started or
+    since the previous access.
+    """
+
+    def __init__(self, root_helper=None, respawn_interval=None):
+        super(SimpleInterfaceMonitor, self).__init__(
+            'Interface',
+            columns=['name', 'ofport'],
+            format='json',
+            root_helper=root_helper,
+            respawn_interval=respawn_interval,
+        )
+        self.data_received = False
+
+    @property
+    def is_active(self):
+        return (self.data_received and
+                self._kill_event and
+                not self._kill_event.ready())
+
+    @property
+    def has_updates(self):
+        """Indicate whether the ovsdb Interface table has been updated.
+
+        True will be returned if the monitor process is not active.
+        This 'failing open' minimizes the risk of falsely indicating
+        the absence of updates at the expense of potential false
+        positives.
+        """
+        return bool(list(self.iter_stdout())) or not self.is_active
+
+    def start(self, block=False, timeout=5):
+        super(SimpleInterfaceMonitor, self).start()
+        if block:
+            # Use the Timeout as a context manager so that it is
+            # cancelled once the monitor becomes active; a bare
+            # Timeout(timeout) keeps ticking after start() returns and
+            # would raise asynchronously in this greenthread later.
+            with eventlet.timeout.Timeout(timeout):
+                while not self.is_active:
+                    eventlet.sleep()
+
+    def _kill(self, *args, **kwargs):
+        self.data_received = False
+        super(SimpleInterfaceMonitor, self)._kill(*args, **kwargs)
+
+    def _read_stdout(self):
+        data = super(SimpleInterfaceMonitor, self)._read_stdout()
+        if data and not self.data_received:
+            self.data_received = True
+        return data
diff --git a/neutron/agent/linux/polling.py b/neutron/agent/linux/polling.py
new file mode 100644
index 000000000..8cce60be4
--- /dev/null
+++ b/neutron/agent/linux/polling.py
@@ -0,0 +1,114 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2013 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import contextlib
+
+import eventlet
+
+from neutron.agent.linux import ovsdb_monitor
+from neutron.plugins.openvswitch.common import constants
+
+
+@contextlib.contextmanager
+def get_polling_manager(minimize_polling=False,
+                        root_helper=None,
+                        ovsdb_monitor_respawn_interval=(
+                            constants.DEFAULT_OVSDBMON_RESPAWN)):
+    if minimize_polling:
+        pm = InterfacePollingMinimizer(
+            root_helper=root_helper,
+            ovsdb_monitor_respawn_interval=ovsdb_monitor_respawn_interval)
+        pm.start()
+    else:
+        pm = AlwaysPoll()
+    try:
+        yield pm
+    finally:
+        if minimize_polling:
+            pm.stop()
+
+
+class BasePollingManager(object):
+
+    def __init__(self):
+        self._force_polling = False
+        self._polling_completed = True
+
+    def force_polling(self):
+        self._force_polling = True
+
+    def polling_completed(self):
+        self._polling_completed = True
+
+    def _is_polling_required(self):
+        # NOTE: NotImplemented is a comparison sentinel, not an
+        # exception; raising it is a TypeError on Python 3.
+        raise NotImplementedError()
+
+    @property
+    def is_polling_required(self):
+        # Always consume the updates to minimize polling.
+        polling_required = self._is_polling_required()
+
+        # Polling is required regardless of whether updates have been
+        # detected.
+        if self._force_polling:
+            self._force_polling = False
+            polling_required = True
+
+        # Polling is required if not yet done for previously detected
+        # updates.
+        if not self._polling_completed:
+            polling_required = True
+
+        if polling_required:
+            # Track whether polling has been completed to ensure that
+            # polling can be required until the caller indicates via a
+            # call to polling_completed() that polling has been
+            # successfully performed.
+            self._polling_completed = False
+
+        return polling_required
+
+
+class AlwaysPoll(BasePollingManager):
+
+    @property
+    def is_polling_required(self):
+        return True
+
+
+class InterfacePollingMinimizer(BasePollingManager):
+    """Monitors ovsdb to determine when polling is required."""
+
+    def __init__(self, root_helper=None,
+                 ovsdb_monitor_respawn_interval=(
+                     constants.DEFAULT_OVSDBMON_RESPAWN)):
+
+        super(InterfacePollingMinimizer, self).__init__()
+        self._monitor = ovsdb_monitor.SimpleInterfaceMonitor(
+            root_helper=root_helper,
+            respawn_interval=ovsdb_monitor_respawn_interval)
+
+    def start(self):
+        self._monitor.start()
+
+    def stop(self):
+        self._monitor.stop()
+
+    def _is_polling_required(self):
+        # Maximize the chances of update detection having a chance to
+        # collect output.
+        eventlet.sleep()
+        return self._monitor.has_updates
diff --git a/neutron/agent/linux/utils.py b/neutron/agent/linux/utils.py
new file mode 100644
index 000000000..d4ef237e5
--- /dev/null
+++ b/neutron/agent/linux/utils.py
@@ -0,0 +1,130 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 Locaweb.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# @author: Juliano Martinez, Locaweb.
+ +import fcntl +import os +import shlex +import socket +import struct +import tempfile + +from eventlet.green import subprocess +from eventlet import greenthread + +from neutron.common import utils +from neutron.openstack.common import excutils +from neutron.openstack.common import log as logging + + +LOG = logging.getLogger(__name__) + + +def create_process(cmd, root_helper=None, addl_env=None): + """Create a process object for the given command. + + The return value will be a tuple of the process object and the + list of command arguments used to create it. + """ + if root_helper: + cmd = shlex.split(root_helper) + cmd + cmd = map(str, cmd) + + LOG.debug(_("Running command: %s"), cmd) + env = os.environ.copy() + if addl_env: + env.update(addl_env) + + obj = utils.subprocess_popen(cmd, shell=False, + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + env=env) + + return obj, cmd + + +def execute(cmd, root_helper=None, process_input=None, addl_env=None, + check_exit_code=True, return_stderr=False): + try: + obj, cmd = create_process(cmd, root_helper=root_helper, + addl_env=addl_env) + _stdout, _stderr = (process_input and + obj.communicate(process_input) or + obj.communicate()) + obj.stdin.close() + m = _("\nCommand: %(cmd)s\nExit code: %(code)s\nStdout: %(stdout)r\n" + "Stderr: %(stderr)r") % {'cmd': cmd, 'code': obj.returncode, + 'stdout': _stdout, 'stderr': _stderr} + if obj.returncode: + LOG.error(m) + if check_exit_code: + raise RuntimeError(m) + else: + LOG.debug(m) + finally: + # NOTE(termie): this appears to be necessary to let the subprocess + # call clean something up in between calls, without + # it two execute calls in a row hangs the second one + greenthread.sleep(0) + + return return_stderr and (_stdout, _stderr) or _stdout + + +def get_interface_mac(interface): + DEVICE_NAME_LEN = 15 + MAC_START = 18 + MAC_END = 24 + s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) + info = fcntl.ioctl(s.fileno(), 0x8927, + 
struct.pack('256s', interface[:DEVICE_NAME_LEN])) + return ''.join(['%02x:' % ord(char) + for char in info[MAC_START:MAC_END]])[:-1] + + +def replace_file(file_name, data): + """Replaces the contents of file_name with data in a safe manner. + + First write to a temp file and then rename. Since POSIX renames are + atomic, the file is unlikely to be corrupted by competing writes. + + We create the tempfile on the same device to ensure that it can be renamed. + """ + + base_dir = os.path.dirname(os.path.abspath(file_name)) + tmp_file = tempfile.NamedTemporaryFile('w+', dir=base_dir, delete=False) + tmp_file.write(data) + tmp_file.close() + os.chmod(tmp_file.name, 0o644) + os.rename(tmp_file.name, file_name) + + +def find_child_pids(pid): + """Retrieve a list of the pids of child processes of the given pid.""" + + try: + raw_pids = execute(['ps', '--ppid', pid, '-o', 'pid=']) + except RuntimeError as e: + # Unexpected errors are the responsibility of the caller + with excutils.save_and_reraise_exception() as ctxt: + # Exception has already been logged by execute + no_children_found = 'Exit code: 1' in str(e) + if no_children_found: + ctxt.reraise = False + return [] + return [x.strip() for x in raw_pids.split('\n') if x.strip()] diff --git a/neutron/agent/metadata/__init__.py b/neutron/agent/metadata/__init__.py new file mode 100644 index 000000000..6e2c06205 --- /dev/null +++ b/neutron/agent/metadata/__init__.py @@ -0,0 +1,17 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2012 New Dream Network, LLC (DreamHost) +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Mark McClain, DreamHost diff --git a/neutron/agent/metadata/agent.py b/neutron/agent/metadata/agent.py new file mode 100644 index 000000000..52ffa9121 --- /dev/null +++ b/neutron/agent/metadata/agent.py @@ -0,0 +1,392 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2012 New Dream Network, LLC (DreamHost) +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Mark McClain, DreamHost + +import hashlib +import hmac +import os +import socket +import sys + +import eventlet +eventlet.monkey_patch() + +import httplib2 +from neutronclient.v2_0 import client +from oslo.config import cfg +import six.moves.urllib.parse as urlparse +import webob + +from neutron.agent.common import config as agent_conf +from neutron.agent import rpc as agent_rpc +from neutron.common import config +from neutron.common import constants as n_const +from neutron.common import topics +from neutron.common import utils +from neutron import context +from neutron.openstack.common.cache import cache +from neutron.openstack.common import excutils +from neutron.openstack.common import log as logging +from neutron.openstack.common import loopingcall +from neutron.openstack.common import service +from neutron import wsgi + +LOG = logging.getLogger(__name__) + + +class MetadataProxyHandler(object): + OPTS = [ + cfg.StrOpt('admin_user', + help=_("Admin user")), + 
cfg.StrOpt('admin_password', + help=_("Admin password"), + secret=True), + cfg.StrOpt('admin_tenant_name', + help=_("Admin tenant name")), + cfg.StrOpt('auth_url', + help=_("Authentication URL")), + cfg.StrOpt('auth_strategy', default='keystone', + help=_("The type of authentication to use")), + cfg.StrOpt('auth_region', + help=_("Authentication region")), + cfg.BoolOpt('auth_insecure', + default=False, + help=_("Turn off verification of the certificate for" + " ssl")), + cfg.StrOpt('auth_ca_cert', + help=_("Certificate Authority public key (CA cert) " + "file for ssl")), + cfg.StrOpt('endpoint_type', + default='adminURL', + help=_("Network service endpoint type to pull from " + "the keystone catalog")), + cfg.StrOpt('nova_metadata_ip', default='127.0.0.1', + help=_("IP address used by Nova metadata server.")), + cfg.IntOpt('nova_metadata_port', + default=8775, + help=_("TCP Port used by Nova metadata server.")), + cfg.StrOpt('metadata_proxy_shared_secret', + default='', + help=_('Shared secret to sign instance-id request'), + secret=True), + cfg.StrOpt('nova_metadata_protocol', + default='http', + choices=['http', 'https'], + help=_("Protocol to access nova metadata, http or https")), + cfg.BoolOpt('nova_metadata_insecure', default=False, + help=_("Allow to perform insecure SSL (https) requests to " + "nova metadata")), + cfg.StrOpt('nova_client_cert', + default='', + help=_("Client certificate for nova metadata api server.")), + cfg.StrOpt('nova_client_priv_key', + default='', + help=_("Private key of client certificate.")) + ] + + def __init__(self, conf): + self.conf = conf + self.auth_info = {} + if self.conf.cache_url: + self._cache = cache.get_cache(self.conf.cache_url) + else: + self._cache = False + + def _get_neutron_client(self): + qclient = client.Client( + username=self.conf.admin_user, + password=self.conf.admin_password, + tenant_name=self.conf.admin_tenant_name, + auth_url=self.conf.auth_url, + auth_strategy=self.conf.auth_strategy, + 
region_name=self.conf.auth_region, + token=self.auth_info.get('auth_token'), + insecure=self.conf.auth_insecure, + ca_cert=self.conf.auth_ca_cert, + endpoint_url=self.auth_info.get('endpoint_url'), + endpoint_type=self.conf.endpoint_type + ) + return qclient + + @webob.dec.wsgify(RequestClass=webob.Request) + def __call__(self, req): + try: + LOG.debug(_("Request: %s"), req) + + instance_id, tenant_id = self._get_instance_and_tenant_id(req) + if instance_id: + return self._proxy_request(instance_id, tenant_id, req) + else: + return webob.exc.HTTPNotFound() + + except Exception: + LOG.exception(_("Unexpected error.")) + msg = _('An unknown error has occurred. ' + 'Please try your request again.') + return webob.exc.HTTPInternalServerError(explanation=unicode(msg)) + + @utils.cache_method_results + def _get_router_networks(self, router_id): + """Find all networks connected to given router.""" + qclient = self._get_neutron_client() + + internal_ports = qclient.list_ports( + device_id=router_id, + device_owner=n_const.DEVICE_OWNER_ROUTER_INTF)['ports'] + return tuple(p['network_id'] for p in internal_ports) + + @utils.cache_method_results + def _get_ports_for_remote_address(self, remote_address, networks): + """Get list of ports that has given ip address and are part of + given networks. + + :param networks: list of networks in which the ip address will be + searched for + + """ + qclient = self._get_neutron_client() + + return qclient.list_ports( + network_id=networks, + fixed_ips=['ip_address=%s' % remote_address])['ports'] + + def _get_ports(self, remote_address, network_id=None, router_id=None): + """Search for all ports that contain passed ip address and belongs to + given network. + + If no network is passed ports are searched on all networks connected to + given router. Either one of network_id or router_id must be passed. 
+ + """ + if network_id: + networks = (network_id,) + elif router_id: + networks = self._get_router_networks(router_id) + else: + raise TypeError(_("Either one of parameter network_id or router_id" + " must be passed to _get_ports method.")) + + return self._get_ports_for_remote_address(remote_address, networks) + + def _get_instance_and_tenant_id(self, req): + qclient = self._get_neutron_client() + + remote_address = req.headers.get('X-Forwarded-For') + network_id = req.headers.get('X-Neutron-Network-ID') + router_id = req.headers.get('X-Neutron-Router-ID') + + ports = self._get_ports(remote_address, network_id, router_id) + + self.auth_info = qclient.get_auth_info() + if len(ports) == 1: + return ports[0]['device_id'], ports[0]['tenant_id'] + return None, None + + def _proxy_request(self, instance_id, tenant_id, req): + headers = { + 'X-Forwarded-For': req.headers.get('X-Forwarded-For'), + 'X-Instance-ID': instance_id, + 'X-Tenant-ID': tenant_id, + 'X-Instance-ID-Signature': self._sign_instance_id(instance_id) + } + + nova_ip_port = '%s:%s' % (self.conf.nova_metadata_ip, + self.conf.nova_metadata_port) + url = urlparse.urlunsplit(( + self.conf.nova_metadata_protocol, + nova_ip_port, + req.path_info, + req.query_string, + '')) + + h = httplib2.Http(ca_certs=self.conf.auth_ca_cert, + disable_ssl_certificate_validation= + self.conf.nova_metadata_insecure) + if self.conf.nova_client_cert and self.conf.nova_client_priv_key: + h.add_certificate(self.conf.nova_client_priv_key, + self.conf.nova_client_cert, + nova_ip_port) + resp, content = h.request(url, method=req.method, headers=headers, + body=req.body) + + if resp.status == 200: + LOG.debug(str(resp)) + req.response.content_type = resp['content-type'] + req.response.body = content + return req.response + elif resp.status == 403: + msg = _( + 'The remote metadata server responded with Forbidden. This ' + 'response usually occurs when shared secrets do not match.' 
+ ) + LOG.warn(msg) + return webob.exc.HTTPForbidden() + elif resp.status == 404: + return webob.exc.HTTPNotFound() + elif resp.status == 409: + return webob.exc.HTTPConflict() + elif resp.status == 500: + msg = _( + 'Remote metadata server experienced an internal server error.' + ) + LOG.warn(msg) + return webob.exc.HTTPInternalServerError(explanation=unicode(msg)) + else: + raise Exception(_('Unexpected response code: %s') % resp.status) + + def _sign_instance_id(self, instance_id): + return hmac.new(self.conf.metadata_proxy_shared_secret, + instance_id, + hashlib.sha256).hexdigest() + + +class UnixDomainHttpProtocol(eventlet.wsgi.HttpProtocol): + def __init__(self, request, client_address, server): + if client_address == '': + client_address = ('', 0) + # base class is old-style, so super does not work properly + eventlet.wsgi.HttpProtocol.__init__(self, request, client_address, + server) + + +class WorkerService(wsgi.WorkerService): + def start(self): + self._server = self._service.pool.spawn(self._service._run, + self._application, + self._service._socket) + + +class UnixDomainWSGIServer(wsgi.Server): + def __init__(self, name): + self._socket = None + self._launcher = None + self._server = None + super(UnixDomainWSGIServer, self).__init__(name) + + def start(self, application, file_socket, workers, backlog): + self._socket = eventlet.listen(file_socket, + family=socket.AF_UNIX, + backlog=backlog) + if workers < 1: + # For the case where only one process is required. + self._server = self.pool.spawn_n(self._run, application, + self._socket) + else: + # Minimize the cost of checking for child exit by extending the + # wait interval past the default of 0.01s. 
+ self._launcher = service.ProcessLauncher(wait_interval=1.0) + self._server = WorkerService(self, application) + self._launcher.launch_service(self._server, workers=workers) + + def _run(self, application, socket): + """Start a WSGI service in a new green thread.""" + logger = logging.getLogger('eventlet.wsgi.server') + eventlet.wsgi.server(socket, + application, + custom_pool=self.pool, + protocol=UnixDomainHttpProtocol, + log=logging.WritableLogger(logger)) + + +class UnixDomainMetadataProxy(object): + OPTS = [ + cfg.StrOpt('metadata_proxy_socket', + default='$state_path/metadata_proxy', + help=_('Location for Metadata Proxy UNIX domain socket')), + cfg.IntOpt('metadata_workers', + default=utils.cpu_count() // 2, + help=_('Number of separate worker processes for metadata ' + 'server')), + cfg.IntOpt('metadata_backlog', + default=4096, + help=_('Number of backlog requests to configure the ' + 'metadata server socket with')) + ] + + def __init__(self, conf): + self.conf = conf + + dirname = os.path.dirname(cfg.CONF.metadata_proxy_socket) + if os.path.isdir(dirname): + try: + os.unlink(cfg.CONF.metadata_proxy_socket) + except OSError: + with excutils.save_and_reraise_exception() as ctxt: + if not os.path.exists(cfg.CONF.metadata_proxy_socket): + ctxt.reraise = False + else: + os.makedirs(dirname, 0o755) + + self._init_state_reporting() + + def _init_state_reporting(self): + self.context = context.get_admin_context_without_session() + self.state_rpc = agent_rpc.PluginReportStateAPI(topics.PLUGIN) + self.agent_state = { + 'binary': 'neutron-metadata-agent', + 'host': cfg.CONF.host, + 'topic': 'N/A', + 'configurations': { + 'metadata_proxy_socket': cfg.CONF.metadata_proxy_socket, + 'nova_metadata_ip': cfg.CONF.nova_metadata_ip, + 'nova_metadata_port': cfg.CONF.nova_metadata_port, + }, + 'start_flag': True, + 'agent_type': n_const.AGENT_TYPE_METADATA} + report_interval = cfg.CONF.AGENT.report_interval + if report_interval: + self.heartbeat = 
loopingcall.FixedIntervalLoopingCall( + self._report_state) + self.heartbeat.start(interval=report_interval) + + def _report_state(self): + try: + self.state_rpc.report_state( + self.context, + self.agent_state, + use_call=self.agent_state.get('start_flag')) + except AttributeError: + # This means the server does not support report_state + LOG.warn(_('Neutron server does not support state report.' + ' State report for this agent will be disabled.')) + self.heartbeat.stop() + return + except Exception: + LOG.exception(_("Failed reporting state!")) + return + self.agent_state.pop('start_flag', None) + + def run(self): + server = UnixDomainWSGIServer('neutron-metadata-agent') + server.start(MetadataProxyHandler(self.conf), + self.conf.metadata_proxy_socket, + workers=self.conf.metadata_workers, + backlog=self.conf.metadata_backlog) + server.wait() + + +def main(): + cfg.CONF.register_opts(UnixDomainMetadataProxy.OPTS) + cfg.CONF.register_opts(MetadataProxyHandler.OPTS) + cache.register_oslo_configs(cfg.CONF) + cfg.CONF.set_default(name='cache_url', default='memory://?default_ttl=5') + agent_conf.register_agent_state_opts_helper(cfg.CONF) + config.init(sys.argv[1:]) + config.setup_logging(cfg.CONF) + utils.log_opt_values(LOG) + proxy = UnixDomainMetadataProxy(cfg.CONF) + proxy.run() diff --git a/neutron/agent/metadata/namespace_proxy.py b/neutron/agent/metadata/namespace_proxy.py new file mode 100644 index 000000000..e330b22ca --- /dev/null +++ b/neutron/agent/metadata/namespace_proxy.py @@ -0,0 +1,184 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2012 New Dream Network, LLC (DreamHost) +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Mark McClain, DreamHost + +import httplib +import socket + +import eventlet +eventlet.monkey_patch() + +import httplib2 +from oslo.config import cfg +import six.moves.urllib.parse as urlparse +import webob + +from neutron.agent.linux import daemon +from neutron.common import config +from neutron.common import utils +from neutron.openstack.common import log as logging +from neutron import wsgi + +LOG = logging.getLogger(__name__) + + +class UnixDomainHTTPConnection(httplib.HTTPConnection): + """Connection class for HTTP over UNIX domain socket.""" + def __init__(self, host, port=None, strict=None, timeout=None, + proxy_info=None): + httplib.HTTPConnection.__init__(self, host, port, strict) + self.timeout = timeout + + def connect(self): + self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) + if self.timeout: + self.sock.settimeout(self.timeout) + self.sock.connect(cfg.CONF.metadata_proxy_socket) + + +class NetworkMetadataProxyHandler(object): + """Proxy AF_INET metadata request through Unix Domain socket. + + The Unix domain socket allows the proxy access resource that are not + accessible within the isolated tenant context. + """ + + def __init__(self, network_id=None, router_id=None): + self.network_id = network_id + self.router_id = router_id + + if network_id is None and router_id is None: + msg = _('network_id and router_id are None. 
One must be provided.') + raise ValueError(msg) + + @webob.dec.wsgify(RequestClass=webob.Request) + def __call__(self, req): + LOG.debug(_("Request: %s"), req) + try: + return self._proxy_request(req.remote_addr, + req.method, + req.path_info, + req.query_string, + req.body) + except Exception: + LOG.exception(_("Unexpected error.")) + msg = _('An unknown error has occurred. ' + 'Please try your request again.') + return webob.exc.HTTPInternalServerError(explanation=unicode(msg)) + + def _proxy_request(self, remote_address, method, path_info, + query_string, body): + headers = { + 'X-Forwarded-For': remote_address, + } + + if self.router_id: + headers['X-Neutron-Router-ID'] = self.router_id + else: + headers['X-Neutron-Network-ID'] = self.network_id + + url = urlparse.urlunsplit(( + 'http', + '169.254.169.254', # a dummy value to make the request proper + path_info, + query_string, + '')) + + h = httplib2.Http() + resp, content = h.request( + url, + method=method, + headers=headers, + body=body, + connection_type=UnixDomainHTTPConnection) + + if resp.status == 200: + LOG.debug(resp) + LOG.debug(content) + response = webob.Response() + response.status = resp.status + response.headers['Content-Type'] = resp['content-type'] + response.body = content + return response + elif resp.status == 404: + return webob.exc.HTTPNotFound() + elif resp.status == 409: + return webob.exc.HTTPConflict() + elif resp.status == 500: + msg = _( + 'Remote metadata server experienced an internal server error.' 
+ ) + LOG.debug(msg) + return webob.exc.HTTPInternalServerError(explanation=unicode(msg)) + else: + raise Exception(_('Unexpected response code: %s') % resp.status) + + +class ProxyDaemon(daemon.Daemon): + def __init__(self, pidfile, port, network_id=None, router_id=None): + uuid = network_id or router_id + super(ProxyDaemon, self).__init__(pidfile, uuid=uuid) + self.network_id = network_id + self.router_id = router_id + self.port = port + + def run(self): + handler = NetworkMetadataProxyHandler( + self.network_id, + self.router_id) + proxy = wsgi.Server('neutron-network-metadata-proxy') + proxy.start(handler, self.port) + proxy.wait() + + +def main(): + opts = [ + cfg.StrOpt('network_id', + help=_('Network that will have instance metadata ' + 'proxied.')), + cfg.StrOpt('router_id', + help=_('Router that will have connected instances\' ' + 'metadata proxied.')), + cfg.StrOpt('pid_file', + help=_('Location of pid file of this process.')), + cfg.BoolOpt('daemonize', + default=True, + help=_('Run as daemon.')), + cfg.IntOpt('metadata_port', + default=9697, + help=_("TCP Port to listen for metadata server " + "requests.")), + cfg.StrOpt('metadata_proxy_socket', + default='$state_path/metadata_proxy', + help=_('Location of Metadata Proxy UNIX domain ' + 'socket')) + ] + + cfg.CONF.register_cli_opts(opts) + # Don't get the default configuration file + cfg.CONF(project='neutron', default_config_files=[]) + config.setup_logging(cfg.CONF) + utils.log_opt_values(LOG) + proxy = ProxyDaemon(cfg.CONF.pid_file, + cfg.CONF.metadata_port, + network_id=cfg.CONF.network_id, + router_id=cfg.CONF.router_id) + + if cfg.CONF.daemonize: + proxy.start() + else: + proxy.run() diff --git a/neutron/agent/netns_cleanup_util.py b/neutron/agent/netns_cleanup_util.py new file mode 100644 index 000000000..c7e5915b3 --- /dev/null +++ b/neutron/agent/netns_cleanup_util.py @@ -0,0 +1,176 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2012 OpenStack Foundation. 
+# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import re + +import eventlet +eventlet.monkey_patch() + +from oslo.config import cfg + +from neutron.agent.common import config as agent_config +from neutron.agent import dhcp_agent +from neutron.agent import l3_agent +from neutron.agent.linux import dhcp +from neutron.agent.linux import interface +from neutron.agent.linux import ip_lib +from neutron.agent.linux import ovs_lib +from neutron.api.v2 import attributes +from neutron.common import config +from neutron.openstack.common import importutils +from neutron.openstack.common import log as logging + + +LOG = logging.getLogger(__name__) +NS_MANGLING_PATTERN = ('(%s|%s)' % (dhcp.NS_PREFIX, l3_agent.NS_PREFIX) + + attributes.UUID_PATTERN) + + +class FakeDhcpPlugin(object): + """Fake RPC plugin to bypass any RPC calls.""" + def __getattribute__(self, name): + def fake_method(*args): + pass + return fake_method + + +def setup_conf(): + """Setup the cfg for the clean up utility. + + Use separate setup_conf for the utility because there are many options + from the main config that do not apply during clean-up. 
+ """ + + cli_opts = [ + cfg.BoolOpt('force', + default=False, + help=_('Delete the namespace by removing all devices.')), + ] + + conf = cfg.CONF + conf.register_cli_opts(cli_opts) + agent_config.register_interface_driver_opts_helper(conf) + agent_config.register_use_namespaces_opts_helper(conf) + agent_config.register_root_helper(conf) + conf.register_opts(dhcp.OPTS) + conf.register_opts(dhcp_agent.DhcpAgent.OPTS) + conf.register_opts(interface.OPTS) + return conf + + +def kill_dhcp(conf, namespace): + """Disable DHCP for a network if DHCP is still active.""" + root_helper = agent_config.get_root_helper(conf) + network_id = namespace.replace(dhcp.NS_PREFIX, '') + + dhcp_driver = importutils.import_object( + conf.dhcp_driver, + conf=conf, + network=dhcp.NetModel(conf.use_namespaces, {'id': network_id}), + root_helper=root_helper, + plugin=FakeDhcpPlugin()) + + if dhcp_driver.active: + dhcp_driver.disable() + + +def eligible_for_deletion(conf, namespace, force=False): + """Determine whether a namespace is eligible for deletion. + + Eligibility is determined by having only the lo device or if force + is passed as a parameter. + """ + + # filter out namespaces without UUID as the name + if not re.match(NS_MANGLING_PATTERN, namespace): + return False + + root_helper = agent_config.get_root_helper(conf) + ip = ip_lib.IPWrapper(root_helper, namespace) + return force or ip.namespace_is_empty() + + +def unplug_device(conf, device): + try: + device.link.delete() + except RuntimeError: + root_helper = agent_config.get_root_helper(conf) + # Maybe the device is OVS port, so try to delete + bridge_name = ovs_lib.get_bridge_for_iface(root_helper, device.name) + if bridge_name: + bridge = ovs_lib.OVSBridge(bridge_name, root_helper) + bridge.delete_port(device.name) + else: + LOG.debug(_('Unable to find bridge for device: %s'), device.name) + + +def destroy_namespace(conf, namespace, force=False): + """Destroy a given namespace. 
+ + If force is True, then dhcp (if it exists) will be disabled and all + devices will be forcibly removed. + """ + + try: + root_helper = agent_config.get_root_helper(conf) + ip = ip_lib.IPWrapper(root_helper, namespace) + + if force: + kill_dhcp(conf, namespace) + # NOTE: The dhcp driver will remove the namespace if is it empty, + # so a second check is required here. + if ip.netns.exists(namespace): + for device in ip.get_devices(exclude_loopback=True): + unplug_device(conf, device) + + ip.garbage_collect_namespace() + except Exception: + LOG.exception(_('Error unable to destroy namespace: %s'), namespace) + + +def main(): + """Main method for cleaning up network namespaces. + + This method will make two passes checking for namespaces to delete. The + process will identify candidates, sleep, and call garbage collect. The + garbage collection will re-verify that the namespace meets the criteria for + deletion (ie it is empty). The period of sleep and the 2nd pass allow + time for the namespace state to settle, so that the check prior deletion + will re-confirm the namespace is empty. + + The utility is designed to clean-up after the forced or unexpected + termination of Neutron agents. + + The --force flag should only be used as part of the cleanup of a devstack + installation as it will blindly purge namespaces and their devices. This + option also kills any lingering DHCP instances. + """ + conf = setup_conf() + conf() + config.setup_logging(conf) + + root_helper = agent_config.get_root_helper(conf) + # Identify namespaces that are candidates for deletion. 
+ candidates = [ns for ns in + ip_lib.IPWrapper.get_namespaces(root_helper) + if eligible_for_deletion(conf, ns, conf.force)] + + if candidates: + eventlet.sleep(2) + + for namespace in candidates: + destroy_namespace(conf, namespace, conf.force) diff --git a/neutron/agent/ovs_cleanup_util.py b/neutron/agent/ovs_cleanup_util.py new file mode 100644 index 000000000..f7a388b3d --- /dev/null +++ b/neutron/agent/ovs_cleanup_util.py @@ -0,0 +1,112 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2012 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo.config import cfg + +from neutron.agent.common import config as agent_config +from neutron.agent import l3_agent +from neutron.agent.linux import interface +from neutron.agent.linux import ip_lib +from neutron.agent.linux import ovs_lib +from neutron.common import config +from neutron.openstack.common import log as logging + + +LOG = logging.getLogger(__name__) + + +def setup_conf(): + """Setup the cfg for the clean up utility. + + Use separate setup_conf for the utility because there are many options + from the main config that do not apply during clean-up. + """ + opts = [ + cfg.BoolOpt('ovs_all_ports', + default=False, + help=_('True to delete all ports on all the OpenvSwitch ' + 'bridges. 
False to delete ports created by ' + 'Neutron on integration and external network ' + 'bridges.')) + ] + + conf = cfg.CONF + conf.register_cli_opts(opts) + conf.register_opts(l3_agent.L3NATAgent.OPTS) + conf.register_opts(interface.OPTS) + agent_config.register_interface_driver_opts_helper(conf) + agent_config.register_use_namespaces_opts_helper(conf) + agent_config.register_root_helper(conf) + return conf + + +def collect_neutron_ports(bridges, root_helper): + """Collect ports created by Neutron from OVS.""" + ports = [] + for bridge in bridges: + ovs = ovs_lib.OVSBridge(bridge, root_helper) + ports += [port.port_name for port in ovs.get_vif_ports()] + return ports + + +def delete_neutron_ports(ports, root_helper): + """Delete non-internal ports created by Neutron + + Non-internal OVS ports need to be removed manually. + """ + for port in ports: + if ip_lib.device_exists(port): + device = ip_lib.IPDevice(port, root_helper) + device.link.delete() + LOG.info(_("Delete %s"), port) + + +def main(): + """Main method for cleaning up OVS bridges. + + The utility cleans up the integration bridges used by Neutron. + """ + + conf = setup_conf() + conf() + config.setup_logging(conf) + + configuration_bridges = set([conf.ovs_integration_bridge, + conf.external_network_bridge]) + ovs_bridges = set(ovs_lib.get_bridges(conf.AGENT.root_helper)) + available_configuration_bridges = configuration_bridges & ovs_bridges + + if conf.ovs_all_ports: + bridges = ovs_bridges + else: + bridges = available_configuration_bridges + + # Collect existing ports created by Neutron on configuration bridges. + # After deleting ports from OVS bridges, we cannot determine which + # ports were created by Neutron, so port information is collected now. 
+ ports = collect_neutron_ports(available_configuration_bridges, + conf.AGENT.root_helper) + + for bridge in bridges: + LOG.info(_("Cleaning %s"), bridge) + ovs = ovs_lib.OVSBridge(bridge, conf.AGENT.root_helper) + ovs.delete_ports(all_ports=conf.ovs_all_ports) + + # Remove remaining ports created by Neutron (usually veth pair) + delete_neutron_ports(ports, conf.AGENT.root_helper) + + LOG.info(_("OVS cleanup completed successfully")) diff --git a/neutron/agent/rpc.py b/neutron/agent/rpc.py new file mode 100644 index 000000000..1b2533f20 --- /dev/null +++ b/neutron/agent/rpc.py @@ -0,0 +1,112 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2012 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import itertools + +from neutron.common import rpc_compat +from neutron.common import topics + +from neutron.openstack.common import log as logging +from neutron.openstack.common import timeutils + + +LOG = logging.getLogger(__name__) + + +def create_consumers(endpoints, prefix, topic_details): + """Create agent RPC consumers. + + :param endpoints: The list of endpoints to process the incoming messages. + :param prefix: Common prefix for the plugin/agent message queues. + :param topic_details: A list of topics. Each topic has a name, an + operation, and an optional host param keying the + subscription to topic.host for plugin calls. + + :returns: A common Connection. 
+ """ + + connection = rpc_compat.create_connection(new=True) + for details in topic_details: + topic, operation, node_name = itertools.islice( + itertools.chain(details, [None]), 3) + + topic_name = topics.get_topic_name(prefix, topic, operation) + connection.create_consumer(topic_name, endpoints, fanout=True) + if node_name: + node_topic_name = '%s.%s' % (topic_name, node_name) + connection.create_consumer(node_topic_name, + endpoints, + fanout=False) + connection.consume_in_threads() + return connection + + +class PluginReportStateAPI(rpc_compat.RpcProxy): + BASE_RPC_API_VERSION = '1.0' + + def __init__(self, topic): + super(PluginReportStateAPI, self).__init__( + topic=topic, default_version=self.BASE_RPC_API_VERSION) + + def report_state(self, context, agent_state, use_call=False): + msg = self.make_msg('report_state', + agent_state={'agent_state': + agent_state}, + time=timeutils.strtime()) + if use_call: + return self.call(context, msg, topic=self.topic) + else: + return self.cast(context, msg, topic=self.topic) + + +class PluginApi(rpc_compat.RpcProxy): + '''Agent side of the rpc API. + + API version history: + 1.0 - Initial version. 
+ + ''' + + BASE_RPC_API_VERSION = '1.1' + + def __init__(self, topic): + super(PluginApi, self).__init__( + topic=topic, default_version=self.BASE_RPC_API_VERSION) + + def get_device_details(self, context, device, agent_id): + return self.call(context, + self.make_msg('get_device_details', device=device, + agent_id=agent_id), + topic=self.topic) + + def update_device_down(self, context, device, agent_id, host=None): + return self.call(context, + self.make_msg('update_device_down', device=device, + agent_id=agent_id, host=host), + topic=self.topic) + + def update_device_up(self, context, device, agent_id, host=None): + return self.call(context, + self.make_msg('update_device_up', device=device, + agent_id=agent_id, host=host), + topic=self.topic) + + def tunnel_sync(self, context, tunnel_ip, tunnel_type=None): + return self.call(context, + self.make_msg('tunnel_sync', tunnel_ip=tunnel_ip, + tunnel_type=tunnel_type), + topic=self.topic) diff --git a/neutron/agent/securitygroups_rpc.py b/neutron/agent/securitygroups_rpc.py new file mode 100644 index 000000000..736ee659c --- /dev/null +++ b/neutron/agent/securitygroups_rpc.py @@ -0,0 +1,303 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2012, Nachi Ueno, NTT MCL, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
LOG = logging.getLogger(__name__)
SG_RPC_VERSION = "1.1"

security_group_opts = [
    cfg.StrOpt(
        'firewall_driver',
        help=_('Driver for security groups firewall in the L2 agent')),
    cfg.BoolOpt(
        'enable_security_group',
        default=True,
        help=_(
            'Controls whether the neutron security group API is enabled '
            'in the server. It should be false when using no security '
            'groups or using the nova security group API.'))
]
cfg.CONF.register_opts(security_group_opts, 'SECURITYGROUP')


# This is a backward compatibility check kept from Havana: the configured
# firewall driver and the enable_security_group flag must agree.
def _is_valid_driver_combination():
    """Return whether firewall_driver agrees with enable_security_group."""
    sg_conf = cfg.CONF.SECURITYGROUP
    noop_driver = 'neutron.agent.firewall.NoopFirewallDriver'
    if sg_conf.enable_security_group:
        # Security groups enabled: a real (non-noop) driver is required.
        return bool(sg_conf.firewall_driver) and (
            sg_conf.firewall_driver != noop_driver)
    # Security groups disabled: only the noop driver (or none) is valid.
    return (sg_conf.firewall_driver == noop_driver or
            sg_conf.firewall_driver is None)


def is_firewall_enabled():
    """Return enable_security_group, warning on inconsistent config."""
    if not _is_valid_driver_combination():
        LOG.warn(_("Driver configuration doesn't match with "
                   "enable_security_group"))

    return cfg.CONF.SECURITYGROUP.enable_security_group


def _disable_extension(extension, aliases):
    """Remove *extension* from *aliases* in place, if present."""
    if extension in aliases:
        aliases.remove(extension)


def disable_security_group_extension_by_config(aliases):
    """Strip security-group related extensions when the firewall is off."""
    if is_firewall_enabled():
        return
    LOG.info(_('Disabled security-group extension.'))
    _disable_extension('security-group', aliases)
    LOG.info(_('Disabled allowed-address-pairs extension.'))
    _disable_extension('allowed-address-pairs', aliases)


class SecurityGroupServerRpcApiMixin(object):
    """A mix-in that enable SecurityGroup support in plugin rpc."""

    def security_group_rules_for_devices(self, context, devices):
        """Fetch security group rules for *devices* from the plugin."""
        LOG.debug(_("Get security group rules "
                    "for devices via rpc %r"), devices)
        msg = self.make_msg('security_group_rules_for_devices',
                            devices=devices)
        return self.call(context, msg,
                         version=SG_RPC_VERSION,
                         topic=self.topic)


class SecurityGroupAgentRpcCallbackMixin(object):
    """A mix-in that enable SecurityGroup agent
    support in agent implementations.
    """
    # mix-in user should set sg_agent to the agent implementation
    sg_agent = None

    def _security_groups_agent_not_set(self):
        LOG.warning(_("Security group agent binding currently not set. "
                      "This should be set by the end of the init "
                      "process."))

    def security_groups_rule_updated(self, context, **kwargs):
        """Callback for security group rule update.

        :param security_groups: list of updated security_groups
        """
        updated_groups = kwargs.get('security_groups', [])
        LOG.debug(
            _("Security group rule updated on remote: %s"), updated_groups)
        if not self.sg_agent:
            return self._security_groups_agent_not_set()
        self.sg_agent.security_groups_rule_updated(updated_groups)

    def security_groups_member_updated(self, context, **kwargs):
        """Callback for security group member update.

        :param security_groups: list of updated security_groups
        """
        updated_groups = kwargs.get('security_groups', [])
        LOG.debug(
            _("Security group member updated on remote: %s"), updated_groups)
        if not self.sg_agent:
            return self._security_groups_agent_not_set()
        self.sg_agent.security_groups_member_updated(updated_groups)

    def security_groups_provider_updated(self, context, **kwargs):
        """Callback for security group provider update."""
        LOG.debug(_("Provider rule updated"))
        if not self.sg_agent:
            return self._security_groups_agent_not_set()
        self.sg_agent.security_groups_provider_updated()
+ """ + + def init_firewall(self, defer_refresh_firewall=False): + firewall_driver = cfg.CONF.SECURITYGROUP.firewall_driver + LOG.debug(_("Init firewall settings (driver=%s)"), firewall_driver) + if not _is_valid_driver_combination(): + LOG.warn(_("Driver configuration doesn't match " + "with enable_security_group")) + if not firewall_driver: + firewall_driver = 'neutron.agent.firewall.NoopFirewallDriver' + self.firewall = importutils.import_object(firewall_driver) + # The following flag will be set to true if port filter must not be + # applied as soon as a rule or membership notification is received + self.defer_refresh_firewall = defer_refresh_firewall + # Stores devices for which firewall should be refreshed when + # deferred refresh is enabled. + self.devices_to_refilter = set() + # Flag raised when a global refresh is needed + self.global_refresh_firewall = False + + def prepare_devices_filter(self, device_ids): + if not device_ids: + return + LOG.info(_("Preparing filters for devices %s"), device_ids) + devices = self.plugin_rpc.security_group_rules_for_devices( + self.context, list(device_ids)) + with self.firewall.defer_apply(): + for device in devices.values(): + self.firewall.prepare_port_filter(device) + + def security_groups_rule_updated(self, security_groups): + LOG.info(_("Security group " + "rule updated %r"), security_groups) + self._security_group_updated( + security_groups, + 'security_groups') + + def security_groups_member_updated(self, security_groups): + LOG.info(_("Security group " + "member updated %r"), security_groups) + self._security_group_updated( + security_groups, + 'security_group_source_groups') + + def _security_group_updated(self, security_groups, attribute): + devices = [] + sec_grp_set = set(security_groups) + for device in self.firewall.ports.values(): + if sec_grp_set & set(device.get(attribute, [])): + devices.append(device['device']) + if devices: + if self.defer_refresh_firewall: + LOG.debug(_("Adding %s devices to the 
list of devices " + "for which firewall needs to be refreshed"), + devices) + self.devices_to_refilter |= set(devices) + else: + self.refresh_firewall(devices) + + def security_groups_provider_updated(self): + LOG.info(_("Provider rule updated")) + if self.defer_refresh_firewall: + # NOTE(salv-orlando): A 'global refresh' might not be + # necessary if the subnet for which the provider rules + # were updated is known + self.global_refresh_firewall = True + else: + self.refresh_firewall() + + def remove_devices_filter(self, device_ids): + if not device_ids: + return + LOG.info(_("Remove device filter for %r"), device_ids) + with self.firewall.defer_apply(): + for device_id in device_ids: + device = self.firewall.ports.get(device_id) + if not device: + continue + self.firewall.remove_port_filter(device) + + def refresh_firewall(self, device_ids=None): + LOG.info(_("Refresh firewall rules")) + if not device_ids: + device_ids = self.firewall.ports.keys() + if not device_ids: + LOG.info(_("No ports here to refresh firewall")) + return + devices = self.plugin_rpc.security_group_rules_for_devices( + self.context, device_ids) + with self.firewall.defer_apply(): + for device in devices.values(): + LOG.debug(_("Update port filter for %s"), device['device']) + self.firewall.update_port_filter(device) + + def firewall_refresh_needed(self): + return self.global_refresh_firewall or self.devices_to_refilter + + def setup_port_filters(self, new_devices, updated_devices): + """Configure port filters for devices. + + This routine applies filters for new devices and refreshes firewall + rules when devices have been updated, or when there are changes in + security group membership or rules. 
+ + :param new_devices: set containing identifiers for new devices + :param updated_devices: set containining identifiers for + updated devices + """ + if new_devices: + LOG.debug(_("Preparing device filters for %d new devices"), + len(new_devices)) + self.prepare_devices_filter(new_devices) + # These data structures are cleared here in order to avoid + # losing updates occurring during firewall refresh + devices_to_refilter = self.devices_to_refilter + global_refresh_firewall = self.global_refresh_firewall + self.devices_to_refilter = set() + self.global_refresh_firewall = False + # TODO(salv-orlando): Avoid if possible ever performing the global + # refresh providing a precise list of devices for which firewall + # should be refreshed + if global_refresh_firewall: + LOG.debug(_("Refreshing firewall for all filtered devices")) + self.refresh_firewall() + else: + # If a device is both in new and updated devices + # avoid reprocessing it + updated_devices = ((updated_devices | devices_to_refilter) - + new_devices) + if updated_devices: + LOG.debug(_("Refreshing firewall for %d devices"), + len(updated_devices)) + self.refresh_firewall(updated_devices) + + +class SecurityGroupAgentRpcApiMixin(object): + + def _get_security_group_topic(self): + return topics.get_topic_name(self.topic, + topics.SECURITY_GROUP, + topics.UPDATE) + + def security_groups_rule_updated(self, context, security_groups): + """Notify rule updated security groups.""" + if not security_groups: + return + self.fanout_cast(context, + self.make_msg('security_groups_rule_updated', + security_groups=security_groups), + version=SG_RPC_VERSION, + topic=self._get_security_group_topic()) + + def security_groups_member_updated(self, context, security_groups): + """Notify member updated security groups.""" + if not security_groups: + return + self.fanout_cast(context, + self.make_msg('security_groups_member_updated', + security_groups=security_groups), + version=SG_RPC_VERSION, + 
def get_filters(request, attr_info, skips=None):
    """Extracts the filters from the request string.

    Returns a dict of lists for the filters:
    check=a&check=b&name=Bob&
    becomes:
    {'check': [u'a', u'b'], 'name': [u'Bob']}

    :param request: request carrying the query string in GET
    :param attr_info: attribute map providing optional 'convert_to' /
        'convert_list_to' converters per key
    :param skips: iterable of keys to ignore (e.g. pagination params)
    """
    # BUG FIX: 'skips' previously defaulted to a mutable list ([]), which
    # is shared across calls; default to None instead.
    skips = skips or []
    res = {}
    # .items() instead of .iteritems(): identical behavior on Python 2
    # and keeps this working on Python 3.
    for key, values in request.GET.dict_of_lists().items():
        if key in skips:
            continue
        # Drop empty values such as "name=".
        values = [v for v in values if v]
        key_attr_info = attr_info.get(key, {})
        if 'convert_list_to' in key_attr_info:
            values = key_attr_info['convert_list_to'](values)
        elif 'convert_to' in key_attr_info:
            convert_to = key_attr_info['convert_to']
            values = [convert_to(v) for v in values]
        if values:
            res[key] = values
    return res


def get_previous_link(request, items, id_key):
    """Build the URL of the previous page of a paginated listing.

    The marker becomes the id of the first item on the current page and
    page_reverse is forced so the server walks backwards from it.
    """
    params = request.GET.copy()
    params.pop('marker', None)
    if items:
        marker = items[0][id_key]
        params['marker'] = marker
    params['page_reverse'] = True
    return "%s?%s" % (request.path_url, urllib.urlencode(params))


def get_next_link(request, items, id_key):
    """Build the URL of the next page of a paginated listing.

    The marker becomes the id of the last item on the current page.
    """
    params = request.GET.copy()
    params.pop('marker', None)
    if items:
        marker = items[-1][id_key]
        params['marker'] = marker
    params.pop('page_reverse', None)
    return "%s?%s" % (request.path_url, urllib.urlencode(params))
+ """ + max_limit = _get_pagination_max_limit() + limit = _get_limit_param(request, max_limit) + if max_limit > 0: + limit = min(max_limit, limit) or max_limit + if not limit: + return None, None + marker = request.GET.get('marker', None) + return limit, marker + + +def _get_pagination_max_limit(): + max_limit = -1 + if (cfg.CONF.pagination_max_limit.lower() != + constants.PAGINATION_INFINITE): + try: + max_limit = int(cfg.CONF.pagination_max_limit) + if max_limit == 0: + raise ValueError() + except ValueError: + LOG.warn(_("Invalid value for pagination_max_limit: %s. It " + "should be an integer greater to 0"), + cfg.CONF.pagination_max_limit) + return max_limit + + +def _get_limit_param(request, max_limit): + """Extract integer limit from request or fail.""" + try: + limit = int(request.GET.get('limit', 0)) + if limit >= 0: + return limit + except ValueError: + pass + msg = _("Limit must be an integer 0 or greater and not '%d'") + raise exceptions.BadRequest(resource='limit', msg=msg) + + +def list_args(request, arg): + """Extracts the list of arg from request.""" + return [v for v in request.GET.getall(arg) if v] + + +def get_sorts(request, attr_info): + """Extract sort_key and sort_dir from request. 
def get_sorts(request, attr_info):
    """Extract sort_key and sort_dir from request.

    Return as: [(key1, value1), (key2, value2)]
    """
    sort_keys = list_args(request, "sort_key")
    sort_dirs = list_args(request, "sort_dir")
    if len(sort_keys) != len(sort_dirs):
        msg = _("The number of sort_keys and sort_dirs must be same")
        raise exc.HTTPBadRequest(explanation=msg)
    valid_dirs = [constants.SORT_DIRECTION_ASC, constants.SORT_DIRECTION_DESC]
    absent_keys = [key for key in sort_keys if key not in attr_info]
    if absent_keys:
        msg = _("%s is invalid attribute for sort_keys") % absent_keys
        raise exc.HTTPBadRequest(explanation=msg)
    invalid_dirs = [direction for direction in sort_dirs
                    if direction not in valid_dirs]
    if invalid_dirs:
        msg = (_("%(invalid_dirs)s is invalid value for sort_dirs, "
                 "valid value is '%(asc)s' and '%(desc)s'") %
               {'invalid_dirs': invalid_dirs,
                'asc': constants.SORT_DIRECTION_ASC,
                'desc': constants.SORT_DIRECTION_DESC})
        raise exc.HTTPBadRequest(explanation=msg)
    # True marks an ascending key.
    return zip(sort_keys,
               [direction == constants.SORT_DIRECTION_ASC
                for direction in sort_dirs])


def get_page_reverse(request):
    """True when the client asked for reverse-order paging."""
    return request.GET.get('page_reverse', 'False').lower() == "true"


def get_pagination_links(request, items, limit,
                         marker, page_reverse, key="id"):
    """Build the 'next'/'previous' link dicts for a paginated listing."""
    key = key or 'id'
    links = []
    if not limit:
        return links
    # A full page (or a reverse walk) implies more data may follow.
    if len(items) >= limit or page_reverse:
        links.append({"rel": "next",
                      "href": get_next_link(request, items,
                                            key)})
    if len(items) >= limit or not page_reverse:
        links.append({"rel": "previous",
                      "href": get_previous_link(request, items,
                                                key)})
    return links
class PaginationHelper(object):
    """No-op pagination strategy: hands data back unchanged."""

    def __init__(self, request, primary_key='id'):
        self.request = request
        self.primary_key = primary_key

    def update_fields(self, original_fields, fields_to_add):
        pass

    def update_args(self, args):
        pass

    def paginate(self, items):
        return items

    def get_links(self, items):
        return {}


class PaginationEmulatedHelper(PaginationHelper):
    """Pagination emulated in memory, for plugins without native support."""

    def __init__(self, request, primary_key='id'):
        super(PaginationEmulatedHelper, self).__init__(request, primary_key)
        self.limit, self.marker = get_limit_and_marker(request)
        self.page_reverse = get_page_reverse(request)

    def update_fields(self, original_fields, fields_to_add):
        # The primary key must be fetched so marker-based slicing works.
        if not original_fields:
            return
        if self.primary_key not in original_fields:
            original_fields.append(self.primary_key)
            fields_to_add.append(self.primary_key)

    def paginate(self, items):
        if not self.limit:
            return items
        pos = -1
        if self.marker:
            for pos, item in enumerate(items):
                if item[self.primary_key] == self.marker:
                    break
        if self.page_reverse:
            return items[pos - self.limit:pos]
        return items[pos + 1:pos + self.limit + 1]

    def get_links(self, items):
        return get_pagination_links(
            self.request, items, self.limit, self.marker,
            self.page_reverse, self.primary_key)


class PaginationNativeHelper(PaginationEmulatedHelper):
    """Pagination delegated to the plugin through the query arguments."""

    def update_args(self, args):
        known_sort_keys = dict(args.get('sorts', []))
        if self.primary_key not in known_sort_keys:
            args.setdefault('sorts', []).append((self.primary_key, True))
        args.update({'limit': self.limit, 'marker': self.marker,
                     'page_reverse': self.page_reverse})

    def paginate(self, items):
        return items


class NoPaginationHelper(PaginationHelper):
    pass
class SortingHelper(object):
    """No-op sorting strategy: hands data back unchanged."""

    def __init__(self, request, attr_info):
        pass

    def update_args(self, args):
        pass

    def update_fields(self, original_fields, fields_to_add):
        pass

    def sort(self, items):
        return items


class SortingEmulatedHelper(SortingHelper):
    """Sorting emulated in memory, for plugins without native support."""

    def __init__(self, request, attr_info):
        super(SortingEmulatedHelper, self).__init__(request, attr_info)
        # List of (key, ascending) tuples validated against attr_info.
        self.sort_dict = get_sorts(request, attr_info)

    def update_fields(self, original_fields, fields_to_add):
        # Every sort key must be fetched so the comparison can see it.
        if not original_fields:
            return
        for key in dict(self.sort_dict).keys():
            if key not in original_fields:
                original_fields.append(key)
                fields_to_add.append(key)

    def sort(self, items):
        """Sort *items* in memory according to the requested keys."""
        import functools  # local import; file-level imports untouched

        def cmp_func(obj1, obj2):
            for key, direction in self.sort_dict:
                # BUG FIX (portability): cmp() and sorted(cmp=...) are
                # Python-2-only; this three-way compare plus
                # functools.cmp_to_key (available since 2.7) behaves
                # identically on both Python 2 and Python 3.
                ret = (obj1[key] > obj2[key]) - (obj1[key] < obj2[key])
                if ret:
                    return ret * (1 if direction else -1)
            return 0
        return sorted(items, key=functools.cmp_to_key(cmp_func))


class SortingNativeHelper(SortingHelper):
    """Sorting delegated to the plugin through the query arguments."""

    def __init__(self, request, attr_info):
        self.sort_dict = get_sorts(request, attr_info)

    def update_args(self, args):
        args['sorts'] = self.sort_dict


class NoSortingHelper(SortingHelper):
    pass


class NeutronController(object):
    """Base controller class for Neutron API."""
    # _resource_name will be redefined in sub concrete controller
    _resource_name = None

    def __init__(self, plugin):
        self._plugin = plugin
        super(NeutronController, self).__init__()

    def _prepare_request_body(self, body, params):
        """Verifies required parameters are in request body.

        Sets default value for missing optional parameters.
        Body argument must be the deserialized body.

        :param body: deserialized request body (or None)
        :param params: list of dicts with 'param-name', 'required' and
            optional 'default-value' entries
        :raises webob.exc.HTTPBadRequest: when the resource key is missing
            or a required parameter is absent
        """
        try:
            if body is None:
                # Initialize empty resource for setting default value
                body = {self._resource_name: {}}
            data = body[self._resource_name]
        except KeyError:
            # raise if _resource_name is not in req body.
            raise exc.HTTPBadRequest(_("Unable to find '%s' in request body") %
                                     self._resource_name)
        for param in params:
            param_name = param['param-name']
            param_value = data.get(param_name)
            # If the parameter wasn't found and it was required, return 400
            if param_value is None and param['required']:
                msg = (_("Failed to parse request. "
                         "Parameter '%s' not specified") % param_name)
                LOG.error(msg)
                raise exc.HTTPBadRequest(msg)
            data[param_name] = param_value or param.get('default-value')
        return body
@six.add_metaclass(abc.ABCMeta)
class PluginInterface(object):
    """Marker base used to verify a plugin honours an extension contract."""

    @classmethod
    def __subclasshook__(cls, klass):
        """Checking plugin class.

        The __subclasshook__ method is a class method
        that will be called every time a class is tested
        using issubclass(klass, PluginInterface).
        In that case, it will check that every method
        marked with the abstractmethod decorator is
        provided by the plugin class.
        """
        abstract = cls.__abstractmethods__
        if not abstract:
            return NotImplemented

        for method in abstract:
            if not any(method in base.__dict__ for base in klass.__mro__):
                return NotImplemented
        return True
class ExtensionDescriptor(object):
    """Base class that defines the contract for extensions.

    Note that you don't have to derive from this class to have a valid
    extension; it is purely a convenience.
    """

    def get_name(self):
        """The name of the extension.

        e.g. 'Fox In Socks'
        """
        raise NotImplementedError()

    def get_alias(self):
        """The alias for the extension.

        e.g. 'FOXNSOX'
        """
        raise NotImplementedError()

    def get_description(self):
        """Friendly description for the extension.

        e.g. 'The Fox In Socks Extension'
        """
        raise NotImplementedError()

    def get_namespace(self):
        """The XML namespace for the extension.

        e.g. 'http://www.fox.in.socks/api/ext/pie/v1.0'
        """
        raise NotImplementedError()

    def get_updated(self):
        """The timestamp when the extension was last updated.

        e.g. '2011-01-22T13:25:27-06:00'
        """
        # NOTE(justinsb): Not sure of the purpose of this is, vs the XML NS
        raise NotImplementedError()

    def get_resources(self):
        """List of extensions.ResourceExtension extension objects.

        Resources define new nouns, and are accessible through URLs.
        """
        return []

    def get_actions(self):
        """List of extensions.ActionExtension extension objects.

        Actions are verbs callable from the API.
        """
        return []

    def get_request_extensions(self):
        """List of extensions.RequestExtension extension objects.

        Request extensions are used to handle custom request data.
        """
        return []

    def get_extended_resources(self, version):
        """Retrieve extended resources or attributes for core resources.

        Extended attributes are implemented by a core plugin similarly
        to the attributes defined in the core, and can appear in
        request and response messages. Their names are scoped with the
        extension's prefix. The core API version is passed to this
        function, which must return a
        map[][][]
        specifying the extended resource attribute properties required
        by that API version.

        Extension can add resources and their attr definitions too.
        The returned map can be integrated into RESOURCE_ATTRIBUTE_MAP.
        """
        return {}

    def get_plugin_interface(self):
        """Returns an abstract class which defines contract for the plugin.

        The abstract class should inherit from extensions.PluginInterface,
        Methods in this abstract class should be decorated as abstractmethod
        """
        return None

    def update_attributes_map(self, extended_attributes,
                              extension_attrs_map=None):
        """Update attributes map for this extension.

        This is default method for extending an extension's attributes map.
        An extension can use this method and supplying its own resource
        attribute map in extension_attrs_map argument to extend all its
        attributes that needs to be extended.

        If an extension does not implement update_attributes_map, the method
        does nothing and just return.
        """
        if not extension_attrs_map:
            return

        # FIX (portability): .items() instead of .iteritems() behaves
        # identically on Python 2 and also works on Python 3.
        for resource, attrs in extension_attrs_map.items():
            extended_attrs = extended_attributes.get(resource)
            if extended_attrs:
                attrs.update(extended_attrs)

    def get_alias_namespace_compatibility_map(self):
        """Returns mappings between extension aliases and XML namespaces.

        The mappings are XML namespaces that should, for backward
        compatibility reasons, be added to the XML serialization of extended
        attributes. This allows an established extended attribute to be
        provided by another extension than the original one while keeping
        its old alias in the name.
        :return: A dictionary of extension_aliases and namespace strings.
        """
        return {}
+ """ + return {} + + +class ActionExtensionController(wsgi.Controller): + + def __init__(self, application): + self.application = application + self.action_handlers = {} + + def add_action(self, action_name, handler): + self.action_handlers[action_name] = handler + + def action(self, request, id): + input_dict = self._deserialize(request.body, + request.get_content_type()) + for action_name, handler in self.action_handlers.iteritems(): + if action_name in input_dict: + return handler(input_dict, request, id) + # no action handler found (bump to downstream application) + response = self.application + return response + + +class RequestExtensionController(wsgi.Controller): + + def __init__(self, application): + self.application = application + self.handlers = [] + + def add_handler(self, handler): + self.handlers.append(handler) + + def process(self, request, *args, **kwargs): + res = request.get_response(self.application) + # currently request handlers are un-ordered + for handler in self.handlers: + response = handler(request, res) + return response + + +class ExtensionController(wsgi.Controller): + + def __init__(self, extension_manager): + self.extension_manager = extension_manager + + def _translate(self, ext): + ext_data = {} + ext_data['name'] = ext.get_name() + ext_data['alias'] = ext.get_alias() + ext_data['description'] = ext.get_description() + ext_data['namespace'] = ext.get_namespace() + ext_data['updated'] = ext.get_updated() + ext_data['links'] = [] # TODO(dprince): implement extension links + return ext_data + + def index(self, request): + extensions = [] + for _alias, ext in self.extension_manager.extensions.iteritems(): + extensions.append(self._translate(ext)) + return dict(extensions=extensions) + + def show(self, request, id): + # NOTE(dprince): the extensions alias is used as the 'id' for show + ext = self.extension_manager.extensions.get(id, None) + if not ext: + raise webob.exc.HTTPNotFound( + _("Extension with alias %s does not exist") % id) 
class ExtensionMiddleware(wsgi.Middleware):
    """Extensions middleware for WSGI.

    Builds a routes mapper for every resource, action and request
    extension exposed by the extension manager, and routes incoming
    requests through it before falling back to the wrapped application.
    """

    def __init__(self, application,
                 ext_mgr=None):
        # Fall back to a manager loaded from the configured extension path
        # when the caller does not supply one.
        self.ext_mgr = (ext_mgr
                        or ExtensionManager(get_extensions_path()))
        mapper = routes.Mapper()

        # extended resources
        for resource in self.ext_mgr.get_resources():
            # Nested resources are mounted under their parent collection,
            # e.g. /<parents>/{parent_id}/<collection>.
            path_prefix = resource.path_prefix
            if resource.parent:
                path_prefix = (resource.path_prefix +
                               "/%s/{%s_id}" %
                               (resource.parent["collection_name"],
                                resource.parent["member_name"]))

            LOG.debug(_('Extended resource: %s'),
                      resource.collection)
            for action, method in resource.collection_actions.iteritems():
                conditions = dict(method=[method])
                path = "/%s/%s" % (resource.collection, action)
                with mapper.submapper(controller=resource.controller,
                                      action=action,
                                      path_prefix=path_prefix,
                                      conditions=conditions) as submap:
                    # Register both the plain and the ".:(format)" variants.
                    submap.connect(path)
                    submap.connect("%s.:(format)" % path)

            mapper.resource(resource.collection, resource.collection,
                            controller=resource.controller,
                            member=resource.member_actions,
                            parent_resource=resource.parent,
                            path_prefix=path_prefix)

        # extended actions
        action_controllers = self._action_ext_controllers(application,
                                                          self.ext_mgr, mapper)
        for action in self.ext_mgr.get_actions():
            LOG.debug(_('Extended action: %s'), action.action_name)
            controller = action_controllers[action.collection]
            controller.add_action(action.action_name, action.handler)

        # extended requests
        req_controllers = self._request_ext_controllers(application,
                                                        self.ext_mgr, mapper)
        for request_ext in self.ext_mgr.get_request_extensions():
            LOG.debug(_('Extended request: %s'), request_ext.key)
            controller = req_controllers[request_ext.key]
            controller.add_handler(request_ext.handler)

        self._router = routes.middleware.RoutesMiddleware(self._dispatch,
                                                          mapper)
        super(ExtensionMiddleware, self).__init__(application)

    @classmethod
    def factory(cls, global_config, **local_config):
        """Paste factory."""
        # NOTE(review): __init__ accepts (application, ext_mgr=None);
        # passing global_config positionally binds it to ext_mgr —
        # confirm this factory is intended to be used this way.
        def _factory(app):
            return cls(app, global_config, **local_config)
        return _factory

    def _action_ext_controllers(self, application, ext_mgr, mapper):
        """Return a dict of ActionExtensionController-s by collection."""
        action_controllers = {}
        for action in ext_mgr.get_actions():
            if action.collection not in action_controllers.keys():
                # One controller per collection; every action of the
                # collection is POSTed to the same /<collection>/:id/action.
                controller = ActionExtensionController(application)
                mapper.connect("/%s/:(id)/action.:(format)" %
                               action.collection,
                               action='action',
                               controller=controller,
                               conditions=dict(method=['POST']))
                mapper.connect("/%s/:(id)/action" % action.collection,
                               action='action',
                               controller=controller,
                               conditions=dict(method=['POST']))
                action_controllers[action.collection] = controller

        return action_controllers

    def _request_ext_controllers(self, application, ext_mgr, mapper):
        """Returns a dict of RequestExtensionController-s by collection."""
        request_ext_controllers = {}
        for req_ext in ext_mgr.get_request_extensions():
            if req_ext.key not in request_ext_controllers.keys():
                controller = RequestExtensionController(application)
                mapper.connect(req_ext.url_route + '.:(format)',
                               action='process',
                               controller=controller,
                               conditions=req_ext.conditions)

                mapper.connect(req_ext.url_route,
                               action='process',
                               controller=controller,
                               conditions=req_ext.conditions)
                request_ext_controllers[req_ext.key] = controller

        return request_ext_controllers

    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, req):
        """Route the incoming request with router."""
        # Stash the wrapped app so _dispatch can fall back to it.
        req.environ['extended.app'] = self.application
        return self._router

    @staticmethod
    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def _dispatch(req):
        """Dispatch the request.

        Returns the routed WSGI app's response or defers to the extended
        application.
        """
        match = req.environ['wsgiorg.routing_args'][1]
        if not match:
            # No extension route matched: defer to the wrapped application.
            return req.environ['extended.app']
        app = match['controller']
        return app
"""Dispatch the request. + + Returns the routed WSGI app's response or defers to the extended + application. + """ + match = req.environ['wsgiorg.routing_args'][1] + if not match: + return req.environ['extended.app'] + app = match['controller'] + return app + + +def plugin_aware_extension_middleware_factory(global_config, **local_config): + """Paste factory.""" + def _factory(app): + ext_mgr = PluginAwareExtensionManager.get_instance() + return ExtensionMiddleware(app, ext_mgr=ext_mgr) + return _factory + + +class ExtensionManager(object): + """Load extensions from the configured extension path. + + See tests/unit/extensions/foxinsocks.py for an + example extension implementation. + """ + + def __init__(self, path): + LOG.info(_('Initializing extension manager.')) + self.path = path + self.extensions = {} + self._load_all_extensions() + policy.reset() + + def get_resources(self): + """Returns a list of ResourceExtension objects.""" + resources = [] + resources.append(ResourceExtension('extensions', + ExtensionController(self))) + for ext in self.extensions.itervalues(): + try: + resources.extend(ext.get_resources()) + except AttributeError: + # NOTE(dprince): Extension aren't required to have resource + # extensions + pass + return resources + + def get_actions(self): + """Returns a list of ActionExtension objects.""" + actions = [] + for ext in self.extensions.itervalues(): + try: + actions.extend(ext.get_actions()) + except AttributeError: + # NOTE(dprince): Extension aren't required to have action + # extensions + pass + return actions + + def get_request_extensions(self): + """Returns a list of RequestExtension objects.""" + request_exts = [] + for ext in self.extensions.itervalues(): + try: + request_exts.extend(ext.get_request_extensions()) + except AttributeError: + # NOTE(dprince): Extension aren't required to have request + # extensions + pass + return request_exts + + def extend_resources(self, version, attr_map): + """Extend resources with additional 
resources or attributes. + + :param: attr_map, the existing mapping from resource name to + attrs definition. + + After this function, we will extend the attr_map if an extension + wants to extend this map. + """ + update_exts = [] + processed_exts = set() + exts_to_process = self.extensions.copy() + # Iterate until there are unprocessed extensions or if no progress + # is made in a whole iteration + while exts_to_process: + processed_ext_count = len(processed_exts) + for ext_name, ext in exts_to_process.items(): + if not hasattr(ext, 'get_extended_resources'): + del exts_to_process[ext_name] + continue + if hasattr(ext, 'update_attributes_map'): + update_exts.append(ext) + if hasattr(ext, 'get_required_extensions'): + # Process extension only if all required extensions + # have been processed already + required_exts_set = set(ext.get_required_extensions()) + if required_exts_set - processed_exts: + continue + try: + extended_attrs = ext.get_extended_resources(version) + for resource, resource_attrs in extended_attrs.iteritems(): + if attr_map.get(resource, None): + attr_map[resource].update(resource_attrs) + else: + attr_map[resource] = resource_attrs + if extended_attrs: + attributes.EXT_NSES[ext.get_alias()] = ( + ext.get_namespace()) + except AttributeError: + LOG.exception(_("Error fetching extended attributes for " + "extension '%s'"), ext.get_name()) + try: + comp_map = ext.get_alias_namespace_compatibility_map() + attributes.EXT_NSES_BC.update(comp_map) + except AttributeError: + LOG.info(_("Extension '%s' provides no backward " + "compatibility map for extended attributes"), + ext.get_name()) + processed_exts.add(ext_name) + del exts_to_process[ext_name] + if len(processed_exts) == processed_ext_count: + # Exit loop as no progress was made + break + if exts_to_process: + # NOTE(salv-orlando): Consider whether this error should be fatal + LOG.error(_("It was impossible to process the following " + "extensions: %s because of missing requirements."), + 
','.join(exts_to_process.keys())) + + # Extending extensions' attributes map. + for ext in update_exts: + ext.update_attributes_map(attr_map) + + def _check_extension(self, extension): + """Checks for required methods in extension objects.""" + try: + LOG.debug(_('Ext name: %s'), extension.get_name()) + LOG.debug(_('Ext alias: %s'), extension.get_alias()) + LOG.debug(_('Ext description: %s'), extension.get_description()) + LOG.debug(_('Ext namespace: %s'), extension.get_namespace()) + LOG.debug(_('Ext updated: %s'), extension.get_updated()) + except AttributeError as ex: + LOG.exception(_("Exception loading extension: %s"), unicode(ex)) + return False + return True + + def _load_all_extensions(self): + """Load extensions from the configured path. + + The extension name is constructed from the module_name. If your + extension module is named widgets.py, the extension class within that + module should be 'Widgets'. + + See tests/unit/extensions/foxinsocks.py for an example extension + implementation. 
+ """ + for path in self.path.split(':'): + if os.path.exists(path): + self._load_all_extensions_from_path(path) + else: + LOG.error(_("Extension path '%s' doesn't exist!"), path) + + def _load_all_extensions_from_path(self, path): + # Sorting the extension list makes the order in which they + # are loaded predictable across a cluster of load-balanced + # Neutron Servers + for f in sorted(os.listdir(path)): + try: + LOG.debug(_('Loading extension file: %s'), f) + mod_name, file_ext = os.path.splitext(os.path.split(f)[-1]) + ext_path = os.path.join(path, f) + if file_ext.lower() == '.py' and not mod_name.startswith('_'): + mod = imp.load_source(mod_name, ext_path) + ext_name = mod_name[0].upper() + mod_name[1:] + new_ext_class = getattr(mod, ext_name, None) + if not new_ext_class: + LOG.warn(_('Did not find expected name ' + '"%(ext_name)s" in %(file)s'), + {'ext_name': ext_name, + 'file': ext_path}) + continue + new_ext = new_ext_class() + self.add_extension(new_ext) + except Exception as exception: + LOG.warn(_("Extension file %(f)s wasn't loaded due to " + "%(exception)s"), {'f': f, 'exception': exception}) + + def add_extension(self, ext): + # Do nothing if the extension doesn't check out + if not self._check_extension(ext): + return + + alias = ext.get_alias() + LOG.info(_('Loaded extension: %s'), alias) + + if alias in self.extensions: + raise exceptions.DuplicatedExtension(alias=alias) + self.extensions[alias] = ext + + +class PluginAwareExtensionManager(ExtensionManager): + + _instance = None + + def __init__(self, path, plugins): + self.plugins = plugins + super(PluginAwareExtensionManager, self).__init__(path) + self.check_if_plugin_extensions_loaded() + + def _check_extension(self, extension): + """Check if an extension is supported by any plugin.""" + extension_is_valid = super(PluginAwareExtensionManager, + self)._check_extension(extension) + return (extension_is_valid and + self._plugins_support(extension) and + 
self._plugins_implement_interface(extension)) + + def _plugins_support(self, extension): + alias = extension.get_alias() + supports_extension = any((hasattr(plugin, + "supported_extension_aliases") and + alias in plugin.supported_extension_aliases) + for plugin in self.plugins.values()) + if not supports_extension: + LOG.warn(_("Extension %s not supported by any of loaded plugins"), + alias) + return supports_extension + + def _plugins_implement_interface(self, extension): + if(not hasattr(extension, "get_plugin_interface") or + extension.get_plugin_interface() is None): + return True + for plugin in self.plugins.values(): + if isinstance(plugin, extension.get_plugin_interface()): + return True + LOG.warn(_("Loaded plugins do not implement extension %s interface"), + extension.get_alias()) + return False + + @classmethod + def get_instance(cls): + if cls._instance is None: + cls._instance = cls(get_extensions_path(), + manager.NeutronManager.get_service_plugins()) + return cls._instance + + def check_if_plugin_extensions_loaded(self): + """Check if an extension supported by a plugin has been loaded.""" + plugin_extensions = set(itertools.chain.from_iterable([ + getattr(plugin, "supported_extension_aliases", []) + for plugin in self.plugins.values()])) + missing_aliases = plugin_extensions - set(self.extensions) + if missing_aliases: + raise exceptions.ExtensionsNotFound( + extensions=list(missing_aliases)) + + +class RequestExtension(object): + """Extend requests and responses of core Neutron OpenStack API controllers. + + Provide a way to add data to responses and handle custom request data + that is sent to core Neutron OpenStack API controllers. 
+ """ + + def __init__(self, method, url_route, handler): + self.url_route = url_route + self.handler = handler + self.conditions = dict(method=[method]) + self.key = "%s-%s" % (method, url_route) + + +class ActionExtension(object): + """Add custom actions to core Neutron OpenStack API controllers.""" + + def __init__(self, collection, action_name, handler): + self.collection = collection + self.action_name = action_name + self.handler = handler + + +class ResourceExtension(object): + """Add top level resources to the OpenStack API in Neutron.""" + + def __init__(self, collection, controller, parent=None, path_prefix="", + collection_actions={}, member_actions={}, attr_map={}): + self.collection = collection + self.controller = controller + self.parent = parent + self.collection_actions = collection_actions + self.member_actions = member_actions + self.path_prefix = path_prefix + self.attr_map = attr_map + + +# Returns the extension paths from a config entry and the __path__ +# of neutron.extensions +def get_extensions_path(): + paths = ':'.join(neutron.extensions.__path__) + if cfg.CONF.api_extensions_path: + paths = ':'.join([cfg.CONF.api_extensions_path, paths]) + + return paths + + +def append_api_extensions_path(paths): + paths = [cfg.CONF.api_extensions_path] + paths + cfg.CONF.set_override('api_extensions_path', + ':'.join([p for p in paths if p])) diff --git a/neutron/api/rpc/__init__.py b/neutron/api/rpc/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/neutron/api/rpc/agentnotifiers/__init__.py b/neutron/api/rpc/agentnotifiers/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/neutron/api/rpc/agentnotifiers/dhcp_rpc_agent_api.py b/neutron/api/rpc/agentnotifiers/dhcp_rpc_agent_api.py new file mode 100644 index 000000000..71ca52c25 --- /dev/null +++ b/neutron/api/rpc/agentnotifiers/dhcp_rpc_agent_api.py @@ -0,0 +1,177 @@ +# Copyright (c) 2013 OpenStack Foundation. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from neutron.common import constants +from neutron.common import rpc_compat +from neutron.common import topics +from neutron.common import utils +from neutron import manager +from neutron.openstack.common import log as logging + + +LOG = logging.getLogger(__name__) + + +class DhcpAgentNotifyAPI(rpc_compat.RpcProxy): + """API for plugin to notify DHCP agent.""" + BASE_RPC_API_VERSION = '1.0' + # It seems dhcp agent does not support bulk operation + VALID_RESOURCES = ['network', 'subnet', 'port'] + VALID_METHOD_NAMES = ['network.create.end', + 'network.update.end', + 'network.delete.end', + 'subnet.create.end', + 'subnet.update.end', + 'subnet.delete.end', + 'port.create.end', + 'port.update.end', + 'port.delete.end'] + + def __init__(self, topic=topics.DHCP_AGENT, plugin=None): + super(DhcpAgentNotifyAPI, self).__init__( + topic=topic, default_version=self.BASE_RPC_API_VERSION) + self._plugin = plugin + + @property + def plugin(self): + if self._plugin is None: + self._plugin = manager.NeutronManager.get_plugin() + return self._plugin + + def _schedule_network(self, context, network, existing_agents): + """Schedule the network to new agents + + :return: all agents associated with the network + """ + new_agents = self.plugin.schedule_network(context, network) or [] + if new_agents: + for agent in new_agents: + self._cast_message( + context, 'network_create_end', + {'network': {'id': network['id']}}, agent['host']) + 
elif not existing_agents: + LOG.warn(_('Unable to schedule network %s: no agents available; ' + 'will retry on subsequent port creation events.'), + network['id']) + return new_agents + existing_agents + + def _get_enabled_agents(self, context, network, agents, method, payload): + """Get the list of agents whose admin_state is UP.""" + network_id = network['id'] + enabled_agents = [x for x in agents if x.admin_state_up] + active_agents = [x for x in agents if x.is_active] + len_enabled_agents = len(enabled_agents) + len_active_agents = len(active_agents) + if len_active_agents < len_enabled_agents: + LOG.warn(_("Only %(active)d of %(total)d DHCP agents associated " + "with network '%(net_id)s' are marked as active, so " + " notifications may be sent to inactive agents.") + % {'active': len_active_agents, + 'total': len_enabled_agents, + 'net_id': network_id}) + if not enabled_agents: + num_ports = self.plugin.get_ports_count( + context, {'network_id': [network_id]}) + notification_required = ( + num_ports > 0 and len(network['subnets']) >= 1) + if notification_required: + LOG.error(_("Will not send event %(method)s for network " + "%(net_id)s: no agent available. 
Payload: " + "%(payload)s") + % {'method': method, + 'net_id': network_id, + 'payload': payload}) + return enabled_agents + + def _notify_agents(self, context, method, payload, network_id): + """Notify all the agents that are hosting the network.""" + # fanout is required as we do not know who is "listening" + no_agents = not utils.is_extension_supported( + self.plugin, constants.DHCP_AGENT_SCHEDULER_EXT_ALIAS) + fanout_required = method == 'network_delete_end' or no_agents + + # we do nothing on network creation because we want to give the + # admin the chance to associate an agent to the network manually + cast_required = method != 'network_create_end' + + if fanout_required: + self._fanout_message(context, method, payload) + elif cast_required: + admin_ctx = (context if context.is_admin else context.elevated()) + network = self.plugin.get_network(admin_ctx, network_id) + agents = self.plugin.get_dhcp_agents_hosting_networks( + context, [network_id]) + + # schedule the network first, if needed + schedule_required = method == 'port_create_end' + if schedule_required: + agents = self._schedule_network(admin_ctx, network, agents) + + enabled_agents = self._get_enabled_agents( + context, network, agents, method, payload) + for agent in enabled_agents: + self._cast_message( + context, method, payload, agent.host, agent.topic) + + def _cast_message(self, context, method, payload, host, + topic=topics.DHCP_AGENT): + """Cast the payload to the dhcp agent running on the host.""" + self.cast( + context, self.make_msg(method, + payload=payload), + topic='%s.%s' % (topic, host)) + + def _fanout_message(self, context, method, payload): + """Fanout the payload to all dhcp agents.""" + self.fanout_cast( + context, self.make_msg(method, + payload=payload), + topic=topics.DHCP_AGENT) + + def network_removed_from_agent(self, context, network_id, host): + self._cast_message(context, 'network_delete_end', + {'network_id': network_id}, host) + + def network_added_to_agent(self, 
context, network_id, host): + self._cast_message(context, 'network_create_end', + {'network': {'id': network_id}}, host) + + def agent_updated(self, context, admin_state_up, host): + self._cast_message(context, 'agent_updated', + {'admin_state_up': admin_state_up}, host) + + def notify(self, context, data, method_name): + # data is {'key' : 'value'} with only one key + if method_name not in self.VALID_METHOD_NAMES: + return + obj_type = data.keys()[0] + if obj_type not in self.VALID_RESOURCES: + return + obj_value = data[obj_type] + network_id = None + if obj_type == 'network' and 'id' in obj_value: + network_id = obj_value['id'] + elif obj_type in ['port', 'subnet'] and 'network_id' in obj_value: + network_id = obj_value['network_id'] + if not network_id: + return + method_name = method_name.replace(".", "_") + if method_name.endswith("_delete_end"): + if 'id' in obj_value: + self._notify_agents(context, method_name, + {obj_type + '_id': obj_value['id']}, + network_id) + else: + self._notify_agents(context, method_name, data, network_id) diff --git a/neutron/api/rpc/agentnotifiers/l3_rpc_agent_api.py b/neutron/api/rpc/agentnotifiers/l3_rpc_agent_api.py new file mode 100644 index 000000000..9bf1080db --- /dev/null +++ b/neutron/api/rpc/agentnotifiers/l3_rpc_agent_api.py @@ -0,0 +1,121 @@ +# Copyright (c) 2013 OpenStack Foundation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from neutron.common import constants +from neutron.common import rpc_compat +from neutron.common import topics +from neutron.common import utils +from neutron import manager +from neutron.openstack.common import log as logging +from neutron.plugins.common import constants as service_constants + + +LOG = logging.getLogger(__name__) + + +class L3AgentNotifyAPI(rpc_compat.RpcProxy): + """API for plugin to notify L3 agent.""" + BASE_RPC_API_VERSION = '1.0' + + def __init__(self, topic=topics.L3_AGENT): + super(L3AgentNotifyAPI, self).__init__( + topic=topic, default_version=self.BASE_RPC_API_VERSION) + + def _notification_host(self, context, method, payload, host): + """Notify the agent that is hosting the router.""" + LOG.debug(_('Nofity agent at %(host)s the message ' + '%(method)s'), {'host': host, + 'method': method}) + self.cast( + context, self.make_msg(method, + payload=payload), + topic='%s.%s' % (topics.L3_AGENT, host)) + + def _agent_notification(self, context, method, router_ids, + operation, data): + """Notify changed routers to hosting l3 agents.""" + adminContext = context.is_admin and context or context.elevated() + plugin = manager.NeutronManager.get_service_plugins().get( + service_constants.L3_ROUTER_NAT) + for router_id in router_ids: + l3_agents = plugin.get_l3_agents_hosting_routers( + adminContext, [router_id], + admin_state_up=True, + active=True) + for l3_agent in l3_agents: + LOG.debug(_('Notify agent at %(topic)s.%(host)s the message ' + '%(method)s'), + {'topic': l3_agent.topic, + 'host': l3_agent.host, + 'method': method}) + self.cast( + context, self.make_msg(method, + routers=[router_id]), + topic='%s.%s' % (l3_agent.topic, l3_agent.host), + version='1.1') + + def _notification(self, context, method, router_ids, operation, data): + """Notify all the agents that are hosting the routers.""" + plugin = manager.NeutronManager.get_service_plugins().get( + service_constants.L3_ROUTER_NAT) + if not plugin: + LOG.error(_('No plugin for L3 
routing registered. Cannot notify ' + 'agents with the message %s'), method) + return + if utils.is_extension_supported( + plugin, constants.L3_AGENT_SCHEDULER_EXT_ALIAS): + adminContext = (context.is_admin and + context or context.elevated()) + plugin.schedule_routers(adminContext, router_ids) + self._agent_notification( + context, method, router_ids, operation, data) + else: + self.fanout_cast( + context, self.make_msg(method, + routers=router_ids), + topic=topics.L3_AGENT) + + def _notification_fanout(self, context, method, router_id): + """Fanout the deleted router to all L3 agents.""" + LOG.debug(_('Fanout notify agent at %(topic)s the message ' + '%(method)s on router %(router_id)s'), + {'topic': topics.L3_AGENT, + 'method': method, + 'router_id': router_id}) + self.fanout_cast( + context, self.make_msg(method, + router_id=router_id), + topic=topics.L3_AGENT) + + def agent_updated(self, context, admin_state_up, host): + self._notification_host(context, 'agent_updated', + {'admin_state_up': admin_state_up}, + host) + + def router_deleted(self, context, router_id): + self._notification_fanout(context, 'router_deleted', router_id) + + def routers_updated(self, context, router_ids, operation=None, data=None): + if router_ids: + self._notification(context, 'routers_updated', router_ids, + operation, data) + + def router_removed_from_agent(self, context, router_id, host): + self._notification_host(context, 'router_removed_from_agent', + {'router_id': router_id}, host) + + def router_added_to_agent(self, context, router_ids, host): + self._notification_host(context, 'router_added_to_agent', + router_ids, host) diff --git a/neutron/api/rpc/agentnotifiers/metering_rpc_agent_api.py b/neutron/api/rpc/agentnotifiers/metering_rpc_agent_api.py new file mode 100644 index 000000000..da38cd8f8 --- /dev/null +++ b/neutron/api/rpc/agentnotifiers/metering_rpc_agent_api.py @@ -0,0 +1,99 @@ +# Copyright (C) 2013 eNovance SAS +# +# Author: Sylvain Afchain +# +# Licensed under the 
Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from neutron.common import constants +from neutron.common import rpc_compat +from neutron.common import topics +from neutron.common import utils +from neutron import manager +from neutron.openstack.common import log as logging +from neutron.plugins.common import constants as service_constants + +LOG = logging.getLogger(__name__) + + +class MeteringAgentNotifyAPI(rpc_compat.RpcProxy): + """API for plugin to notify L3 metering agent.""" + BASE_RPC_API_VERSION = '1.0' + + def __init__(self, topic=topics.METERING_AGENT): + super(MeteringAgentNotifyAPI, self).__init__( + topic=topic, default_version=self.BASE_RPC_API_VERSION) + + def _agent_notification(self, context, method, routers): + """Notify l3 metering agents hosted by l3 agent hosts.""" + adminContext = context.is_admin and context or context.elevated() + plugin = manager.NeutronManager.get_service_plugins().get( + service_constants.L3_ROUTER_NAT) + + l3_routers = {} + for router in routers: + l3_agents = plugin.get_l3_agents_hosting_routers( + adminContext, [router['id']], + admin_state_up=True, + active=True) + for l3_agent in l3_agents: + LOG.debug(_('Notify metering agent at %(topic)s.%(host)s ' + 'the message %(method)s'), + {'topic': self.topic, + 'host': l3_agent.host, + 'method': method}) + + l3_router = l3_routers.get(l3_agent.host, []) + l3_router.append(router) + l3_routers[l3_agent.host] = l3_router + + for host, routers in l3_routers.iteritems(): + self.cast(context, 
self.make_msg(method, routers=routers), + topic='%s.%s' % (self.topic, host)) + + def _notification_fanout(self, context, method, router_id): + LOG.debug(_('Fanout notify metering agent at %(topic)s the message ' + '%(method)s on router %(router_id)s'), + {'topic': self.topic, + 'method': method, + 'router_id': router_id}) + self.fanout_cast( + context, self.make_msg(method, + router_id=router_id), + topic=self.topic) + + def _notification(self, context, method, routers): + """Notify all the agents that are hosting the routers.""" + plugin = manager.NeutronManager.get_service_plugins().get( + service_constants.L3_ROUTER_NAT) + if utils.is_extension_supported( + plugin, constants.L3_AGENT_SCHEDULER_EXT_ALIAS): + self._agent_notification(context, method, routers) + else: + self.fanout_cast(context, self.make_msg(method, routers=routers), + topic=self.topic) + + def router_deleted(self, context, router_id): + self._notification_fanout(context, 'router_deleted', router_id) + + def routers_updated(self, context, routers): + if routers: + self._notification(context, 'routers_updated', routers) + + def update_metering_label_rules(self, context, routers): + self._notification(context, 'update_metering_label_rules', routers) + + def add_metering_label(self, context, routers): + self._notification(context, 'add_metering_label', routers) + + def remove_metering_label(self, context, routers): + self._notification(context, 'remove_metering_label', routers) diff --git a/neutron/api/v2/__init__.py b/neutron/api/v2/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/neutron/api/v2/attributes.py b/neutron/api/v2/attributes.py new file mode 100644 index 000000000..5c4479f23 --- /dev/null +++ b/neutron/api/v2/attributes.py @@ -0,0 +1,774 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2012 OpenStack Foundation. +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import netaddr +import re + +from neutron.common import constants +from neutron.common import exceptions as n_exc +from neutron.openstack.common import log as logging +from neutron.openstack.common import uuidutils + + +LOG = logging.getLogger(__name__) + +ATTR_NOT_SPECIFIED = object() +# Defining a constant to avoid repeating string literal in several modules +SHARED = 'shared' + +# Used by range check to indicate no limit for a bound. +UNLIMITED = None + + +def _verify_dict_keys(expected_keys, target_dict, strict=True): + """Allows to verify keys in a dictionary. + + :param expected_keys: A list of keys expected to be present. + :param target_dict: The dictionary which should be verified. + :param strict: Specifies whether additional keys are allowed to be present. + :return: True, if keys in the dictionary correspond to the specification. + """ + if not isinstance(target_dict, dict): + msg = (_("Invalid input. '%(target_dict)s' must be a dictionary " + "with keys: %(expected_keys)s") % + {'target_dict': target_dict, 'expected_keys': expected_keys}) + return msg + + expected_keys = set(expected_keys) + provided_keys = set(target_dict.keys()) + + predicate = expected_keys.__eq__ if strict else expected_keys.issubset + + if not predicate(provided_keys): + msg = (_("Validation of dictionary's keys failed." 
+ "Expected keys: %(expected_keys)s " + "Provided keys: %(provided_keys)s") % + {'expected_keys': expected_keys, + 'provided_keys': provided_keys}) + return msg + + +def is_attr_set(attribute): + return not (attribute is None or attribute is ATTR_NOT_SPECIFIED) + + +def _validate_values(data, valid_values=None): + if data not in valid_values: + msg = (_("'%(data)s' is not in %(valid_values)s") % + {'data': data, 'valid_values': valid_values}) + LOG.debug(msg) + return msg + + +def _validate_not_empty_string_or_none(data, max_len=None): + if data is not None: + return _validate_not_empty_string(data, max_len=max_len) + + +def _validate_not_empty_string(data, max_len=None): + msg = _validate_string(data, max_len=max_len) + if msg: + return msg + if not data.strip(): + return _("'%s' Blank strings are not permitted") % data + + +def _validate_string_or_none(data, max_len=None): + if data is not None: + return _validate_string(data, max_len=max_len) + + +def _validate_string(data, max_len=None): + if not isinstance(data, basestring): + msg = _("'%s' is not a valid string") % data + LOG.debug(msg) + return msg + + if max_len is not None and len(data) > max_len: + msg = (_("'%(data)s' exceeds maximum length of %(max_len)s") % + {'data': data, 'max_len': max_len}) + LOG.debug(msg) + return msg + + +def _validate_boolean(data, valid_values=None): + try: + convert_to_boolean(data) + except n_exc.InvalidInput: + msg = _("'%s' is not a valid boolean value") % data + LOG.debug(msg) + return msg + + +def _validate_range(data, valid_values=None): + """Check that integer value is within a range provided. + + Test is inclusive. Allows either limit to be ignored, to allow + checking ranges where only the lower or upper limit matter. + It is expected that the limits provided are valid integers or + the value None. 
+ """ + + min_value = valid_values[0] + max_value = valid_values[1] + try: + data = int(data) + except (ValueError, TypeError): + msg = _("'%s' is not an integer") % data + LOG.debug(msg) + return msg + if min_value is not UNLIMITED and data < min_value: + msg = _("'%(data)s' is too small - must be at least " + "'%(limit)d'") % {'data': data, 'limit': min_value} + LOG.debug(msg) + return msg + if max_value is not UNLIMITED and data > max_value: + msg = _("'%(data)s' is too large - must be no larger than " + "'%(limit)d'") % {'data': data, 'limit': max_value} + LOG.debug(msg) + return msg + + +def _validate_no_whitespace(data): + """Validates that input has no whitespace.""" + if len(data.split()) > 1: + msg = _("'%s' contains whitespace") % data + LOG.debug(msg) + raise n_exc.InvalidInput(error_message=msg) + return data + + +def _validate_mac_address(data, valid_values=None): + valid_mac = False + try: + valid_mac = netaddr.valid_mac(_validate_no_whitespace(data)) + except Exception: + pass + finally: + # TODO(arosen): The code in this file should be refactored + # so it catches the correct exceptions. _validate_no_whitespace + # raises AttributeError if data is None. + if valid_mac is False: + msg = _("'%s' is not a valid MAC address") % data + LOG.debug(msg) + return msg + + +def _validate_mac_address_or_none(data, valid_values=None): + if data is None: + return + return _validate_mac_address(data, valid_values) + + +def _validate_ip_address(data, valid_values=None): + try: + netaddr.IPAddress(_validate_no_whitespace(data)) + except Exception: + msg = _("'%s' is not a valid IP address") % data + LOG.debug(msg) + return msg + + +def _validate_ip_pools(data, valid_values=None): + """Validate that start and end IP addresses are present. 
+ + In addition to this the IP addresses will also be validated + """ + if not isinstance(data, list): + msg = _("Invalid data format for IP pool: '%s'") % data + LOG.debug(msg) + return msg + + expected_keys = ['start', 'end'] + for ip_pool in data: + msg = _verify_dict_keys(expected_keys, ip_pool) + if msg: + LOG.debug(msg) + return msg + for k in expected_keys: + msg = _validate_ip_address(ip_pool[k]) + if msg: + LOG.debug(msg) + return msg + + +def _validate_fixed_ips(data, valid_values=None): + if not isinstance(data, list): + msg = _("Invalid data format for fixed IP: '%s'") % data + LOG.debug(msg) + return msg + + ips = [] + for fixed_ip in data: + if not isinstance(fixed_ip, dict): + msg = _("Invalid data format for fixed IP: '%s'") % fixed_ip + LOG.debug(msg) + return msg + if 'ip_address' in fixed_ip: + # Ensure that duplicate entries are not set - just checking IP + # suffices. Duplicate subnet_id's are legitimate. + fixed_ip_address = fixed_ip['ip_address'] + if fixed_ip_address in ips: + msg = _("Duplicate IP address '%s'") % fixed_ip_address + else: + msg = _validate_ip_address(fixed_ip_address) + if msg: + LOG.debug(msg) + return msg + ips.append(fixed_ip_address) + if 'subnet_id' in fixed_ip: + msg = _validate_uuid(fixed_ip['subnet_id']) + if msg: + LOG.debug(msg) + return msg + + +def _validate_nameservers(data, valid_values=None): + if not hasattr(data, '__iter__'): + msg = _("Invalid data format for nameserver: '%s'") % data + LOG.debug(msg) + return msg + + ips = [] + for ip in data: + msg = _validate_ip_address(ip) + if msg: + # This may be a hostname + msg = _validate_regex(ip, HOSTNAME_PATTERN) + if msg: + msg = _("'%s' is not a valid nameserver") % ip + LOG.debug(msg) + return msg + if ip in ips: + msg = _("Duplicate nameserver '%s'") % ip + LOG.debug(msg) + return msg + ips.append(ip) + + +def _validate_hostroutes(data, valid_values=None): + if not isinstance(data, list): + msg = _("Invalid data format for hostroute: '%s'") % data + 
LOG.debug(msg) + return msg + + expected_keys = ['destination', 'nexthop'] + hostroutes = [] + for hostroute in data: + msg = _verify_dict_keys(expected_keys, hostroute) + if msg: + LOG.debug(msg) + return msg + msg = _validate_subnet(hostroute['destination']) + if msg: + LOG.debug(msg) + return msg + msg = _validate_ip_address(hostroute['nexthop']) + if msg: + LOG.debug(msg) + return msg + if hostroute in hostroutes: + msg = _("Duplicate hostroute '%s'") % hostroute + LOG.debug(msg) + return msg + hostroutes.append(hostroute) + + +def _validate_ip_address_or_none(data, valid_values=None): + if data is None: + return None + return _validate_ip_address(data, valid_values) + + +def _validate_subnet(data, valid_values=None): + msg = None + try: + net = netaddr.IPNetwork(_validate_no_whitespace(data)) + if '/' not in data: + msg = _("'%(data)s' isn't a recognized IP subnet cidr," + " '%(cidr)s' is recommended") % {"data": data, + "cidr": net.cidr} + else: + return + except Exception: + msg = _("'%s' is not a valid IP subnet") % data + if msg: + LOG.debug(msg) + return msg + + +def _validate_subnet_list(data, valid_values=None): + if not isinstance(data, list): + msg = _("'%s' is not a list") % data + LOG.debug(msg) + return msg + + if len(set(data)) != len(data): + msg = _("Duplicate items in the list: '%s'") % ', '.join(data) + LOG.debug(msg) + return msg + + for item in data: + msg = _validate_subnet(item) + if msg: + return msg + + +def _validate_subnet_or_none(data, valid_values=None): + if data is None: + return + return _validate_subnet(data, valid_values) + + +def _validate_regex(data, valid_values=None): + try: + if re.match(valid_values, data): + return + except TypeError: + pass + + msg = _("'%s' is not a valid input") % data + LOG.debug(msg) + return msg + + +def _validate_regex_or_none(data, valid_values=None): + if data is None: + return + return _validate_regex(data, valid_values) + + +def _validate_uuid(data, valid_values=None): + if not 
uuidutils.is_uuid_like(data): + msg = _("'%s' is not a valid UUID") % data + LOG.debug(msg) + return msg + + +def _validate_uuid_or_none(data, valid_values=None): + if data is not None: + return _validate_uuid(data) + + +def _validate_uuid_list(data, valid_values=None): + if not isinstance(data, list): + msg = _("'%s' is not a list") % data + LOG.debug(msg) + return msg + + for item in data: + msg = _validate_uuid(item) + if msg: + LOG.debug(msg) + return msg + + if len(set(data)) != len(data): + msg = _("Duplicate items in the list: '%s'") % ', '.join(data) + LOG.debug(msg) + return msg + + +def _validate_dict_item(key, key_validator, data): + # Find conversion function, if any, and apply it + conv_func = key_validator.get('convert_to') + if conv_func: + data[key] = conv_func(data.get(key)) + # Find validator function + # TODO(salv-orlando): Structure of dict attributes should be improved + # to avoid iterating over items + val_func = val_params = None + for (k, v) in key_validator.iteritems(): + if k.startswith('type:'): + # ask forgiveness, not permission + try: + val_func = validators[k] + except KeyError: + return _("Validator '%s' does not exist.") % k + val_params = v + break + # Process validation + if val_func: + return val_func(data.get(key), val_params) + + +def _validate_dict(data, key_specs=None): + if not isinstance(data, dict): + msg = _("'%s' is not a dictionary") % data + LOG.debug(msg) + return msg + # Do not perform any further validation, if no constraints are supplied + if not key_specs: + return + + # Check whether all required keys are present + required_keys = [key for key, spec in key_specs.iteritems() + if spec.get('required')] + + if required_keys: + msg = _verify_dict_keys(required_keys, data, False) + if msg: + LOG.debug(msg) + return msg + + # Perform validation and conversion of all values + # according to the specifications. 
+ for key, key_validator in [(k, v) for k, v in key_specs.iteritems() + if k in data]: + msg = _validate_dict_item(key, key_validator, data) + if msg: + LOG.debug(msg) + return msg + + +def _validate_dict_or_none(data, key_specs=None): + if data is not None: + return _validate_dict(data, key_specs) + + +def _validate_dict_or_empty(data, key_specs=None): + if data != {}: + return _validate_dict(data, key_specs) + + +def _validate_dict_or_nodata(data, key_specs=None): + if data: + return _validate_dict(data, key_specs) + + +def _validate_non_negative(data, valid_values=None): + try: + data = int(data) + except (ValueError, TypeError): + msg = _("'%s' is not an integer") % data + LOG.debug(msg) + return msg + + if data < 0: + msg = _("'%s' should be non-negative") % data + LOG.debug(msg) + return msg + + +def convert_to_boolean(data): + if isinstance(data, basestring): + val = data.lower() + if val == "true" or val == "1": + return True + if val == "false" or val == "0": + return False + elif isinstance(data, bool): + return data + elif isinstance(data, int): + if data == 0: + return False + elif data == 1: + return True + msg = _("'%s' cannot be converted to boolean") % data + raise n_exc.InvalidInput(error_message=msg) + + +def convert_to_int(data): + try: + return int(data) + except (ValueError, TypeError): + msg = _("'%s' is not a integer") % data + raise n_exc.InvalidInput(error_message=msg) + + +def convert_kvp_str_to_list(data): + """Convert a value of the form 'key=value' to ['key', 'value']. + + :raises: n_exc.InvalidInput if any of the strings are malformed + (e.g. do not contain a key). + """ + kvp = [x.strip() for x in data.split('=', 1)] + if len(kvp) == 2 and kvp[0]: + return kvp + msg = _("'%s' is not of the form =[value]") % data + raise n_exc.InvalidInput(error_message=msg) + + +def convert_kvp_list_to_dict(kvp_list): + """Convert a list of 'key=value' strings to a dict. + + :raises: n_exc.InvalidInput if any of the strings are malformed + (e.g. 
do not contain a key) or if any + of the keys appear more than once. + """ + if kvp_list == ['True']: + # No values were provided (i.e. '--flag-name') + return {} + kvp_map = {} + for kvp_str in kvp_list: + key, value = convert_kvp_str_to_list(kvp_str) + kvp_map.setdefault(key, set()) + kvp_map[key].add(value) + return dict((x, list(y)) for x, y in kvp_map.iteritems()) + + +def convert_none_to_empty_list(value): + return [] if value is None else value + + +def convert_none_to_empty_dict(value): + return {} if value is None else value + + +def convert_to_list(data): + if data is None: + return [] + elif hasattr(data, '__iter__'): + return list(data) + else: + return [data] + + +HOSTNAME_PATTERN = ("(?=^.{1,254}$)(^(?:(?!\d+\.|-)[a-zA-Z0-9_\-]" + "{1,63}(? policy. + Otherwise, will strip off the last character for normal mappings, like + routers -> router. + """ + plural_mappings = {} + for plural in resource_map: + singular = special_mappings.get(plural, plural[:-1]) + plural_mappings[plural] = singular + return plural_mappings + + +def build_resource_info(plural_mappings, resource_map, which_service, + action_map=None, register_quota=False, + translate_name=False, allow_bulk=False): + """Build resources for advanced services. + + Takes the resource information, and singular/plural mappings, and creates + API resource objects for advanced services extensions. Will optionally + translate underscores to dashes in resource names, register the resource, + and accept action information for resources. + + :param plural_mappings: mappings between singular and plural forms + :param resource_map: attribute map for the WSGI resources to create + :param which_service: The name of the service for which the WSGI resources + are being created. This name will be used to pass + the appropriate plugin to the WSGI resource. 
+ It can be set to None or "CORE"to create WSGI + resources for the the core plugin + :param action_map: custom resource actions + :param register_quota: it can be set to True to register quotas for the + resource(s) being created + :param translate_name: replaces underscores with dashes + :param allow_bulk: True if bulk create are allowed + """ + resources = [] + if not which_service: + which_service = constants.CORE + if action_map is None: + action_map = {} + if which_service != constants.CORE: + plugin = manager.NeutronManager.get_service_plugins()[which_service] + else: + plugin = manager.NeutronManager.get_plugin() + for collection_name in resource_map: + resource_name = plural_mappings[collection_name] + params = resource_map.get(collection_name, {}) + if translate_name: + collection_name = collection_name.replace('_', '-') + if register_quota: + quota.QUOTAS.register_resource_by_name(resource_name) + member_actions = action_map.get(resource_name, {}) + controller = base.create_resource( + collection_name, resource_name, plugin, params, + member_actions=member_actions, + allow_bulk=allow_bulk, + allow_pagination=cfg.CONF.allow_pagination, + allow_sorting=cfg.CONF.allow_sorting) + resource = extensions.ResourceExtension( + collection_name, + controller, + path_prefix=constants.COMMON_PREFIXES[which_service], + member_actions=member_actions, + attr_map=params) + resources.append(resource) + return resources diff --git a/neutron/api/v2/router.py b/neutron/api/v2/router.py new file mode 100644 index 000000000..f1b5787d9 --- /dev/null +++ b/neutron/api/v2/router.py @@ -0,0 +1,115 @@ +# Copyright (c) 2012 OpenStack Foundation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from oslo.config import cfg +import routes as routes_mapper +import six.moves.urllib.parse as urlparse +import webob +import webob.dec +import webob.exc + +from neutron.api import extensions +from neutron.api.v2 import attributes +from neutron.api.v2 import base +from neutron import manager +from neutron.openstack.common import log as logging +from neutron import wsgi + + +LOG = logging.getLogger(__name__) + +RESOURCES = {'network': 'networks', + 'subnet': 'subnets', + 'port': 'ports'} +SUB_RESOURCES = {} +COLLECTION_ACTIONS = ['index', 'create'] +MEMBER_ACTIONS = ['show', 'update', 'delete'] +REQUIREMENTS = {'id': attributes.UUID_PATTERN, 'format': 'xml|json'} + + +class Index(wsgi.Application): + def __init__(self, resources): + self.resources = resources + + @webob.dec.wsgify(RequestClass=wsgi.Request) + def __call__(self, req): + metadata = {'application/xml': {'attributes': { + 'resource': ['name', 'collection'], + 'link': ['href', 'rel']}}} + + layout = [] + for name, collection in self.resources.iteritems(): + href = urlparse.urljoin(req.path_url, collection) + resource = {'name': name, + 'collection': collection, + 'links': [{'rel': 'self', + 'href': href}]} + layout.append(resource) + response = dict(resources=layout) + content_type = req.best_match_content_type() + body = wsgi.Serializer(metadata=metadata).serialize(response, + content_type) + return webob.Response(body=body, content_type=content_type) + + +class APIRouter(wsgi.Router): + + @classmethod + def factory(cls, global_config, **local_config): + return cls(**local_config) + + def 
__init__(self, **local_config): + mapper = routes_mapper.Mapper() + plugin = manager.NeutronManager.get_plugin() + ext_mgr = extensions.PluginAwareExtensionManager.get_instance() + ext_mgr.extend_resources("2.0", attributes.RESOURCE_ATTRIBUTE_MAP) + + col_kwargs = dict(collection_actions=COLLECTION_ACTIONS, + member_actions=MEMBER_ACTIONS) + + def _map_resource(collection, resource, params, parent=None): + allow_bulk = cfg.CONF.allow_bulk + allow_pagination = cfg.CONF.allow_pagination + allow_sorting = cfg.CONF.allow_sorting + controller = base.create_resource( + collection, resource, plugin, params, allow_bulk=allow_bulk, + parent=parent, allow_pagination=allow_pagination, + allow_sorting=allow_sorting) + path_prefix = None + if parent: + path_prefix = "/%s/{%s_id}/%s" % (parent['collection_name'], + parent['member_name'], + collection) + mapper_kwargs = dict(controller=controller, + requirements=REQUIREMENTS, + path_prefix=path_prefix, + **col_kwargs) + return mapper.collection(collection, resource, + **mapper_kwargs) + + mapper.connect('index', '/', controller=Index(RESOURCES)) + for resource in RESOURCES: + _map_resource(RESOURCES[resource], resource, + attributes.RESOURCE_ATTRIBUTE_MAP.get( + RESOURCES[resource], dict())) + + for resource in SUB_RESOURCES: + _map_resource(SUB_RESOURCES[resource]['collection_name'], resource, + attributes.RESOURCE_ATTRIBUTE_MAP.get( + SUB_RESOURCES[resource]['collection_name'], + dict()), + SUB_RESOURCES[resource]['parent']) + + super(APIRouter, self).__init__(mapper) diff --git a/neutron/api/versions.py b/neutron/api/versions.py new file mode 100644 index 000000000..cdfa1e081 --- /dev/null +++ b/neutron/api/versions.py @@ -0,0 +1,69 @@ +# Copyright 2011 Citrix Systems. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import webob.dec + +from neutron.api.views import versions as versions_view +from neutron.openstack.common import gettextutils +from neutron.openstack.common import log as logging +from neutron import wsgi + + +LOG = logging.getLogger(__name__) + + +class Versions(object): + + @classmethod + def factory(cls, global_config, **local_config): + return cls() + + @webob.dec.wsgify(RequestClass=wsgi.Request) + def __call__(self, req): + """Respond to a request for all Neutron API versions.""" + version_objs = [ + { + "id": "v2.0", + "status": "CURRENT", + }, + ] + + if req.path != '/': + language = req.best_match_language() + msg = _('Unknown API version specified') + msg = gettextutils.translate(msg, language) + return webob.exc.HTTPNotFound(explanation=msg) + + builder = versions_view.get_view_builder(req) + versions = [builder.build(version) for version in version_objs] + response = dict(versions=versions) + metadata = { + "application/xml": { + "attributes": { + "version": ["status", "id"], + "link": ["rel", "href"], + } + } + } + + content_type = req.best_match_content_type() + body = (wsgi.Serializer(metadata=metadata). 
+ serialize(response, content_type)) + + response = webob.Response() + response.content_type = content_type + response.body = body + + return response diff --git a/neutron/api/views/__init__.py b/neutron/api/views/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/neutron/api/views/versions.py b/neutron/api/views/versions.py new file mode 100644 index 000000000..79f83ef5d --- /dev/null +++ b/neutron/api/views/versions.py @@ -0,0 +1,60 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010-2011 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import os + + +def get_view_builder(req): + base_url = req.application_url + return ViewBuilder(base_url) + + +class ViewBuilder(object): + + def __init__(self, base_url): + """Object initialization. 
+ + :param base_url: url of the root wsgi application + """ + self.base_url = base_url + + def build(self, version_data): + """Generic method used to generate a version entity.""" + version = { + "id": version_data["id"], + "status": version_data["status"], + "links": self._build_links(version_data), + } + + return version + + def _build_links(self, version_data): + """Generate a container of links that refer to the provided version.""" + href = self.generate_href(version_data["id"]) + + links = [ + { + "rel": "self", + "href": href, + }, + ] + + return links + + def generate_href(self, version_number): + """Create an url that refers to a specific version_number.""" + return os.path.join(self.base_url, version_number) diff --git a/neutron/auth.py b/neutron/auth.py new file mode 100644 index 000000000..52b32f847 --- /dev/null +++ b/neutron/auth.py @@ -0,0 +1,73 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from oslo.config import cfg +import webob.dec +import webob.exc + +from neutron import context +from neutron.openstack.common import log as logging +from neutron.openstack.common.middleware import request_id +from neutron import wsgi + +LOG = logging.getLogger(__name__) + + +class NeutronKeystoneContext(wsgi.Middleware): + """Make a request context from keystone headers.""" + + @webob.dec.wsgify + def __call__(self, req): + # Determine the user ID + user_id = req.headers.get('X_USER_ID') + if not user_id: + LOG.debug(_("X_USER_ID is not found in request")) + return webob.exc.HTTPUnauthorized() + + # Determine the tenant + tenant_id = req.headers.get('X_PROJECT_ID') + + # Suck out the roles + roles = [r.strip() for r in req.headers.get('X_ROLES', '').split(',')] + + # Human-friendly names + tenant_name = req.headers.get('X_PROJECT_NAME') + user_name = req.headers.get('X_USER_NAME') + + # Use request_id if already set + req_id = req.environ.get(request_id.ENV_REQUEST_ID) + + # Create a context with the authentication data + ctx = context.Context(user_id, tenant_id, roles=roles, + user_name=user_name, tenant_name=tenant_name, + request_id=req_id) + + # Inject the context... + req.environ['neutron.context'] = ctx + + return self.application + + +def pipeline_factory(loader, global_conf, **local_conf): + """Create a paste pipeline based on the 'auth_strategy' config option.""" + pipeline = local_conf[cfg.CONF.auth_strategy] + pipeline = pipeline.split() + filters = [loader.get_filter(n) for n in pipeline[:-1]] + app = loader.get_app(pipeline[-1]) + filters.reverse() + for filter in filters: + app = filter(app) + return app diff --git a/neutron/cmd/__init__.py b/neutron/cmd/__init__.py new file mode 100644 index 000000000..7506a2914 --- /dev/null +++ b/neutron/cmd/__init__.py @@ -0,0 +1,16 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2013 OpenStack Foundation. +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/neutron/cmd/sanity/__init__.py b/neutron/cmd/sanity/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/neutron/cmd/sanity/checks.py b/neutron/cmd/sanity/checks.py new file mode 100644 index 000000000..970351ede --- /dev/null +++ b/neutron/cmd/sanity/checks.py @@ -0,0 +1,38 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2014 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from neutron.agent.linux import ovs_lib +from neutron.common import utils +from neutron.plugins.common import constants as const +from neutron.plugins.openvswitch.common import constants as ovs_const + + +def vxlan_supported(root_helper, from_ip='192.0.2.1', to_ip='192.0.2.2'): + name = "vxlantest-" + utils.get_random_string(6) + with ovs_lib.OVSBridge(name, root_helper) as br: + port = br.add_tunnel_port(from_ip, to_ip, const.TYPE_VXLAN) + return port != ovs_const.INVALID_OFPORT + + +def patch_supported(root_helper): + seed = utils.get_random_string(6) + name = "patchtest-" + seed + peer_name = "peertest0-" + seed + patch_name = "peertest1-" + seed + with ovs_lib.OVSBridge(name, root_helper) as br: + port = br.add_patch_port(patch_name, peer_name) + return port != ovs_const.INVALID_OFPORT diff --git a/neutron/cmd/sanity_check.py b/neutron/cmd/sanity_check.py new file mode 100644 index 000000000..b068fcb00 --- /dev/null +++ b/neutron/cmd/sanity_check.py @@ -0,0 +1,93 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2014 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import sys + +from neutron.cmd.sanity import checks +from neutron.common import config +from neutron.openstack.common import log as logging +from oslo.config import cfg + + +LOG = logging.getLogger(__name__) +cfg.CONF.import_group('AGENT', 'neutron.plugins.openvswitch.common.config') + + +class BoolOptCallback(cfg.BoolOpt): + def __init__(self, name, callback, **kwargs): + self.callback = callback + super(BoolOptCallback, self).__init__(name, **kwargs) + + +def check_ovs_vxlan(): + result = checks.vxlan_supported(root_helper=cfg.CONF.AGENT.root_helper) + if not result: + LOG.error(_('Check for Open vSwitch VXLAN support failed. ' + 'Please ensure that the version of openvswitch ' + 'being used has VXLAN support.')) + return result + + +def check_ovs_patch(): + result = checks.patch_supported(root_helper=cfg.CONF.AGENT.root_helper) + if not result: + LOG.error(_('Check for Open vSwitch patch port support failed. ' + 'Please ensure that the version of openvswitch ' + 'being used has patch port support or disable features ' + 'requiring patch ports (gre/vxlan, etc.).')) + return result + + +# Define CLI opts to test specific features, with a calback for the test +OPTS = [ + BoolOptCallback('ovs_vxlan', check_ovs_vxlan, default=False, + help=_('Check for vxlan support')), + BoolOptCallback('ovs_patch', check_ovs_patch, default=False, + help=_('Check for patch port support')), +] + + +def enable_tests_from_config(): + """If a test can depend on configuration, use this function to set the + appropriate CLI option to enable that test. It will then be possible to + run all necessary tests, just by passing in the appropriate configs. 
+ """ + + if 'vxlan' in cfg.CONF.AGENT.tunnel_types: + cfg.CONF.set_override('ovs_vxlan', True) + if cfg.CONF.AGENT.tunnel_types: + cfg.CONF.set_override('ovs_patch', True) + + +def all_tests_passed(): + res = True + for opt in OPTS: + if cfg.CONF.get(opt.name): + res &= opt.callback() + return res + + +def main(): + cfg.CONF.register_cli_opts(OPTS) + cfg.CONF.set_override('use_stderr', True) + config.setup_logging(cfg.CONF) + config.init(sys.argv[1:], default_config_files=[]) + + if cfg.CONF.config_file: + enable_tests_from_config() + + return 0 if all_tests_passed() else 1 diff --git a/neutron/cmd/usage_audit.py b/neutron/cmd/usage_audit.py new file mode 100644 index 000000000..6294d710d --- /dev/null +++ b/neutron/cmd/usage_audit.py @@ -0,0 +1,50 @@ +#!/usr/bin/env python +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2012 New Dream Network, LLC (DreamHost) +# Author: Julien Danjou +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +"""Cron script to generate usage notifications for networks, ports and +subnets. 
+ +""" + +import sys + +from oslo.config import cfg + +from neutron.common import config +from neutron.common import rpc as n_rpc +from neutron import context +from neutron import manager + + +def main(): + config.init(sys.argv[1:]) + config.setup_logging(cfg.CONF) + + cxt = context.get_admin_context() + plugin = manager.NeutronManager.get_plugin() + notifier = n_rpc.get_notifier('network') + for network in plugin.get_networks(cxt): + notifier.info(cxt, 'network.exists', {'network': network}) + for subnet in plugin.get_subnets(cxt): + notifier.info(cxt, 'subnet.exists', {'subnet': subnet}) + for port in plugin.get_ports(cxt): + notifier.info(cxt, 'port.exists', {'port': port}) + for router in plugin.get_routers(cxt): + notifier.info(cxt, 'router.exists', {'router': router}) + for floatingip in plugin.get_floatingips(cxt): + notifier.info(cxt, 'floatingip.exists', {'floatingip': floatingip}) diff --git a/neutron/common/__init__.py b/neutron/common/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/neutron/common/config.py b/neutron/common/config.py new file mode 100644 index 000000000..0a8232fa0 --- /dev/null +++ b/neutron/common/config.py @@ -0,0 +1,189 @@ +# Copyright 2011 VMware, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +""" +Routines for configuring Neutron +""" + +import os + +from oslo.config import cfg +from oslo import messaging +from paste import deploy + +from neutron.api.v2 import attributes +from neutron.common import utils +from neutron.openstack.common.db import options as db_options +from neutron.openstack.common import log as logging +from neutron import version + + +LOG = logging.getLogger(__name__) + +core_opts = [ + cfg.StrOpt('bind_host', default='0.0.0.0', + help=_("The host IP to bind to")), + cfg.IntOpt('bind_port', default=9696, + help=_("The port to bind to")), + cfg.StrOpt('api_paste_config', default="api-paste.ini", + help=_("The API paste config file to use")), + cfg.StrOpt('api_extensions_path', default="", + help=_("The path for API extensions")), + cfg.StrOpt('policy_file', default="policy.json", + help=_("The policy file to use")), + cfg.StrOpt('auth_strategy', default='keystone', + help=_("The type of authentication to use")), + cfg.StrOpt('core_plugin', + help=_("The core plugin Neutron will use")), + cfg.ListOpt('service_plugins', default=[], + help=_("The service plugins Neutron will use")), + cfg.StrOpt('base_mac', default="fa:16:3e:00:00:00", + help=_("The base MAC address Neutron will use for VIFs")), + cfg.IntOpt('mac_generation_retries', default=16, + help=_("How many times Neutron will retry MAC generation")), + cfg.BoolOpt('allow_bulk', default=True, + help=_("Allow the usage of the bulk API")), + cfg.BoolOpt('allow_pagination', default=False, + help=_("Allow the usage of the pagination")), + cfg.BoolOpt('allow_sorting', default=False, + help=_("Allow the usage of the sorting")), + cfg.StrOpt('pagination_max_limit', default="-1", + help=_("The maximum number of items returned in a single " + "response, value was 'infinite' or negative integer " + "means no limit")), + cfg.IntOpt('max_dns_nameservers', default=5, + help=_("Maximum number of DNS nameservers")), + cfg.IntOpt('max_subnet_host_routes', default=20, + help=_("Maximum number of 
host routes per subnet")), + cfg.IntOpt('max_fixed_ips_per_port', default=5, + help=_("Maximum number of fixed ips per port")), + cfg.IntOpt('dhcp_lease_duration', default=86400, + deprecated_name='dhcp_lease_time', + help=_("DHCP lease duration (in seconds). Use -1 to tell " + "dnsmasq to use infinite lease times.")), + cfg.BoolOpt('dhcp_agent_notification', default=True, + help=_("Allow sending resource operation" + " notification to DHCP agent")), + cfg.BoolOpt('allow_overlapping_ips', default=False, + help=_("Allow overlapping IP support in Neutron")), + cfg.StrOpt('host', default=utils.get_hostname(), + help=_("The hostname Neutron is running on")), + cfg.BoolOpt('force_gateway_on_subnet', default=False, + help=_("Ensure that configured gateway is on subnet")), + cfg.BoolOpt('notify_nova_on_port_status_changes', default=True, + help=_("Send notification to nova when port status changes")), + cfg.BoolOpt('notify_nova_on_port_data_changes', default=True, + help=_("Send notification to nova when port data (fixed_ips/" + "floatingip) changes so nova can update its cache.")), + cfg.StrOpt('nova_url', + default='http://127.0.0.1:8774/v2', + help=_('URL for connection to nova')), + cfg.StrOpt('nova_admin_username', + help=_('Username for connecting to nova in admin context')), + cfg.StrOpt('nova_admin_password', + help=_('Password for connection to nova in admin context'), + secret=True), + cfg.StrOpt('nova_admin_tenant_id', + help=_('The uuid of the admin nova tenant')), + cfg.StrOpt('nova_admin_auth_url', + default='http://localhost:5000/v2.0', + help=_('Authorization URL for connecting to nova in admin ' + 'context')), + cfg.StrOpt('nova_ca_certificates_file', + help=_('CA file for novaclient to verify server certificates')), + cfg.BoolOpt('nova_api_insecure', default=False, + help=_("If True, ignore any SSL validation issues")), + cfg.StrOpt('nova_region_name', + help=_('Name of nova region to use. 
Useful if keystone manages' + ' more than one region.')), + cfg.IntOpt('send_events_interval', default=2, + help=_('Number of seconds between sending events to nova if ' + 'there are any events to send.')), +] + +core_cli_opts = [ + cfg.StrOpt('state_path', + default='/var/lib/neutron', + help=_("Where to store Neutron state files. " + "This directory must be writable by the agent.")), +] + +# Register the configuration options +cfg.CONF.register_opts(core_opts) +cfg.CONF.register_cli_opts(core_cli_opts) + +# Ensure that the control exchange is set correctly +messaging.set_transport_defaults(control_exchange='neutron') +_SQL_CONNECTION_DEFAULT = 'sqlite://' +# Update the default QueuePool parameters. These can be tweaked by the +# configuration variables - max_pool_size, max_overflow and pool_timeout +db_options.set_defaults(sql_connection=_SQL_CONNECTION_DEFAULT, + sqlite_db='', max_pool_size=10, + max_overflow=20, pool_timeout=10) + + +def init(args, **kwargs): + cfg.CONF(args=args, project='neutron', + version='%%prog %s' % version.version_info.release_string(), + **kwargs) + + # FIXME(ihrachys): if import is put in global, circular import + # failure occurs + from neutron.common import rpc as n_rpc + n_rpc.init(cfg.CONF) + + # Validate that the base_mac is of the correct format + msg = attributes._validate_regex(cfg.CONF.base_mac, + attributes.MAC_PATTERN) + if msg: + msg = _("Base MAC: %s") % msg + raise Exception(msg) + + +def setup_logging(conf): + """Sets up the logging options for a log with supplied name. + + :param conf: a cfg.ConfOpts object + """ + product_name = "neutron" + logging.setup(product_name) + LOG.info(_("Logging enabled!")) + + +def load_paste_app(app_name): + """Builds and returns a WSGI app from a paste config file. 
+ + :param app_name: Name of the application to load + :raises ConfigFilesNotFoundError when config file cannot be located + :raises RuntimeError when application cannot be loaded from config file + """ + + config_path = cfg.CONF.find_file(cfg.CONF.api_paste_config) + if not config_path: + raise cfg.ConfigFilesNotFoundError( + config_files=[cfg.CONF.api_paste_config]) + config_path = os.path.abspath(config_path) + LOG.info(_("Config paste file: %s"), config_path) + + try: + app = deploy.loadapp("config:%s" % config_path, name=app_name) + except (LookupError, ImportError): + msg = (_("Unable to load %(app_name)s from " + "configuration file %(config_path)s.") % + {'app_name': app_name, + 'config_path': config_path}) + LOG.exception(msg) + raise RuntimeError(msg) + return app diff --git a/neutron/common/constants.py b/neutron/common/constants.py new file mode 100644 index 000000000..cf3fb6025 --- /dev/null +++ b/neutron/common/constants.py @@ -0,0 +1,121 @@ +# Copyright (c) 2012 OpenStack Foundation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# TODO(salv-orlando): Verify if a single set of operational +# status constants is achievable +NET_STATUS_ACTIVE = 'ACTIVE' +NET_STATUS_BUILD = 'BUILD' +NET_STATUS_DOWN = 'DOWN' +NET_STATUS_ERROR = 'ERROR' + +PORT_STATUS_ACTIVE = 'ACTIVE' +PORT_STATUS_BUILD = 'BUILD' +PORT_STATUS_DOWN = 'DOWN' +PORT_STATUS_ERROR = 'ERROR' + +FLOATINGIP_STATUS_ACTIVE = 'ACTIVE' +FLOATINGIP_STATUS_DOWN = 'DOWN' +FLOATINGIP_STATUS_ERROR = 'ERROR' + +DEVICE_OWNER_ROUTER_INTF = "network:router_interface" +DEVICE_OWNER_ROUTER_GW = "network:router_gateway" +DEVICE_OWNER_FLOATINGIP = "network:floatingip" +DEVICE_OWNER_DHCP = "network:dhcp" + +DEVICE_ID_RESERVED_DHCP_PORT = "reserved_dhcp_port" + +FLOATINGIP_KEY = '_floatingips' +INTERFACE_KEY = '_interfaces' +METERING_LABEL_KEY = '_metering_labels' + +IPv4 = 'IPv4' +IPv6 = 'IPv6' + +DHCP_RESPONSE_PORT = 68 + +MIN_VLAN_TAG = 1 +MAX_VLAN_TAG = 4094 +MAX_VXLAN_VNI = 16777215 +FLOODING_ENTRY = ['00:00:00:00:00:00', '0.0.0.0'] + +EXT_NS_COMP = '_backward_comp_e_ns' +EXT_NS = '_extension_ns' +XML_NS_V20 = 'http://openstack.org/quantum/api/v2.0' +XSI_NAMESPACE = "http://www.w3.org/2001/XMLSchema-instance" +XSI_ATTR = "xsi:nil" +XSI_NIL_ATTR = "xmlns:xsi" +ATOM_NAMESPACE = "http://www.w3.org/2005/Atom" +ATOM_XMLNS = "xmlns:atom" +ATOM_LINK_NOTATION = "{%s}link" % ATOM_NAMESPACE +TYPE_XMLNS = "xmlns:quantum" +TYPE_ATTR = "quantum:type" +VIRTUAL_ROOT_KEY = "_v_root" + +TYPE_BOOL = "bool" +TYPE_INT = "int" +TYPE_LONG = "long" +TYPE_FLOAT = "float" +TYPE_LIST = "list" +TYPE_DICT = "dict" + +AGENT_TYPE_DHCP = 'DHCP agent' +AGENT_TYPE_OVS = 'Open vSwitch agent' +AGENT_TYPE_LINUXBRIDGE = 'Linux bridge agent' +AGENT_TYPE_HYPERV = 'HyperV agent' +AGENT_TYPE_NEC = 'NEC plugin agent' +AGENT_TYPE_OFA = 'OFA driver agent' +AGENT_TYPE_L3 = 'L3 agent' +AGENT_TYPE_LOADBALANCER = 'Loadbalancer agent' +AGENT_TYPE_MLNX = 'Mellanox plugin agent' +AGENT_TYPE_METERING = 'Metering agent' +AGENT_TYPE_METADATA = 'Metadata agent' +AGENT_TYPE_SDNVE = 'IBM SDN-VE agent' 
+L2_AGENT_TOPIC = 'N/A' + +PAGINATION_INFINITE = 'infinite' + +SORT_DIRECTION_ASC = 'asc' +SORT_DIRECTION_DESC = 'desc' + +PORT_BINDING_EXT_ALIAS = 'binding' +L3_AGENT_SCHEDULER_EXT_ALIAS = 'l3_agent_scheduler' +DHCP_AGENT_SCHEDULER_EXT_ALIAS = 'dhcp_agent_scheduler' +LBAAS_AGENT_SCHEDULER_EXT_ALIAS = 'lbaas_agent_scheduler' + +# Protocol names and numbers for Security Groups/Firewalls +PROTO_NAME_TCP = 'tcp' +PROTO_NAME_ICMP = 'icmp' +PROTO_NAME_ICMP_V6 = 'icmpv6' +PROTO_NAME_UDP = 'udp' +PROTO_NUM_TCP = 6 +PROTO_NUM_ICMP = 1 +PROTO_NUM_ICMP_V6 = 58 +PROTO_NUM_UDP = 17 + +# List of ICMPv6 types that should be allowed by default: +# Multicast Listener Query (130), +# Multicast Listener Report (131), +# Multicast Listener Done (132), +# Neighbor Solicitation (135), +# Neighbor Advertisement (136) +ICMPV6_ALLOWED_TYPES = [130, 131, 132, 135, 136] +ICMPV6_TYPE_RA = 134 + +DHCPV6_STATEFUL = 'dhcpv6-stateful' +DHCPV6_STATELESS = 'dhcpv6-stateless' +IPV6_SLAAC = 'slaac' +IPV6_MODES = [DHCPV6_STATEFUL, DHCPV6_STATELESS, IPV6_SLAAC] + +IPV6_LLA_PREFIX = 'fe80::/64' diff --git a/neutron/common/exceptions.py b/neutron/common/exceptions.py new file mode 100644 index 000000000..7fa63affd --- /dev/null +++ b/neutron/common/exceptions.py @@ -0,0 +1,321 @@ +# Copyright 2011 VMware, Inc +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Neutron base exception handling. 
+""" + +from neutron.openstack.common import excutils + + +class NeutronException(Exception): + """Base Neutron Exception. + + To correctly use this class, inherit from it and define + a 'message' property. That message will get printf'd + with the keyword arguments provided to the constructor. + """ + message = _("An unknown exception occurred.") + + def __init__(self, **kwargs): + try: + super(NeutronException, self).__init__(self.message % kwargs) + self.msg = self.message % kwargs + except Exception: + with excutils.save_and_reraise_exception() as ctxt: + if not self.use_fatal_exceptions(): + ctxt.reraise = False + # at least get the core message out if something happened + super(NeutronException, self).__init__(self.message) + + def __unicode__(self): + return unicode(self.msg) + + def use_fatal_exceptions(self): + return False + + +class BadRequest(NeutronException): + message = _('Bad %(resource)s request: %(msg)s') + + +class NotFound(NeutronException): + pass + + +class Conflict(NeutronException): + pass + + +class NotAuthorized(NeutronException): + message = _("Not authorized.") + + +class ServiceUnavailable(NeutronException): + message = _("The service is unavailable") + + +class AdminRequired(NotAuthorized): + message = _("User does not have admin privileges: %(reason)s") + + +class PolicyNotAuthorized(NotAuthorized): + message = _("Policy doesn't allow %(action)s to be performed.") + + +class NetworkNotFound(NotFound): + message = _("Network %(net_id)s could not be found") + + +class SubnetNotFound(NotFound): + message = _("Subnet %(subnet_id)s could not be found") + + +class PortNotFound(NotFound): + message = _("Port %(port_id)s could not be found") + + +class PortNotFoundOnNetwork(NotFound): + message = _("Port %(port_id)s could not be found " + "on network %(net_id)s") + + +class PolicyFileNotFound(NotFound): + message = _("Policy configuration policy.json could not be found") + + +class PolicyInitError(NeutronException): + message = _("Failed to 
init policy %(policy)s because %(reason)s") + + +class PolicyCheckError(NeutronException): + message = _("Failed to check policy %(policy)s because %(reason)s") + + +class StateInvalid(BadRequest): + message = _("Unsupported port state: %(port_state)s") + + +class InUse(NeutronException): + message = _("The resource is inuse") + + +class NetworkInUse(InUse): + message = _("Unable to complete operation on network %(net_id)s. " + "There are one or more ports still in use on the network.") + + +class SubnetInUse(InUse): + message = _("Unable to complete operation on subnet %(subnet_id)s. " + "One or more ports have an IP allocation from this subnet.") + + +class PortInUse(InUse): + message = _("Unable to complete operation on port %(port_id)s " + "for network %(net_id)s. Port already has an attached" + "device %(device_id)s.") + + +class MacAddressInUse(InUse): + message = _("Unable to complete operation for network %(net_id)s. " + "The mac address %(mac)s is in use.") + + +class HostRoutesExhausted(BadRequest): + # NOTE(xchenum): probably make sense to use quota exceeded exception? + message = _("Unable to complete operation for %(subnet_id)s. " + "The number of host routes exceeds the limit %(quota)s.") + + +class DNSNameServersExhausted(BadRequest): + # NOTE(xchenum): probably make sense to use quota exceeded exception? + message = _("Unable to complete operation for %(subnet_id)s. " + "The number of DNS nameservers exceeds the limit %(quota)s.") + + +class IpAddressInUse(InUse): + message = _("Unable to complete operation for network %(net_id)s. " + "The IP address %(ip_address)s is in use.") + + +class VlanIdInUse(InUse): + message = _("Unable to create the network. " + "The VLAN %(vlan_id)s on physical network " + "%(physical_network)s is in use.") + + +class FlatNetworkInUse(InUse): + message = _("Unable to create the flat network. 
" + "Physical network %(physical_network)s is in use.") + + +class TunnelIdInUse(InUse): + message = _("Unable to create the network. " + "The tunnel ID %(tunnel_id)s is in use.") + + +class TenantNetworksDisabled(ServiceUnavailable): + message = _("Tenant network creation is not enabled.") + + +class ResourceExhausted(ServiceUnavailable): + pass + + +class NoNetworkAvailable(ResourceExhausted): + message = _("Unable to create the network. " + "No tenant network is available for allocation.") + + +class SubnetMismatchForPort(BadRequest): + message = _("Subnet on port %(port_id)s does not match " + "the requested subnet %(subnet_id)s") + + +class MalformedRequestBody(BadRequest): + message = _("Malformed request body: %(reason)s") + + +class Invalid(NeutronException): + def __init__(self, message=None): + self.message = message + super(Invalid, self).__init__() + + +class InvalidInput(BadRequest): + message = _("Invalid input for operation: %(error_message)s.") + + +class InvalidAllocationPool(BadRequest): + message = _("The allocation pool %(pool)s is not valid.") + + +class OverlappingAllocationPools(Conflict): + message = _("Found overlapping allocation pools:" + "%(pool_1)s %(pool_2)s for subnet %(subnet_cidr)s.") + + +class OutOfBoundsAllocationPool(BadRequest): + message = _("The allocation pool %(pool)s spans " + "beyond the subnet cidr %(subnet_cidr)s.") + + +class MacAddressGenerationFailure(ServiceUnavailable): + message = _("Unable to generate unique mac on network %(net_id)s.") + + +class IpAddressGenerationFailure(Conflict): + message = _("No more IP addresses available on network %(net_id)s.") + + +class BridgeDoesNotExist(NeutronException): + message = _("Bridge %(bridge)s does not exist.") + + +class PreexistingDeviceFailure(NeutronException): + message = _("Creation failed. 
%(dev_name)s already exists.") + + +class SudoRequired(NeutronException): + message = _("Sudo privilege is required to run this command.") + + +class QuotaResourceUnknown(NotFound): + message = _("Unknown quota resources %(unknown)s.") + + +class OverQuota(Conflict): + message = _("Quota exceeded for resources: %(overs)s") + + +class QuotaMissingTenant(BadRequest): + message = _("Tenant-id was missing from Quota request") + + +class InvalidQuotaValue(Conflict): + message = _("Change would make usage less than 0 for the following " + "resources: %(unders)s") + + +class InvalidSharedSetting(Conflict): + message = _("Unable to reconfigure sharing settings for network " + "%(network)s. Multiple tenants are using it") + + +class InvalidExtensionEnv(BadRequest): + message = _("Invalid extension environment: %(reason)s") + + +class ExtensionsNotFound(NotFound): + message = _("Extensions not found: %(extensions)s") + + +class InvalidContentType(NeutronException): + message = _("Invalid content type %(content_type)s") + + +class ExternalIpAddressExhausted(BadRequest): + message = _("Unable to find any IP address on external " + "network %(net_id)s.") + + +class TooManyExternalNetworks(NeutronException): + message = _("More than one external network exists") + + +class InvalidConfigurationOption(NeutronException): + message = _("An invalid value was provided for %(opt_name)s: " + "%(opt_value)s") + + +class GatewayConflictWithAllocationPools(InUse): + message = _("Gateway ip %(ip_address)s conflicts with " + "allocation pool %(pool)s") + + +class GatewayIpInUse(InUse): + message = _("Current gateway ip %(ip_address)s already in use " + "by port %(port_id)s. 
Unable to update.") + + +class NetworkVlanRangeError(NeutronException): + message = _("Invalid network VLAN range: '%(vlan_range)s' - '%(error)s'") + + def __init__(self, **kwargs): + # Convert vlan_range tuple to 'start:end' format for display + if isinstance(kwargs['vlan_range'], tuple): + kwargs['vlan_range'] = "%d:%d" % kwargs['vlan_range'] + super(NetworkVlanRangeError, self).__init__(**kwargs) + + +class NetworkVxlanPortRangeError(NeutronException): + message = _("Invalid network VXLAN port range: '%(vxlan_range)s'") + + +class VxlanNetworkUnsupported(NeutronException): + message = _("VXLAN Network unsupported.") + + +class DuplicatedExtension(NeutronException): + message = _("Found duplicate extension: %(alias)s") + + +class DeviceIDNotOwnedByTenant(Conflict): + message = _("The following device_id %(device_id)s is not owned by your " + "tenant or matches another tenants router.") + + +class InvalidCIDR(BadRequest): + message = _("Invalid CIDR %(input)s given as IP prefix") diff --git a/neutron/common/ipv6_utils.py b/neutron/common/ipv6_utils.py new file mode 100644 index 000000000..fbe61e49b --- /dev/null +++ b/neutron/common/ipv6_utils.py @@ -0,0 +1,39 @@ +# Copyright 2013 IBM Corp. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +IPv6-related utilities and helper functions. 
+""" + +import netaddr + + +def get_ipv6_addr_by_EUI64(prefix, mac): + # Check if the prefix is IPv4 address + isIPv4 = netaddr.valid_ipv4(prefix) + if isIPv4: + msg = _("Unable to generate IP address by EUI64 for IPv4 prefix") + raise TypeError(msg) + try: + eui64 = int(netaddr.EUI(mac).eui64()) + prefix = netaddr.IPNetwork(prefix) + return netaddr.IPAddress(prefix.first + eui64 ^ (1 << 57)) + except (ValueError, netaddr.AddrFormatError): + raise TypeError(_('Bad prefix or mac format for generating IPv6 ' + 'address by EUI-64: %(prefix)s, %(mac)s:') + % {'prefix': prefix, 'mac': mac}) + except TypeError: + raise TypeError(_('Bad prefix type for generate IPv6 address by ' + 'EUI-64: %s') % prefix) diff --git a/neutron/common/log.py b/neutron/common/log.py new file mode 100644 index 000000000..85e4dce80 --- /dev/null +++ b/neutron/common/log.py @@ -0,0 +1,35 @@ +# Copyright (C) 2013 eNovance SAS +# +# Author: Sylvain Afchain +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Log helper functions.""" + +from neutron.openstack.common import log as logging + +LOG = logging.getLogger(__name__) + + +def log(method): + """Decorator helping to log method calls.""" + def wrapper(*args, **kwargs): + instance = args[0] + data = {"class_name": (instance.__class__.__module__ + '.' 
+ + instance.__class__.__name__), + "method_name": method.__name__, + "args": args[1:], "kwargs": kwargs} + LOG.debug(_('%(class_name)s method %(method_name)s' + ' called with arguments %(args)s %(kwargs)s'), data) + return method(*args, **kwargs) + return wrapper diff --git a/neutron/common/rpc.py b/neutron/common/rpc.py new file mode 100644 index 000000000..98d468140 --- /dev/null +++ b/neutron/common/rpc.py @@ -0,0 +1,136 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2012 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from oslo.config import cfg +from oslo import messaging +from oslo.messaging import serializer as om_serializer + +from neutron.common import exceptions +from neutron import context +from neutron.openstack.common import log as logging + + +LOG = logging.getLogger(__name__) + + +TRANSPORT = None +NOTIFIER = None + +ALLOWED_EXMODS = [ + exceptions.__name__, +] +EXTRA_EXMODS = [] + + +TRANSPORT_ALIASES = { + 'neutron.openstack.common.rpc.impl_fake': 'fake', + 'neutron.openstack.common.rpc.impl_qpid': 'qpid', + 'neutron.openstack.common.rpc.impl_kombu': 'rabbit', + 'neutron.openstack.common.rpc.impl_zmq': 'zmq', + 'neutron.rpc.impl_fake': 'fake', + 'neutron.rpc.impl_qpid': 'qpid', + 'neutron.rpc.impl_kombu': 'rabbit', + 'neutron.rpc.impl_zmq': 'zmq', +} + + +def init(conf): + global TRANSPORT, NOTIFIER + exmods = get_allowed_exmods() + TRANSPORT = messaging.get_transport(conf, + allowed_remote_exmods=exmods, + aliases=TRANSPORT_ALIASES) + NOTIFIER = messaging.Notifier(TRANSPORT) + + +def cleanup(): + global TRANSPORT, NOTIFIER + assert TRANSPORT is not None + assert NOTIFIER is not None + TRANSPORT.cleanup() + TRANSPORT = NOTIFIER = None + + +def add_extra_exmods(*args): + EXTRA_EXMODS.extend(args) + + +def clear_extra_exmods(): + del EXTRA_EXMODS[:] + + +def get_allowed_exmods(): + return ALLOWED_EXMODS + EXTRA_EXMODS + + +def get_client(target, version_cap=None, serializer=None): + assert TRANSPORT is not None + serializer = PluginRpcSerializer(serializer) + return messaging.RPCClient(TRANSPORT, + target, + version_cap=version_cap, + serializer=serializer) + + +def get_server(target, endpoints, serializer=None): + assert TRANSPORT is not None + serializer = PluginRpcSerializer(serializer) + return messaging.get_rpc_server(TRANSPORT, + target, + endpoints, + executor='eventlet', + serializer=serializer) + + +def get_notifier(service=None, host=None, publisher_id=None): + assert NOTIFIER is not None + if not publisher_id: + publisher_id = "%s.%s" % (service, host or 
cfg.CONF.host) + return NOTIFIER.prepare(publisher_id=publisher_id) + + +class PluginRpcSerializer(om_serializer.Serializer): + """This serializer is used to convert RPC common context into + Neutron Context. + """ + def __init__(self, base): + super(PluginRpcSerializer, self).__init__() + self._base = base + + def serialize_entity(self, ctxt, entity): + if not self._base: + return entity + return self._base.serialize_entity(ctxt, entity) + + def deserialize_entity(self, ctxt, entity): + if not self._base: + return entity + return self._base.deserialize_entity(ctxt, entity) + + def serialize_context(self, ctxt): + return ctxt.to_dict() + + def deserialize_context(self, ctxt): + rpc_ctxt_dict = ctxt.copy() + user_id = rpc_ctxt_dict.pop('user_id', None) + if not user_id: + user_id = rpc_ctxt_dict.pop('user', None) + tenant_id = rpc_ctxt_dict.pop('tenant_id', None) + if not tenant_id: + tenant_id = rpc_ctxt_dict.pop('project_id', None) + return context.Context(user_id, tenant_id, + load_admin_roles=False, **rpc_ctxt_dict) diff --git a/neutron/common/rpc_compat.py b/neutron/common/rpc_compat.py new file mode 100644 index 000000000..8c16c2c5b --- /dev/null +++ b/neutron/common/rpc_compat.py @@ -0,0 +1,165 @@ +# Copyright (c) 2014 Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from oslo.config import cfg +from oslo import messaging + +from neutron.common import rpc as n_rpc +from neutron.openstack.common import log as logging +from neutron.openstack.common import service + + +LOG = logging.getLogger(__name__) + + +class RpcProxy(object): + ''' + This class is created to facilitate migration from oslo-incubator + RPC layer implementation to oslo.messaging and is intended to + emulate RpcProxy class behaviour using oslo.messaging API once the + migration is applied. + ''' + RPC_API_NAMESPACE = None + + def __init__(self, topic, default_version, version_cap=None): + self.topic = topic + target = messaging.Target(topic=topic, version=default_version) + self._client = n_rpc.get_client(target, version_cap=version_cap) + + def make_msg(self, method, **kwargs): + return {'method': method, + 'namespace': self.RPC_API_NAMESPACE, + 'args': kwargs} + + def call(self, context, msg, **kwargs): + return self.__call_rpc_method( + context, msg, rpc_method='call', **kwargs) + + def cast(self, context, msg, **kwargs): + self.__call_rpc_method(context, msg, rpc_method='cast', **kwargs) + + def fanout_cast(self, context, msg, **kwargs): + kwargs['fanout'] = True + self.__call_rpc_method(context, msg, rpc_method='cast', **kwargs) + + def __call_rpc_method(self, context, msg, **kwargs): + options = dict( + ((opt, kwargs[opt]) + for opt in ('fanout', 'timeout', 'topic', 'version') + if kwargs.get(opt)) + ) + if msg['namespace']: + options['namespace'] = msg['namespace'] + + if options: + callee = self._client.prepare(**options) + else: + callee = self._client + + func = getattr(callee, kwargs['rpc_method']) + return func(context, msg['method'], **msg['args']) + + +class RpcCallback(object): + ''' + This class is created to facilitate migration from oslo-incubator + RPC layer implementation to oslo.messaging and is intended to set + callback version using oslo.messaging API once the migration is + applied. 
+ ''' + RPC_API_VERSION = '1.0' + + def __init__(self): + super(RpcCallback, self).__init__() + self.target = messaging.Target(version=self.RPC_API_VERSION) + + +class Service(service.Service): + """Service object for binaries running on hosts. + + A service enables rpc by listening to queues based on topic and host. + """ + def __init__(self, host, topic, manager=None, serializer=None): + super(Service, self).__init__() + self.host = host + self.topic = topic + self.serializer = serializer + if manager is None: + self.manager = self + else: + self.manager = manager + + def start(self): + super(Service, self).start() + + self.conn = create_connection(new=True) + LOG.debug("Creating Consumer connection for Service %s" % + self.topic) + + endpoints = [self.manager] + + # Share this same connection for these Consumers + self.conn.create_consumer(self.topic, endpoints, fanout=False) + + node_topic = '%s.%s' % (self.topic, self.host) + self.conn.create_consumer(node_topic, endpoints, fanout=False) + + self.conn.create_consumer(self.topic, endpoints, fanout=True) + + # Hook to allow the manager to do other initializations after + # the rpc connection is created. + if callable(getattr(self.manager, 'initialize_service_hook', None)): + self.manager.initialize_service_hook(self) + + # Consume from all consumers in threads + self.conn.consume_in_threads() + + def stop(self): + # Try to shut the connection down, but if we get any sort of + # errors, go ahead and ignore them.. 
as we're shutting down anyway + try: + self.conn.close() + except Exception: + pass + super(Service, self).stop() + + +class Connection(object): + + def __init__(self): + super(Connection, self).__init__() + self.servers = [] + + def create_consumer(self, topic, endpoints, fanout=False): + target = messaging.Target( + topic=topic, server=cfg.CONF.host, fanout=fanout) + server = n_rpc.get_server(target, endpoints) + self.servers.append(server) + + def consume_in_threads(self): + for server in self.servers: + server.start() + return self.servers + + +# functions +def create_connection(new=True): + return Connection() + + +# exceptions +RPCException = messaging.MessagingException +RemoteError = messaging.RemoteError +MessagingTimeout = messaging.MessagingTimeout diff --git a/neutron/common/test_lib.py b/neutron/common/test_lib.py new file mode 100644 index 000000000..81d242670 --- /dev/null +++ b/neutron/common/test_lib.py @@ -0,0 +1,44 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2010 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Colorizer Code is borrowed from Twisted: +# Copyright (c) 2001-2010 Twisted Matrix Laboratories. 
+# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +# describes parameters used by different unit/functional tests +# a plugin-specific testing mechanism should import this dictionary +# and override the values in it if needed (e.g., run_tests.py in +# neutron/plugins/openvswitch/ ) +test_config = {} diff --git a/neutron/common/topics.py b/neutron/common/topics.py new file mode 100644 index 000000000..5e23bce25 --- /dev/null +++ b/neutron/common/topics.py @@ -0,0 +1,57 @@ +# Copyright (c) 2012 OpenStack Foundation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +NETWORK = 'network' +SUBNET = 'subnet' +PORT = 'port' +SECURITY_GROUP = 'security_group' +L2POPULATION = 'l2population' + +CREATE = 'create' +DELETE = 'delete' +UPDATE = 'update' + +AGENT = 'q-agent-notifier' +PLUGIN = 'q-plugin' +L3PLUGIN = 'q-l3-plugin' +DHCP = 'q-dhcp-notifer' +FIREWALL_PLUGIN = 'q-firewall-plugin' +METERING_PLUGIN = 'q-metering-plugin' +LOADBALANCER_PLUGIN = 'n-lbaas-plugin' + +L3_AGENT = 'l3_agent' +DHCP_AGENT = 'dhcp_agent' +METERING_AGENT = 'metering_agent' +LOADBALANCER_AGENT = 'n-lbaas_agent' + + +def get_topic_name(prefix, table, operation, host=None): + """Create a topic name. + + The topic name needs to be synced between the agent and the + plugin. The plugin will send a fanout message to all of the + listening agents so that the agents in turn can perform their + updates accordingly. + + :param prefix: Common prefix for the plugin/agent message queues. + :param table: The table in question (NETWORK, SUBNET, PORT). + :param operation: The operation that invokes notification (CREATE, + DELETE, UPDATE) + :param host: Add host to the topic + :returns: The topic name. + """ + if host: + return '%s-%s-%s.%s' % (prefix, table, operation, host) + return '%s-%s-%s' % (prefix, table, operation) diff --git a/neutron/common/utils.py b/neutron/common/utils.py new file mode 100644 index 000000000..4378218e4 --- /dev/null +++ b/neutron/common/utils.py @@ -0,0 +1,301 @@ +# Copyright 2011, VMware, Inc. +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# Borrowed from nova code base, more utilities will be added/borrowed as and +# when needed. + +"""Utilities and helper functions.""" + +import datetime +import functools +import hashlib +import logging as std_logging +import multiprocessing +import os +import random +import signal +import socket +import uuid + +from eventlet.green import subprocess +from oslo.config import cfg + +from neutron.common import constants as q_const +from neutron.openstack.common import lockutils +from neutron.openstack.common import log as logging + + +TIME_FORMAT = "%Y-%m-%dT%H:%M:%SZ" +LOG = logging.getLogger(__name__) +SYNCHRONIZED_PREFIX = 'neutron-' + +synchronized = lockutils.synchronized_with_prefix(SYNCHRONIZED_PREFIX) + + +class cache_method_results(object): + """This decorator is intended for object methods only.""" + + def __init__(self, func): + self.func = func + functools.update_wrapper(self, func) + self._first_call = True + self._not_cached = object() + + def _get_from_cache(self, target_self, *args, **kwargs): + func_name = "%(module)s.%(class)s.%(func_name)s" % { + 'module': target_self.__module__, + 'class': target_self.__class__.__name__, + 'func_name': self.func.__name__, + } + key = (func_name,) + args + if kwargs: + key += dict2tuple(kwargs) + try: + item = target_self._cache.get(key, self._not_cached) + except TypeError: + LOG.debug(_("Method %(func_name)s cannot be cached due to " + "unhashable parameters: args: 
%(args)s, kwargs: " + "%(kwargs)s"), + {'func_name': func_name, + 'args': args, + 'kwargs': kwargs}) + return self.func(target_self, *args, **kwargs) + + if item is self._not_cached: + item = self.func(target_self, *args, **kwargs) + target_self._cache.set(key, item, None) + + return item + + def __call__(self, target_self, *args, **kwargs): + if not hasattr(target_self, '_cache'): + raise NotImplementedError( + "Instance of class %(module)s.%(class)s must contain _cache " + "attribute" % { + 'module': target_self.__module__, + 'class': target_self.__class__.__name__}) + if not target_self._cache: + if self._first_call: + LOG.debug(_("Instance of class %(module)s.%(class)s doesn't " + "contain attribute _cache therefore results " + "cannot be cached for %(func_name)s."), + {'module': target_self.__module__, + 'class': target_self.__class__.__name__, + 'func_name': self.func.__name__}) + self._first_call = False + return self.func(target_self, *args, **kwargs) + return self._get_from_cache(target_self, *args, **kwargs) + + def __get__(self, obj, objtype): + return functools.partial(self.__call__, obj) + + +def read_cached_file(filename, cache_info, reload_func=None): + """Read from a file if it has been modified. + + :param cache_info: dictionary to hold opaque cache. + :param reload_func: optional function to be called with data when + file is reloaded due to a modification. + + :returns: data from file + + """ + mtime = os.path.getmtime(filename) + if not cache_info or mtime != cache_info.get('mtime'): + LOG.debug(_("Reloading cached file %s"), filename) + with open(filename) as fap: + cache_info['data'] = fap.read() + cache_info['mtime'] = mtime + if reload_func: + reload_func(cache_info['data']) + return cache_info['data'] + + +def find_config_file(options, config_file): + """Return the first config file found. 
+ + We search for the paste config file in the following order: + * If --config-file option is used, use that + * Search for the configuration files via common cfg directories + :retval Full path to config file, or None if no config file found + """ + fix_path = lambda p: os.path.abspath(os.path.expanduser(p)) + if options.get('config_file'): + if os.path.exists(options['config_file']): + return fix_path(options['config_file']) + + dir_to_common = os.path.dirname(os.path.abspath(__file__)) + root = os.path.join(dir_to_common, '..', '..', '..', '..') + # Handle standard directory search for the config file + config_file_dirs = [fix_path(os.path.join(os.getcwd(), 'etc')), + fix_path(os.path.join('~', '.neutron-venv', 'etc', + 'neutron')), + fix_path('~'), + os.path.join(cfg.CONF.state_path, 'etc'), + os.path.join(cfg.CONF.state_path, 'etc', 'neutron'), + fix_path(os.path.join('~', '.local', + 'etc', 'neutron')), + '/usr/etc/neutron', + '/usr/local/etc/neutron', + '/etc/neutron/', + '/etc'] + + if 'plugin' in options: + config_file_dirs = [ + os.path.join(x, 'neutron', 'plugins', options['plugin']) + for x in config_file_dirs + ] + + if os.path.exists(os.path.join(root, 'plugins')): + plugins = [fix_path(os.path.join(root, 'plugins', p, 'etc')) + for p in os.listdir(os.path.join(root, 'plugins'))] + plugins = [p for p in plugins if os.path.isdir(p)] + config_file_dirs.extend(plugins) + + for cfg_dir in config_file_dirs: + cfg_file = os.path.join(cfg_dir, config_file) + if os.path.exists(cfg_file): + return cfg_file + + +def _subprocess_setup(): + # Python installs a SIGPIPE handler by default. This is usually not what + # non-Python subprocesses expect. 
def parse_mappings(mapping_list, unique_values=True):
    """Parse a list of mapping strings into a dictionary.

    :param mapping_list: a list of strings of the form '<key>:<value>'
        (values containing ':' are rejected; blank entries are skipped)
    :param unique_values: values must be unique if True
    :returns: a dict mapping keys to values
    :raises ValueError: on a malformed entry, a missing key or value,
        a duplicate key, or (when unique_values) a duplicate value
    """
    mappings = {}
    for mapping in mapping_list:
        mapping = mapping.strip()
        if not mapping:
            # Skip blank entries so e.g. trailing commas in config
            # values do not raise.
            continue
        split_result = mapping.split(':')
        if len(split_result) != 2:
            raise ValueError(_("Invalid mapping: '%s'") % mapping)
        key = split_result[0].strip()
        if not key:
            raise ValueError(_("Missing key in mapping: '%s'") % mapping)
        value = split_result[1].strip()
        if not value:
            raise ValueError(_("Missing value in mapping: '%s'") % mapping)
        if key in mappings:
            raise ValueError(_("Key %(key)s in mapping: '%(mapping)s' not "
                               "unique") % {'key': key, 'mapping': mapping})
        # values() instead of itervalues(): identical semantics on
        # Python 2 and also valid on Python 3 (itervalues() is not).
        if unique_values and value in mappings.values():
            raise ValueError(_("Value %(value)s in mapping: '%(mapping)s' "
                               "not unique") % {'value': value,
                                                'mapping': mapping})
        mappings[key] = value
    return mappings
def get_random_string(length):
    """Get a random hex string of the specified length.

    based on Cinder library
    cinder/transfer/api.py

    :param length: number of hex characters to return (0 gives '')
    :returns: a string of lowercase hex digits of exactly `length` chars
    """
    rndstr = ""
    # SystemRandom draws from OS entropy and, unlike the original
    # random.seed(microsecond) approach, neither reseeds the process-wide
    # RNG nor limits the seed space to one million values.
    generator = random.SystemRandom()
    while len(rndstr) < length:
        # encode() is required: hashlib digests operate on bytes.
        rndstr += hashlib.sha224(
            str(generator.random()).encode('ascii')).hexdigest()

    return rndstr[0:length]
def cpu_count():
    """Best-effort CPU count; falls back to 1 where unsupported."""
    try:
        count = multiprocessing.cpu_count()
    except NotImplementedError:
        # Some platforms cannot report a CPU count; assume a single core.
        count = 1
    return count
+ + :param read_deleted: 'no' indicates deleted records are hidden, 'yes' + indicates deleted records are visible, 'only' indicates that + *only* deleted records are visible. + + :param overwrite: Set to False to ensure that the greenthread local + copy of the index is not overwritten. + + :param kwargs: Extra arguments that might be present, but we ignore + because they possibly came in from older rpc messages. + """ + super(ContextBase, self).__init__(user=user_id, tenant=tenant_id, + is_admin=is_admin, + request_id=request_id) + self.user_name = user_name + self.tenant_name = tenant_name + + self.read_deleted = read_deleted + if not timestamp: + timestamp = datetime.datetime.utcnow() + self.timestamp = timestamp + self._session = None + self.roles = roles or [] + if self.is_admin is None: + self.is_admin = policy.check_is_admin(self) + elif self.is_admin and load_admin_roles: + # Ensure context is populated with admin roles + admin_roles = policy.get_admin_roles() + if admin_roles: + self.roles = list(set(self.roles) | set(admin_roles)) + # Allow openstack.common.log to access the context + if overwrite or not hasattr(local.store, 'context'): + local.store.context = self + + # Log only once the context has been configured to prevent + # format errors. 
+ if kwargs: + LOG.debug(_('Arguments dropped when creating ' + 'context: %s'), kwargs) + + @property + def project_id(self): + return self.tenant + + @property + def tenant_id(self): + return self.tenant + + @tenant_id.setter + def tenant_id(self, tenant_id): + self.tenant = tenant_id + + @property + def user_id(self): + return self.user + + @user_id.setter + def user_id(self, user_id): + self.user = user_id + + def _get_read_deleted(self): + return self._read_deleted + + def _set_read_deleted(self, read_deleted): + if read_deleted not in ('no', 'yes', 'only'): + raise ValueError(_("read_deleted can only be one of 'no', " + "'yes' or 'only', not %r") % read_deleted) + self._read_deleted = read_deleted + + def _del_read_deleted(self): + del self._read_deleted + + read_deleted = property(_get_read_deleted, _set_read_deleted, + _del_read_deleted) + + def to_dict(self): + return {'user_id': self.user_id, + 'tenant_id': self.tenant_id, + 'project_id': self.project_id, + 'is_admin': self.is_admin, + 'read_deleted': self.read_deleted, + 'roles': self.roles, + 'timestamp': str(self.timestamp), + 'request_id': self.request_id, + 'tenant': self.tenant, + 'user': self.user, + 'tenant_name': self.tenant_name, + 'project_name': self.tenant_name, + 'user_name': self.user_name, + } + + @classmethod + def from_dict(cls, values): + return cls(**values) + + def elevated(self, read_deleted=None): + """Return a version of this context with admin flag set.""" + context = copy.copy(self) + context.is_admin = True + + if 'admin' not in [x.lower() for x in context.roles]: + context.roles.append('admin') + + if read_deleted is not None: + context.read_deleted = read_deleted + + return context + + +class Context(ContextBase): + @property + def session(self): + if self._session is None: + self._session = db_api.get_session() + return self._session + + +def get_admin_context(read_deleted="no", load_admin_roles=True): + return Context(user_id=None, + tenant_id=None, + is_admin=True, + 
def get_admin_context_without_session(read_deleted="no"):
    """Return an admin context that cannot lazily open a DB session.

    Unlike get_admin_context(), this builds a ContextBase (not Context),
    so there is no `session` property available.
    """
    return ContextBase(
        user_id=None,
        tenant_id=None,
        is_admin=True,
        read_deleted=read_deleted)
+ +from eventlet import greenthread + +from oslo.config import cfg +import sqlalchemy as sa +from sqlalchemy.orm import exc + +from neutron.common import rpc_compat +from neutron.db import model_base +from neutron.db import models_v2 +from neutron.extensions import agent as ext_agent +from neutron import manager +from neutron.openstack.common.db import exception as db_exc +from neutron.openstack.common import excutils +from neutron.openstack.common import jsonutils +from neutron.openstack.common import log as logging +from neutron.openstack.common import timeutils + +LOG = logging.getLogger(__name__) +cfg.CONF.register_opt( + cfg.IntOpt('agent_down_time', default=75, + help=_("Seconds to regard the agent is down; should be at " + "least twice report_interval, to be sure the " + "agent is down for good."))) + + +class Agent(model_base.BASEV2, models_v2.HasId): + """Represents agents running in neutron deployments.""" + + __table_args__ = ( + sa.UniqueConstraint('agent_type', 'host', + name='uniq_agents0agent_type0host'), + ) + + # L3 agent, DHCP agent, OVS agent, LinuxBridge + agent_type = sa.Column(sa.String(255), nullable=False) + binary = sa.Column(sa.String(255), nullable=False) + # TOPIC is a fanout exchange topic + topic = sa.Column(sa.String(255), nullable=False) + # TOPIC.host is a target topic + host = sa.Column(sa.String(255), nullable=False) + admin_state_up = sa.Column(sa.Boolean, default=True, + nullable=False) + # the time when first report came from agents + created_at = sa.Column(sa.DateTime, nullable=False) + # the time when first report came after agents start + started_at = sa.Column(sa.DateTime, nullable=False) + # updated when agents report + heartbeat_timestamp = sa.Column(sa.DateTime, nullable=False) + # description is note for admin user + description = sa.Column(sa.String(255)) + # configurations: a json dict string, I think 4095 is enough + configurations = sa.Column(sa.String(4095), nullable=False) + + @property + def is_active(self): + 
return not AgentDbMixin.is_agent_down(self.heartbeat_timestamp) + + +class AgentDbMixin(ext_agent.AgentPluginBase): + """Mixin class to add agent extension to db_base_plugin_v2.""" + + def _get_agent(self, context, id): + try: + agent = self._get_by_id(context, Agent, id) + except exc.NoResultFound: + raise ext_agent.AgentNotFound(id=id) + return agent + + @classmethod + def is_agent_down(cls, heart_beat_time): + return timeutils.is_older_than(heart_beat_time, + cfg.CONF.agent_down_time) + + def get_configuration_dict(self, agent_db): + try: + conf = jsonutils.loads(agent_db.configurations) + except Exception: + msg = _('Configuration for agent %(agent_type)s on host %(host)s' + ' is invalid.') + LOG.warn(msg, {'agent_type': agent_db.agent_type, + 'host': agent_db.host}) + conf = {} + return conf + + def _make_agent_dict(self, agent, fields=None): + attr = ext_agent.RESOURCE_ATTRIBUTE_MAP.get( + ext_agent.RESOURCE_NAME + 's') + res = dict((k, agent[k]) for k in attr + if k not in ['alive', 'configurations']) + res['alive'] = not AgentDbMixin.is_agent_down( + res['heartbeat_timestamp']) + res['configurations'] = self.get_configuration_dict(agent) + return self._fields(res, fields) + + def delete_agent(self, context, id): + with context.session.begin(subtransactions=True): + agent = self._get_agent(context, id) + context.session.delete(agent) + + def update_agent(self, context, id, agent): + agent_data = agent['agent'] + with context.session.begin(subtransactions=True): + agent = self._get_agent(context, id) + agent.update(agent_data) + return self._make_agent_dict(agent) + + def get_agents_db(self, context, filters=None): + query = self._get_collection_query(context, Agent, filters=filters) + return query.all() + + def get_agents(self, context, filters=None, fields=None): + return self._get_collection(context, Agent, + self._make_agent_dict, + filters=filters, fields=fields) + + def _get_agent_by_type_and_host(self, context, agent_type, host): + query = 
self._model_query(context, Agent) + try: + agent_db = query.filter(Agent.agent_type == agent_type, + Agent.host == host).one() + return agent_db + except exc.NoResultFound: + raise ext_agent.AgentNotFoundByTypeHost(agent_type=agent_type, + host=host) + except exc.MultipleResultsFound: + raise ext_agent.MultipleAgentFoundByTypeHost(agent_type=agent_type, + host=host) + + def get_agent(self, context, id, fields=None): + agent = self._get_agent(context, id) + return self._make_agent_dict(agent, fields) + + def _create_or_update_agent(self, context, agent): + with context.session.begin(subtransactions=True): + res_keys = ['agent_type', 'binary', 'host', 'topic'] + res = dict((k, agent[k]) for k in res_keys) + + configurations_dict = agent.get('configurations', {}) + res['configurations'] = jsonutils.dumps(configurations_dict) + current_time = timeutils.utcnow() + try: + agent_db = self._get_agent_by_type_and_host( + context, agent['agent_type'], agent['host']) + res['heartbeat_timestamp'] = current_time + if agent.get('start_flag'): + res['started_at'] = current_time + greenthread.sleep(0) + agent_db.update(res) + except ext_agent.AgentNotFoundByTypeHost: + greenthread.sleep(0) + res['created_at'] = current_time + res['started_at'] = current_time + res['heartbeat_timestamp'] = current_time + res['admin_state_up'] = True + agent_db = Agent(**res) + greenthread.sleep(0) + context.session.add(agent_db) + greenthread.sleep(0) + + def create_or_update_agent(self, context, agent): + """Create or update agent according to report.""" + + try: + return self._create_or_update_agent(context, agent) + except db_exc.DBDuplicateEntry as e: + with excutils.save_and_reraise_exception() as ctxt: + if e.columns == ['agent_type', 'host']: + # It might happen that two or more concurrent transactions + # are trying to insert new rows having the same value of + # (agent_type, host) pair at the same time (if there has + # been no such entry in the table and multiple agent status + # updates 
class AgentExtRpcCallback(rpc_compat.RpcCallback):
    """Processes the rpc report in plugin implementations."""

    RPC_API_VERSION = '1.0'
    # Captured once at import; used to discard reports that predate
    # this server process.
    START_TIME = timeutils.utcnow()

    def __init__(self, plugin=None):
        super(AgentExtRpcCallback, self).__init__()
        self.plugin = plugin

    def report_state(self, context, **kwargs):
        """Report state from agent to server."""
        report_time = timeutils.parse_strtime(kwargs['time'])
        if self.START_TIME > report_time:
            # Stale message from before this server started; ignore it.
            LOG.debug(_("Message with invalid timestamp received"))
            return
        agent_state = kwargs['agent_state']['agent_state']
        if not self.plugin:
            # Resolve the core plugin lazily on first use.
            self.plugin = manager.NeutronManager.get_plugin()
        self.plugin.create_or_update_agent(context, agent_state)
See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo.config import cfg +import sqlalchemy as sa +from sqlalchemy import orm +from sqlalchemy.orm import exc +from sqlalchemy.orm import joinedload + +from neutron.common import constants +from neutron.common import utils +from neutron.db import agents_db +from neutron.db import model_base +from neutron.extensions import agent as ext_agent +from neutron.extensions import dhcpagentscheduler +from neutron.openstack.common import log as logging + + +LOG = logging.getLogger(__name__) + +AGENTS_SCHEDULER_OPTS = [ + cfg.StrOpt('network_scheduler_driver', + default='neutron.scheduler.' + 'dhcp_agent_scheduler.ChanceScheduler', + help=_('Driver to use for scheduling network to DHCP agent')), + cfg.BoolOpt('network_auto_schedule', default=True, + help=_('Allow auto scheduling networks to DHCP agent.')), + cfg.IntOpt('dhcp_agents_per_network', default=1, + help=_('Number of DHCP agents scheduled to host a network.')), +] + +cfg.CONF.register_opts(AGENTS_SCHEDULER_OPTS) + + +class NetworkDhcpAgentBinding(model_base.BASEV2): + """Represents binding between neutron networks and DHCP agents.""" + + network_id = sa.Column(sa.String(36), + sa.ForeignKey("networks.id", ondelete='CASCADE'), + primary_key=True) + dhcp_agent = orm.relation(agents_db.Agent) + dhcp_agent_id = sa.Column(sa.String(36), + sa.ForeignKey("agents.id", + ondelete='CASCADE'), + primary_key=True) + + +class AgentSchedulerDbMixin(agents_db.AgentDbMixin): + """Common class for agent scheduler mixins.""" + + # agent notifiers to handle agent update operations; + # should be updated by plugins; + agent_notifiers = { + constants.AGENT_TYPE_DHCP: None, + constants.AGENT_TYPE_L3: None, + constants.AGENT_TYPE_LOADBALANCER: None, + } + + @staticmethod + def is_eligible_agent(active, agent): + if active is None: + # filtering by activeness is disabled, all agents are eligible + return True + else: + # 
note(rpodolyaka): original behaviour is saved here: if active + # filter is set, only agents which are 'up' + # (i.e. have a recent heartbeat timestamp) + # are eligible, even if active is False + return not agents_db.AgentDbMixin.is_agent_down( + agent['heartbeat_timestamp']) + + def update_agent(self, context, id, agent): + original_agent = self.get_agent(context, id) + result = super(AgentSchedulerDbMixin, self).update_agent( + context, id, agent) + agent_data = agent['agent'] + agent_notifier = self.agent_notifiers.get(original_agent['agent_type']) + if (agent_notifier and + 'admin_state_up' in agent_data and + original_agent['admin_state_up'] != agent_data['admin_state_up']): + agent_notifier.agent_updated(context, + agent_data['admin_state_up'], + original_agent['host']) + return result + + +class DhcpAgentSchedulerDbMixin(dhcpagentscheduler + .DhcpAgentSchedulerPluginBase, + AgentSchedulerDbMixin): + """Mixin class to add DHCP agent scheduler extension to db_base_plugin_v2. + """ + + network_scheduler = None + + def get_dhcp_agents_hosting_networks( + self, context, network_ids, active=None): + if not network_ids: + return [] + query = context.session.query(NetworkDhcpAgentBinding) + query = query.options(joinedload('dhcp_agent')) + if len(network_ids) == 1: + query = query.filter( + NetworkDhcpAgentBinding.network_id == network_ids[0]) + elif network_ids: + query = query.filter( + NetworkDhcpAgentBinding.network_id in network_ids) + if active is not None: + query = (query.filter(agents_db.Agent.admin_state_up == active)) + + return [binding.dhcp_agent + for binding in query + if AgentSchedulerDbMixin.is_eligible_agent(active, + binding.dhcp_agent)] + + def add_network_to_dhcp_agent(self, context, id, network_id): + self._get_network(context, network_id) + with context.session.begin(subtransactions=True): + agent_db = self._get_agent(context, id) + if (agent_db['agent_type'] != constants.AGENT_TYPE_DHCP or + not agent_db['admin_state_up']): + raise 
dhcpagentscheduler.InvalidDHCPAgent(id=id) + dhcp_agents = self.get_dhcp_agents_hosting_networks( + context, [network_id]) + for dhcp_agent in dhcp_agents: + if id == dhcp_agent.id: + raise dhcpagentscheduler.NetworkHostedByDHCPAgent( + network_id=network_id, agent_id=id) + binding = NetworkDhcpAgentBinding() + binding.dhcp_agent_id = id + binding.network_id = network_id + context.session.add(binding) + dhcp_notifier = self.agent_notifiers.get(constants.AGENT_TYPE_DHCP) + if dhcp_notifier: + dhcp_notifier.network_added_to_agent( + context, network_id, agent_db.host) + + def remove_network_from_dhcp_agent(self, context, id, network_id): + agent = self._get_agent(context, id) + with context.session.begin(subtransactions=True): + try: + query = context.session.query(NetworkDhcpAgentBinding) + binding = query.filter( + NetworkDhcpAgentBinding.network_id == network_id, + NetworkDhcpAgentBinding.dhcp_agent_id == id).one() + except exc.NoResultFound: + raise dhcpagentscheduler.NetworkNotHostedByDhcpAgent( + network_id=network_id, agent_id=id) + + # reserve the port, so the ip is reused on a subsequent add + device_id = utils.get_dhcp_agent_device_id(network_id, + agent['host']) + filters = dict(device_id=[device_id]) + ports = self.get_ports(context, filters=filters) + for port in ports: + port['device_id'] = constants.DEVICE_ID_RESERVED_DHCP_PORT + self.update_port(context, port['id'], dict(port=port)) + + context.session.delete(binding) + dhcp_notifier = self.agent_notifiers.get(constants.AGENT_TYPE_DHCP) + if dhcp_notifier: + dhcp_notifier.network_removed_from_agent( + context, network_id, agent.host) + + def list_networks_on_dhcp_agent(self, context, id): + query = context.session.query(NetworkDhcpAgentBinding.network_id) + query = query.filter(NetworkDhcpAgentBinding.dhcp_agent_id == id) + + net_ids = [item[0] for item in query] + if net_ids: + return {'networks': + self.get_networks(context, filters={'id': net_ids})} + else: + return {'networks': []} + + def 
list_active_networks_on_active_dhcp_agent(self, context, host): + try: + agent = self._get_agent_by_type_and_host( + context, constants.AGENT_TYPE_DHCP, host) + except ext_agent.AgentNotFoundByTypeHost: + LOG.debug("DHCP Agent not found on host %s", host) + return [] + + if not agent.admin_state_up: + return [] + query = context.session.query(NetworkDhcpAgentBinding.network_id) + query = query.filter(NetworkDhcpAgentBinding.dhcp_agent_id == agent.id) + + net_ids = [item[0] for item in query] + if net_ids: + return self.get_networks( + context, + filters={'id': net_ids, 'admin_state_up': [True]} + ) + else: + return [] + + def list_dhcp_agents_hosting_network(self, context, network_id): + dhcp_agents = self.get_dhcp_agents_hosting_networks( + context, [network_id]) + agent_ids = [dhcp_agent.id for dhcp_agent in dhcp_agents] + if agent_ids: + return { + 'agents': self.get_agents(context, filters={'id': agent_ids})} + else: + return {'agents': []} + + def schedule_network(self, context, created_network): + if self.network_scheduler: + return self.network_scheduler.schedule( + self, context, created_network) + + def auto_schedule_networks(self, context, host): + if self.network_scheduler: + self.network_scheduler.auto_schedule_networks(self, context, host) diff --git a/neutron/db/allowedaddresspairs_db.py b/neutron/db/allowedaddresspairs_db.py new file mode 100644 index 000000000..b648c8c47 --- /dev/null +++ b/neutron/db/allowedaddresspairs_db.py @@ -0,0 +1,147 @@ +# Copyright 2013 VMware, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. +# + +import sqlalchemy as sa +from sqlalchemy import orm + +from neutron.api.v2 import attributes as attr +from neutron.db import db_base_plugin_v2 +from neutron.db import model_base +from neutron.db import models_v2 +from neutron.extensions import allowedaddresspairs as addr_pair + + +class AllowedAddressPair(model_base.BASEV2): + port_id = sa.Column(sa.String(36), + sa.ForeignKey('ports.id', ondelete="CASCADE"), + primary_key=True) + mac_address = sa.Column(sa.String(32), nullable=False, primary_key=True) + ip_address = sa.Column(sa.String(64), nullable=False, primary_key=True) + + port = orm.relationship( + models_v2.Port, + backref=orm.backref("allowed_address_pairs", + lazy="joined", cascade="delete")) + + +class AllowedAddressPairsMixin(object): + """Mixin class for allowed address pairs.""" + + def _process_create_allowed_address_pairs(self, context, port, + allowed_address_pairs): + if not attr.is_attr_set(allowed_address_pairs): + return [] + with context.session.begin(subtransactions=True): + for address_pair in allowed_address_pairs: + # use port.mac_address if no mac address in address pair + if 'mac_address' not in address_pair: + address_pair['mac_address'] = port['mac_address'] + db_pair = AllowedAddressPair( + port_id=port['id'], + mac_address=address_pair['mac_address'], + ip_address=address_pair['ip_address']) + context.session.add(db_pair) + + return allowed_address_pairs + + def get_allowed_address_pairs(self, context, port_id): + pairs = (context.session.query(AllowedAddressPair). + filter_by(port_id=port_id)) + return [self._make_allowed_address_pairs_dict(pair) + for pair in pairs] + + def _extend_port_dict_allowed_address_pairs(self, port_res, port_db): + # If port_db is provided, allowed address pairs will be accessed via + # sqlalchemy models. As they're loaded together with ports this + # will not cause an extra query. 
+ allowed_address_pairs = [ + self._make_allowed_address_pairs_dict(address_pair) for + address_pair in port_db.allowed_address_pairs] + port_res[addr_pair.ADDRESS_PAIRS] = allowed_address_pairs + return port_res + + # Register dict extend functions for ports + db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs( + attr.PORTS, ['_extend_port_dict_allowed_address_pairs']) + + def _delete_allowed_address_pairs(self, context, id): + query = self._model_query(context, AllowedAddressPair) + with context.session.begin(subtransactions=True): + query.filter(AllowedAddressPair.port_id == id).delete() + + def _make_allowed_address_pairs_dict(self, allowed_address_pairs, + fields=None): + res = {'mac_address': allowed_address_pairs['mac_address'], + 'ip_address': allowed_address_pairs['ip_address']} + return self._fields(res, fields) + + def _has_address_pairs(self, port): + return (attr.is_attr_set(port['port'][addr_pair.ADDRESS_PAIRS]) + and port['port'][addr_pair.ADDRESS_PAIRS] != []) + + def _check_update_has_allowed_address_pairs(self, port): + """Determine if request has an allowed address pair. + + Return True if the port parameter has a non-empty + 'allowed_address_pairs' attribute. Otherwise returns False. + """ + return (addr_pair.ADDRESS_PAIRS in port['port'] and + self._has_address_pairs(port)) + + def _check_update_deletes_allowed_address_pairs(self, port): + """Determine if request deletes address pair. + + Return True if port has as a allowed address pair and its value + is either [] or not is_attr_set, otherwise return False + """ + return (addr_pair.ADDRESS_PAIRS in port['port'] and + not self._has_address_pairs(port)) + + def is_address_pairs_attribute_updated(self, port, update_attrs): + """Check if the address pairs attribute is being updated. + + Returns True if there is an update. This can be used to decide + if a port update notification should be sent to agents or third + party controllers. 
+ """ + + new_pairs = update_attrs.get(addr_pair.ADDRESS_PAIRS) + if new_pairs is None: + return False + old_pairs = port.get(addr_pair.ADDRESS_PAIRS) + + # Missing or unchanged address pairs in attributes mean no update + return new_pairs != old_pairs + + def update_address_pairs_on_port(self, context, port_id, port, + original_port, updated_port): + """Update allowed address pairs on port. + + Returns True if an update notification is required. Notification + is not done here because other changes on the port may need + notification. This method is expected to be called within + a transaction. + """ + new_pairs = port['port'].get(addr_pair.ADDRESS_PAIRS) + + if self.is_address_pairs_attribute_updated(original_port, + port['port']): + updated_port[addr_pair.ADDRESS_PAIRS] = new_pairs + self._delete_allowed_address_pairs(context, port_id) + self._process_create_allowed_address_pairs( + context, updated_port, new_pairs) + return True + + return False diff --git a/neutron/db/api.py b/neutron/db/api.py new file mode 100644 index 000000000..3ccf40624 --- /dev/null +++ b/neutron/db/api.py @@ -0,0 +1,89 @@ +# Copyright 2011 VMware, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
def get_session(autocommit=True, expire_on_commit=False):
    """Helper method to grab session.

    The underlying engine facade is created lazily on first use and
    shared process-wide.
    """
    facade = _create_facade_lazily()
    return facade.get_session(autocommit=autocommit,
                              expire_on_commit=expire_on_commit)
+# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import random +import weakref + +import netaddr +from oslo.config import cfg +from sqlalchemy import event +from sqlalchemy import orm +from sqlalchemy.orm import exc +from sqlalchemy import sql + +from neutron.api.v2 import attributes +from neutron.common import constants +from neutron.common import exceptions as n_exc +from neutron.common import ipv6_utils +from neutron import context as ctx +from neutron.db import api as db +from neutron.db import models_v2 +from neutron.db import sqlalchemyutils +from neutron.extensions import l3 +from neutron import manager +from neutron import neutron_plugin_base_v2 +from neutron.openstack.common import excutils +from neutron.openstack.common import log as logging +from neutron.openstack.common import uuidutils +from neutron.plugins.common import constants as service_constants + + +LOG = logging.getLogger(__name__) + +# Ports with the following 'device_owner' values will not prevent +# network deletion. If delete_network() finds that all ports on a +# network have these owners, it will explicitly delete each port +# and allow network deletion to continue. Similarly, if delete_subnet() +# finds out that all existing IP Allocations are associated with ports +# with these owners, it will allow subnet deletion to proceed with the +# IP allocations being cleaned up by cascade. 
+AUTO_DELETE_PORT_OWNERS = [constants.DEVICE_OWNER_DHCP] + + +class CommonDbMixin(object): + """Common methods used in core and service plugins.""" + # Plugins, mixin classes implementing extension will register + # hooks into the dict below for "augmenting" the "core way" of + # building a query for retrieving objects from a model class. + # To this aim, the register_model_query_hook and unregister_query_hook + # from this class should be invoked + _model_query_hooks = {} + + # This dictionary will store methods for extending attributes of + # api resources. Mixins can use this dict for adding their own methods + # TODO(salvatore-orlando): Avoid using class-level variables + _dict_extend_functions = {} + + @classmethod + def register_model_query_hook(cls, model, name, query_hook, filter_hook, + result_filters=None): + """Register a hook to be invoked when a query is executed. + + Add the hooks to the _model_query_hooks dict. Models are the keys + of this dict, whereas the value is another dict mapping hook names to + callables performing the hook. + Each hook has a "query" component, used to build the query expression + and a "filter" component, which is used to build the filter expression. + + Query hooks take as input the query being built and return a + transformed query expression. + + Filter hooks take as input the filter expression being built and return + a transformed filter expression + """ + model_hooks = cls._model_query_hooks.get(model) + if not model_hooks: + # add key to dict + model_hooks = {} + cls._model_query_hooks[model] = model_hooks + model_hooks[name] = {'query': query_hook, 'filter': filter_hook, + 'result_filters': result_filters} + + @property + def safe_reference(self): + """Return a weakref to the instance. + + Minimize the potential for the instance persisting + unnecessarily in memory by returning a weakref proxy that + won't prevent deallocation. 
+ """ + return weakref.proxy(self) + + def _model_query(self, context, model): + query = context.session.query(model) + # define basic filter condition for model query + # NOTE(jkoelker) non-admin queries are scoped to their tenant_id + # NOTE(salvatore-orlando): unless the model allows for shared objects + query_filter = None + if not context.is_admin and hasattr(model, 'tenant_id'): + if hasattr(model, 'shared'): + query_filter = ((model.tenant_id == context.tenant_id) | + (model.shared == sql.true())) + else: + query_filter = (model.tenant_id == context.tenant_id) + # Execute query hooks registered from mixins and plugins + for _name, hooks in self._model_query_hooks.get(model, + {}).iteritems(): + query_hook = hooks.get('query') + if isinstance(query_hook, basestring): + query_hook = getattr(self, query_hook, None) + if query_hook: + query = query_hook(context, model, query) + + filter_hook = hooks.get('filter') + if isinstance(filter_hook, basestring): + filter_hook = getattr(self, filter_hook, None) + if filter_hook: + query_filter = filter_hook(context, model, query_filter) + + # NOTE(salvatore-orlando): 'if query_filter' will try to evaluate the + # condition, raising an exception + if query_filter is not None: + query = query.filter(query_filter) + return query + + def _fields(self, resource, fields): + if fields: + return dict(((key, item) for key, item in resource.items() + if key in fields)) + return resource + + def _get_tenant_id_for_create(self, context, resource): + if context.is_admin and 'tenant_id' in resource: + tenant_id = resource['tenant_id'] + elif ('tenant_id' in resource and + resource['tenant_id'] != context.tenant_id): + reason = _('Cannot create resource for another tenant') + raise n_exc.AdminRequired(reason=reason) + else: + tenant_id = context.tenant_id + return tenant_id + + def _get_by_id(self, context, model, id): + query = self._model_query(context, model) + return query.filter(model.id == id).one() + + def 
_apply_filters_to_query(self, query, model, filters): + if filters: + for key, value in filters.iteritems(): + column = getattr(model, key, None) + if column: + query = query.filter(column.in_(value)) + for _name, hooks in self._model_query_hooks.get(model, + {}).iteritems(): + result_filter = hooks.get('result_filters', None) + if isinstance(result_filter, basestring): + result_filter = getattr(self, result_filter, None) + + if result_filter: + query = result_filter(query, filters) + return query + + def _apply_dict_extend_functions(self, resource_type, + response, db_object): + for func in self._dict_extend_functions.get( + resource_type, []): + args = (response, db_object) + if isinstance(func, basestring): + func = getattr(self, func, None) + else: + # must call unbound method - use self as 1st argument + args = (self,) + args + if func: + func(*args) + + def _get_collection_query(self, context, model, filters=None, + sorts=None, limit=None, marker_obj=None, + page_reverse=False): + collection = self._model_query(context, model) + collection = self._apply_filters_to_query(collection, model, filters) + if limit and page_reverse and sorts: + sorts = [(s[0], not s[1]) for s in sorts] + collection = sqlalchemyutils.paginate_query(collection, model, limit, + sorts, + marker_obj=marker_obj) + return collection + + def _get_collection(self, context, model, dict_func, filters=None, + fields=None, sorts=None, limit=None, marker_obj=None, + page_reverse=False): + query = self._get_collection_query(context, model, filters=filters, + sorts=sorts, + limit=limit, + marker_obj=marker_obj, + page_reverse=page_reverse) + items = [dict_func(c, fields) for c in query] + if limit and page_reverse: + items.reverse() + return items + + def _get_collection_count(self, context, model, filters=None): + return self._get_collection_query(context, model, filters).count() + + def _get_marker_obj(self, context, resource, limit, marker): + if limit and marker: + return getattr(self, 
'_get_%s' % resource)(context, marker) + return None + + def _filter_non_model_columns(self, data, model): + """Remove all the attributes from data which are not columns of + the model passed as second parameter. + """ + columns = [c.name for c in model.__table__.columns] + return dict((k, v) for (k, v) in + data.iteritems() if k in columns) + + +class NeutronDbPluginV2(neutron_plugin_base_v2.NeutronPluginBaseV2, + CommonDbMixin): + """V2 Neutron plugin interface implementation using SQLAlchemy models. + + Whenever a non-read call happens the plugin will call an event handler + class method (e.g., network_created()). The result is that this class + can be sub-classed by other classes that add custom behaviors on certain + events. + """ + + # This attribute specifies whether the plugin supports or not + # bulk/pagination/sorting operations. Name mangling is used in + # order to ensure it is qualified by class + __native_bulk_support = True + __native_pagination_support = True + __native_sorting_support = True + + def __init__(self): + db.configure_db() + if cfg.CONF.notify_nova_on_port_status_changes: + from neutron.notifiers import nova + # NOTE(arosen) These event listeners are here to hook into when + # port status changes and notify nova about their change. 
+ self.nova_notifier = nova.Notifier() + event.listen(models_v2.Port, 'after_insert', + self.nova_notifier.send_port_status) + event.listen(models_v2.Port, 'after_update', + self.nova_notifier.send_port_status) + event.listen(models_v2.Port.status, 'set', + self.nova_notifier.record_port_status_changed) + + @classmethod + def register_dict_extend_funcs(cls, resource, funcs): + cur_funcs = cls._dict_extend_functions.get(resource, []) + cur_funcs.extend(funcs) + cls._dict_extend_functions[resource] = cur_funcs + + def _get_network(self, context, id): + try: + network = self._get_by_id(context, models_v2.Network, id) + except exc.NoResultFound: + raise n_exc.NetworkNotFound(net_id=id) + return network + + def _get_subnet(self, context, id): + try: + subnet = self._get_by_id(context, models_v2.Subnet, id) + except exc.NoResultFound: + raise n_exc.SubnetNotFound(subnet_id=id) + return subnet + + def _get_port(self, context, id): + try: + port = self._get_by_id(context, models_v2.Port, id) + except exc.NoResultFound: + raise n_exc.PortNotFound(port_id=id) + return port + + def _get_dns_by_subnet(self, context, subnet_id): + dns_qry = context.session.query(models_v2.DNSNameServer) + return dns_qry.filter_by(subnet_id=subnet_id).all() + + def _get_route_by_subnet(self, context, subnet_id): + route_qry = context.session.query(models_v2.SubnetRoute) + return route_qry.filter_by(subnet_id=subnet_id).all() + + def _get_subnets_by_network(self, context, network_id): + subnet_qry = context.session.query(models_v2.Subnet) + return subnet_qry.filter_by(network_id=network_id).all() + + def _get_all_subnets(self, context): + # NOTE(salvatore-orlando): This query might end up putting + # a lot of stress on the db. 
Consider adding a cache layer + return context.session.query(models_v2.Subnet).all() + + @staticmethod + def _generate_mac(context, network_id): + base_mac = cfg.CONF.base_mac.split(':') + max_retries = cfg.CONF.mac_generation_retries + for i in range(max_retries): + mac = [int(base_mac[0], 16), int(base_mac[1], 16), + int(base_mac[2], 16), random.randint(0x00, 0xff), + random.randint(0x00, 0xff), random.randint(0x00, 0xff)] + if base_mac[3] != '00': + mac[3] = int(base_mac[3], 16) + mac_address = ':'.join(map(lambda x: "%02x" % x, mac)) + if NeutronDbPluginV2._check_unique_mac(context, network_id, + mac_address): + LOG.debug(_("Generated mac for network %(network_id)s " + "is %(mac_address)s"), + {'network_id': network_id, + 'mac_address': mac_address}) + return mac_address + else: + LOG.debug(_("Generated mac %(mac_address)s exists. Remaining " + "attempts %(max_retries)s."), + {'mac_address': mac_address, + 'max_retries': max_retries - (i + 1)}) + LOG.error(_("Unable to generate mac address after %s attempts"), + max_retries) + raise n_exc.MacAddressGenerationFailure(net_id=network_id) + + @staticmethod + def _check_unique_mac(context, network_id, mac_address): + mac_qry = context.session.query(models_v2.Port) + try: + mac_qry.filter_by(network_id=network_id, + mac_address=mac_address).one() + except exc.NoResultFound: + return True + return False + + @staticmethod + def _delete_ip_allocation(context, network_id, subnet_id, ip_address): + + # Delete the IP address from the IPAllocate table + LOG.debug(_("Delete allocated IP %(ip_address)s " + "(%(network_id)s/%(subnet_id)s)"), + {'ip_address': ip_address, + 'network_id': network_id, + 'subnet_id': subnet_id}) + context.session.query(models_v2.IPAllocation).filter_by( + network_id=network_id, + ip_address=ip_address, + subnet_id=subnet_id).delete() + + @staticmethod + def _check_if_subnet_uses_eui64(subnet): + """Check if ipv6 address will be calculated via EUI64.""" + return (subnet['ipv6_address_mode'] == 
constants.IPV6_SLAAC + or subnet['ipv6_address_mode'] == constants.DHCPV6_STATELESS) + + @staticmethod + def _generate_ip(context, subnets): + try: + return NeutronDbPluginV2._try_generate_ip(context, subnets) + except n_exc.IpAddressGenerationFailure: + NeutronDbPluginV2._rebuild_availability_ranges(context, subnets) + + return NeutronDbPluginV2._try_generate_ip(context, subnets) + + @staticmethod + def _try_generate_ip(context, subnets): + """Generate an IP address. + + The IP address will be generated from one of the subnets defined on + the network. + """ + range_qry = context.session.query( + models_v2.IPAvailabilityRange).join( + models_v2.IPAllocationPool).with_lockmode('update') + for subnet in subnets: + range = range_qry.filter_by(subnet_id=subnet['id']).first() + if not range: + LOG.debug(_("All IPs from subnet %(subnet_id)s (%(cidr)s) " + "allocated"), + {'subnet_id': subnet['id'], 'cidr': subnet['cidr']}) + continue + ip_address = range['first_ip'] + LOG.debug(_("Allocated IP - %(ip_address)s from %(first_ip)s " + "to %(last_ip)s"), + {'ip_address': ip_address, + 'first_ip': range['first_ip'], + 'last_ip': range['last_ip']}) + if range['first_ip'] == range['last_ip']: + # No more free indices on subnet => delete + LOG.debug(_("No more free IP's in slice. Deleting allocation " + "pool.")) + context.session.delete(range) + else: + # increment the first free + range['first_ip'] = str(netaddr.IPAddress(ip_address) + 1) + return {'ip_address': ip_address, 'subnet_id': subnet['id']} + raise n_exc.IpAddressGenerationFailure(net_id=subnets[0]['network_id']) + + @staticmethod + def _rebuild_availability_ranges(context, subnets): + ip_qry = context.session.query( + models_v2.IPAllocation).with_lockmode('update') + # PostgreSQL does not support select...for update with an outer join. + # No join is needed here. 
+ pool_qry = context.session.query( + models_v2.IPAllocationPool).options( + orm.noload('available_ranges')).with_lockmode('update') + for subnet in sorted(subnets): + LOG.debug(_("Rebuilding availability ranges for subnet %s") + % subnet) + + # Create a set of all currently allocated addresses + ip_qry_results = ip_qry.filter_by(subnet_id=subnet['id']) + allocations = netaddr.IPSet([netaddr.IPAddress(i['ip_address']) + for i in ip_qry_results]) + + for pool in pool_qry.filter_by(subnet_id=subnet['id']): + # Create a set of all addresses in the pool + poolset = netaddr.IPSet(netaddr.iter_iprange(pool['first_ip'], + pool['last_ip'])) + + # Use set difference to find free addresses in the pool + available = poolset - allocations + + # Generator compacts an ip set into contiguous ranges + def ipset_to_ranges(ipset): + first, last = None, None + for cidr in ipset.iter_cidrs(): + if last and last + 1 != cidr.first: + yield netaddr.IPRange(first, last) + first = None + first, last = first if first else cidr.first, cidr.last + if first: + yield netaddr.IPRange(first, last) + + # Write the ranges to the db + for range in ipset_to_ranges(available): + available_range = models_v2.IPAvailabilityRange( + allocation_pool_id=pool['id'], + first_ip=str(netaddr.IPAddress(range.first)), + last_ip=str(netaddr.IPAddress(range.last))) + context.session.add(available_range) + + @staticmethod + def _allocate_specific_ip(context, subnet_id, ip_address): + """Allocate a specific IP address on the subnet.""" + ip = int(netaddr.IPAddress(ip_address)) + range_qry = context.session.query( + models_v2.IPAvailabilityRange).join( + models_v2.IPAllocationPool).with_lockmode('update') + results = range_qry.filter_by(subnet_id=subnet_id) + for range in results: + first = int(netaddr.IPAddress(range['first_ip'])) + last = int(netaddr.IPAddress(range['last_ip'])) + if first <= ip <= last: + if first == last: + context.session.delete(range) + return + elif first == ip: + range['first_ip'] = 
str(netaddr.IPAddress(ip_address) + 1) + return + elif last == ip: + range['last_ip'] = str(netaddr.IPAddress(ip_address) - 1) + return + else: + # Split into two ranges + new_first = str(netaddr.IPAddress(ip_address) + 1) + new_last = range['last_ip'] + range['last_ip'] = str(netaddr.IPAddress(ip_address) - 1) + ip_range = models_v2.IPAvailabilityRange( + allocation_pool_id=range['allocation_pool_id'], + first_ip=new_first, + last_ip=new_last) + context.session.add(ip_range) + return + + @staticmethod + def _check_unique_ip(context, network_id, subnet_id, ip_address): + """Validate that the IP address on the subnet is not in use.""" + ip_qry = context.session.query(models_v2.IPAllocation) + try: + ip_qry.filter_by(network_id=network_id, + subnet_id=subnet_id, + ip_address=ip_address).one() + except exc.NoResultFound: + return True + return False + + @staticmethod + def _check_subnet_ip(cidr, ip_address): + """Validate that the IP address is on the subnet.""" + ip = netaddr.IPAddress(ip_address) + net = netaddr.IPNetwork(cidr) + # Check that the IP is valid on subnet. This cannot be the + # network or the broadcast address + if (ip != net.network and + ip != net.broadcast and + net.netmask & ip == net.network): + return True + return False + + @staticmethod + def _check_ip_in_allocation_pool(context, subnet_id, gateway_ip, + ip_address): + """Validate IP in allocation pool. + + Validates that the IP address is either the default gateway or + in the allocation pools of the subnet. 
+ """ + # Check if the IP is the gateway + if ip_address == gateway_ip: + # Gateway is not in allocation pool + return False + + # Check if the requested IP is in a defined allocation pool + pool_qry = context.session.query(models_v2.IPAllocationPool) + allocation_pools = pool_qry.filter_by(subnet_id=subnet_id) + ip = netaddr.IPAddress(ip_address) + for allocation_pool in allocation_pools: + allocation_pool_range = netaddr.IPRange( + allocation_pool['first_ip'], + allocation_pool['last_ip']) + if ip in allocation_pool_range: + return True + return False + + def _test_fixed_ips_for_port(self, context, network_id, fixed_ips): + """Test fixed IPs for port. + + Check that configured subnets are valid prior to allocating any + IPs. Include the subnet_id in the result if only an IP address is + configured. + + :raises: InvalidInput, IpAddressInUse + """ + fixed_ip_set = [] + for fixed in fixed_ips: + found = False + if 'subnet_id' not in fixed: + if 'ip_address' not in fixed: + msg = _('IP allocation requires subnet_id or ip_address') + raise n_exc.InvalidInput(error_message=msg) + + filter = {'network_id': [network_id]} + subnets = self.get_subnets(context, filters=filter) + for subnet in subnets: + if NeutronDbPluginV2._check_subnet_ip(subnet['cidr'], + fixed['ip_address']): + found = True + subnet_id = subnet['id'] + break + if not found: + msg = _('IP address %s is not a valid IP for the defined ' + 'networks subnets') % fixed['ip_address'] + raise n_exc.InvalidInput(error_message=msg) + else: + subnet = self._get_subnet(context, fixed['subnet_id']) + if subnet['network_id'] != network_id: + msg = (_("Failed to create port on network %(network_id)s" + ", because fixed_ips included invalid subnet " + "%(subnet_id)s") % + {'network_id': network_id, + 'subnet_id': fixed['subnet_id']}) + raise n_exc.InvalidInput(error_message=msg) + subnet_id = subnet['id'] + + if 'ip_address' in fixed: + # Ensure that the IP's are unique + if not 
NeutronDbPluginV2._check_unique_ip(context, network_id, + subnet_id, + fixed['ip_address']): + raise n_exc.IpAddressInUse(net_id=network_id, + ip_address=fixed['ip_address']) + + # Ensure that the IP is valid on the subnet + if (not found and + not NeutronDbPluginV2._check_subnet_ip( + subnet['cidr'], fixed['ip_address'])): + msg = _('IP address %s is not a valid IP for the defined ' + 'subnet') % fixed['ip_address'] + raise n_exc.InvalidInput(error_message=msg) + + fixed_ip_set.append({'subnet_id': subnet_id, + 'ip_address': fixed['ip_address']}) + else: + fixed_ip_set.append({'subnet_id': subnet_id}) + if len(fixed_ip_set) > cfg.CONF.max_fixed_ips_per_port: + msg = _('Exceeded maximim amount of fixed ips per port') + raise n_exc.InvalidInput(error_message=msg) + return fixed_ip_set + + def _allocate_fixed_ips(self, context, network, fixed_ips): + """Allocate IP addresses according to the configured fixed_ips.""" + ips = [] + for fixed in fixed_ips: + if 'ip_address' in fixed: + # Remove the IP address from the allocation pool + NeutronDbPluginV2._allocate_specific_ip( + context, fixed['subnet_id'], fixed['ip_address']) + ips.append({'ip_address': fixed['ip_address'], + 'subnet_id': fixed['subnet_id']}) + # Only subnet ID is specified => need to generate IP + # from subnet + else: + subnets = [self._get_subnet(context, fixed['subnet_id'])] + # IP address allocation + result = self._generate_ip(context, subnets) + ips.append({'ip_address': result['ip_address'], + 'subnet_id': result['subnet_id']}) + return ips + + def _update_ips_for_port(self, context, network_id, port_id, original_ips, + new_ips): + """Add or remove IPs from the port.""" + ips = [] + # These ips are still on the port and haven't been removed + prev_ips = [] + + # the new_ips contain all of the fixed_ips that are to be updated + if len(new_ips) > cfg.CONF.max_fixed_ips_per_port: + msg = _('Exceeded maximim amount of fixed ips per port') + raise n_exc.InvalidInput(error_message=msg) + + # Remove 
all of the intersecting elements + for original_ip in original_ips[:]: + for new_ip in new_ips[:]: + if ('ip_address' in new_ip and + original_ip['ip_address'] == new_ip['ip_address']): + original_ips.remove(original_ip) + new_ips.remove(new_ip) + prev_ips.append(original_ip) + + # Check if the IP's to add are OK + to_add = self._test_fixed_ips_for_port(context, network_id, new_ips) + for ip in original_ips: + LOG.debug(_("Port update. Hold %s"), ip) + NeutronDbPluginV2._delete_ip_allocation(context, + network_id, + ip['subnet_id'], + ip['ip_address']) + + if to_add: + LOG.debug(_("Port update. Adding %s"), to_add) + network = self._get_network(context, network_id) + ips = self._allocate_fixed_ips(context, network, to_add) + return ips, prev_ips + + def _allocate_ips_for_port(self, context, network, port): + """Allocate IP addresses for the port. + + If port['fixed_ips'] is set to 'ATTR_NOT_SPECIFIED', allocate IP + addresses for the port. If port['fixed_ips'] contains an IP address or + a subnet_id then allocate an IP address accordingly. + """ + p = port['port'] + ips = [] + + fixed_configured = p['fixed_ips'] is not attributes.ATTR_NOT_SPECIFIED + if fixed_configured: + configured_ips = self._test_fixed_ips_for_port(context, + p["network_id"], + p['fixed_ips']) + ips = self._allocate_fixed_ips(context, network, configured_ips) + else: + filter = {'network_id': [p['network_id']]} + subnets = self.get_subnets(context, filters=filter) + # Split into v4 and v6 subnets + v4 = [] + v6 = [] + for subnet in subnets: + if subnet['ip_version'] == 4: + v4.append(subnet) + else: + v6.append(subnet) + for subnet in v6: + if self._check_if_subnet_uses_eui64(subnet): + #(dzyu) If true, calculate an IPv6 address + # by mac address and prefix, then remove this + # subnet from the array of subnets that will be passed + # to the _generate_ip() function call, since we just + # generated an IP. 
+ mac = p['mac_address'] + prefix = subnet['cidr'] + ip_address = ipv6_utils.get_ipv6_addr_by_EUI64( + prefix, mac) + ips.append({'ip_address': ip_address.format(), + 'subnet_id': subnet['id']}) + v6.remove(subnet) + version_subnets = [v4, v6] + for subnets in version_subnets: + if subnets: + result = NeutronDbPluginV2._generate_ip(context, subnets) + ips.append({'ip_address': result['ip_address'], + 'subnet_id': result['subnet_id']}) + return ips + + def _validate_subnet_cidr(self, context, network, new_subnet_cidr): + """Validate the CIDR for a subnet. + + Verifies the specified CIDR does not overlap with the ones defined + for the other subnets specified for this network, or with any other + CIDR if overlapping IPs are disabled. + """ + new_subnet_ipset = netaddr.IPSet([new_subnet_cidr]) + if cfg.CONF.allow_overlapping_ips: + subnet_list = network.subnets + else: + subnet_list = self._get_all_subnets(context) + for subnet in subnet_list: + if (netaddr.IPSet([subnet.cidr]) & new_subnet_ipset): + # don't give out details of the overlapping subnet + err_msg = (_("Requested subnet with cidr: %(cidr)s for " + "network: %(network_id)s overlaps with another " + "subnet") % + {'cidr': new_subnet_cidr, + 'network_id': network.id}) + LOG.info(_("Validation for CIDR: %(new_cidr)s failed - " + "overlaps with subnet %(subnet_id)s " + "(CIDR: %(cidr)s)"), + {'new_cidr': new_subnet_cidr, + 'subnet_id': subnet.id, + 'cidr': subnet.cidr}) + raise n_exc.InvalidInput(error_message=err_msg) + + def _validate_allocation_pools(self, ip_pools, subnet_cidr): + """Validate IP allocation pools. + + Verify start and end address for each allocation pool are valid, + ie: constituted by valid and appropriately ordered IP addresses. + Also, verify pools do not overlap among themselves. + Finally, verify that each range fall within the subnet's CIDR. 
+ """ + subnet = netaddr.IPNetwork(subnet_cidr) + subnet_first_ip = netaddr.IPAddress(subnet.first + 1) + subnet_last_ip = netaddr.IPAddress(subnet.last - 1) + + LOG.debug(_("Performing IP validity checks on allocation pools")) + ip_sets = [] + for ip_pool in ip_pools: + try: + start_ip = netaddr.IPAddress(ip_pool['start']) + end_ip = netaddr.IPAddress(ip_pool['end']) + except netaddr.AddrFormatError: + LOG.info(_("Found invalid IP address in pool: " + "%(start)s - %(end)s:"), + {'start': ip_pool['start'], + 'end': ip_pool['end']}) + raise n_exc.InvalidAllocationPool(pool=ip_pool) + if (start_ip.version != subnet.version or + end_ip.version != subnet.version): + LOG.info(_("Specified IP addresses do not match " + "the subnet IP version")) + raise n_exc.InvalidAllocationPool(pool=ip_pool) + if end_ip < start_ip: + LOG.info(_("Start IP (%(start)s) is greater than end IP " + "(%(end)s)"), + {'start': ip_pool['start'], 'end': ip_pool['end']}) + raise n_exc.InvalidAllocationPool(pool=ip_pool) + if start_ip < subnet_first_ip or end_ip > subnet_last_ip: + LOG.info(_("Found pool larger than subnet " + "CIDR:%(start)s - %(end)s"), + {'start': ip_pool['start'], + 'end': ip_pool['end']}) + raise n_exc.OutOfBoundsAllocationPool( + pool=ip_pool, + subnet_cidr=subnet_cidr) + # Valid allocation pool + # Create an IPSet for it for easily verifying overlaps + ip_sets.append(netaddr.IPSet(netaddr.IPRange( + ip_pool['start'], + ip_pool['end']).cidrs())) + + LOG.debug(_("Checking for overlaps among allocation pools " + "and gateway ip")) + ip_ranges = ip_pools[:] + + # Use integer cursors as an efficient way for implementing + # comparison and avoiding comparing the same pair twice + for l_cursor in range(len(ip_sets)): + for r_cursor in range(l_cursor + 1, len(ip_sets)): + if ip_sets[l_cursor] & ip_sets[r_cursor]: + l_range = ip_ranges[l_cursor] + r_range = ip_ranges[r_cursor] + LOG.info(_("Found overlapping ranges: %(l_range)s and " + "%(r_range)s"), + {'l_range': l_range, 
'r_range': r_range}) + raise n_exc.OverlappingAllocationPools( + pool_1=l_range, + pool_2=r_range, + subnet_cidr=subnet_cidr) + + def _validate_host_route(self, route, ip_version): + try: + netaddr.IPNetwork(route['destination']) + netaddr.IPAddress(route['nexthop']) + except netaddr.core.AddrFormatError: + err_msg = _("Invalid route: %s") % route + raise n_exc.InvalidInput(error_message=err_msg) + except ValueError: + # netaddr.IPAddress would raise this + err_msg = _("Invalid route: %s") % route + raise n_exc.InvalidInput(error_message=err_msg) + self._validate_ip_version(ip_version, route['nexthop'], 'nexthop') + self._validate_ip_version(ip_version, route['destination'], + 'destination') + + def _allocate_pools_for_subnet(self, context, subnet): + """Create IP allocation pools for a given subnet + + Pools are defined by the 'allocation_pools' attribute, + a list of dict objects with 'start' and 'end' keys for + defining the pool range. + """ + pools = [] + # Auto allocate the pool around gateway_ip + net = netaddr.IPNetwork(subnet['cidr']) + first_ip = net.first + 1 + last_ip = net.last - 1 + gw_ip = int(netaddr.IPAddress(subnet['gateway_ip'] or net.last)) + # Use the gw_ip to find a point for splitting allocation pools + # for this subnet + split_ip = min(max(gw_ip, net.first), net.last) + if split_ip > first_ip: + pools.append({'start': str(netaddr.IPAddress(first_ip)), + 'end': str(netaddr.IPAddress(split_ip - 1))}) + if split_ip < last_ip: + pools.append({'start': str(netaddr.IPAddress(split_ip + 1)), + 'end': str(netaddr.IPAddress(last_ip))}) + # return auto-generated pools + # no need to check for their validity + return pools + + def _validate_shared_update(self, context, id, original, updated): + # The only case that needs to be validated is when 'shared' + # goes from True to False + if updated['shared'] == original.shared or updated['shared']: + return + ports = self._model_query( + context, models_v2.Port).filter( + models_v2.Port.network_id == id) + 
subnets = self._model_query( + context, models_v2.Subnet).filter( + models_v2.Subnet.network_id == id) + tenant_ids = set([port['tenant_id'] for port in ports] + + [subnet['tenant_id'] for subnet in subnets]) + # raise if multiple tenants found or if the only tenant found + # is not the owner of the network + if (len(tenant_ids) > 1 or len(tenant_ids) == 1 and + tenant_ids.pop() != original.tenant_id): + raise n_exc.InvalidSharedSetting(network=original.name) + + def _validate_ipv6_attributes(self, subnet, cur_subnet): + ra_mode_set = attributes.is_attr_set(subnet.get('ipv6_ra_mode')) + address_mode_set = attributes.is_attr_set( + subnet.get('ipv6_address_mode')) + if cur_subnet: + ra_mode = (subnet['ipv6_ra_mode'] if ra_mode_set + else cur_subnet['ipv6_ra_mode']) + addr_mode = (subnet['ipv6_address_mode'] if address_mode_set + else cur_subnet['ipv6_address_mode']) + if ra_mode_set or address_mode_set: + # Check that updated subnet ipv6 attributes do not conflict + self._validate_ipv6_combination(ra_mode, addr_mode) + self._validate_ipv6_update_dhcp(subnet, cur_subnet) + else: + self._validate_ipv6_dhcp(ra_mode_set, address_mode_set, + subnet['enable_dhcp']) + if ra_mode_set and address_mode_set: + self._validate_ipv6_combination(subnet['ipv6_ra_mode'], + subnet['ipv6_address_mode']) + + def _validate_ipv6_combination(self, ra_mode, address_mode): + if ra_mode != address_mode: + msg = _("ipv6_ra_mode set to '%(ra_mode)s' with ipv6_address_mode " + "set to '%(addr_mode)s' is not valid. 
" + "If both attributes are set, they must be the same value" + ) % {'ra_mode': ra_mode, 'addr_mode': address_mode} + raise n_exc.InvalidInput(error_message=msg) + + def _validate_ipv6_dhcp(self, ra_mode_set, address_mode_set, enable_dhcp): + if (ra_mode_set or address_mode_set) and not enable_dhcp: + msg = _("ipv6_ra_mode or ipv6_address_mode cannot be set when " + "enable_dhcp is set to False.") + raise n_exc.InvalidInput(error_message=msg) + + def _validate_ipv6_update_dhcp(self, subnet, cur_subnet): + if ('enable_dhcp' in subnet and not subnet['enable_dhcp']): + msg = _("Cannot disable enable_dhcp with " + "ipv6 attributes set") + + ra_mode_set = attributes.is_attr_set(subnet.get('ipv6_ra_mode')) + address_mode_set = attributes.is_attr_set( + subnet.get('ipv6_address_mode')) + + if ra_mode_set or address_mode_set: + raise n_exc.InvalidInput(error_message=msg) + + old_ra_mode_set = attributes.is_attr_set( + cur_subnet.get('ipv6_ra_mode')) + old_address_mode_set = attributes.is_attr_set( + cur_subnet.get('ipv6_address_mode')) + + if old_ra_mode_set or old_address_mode_set: + raise n_exc.InvalidInput(error_message=msg) + + def _make_network_dict(self, network, fields=None, + process_extensions=True): + res = {'id': network['id'], + 'name': network['name'], + 'tenant_id': network['tenant_id'], + 'admin_state_up': network['admin_state_up'], + 'status': network['status'], + 'shared': network['shared'], + 'subnets': [subnet['id'] + for subnet in network['subnets']]} + # Call auxiliary extend functions, if any + if process_extensions: + self._apply_dict_extend_functions( + attributes.NETWORKS, res, network) + return self._fields(res, fields) + + def _make_subnet_dict(self, subnet, fields=None): + res = {'id': subnet['id'], + 'name': subnet['name'], + 'tenant_id': subnet['tenant_id'], + 'network_id': subnet['network_id'], + 'ip_version': subnet['ip_version'], + 'cidr': subnet['cidr'], + 'allocation_pools': [{'start': pool['first_ip'], + 'end': pool['last_ip']} + for 
pool in subnet['allocation_pools']], + 'gateway_ip': subnet['gateway_ip'], + 'enable_dhcp': subnet['enable_dhcp'], + 'ipv6_ra_mode': subnet['ipv6_ra_mode'], + 'ipv6_address_mode': subnet['ipv6_address_mode'], + 'dns_nameservers': [dns['address'] + for dns in subnet['dns_nameservers']], + 'host_routes': [{'destination': route['destination'], + 'nexthop': route['nexthop']} + for route in subnet['routes']], + 'shared': subnet['shared'] + } + return self._fields(res, fields) + + def _make_port_dict(self, port, fields=None, + process_extensions=True): + res = {"id": port["id"], + 'name': port['name'], + "network_id": port["network_id"], + 'tenant_id': port['tenant_id'], + "mac_address": port["mac_address"], + "admin_state_up": port["admin_state_up"], + "status": port["status"], + "fixed_ips": [{'subnet_id': ip["subnet_id"], + 'ip_address': ip["ip_address"]} + for ip in port["fixed_ips"]], + "device_id": port["device_id"], + "device_owner": port["device_owner"]} + # Call auxiliary extend functions, if any + if process_extensions: + self._apply_dict_extend_functions( + attributes.PORTS, res, port) + return self._fields(res, fields) + + def _create_bulk(self, resource, context, request_items): + objects = [] + collection = "%ss" % resource + items = request_items[collection] + context.session.begin(subtransactions=True) + try: + for item in items: + obj_creator = getattr(self, 'create_%s' % resource) + objects.append(obj_creator(context, item)) + context.session.commit() + except Exception: + context.session.rollback() + with excutils.save_and_reraise_exception(): + LOG.error(_("An exception occurred while creating " + "the %(resource)s:%(item)s"), + {'resource': resource, 'item': item}) + return objects + + def create_network_bulk(self, context, networks): + return self._create_bulk('network', context, networks) + + def create_network(self, context, network): + """Handle creation of a single network.""" + # single request processing + n = network['network'] + # 
NOTE(jkoelker) Get the tenant_id outside of the session to avoid + # unneeded db action if the operation raises + tenant_id = self._get_tenant_id_for_create(context, n) + with context.session.begin(subtransactions=True): + args = {'tenant_id': tenant_id, + 'id': n.get('id') or uuidutils.generate_uuid(), + 'name': n['name'], + 'admin_state_up': n['admin_state_up'], + 'shared': n['shared'], + 'status': n.get('status', constants.NET_STATUS_ACTIVE)} + network = models_v2.Network(**args) + context.session.add(network) + return self._make_network_dict(network, process_extensions=False) + + def update_network(self, context, id, network): + n = network['network'] + with context.session.begin(subtransactions=True): + network = self._get_network(context, id) + # validate 'shared' parameter + if 'shared' in n: + self._validate_shared_update(context, id, network, n) + network.update(n) + # also update shared in all the subnets for this network + subnets = self._get_subnets_by_network(context, id) + for subnet in subnets: + subnet['shared'] = network['shared'] + return self._make_network_dict(network) + + def delete_network(self, context, id): + with context.session.begin(subtransactions=True): + network = self._get_network(context, id) + + filters = {'network_id': [id]} + # NOTE(armando-migliaccio): stick with base plugin + query = context.session.query( + models_v2.Port).enable_eagerloads(False) + ports = self._apply_filters_to_query( + query, models_v2.Port, filters).with_lockmode('update') + + # check if there are any tenant owned ports in-use + only_auto_del = all(p['device_owner'] in AUTO_DELETE_PORT_OWNERS + for p in ports) + + if not only_auto_del: + raise n_exc.NetworkInUse(net_id=id) + + # clean up network owned ports + for port in ports: + self._delete_port(context, port['id']) + + # clean up subnets + subnets_qry = context.session.query(models_v2.Subnet) + subnets_qry.filter_by(network_id=id).delete() + context.session.delete(network) + + def get_network(self, 
context, id, fields=None): + network = self._get_network(context, id) + return self._make_network_dict(network, fields) + + def get_networks(self, context, filters=None, fields=None, + sorts=None, limit=None, marker=None, + page_reverse=False): + marker_obj = self._get_marker_obj(context, 'network', limit, marker) + return self._get_collection(context, models_v2.Network, + self._make_network_dict, + filters=filters, fields=fields, + sorts=sorts, + limit=limit, + marker_obj=marker_obj, + page_reverse=page_reverse) + + def get_networks_count(self, context, filters=None): + return self._get_collection_count(context, models_v2.Network, + filters=filters) + + def create_subnet_bulk(self, context, subnets): + return self._create_bulk('subnet', context, subnets) + + def _validate_ip_version(self, ip_version, addr, name): + """Check IP field of a subnet match specified ip version.""" + ip = netaddr.IPNetwork(addr) + if ip.version != ip_version: + data = {'name': name, + 'addr': addr, + 'ip_version': ip_version} + msg = _("%(name)s '%(addr)s' does not match " + "the ip_version '%(ip_version)s'") % data + raise n_exc.InvalidInput(error_message=msg) + + def _validate_subnet(self, context, s, cur_subnet=None): + """Validate a subnet spec.""" + + # This method will validate attributes which may change during + # create_subnet() and update_subnet(). + # The method requires the subnet spec 's' has 'ip_version' field. + # If 's' dict does not have 'ip_version' field in an API call + # (e.g., update_subnet()), you need to set 'ip_version' field + # before calling this method. 
+ + ip_ver = s['ip_version'] + + if 'cidr' in s: + self._validate_ip_version(ip_ver, s['cidr'], 'cidr') + + if attributes.is_attr_set(s.get('gateway_ip')): + self._validate_ip_version(ip_ver, s['gateway_ip'], 'gateway_ip') + if (cfg.CONF.force_gateway_on_subnet and + not NeutronDbPluginV2._check_subnet_ip(s['cidr'], + s['gateway_ip'])): + error_message = _("Gateway is not valid on subnet") + raise n_exc.InvalidInput(error_message=error_message) + # Ensure the gateway IP is not assigned to any port + # skip this check in case of create (s parameter won't have id) + # NOTE(salv-orlando): There is slight chance of a race, when + # a subnet-update and a router-interface-add operation are + # executed concurrently + if cur_subnet: + alloc_qry = context.session.query(models_v2.IPAllocation) + allocated = alloc_qry.filter_by( + ip_address=cur_subnet['gateway_ip'], + subnet_id=cur_subnet['id']).first() + if allocated and allocated['port_id']: + raise n_exc.GatewayIpInUse( + ip_address=cur_subnet['gateway_ip'], + port_id=allocated['port_id']) + + if attributes.is_attr_set(s.get('dns_nameservers')): + if len(s['dns_nameservers']) > cfg.CONF.max_dns_nameservers: + raise n_exc.DNSNameServersExhausted( + subnet_id=s.get('id', _('new subnet')), + quota=cfg.CONF.max_dns_nameservers) + for dns in s['dns_nameservers']: + try: + netaddr.IPAddress(dns) + except Exception: + raise n_exc.InvalidInput( + error_message=(_("Error parsing dns address %s") % + dns)) + self._validate_ip_version(ip_ver, dns, 'dns_nameserver') + + if attributes.is_attr_set(s.get('host_routes')): + if len(s['host_routes']) > cfg.CONF.max_subnet_host_routes: + raise n_exc.HostRoutesExhausted( + subnet_id=s.get('id', _('new subnet')), + quota=cfg.CONF.max_subnet_host_routes) + # check if the routes are all valid + for rt in s['host_routes']: + self._validate_host_route(rt, ip_ver) + + if ip_ver == 4: + if attributes.is_attr_set(s.get('ipv6_ra_mode')): + raise n_exc.InvalidInput( + error_message=(_("ipv6_ra_mode 
is not valid when " + "ip_version is 4"))) + if attributes.is_attr_set(s.get('ipv6_address_mode')): + raise n_exc.InvalidInput( + error_message=(_("ipv6_address_mode is not valid when " + "ip_version is 4"))) + if ip_ver == 6: + self._validate_ipv6_attributes(s, cur_subnet) + + def _validate_gw_out_of_pools(self, gateway_ip, pools): + for allocation_pool in pools: + pool_range = netaddr.IPRange( + allocation_pool['start'], + allocation_pool['end']) + if netaddr.IPAddress(gateway_ip) in pool_range: + raise n_exc.GatewayConflictWithAllocationPools( + pool=pool_range, + ip_address=gateway_ip) + + def create_subnet(self, context, subnet): + + net = netaddr.IPNetwork(subnet['subnet']['cidr']) + # turn the CIDR into a proper subnet + subnet['subnet']['cidr'] = '%s/%s' % (net.network, net.prefixlen) + + s = subnet['subnet'] + + if s['gateway_ip'] is attributes.ATTR_NOT_SPECIFIED: + s['gateway_ip'] = str(netaddr.IPAddress(net.first + 1)) + + if s['allocation_pools'] == attributes.ATTR_NOT_SPECIFIED: + s['allocation_pools'] = self._allocate_pools_for_subnet(context, s) + else: + self._validate_allocation_pools(s['allocation_pools'], s['cidr']) + if s['gateway_ip'] is not None: + self._validate_gw_out_of_pools(s['gateway_ip'], + s['allocation_pools']) + + self._validate_subnet(context, s) + + tenant_id = self._get_tenant_id_for_create(context, s) + with context.session.begin(subtransactions=True): + network = self._get_network(context, s["network_id"]) + self._validate_subnet_cidr(context, network, s['cidr']) + # The 'shared' attribute for subnets is for internal plugin + # use only. 
It is not exposed through the API + args = {'tenant_id': tenant_id, + 'id': s.get('id') or uuidutils.generate_uuid(), + 'name': s['name'], + 'network_id': s['network_id'], + 'ip_version': s['ip_version'], + 'cidr': s['cidr'], + 'enable_dhcp': s['enable_dhcp'], + 'gateway_ip': s['gateway_ip'], + 'shared': network.shared} + if s['ip_version'] == 6 and s['enable_dhcp']: + if attributes.is_attr_set(s['ipv6_ra_mode']): + args['ipv6_ra_mode'] = s['ipv6_ra_mode'] + if attributes.is_attr_set(s['ipv6_address_mode']): + args['ipv6_address_mode'] = s['ipv6_address_mode'] + subnet = models_v2.Subnet(**args) + + context.session.add(subnet) + if s['dns_nameservers'] is not attributes.ATTR_NOT_SPECIFIED: + for addr in s['dns_nameservers']: + ns = models_v2.DNSNameServer(address=addr, + subnet_id=subnet.id) + context.session.add(ns) + + if s['host_routes'] is not attributes.ATTR_NOT_SPECIFIED: + for rt in s['host_routes']: + route = models_v2.SubnetRoute( + subnet_id=subnet.id, + destination=rt['destination'], + nexthop=rt['nexthop']) + context.session.add(route) + + for pool in s['allocation_pools']: + ip_pool = models_v2.IPAllocationPool(subnet=subnet, + first_ip=pool['start'], + last_ip=pool['end']) + context.session.add(ip_pool) + ip_range = models_v2.IPAvailabilityRange( + ipallocationpool=ip_pool, + first_ip=pool['start'], + last_ip=pool['end']) + context.session.add(ip_range) + + return self._make_subnet_dict(subnet) + + def _update_subnet_dns_nameservers(self, context, id, s): + old_dns_list = self._get_dns_by_subnet(context, id) + new_dns_addr_set = set(s["dns_nameservers"]) + old_dns_addr_set = set([dns['address'] + for dns in old_dns_list]) + + new_dns = list(new_dns_addr_set) + for dns_addr in old_dns_addr_set - new_dns_addr_set: + for dns in old_dns_list: + if dns['address'] == dns_addr: + context.session.delete(dns) + for dns_addr in new_dns_addr_set - old_dns_addr_set: + dns = models_v2.DNSNameServer( + address=dns_addr, + subnet_id=id) + context.session.add(dns) + 
del s["dns_nameservers"] + return new_dns + + def _update_subnet_host_routes(self, context, id, s): + + def _combine(ht): + return ht['destination'] + "_" + ht['nexthop'] + + old_route_list = self._get_route_by_subnet(context, id) + + new_route_set = set([_combine(route) + for route in s['host_routes']]) + + old_route_set = set([_combine(route) + for route in old_route_list]) + + for route_str in old_route_set - new_route_set: + for route in old_route_list: + if _combine(route) == route_str: + context.session.delete(route) + for route_str in new_route_set - old_route_set: + route = models_v2.SubnetRoute( + destination=route_str.partition("_")[0], + nexthop=route_str.partition("_")[2], + subnet_id=id) + context.session.add(route) + + # Gather host routes for result + new_routes = [] + for route_str in new_route_set: + new_routes.append( + {'destination': route_str.partition("_")[0], + 'nexthop': route_str.partition("_")[2]}) + del s["host_routes"] + return new_routes + + def _update_subnet_allocation_pools(self, context, id, s): + context.session.query(models_v2.IPAllocationPool).filter_by( + subnet_id=id).delete() + new_pools = [models_v2.IPAllocationPool( + first_ip=p['start'], last_ip=p['end'], + subnet_id=id) for p in s['allocation_pools']] + context.session.add_all(new_pools) + NeutronDbPluginV2._rebuild_availability_ranges(context, [s]) + #Gather new pools for result: + result_pools = [{'start': pool['start'], + 'end': pool['end']} + for pool in s['allocation_pools']] + del s['allocation_pools'] + return result_pools + + def update_subnet(self, context, id, subnet): + """Update the subnet with new info. 
+ + The change however will not be realized until the client renew the + dns lease or we support gratuitous DHCP offers + """ + s = subnet['subnet'] + changed_host_routes = False + changed_dns = False + changed_allocation_pools = False + db_subnet = self._get_subnet(context, id) + # Fill 'ip_version' and 'allocation_pools' fields with the current + # value since _validate_subnet() expects subnet spec has 'ip_version' + # and 'allocation_pools' fields. + s['ip_version'] = db_subnet.ip_version + s['cidr'] = db_subnet.cidr + s['id'] = db_subnet.id + self._validate_subnet(context, s, cur_subnet=db_subnet) + + if 'gateway_ip' in s and s['gateway_ip'] is not None: + allocation_pools = [{'start': p['first_ip'], 'end': p['last_ip']} + for p in db_subnet.allocation_pools] + self._validate_gw_out_of_pools(s["gateway_ip"], allocation_pools) + + with context.session.begin(subtransactions=True): + if "dns_nameservers" in s: + changed_dns = True + new_dns = self._update_subnet_dns_nameservers(context, id, s) + + if "host_routes" in s: + changed_host_routes = True + new_routes = self._update_subnet_host_routes(context, id, s) + + if "allocation_pools" in s: + self._validate_allocation_pools(s['allocation_pools'], + s['cidr']) + changed_allocation_pools = True + new_pools = self._update_subnet_allocation_pools(context, + id, s) + subnet = self._get_subnet(context, id) + subnet.update(s) + result = self._make_subnet_dict(subnet) + # Keep up with fields that changed + if changed_dns: + result['dns_nameservers'] = new_dns + if changed_host_routes: + result['host_routes'] = new_routes + if changed_allocation_pools: + result['allocation_pools'] = new_pools + return result + + def delete_subnet(self, context, id): + with context.session.begin(subtransactions=True): + subnet = self._get_subnet(context, id) + # Check if any tenant owned ports are using this subnet + allocated = (context.session.query(models_v2.IPAllocation). + filter_by(subnet_id=subnet['id']). + join(models_v2.Port). 
+ filter_by(network_id=subnet['network_id']). + with_lockmode('update')) + + # remove network owned ports + for a in allocated: + if a.ports.device_owner in AUTO_DELETE_PORT_OWNERS: + NeutronDbPluginV2._delete_ip_allocation( + context, subnet.network_id, id, a.ip_address) + else: + raise n_exc.SubnetInUse(subnet_id=id) + + context.session.delete(subnet) + + def get_subnet(self, context, id, fields=None): + subnet = self._get_subnet(context, id) + return self._make_subnet_dict(subnet, fields) + + def get_subnets(self, context, filters=None, fields=None, + sorts=None, limit=None, marker=None, + page_reverse=False): + marker_obj = self._get_marker_obj(context, 'subnet', limit, marker) + return self._get_collection(context, models_v2.Subnet, + self._make_subnet_dict, + filters=filters, fields=fields, + sorts=sorts, + limit=limit, + marker_obj=marker_obj, + page_reverse=page_reverse) + + def get_subnets_count(self, context, filters=None): + return self._get_collection_count(context, models_v2.Subnet, + filters=filters) + + def create_port_bulk(self, context, ports): + return self._create_bulk('port', context, ports) + + def create_port(self, context, port): + p = port['port'] + port_id = p.get('id') or uuidutils.generate_uuid() + network_id = p['network_id'] + # NOTE(jkoelker) Get the tenant_id outside of the session to avoid + # unneeded db action if the operation raises + tenant_id = self._get_tenant_id_for_create(context, p) + if p.get('device_owner') == constants.DEVICE_OWNER_ROUTER_INTF: + self._enforce_device_owner_not_router_intf_or_device_id(context, p, + tenant_id) + + with context.session.begin(subtransactions=True): + network = self._get_network(context, network_id) + + # Ensure that a MAC address is defined and it is unique on the + # network + if p['mac_address'] is attributes.ATTR_NOT_SPECIFIED: + #Note(scollins) Add the generated mac_address to the port, + #since _allocate_ips_for_port will need the mac when + #calculating an EUI-64 address for a v6 
subnet + p['mac_address'] = NeutronDbPluginV2._generate_mac(context, + network_id) + else: + # Ensure that the mac on the network is unique + if not NeutronDbPluginV2._check_unique_mac(context, + network_id, + p['mac_address']): + raise n_exc.MacAddressInUse(net_id=network_id, + mac=p['mac_address']) + + # Returns the IP's for the port + ips = self._allocate_ips_for_port(context, network, port) + + if 'status' not in p: + status = constants.PORT_STATUS_ACTIVE + else: + status = p['status'] + + port = models_v2.Port(tenant_id=tenant_id, + name=p['name'], + id=port_id, + network_id=network_id, + mac_address=p['mac_address'], + admin_state_up=p['admin_state_up'], + status=status, + device_id=p['device_id'], + device_owner=p['device_owner']) + context.session.add(port) + + # Update the allocated IP's + if ips: + for ip in ips: + ip_address = ip['ip_address'] + subnet_id = ip['subnet_id'] + LOG.debug(_("Allocated IP %(ip_address)s " + "(%(network_id)s/%(subnet_id)s/%(port_id)s)"), + {'ip_address': ip_address, + 'network_id': network_id, + 'subnet_id': subnet_id, + 'port_id': port_id}) + allocated = models_v2.IPAllocation( + network_id=network_id, + port_id=port_id, + ip_address=ip_address, + subnet_id=subnet_id, + ) + context.session.add(allocated) + + return self._make_port_dict(port, process_extensions=False) + + def update_port(self, context, id, port): + p = port['port'] + + changed_ips = False + with context.session.begin(subtransactions=True): + port = self._get_port(context, id) + if 'device_owner' in p: + current_device_owner = p['device_owner'] + changed_device_owner = True + else: + current_device_owner = port['device_owner'] + changed_device_owner = False + if p.get('device_id') != port['device_id']: + changed_device_id = True + + # if the current device_owner is ROUTER_INF and the device_id or + # device_owner changed check device_id is not another tenants + # router + if ((current_device_owner == constants.DEVICE_OWNER_ROUTER_INTF) + and (changed_device_id 
or changed_device_owner)): + self._enforce_device_owner_not_router_intf_or_device_id( + context, p, port['tenant_id'], port) + + # Check if the IPs need to be updated + if 'fixed_ips' in p: + changed_ips = True + original = self._make_port_dict(port, process_extensions=False) + added_ips, prev_ips = self._update_ips_for_port( + context, port["network_id"], id, original["fixed_ips"], + p['fixed_ips']) + + # Update ips if necessary + for ip in added_ips: + allocated = models_v2.IPAllocation( + network_id=port['network_id'], port_id=port.id, + ip_address=ip['ip_address'], subnet_id=ip['subnet_id']) + context.session.add(allocated) + # Remove all attributes in p which are not in the port DB model + # and then update the port + port.update(self._filter_non_model_columns(p, models_v2.Port)) + + result = self._make_port_dict(port) + # Keep up with fields that changed + if changed_ips: + result['fixed_ips'] = prev_ips + added_ips + return result + + def delete_port(self, context, id): + with context.session.begin(subtransactions=True): + self._delete_port(context, id) + + def delete_ports_by_device_id(self, context, device_id, network_id=None): + query = (context.session.query(models_v2.Port.id) + .enable_eagerloads(False) + .filter(models_v2.Port.device_id == device_id)) + if network_id: + query = query.filter(models_v2.Port.network_id == network_id) + port_ids = [p[0] for p in query] + for port_id in port_ids: + try: + self.delete_port(context, port_id) + except n_exc.PortNotFound: + # Don't raise if something else concurrently deleted the port + LOG.debug(_("Ignoring PortNotFound when deleting port '%s'. " + "The port has already been deleted."), + port_id) + + def _delete_port(self, context, id): + query = (context.session.query(models_v2.Port). 
+ enable_eagerloads(False).filter_by(id=id)) + if not context.is_admin: + query = query.filter_by(tenant_id=context.tenant_id) + query.delete() + + def get_port(self, context, id, fields=None): + port = self._get_port(context, id) + return self._make_port_dict(port, fields) + + def _get_ports_query(self, context, filters=None, sorts=None, limit=None, + marker_obj=None, page_reverse=False): + Port = models_v2.Port + IPAllocation = models_v2.IPAllocation + + if not filters: + filters = {} + + query = self._model_query(context, Port) + + fixed_ips = filters.pop('fixed_ips', {}) + ip_addresses = fixed_ips.get('ip_address') + subnet_ids = fixed_ips.get('subnet_id') + if ip_addresses or subnet_ids: + query = query.join(Port.fixed_ips) + if ip_addresses: + query = query.filter(IPAllocation.ip_address.in_(ip_addresses)) + if subnet_ids: + query = query.filter(IPAllocation.subnet_id.in_(subnet_ids)) + + query = self._apply_filters_to_query(query, Port, filters) + if limit and page_reverse and sorts: + sorts = [(s[0], not s[1]) for s in sorts] + query = sqlalchemyutils.paginate_query(query, Port, limit, + sorts, marker_obj) + return query + + def get_ports(self, context, filters=None, fields=None, + sorts=None, limit=None, marker=None, + page_reverse=False): + marker_obj = self._get_marker_obj(context, 'port', limit, marker) + query = self._get_ports_query(context, filters=filters, + sorts=sorts, limit=limit, + marker_obj=marker_obj, + page_reverse=page_reverse) + items = [self._make_port_dict(c, fields) for c in query] + if limit and page_reverse: + items.reverse() + return items + + def get_ports_count(self, context, filters=None): + return self._get_ports_query(context, filters).count() + + def _enforce_device_owner_not_router_intf_or_device_id(self, context, + port_request, + tenant_id, + db_port=None): + if not context.is_admin: + # find the device_id. If the call was update_port and the + # device_id was not passed in we use the device_id from the + # db. 
+ device_id = port_request.get('device_id') + if not device_id and db_port: + device_id = db_port.get('device_id') + # check to make sure device_id does not match another tenants + # router. + if device_id: + if hasattr(self, 'get_router'): + try: + ctx_admin = ctx.get_admin_context() + router = self.get_router(ctx_admin, device_id) + except l3.RouterNotFound: + return + else: + l3plugin = ( + manager.NeutronManager.get_service_plugins().get( + service_constants.L3_ROUTER_NAT)) + if l3plugin: + try: + ctx_admin = ctx.get_admin_context() + router = l3plugin.get_router(ctx_admin, + device_id) + except l3.RouterNotFound: + return + else: + # raise as extension doesn't support L3 anyways. + raise n_exc.DeviceIDNotOwnedByTenant( + device_id=device_id) + if tenant_id != router['tenant_id']: + raise n_exc.DeviceIDNotOwnedByTenant(device_id=device_id) diff --git a/neutron/db/dhcp_rpc_base.py b/neutron/db/dhcp_rpc_base.py new file mode 100644 index 000000000..e42099c63 --- /dev/null +++ b/neutron/db/dhcp_rpc_base.py @@ -0,0 +1,287 @@ +# Copyright (c) 2012 OpenStack Foundation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
from oslo.config import cfg

from neutron.api.v2 import attributes
from neutron.common import constants
from neutron.common import exceptions as n_exc
from neutron.common import utils
from neutron.extensions import portbindings
from neutron import manager
from neutron.openstack.common.db import exception as db_exc
from neutron.openstack.common import excutils
from neutron.openstack.common import log as logging


LOG = logging.getLogger(__name__)


class DhcpRpcCallbackMixin(object):
    """A mix-in that enable DHCP agent support in plugin implementations."""

    def _get_active_networks(self, context, **kwargs):
        """Retrieve and return a list of the active networks."""
        host = kwargs.get('host')
        plugin = manager.NeutronManager.get_plugin()
        if utils.is_extension_supported(
            plugin, constants.DHCP_AGENT_SCHEDULER_EXT_ALIAS):
            if cfg.CONF.network_auto_schedule:
                plugin.auto_schedule_networks(context, host)
            return plugin.list_active_networks_on_active_dhcp_agent(
                context, host)
        return plugin.get_networks(context,
                                   filters=dict(admin_state_up=[True]))

    def _port_action(self, plugin, context, port, action):
        """Perform port operations taking care of concurrency issues."""
        try:
            if action == 'create_port':
                return plugin.create_port(context, port)
            elif action == 'update_port':
                return plugin.update_port(context, port['id'], port['port'])
            else:
                msg = _('Unrecognized action')
                raise n_exc.Invalid(message=msg)
        except (db_exc.DBError, n_exc.NetworkNotFound,
                n_exc.SubnetNotFound, n_exc.IpAddressGenerationFailure) as e:
            with excutils.save_and_reraise_exception(reraise=False) as ctxt:
                if isinstance(e, n_exc.IpAddressGenerationFailure):
                    # Check if the subnet still exists and if it does not,
                    # this is the reason why the ip address generation
                    # failed. In any other unlikely event re-raise
                    try:
                        subnet_id = port['port']['fixed_ips'][0]['subnet_id']
                        plugin.get_subnet(context, subnet_id)
                    except n_exc.SubnetNotFound:
                        pass
                    else:
                        ctxt.reraise = True
                net_id = port['port']['network_id']
                LOG.warn(_("Action %(action)s for network %(net_id)s "
                           "could not complete successfully: %(reason)s")
                         % {"action": action, "net_id": net_id, 'reason': e})

    def get_active_networks(self, context, **kwargs):
        """Retrieve and return a list of the active network ids."""
        # NOTE(arosen): This method is no longer used by the DHCP agent but
        # is left so that neutron-dhcp-agents will still continue to work if
        # neutron-server is upgraded and not the agent.
        host = kwargs.get('host')
        LOG.debug(_('get_active_networks requested from %s'), host)
        return [net['id']
                for net in self._get_active_networks(context, **kwargs)]

    def get_active_networks_info(self, context, **kwargs):
        """Returns all the networks/subnets/ports in system."""
        host = kwargs.get('host')
        LOG.debug(_('get_active_networks_info from %s'), host)
        networks = self._get_active_networks(context, **kwargs)
        plugin = manager.NeutronManager.get_plugin()
        filters = {'network_id': [network['id'] for network in networks]}
        ports = plugin.get_ports(context, filters=filters)
        # Only DHCP-enabled subnets are of interest to the agent.
        filters['enable_dhcp'] = [True]
        subnets = plugin.get_subnets(context, filters=filters)

        for network in networks:
            net_id = network['id']
            network['subnets'] = [subnet for subnet in subnets
                                  if subnet['network_id'] == net_id]
            network['ports'] = [port for port in ports
                                if port['network_id'] == net_id]

        return networks

    def get_network_info(self, context, **kwargs):
        """Retrieve and return a extended information about a network."""
        network_id = kwargs.get('network_id')
        host = kwargs.get('host')
        LOG.debug(_('Network %(network_id)s requested from '
                    '%(host)s'), {'network_id': network_id,
                                  'host': host})
        plugin = manager.NeutronManager.get_plugin()
        try:
            network = plugin.get_network(context, network_id)
        except n_exc.NetworkNotFound:
            LOG.warn(_("Network %s could not be found, it might have "
                       "been deleted concurrently."), network_id)
            return
        filters = dict(network_id=[network_id])
        network['subnets'] = plugin.get_subnets(context, filters=filters)
        network['ports'] = plugin.get_ports(context, filters=filters)
        return network

    def get_dhcp_port(self, context, **kwargs):
        """Allocate a DHCP port for the host and return port information.

        This method will re-use an existing port if one already exists. When
        a port is re-used, the fixed_ip allocation will be updated to the
        current network state. If an expected failure occurs, a None port is
        returned.
        """
        host = kwargs.get('host')
        network_id = kwargs.get('network_id')
        device_id = kwargs.get('device_id')
        # There could be more than one dhcp server per network, so create
        # a device id that combines host and network ids

        LOG.debug(_('Port %(device_id)s for %(network_id)s requested from '
                    '%(host)s'), {'device_id': device_id,
                                  'network_id': network_id,
                                  'host': host})
        plugin = manager.NeutronManager.get_plugin()
        retval = None

        filters = dict(network_id=[network_id])
        subnets = dict((s['id'], s) for s in
                       plugin.get_subnets(context, filters=filters))
        dhcp_enabled_subnet_ids = [s['id'] for s in
                                   subnets.values() if s['enable_dhcp']]

        try:
            filters = dict(network_id=[network_id], device_id=[device_id])
            ports = plugin.get_ports(context, filters=filters)
            if ports:
                # Ensure that fixed_ips cover all dhcp_enabled subnets.
                port = ports[0]
                for fixed_ip in port['fixed_ips']:
                    if fixed_ip['subnet_id'] in dhcp_enabled_subnet_ids:
                        dhcp_enabled_subnet_ids.remove(
                            fixed_ip['subnet_id'])
                port['fixed_ips'].extend(
                    [dict(subnet_id=s) for s in dhcp_enabled_subnet_ids])

                retval = plugin.update_port(context, port['id'],
                                            dict(port=port))

        except n_exc.NotFound as e:
            LOG.warning(e)

        if retval is None:
            # No previous port exists, so create a new one.
            LOG.debug(_('DHCP port %(device_id)s on network %(network_id)s '
                        'does not exist on %(host)s'),
                      {'device_id': device_id,
                       'network_id': network_id,
                       'host': host})
            try:
                network = plugin.get_network(context, network_id)
            except n_exc.NetworkNotFound:
                LOG.warn(_("Network %s could not be found, it might have "
                           "been deleted concurrently."), network_id)
                return

            port_dict = dict(
                admin_state_up=True,
                device_id=device_id,
                network_id=network_id,
                tenant_id=network['tenant_id'],
                mac_address=attributes.ATTR_NOT_SPECIFIED,
                name='',
                device_owner=constants.DEVICE_OWNER_DHCP,
                fixed_ips=[dict(subnet_id=s)
                           for s in dhcp_enabled_subnet_ids])

            retval = self._port_action(plugin, context, {'port': port_dict},
                                       'create_port')
            if not retval:
                return

        # Convert subnet_id to subnet dict
        for fixed_ip in retval['fixed_ips']:
            subnet_id = fixed_ip.pop('subnet_id')
            fixed_ip['subnet'] = subnets[subnet_id]

        return retval

    def release_dhcp_port(self, context, **kwargs):
        """Release the port currently being used by a DHCP agent."""
        host = kwargs.get('host')
        network_id = kwargs.get('network_id')
        device_id = kwargs.get('device_id')

        LOG.debug(_('DHCP port deletion for %(network_id)s request from '
                    '%(host)s'),
                  {'network_id': network_id, 'host': host})
        plugin = manager.NeutronManager.get_plugin()
        plugin.delete_ports_by_device_id(context, device_id, network_id)

    def release_port_fixed_ip(self, context, **kwargs):
        """Release the fixed_ip associated the subnet on a port."""
        host = kwargs.get('host')
        network_id = kwargs.get('network_id')
        device_id = kwargs.get('device_id')
        subnet_id = kwargs.get('subnet_id')

        LOG.debug(_('DHCP port remove fixed_ip for %(subnet_id)s request '
                    'from %(host)s'),
                  {'subnet_id': subnet_id, 'host': host})
        plugin = manager.NeutronManager.get_plugin()
        filters = dict(network_id=[network_id], device_id=[device_id])
        ports = plugin.get_ports(context, filters=filters)

        if ports:
            port = ports[0]
            fixed_ips = port.get('fixed_ips', [])
            # Drop (at most) the first fixed_ip on the given subnet.
            for idx, fixed_ip in enumerate(fixed_ips):
                if fixed_ip['subnet_id'] == subnet_id:
                    del fixed_ips[idx]
                    break
            plugin.update_port(context, port['id'], dict(port=port))

    def update_lease_expiration(self, context, **kwargs):
        """Release the fixed_ip associated the subnet on a port."""
        # NOTE(arosen): This method is no longer used by the DHCP agent but
        # is left so that neutron-dhcp-agents will still continue to work if
        # neutron-server is upgraded and not the agent.
        host = kwargs.get('host')

        LOG.warning(_('Updating lease expiration is now deprecated. Issued '
                      'from host %s.'), host)

    def create_dhcp_port(self, context, **kwargs):
        """Create and return dhcp port information.

        If an expected failure occurs, a None port is returned.
        """
        host = kwargs.get('host')
        port = kwargs.get('port')
        LOG.debug(_('Create dhcp port %(port)s '
                    'from %(host)s.'),
                  {'port': port,
                   'host': host})

        port['port']['device_owner'] = constants.DEVICE_OWNER_DHCP
        port['port'][portbindings.HOST_ID] = host
        if 'mac_address' not in port['port']:
            port['port']['mac_address'] = attributes.ATTR_NOT_SPECIFIED
        plugin = manager.NeutronManager.get_plugin()
        return self._port_action(plugin, context, port, 'create_port')

    def update_dhcp_port(self, context, **kwargs):
        """Update the dhcp port."""
        host = kwargs.get('host')
        port_id = kwargs.get('port_id')
        port = kwargs.get('port')
        LOG.debug(_('Update dhcp port %(port)s '
                    'from %(host)s.'),
                  {'port': port,
                   'host': host})
        plugin = manager.NeutronManager.get_plugin()
        return self._port_action(plugin, context,
                                 {'id': port_id, 'port': port},
                                 'update_port')
+ +import sqlalchemy as sa +from sqlalchemy import orm +from sqlalchemy.orm import exc +from sqlalchemy.sql import expression as expr + +from neutron.api.v2 import attributes +from neutron.common import constants as l3_constants +from neutron.common import exceptions as n_exc +from neutron.db import db_base_plugin_v2 +from neutron.db import model_base +from neutron.db import models_v2 +from neutron.extensions import external_net +from neutron import manager +from neutron.plugins.common import constants as service_constants + + +DEVICE_OWNER_ROUTER_GW = l3_constants.DEVICE_OWNER_ROUTER_GW + + +class ExternalNetwork(model_base.BASEV2): + network_id = sa.Column(sa.String(36), + sa.ForeignKey('networks.id', ondelete="CASCADE"), + primary_key=True) + + # Add a relationship to the Network model in order to instruct + # SQLAlchemy to eagerly load this association + network = orm.relationship( + models_v2.Network, + backref=orm.backref("external", lazy='joined', + uselist=False, cascade='delete')) + + +class External_net_db_mixin(object): + """Mixin class to add external network methods to db_base_plugin_v2.""" + + def _network_model_hook(self, context, original_model, query): + query = query.outerjoin(ExternalNetwork, + (original_model.id == + ExternalNetwork.network_id)) + return query + + def _network_filter_hook(self, context, original_model, conditions): + if conditions is not None and not hasattr(conditions, '__iter__'): + conditions = (conditions, ) + # Apply the external network filter only in non-admin context + if not context.is_admin and hasattr(original_model, 'tenant_id'): + conditions = expr.or_(ExternalNetwork.network_id != expr.null(), + *conditions) + return conditions + + def _network_result_filter_hook(self, query, filters): + vals = filters and filters.get(external_net.EXTERNAL, []) + if not vals: + return query + if vals[0]: + return query.filter((ExternalNetwork.network_id != expr.null())) + return query.filter((ExternalNetwork.network_id == 
expr.null())) + + # TODO(salvatore-orlando): Perform this operation without explicitly + # referring to db_base_plugin_v2, as plugins that do not extend from it + # might exist in the future + db_base_plugin_v2.NeutronDbPluginV2.register_model_query_hook( + models_v2.Network, + "external_net", + '_network_model_hook', + '_network_filter_hook', + '_network_result_filter_hook') + + def _network_is_external(self, context, net_id): + try: + context.session.query(ExternalNetwork).filter_by( + network_id=net_id).one() + return True + except exc.NoResultFound: + return False + + def _extend_network_dict_l3(self, network_res, network_db): + # Comparing with None for converting uuid into bool + network_res[external_net.EXTERNAL] = network_db.external is not None + return network_res + + # Register dict extend functions for networks + db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs( + attributes.NETWORKS, ['_extend_network_dict_l3']) + + def _process_l3_create(self, context, net_data, req_data): + external = req_data.get(external_net.EXTERNAL) + external_set = attributes.is_attr_set(external) + + if not external_set: + return + + if external: + # expects to be called within a plugin's session + context.session.add(ExternalNetwork(network_id=net_data['id'])) + net_data[external_net.EXTERNAL] = external + + def _process_l3_update(self, context, net_data, req_data): + + new_value = req_data.get(external_net.EXTERNAL) + net_id = net_data['id'] + if not attributes.is_attr_set(new_value): + return + + if net_data.get(external_net.EXTERNAL) == new_value: + return + + if new_value: + context.session.add(ExternalNetwork(network_id=net_id)) + net_data[external_net.EXTERNAL] = True + else: + # must make sure we do not have any external gateway ports + # (and thus, possible floating IPs) on this network before + # allow it to be update to external=False + port = context.session.query(models_v2.Port).filter_by( + device_owner=DEVICE_OWNER_ROUTER_GW, + 
network_id=net_data['id']).first() + if port: + raise external_net.ExternalNetworkInUse(net_id=net_id) + + context.session.query(ExternalNetwork).filter_by( + network_id=net_id).delete() + net_data[external_net.EXTERNAL] = False + + def _process_l3_delete(self, context, network_id): + l3plugin = manager.NeutronManager.get_service_plugins().get( + service_constants.L3_ROUTER_NAT) + if l3plugin: + l3plugin.delete_disassociated_floatingips(context, network_id) + + def _filter_nets_l3(self, context, nets, filters): + vals = filters and filters.get(external_net.EXTERNAL, []) + if not vals: + return nets + + ext_nets = set(en['network_id'] + for en in context.session.query(ExternalNetwork)) + if vals[0]: + return [n for n in nets if n['id'] in ext_nets] + else: + return [n for n in nets if n['id'] not in ext_nets] + + def get_external_network_id(self, context): + nets = self.get_networks(context, {external_net.EXTERNAL: [True]}) + if len(nets) > 1: + raise n_exc.TooManyExternalNetworks() + else: + return nets[0]['id'] if nets else None diff --git a/neutron/db/extradhcpopt_db.py b/neutron/db/extradhcpopt_db.py new file mode 100644 index 000000000..4693457f7 --- /dev/null +++ b/neutron/db/extradhcpopt_db.py @@ -0,0 +1,127 @@ +# Copyright (c) 2013 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# +# @author: Don Kehn, dekehn@gmail.com +# +import sqlalchemy as sa +from sqlalchemy import orm + +from neutron.api.v2 import attributes +from neutron.db import db_base_plugin_v2 +from neutron.db import model_base +from neutron.db import models_v2 +from neutron.extensions import extra_dhcp_opt as edo_ext +from neutron.openstack.common import log as logging + +LOG = logging.getLogger(__name__) + + +class ExtraDhcpOpt(model_base.BASEV2, models_v2.HasId): + """Represent a generic concept of extra options associated to a port. + + Each port may have none to many dhcp opts associated to it that can + define specifically different or extra options to DHCP clients. + These will be written to the /opts files, and each option's + tag will be referenced in the /host file. + """ + port_id = sa.Column(sa.String(36), + sa.ForeignKey('ports.id', ondelete="CASCADE"), + nullable=False) + opt_name = sa.Column(sa.String(64), nullable=False) + opt_value = sa.Column(sa.String(255), nullable=False) + __table_args__ = (sa.UniqueConstraint('port_id', + 'opt_name', + name='uidx_portid_optname'), + model_base.BASEV2.__table_args__,) + + # Add a relationship to the Port model in order to instruct SQLAlchemy to + # eagerly load extra_dhcp_opts bindings + ports = orm.relationship( + models_v2.Port, + backref=orm.backref("dhcp_opts", lazy='joined', cascade='delete')) + + +class ExtraDhcpOptMixin(object): + """Mixin class to add extra options to the DHCP opts file + and associate them to a port. 
+ """ + def _process_port_create_extra_dhcp_opts(self, context, port, + extra_dhcp_opts): + if not extra_dhcp_opts: + return port + with context.session.begin(subtransactions=True): + for dopt in extra_dhcp_opts: + if dopt['opt_value']: + db = ExtraDhcpOpt( + port_id=port['id'], + opt_name=dopt['opt_name'], + opt_value=dopt['opt_value']) + context.session.add(db) + return self._extend_port_extra_dhcp_opts_dict(context, port) + + def _extend_port_extra_dhcp_opts_dict(self, context, port): + port[edo_ext.EXTRADHCPOPTS] = self._get_port_extra_dhcp_opts_binding( + context, port['id']) + + def _get_port_extra_dhcp_opts_binding(self, context, port_id): + query = self._model_query(context, ExtraDhcpOpt) + binding = query.filter(ExtraDhcpOpt.port_id == port_id) + return [{'opt_name': r.opt_name, 'opt_value': r.opt_value} + for r in binding] + + def _update_extra_dhcp_opts_on_port(self, context, id, port, + updated_port=None): + # It is not necessary to update in a transaction, because + # its called from within one from ovs_neutron_plugin. + dopts = port['port'].get(edo_ext.EXTRADHCPOPTS) + + if dopts: + opt_db = self._model_query( + context, ExtraDhcpOpt).filter_by(port_id=id).all() + # if there are currently no dhcp_options associated to + # this port, Then just insert the new ones and be done. + with context.session.begin(subtransactions=True): + for upd_rec in dopts: + for opt in opt_db: + if opt['opt_name'] == upd_rec['opt_name']: + # to handle deleting of a opt from the port. 
+ if upd_rec['opt_value'] is None: + context.session.delete(opt) + elif opt['opt_value'] != upd_rec['opt_value']: + opt.update( + {'opt_value': upd_rec['opt_value']}) + break + else: + if upd_rec['opt_value'] is not None: + db = ExtraDhcpOpt( + port_id=id, + opt_name=upd_rec['opt_name'], + opt_value=upd_rec['opt_value']) + context.session.add(db) + + if updated_port: + edolist = self._get_port_extra_dhcp_opts_binding(context, id) + updated_port[edo_ext.EXTRADHCPOPTS] = edolist + + return bool(dopts) + + def _extend_port_dict_extra_dhcp_opt(self, res, port): + res[edo_ext.EXTRADHCPOPTS] = [{'opt_name': dho.opt_name, + 'opt_value': dho.opt_value} + for dho in port.dhcp_opts] + return res + + db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs( + attributes.PORTS, ['_extend_port_dict_extra_dhcp_opt']) diff --git a/neutron/db/extraroute_db.py b/neutron/db/extraroute_db.py new file mode 100644 index 000000000..c4d2ada8a --- /dev/null +++ b/neutron/db/extraroute_db.py @@ -0,0 +1,185 @@ +# Copyright 2013, Nachi Ueno, NTT MCL, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import netaddr +from oslo.config import cfg +import sqlalchemy as sa +from sqlalchemy import orm + +from neutron.common import utils +from neutron.db import db_base_plugin_v2 +from neutron.db import l3_db +from neutron.db import model_base +from neutron.db import models_v2 +from neutron.extensions import extraroute +from neutron.extensions import l3 +from neutron.openstack.common import log as logging + + +LOG = logging.getLogger(__name__) + +extra_route_opts = [ + #TODO(nati): use quota framework when it support quota for attributes + cfg.IntOpt('max_routes', default=30, + help=_("Maximum number of routes")), +] + +cfg.CONF.register_opts(extra_route_opts) + + +class RouterRoute(model_base.BASEV2, models_v2.Route): + router_id = sa.Column(sa.String(36), + sa.ForeignKey('routers.id', + ondelete="CASCADE"), + primary_key=True) + + router = orm.relationship(l3_db.Router, + backref=orm.backref("route_list", + lazy='joined', + cascade='delete')) + + +class ExtraRoute_db_mixin(l3_db.L3_NAT_db_mixin): + """Mixin class to support extra route configuration on router.""" + + def _extend_router_dict_extraroute(self, router_res, router_db): + router_res['routes'] = (ExtraRoute_db_mixin. 
+ _make_extra_route_list( + router_db['route_list'] + )) + + db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs( + l3.ROUTERS, ['_extend_router_dict_extraroute']) + + def update_router(self, context, id, router): + r = router['router'] + with context.session.begin(subtransactions=True): + #check if route exists and have permission to access + router_db = self._get_router(context, id) + if 'routes' in r: + self._update_extra_routes(context, router_db, r['routes']) + routes = self._get_extra_routes_by_router_id(context, id) + router_updated = super(ExtraRoute_db_mixin, self).update_router( + context, id, router) + router_updated['routes'] = routes + + return router_updated + + def _get_subnets_by_cidr(self, context, cidr): + query_subnets = context.session.query(models_v2.Subnet) + return query_subnets.filter_by(cidr=cidr).all() + + def _validate_routes_nexthop(self, cidrs, ips, routes, nexthop): + #Note(nati): Nexthop should be connected, + # so we need to check + # nexthop belongs to one of cidrs of the router ports + if not netaddr.all_matching_cidrs(nexthop, cidrs): + raise extraroute.InvalidRoutes( + routes=routes, + reason=_('the nexthop is not connected with router')) + #Note(nati) nexthop should not be same as fixed_ips + if nexthop in ips: + raise extraroute.InvalidRoutes( + routes=routes, + reason=_('the nexthop is used by router')) + + def _validate_routes(self, context, + router_id, routes): + if len(routes) > cfg.CONF.max_routes: + raise extraroute.RoutesExhausted( + router_id=router_id, + quota=cfg.CONF.max_routes) + + filters = {'device_id': [router_id]} + ports = self._core_plugin.get_ports(context, filters) + cidrs = [] + ips = [] + for port in ports: + for ip in port['fixed_ips']: + cidrs.append(self._core_plugin._get_subnet( + context, ip['subnet_id'])['cidr']) + ips.append(ip['ip_address']) + for route in routes: + self._validate_routes_nexthop( + cidrs, ips, routes, route['nexthop']) + + def _update_extra_routes(self, context, router, 
routes): + self._validate_routes(context, router['id'], + routes) + old_routes, routes_dict = self._get_extra_routes_dict_by_router_id( + context, router['id']) + added, removed = utils.diff_list_of_dict(old_routes, + routes) + LOG.debug(_('Added routes are %s'), added) + for route in added: + router_routes = RouterRoute( + router_id=router['id'], + destination=route['destination'], + nexthop=route['nexthop']) + context.session.add(router_routes) + + LOG.debug(_('Removed routes are %s'), removed) + for route in removed: + context.session.delete( + routes_dict[(route['destination'], route['nexthop'])]) + + @staticmethod + def _make_extra_route_list(extra_routes): + return [{'destination': route['destination'], + 'nexthop': route['nexthop']} + for route in extra_routes] + + def _get_extra_routes_by_router_id(self, context, id): + query = context.session.query(RouterRoute) + query = query.filter_by(router_id=id) + return self._make_extra_route_list(query) + + def _get_extra_routes_dict_by_router_id(self, context, id): + query = context.session.query(RouterRoute) + query = query.filter_by(router_id=id) + routes = [] + routes_dict = {} + for route in query: + routes.append({'destination': route['destination'], + 'nexthop': route['nexthop']}) + routes_dict[(route['destination'], route['nexthop'])] = route + return routes, routes_dict + + def get_router(self, context, id, fields=None): + with context.session.begin(subtransactions=True): + router = super(ExtraRoute_db_mixin, self).get_router( + context, id, fields) + return router + + def get_routers(self, context, filters=None, fields=None, + sorts=None, limit=None, marker=None, + page_reverse=False): + with context.session.begin(subtransactions=True): + routers = super(ExtraRoute_db_mixin, self).get_routers( + context, filters, fields, sorts=sorts, limit=limit, + marker=marker, page_reverse=page_reverse) + return routers + + def _confirm_router_interface_not_in_use(self, context, router_id, + subnet_id): + 
super(ExtraRoute_db_mixin, self)._confirm_router_interface_not_in_use( + context, router_id, subnet_id) + subnet_db = self._core_plugin._get_subnet(context, subnet_id) + subnet_cidr = netaddr.IPNetwork(subnet_db['cidr']) + extra_routes = self._get_extra_routes_by_router_id(context, router_id) + for route in extra_routes: + if netaddr.all_matching_cidrs(route['nexthop'], [subnet_cidr]): + raise extraroute.RouterInterfaceInUseByRoute( + router_id=router_id, subnet_id=subnet_id) diff --git a/neutron/db/firewall/__init__.py b/neutron/db/firewall/__init__.py new file mode 100644 index 000000000..d1e364161 --- /dev/null +++ b/neutron/db/firewall/__init__.py @@ -0,0 +1,16 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/neutron/db/firewall/firewall_db.py b/neutron/db/firewall/firewall_db.py new file mode 100644 index 000000000..5636a8cbf --- /dev/null +++ b/neutron/db/firewall/firewall_db.py @@ -0,0 +1,481 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013 Big Switch Networks, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Sumit Naiksatam, sumitnaiksatam@gmail.com, Big Switch Networks, Inc. + +import sqlalchemy as sa +from sqlalchemy.ext.orderinglist import ordering_list +from sqlalchemy import orm +from sqlalchemy.orm import exc + +from neutron.db import db_base_plugin_v2 as base_db +from neutron.db import model_base +from neutron.db import models_v2 +from neutron.extensions import firewall +from neutron import manager +from neutron.openstack.common import log as logging +from neutron.openstack.common import uuidutils +from neutron.plugins.common import constants as const + + +LOG = logging.getLogger(__name__) + + +class FirewallRule(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant): + """Represents a Firewall rule.""" + __tablename__ = 'firewall_rules' + name = sa.Column(sa.String(255)) + description = sa.Column(sa.String(1024)) + firewall_policy_id = sa.Column(sa.String(36), + sa.ForeignKey('firewall_policies.id'), + nullable=True) + shared = sa.Column(sa.Boolean) + protocol = sa.Column(sa.String(40)) + ip_version = sa.Column(sa.Integer, nullable=False) + source_ip_address = sa.Column(sa.String(46)) + destination_ip_address = sa.Column(sa.String(46)) + source_port_range_min = sa.Column(sa.Integer) + source_port_range_max = sa.Column(sa.Integer) + destination_port_range_min = sa.Column(sa.Integer) + destination_port_range_max = sa.Column(sa.Integer) + action = sa.Column(sa.Enum('allow', 'deny', name='firewallrules_action')) + enabled = sa.Column(sa.Boolean) + position = sa.Column(sa.Integer) + + +class Firewall(model_base.BASEV2, models_v2.HasId, 
models_v2.HasTenant): + """Represents a Firewall resource.""" + __tablename__ = 'firewalls' + name = sa.Column(sa.String(255)) + description = sa.Column(sa.String(1024)) + shared = sa.Column(sa.Boolean) + admin_state_up = sa.Column(sa.Boolean) + status = sa.Column(sa.String(16)) + firewall_policy_id = sa.Column(sa.String(36), + sa.ForeignKey('firewall_policies.id'), + nullable=True) + + +class FirewallPolicy(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant): + """Represents a Firewall Policy resource.""" + __tablename__ = 'firewall_policies' + name = sa.Column(sa.String(255)) + description = sa.Column(sa.String(1024)) + shared = sa.Column(sa.Boolean) + firewall_rules = orm.relationship( + FirewallRule, + backref=orm.backref('firewall_policies', cascade='all, delete'), + order_by='FirewallRule.position', + collection_class=ordering_list('position', count_from=1)) + audited = sa.Column(sa.Boolean) + firewalls = orm.relationship(Firewall, backref='firewall_policies') + + +class Firewall_db_mixin(firewall.FirewallPluginBase, base_db.CommonDbMixin): + """Mixin class for Firewall DB implementation.""" + + @property + def _core_plugin(self): + return manager.NeutronManager.get_plugin() + + def _get_firewall(self, context, id): + try: + return self._get_by_id(context, Firewall, id) + except exc.NoResultFound: + raise firewall.FirewallNotFound(firewall_id=id) + + def _get_firewall_policy(self, context, id): + try: + return self._get_by_id(context, FirewallPolicy, id) + except exc.NoResultFound: + raise firewall.FirewallPolicyNotFound(firewall_policy_id=id) + + def _get_firewall_rule(self, context, id): + try: + return self._get_by_id(context, FirewallRule, id) + except exc.NoResultFound: + raise firewall.FirewallRuleNotFound(firewall_rule_id=id) + + def _make_firewall_dict(self, fw, fields=None): + res = {'id': fw['id'], + 'tenant_id': fw['tenant_id'], + 'name': fw['name'], + 'description': fw['description'], + 'shared': fw['shared'], + 'admin_state_up': 
fw['admin_state_up'], + 'status': fw['status'], + 'firewall_policy_id': fw['firewall_policy_id']} + return self._fields(res, fields) + + def _make_firewall_policy_dict(self, firewall_policy, fields=None): + fw_rules = [rule['id'] for rule in firewall_policy['firewall_rules']] + firewalls = [fw['id'] for fw in firewall_policy['firewalls']] + res = {'id': firewall_policy['id'], + 'tenant_id': firewall_policy['tenant_id'], + 'name': firewall_policy['name'], + 'description': firewall_policy['description'], + 'shared': firewall_policy['shared'], + 'audited': firewall_policy['audited'], + 'firewall_rules': fw_rules, + 'firewall_list': firewalls} + return self._fields(res, fields) + + def _make_firewall_rule_dict(self, firewall_rule, fields=None): + position = None + # We return the position only if the firewall_rule is bound to a + # firewall_policy. + if firewall_rule['firewall_policy_id']: + position = firewall_rule['position'] + src_port_range = self._get_port_range_from_min_max_ports( + firewall_rule['source_port_range_min'], + firewall_rule['source_port_range_max']) + dst_port_range = self._get_port_range_from_min_max_ports( + firewall_rule['destination_port_range_min'], + firewall_rule['destination_port_range_max']) + res = {'id': firewall_rule['id'], + 'tenant_id': firewall_rule['tenant_id'], + 'name': firewall_rule['name'], + 'description': firewall_rule['description'], + 'firewall_policy_id': firewall_rule['firewall_policy_id'], + 'shared': firewall_rule['shared'], + 'protocol': firewall_rule['protocol'], + 'ip_version': firewall_rule['ip_version'], + 'source_ip_address': firewall_rule['source_ip_address'], + 'destination_ip_address': + firewall_rule['destination_ip_address'], + 'source_port': src_port_range, + 'destination_port': dst_port_range, + 'action': firewall_rule['action'], + 'position': position, + 'enabled': firewall_rule['enabled']} + return self._fields(res, fields) + + def _set_rules_for_policy(self, context, firewall_policy_db, rule_id_list): + 
fwp_db = firewall_policy_db + with context.session.begin(subtransactions=True): + if not rule_id_list: + fwp_db.firewall_rules = [] + fwp_db.audited = False + return + # We will first check if the new list of rules is valid + filters = {'id': [r_id for r_id in rule_id_list]} + rules_in_db = self._get_collection_query(context, FirewallRule, + filters=filters) + rules_dict = dict((fwr_db['id'], fwr_db) for fwr_db in rules_in_db) + for fwrule_id in rule_id_list: + if fwrule_id not in rules_dict: + # If we find an invalid rule in the list we + # do not perform the update since this breaks + # the integrity of this list. + raise firewall.FirewallRuleNotFound(firewall_rule_id= + fwrule_id) + elif rules_dict[fwrule_id]['firewall_policy_id']: + if (rules_dict[fwrule_id]['firewall_policy_id'] != + fwp_db['id']): + raise firewall.FirewallRuleInUse( + firewall_rule_id=fwrule_id) + # New list of rules is valid so we will first reset the existing + # list and then add each rule in order. + # Note that the list could be empty in which case we interpret + # it as clearing existing rules. + fwp_db.firewall_rules = [] + for fwrule_id in rule_id_list: + fwp_db.firewall_rules.append(rules_dict[fwrule_id]) + fwp_db.firewall_rules.reorder() + fwp_db.audited = False + + def _process_rule_for_policy(self, context, firewall_policy_id, + firewall_rule_db, position): + with context.session.begin(subtransactions=True): + fwp_query = context.session.query( + FirewallPolicy).with_lockmode('update') + fwp_db = fwp_query.filter_by(id=firewall_policy_id).one() + if position: + # Note that although position numbering starts at 1, + # internal ordering of the list starts at 0, so we compensate. 
+ fwp_db.firewall_rules.insert(position - 1, firewall_rule_db) + else: + fwp_db.firewall_rules.remove(firewall_rule_db) + fwp_db.firewall_rules.reorder() + fwp_db.audited = False + return self._make_firewall_policy_dict(fwp_db) + + def _get_min_max_ports_from_range(self, port_range): + if not port_range: + return [None, None] + min_port, sep, max_port = port_range.partition(":") + if not max_port: + max_port = min_port + return [int(min_port), int(max_port)] + + def _get_port_range_from_min_max_ports(self, min_port, max_port): + if not min_port: + return None + if min_port == max_port: + return str(min_port) + else: + return '%d:%d' % (min_port, max_port) + + def _validate_fwr_protocol_parameters(self, fwr): + protocol = fwr['protocol'] + if protocol not in (const.TCP, const.UDP): + if fwr['source_port'] or fwr['destination_port']: + raise firewall.FirewallRuleInvalidICMPParameter( + param="Source, destination port") + + def create_firewall(self, context, firewall): + LOG.debug(_("create_firewall() called")) + fw = firewall['firewall'] + tenant_id = self._get_tenant_id_for_create(context, fw) + with context.session.begin(subtransactions=True): + firewall_db = Firewall(id=uuidutils.generate_uuid(), + tenant_id=tenant_id, + name=fw['name'], + description=fw['description'], + firewall_policy_id= + fw['firewall_policy_id'], + admin_state_up=fw['admin_state_up'], + status=const.PENDING_CREATE) + context.session.add(firewall_db) + return self._make_firewall_dict(firewall_db) + + def update_firewall(self, context, id, firewall): + LOG.debug(_("update_firewall() called")) + fw = firewall['firewall'] + with context.session.begin(subtransactions=True): + fw_query = context.session.query( + Firewall).with_lockmode('update') + firewall_db = fw_query.filter_by(id=id).one() + firewall_db.update(fw) + return self._make_firewall_dict(firewall_db) + + def delete_firewall(self, context, id): + LOG.debug(_("delete_firewall() called")) + with 
context.session.begin(subtransactions=True): + fw_query = context.session.query( + Firewall).with_lockmode('update') + firewall_db = fw_query.filter_by(id=id).one() + # Note: Plugin should ensure that it's okay to delete if the + # firewall is active + context.session.delete(firewall_db) + + def get_firewall(self, context, id, fields=None): + LOG.debug(_("get_firewall() called")) + fw = self._get_firewall(context, id) + return self._make_firewall_dict(fw, fields) + + def get_firewalls(self, context, filters=None, fields=None): + LOG.debug(_("get_firewalls() called")) + return self._get_collection(context, Firewall, + self._make_firewall_dict, + filters=filters, fields=fields) + + def get_firewalls_count(self, context, filters=None): + LOG.debug(_("get_firewalls_count() called")) + return self._get_collection_count(context, Firewall, + filters=filters) + + def create_firewall_policy(self, context, firewall_policy): + LOG.debug(_("create_firewall_policy() called")) + fwp = firewall_policy['firewall_policy'] + tenant_id = self._get_tenant_id_for_create(context, fwp) + with context.session.begin(subtransactions=True): + fwp_db = FirewallPolicy(id=uuidutils.generate_uuid(), + tenant_id=tenant_id, + name=fwp['name'], + description=fwp['description'], + shared=fwp['shared']) + context.session.add(fwp_db) + self._set_rules_for_policy(context, fwp_db, + fwp['firewall_rules']) + fwp_db.audited = fwp['audited'] + return self._make_firewall_policy_dict(fwp_db) + + def update_firewall_policy(self, context, id, firewall_policy): + LOG.debug(_("update_firewall_policy() called")) + fwp = firewall_policy['firewall_policy'] + with context.session.begin(subtransactions=True): + fwp_db = self._get_firewall_policy(context, id) + if 'firewall_rules' in fwp: + self._set_rules_for_policy(context, fwp_db, + fwp['firewall_rules']) + del fwp['firewall_rules'] + fwp_db.update(fwp) + return self._make_firewall_policy_dict(fwp_db) + + def delete_firewall_policy(self, context, id): + 
LOG.debug(_("delete_firewall_policy() called")) + with context.session.begin(subtransactions=True): + fwp = self._get_firewall_policy(context, id) + # Ensure that the firewall_policy is not + # being used + qry = context.session.query(Firewall) + if qry.filter_by(firewall_policy_id=id).first(): + raise firewall.FirewallPolicyInUse(firewall_policy_id=id) + else: + context.session.delete(fwp) + + def get_firewall_policy(self, context, id, fields=None): + LOG.debug(_("get_firewall_policy() called")) + fwp = self._get_firewall_policy(context, id) + return self._make_firewall_policy_dict(fwp, fields) + + def get_firewall_policies(self, context, filters=None, fields=None): + LOG.debug(_("get_firewall_policies() called")) + return self._get_collection(context, FirewallPolicy, + self._make_firewall_policy_dict, + filters=filters, fields=fields) + + def get_firewalls_policies_count(self, context, filters=None): + LOG.debug(_("get_firewall_policies_count() called")) + return self._get_collection_count(context, FirewallPolicy, + filters=filters) + + def create_firewall_rule(self, context, firewall_rule): + LOG.debug(_("create_firewall_rule() called")) + fwr = firewall_rule['firewall_rule'] + self._validate_fwr_protocol_parameters(fwr) + tenant_id = self._get_tenant_id_for_create(context, fwr) + src_port_min, src_port_max = self._get_min_max_ports_from_range( + fwr['source_port']) + dst_port_min, dst_port_max = self._get_min_max_ports_from_range( + fwr['destination_port']) + with context.session.begin(subtransactions=True): + fwr_db = FirewallRule(id=uuidutils.generate_uuid(), + tenant_id=tenant_id, + name=fwr['name'], + description=fwr['description'], + shared=fwr['shared'], + protocol=fwr['protocol'], + ip_version=fwr['ip_version'], + source_ip_address=fwr['source_ip_address'], + destination_ip_address= + fwr['destination_ip_address'], + source_port_range_min=src_port_min, + source_port_range_max=src_port_max, + destination_port_range_min=dst_port_min, + 
destination_port_range_max=dst_port_max, + action=fwr['action'], + enabled=fwr['enabled']) + context.session.add(fwr_db) + return self._make_firewall_rule_dict(fwr_db) + + def update_firewall_rule(self, context, id, firewall_rule): + LOG.debug(_("update_firewall_rule() called")) + fwr = firewall_rule['firewall_rule'] + if 'source_port' in fwr: + src_port_min, src_port_max = self._get_min_max_ports_from_range( + fwr['source_port']) + fwr['source_port_range_min'] = src_port_min + fwr['source_port_range_max'] = src_port_max + del fwr['source_port'] + if 'destination_port' in fwr: + dst_port_min, dst_port_max = self._get_min_max_ports_from_range( + fwr['destination_port']) + fwr['destination_port_range_min'] = dst_port_min + fwr['destination_port_range_max'] = dst_port_max + del fwr['destination_port'] + with context.session.begin(subtransactions=True): + fwr_db = self._get_firewall_rule(context, id) + fwr_db.update(fwr) + if fwr_db.firewall_policy_id: + fwp_db = self._get_firewall_policy(context, + fwr_db.firewall_policy_id) + fwp_db.audited = False + return self._make_firewall_rule_dict(fwr_db) + + def delete_firewall_rule(self, context, id): + LOG.debug(_("delete_firewall_rule() called")) + with context.session.begin(subtransactions=True): + fwr = self._get_firewall_rule(context, id) + if fwr.firewall_policy_id: + raise firewall.FirewallRuleInUse(firewall_rule_id=id) + context.session.delete(fwr) + + def get_firewall_rule(self, context, id, fields=None): + LOG.debug(_("get_firewall_rule() called")) + fwr = self._get_firewall_rule(context, id) + return self._make_firewall_rule_dict(fwr, fields) + + def get_firewall_rules(self, context, filters=None, fields=None): + LOG.debug(_("get_firewall_rules() called")) + return self._get_collection(context, FirewallRule, + self._make_firewall_rule_dict, + filters=filters, fields=fields) + + def get_firewalls_rules_count(self, context, filters=None): + LOG.debug(_("get_firewall_rules_count() called")) + return 
self._get_collection_count(context, FirewallRule, + filters=filters) + + def _validate_insert_remove_rule_request(self, id, rule_info): + if not rule_info or 'firewall_rule_id' not in rule_info: + raise firewall.FirewallRuleInfoMissing() + + def insert_rule(self, context, id, rule_info): + LOG.debug(_("insert_rule() called")) + self._validate_insert_remove_rule_request(id, rule_info) + firewall_rule_id = rule_info['firewall_rule_id'] + insert_before = True + ref_firewall_rule_id = None + if not firewall_rule_id: + raise firewall.FirewallRuleNotFound(firewall_rule_id=None) + if 'insert_before' in rule_info: + ref_firewall_rule_id = rule_info['insert_before'] + if not ref_firewall_rule_id and 'insert_after' in rule_info: + # If insert_before is set, we will ignore insert_after. + ref_firewall_rule_id = rule_info['insert_after'] + insert_before = False + with context.session.begin(subtransactions=True): + fwr_db = self._get_firewall_rule(context, firewall_rule_id) + if fwr_db.firewall_policy_id: + raise firewall.FirewallRuleInUse(firewall_rule_id=fwr_db['id']) + if ref_firewall_rule_id: + # If reference_firewall_rule_id is set, the new rule + # is inserted depending on the value of insert_before. + # If insert_before is set, the new rule is inserted before + # reference_firewall_rule_id, and if it is not set the new + # rule is inserted after reference_firewall_rule_id. + ref_fwr_db = self._get_firewall_rule( + context, ref_firewall_rule_id) + if insert_before: + position = ref_fwr_db.position + else: + position = ref_fwr_db.position + 1 + else: + # If reference_firewall_rule_id is not set, it is assumed + # that the new rule needs to be inserted at the top. + # insert_before field is ignored. + # So default insertion is always at the top. + # Also note that position numbering starts at 1. 
+ position = 1 + return self._process_rule_for_policy(context, id, fwr_db, + position) + + def remove_rule(self, context, id, rule_info): + LOG.debug(_("remove_rule() called")) + self._validate_insert_remove_rule_request(id, rule_info) + firewall_rule_id = rule_info['firewall_rule_id'] + if not firewall_rule_id: + raise firewall.FirewallRuleNotFound(firewall_rule_id=None) + with context.session.begin(subtransactions=True): + fwr_db = self._get_firewall_rule(context, firewall_rule_id) + if fwr_db.firewall_policy_id != id: + raise firewall.FirewallRuleNotAssociatedWithPolicy( + firewall_rule_id=fwr_db['id'], + firewall_policy_id=id) + return self._process_rule_for_policy(context, id, fwr_db, None) diff --git a/neutron/db/l3_agentschedulers_db.py b/neutron/db/l3_agentschedulers_db.py new file mode 100644 index 000000000..19969cb60 --- /dev/null +++ b/neutron/db/l3_agentschedulers_db.py @@ -0,0 +1,291 @@ +# Copyright (c) 2013 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from oslo.config import cfg +import sqlalchemy as sa +from sqlalchemy import func +from sqlalchemy import orm +from sqlalchemy.orm import exc +from sqlalchemy.orm import joinedload + +from neutron.common import constants +from neutron.db import agents_db +from neutron.db import agentschedulers_db +from neutron.db import model_base +from neutron.db import models_v2 +from neutron.extensions import l3agentscheduler + + +L3_AGENTS_SCHEDULER_OPTS = [ + cfg.StrOpt('router_scheduler_driver', + default='neutron.scheduler.l3_agent_scheduler.ChanceScheduler', + help=_('Driver to use for scheduling ' + 'router to a default L3 agent')), + cfg.BoolOpt('router_auto_schedule', default=True, + help=_('Allow auto scheduling of routers to L3 agent.')), +] + +cfg.CONF.register_opts(L3_AGENTS_SCHEDULER_OPTS) + + +class RouterL3AgentBinding(model_base.BASEV2, models_v2.HasId): + """Represents binding between neutron routers and L3 agents.""" + + router_id = sa.Column(sa.String(36), + sa.ForeignKey("routers.id", ondelete='CASCADE')) + l3_agent = orm.relation(agents_db.Agent) + l3_agent_id = sa.Column(sa.String(36), + sa.ForeignKey("agents.id", + ondelete='CASCADE')) + + +class L3AgentSchedulerDbMixin(l3agentscheduler.L3AgentSchedulerPluginBase, + agentschedulers_db.AgentSchedulerDbMixin): + """Mixin class to add l3 agent scheduler extension to plugins + using the l3 agent for routing. 
+ """ + + router_scheduler = None + + def add_router_to_l3_agent(self, context, agent_id, router_id): + """Add a l3 agent to host a router.""" + router = self.get_router(context, router_id) + with context.session.begin(subtransactions=True): + agent_db = self._get_agent(context, agent_id) + if (agent_db['agent_type'] != constants.AGENT_TYPE_L3 or + not agent_db['admin_state_up'] or + not self.get_l3_agent_candidates(router, [agent_db])): + raise l3agentscheduler.InvalidL3Agent(id=agent_id) + query = context.session.query(RouterL3AgentBinding) + try: + binding = query.filter_by(router_id=router_id).one() + + raise l3agentscheduler.RouterHostedByL3Agent( + router_id=router_id, + agent_id=binding.l3_agent_id) + except exc.NoResultFound: + pass + + result = self.auto_schedule_routers(context, + agent_db.host, + [router_id]) + if not result: + raise l3agentscheduler.RouterSchedulingFailed( + router_id=router_id, agent_id=agent_id) + + l3_notifier = self.agent_notifiers.get(constants.AGENT_TYPE_L3) + if l3_notifier: + l3_notifier.router_added_to_agent( + context, [router_id], agent_db.host) + + def remove_router_from_l3_agent(self, context, agent_id, router_id): + """Remove the router from l3 agent. + + After removal, the router will be non-hosted until there is update + which leads to re-schedule or be added to another agent manually. 
+ """ + agent = self._get_agent(context, agent_id) + self._unbind_router(context, router_id, agent_id) + l3_notifier = self.agent_notifiers.get(constants.AGENT_TYPE_L3) + if l3_notifier: + l3_notifier.router_removed_from_agent( + context, router_id, agent.host) + + def _unbind_router(self, context, router_id, agent_id): + with context.session.begin(subtransactions=True): + query = context.session.query(RouterL3AgentBinding) + query = query.filter( + RouterL3AgentBinding.router_id == router_id, + RouterL3AgentBinding.l3_agent_id == agent_id) + try: + binding = query.one() + except exc.NoResultFound: + raise l3agentscheduler.RouterNotHostedByL3Agent( + router_id=router_id, agent_id=agent_id) + context.session.delete(binding) + + def reschedule_router(self, context, router_id, candidates=None): + """Reschedule router to a new l3 agent + + Remove the router from the agent(s) currently hosting it and + schedule it again + """ + cur_agents = self.list_l3_agents_hosting_router( + context, router_id)['agents'] + with context.session.begin(subtransactions=True): + for agent in cur_agents: + self._unbind_router(context, router_id, agent['id']) + + new_agent = self.schedule_router(context, router_id, + candidates=candidates) + if not new_agent: + raise l3agentscheduler.RouterReschedulingFailed( + router_id=router_id) + + l3_notifier = self.agent_notifiers.get(constants.AGENT_TYPE_L3) + if l3_notifier: + for agent in cur_agents: + l3_notifier.router_removed_from_agent( + context, router_id, agent['host']) + l3_notifier.router_added_to_agent( + context, [router_id], new_agent.host) + + def list_routers_on_l3_agent(self, context, agent_id): + query = context.session.query(RouterL3AgentBinding.router_id) + query = query.filter(RouterL3AgentBinding.l3_agent_id == agent_id) + + router_ids = [item[0] for item in query] + if router_ids: + return {'routers': + self.get_routers(context, filters={'id': router_ids})} + else: + return {'routers': []} + + def 
list_active_sync_routers_on_active_l3_agent( + self, context, host, router_ids): + agent = self._get_agent_by_type_and_host( + context, constants.AGENT_TYPE_L3, host) + if not agent.admin_state_up: + return [] + query = context.session.query(RouterL3AgentBinding.router_id) + query = query.filter( + RouterL3AgentBinding.l3_agent_id == agent.id) + + if not router_ids: + pass + else: + query = query.filter( + RouterL3AgentBinding.router_id.in_(router_ids)) + router_ids = [item[0] for item in query] + if router_ids: + return self.get_sync_data(context, router_ids=router_ids, + active=True) + else: + return [] + + def get_l3_agents_hosting_routers(self, context, router_ids, + admin_state_up=None, + active=None): + if not router_ids: + return [] + query = context.session.query(RouterL3AgentBinding) + if len(router_ids) > 1: + query = query.options(joinedload('l3_agent')).filter( + RouterL3AgentBinding.router_id.in_(router_ids)) + else: + query = query.options(joinedload('l3_agent')).filter( + RouterL3AgentBinding.router_id == router_ids[0]) + if admin_state_up is not None: + query = (query.filter(agents_db.Agent.admin_state_up == + admin_state_up)) + l3_agents = [binding.l3_agent for binding in query] + if active is not None: + l3_agents = [l3_agent for l3_agent in + l3_agents if not + agents_db.AgentDbMixin.is_agent_down( + l3_agent['heartbeat_timestamp'])] + return l3_agents + + def _get_l3_bindings_hosting_routers(self, context, router_ids): + if not router_ids: + return [] + query = context.session.query(RouterL3AgentBinding) + if len(router_ids) > 1: + query = query.options(joinedload('l3_agent')).filter( + RouterL3AgentBinding.router_id.in_(router_ids)) + else: + query = query.options(joinedload('l3_agent')).filter( + RouterL3AgentBinding.router_id == router_ids[0]) + return query.all() + + def list_l3_agents_hosting_router(self, context, router_id): + with context.session.begin(subtransactions=True): + bindings = self._get_l3_bindings_hosting_routers( + context, 
[router_id]) + results = [] + for binding in bindings: + l3_agent_dict = self._make_agent_dict(binding.l3_agent) + results.append(l3_agent_dict) + if results: + return {'agents': results} + else: + return {'agents': []} + + def get_l3_agents(self, context, active=None, filters=None): + query = context.session.query(agents_db.Agent) + query = query.filter( + agents_db.Agent.agent_type == constants.AGENT_TYPE_L3) + if active is not None: + query = (query.filter(agents_db.Agent.admin_state_up == active)) + if filters: + for key, value in filters.iteritems(): + column = getattr(agents_db.Agent, key, None) + if column: + query = query.filter(column.in_(value)) + + return [l3_agent + for l3_agent in query + if agentschedulers_db.AgentSchedulerDbMixin.is_eligible_agent( + active, l3_agent)] + + def get_l3_agent_candidates(self, sync_router, l3_agents): + """Get the valid l3 agents for the router from a list of l3_agents.""" + candidates = [] + for l3_agent in l3_agents: + if not l3_agent.admin_state_up: + continue + agent_conf = self.get_configuration_dict(l3_agent) + router_id = agent_conf.get('router_id', None) + use_namespaces = agent_conf.get('use_namespaces', True) + handle_internal_only_routers = agent_conf.get( + 'handle_internal_only_routers', True) + gateway_external_network_id = agent_conf.get( + 'gateway_external_network_id', None) + if not use_namespaces and router_id != sync_router['id']: + continue + ex_net_id = (sync_router['external_gateway_info'] or {}).get( + 'network_id') + if ((not ex_net_id and not handle_internal_only_routers) or + (ex_net_id and gateway_external_network_id and + ex_net_id != gateway_external_network_id)): + continue + candidates.append(l3_agent) + return candidates + + def auto_schedule_routers(self, context, host, router_ids): + if self.router_scheduler: + return self.router_scheduler.auto_schedule_routers( + self, context, host, router_ids) + + def schedule_router(self, context, router, candidates=None): + if 
self.router_scheduler: + return self.router_scheduler.schedule( + self, context, router, candidates) + + def schedule_routers(self, context, routers): + """Schedule the routers to l3 agents.""" + for router in routers: + self.schedule_router(context, router) + + def get_l3_agent_with_min_routers(self, context, agent_ids): + """Return l3 agent with the least number of routers.""" + query = context.session.query( + agents_db.Agent, + func.count( + RouterL3AgentBinding.router_id + ).label('count')).outerjoin(RouterL3AgentBinding).group_by( + RouterL3AgentBinding.l3_agent_id).order_by('count') + res = query.filter(agents_db.Agent.id.in_(agent_ids)).first() + return res[0] diff --git a/neutron/db/l3_db.py b/neutron/db/l3_db.py new file mode 100644 index 000000000..5d2aa6e1a --- /dev/null +++ b/neutron/db/l3_db.py @@ -0,0 +1,1039 @@ +# Copyright 2012 VMware, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import netaddr +import sqlalchemy as sa +from sqlalchemy import orm +from sqlalchemy.orm import exc + +from neutron.api.rpc.agentnotifiers import l3_rpc_agent_api +from neutron.api.v2 import attributes +from neutron.common import constants as l3_constants +from neutron.common import exceptions as n_exc +from neutron.common import rpc as n_rpc +from neutron.common import utils +from neutron.db import model_base +from neutron.db import models_v2 +from neutron.extensions import external_net +from neutron.extensions import l3 +from neutron import manager +from neutron.openstack.common import log as logging +from neutron.openstack.common import uuidutils +from neutron.plugins.common import constants + +LOG = logging.getLogger(__name__) + + +DEVICE_OWNER_ROUTER_INTF = l3_constants.DEVICE_OWNER_ROUTER_INTF +DEVICE_OWNER_ROUTER_GW = l3_constants.DEVICE_OWNER_ROUTER_GW +DEVICE_OWNER_FLOATINGIP = l3_constants.DEVICE_OWNER_FLOATINGIP +EXTERNAL_GW_INFO = l3.EXTERNAL_GW_INFO + +# Maps API field to DB column +# API parameter name and Database column names may differ. +# Useful to keep the filtering between API and Database. +API_TO_DB_COLUMN_MAP = {'port_id': 'fixed_port_id'} +CORE_ROUTER_ATTRS = ('id', 'name', 'tenant_id', 'admin_state_up', 'status') + + +class Router(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant): + """Represents a v2 neutron router.""" + + name = sa.Column(sa.String(255)) + status = sa.Column(sa.String(16)) + admin_state_up = sa.Column(sa.Boolean) + gw_port_id = sa.Column(sa.String(36), sa.ForeignKey('ports.id')) + gw_port = orm.relationship(models_v2.Port, lazy='joined') + + +class FloatingIP(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant): + """Represents a floating IP address. + + This IP address may or may not be allocated to a tenant, and may or + may not be associated with an internal port/ip address/router. 
+ """ + + floating_ip_address = sa.Column(sa.String(64), nullable=False) + floating_network_id = sa.Column(sa.String(36), nullable=False) + floating_port_id = sa.Column(sa.String(36), sa.ForeignKey('ports.id'), + nullable=False) + fixed_port_id = sa.Column(sa.String(36), sa.ForeignKey('ports.id')) + fixed_ip_address = sa.Column(sa.String(64)) + router_id = sa.Column(sa.String(36), sa.ForeignKey('routers.id')) + # Additional attribute for keeping track of the router where the floating + # ip was associated in order to be able to ensure consistency even if an + # aysnchronous backend is unavailable when the floating IP is disassociated + last_known_router_id = sa.Column(sa.String(36)) + status = sa.Column(sa.String(16)) + + +class L3_NAT_db_mixin(l3.RouterPluginBase): + """Mixin class to add L3/NAT router methods to db_base_plugin_v2.""" + + router_device_owners = ( + DEVICE_OWNER_ROUTER_INTF, + DEVICE_OWNER_ROUTER_GW, + DEVICE_OWNER_FLOATINGIP + ) + + @property + def l3_rpc_notifier(self): + if not hasattr(self, '_l3_rpc_notifier'): + self._l3_rpc_notifier = l3_rpc_agent_api.L3AgentNotifyAPI() + return self._l3_rpc_notifier + + @l3_rpc_notifier.setter + def l3_rpc_notifier(self, value): + self._l3_rpc_notifier = value + + @property + def _core_plugin(self): + return manager.NeutronManager.get_plugin() + + def _get_router(self, context, router_id): + try: + router = self._get_by_id(context, Router, router_id) + except exc.NoResultFound: + raise l3.RouterNotFound(router_id=router_id) + return router + + def _make_router_dict(self, router, fields=None, process_extensions=True): + res = dict((key, router[key]) for key in CORE_ROUTER_ATTRS) + if router['gw_port_id']: + ext_gw_info = {'network_id': router.gw_port['network_id']} + else: + ext_gw_info = None + res.update({ + EXTERNAL_GW_INFO: ext_gw_info, + 'gw_port_id': router['gw_port_id'], + }) + # NOTE(salv-orlando): The following assumes this mixin is used in a + # class inheriting from CommonDbMixin, which is true for 
all existing + # plugins. + if process_extensions: + self._apply_dict_extend_functions(l3.ROUTERS, res, router) + return self._fields(res, fields) + + def _create_router_db(self, context, router, tenant_id, gw_info): + """Create the DB object and update gw info, if available.""" + with context.session.begin(subtransactions=True): + # pre-generate id so it will be available when + # configuring external gw port + router_db = Router(id=uuidutils.generate_uuid(), + tenant_id=tenant_id, + name=router['name'], + admin_state_up=router['admin_state_up'], + status="ACTIVE") + context.session.add(router_db) + return router_db + + def create_router(self, context, router): + r = router['router'] + gw_info = r.pop(EXTERNAL_GW_INFO, None) + tenant_id = self._get_tenant_id_for_create(context, r) + with context.session.begin(subtransactions=True): + router_db = self._create_router_db(context, r, tenant_id, gw_info) + if gw_info: + self._update_router_gw_info(context, router_db['id'], gw_info) + return self._make_router_dict(router_db) + + def _update_router_db(self, context, router_id, data, gw_info): + """Update the DB object and related gw info, if available.""" + with context.session.begin(subtransactions=True): + if gw_info != attributes.ATTR_NOT_SPECIFIED: + self._update_router_gw_info(context, router_id, gw_info) + router_db = self._get_router(context, router_id) + if data: + router_db.update(data) + return router_db + + def update_router(self, context, id, router): + r = router['router'] + gw_info = r.pop(EXTERNAL_GW_INFO, attributes.ATTR_NOT_SPECIFIED) + # check whether router needs and can be rescheduled to the proper + # l3 agent (associated with given external network); + # do check before update in DB as an exception will be raised + # in case no proper l3 agent found + candidates = None + if gw_info != attributes.ATTR_NOT_SPECIFIED: + candidates = self._check_router_needs_rescheduling( + context, id, gw_info) + router_db = self._update_router_db(context, id, r, 
gw_info) + if candidates: + l3_plugin = manager.NeutronManager.get_service_plugins().get( + constants.L3_ROUTER_NAT) + l3_plugin.reschedule_router(context, id, candidates) + + self.l3_rpc_notifier.routers_updated(context, [router_db['id']]) + return self._make_router_dict(router_db) + + def _check_router_needs_rescheduling(self, context, router_id, gw_info): + """Checks whether router's l3 agent can handle the given network + + When external_network_bridge is set, each L3 agent can be associated + with at most one external network. If router's new external gateway + is on other network then the router needs to be rescheduled to the + proper l3 agent. + If external_network_bridge is not set then the agent + can support multiple external networks and rescheduling is not needed + + :return: list of candidate agents if rescheduling needed, + None otherwise; raises exception if there is no eligible l3 agent + associated with target external network + """ + # TODO(obondarev): rethink placement of this func as l3 db manager is + # not really a proper place for agent scheduling stuff + network_id = gw_info.get('network_id') if gw_info else None + if not network_id: + return + + nets = self._core_plugin.get_networks( + context, {external_net.EXTERNAL: [True]}) + # nothing to do if there is only one external network + if len(nets) <= 1: + return + + # first get plugin supporting l3 agent scheduling + # (either l3 service plugin or core_plugin) + l3_plugin = manager.NeutronManager.get_service_plugins().get( + constants.L3_ROUTER_NAT) + if (not utils.is_extension_supported( + l3_plugin, + l3_constants.L3_AGENT_SCHEDULER_EXT_ALIAS) or + l3_plugin.router_scheduler is None): + # that might mean that we are dealing with non-agent-based + # implementation of l3 services + return + + cur_agents = l3_plugin.list_l3_agents_hosting_router( + context, router_id)['agents'] + for agent in cur_agents: + ext_net_id = agent['configurations'].get( + 'gateway_external_network_id') + ext_bridge 
= agent['configurations'].get( + 'external_network_bridge', 'br-ex') + if (ext_net_id == network_id or + (not ext_net_id and not ext_bridge)): + return + + # otherwise find l3 agent with matching gateway_external_network_id + active_agents = l3_plugin.get_l3_agents(context, active=True) + router = { + 'id': router_id, + 'external_gateway_info': {'network_id': network_id} + } + candidates = l3_plugin.get_l3_agent_candidates( + router, active_agents) + if not candidates: + msg = (_('No eligible l3 agent associated with external network ' + '%s found') % network_id) + raise n_exc.BadRequest(resource='router', msg=msg) + + return candidates + + def _create_router_gw_port(self, context, router, network_id): + # Port has no 'tenant-id', as it is hidden from user + gw_port = self._core_plugin.create_port(context.elevated(), { + 'port': {'tenant_id': '', # intentionally not set + 'network_id': network_id, + 'mac_address': attributes.ATTR_NOT_SPECIFIED, + 'fixed_ips': attributes.ATTR_NOT_SPECIFIED, + 'device_id': router['id'], + 'device_owner': DEVICE_OWNER_ROUTER_GW, + 'admin_state_up': True, + 'name': ''}}) + + if not gw_port['fixed_ips']: + self._core_plugin.delete_port(context.elevated(), gw_port['id'], + l3_port_check=False) + msg = (_('No IPs available for external network %s') % + network_id) + raise n_exc.BadRequest(resource='router', msg=msg) + + with context.session.begin(subtransactions=True): + router.gw_port = self._core_plugin._get_port(context.elevated(), + gw_port['id']) + context.session.add(router) + + def _validate_gw_info(self, context, gw_port, info): + network_id = info['network_id'] if info else None + if network_id: + network_db = self._core_plugin._get_network(context, network_id) + if not network_db.external: + msg = _("Network %s is not an external network") % network_id + raise n_exc.BadRequest(resource='router', msg=msg) + return network_id + + def _delete_current_gw_port(self, context, router_id, router, new_network): + """Delete gw port, if it 
is attached to an old network.""" + is_gw_port_attached_to_existing_network = ( + router.gw_port and router.gw_port['network_id'] != new_network) + admin_ctx = context.elevated() + if is_gw_port_attached_to_existing_network: + if self.get_floatingips_count( + admin_ctx, {'router_id': [router_id]}): + raise l3.RouterExternalGatewayInUseByFloatingIp( + router_id=router_id, net_id=router.gw_port['network_id']) + with context.session.begin(subtransactions=True): + gw_port_id = router.gw_port['id'] + router.gw_port = None + context.session.add(router) + self._core_plugin.delete_port( + admin_ctx, gw_port_id, l3_port_check=False) + + def _create_gw_port(self, context, router_id, router, new_network): + new_valid_gw_port_attachment = ( + new_network and (not router.gw_port or + router.gw_port['network_id'] != new_network)) + if new_valid_gw_port_attachment: + subnets = self._core_plugin._get_subnets_by_network(context, + new_network) + for subnet in subnets: + self._check_for_dup_router_subnet(context, router_id, + new_network, subnet['id'], + subnet['cidr']) + self._create_router_gw_port(context, router, new_network) + + def _update_router_gw_info(self, context, router_id, info, router=None): + # TODO(salvatore-orlando): guarantee atomic behavior also across + # operations that span beyond the model classes handled by this + # class (e.g.: delete_port) + router = router or self._get_router(context, router_id) + gw_port = router.gw_port + network_id = self._validate_gw_info(context, gw_port, info) + self._delete_current_gw_port(context, router_id, router, network_id) + self._create_gw_port(context, router_id, router, network_id) + + def _ensure_router_not_in_use(self, context, router_id): + admin_ctx = context.elevated() + router = self._get_router(context, router_id) + if self.get_floatingips_count( + admin_ctx, filters={'router_id': [router_id]}): + raise l3.RouterInUse(router_id=router_id) + device_owner = self._get_device_owner(context, router) + device_filter = 
{'device_id': [router_id], + 'device_owner': [device_owner]} + port_count = self._core_plugin.get_ports_count( + admin_ctx, filters=device_filter) + if port_count: + raise l3.RouterInUse(router_id=router_id) + return router + + def delete_router(self, context, id): + with context.session.begin(subtransactions=True): + router = self._ensure_router_not_in_use(context, id) + + #TODO(nati) Refactor here when we have router insertion model + vpnservice = manager.NeutronManager.get_service_plugins().get( + constants.VPN) + if vpnservice: + vpnservice.check_router_in_use(context, id) + + context.session.delete(router) + + # Delete the gw port after the router has been removed to + # avoid a constraint violation. + device_filter = {'device_id': [id], + 'device_owner': [DEVICE_OWNER_ROUTER_GW]} + ports = self._core_plugin.get_ports(context.elevated(), + filters=device_filter) + if ports: + self._core_plugin._delete_port(context.elevated(), + ports[0]['id']) + + self.l3_rpc_notifier.router_deleted(context, id) + + def get_router(self, context, id, fields=None): + router = self._get_router(context, id) + return self._make_router_dict(router, fields) + + def get_routers(self, context, filters=None, fields=None, + sorts=None, limit=None, marker=None, + page_reverse=False): + marker_obj = self._get_marker_obj(context, 'router', limit, marker) + return self._get_collection(context, Router, + self._make_router_dict, + filters=filters, fields=fields, + sorts=sorts, + limit=limit, + marker_obj=marker_obj, + page_reverse=page_reverse) + + def get_routers_count(self, context, filters=None): + return self._get_collection_count(context, Router, + filters=filters) + + def _check_for_dup_router_subnet(self, context, router_id, + network_id, subnet_id, subnet_cidr): + try: + rport_qry = context.session.query(models_v2.Port) + rports = rport_qry.filter_by(device_id=router_id) + # It's possible these ports are on the same network, but + # different subnets. 
+ new_ipnet = netaddr.IPNetwork(subnet_cidr) + for p in rports: + for ip in p['fixed_ips']: + if ip['subnet_id'] == subnet_id: + msg = (_("Router already has a port on subnet %s") + % subnet_id) + raise n_exc.BadRequest(resource='router', msg=msg) + sub_id = ip['subnet_id'] + cidr = self._core_plugin._get_subnet(context.elevated(), + sub_id)['cidr'] + ipnet = netaddr.IPNetwork(cidr) + match1 = netaddr.all_matching_cidrs(new_ipnet, [cidr]) + match2 = netaddr.all_matching_cidrs(ipnet, [subnet_cidr]) + if match1 or match2: + data = {'subnet_cidr': subnet_cidr, + 'subnet_id': subnet_id, + 'cidr': cidr, + 'sub_id': sub_id} + msg = (_("Cidr %(subnet_cidr)s of subnet " + "%(subnet_id)s overlaps with cidr %(cidr)s " + "of subnet %(sub_id)s") % data) + raise n_exc.BadRequest(resource='router', msg=msg) + except exc.NoResultFound: + pass + + def _get_device_owner(self, context, router=None): + """Get device_owner for the specified router.""" + # NOTE(armando-migliaccio): in the base case this is invariant + return DEVICE_OWNER_ROUTER_INTF + + def _validate_interface_info(self, interface_info): + if not interface_info: + msg = _("Either subnet_id or port_id must be specified") + raise n_exc.BadRequest(resource='router', msg=msg) + port_id_specified = 'port_id' in interface_info + subnet_id_specified = 'subnet_id' in interface_info + if port_id_specified and subnet_id_specified: + msg = _("Cannot specify both subnet-id and port-id") + raise n_exc.BadRequest(resource='router', msg=msg) + return port_id_specified, subnet_id_specified + + def _add_interface_by_port(self, context, router_id, port_id, owner): + with context.session.begin(subtransactions=True): + port = self._core_plugin._get_port(context, port_id) + if port['device_id']: + raise n_exc.PortInUse(net_id=port['network_id'], + port_id=port['id'], + device_id=port['device_id']) + fixed_ips = [ip for ip in port['fixed_ips']] + if len(fixed_ips) != 1: + msg = _('Router port must have exactly one fixed IP') + raise 
n_exc.BadRequest(resource='router', msg=msg) + subnet_id = fixed_ips[0]['subnet_id'] + subnet = self._core_plugin._get_subnet(context, subnet_id) + self._check_for_dup_router_subnet(context, router_id, + port['network_id'], + subnet['id'], + subnet['cidr']) + port.update({'device_id': router_id, 'device_owner': owner}) + return port + + def _add_interface_by_subnet(self, context, router_id, subnet_id, owner): + subnet = self._core_plugin._get_subnet(context, subnet_id) + if not subnet['gateway_ip']: + msg = _('Subnet for router interface must have a gateway IP') + raise n_exc.BadRequest(resource='router', msg=msg) + self._check_for_dup_router_subnet(context, router_id, + subnet['network_id'], + subnet_id, + subnet['cidr']) + fixed_ip = {'ip_address': subnet['gateway_ip'], + 'subnet_id': subnet['id']} + return self._core_plugin.create_port(context, { + 'port': + {'tenant_id': subnet['tenant_id'], + 'network_id': subnet['network_id'], + 'fixed_ips': [fixed_ip], + 'mac_address': attributes.ATTR_NOT_SPECIFIED, + 'admin_state_up': True, + 'device_id': router_id, + 'device_owner': owner, + 'name': ''}}) + + def add_router_interface(self, context, router_id, interface_info): + add_by_port, add_by_sub = self._validate_interface_info(interface_info) + device_owner = self._get_device_owner(context, router_id) + + if add_by_port: + port = self._add_interface_by_port( + context, router_id, interface_info['port_id'], device_owner) + elif add_by_sub: + port = self._add_interface_by_subnet( + context, router_id, interface_info['subnet_id'], device_owner) + + self.l3_rpc_notifier.routers_updated( + context, [router_id], 'add_router_interface') + info = {'id': router_id, + 'tenant_id': port['tenant_id'], + 'port_id': port['id'], + 'subnet_id': port['fixed_ips'][0]['subnet_id']} + notifier = n_rpc.get_notifier('network') + notifier.info( + context, 'router.interface.create', {'router_interface': info}) + return info + + def _confirm_router_interface_not_in_use(self, context, 
router_id, + subnet_id): + subnet_db = self._core_plugin._get_subnet(context, subnet_id) + subnet_cidr = netaddr.IPNetwork(subnet_db['cidr']) + fip_qry = context.session.query(FloatingIP) + for fip_db in fip_qry.filter_by(router_id=router_id): + if netaddr.IPAddress(fip_db['fixed_ip_address']) in subnet_cidr: + raise l3.RouterInterfaceInUseByFloatingIP( + router_id=router_id, subnet_id=subnet_id) + + def _remove_interface_by_port(self, context, router_id, + port_id, subnet_id, owner): + port_db = self._core_plugin._get_port(context, port_id) + if not (port_db['device_owner'] == owner and + port_db['device_id'] == router_id): + raise l3.RouterInterfaceNotFound(router_id=router_id, + port_id=port_id) + port_subnet_id = port_db['fixed_ips'][0]['subnet_id'] + if subnet_id and port_subnet_id != subnet_id: + raise n_exc.SubnetMismatchForPort( + port_id=port_id, subnet_id=subnet_id) + subnet = self._core_plugin._get_subnet(context, port_subnet_id) + self._confirm_router_interface_not_in_use( + context, router_id, port_subnet_id) + self._core_plugin.delete_port(context, port_db['id'], + l3_port_check=False) + return (port_db, subnet) + + def _remove_interface_by_subnet(self, context, + router_id, subnet_id, owner): + self._confirm_router_interface_not_in_use( + context, router_id, subnet_id) + subnet = self._core_plugin._get_subnet(context, subnet_id) + + try: + rport_qry = context.session.query(models_v2.Port) + ports = rport_qry.filter_by( + device_id=router_id, + device_owner=owner, + network_id=subnet['network_id']) + + for p in ports: + if p['fixed_ips'][0]['subnet_id'] == subnet_id: + self._core_plugin.delete_port(context, p['id'], + l3_port_check=False) + return (p, subnet) + except exc.NoResultFound: + pass + raise l3.RouterInterfaceNotFoundForSubnet(router_id=router_id, + subnet_id=subnet_id) + + def remove_router_interface(self, context, router_id, interface_info): + if not interface_info: + msg = _("Either subnet_id or port_id must be specified") + raise 
n_exc.BadRequest(resource='router', msg=msg) + port_id = interface_info.get('port_id') + subnet_id = interface_info.get('subnet_id') + device_owner = self._get_device_owner(context, router_id) + if port_id: + port, subnet = self._remove_interface_by_port(context, router_id, + port_id, subnet_id, + device_owner) + elif subnet_id: + port, subnet = self._remove_interface_by_subnet( + context, router_id, subnet_id, device_owner) + + self.l3_rpc_notifier.routers_updated( + context, [router_id], 'remove_router_interface') + info = {'id': router_id, + 'tenant_id': port['tenant_id'], + 'port_id': port['id'], + 'subnet_id': subnet['id']} + notifier = n_rpc.get_notifier('network') + notifier.info( + context, 'router.interface.delete', {'router_interface': info}) + return info + + def _get_floatingip(self, context, id): + try: + floatingip = self._get_by_id(context, FloatingIP, id) + except exc.NoResultFound: + raise l3.FloatingIPNotFound(floatingip_id=id) + return floatingip + + def _make_floatingip_dict(self, floatingip, fields=None): + res = {'id': floatingip['id'], + 'tenant_id': floatingip['tenant_id'], + 'floating_ip_address': floatingip['floating_ip_address'], + 'floating_network_id': floatingip['floating_network_id'], + 'router_id': floatingip['router_id'], + 'port_id': floatingip['fixed_port_id'], + 'fixed_ip_address': floatingip['fixed_ip_address'], + 'status': floatingip['status']} + return self._fields(res, fields) + + def _get_interface_ports_for_network(self, context, network_id): + router_intf_qry = context.session.query(models_v2.Port) + return router_intf_qry.filter_by( + network_id=network_id, + device_owner=DEVICE_OWNER_ROUTER_INTF) + + def _get_router_for_floatingip(self, context, internal_port, + internal_subnet_id, + external_network_id): + subnet_db = self._core_plugin._get_subnet(context, + internal_subnet_id) + if not subnet_db['gateway_ip']: + msg = (_('Cannot add floating IP to port on subnet %s ' + 'which has no gateway_ip') % internal_subnet_id) + 
raise n_exc.BadRequest(resource='floatingip', msg=msg) + + router_intf_ports = self._get_interface_ports_for_network( + context, internal_port['network_id']) + + for intf_p in router_intf_ports: + if intf_p['fixed_ips'][0]['subnet_id'] == internal_subnet_id: + router_id = intf_p['device_id'] + router_gw_qry = context.session.query(models_v2.Port) + has_gw_port = router_gw_qry.filter_by( + network_id=external_network_id, + device_id=router_id, + device_owner=DEVICE_OWNER_ROUTER_GW).count() + if has_gw_port: + return router_id + + raise l3.ExternalGatewayForFloatingIPNotFound( + subnet_id=internal_subnet_id, + external_network_id=external_network_id, + port_id=internal_port['id']) + + def _internal_fip_assoc_data(self, context, fip): + """Retrieve internal port data for floating IP. + + Retrieve information concerning the internal port where + the floating IP should be associated to. + """ + internal_port = self._core_plugin._get_port(context, fip['port_id']) + if not internal_port['tenant_id'] == fip['tenant_id']: + port_id = fip['port_id'] + if 'id' in fip: + floatingip_id = fip['id'] + data = {'port_id': port_id, + 'floatingip_id': floatingip_id} + msg = (_('Port %(port_id)s is associated with a different ' + 'tenant than Floating IP %(floatingip_id)s and ' + 'therefore cannot be bound.') % data) + else: + msg = (_('Cannot create floating IP and bind it to ' + 'Port %s, since that port is owned by a ' + 'different tenant.') % port_id) + raise n_exc.BadRequest(resource='floatingip', msg=msg) + + internal_subnet_id = None + if 'fixed_ip_address' in fip and fip['fixed_ip_address']: + internal_ip_address = fip['fixed_ip_address'] + for ip in internal_port['fixed_ips']: + if ip['ip_address'] == internal_ip_address: + internal_subnet_id = ip['subnet_id'] + if not internal_subnet_id: + msg = (_('Port %(id)s does not have fixed ip %(address)s') % + {'id': internal_port['id'], + 'address': internal_ip_address}) + raise n_exc.BadRequest(resource='floatingip', msg=msg) + 
else: + ips = [ip['ip_address'] for ip in internal_port['fixed_ips']] + if not ips: + msg = (_('Cannot add floating IP to port %s that has' + 'no fixed IP addresses') % internal_port['id']) + raise n_exc.BadRequest(resource='floatingip', msg=msg) + if len(ips) > 1: + msg = (_('Port %s has multiple fixed IPs. Must provide' + ' a specific IP when assigning a floating IP') % + internal_port['id']) + raise n_exc.BadRequest(resource='floatingip', msg=msg) + internal_ip_address = internal_port['fixed_ips'][0]['ip_address'] + internal_subnet_id = internal_port['fixed_ips'][0]['subnet_id'] + return internal_port, internal_subnet_id, internal_ip_address + + def get_assoc_data(self, context, fip, floating_network_id): + """Determine/extract data associated with the internal port. + + When a floating IP is associated with an internal port, + we need to extract/determine some data associated with the + internal port, including the internal_ip_address, and router_id. + We also need to confirm that this internal port is owned by the + tenant who owns the floating IP. 
+ """ + (internal_port, internal_subnet_id, + internal_ip_address) = self._internal_fip_assoc_data(context, fip) + router_id = self._get_router_for_floatingip(context, + internal_port, + internal_subnet_id, + floating_network_id) + # confirm that this router has a floating + # ip enabled gateway with support for this floating IP network + try: + port_qry = context.elevated().session.query(models_v2.Port) + port_qry.filter_by( + network_id=floating_network_id, + device_id=router_id, + device_owner=DEVICE_OWNER_ROUTER_GW).one() + except exc.NoResultFound: + raise l3.ExternalGatewayForFloatingIPNotFound( + subnet_id=internal_subnet_id, + port_id=internal_port['id']) + + return (fip['port_id'], internal_ip_address, router_id) + + def _update_fip_assoc(self, context, fip, floatingip_db, external_port): + previous_router_id = floatingip_db.router_id + port_id = internal_ip_address = router_id = None + if (('fixed_ip_address' in fip and fip['fixed_ip_address']) and + not ('port_id' in fip and fip['port_id'])): + msg = _("fixed_ip_address cannot be specified without a port_id") + raise n_exc.BadRequest(resource='floatingip', msg=msg) + if 'port_id' in fip and fip['port_id']: + port_id, internal_ip_address, router_id = self.get_assoc_data( + context, + fip, + floatingip_db['floating_network_id']) + fip_qry = context.session.query(FloatingIP) + try: + fip_qry.filter_by( + fixed_port_id=fip['port_id'], + floating_network_id=floatingip_db['floating_network_id'], + fixed_ip_address=internal_ip_address).one() + raise l3.FloatingIPPortAlreadyAssociated( + port_id=fip['port_id'], + fip_id=floatingip_db['id'], + floating_ip_address=floatingip_db['floating_ip_address'], + fixed_ip=internal_ip_address, + net_id=floatingip_db['floating_network_id']) + except exc.NoResultFound: + pass + floatingip_db.update({'fixed_ip_address': internal_ip_address, + 'fixed_port_id': port_id, + 'router_id': router_id, + 'last_known_router_id': previous_router_id}) + + def create_floatingip( + self, 
context, floatingip, + initial_status=l3_constants.FLOATINGIP_STATUS_ACTIVE): + fip = floatingip['floatingip'] + tenant_id = self._get_tenant_id_for_create(context, fip) + fip_id = uuidutils.generate_uuid() + + f_net_id = fip['floating_network_id'] + if not self._core_plugin._network_is_external(context, f_net_id): + msg = _("Network %s is not a valid external network") % f_net_id + raise n_exc.BadRequest(resource='floatingip', msg=msg) + + with context.session.begin(subtransactions=True): + # This external port is never exposed to the tenant. + # it is used purely for internal system and admin use when + # managing floating IPs. + external_port = self._core_plugin.create_port(context.elevated(), { + 'port': + {'tenant_id': '', # tenant intentionally not set + 'network_id': f_net_id, + 'mac_address': attributes.ATTR_NOT_SPECIFIED, + 'fixed_ips': attributes.ATTR_NOT_SPECIFIED, + 'admin_state_up': True, + 'device_id': fip_id, + 'device_owner': DEVICE_OWNER_FLOATINGIP, + 'name': ''}}) + # Ensure IP addresses are allocated on external port + if not external_port['fixed_ips']: + raise n_exc.ExternalIpAddressExhausted(net_id=f_net_id) + + floating_fixed_ip = external_port['fixed_ips'][0] + floating_ip_address = floating_fixed_ip['ip_address'] + floatingip_db = FloatingIP( + id=fip_id, + tenant_id=tenant_id, + status=initial_status, + floating_network_id=fip['floating_network_id'], + floating_ip_address=floating_ip_address, + floating_port_id=external_port['id']) + fip['tenant_id'] = tenant_id + # Update association with internal port + # and define external IP address + self._update_fip_assoc(context, fip, + floatingip_db, external_port) + context.session.add(floatingip_db) + + router_id = floatingip_db['router_id'] + if router_id: + self.l3_rpc_notifier.routers_updated( + context, [router_id], + 'create_floatingip') + return self._make_floatingip_dict(floatingip_db) + + def update_floatingip(self, context, id, floatingip): + fip = floatingip['floatingip'] + with 
context.session.begin(subtransactions=True): + floatingip_db = self._get_floatingip(context, id) + fip['tenant_id'] = floatingip_db['tenant_id'] + fip['id'] = id + fip_port_id = floatingip_db['floating_port_id'] + before_router_id = floatingip_db['router_id'] + self._update_fip_assoc(context, fip, floatingip_db, + self._core_plugin.get_port( + context.elevated(), fip_port_id)) + router_ids = [] + if before_router_id: + router_ids.append(before_router_id) + router_id = floatingip_db['router_id'] + if router_id and router_id != before_router_id: + router_ids.append(router_id) + if router_ids: + self.l3_rpc_notifier.routers_updated( + context, router_ids, 'update_floatingip') + return self._make_floatingip_dict(floatingip_db) + + def update_floatingip_status(self, context, floatingip_id, status): + """Update operational status for floating IP in neutron DB.""" + fip_query = self._model_query(context, FloatingIP).filter( + FloatingIP.id == floatingip_id) + fip_query.update({'status': status}, synchronize_session=False) + + def delete_floatingip(self, context, id): + floatingip = self._get_floatingip(context, id) + router_id = floatingip['router_id'] + with context.session.begin(subtransactions=True): + context.session.delete(floatingip) + self._core_plugin.delete_port(context.elevated(), + floatingip['floating_port_id'], + l3_port_check=False) + if router_id: + self.l3_rpc_notifier.routers_updated( + context, [router_id], + 'delete_floatingip') + + def get_floatingip(self, context, id, fields=None): + floatingip = self._get_floatingip(context, id) + return self._make_floatingip_dict(floatingip, fields) + + def get_floatingips(self, context, filters=None, fields=None, + sorts=None, limit=None, marker=None, + page_reverse=False): + marker_obj = self._get_marker_obj(context, 'floatingip', limit, + marker) + if filters is not None: + for key, val in API_TO_DB_COLUMN_MAP.iteritems(): + if key in filters: + filters[val] = filters.pop(key) + + return 
self._get_collection(context, FloatingIP, + self._make_floatingip_dict, + filters=filters, fields=fields, + sorts=sorts, + limit=limit, + marker_obj=marker_obj, + page_reverse=page_reverse) + + def delete_disassociated_floatingips(self, context, network_id): + query = self._model_query(context, FloatingIP) + query = query.filter_by(floating_network_id=network_id, + fixed_port_id=None, + router_id=None) + for fip in query: + self.delete_floatingip(context, fip.id) + + def get_floatingips_count(self, context, filters=None): + return self._get_collection_count(context, FloatingIP, + filters=filters) + + def prevent_l3_port_deletion(self, context, port_id): + """Checks to make sure a port is allowed to be deleted. + + Raises an exception if this is not the case. This should be called by + any plugin when the API requests the deletion of a port, since some + ports for L3 are not intended to be deleted directly via a DELETE + to /ports, but rather via other API calls that perform the proper + deletion checks. 
+ """ + port_db = self._core_plugin._get_port(context, port_id) + if port_db['device_owner'] in self.router_device_owners: + # Raise port in use only if the port has IP addresses + # Otherwise it's a stale port that can be removed + fixed_ips = port_db['fixed_ips'] + if fixed_ips: + raise l3.L3PortInUse(port_id=port_id, + device_owner=port_db['device_owner']) + else: + LOG.debug(_("Port %(port_id)s has owner %(port_owner)s, but " + "no IP address, so it can be deleted"), + {'port_id': port_db['id'], + 'port_owner': port_db['device_owner']}) + + def disassociate_floatingips(self, context, port_id): + router_ids = set() + + with context.session.begin(subtransactions=True): + fip_qry = context.session.query(FloatingIP) + floating_ips = fip_qry.filter_by(fixed_port_id=port_id) + for floating_ip in floating_ips: + router_ids.add(floating_ip['router_id']) + floating_ip.update({'fixed_port_id': None, + 'fixed_ip_address': None, + 'router_id': None}) + + if router_ids: + self.l3_rpc_notifier.routers_updated( + context, list(router_ids), + 'disassociate_floatingips') + + def _build_routers_list(self, routers, gw_ports): + gw_port_id_gw_port_dict = dict((gw_port['id'], gw_port) + for gw_port in gw_ports) + for router in routers: + gw_port_id = router['gw_port_id'] + if gw_port_id: + router['gw_port'] = gw_port_id_gw_port_dict[gw_port_id] + return routers + + def _get_sync_routers(self, context, router_ids=None, active=None): + """Query routers and their gw ports for l3 agent. + + Query routers with the router_ids. The gateway ports, if any, + will be queried too. + l3 agent has an option to deal with only one router id. In addition, + when we need to notify the agent the data about only one router + (when modification of router, its interfaces, gw_port and floatingips), + we will have router_ids. + @param router_ids: the list of router ids which we want to query. + if it is None, all of routers will be queried. 
+ @return: a list of dicted routers with dicted gw_port populated if any + """ + filters = {'id': router_ids} if router_ids else {} + if active is not None: + filters['admin_state_up'] = [active] + router_dicts = self.get_routers(context, filters=filters) + gw_port_ids = [] + if not router_dicts: + return [] + for router_dict in router_dicts: + gw_port_id = router_dict['gw_port_id'] + if gw_port_id: + gw_port_ids.append(gw_port_id) + gw_ports = [] + if gw_port_ids: + gw_ports = self.get_sync_gw_ports(context, gw_port_ids) + return self._build_routers_list(router_dicts, gw_ports) + + def _get_sync_floating_ips(self, context, router_ids): + """Query floating_ips that relate to list of router_ids.""" + if not router_ids: + return [] + return self.get_floatingips(context, {'router_id': router_ids}) + + def get_sync_gw_ports(self, context, gw_port_ids): + if not gw_port_ids: + return [] + filters = {'id': gw_port_ids} + gw_ports = self._core_plugin.get_ports(context, filters) + if gw_ports: + self._populate_subnet_for_ports(context, gw_ports) + return gw_ports + + def get_sync_interfaces(self, context, router_ids, device_owners=None): + """Query router interfaces that relate to list of router_ids.""" + device_owners = device_owners or [DEVICE_OWNER_ROUTER_INTF] + if not router_ids: + return [] + filters = {'device_id': router_ids, + 'device_owner': device_owners} + interfaces = self._core_plugin.get_ports(context, filters) + if interfaces: + self._populate_subnet_for_ports(context, interfaces) + return interfaces + + def _populate_subnet_for_ports(self, context, ports): + """Populate ports with subnet. + + These ports already have fixed_ips populated. 
+ """ + if not ports: + return + + def each_port_with_ip(): + for port in ports: + fixed_ips = port.get('fixed_ips', []) + if len(fixed_ips) > 1: + LOG.info(_("Ignoring multiple IPs on router port %s"), + port['id']) + continue + elif not fixed_ips: + # Skip ports without IPs, which can occur if a subnet + # attached to a router is deleted + LOG.info(_("Skipping port %s as no IP is configure on it"), + port['id']) + continue + yield (port, fixed_ips[0]) + + network_ids = set(p['network_id'] for p, _ in each_port_with_ip()) + filters = {'network_id': [id for id in network_ids]} + fields = ['id', 'cidr', 'gateway_ip', 'network_id'] + + subnets_by_network = dict((id, []) for id in network_ids) + for subnet in self._core_plugin.get_subnets(context, filters, fields): + subnets_by_network[subnet['network_id']].append(subnet) + + for port, fixed_ip in each_port_with_ip(): + port['extra_subnets'] = [] + for subnet in subnets_by_network[port['network_id']]: + subnet_info = {'id': subnet['id'], + 'cidr': subnet['cidr'], + 'gateway_ip': subnet['gateway_ip']} + + if subnet['id'] == fixed_ip['subnet_id']: + port['subnet'] = subnet_info + else: + port['extra_subnets'].append(subnet_info) + + def _process_sync_data(self, routers, interfaces, floating_ips): + routers_dict = {} + for router in routers: + routers_dict[router['id']] = router + for floating_ip in floating_ips: + router = routers_dict.get(floating_ip['router_id']) + if router: + router_floatingips = router.get(l3_constants.FLOATINGIP_KEY, + []) + router_floatingips.append(floating_ip) + router[l3_constants.FLOATINGIP_KEY] = router_floatingips + for interface in interfaces: + router = routers_dict.get(interface['device_id']) + if router: + router_interfaces = router.get(l3_constants.INTERFACE_KEY, []) + router_interfaces.append(interface) + router[l3_constants.INTERFACE_KEY] = router_interfaces + return routers_dict.values() + + def _get_router_info_list(self, context, router_ids=None, active=None, + 
device_owners=None): + """Query routers and their related floating_ips, interfaces.""" + with context.session.begin(subtransactions=True): + routers = self._get_sync_routers(context, + router_ids=router_ids, + active=active) + router_ids = [router['id'] for router in routers] + interfaces = self.get_sync_interfaces( + context, router_ids, device_owners) + floating_ips = self._get_sync_floating_ips(context, router_ids) + return (routers, interfaces, floating_ips) + + def get_sync_data(self, context, router_ids=None, active=None): + routers, interfaces, floating_ips = self._get_router_info_list( + context, router_ids=router_ids, active=active) + return self._process_sync_data(routers, interfaces, floating_ips) diff --git a/neutron/db/l3_gwmode_db.py b/neutron/db/l3_gwmode_db.py new file mode 100644 index 000000000..4c184019f --- /dev/null +++ b/neutron/db/l3_gwmode_db.py @@ -0,0 +1,73 @@ +# Copyright 2013 VMware, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +import sqlalchemy as sa + +from neutron.db import db_base_plugin_v2 +from neutron.db import l3_db +from neutron.extensions import l3 +from neutron.openstack.common import log as logging + + +LOG = logging.getLogger(__name__) +EXTERNAL_GW_INFO = l3.EXTERNAL_GW_INFO + +# Modify the Router Data Model adding the enable_snat attribute +setattr(l3_db.Router, 'enable_snat', + sa.Column(sa.Boolean, default=True, nullable=False)) + + +class L3_NAT_db_mixin(l3_db.L3_NAT_db_mixin): + """Mixin class to add configurable gateway modes.""" + + # Register dict extend functions for ports and networks + db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs( + l3.ROUTERS, ['_extend_router_dict_gw_mode']) + + def _extend_router_dict_gw_mode(self, router_res, router_db): + if router_db.gw_port_id: + nw_id = router_db.gw_port['network_id'] + router_res[EXTERNAL_GW_INFO] = { + 'network_id': nw_id, + 'enable_snat': router_db.enable_snat} + + def _update_router_gw_info(self, context, router_id, info, router=None): + # Load the router only if necessary + if not router: + router = self._get_router(context, router_id) + # if enable_snat is not specified use the value + # stored in the database (default:True) + enable_snat = not info or info.get('enable_snat', router.enable_snat) + with context.session.begin(subtransactions=True): + router.enable_snat = enable_snat + + # Calls superclass, pass router db object for avoiding re-loading + super(L3_NAT_db_mixin, self)._update_router_gw_info( + context, router_id, info, router=router) + # Returning the router might come back useful if this + # method is overriden in child classes + return router + + def _build_routers_list(self, routers, gw_ports): + gw_port_id_gw_port_dict = {} + for gw_port in gw_ports: + gw_port_id_gw_port_dict[gw_port['id']] = gw_port + for rtr in routers: + gw_port_id = rtr['gw_port_id'] + if gw_port_id: + rtr['gw_port'] = gw_port_id_gw_port_dict[gw_port_id] + # Add enable_snat key + rtr['enable_snat'] = 
rtr[EXTERNAL_GW_INFO]['enable_snat'] + return routers diff --git a/neutron/db/l3_rpc_base.py b/neutron/db/l3_rpc_base.py new file mode 100644 index 000000000..d86d95698 --- /dev/null +++ b/neutron/db/l3_rpc_base.py @@ -0,0 +1,128 @@ +# Copyright (c) 2012 OpenStack Foundation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from oslo.config import cfg + +from neutron.common import constants +from neutron.common import utils +from neutron import context as neutron_context +from neutron.extensions import l3 +from neutron.extensions import portbindings +from neutron import manager +from neutron.openstack.common import jsonutils +from neutron.openstack.common import log as logging +from neutron.plugins.common import constants as plugin_constants + + +LOG = logging.getLogger(__name__) + + +class L3RpcCallbackMixin(object): + """A mix-in that enable L3 agent rpc support in plugin implementations.""" + + def sync_routers(self, context, **kwargs): + """Sync routers according to filters to a specific agent. + + @param context: contain user information + @param kwargs: host, router_ids + @return: a list of routers + with their interfaces and floating_ips + """ + router_ids = kwargs.get('router_ids') + host = kwargs.get('host') + context = neutron_context.get_admin_context() + l3plugin = manager.NeutronManager.get_service_plugins()[ + plugin_constants.L3_ROUTER_NAT] + if not l3plugin: + routers = {} + LOG.error(_('No plugin for L3 routing registered! 
Will reply ' + 'to l3 agent with empty router dictionary.')) + elif utils.is_extension_supported( + l3plugin, constants.L3_AGENT_SCHEDULER_EXT_ALIAS): + if cfg.CONF.router_auto_schedule: + l3plugin.auto_schedule_routers(context, host, router_ids) + routers = l3plugin.list_active_sync_routers_on_active_l3_agent( + context, host, router_ids) + else: + routers = l3plugin.get_sync_data(context, router_ids) + plugin = manager.NeutronManager.get_plugin() + if utils.is_extension_supported( + plugin, constants.PORT_BINDING_EXT_ALIAS): + self._ensure_host_set_on_ports(context, plugin, host, routers) + LOG.debug(_("Routers returned to l3 agent:\n %s"), + jsonutils.dumps(routers, indent=5)) + return routers + + def _ensure_host_set_on_ports(self, context, plugin, host, routers): + for router in routers: + LOG.debug(_("Checking router: %(id)s for host: %(host)s"), + {'id': router['id'], 'host': host}) + self._ensure_host_set_on_port(context, plugin, host, + router.get('gw_port')) + for interface in router.get(constants.INTERFACE_KEY, []): + self._ensure_host_set_on_port(context, plugin, host, + interface) + + def _ensure_host_set_on_port(self, context, plugin, host, port): + if (port and + (port.get(portbindings.HOST_ID) != host or + port.get(portbindings.VIF_TYPE) == + portbindings.VIF_TYPE_BINDING_FAILED)): + plugin.update_port(context, port['id'], + {'port': {portbindings.HOST_ID: host}}) + + def get_external_network_id(self, context, **kwargs): + """Get one external network id for l3 agent. + + l3 agent expects only on external network when it performs + this query. 
+ """ + context = neutron_context.get_admin_context() + plugin = manager.NeutronManager.get_plugin() + net_id = plugin.get_external_network_id(context) + LOG.debug(_("External network ID returned to l3 agent: %s"), + net_id) + return net_id + + def update_floatingip_statuses(self, context, router_id, fip_statuses): + """Update operational status for a floating IP.""" + l3_plugin = manager.NeutronManager.get_service_plugins()[ + plugin_constants.L3_ROUTER_NAT] + with context.session.begin(subtransactions=True): + for (floatingip_id, status) in fip_statuses.iteritems(): + LOG.debug(_("New status for floating IP %(floatingip_id)s: " + "%(status)s"), {'floatingip_id': floatingip_id, + 'status': status}) + try: + l3_plugin.update_floatingip_status(context, + floatingip_id, + status) + except l3.FloatingIPNotFound: + LOG.debug(_("Floating IP: %s no longer present."), + floatingip_id) + # Find all floating IPs known to have been the given router + # for which an update was not received. Set them DOWN mercilessly + # This situation might occur for some asynchronous backends if + # notifications were missed + known_router_fips = l3_plugin.get_floatingips( + context, {'last_known_router_id': [router_id]}) + # Consider only floating ips which were disassociated in the API + # FIXME(salv-orlando): Filtering in code should be avoided. 
+ # the plugin should offer a way to specify a null filter + fips_to_disable = (fip['id'] for fip in known_router_fips + if not fip['router_id']) + for fip_id in fips_to_disable: + l3_plugin.update_floatingip_status( + context, fip_id, constants.FLOATINGIP_STATUS_DOWN) diff --git a/neutron/db/loadbalancer/__init__.py b/neutron/db/loadbalancer/__init__.py new file mode 100644 index 000000000..cae279d0a --- /dev/null +++ b/neutron/db/loadbalancer/__init__.py @@ -0,0 +1,15 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/neutron/db/loadbalancer/loadbalancer_db.py b/neutron/db/loadbalancer/loadbalancer_db.py new file mode 100644 index 000000000..9c81a0664 --- /dev/null +++ b/neutron/db/loadbalancer/loadbalancer_db.py @@ -0,0 +1,802 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013 OpenStack Foundation. All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. +# + +import sqlalchemy as sa +from sqlalchemy import orm +from sqlalchemy.orm import exc +from sqlalchemy.orm import validates + +from neutron.api.v2 import attributes +from neutron.common import exceptions as n_exc +from neutron.db import db_base_plugin_v2 as base_db +from neutron.db import model_base +from neutron.db import models_v2 +from neutron.db import servicetype_db as st_db +from neutron.extensions import loadbalancer +from neutron import manager +from neutron.openstack.common.db import exception +from neutron.openstack.common import excutils +from neutron.openstack.common import log as logging +from neutron.openstack.common import uuidutils +from neutron.plugins.common import constants +from neutron.services.loadbalancer import constants as lb_const + + +LOG = logging.getLogger(__name__) + + +class SessionPersistence(model_base.BASEV2): + + vip_id = sa.Column(sa.String(36), + sa.ForeignKey("vips.id"), + primary_key=True) + type = sa.Column(sa.Enum("SOURCE_IP", + "HTTP_COOKIE", + "APP_COOKIE", + name="sesssionpersistences_type"), + nullable=False) + cookie_name = sa.Column(sa.String(1024)) + + +class PoolStatistics(model_base.BASEV2): + """Represents pool statistics.""" + + pool_id = sa.Column(sa.String(36), sa.ForeignKey("pools.id"), + primary_key=True) + bytes_in = sa.Column(sa.BigInteger, nullable=False) + bytes_out = sa.Column(sa.BigInteger, nullable=False) + active_connections = sa.Column(sa.BigInteger, nullable=False) + total_connections = sa.Column(sa.BigInteger, nullable=False) + + @validates('bytes_in', 'bytes_out', + 'active_connections', 'total_connections') + def validate_non_negative_int(self, key, value): + if value < 0: + data = {'key': key, 'value': value} + raise ValueError(_('The %(key)s field can not have ' + 'negative value. 
' + 'Current value is %(value)d.') % data) + return value + + +class Vip(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant, + models_v2.HasStatusDescription): + """Represents a v2 neutron loadbalancer vip.""" + + name = sa.Column(sa.String(255)) + description = sa.Column(sa.String(255)) + port_id = sa.Column(sa.String(36), sa.ForeignKey('ports.id')) + protocol_port = sa.Column(sa.Integer, nullable=False) + protocol = sa.Column(sa.Enum("HTTP", "HTTPS", "TCP", name="lb_protocols"), + nullable=False) + pool_id = sa.Column(sa.String(36), nullable=False, unique=True) + session_persistence = orm.relationship(SessionPersistence, + uselist=False, + backref="vips", + cascade="all, delete-orphan") + admin_state_up = sa.Column(sa.Boolean(), nullable=False) + connection_limit = sa.Column(sa.Integer) + port = orm.relationship(models_v2.Port) + + +class Member(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant, + models_v2.HasStatusDescription): + """Represents a v2 neutron loadbalancer member.""" + + __table_args__ = ( + sa.schema.UniqueConstraint('pool_id', 'address', 'protocol_port', + name='uniq_member0pool_id0address0port'), + ) + pool_id = sa.Column(sa.String(36), sa.ForeignKey("pools.id"), + nullable=False) + address = sa.Column(sa.String(64), nullable=False) + protocol_port = sa.Column(sa.Integer, nullable=False) + weight = sa.Column(sa.Integer, nullable=False) + admin_state_up = sa.Column(sa.Boolean(), nullable=False) + + +class Pool(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant, + models_v2.HasStatusDescription): + """Represents a v2 neutron loadbalancer pool.""" + + vip_id = sa.Column(sa.String(36), sa.ForeignKey("vips.id")) + name = sa.Column(sa.String(255)) + description = sa.Column(sa.String(255)) + subnet_id = sa.Column(sa.String(36), nullable=False) + protocol = sa.Column(sa.Enum("HTTP", "HTTPS", "TCP", name="lb_protocols"), + nullable=False) + lb_method = sa.Column(sa.Enum("ROUND_ROBIN", + "LEAST_CONNECTIONS", + "SOURCE_IP", + 
name="pools_lb_method"), + nullable=False) + admin_state_up = sa.Column(sa.Boolean(), nullable=False) + stats = orm.relationship(PoolStatistics, + uselist=False, + backref="pools", + cascade="all, delete-orphan") + members = orm.relationship(Member, backref="pools", + cascade="all, delete-orphan") + monitors = orm.relationship("PoolMonitorAssociation", backref="pools", + cascade="all, delete-orphan") + vip = orm.relationship(Vip, backref='pool') + + provider = orm.relationship( + st_db.ProviderResourceAssociation, + uselist=False, + lazy="joined", + primaryjoin="Pool.id==ProviderResourceAssociation.resource_id", + foreign_keys=[st_db.ProviderResourceAssociation.resource_id] + ) + + +class HealthMonitor(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant): + """Represents a v2 neutron loadbalancer healthmonitor.""" + + type = sa.Column(sa.Enum("PING", "TCP", "HTTP", "HTTPS", + name="healthmontiors_type"), + nullable=False) + delay = sa.Column(sa.Integer, nullable=False) + timeout = sa.Column(sa.Integer, nullable=False) + max_retries = sa.Column(sa.Integer, nullable=False) + http_method = sa.Column(sa.String(16)) + url_path = sa.Column(sa.String(255)) + expected_codes = sa.Column(sa.String(64)) + admin_state_up = sa.Column(sa.Boolean(), nullable=False) + + pools = orm.relationship( + "PoolMonitorAssociation", backref="healthmonitor", + cascade="all", lazy="joined" + ) + + +class PoolMonitorAssociation(model_base.BASEV2, + models_v2.HasStatusDescription): + """Many-to-many association between pool and healthMonitor classes.""" + + pool_id = sa.Column(sa.String(36), + sa.ForeignKey("pools.id"), + primary_key=True) + monitor_id = sa.Column(sa.String(36), + sa.ForeignKey("healthmonitors.id"), + primary_key=True) + + +class LoadBalancerPluginDb(loadbalancer.LoadBalancerPluginBase, + base_db.CommonDbMixin): + """Wraps loadbalancer with SQLAlchemy models. 
+ + A class that wraps the implementation of the Neutron loadbalancer + plugin database access interface using SQLAlchemy models. + """ + + @property + def _core_plugin(self): + return manager.NeutronManager.get_plugin() + + def update_status(self, context, model, id, status, + status_description=None): + with context.session.begin(subtransactions=True): + if issubclass(model, Vip): + try: + v_db = (self._model_query(context, model). + filter(model.id == id). + options(orm.noload('port')). + one()) + except exc.NoResultFound: + raise loadbalancer.VipNotFound(vip_id=id) + else: + v_db = self._get_resource(context, model, id) + if v_db.status != status: + v_db.status = status + # update status_description in two cases: + # - new value is passed + # - old value is not None (needs to be updated anyway) + if status_description or v_db['status_description']: + v_db.status_description = status_description + + def _get_resource(self, context, model, id): + try: + r = self._get_by_id(context, model, id) + except exc.NoResultFound: + with excutils.save_and_reraise_exception(reraise=False) as ctx: + if issubclass(model, Vip): + raise loadbalancer.VipNotFound(vip_id=id) + elif issubclass(model, Pool): + raise loadbalancer.PoolNotFound(pool_id=id) + elif issubclass(model, Member): + raise loadbalancer.MemberNotFound(member_id=id) + elif issubclass(model, HealthMonitor): + raise loadbalancer.HealthMonitorNotFound(monitor_id=id) + ctx.reraise = True + return r + + def assert_modification_allowed(self, obj): + status = getattr(obj, 'status', None) + + if status == constants.PENDING_DELETE: + raise loadbalancer.StateInvalid(id=id, state=status) + + ######################################################## + # VIP DB access + def _make_vip_dict(self, vip, fields=None): + fixed_ip = (vip.port.fixed_ips or [{}])[0] + + res = {'id': vip['id'], + 'tenant_id': vip['tenant_id'], + 'name': vip['name'], + 'description': vip['description'], + 'subnet_id': fixed_ip.get('subnet_id'), + 
'address': fixed_ip.get('ip_address'), + 'port_id': vip['port_id'], + 'protocol_port': vip['protocol_port'], + 'protocol': vip['protocol'], + 'pool_id': vip['pool_id'], + 'session_persistence': None, + 'connection_limit': vip['connection_limit'], + 'admin_state_up': vip['admin_state_up'], + 'status': vip['status'], + 'status_description': vip['status_description']} + + if vip['session_persistence']: + s_p = { + 'type': vip['session_persistence']['type'] + } + + if vip['session_persistence']['type'] == 'APP_COOKIE': + s_p['cookie_name'] = vip['session_persistence']['cookie_name'] + + res['session_persistence'] = s_p + + return self._fields(res, fields) + + def _check_session_persistence_info(self, info): + """Performs sanity check on session persistence info. + + :param info: Session persistence info + """ + if info['type'] == 'APP_COOKIE': + if not info.get('cookie_name'): + raise ValueError(_("'cookie_name' should be specified for this" + " type of session persistence.")) + else: + if 'cookie_name' in info: + raise ValueError(_("'cookie_name' is not allowed for this type" + " of session persistence")) + + def _create_session_persistence_db(self, session_info, vip_id): + self._check_session_persistence_info(session_info) + + sesspersist_db = SessionPersistence( + type=session_info['type'], + cookie_name=session_info.get('cookie_name'), + vip_id=vip_id) + return sesspersist_db + + def _update_vip_session_persistence(self, context, vip_id, info): + self._check_session_persistence_info(info) + + vip = self._get_resource(context, Vip, vip_id) + + with context.session.begin(subtransactions=True): + # Update sessionPersistence table + sess_qry = context.session.query(SessionPersistence) + sesspersist_db = sess_qry.filter_by(vip_id=vip_id).first() + + # Insert a None cookie_info if it is not present to overwrite an + # an existing value in the database. 
+ if 'cookie_name' not in info: + info['cookie_name'] = None + + if sesspersist_db: + sesspersist_db.update(info) + else: + sesspersist_db = SessionPersistence( + type=info['type'], + cookie_name=info['cookie_name'], + vip_id=vip_id) + context.session.add(sesspersist_db) + # Update vip table + vip.session_persistence = sesspersist_db + context.session.add(vip) + + def _delete_session_persistence(self, context, vip_id): + with context.session.begin(subtransactions=True): + sess_qry = context.session.query(SessionPersistence) + sess_qry.filter_by(vip_id=vip_id).delete() + + def _create_port_for_vip(self, context, vip_db, subnet_id, ip_address): + # resolve subnet and create port + subnet = self._core_plugin.get_subnet(context, subnet_id) + fixed_ip = {'subnet_id': subnet['id']} + if ip_address and ip_address != attributes.ATTR_NOT_SPECIFIED: + fixed_ip['ip_address'] = ip_address + + port_data = { + 'tenant_id': vip_db.tenant_id, + 'name': 'vip-' + vip_db.id, + 'network_id': subnet['network_id'], + 'mac_address': attributes.ATTR_NOT_SPECIFIED, + 'admin_state_up': False, + 'device_id': '', + 'device_owner': '', + 'fixed_ips': [fixed_ip] + } + + port = self._core_plugin.create_port(context, {'port': port_data}) + vip_db.port_id = port['id'] + + def create_vip(self, context, vip): + v = vip['vip'] + tenant_id = self._get_tenant_id_for_create(context, v) + + with context.session.begin(subtransactions=True): + if v['pool_id']: + pool = self._get_resource(context, Pool, v['pool_id']) + # validate that the pool has same tenant + if pool['tenant_id'] != tenant_id: + raise n_exc.NotAuthorized() + # validate that the pool has same protocol + if pool['protocol'] != v['protocol']: + raise loadbalancer.ProtocolMismatch( + vip_proto=v['protocol'], + pool_proto=pool['protocol']) + if pool['status'] == constants.PENDING_DELETE: + raise loadbalancer.StateInvalid(state=pool['status'], + id=pool['id']) + else: + pool = None + + vip_db = Vip(id=uuidutils.generate_uuid(), + 
tenant_id=tenant_id, + name=v['name'], + description=v['description'], + port_id=None, + protocol_port=v['protocol_port'], + protocol=v['protocol'], + pool_id=v['pool_id'], + connection_limit=v['connection_limit'], + admin_state_up=v['admin_state_up'], + status=constants.PENDING_CREATE) + + session_info = v['session_persistence'] + + if session_info: + s_p = self._create_session_persistence_db( + session_info, + vip_db['id']) + vip_db.session_persistence = s_p + + try: + context.session.add(vip_db) + context.session.flush() + except exception.DBDuplicateEntry: + raise loadbalancer.VipExists(pool_id=v['pool_id']) + + # create a port to reserve address for IPAM + self._create_port_for_vip( + context, + vip_db, + v['subnet_id'], + v.get('address') + ) + + if pool: + pool['vip_id'] = vip_db['id'] + + return self._make_vip_dict(vip_db) + + def update_vip(self, context, id, vip): + v = vip['vip'] + + sess_persist = v.pop('session_persistence', None) + with context.session.begin(subtransactions=True): + vip_db = self._get_resource(context, Vip, id) + + self.assert_modification_allowed(vip_db) + + if sess_persist: + self._update_vip_session_persistence(context, id, sess_persist) + else: + self._delete_session_persistence(context, id) + + if v: + try: + # in case new pool already has a vip + # update will raise integrity error at first query + old_pool_id = vip_db['pool_id'] + vip_db.update(v) + # If the pool_id is changed, we need to update + # the associated pools + if 'pool_id' in v: + new_pool = self._get_resource(context, Pool, + v['pool_id']) + self.assert_modification_allowed(new_pool) + + # check that the pool matches the tenant_id + if new_pool['tenant_id'] != vip_db['tenant_id']: + raise n_exc.NotAuthorized() + # validate that the pool has same protocol + if new_pool['protocol'] != vip_db['protocol']: + raise loadbalancer.ProtocolMismatch( + vip_proto=vip_db['protocol'], + pool_proto=new_pool['protocol']) + if new_pool['status'] == constants.PENDING_DELETE: + 
raise loadbalancer.StateInvalid( + state=new_pool['status'], + id=new_pool['id']) + + if old_pool_id: + old_pool = self._get_resource( + context, + Pool, + old_pool_id + ) + old_pool['vip_id'] = None + + new_pool['vip_id'] = vip_db['id'] + except exception.DBDuplicateEntry: + raise loadbalancer.VipExists(pool_id=v['pool_id']) + + return self._make_vip_dict(vip_db) + + def delete_vip(self, context, id): + with context.session.begin(subtransactions=True): + vip = self._get_resource(context, Vip, id) + qry = context.session.query(Pool) + for pool in qry.filter_by(vip_id=id): + pool.update({"vip_id": None}) + + context.session.delete(vip) + if vip.port: # this is a Neutron port + self._core_plugin.delete_port(context, vip.port.id) + + def get_vip(self, context, id, fields=None): + vip = self._get_resource(context, Vip, id) + return self._make_vip_dict(vip, fields) + + def get_vips(self, context, filters=None, fields=None): + return self._get_collection(context, Vip, + self._make_vip_dict, + filters=filters, fields=fields) + + ######################################################## + # Pool DB access + def _make_pool_dict(self, pool, fields=None): + res = {'id': pool['id'], + 'tenant_id': pool['tenant_id'], + 'name': pool['name'], + 'description': pool['description'], + 'subnet_id': pool['subnet_id'], + 'protocol': pool['protocol'], + 'vip_id': pool['vip_id'], + 'lb_method': pool['lb_method'], + 'admin_state_up': pool['admin_state_up'], + 'status': pool['status'], + 'status_description': pool['status_description'], + 'provider': '' + } + + if pool.provider: + res['provider'] = pool.provider.provider_name + + # Get the associated members + res['members'] = [member['id'] for member in pool['members']] + + # Get the associated health_monitors + res['health_monitors'] = [ + monitor['monitor_id'] for monitor in pool['monitors']] + res['health_monitors_status'] = [ + {'monitor_id': monitor['monitor_id'], + 'status': monitor['status'], + 'status_description': 
monitor['status_description']} + for monitor in pool['monitors']] + return self._fields(res, fields) + + def update_pool_stats(self, context, pool_id, data=None): + """Update a pool with new stats structure.""" + data = data or {} + with context.session.begin(subtransactions=True): + pool_db = self._get_resource(context, Pool, pool_id) + self.assert_modification_allowed(pool_db) + pool_db.stats = self._create_pool_stats(context, pool_id, data) + + for member, stats in data.get('members', {}).items(): + stats_status = stats.get(lb_const.STATS_STATUS) + if stats_status: + self.update_status(context, Member, member, stats_status) + + def _create_pool_stats(self, context, pool_id, data=None): + # This is internal method to add pool statistics. It won't + # be exposed to API + if not data: + data = {} + stats_db = PoolStatistics( + pool_id=pool_id, + bytes_in=data.get(lb_const.STATS_IN_BYTES, 0), + bytes_out=data.get(lb_const.STATS_OUT_BYTES, 0), + active_connections=data.get(lb_const.STATS_ACTIVE_CONNECTIONS, 0), + total_connections=data.get(lb_const.STATS_TOTAL_CONNECTIONS, 0) + ) + return stats_db + + def _delete_pool_stats(self, context, pool_id): + # This is internal method to delete pool statistics. 
It won't + # be exposed to API + with context.session.begin(subtransactions=True): + stats_qry = context.session.query(PoolStatistics) + try: + stats = stats_qry.filter_by(pool_id=pool_id).one() + except exc.NoResultFound: + raise loadbalancer.PoolStatsNotFound(pool_id=pool_id) + context.session.delete(stats) + + def create_pool(self, context, pool): + v = pool['pool'] + + tenant_id = self._get_tenant_id_for_create(context, v) + with context.session.begin(subtransactions=True): + pool_db = Pool(id=uuidutils.generate_uuid(), + tenant_id=tenant_id, + name=v['name'], + description=v['description'], + subnet_id=v['subnet_id'], + protocol=v['protocol'], + lb_method=v['lb_method'], + admin_state_up=v['admin_state_up'], + status=constants.PENDING_CREATE) + pool_db.stats = self._create_pool_stats(context, pool_db['id']) + context.session.add(pool_db) + + return self._make_pool_dict(pool_db) + + def update_pool(self, context, id, pool): + p = pool['pool'] + with context.session.begin(subtransactions=True): + pool_db = self._get_resource(context, Pool, id) + self.assert_modification_allowed(pool_db) + if p: + pool_db.update(p) + + return self._make_pool_dict(pool_db) + + def _ensure_pool_delete_conditions(self, context, pool_id): + if context.session.query(Vip).filter_by(pool_id=pool_id).first(): + raise loadbalancer.PoolInUse(pool_id=pool_id) + + def delete_pool(self, context, pool_id): + # Check if the pool is in use + self._ensure_pool_delete_conditions(context, pool_id) + + with context.session.begin(subtransactions=True): + self._delete_pool_stats(context, pool_id) + pool_db = self._get_resource(context, Pool, pool_id) + context.session.delete(pool_db) + + def get_pool(self, context, id, fields=None): + pool = self._get_resource(context, Pool, id) + return self._make_pool_dict(pool, fields) + + def get_pools(self, context, filters=None, fields=None): + collection = self._model_query(context, Pool) + collection = self._apply_filters_to_query(collection, Pool, filters) + 
return [self._make_pool_dict(c, fields) + for c in collection] + + def stats(self, context, pool_id): + with context.session.begin(subtransactions=True): + pool = self._get_resource(context, Pool, pool_id) + stats = pool['stats'] + + res = {lb_const.STATS_IN_BYTES: stats['bytes_in'], + lb_const.STATS_OUT_BYTES: stats['bytes_out'], + lb_const.STATS_ACTIVE_CONNECTIONS: stats['active_connections'], + lb_const.STATS_TOTAL_CONNECTIONS: stats['total_connections']} + return {'stats': res} + + def create_pool_health_monitor(self, context, health_monitor, pool_id): + monitor_id = health_monitor['health_monitor']['id'] + with context.session.begin(subtransactions=True): + assoc_qry = context.session.query(PoolMonitorAssociation) + assoc = assoc_qry.filter_by(pool_id=pool_id, + monitor_id=monitor_id).first() + if assoc: + raise loadbalancer.PoolMonitorAssociationExists( + monitor_id=monitor_id, pool_id=pool_id) + + pool = self._get_resource(context, Pool, pool_id) + + assoc = PoolMonitorAssociation(pool_id=pool_id, + monitor_id=monitor_id, + status=constants.PENDING_CREATE) + pool.monitors.append(assoc) + monitors = [monitor['monitor_id'] for monitor in pool['monitors']] + + res = {"health_monitor": monitors} + return res + + def delete_pool_health_monitor(self, context, id, pool_id): + with context.session.begin(subtransactions=True): + assoc = self._get_pool_health_monitor(context, id, pool_id) + pool = self._get_resource(context, Pool, pool_id) + pool.monitors.remove(assoc) + + def _get_pool_health_monitor(self, context, id, pool_id): + try: + assoc_qry = context.session.query(PoolMonitorAssociation) + return assoc_qry.filter_by(monitor_id=id, pool_id=pool_id).one() + except exc.NoResultFound: + raise loadbalancer.PoolMonitorAssociationNotFound( + monitor_id=id, pool_id=pool_id) + + def get_pool_health_monitor(self, context, id, pool_id, fields=None): + pool_hm = self._get_pool_health_monitor(context, id, pool_id) + # need to add tenant_id for admin_or_owner policy check 
to pass + hm = self.get_health_monitor(context, id) + res = {'pool_id': pool_id, + 'monitor_id': id, + 'status': pool_hm['status'], + 'status_description': pool_hm['status_description'], + 'tenant_id': hm['tenant_id']} + return self._fields(res, fields) + + def update_pool_health_monitor(self, context, id, pool_id, + status, status_description=None): + with context.session.begin(subtransactions=True): + assoc = self._get_pool_health_monitor(context, id, pool_id) + self.assert_modification_allowed(assoc) + assoc.status = status + assoc.status_description = status_description + + ######################################################## + # Member DB access + def _make_member_dict(self, member, fields=None): + res = {'id': member['id'], + 'tenant_id': member['tenant_id'], + 'pool_id': member['pool_id'], + 'address': member['address'], + 'protocol_port': member['protocol_port'], + 'weight': member['weight'], + 'admin_state_up': member['admin_state_up'], + 'status': member['status'], + 'status_description': member['status_description']} + + return self._fields(res, fields) + + def create_member(self, context, member): + v = member['member'] + tenant_id = self._get_tenant_id_for_create(context, v) + + try: + with context.session.begin(subtransactions=True): + # ensuring that pool exists + self._get_resource(context, Pool, v['pool_id']) + member_db = Member(id=uuidutils.generate_uuid(), + tenant_id=tenant_id, + pool_id=v['pool_id'], + address=v['address'], + protocol_port=v['protocol_port'], + weight=v['weight'], + admin_state_up=v['admin_state_up'], + status=constants.PENDING_CREATE) + context.session.add(member_db) + return self._make_member_dict(member_db) + except exception.DBDuplicateEntry: + raise loadbalancer.MemberExists( + address=v['address'], + port=v['protocol_port'], + pool=v['pool_id']) + + def update_member(self, context, id, member): + v = member['member'] + try: + with context.session.begin(subtransactions=True): + member_db = self._get_resource(context, 
Member, id) + self.assert_modification_allowed(member_db) + if v: + member_db.update(v) + return self._make_member_dict(member_db) + except exception.DBDuplicateEntry: + raise loadbalancer.MemberExists( + address=member_db['address'], + port=member_db['protocol_port'], + pool=member_db['pool_id']) + + def delete_member(self, context, id): + with context.session.begin(subtransactions=True): + member_db = self._get_resource(context, Member, id) + context.session.delete(member_db) + + def get_member(self, context, id, fields=None): + member = self._get_resource(context, Member, id) + return self._make_member_dict(member, fields) + + def get_members(self, context, filters=None, fields=None): + return self._get_collection(context, Member, + self._make_member_dict, + filters=filters, fields=fields) + + ######################################################## + # HealthMonitor DB access + def _make_health_monitor_dict(self, health_monitor, fields=None): + res = {'id': health_monitor['id'], + 'tenant_id': health_monitor['tenant_id'], + 'type': health_monitor['type'], + 'delay': health_monitor['delay'], + 'timeout': health_monitor['timeout'], + 'max_retries': health_monitor['max_retries'], + 'admin_state_up': health_monitor['admin_state_up']} + # no point to add the values below to + # the result if the 'type' is not HTTP/S + if res['type'] in ['HTTP', 'HTTPS']: + for attr in ['url_path', 'http_method', 'expected_codes']: + res[attr] = health_monitor[attr] + res['pools'] = [{'pool_id': p['pool_id'], + 'status': p['status'], + 'status_description': p['status_description']} + for p in health_monitor.pools] + return self._fields(res, fields) + + def create_health_monitor(self, context, health_monitor): + v = health_monitor['health_monitor'] + tenant_id = self._get_tenant_id_for_create(context, v) + with context.session.begin(subtransactions=True): + # setting ACTIVE status since healthmon is shared DB object + monitor_db = HealthMonitor(id=uuidutils.generate_uuid(), + 
tenant_id=tenant_id, + type=v['type'], + delay=v['delay'], + timeout=v['timeout'], + max_retries=v['max_retries'], + http_method=v['http_method'], + url_path=v['url_path'], + expected_codes=v['expected_codes'], + admin_state_up=v['admin_state_up']) + context.session.add(monitor_db) + return self._make_health_monitor_dict(monitor_db) + + def update_health_monitor(self, context, id, health_monitor): + v = health_monitor['health_monitor'] + with context.session.begin(subtransactions=True): + monitor_db = self._get_resource(context, HealthMonitor, id) + self.assert_modification_allowed(monitor_db) + if v: + monitor_db.update(v) + return self._make_health_monitor_dict(monitor_db) + + def delete_health_monitor(self, context, id): + """Delete health monitor object from DB + + Raises an error if the monitor has associations with pools + """ + query = self._model_query(context, PoolMonitorAssociation) + has_associations = query.filter_by(monitor_id=id).first() + if has_associations: + raise loadbalancer.HealthMonitorInUse(monitor_id=id) + + with context.session.begin(subtransactions=True): + monitor_db = self._get_resource(context, HealthMonitor, id) + context.session.delete(monitor_db) + + def get_health_monitor(self, context, id, fields=None): + healthmonitor = self._get_resource(context, HealthMonitor, id) + return self._make_health_monitor_dict(healthmonitor, fields) + + def get_health_monitors(self, context, filters=None, fields=None): + return self._get_collection(context, HealthMonitor, + self._make_health_monitor_dict, + filters=filters, fields=fields) diff --git a/neutron/db/metering/__init__.py b/neutron/db/metering/__init__.py new file mode 100644 index 000000000..82a447213 --- /dev/null +++ b/neutron/db/metering/__init__.py @@ -0,0 +1,15 @@ +# Copyright (C) 2013 eNovance SAS +# +# Author: Sylvain Afchain +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/neutron/db/metering/metering_db.py b/neutron/db/metering/metering_db.py new file mode 100644 index 000000000..fe48ae4fd --- /dev/null +++ b/neutron/db/metering/metering_db.py @@ -0,0 +1,239 @@ +# Copyright (C) 2013 eNovance SAS +# +# Author: Sylvain Afchain +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import netaddr +import sqlalchemy as sa +from sqlalchemy import orm + +from neutron.api.rpc.agentnotifiers import metering_rpc_agent_api +from neutron.common import constants +from neutron.db import api as dbapi +from neutron.db import db_base_plugin_v2 as base_db +from neutron.db import l3_db +from neutron.db import model_base +from neutron.db import models_v2 +from neutron.extensions import metering +from neutron.openstack.common import log as logging +from neutron.openstack.common import uuidutils + + +LOG = logging.getLogger(__name__) + + +class MeteringLabelRule(model_base.BASEV2, models_v2.HasId): + direction = sa.Column(sa.Enum('ingress', 'egress', + name='meteringlabels_direction')) + remote_ip_prefix = sa.Column(sa.String(64)) + metering_label_id = sa.Column(sa.String(36), + sa.ForeignKey("meteringlabels.id", + ondelete="CASCADE"), + nullable=False) + excluded = sa.Column(sa.Boolean, default=False) + + +class MeteringLabel(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant): + name = sa.Column(sa.String(255)) + description = sa.Column(sa.String(1024)) + rules = orm.relationship(MeteringLabelRule, backref="label", + cascade="delete", lazy="joined") + routers = orm.relationship( + l3_db.Router, + primaryjoin="MeteringLabel.tenant_id==Router.tenant_id", + foreign_keys='MeteringLabel.tenant_id', + uselist=True) + + +class MeteringDbMixin(metering.MeteringPluginBase, + base_db.CommonDbMixin): + + def __init__(self): + dbapi.register_models() + + self.meter_rpc = metering_rpc_agent_api.MeteringAgentNotifyAPI() + + def _make_metering_label_dict(self, metering_label, fields=None): + res = {'id': metering_label['id'], + 'name': metering_label['name'], + 'description': metering_label['description'], + 'tenant_id': metering_label['tenant_id']} + return self._fields(res, fields) + + def create_metering_label(self, context, metering_label): + m = metering_label['metering_label'] + tenant_id = self._get_tenant_id_for_create(context, m) + + with 
context.session.begin(subtransactions=True): + metering_db = MeteringLabel(id=uuidutils.generate_uuid(), + description=m['description'], + tenant_id=tenant_id, + name=m['name']) + context.session.add(metering_db) + + return self._make_metering_label_dict(metering_db) + + def delete_metering_label(self, context, label_id): + with context.session.begin(subtransactions=True): + try: + label = self._get_by_id(context, MeteringLabel, label_id) + except orm.exc.NoResultFound: + raise metering.MeteringLabelNotFound(label_id=label_id) + + context.session.delete(label) + + def get_metering_label(self, context, label_id, fields=None): + try: + metering_label = self._get_by_id(context, MeteringLabel, label_id) + except orm.exc.NoResultFound: + raise metering.MeteringLabelNotFound(label_id=label_id) + + return self._make_metering_label_dict(metering_label, fields) + + def get_metering_labels(self, context, filters=None, fields=None, + sorts=None, limit=None, marker=None, + page_reverse=False): + marker_obj = self._get_marker_obj(context, 'metering_labels', limit, + marker) + return self._get_collection(context, MeteringLabel, + self._make_metering_label_dict, + filters=filters, fields=fields, + sorts=sorts, + limit=limit, + marker_obj=marker_obj, + page_reverse=page_reverse) + + def _make_metering_label_rule_dict(self, metering_label_rule, fields=None): + res = {'id': metering_label_rule['id'], + 'metering_label_id': metering_label_rule['metering_label_id'], + 'direction': metering_label_rule['direction'], + 'remote_ip_prefix': metering_label_rule['remote_ip_prefix'], + 'excluded': metering_label_rule['excluded']} + return self._fields(res, fields) + + def get_metering_label_rules(self, context, filters=None, fields=None, + sorts=None, limit=None, marker=None, + page_reverse=False): + marker_obj = self._get_marker_obj(context, 'metering_label_rules', + limit, marker) + + return self._get_collection(context, MeteringLabelRule, + self._make_metering_label_rule_dict, + 
filters=filters, fields=fields, + sorts=sorts, + limit=limit, + marker_obj=marker_obj, + page_reverse=page_reverse) + + def get_metering_label_rule(self, context, rule_id, fields=None): + try: + metering_label_rule = self._get_by_id(context, + MeteringLabelRule, rule_id) + except orm.exc.NoResultFound: + raise metering.MeteringLabelRuleNotFound(rule_id=rule_id) + + return self._make_metering_label_rule_dict(metering_label_rule, fields) + + def _validate_cidr(self, context, label_id, remote_ip_prefix, + direction, excluded): + r_ips = self.get_metering_label_rules(context, + filters={'metering_label_id': + label_id, + 'direction': + [direction], + 'excluded': + [excluded]}, + fields=['remote_ip_prefix']) + + cidrs = [r['remote_ip_prefix'] for r in r_ips] + new_cidr_ipset = netaddr.IPSet([remote_ip_prefix]) + if (netaddr.IPSet(cidrs) & new_cidr_ipset): + raise metering.MeteringLabelRuleOverlaps(remote_ip_prefix= + remote_ip_prefix) + + def create_metering_label_rule(self, context, metering_label_rule): + m = metering_label_rule['metering_label_rule'] + with context.session.begin(subtransactions=True): + label_id = m['metering_label_id'] + ip_prefix = m['remote_ip_prefix'] + direction = m['direction'] + excluded = m['excluded'] + + self._validate_cidr(context, label_id, ip_prefix, direction, + excluded) + metering_db = MeteringLabelRule(id=uuidutils.generate_uuid(), + metering_label_id=label_id, + direction=direction, + excluded=m['excluded'], + remote_ip_prefix=ip_prefix) + context.session.add(metering_db) + + return self._make_metering_label_rule_dict(metering_db) + + def delete_metering_label_rule(self, context, rule_id): + with context.session.begin(subtransactions=True): + try: + rule = self._get_by_id(context, MeteringLabelRule, rule_id) + except orm.exc.NoResultFound: + raise metering.MeteringLabelRuleNotFound(rule_id=rule_id) + + context.session.delete(rule) + + def _get_metering_rules_dict(self, metering_label): + rules = [] + for rule in 
metering_label.rules: + rule_dict = self._make_metering_label_rule_dict(rule) + rules.append(rule_dict) + + return rules + + def _make_router_dict(self, router): + res = {'id': router['id'], + 'name': router['name'], + 'tenant_id': router['tenant_id'], + 'admin_state_up': router['admin_state_up'], + 'status': router['status'], + 'gw_port_id': router['gw_port_id'], + constants.METERING_LABEL_KEY: []} + + return res + + def _process_sync_metering_data(self, labels): + routers_dict = {} + for label in labels: + routers = label.routers + for router in routers: + router_dict = routers_dict.get( + router['id'], + self._make_router_dict(router)) + + rules = self._get_metering_rules_dict(label) + + data = {'id': label['id'], 'rules': rules} + router_dict[constants.METERING_LABEL_KEY].append(data) + + routers_dict[router['id']] = router_dict + + return routers_dict.values() + + def get_sync_data_metering(self, context, label_id=None, router_ids=None): + labels = context.session.query(MeteringLabel) + + if label_id: + labels = labels.filter(MeteringLabel.id == label_id) + elif router_ids: + labels = (labels.join(MeteringLabel.routers). + filter(l3_db.Router.id.in_(router_ids))) + + return self._process_sync_metering_data(labels) diff --git a/neutron/db/metering/metering_rpc.py b/neutron/db/metering/metering_rpc.py new file mode 100644 index 000000000..c0bbd51ad --- /dev/null +++ b/neutron/db/metering/metering_rpc.py @@ -0,0 +1,55 @@ +# Copyright (C) 2014 eNovance SAS +# +# Author: Sylvain Afchain +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +from neutron.common import constants as consts +from neutron.common import utils +from neutron import manager +from neutron.openstack.common import log as logging +from neutron.plugins.common import constants as service_constants + +LOG = logging.getLogger(__name__) + + +class MeteringRpcCallbacks(object): + + RPC_API_VERSION = '1.0' + + def __init__(self, meter_plugin): + self.meter_plugin = meter_plugin + + def get_sync_data_metering(self, context, **kwargs): + l3_plugin = manager.NeutronManager.get_service_plugins().get( + service_constants.L3_ROUTER_NAT) + if not l3_plugin: + return + + host = kwargs.get('host') + if not utils.is_extension_supported( + l3_plugin, consts.L3_AGENT_SCHEDULER_EXT_ALIAS) or not host: + return self.meter_plugin.get_sync_data_metering(context) + else: + agents = l3_plugin.get_l3_agents(context, filters={'host': [host]}) + if not agents: + LOG.error(_('Unable to find agent %s.'), host) + return + + routers = l3_plugin.list_routers_on_l3_agent(context, agents[0].id) + router_ids = [router['id'] for router in routers['routers']] + if not router_ids: + return + + return self.meter_plugin.get_sync_data_metering(context, + router_ids=router_ids) diff --git a/neutron/db/migration/README b/neutron/db/migration/README new file mode 100644 index 000000000..75d86d051 --- /dev/null +++ b/neutron/db/migration/README @@ -0,0 +1,92 @@ +# Copyright 2012 New Dream Network, LLC (DreamHost) +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author Mark McClain (DreamHost) + +The migrations in the alembic/versions contain the changes needed to migrate +from older Neutron releases to newer versions. A migration occurs by executing +a script that details the changes needed to upgrade/downgrade the database. The +migration scripts are ordered so that multiple scripts can run sequentially to +update the database. The scripts are executed by Neutron's migration wrapper +which uses the Alembic library to manage the migration. Neutron supports +migration from Folsom or later. + + +If you are a deployer or developer and want to migrate from Folsom to Grizzly +or later you must first add version tracking to the database: + +$ neutron-db-manage --config-file /path/to/neutron.conf \ + --config-file /path/to/plugin/config.ini stamp folsom + +You can then upgrade to the latest database version via: +$ neutron-db-manage --config-file /path/to/neutron.conf \ + --config-file /path/to/plugin/config.ini upgrade head + +To check the current database version: +$ neutron-db-manage --config-file /path/to/neutron.conf \ + --config-file /path/to/plugin/config.ini current + +To create a script to run the migration offline: +$ neutron-db-manage --config-file /path/to/neutron.conf \ + --config-file /path/to/plugin/config.ini upgrade head --sql + +To run the offline migration between specific migration versions: +$ neutron-db-manage --config-file /path/to/neutron.conf \ +--config-file /path/to/plugin/config.ini upgrade \ +: --sql + +Upgrade the database incrementally: +$ neutron-db-manage --config-file /path/to/neutron.conf \ +--config-file /path/to/plugin/config.ini upgrade --delta <# of revs> + +Downgrade the database by a certain number of revisions: +$ neutron-db-manage --config-file /path/to/neutron.conf \ +--config-file /path/to/plugin/config.ini downgrade --delta <# of revs> + + +DEVELOPERS: +A database migration 
script is required when you submit a change to Neutron +that alters the database model definition. The migration script is a special +python file that includes code to update/downgrade the database to match the +changes in the model definition. Alembic will execute these scripts in order to +provide a linear migration path between revisions. The neutron-db-manage command +can be used to generate a migration template for you to complete. The operations +in the template are those supported by the Alembic migration library. + +$ neutron-db-manage --config-file /path/to/neutron.conf \ +--config-file /path/to/plugin/config.ini revision \ +-m "description of revision" \ +--autogenerate + +This generates a prepopulated template with the changes needed to match the +database state with the models. You should inspect the autogenerated template +to ensure that the proper models have been altered. + +In rare circumstances, you may want to start with an empty migration template +and manually author the changes necessary for an upgrade/downgrade. You can +create a blank file via: + +$ neutron-db-manage --config-file /path/to/neutron.conf \ +--config-file /path/to/plugin/config.ini revision \ +-m "description of revision" + +The migration timeline should remain linear so that there is a clear path when +upgrading/downgrading. 
To verify that the timeline does not branch, you can run +this command: +$ neutron-db-manage --config-file /path/to/neutron.conf \ +--config-file /path/to/plugin/config.ini check_migration + +If the migration path does branch, you can find the branch point via: +$ neutron-db-manage --config-file /path/to/neutron.conf \ +--config-file /path/to/plugin/config.ini history diff --git a/neutron/db/migration/__init__.py b/neutron/db/migration/__init__.py new file mode 100644 index 000000000..6b367233b --- /dev/null +++ b/neutron/db/migration/__init__.py @@ -0,0 +1,53 @@ +# Copyright 2012 New Dream Network, LLC (DreamHost) +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# +# @author: Mark McClain, DreamHost + +from alembic import op +import sqlalchemy as sa + +OVS_PLUGIN = ('neutron.plugins.openvswitch.ovs_neutron_plugin' + '.OVSNeutronPluginV2') +CISCO_PLUGIN = 'neutron.plugins.cisco.network_plugin.PluginV2' + + +def should_run(active_plugins, migrate_plugins): + if '*' in migrate_plugins: + return True + else: + if (CISCO_PLUGIN not in migrate_plugins and + OVS_PLUGIN in migrate_plugins): + migrate_plugins.append(CISCO_PLUGIN) + return set(active_plugins) & set(migrate_plugins) + + +def alter_enum(table, column, enum_type, nullable): + bind = op.get_bind() + engine = bind.engine + if engine.name == 'postgresql': + values = {'table': table, + 'column': column, + 'name': enum_type.name} + op.execute("ALTER TYPE %(name)s RENAME TO old_%(name)s" % values) + enum_type.create(bind, checkfirst=False) + op.execute("ALTER TABLE %(table)s RENAME COLUMN %(column)s TO " + "old_%(column)s" % values) + op.add_column(table, sa.Column(column, enum_type, nullable=nullable)) + op.execute("UPDATE %(table)s SET %(column)s = " + "old_%(column)s::text::%(name)s" % values) + op.execute("ALTER TABLE %(table)s DROP COLUMN old_%(column)s" % values) + op.execute("DROP TYPE old_%(name)s" % values) + else: + op.alter_column(table, column, type_=enum_type, + existing_nullable=nullable) diff --git a/neutron/db/migration/alembic.ini b/neutron/db/migration/alembic.ini new file mode 100644 index 000000000..83098afbd --- /dev/null +++ b/neutron/db/migration/alembic.ini @@ -0,0 +1,52 @@ +# A generic, single database configuration. 
+ +[alembic] +# path to migration scripts +script_location = %(here)s/alembic + +# template used to generate migration files +# file_template = %%(rev)s_%%(slug)s + +# set to 'true' to run the environment during +# the 'revision' command, regardless of autogenerate +# revision_environment = false + +# default to an empty string because the Neutron migration cli will +# extract the correct value and set it programmatically before alembic is fully +# invoked. +sqlalchemy.url = + +# Logging configuration +[loggers] +keys = root,sqlalchemy,alembic + +[handlers] +keys = console + +[formatters] +keys = generic + +[logger_root] +level = WARN +handlers = console +qualname = + +[logger_sqlalchemy] +level = WARN +handlers = +qualname = sqlalchemy.engine + +[logger_alembic] +level = INFO +handlers = +qualname = alembic + +[handler_console] +class = StreamHandler +args = (sys.stderr,) +level = NOTSET +formatter = generic + +[formatter_generic] +format = %(levelname)-5.5s [%(name)s] %(message)s +datefmt = %H:%M:%S diff --git a/neutron/db/migration/alembic_migrations/__init__.py b/neutron/db/migration/alembic_migrations/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/neutron/db/migration/alembic_migrations/common_ext_ops.py b/neutron/db/migration/alembic_migrations/common_ext_ops.py new file mode 100644 index 000000000..5663ff228 --- /dev/null +++ b/neutron/db/migration/alembic_migrations/common_ext_ops.py @@ -0,0 +1,83 @@ +# Copyright 2013 Openstack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. +# + +""" +Upgrade/downgrade operations for 'community' extensions +""" + +from alembic import op +import sqlalchemy as sa + + +def upgrade_l3(): + op.create_table( + 'routers', + sa.Column('tenant_id', sa.String(length=255), nullable=True), + sa.Column('id', sa.String(length=36), nullable=False), + sa.Column('name', sa.String(length=255), nullable=True), + sa.Column('status', sa.String(length=16), nullable=True), + sa.Column('admin_state_up', sa.Boolean(), nullable=True), + sa.Column('gw_port_id', sa.String(length=36), nullable=True), + sa.ForeignKeyConstraint(['gw_port_id'], ['ports.id'], ), + sa.PrimaryKeyConstraint('id') + ) + + op.create_table( + 'externalnetworks', + sa.Column('network_id', sa.String(length=36), nullable=False), + sa.ForeignKeyConstraint(['network_id'], ['networks.id'], + ondelete='CASCADE'), + sa.PrimaryKeyConstraint('network_id') + ) + + op.create_table( + 'floatingips', + sa.Column('tenant_id', sa.String(length=255), nullable=True), + sa.Column('id', sa.String(length=36), nullable=False), + sa.Column('floating_ip_address', sa.String(length=64), nullable=False), + sa.Column('floating_network_id', sa.String(length=36), nullable=False), + sa.Column('floating_port_id', sa.String(length=36), nullable=False), + sa.Column('fixed_port_id', sa.String(length=36), nullable=True), + sa.Column('fixed_ip_address', sa.String(length=64), nullable=True), + sa.Column('router_id', sa.String(length=36), nullable=True), + sa.ForeignKeyConstraint(['fixed_port_id'], ['ports.id'], ), + sa.ForeignKeyConstraint(['floating_port_id'], ['ports.id'], ), + sa.ForeignKeyConstraint(['router_id'], ['routers.id'], ), + sa.PrimaryKeyConstraint('id') + ) + + +def upgrade_quota(options=None): + if not (options or {}).get('folsom_quota_db_enabled'): + return + + op.create_table( + 'quotas', + sa.Column('id', sa.String(length=36), nullable=False), + sa.Column('tenant_id', 
sa.String(255), index=True), + sa.Column('resource', sa.String(255)), + sa.Column('limit', sa.Integer()), + sa.PrimaryKeyConstraint('id') + ) + + +def downgrade_l3(): + for table in ('floatingips', 'routers', 'externalnetworks'): + op.drop_table(table) + + +def downgrade_quota(options=None): + if (options or {}).get('folsom_quota_db_enabled'): + op.drop_table('quotas') diff --git a/neutron/db/migration/alembic_migrations/env.py b/neutron/db/migration/alembic_migrations/env.py new file mode 100644 index 000000000..885027792 --- /dev/null +++ b/neutron/db/migration/alembic_migrations/env.py @@ -0,0 +1,106 @@ +# Copyright 2012 New Dream Network, LLC (DreamHost) +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Mark McClain, DreamHost + +from logging import config as logging_config + +from alembic import context +from sqlalchemy import create_engine, pool + +from neutron.db import model_base +from neutron.openstack.common import importutils + + +DATABASE_QUOTA_DRIVER = 'neutron.extensions._quotav2_driver.DbQuotaDriver' + +# this is the Alembic Config object, which provides +# access to the values within the .ini file in use. +config = context.config +neutron_config = config.neutron_config + +# Interpret the config file for Python logging. +# This line sets up loggers basically. 
+logging_config.fileConfig(config.config_file_name) + +plugin_class_path = neutron_config.core_plugin +active_plugins = [plugin_class_path] +active_plugins += neutron_config.service_plugins + +for class_path in active_plugins: + importutils.import_class(class_path) + +# set the target for 'autogenerate' support +target_metadata = model_base.BASEV2.metadata + + +def run_migrations_offline(): + """Run migrations in 'offline' mode. + + This configures the context with either a URL + or an Engine. + + Calls to context.execute() here emit the given string to the + script output. + + """ + kwargs = dict() + if neutron_config.database.connection: + kwargs['url'] = neutron_config.database.connection + else: + kwargs['dialect_name'] = neutron_config.database.engine + context.configure(**kwargs) + + with context.begin_transaction(): + context.run_migrations(active_plugins=active_plugins, + options=build_options()) + + +def run_migrations_online(): + """Run migrations in 'online' mode. + + In this scenario we need to create an Engine + and associate a connection with the context. 
+ + """ + engine = create_engine( + neutron_config.database.connection, + poolclass=pool.NullPool) + + connection = engine.connect() + context.configure( + connection=connection, + target_metadata=target_metadata + ) + + try: + with context.begin_transaction(): + context.run_migrations(active_plugins=active_plugins, + options=build_options()) + finally: + connection.close() + + +def build_options(): + return {'folsom_quota_db_enabled': is_db_quota_enabled()} + + +def is_db_quota_enabled(): + return neutron_config.QUOTAS.quota_driver == DATABASE_QUOTA_DRIVER + + +if context.is_offline_mode(): + run_migrations_offline() +else: + run_migrations_online() diff --git a/neutron/db/migration/alembic_migrations/script.py.mako b/neutron/db/migration/alembic_migrations/script.py.mako new file mode 100644 index 000000000..eb3dc9e02 --- /dev/null +++ b/neutron/db/migration/alembic_migrations/script.py.mako @@ -0,0 +1,52 @@ +# Copyright ${create_date.year} OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""${message} + +Revision ID: ${up_revision} +Revises: ${down_revision} +Create Date: ${create_date} + +""" + +# revision identifiers, used by Alembic. 
+revision = ${repr(up_revision)} +down_revision = ${repr(down_revision)} + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + '${config.neutron_config.core_plugin}' +] + +from alembic import op +import sqlalchemy as sa +${imports if imports else ""} + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + ${upgrades if upgrades else "pass"} + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + ${downgrades if downgrades else "pass"} diff --git a/neutron/db/migration/alembic_migrations/versions/1064e98b7917_nec_pf_port_del.py b/neutron/db/migration/alembic_migrations/versions/1064e98b7917_nec_pf_port_del.py new file mode 100644 index 000000000..a4ddbab5a --- /dev/null +++ b/neutron/db/migration/alembic_migrations/versions/1064e98b7917_nec_pf_port_del.py @@ -0,0 +1,63 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""nec-pf-port-del + +Revision ID: 1064e98b7917 +Revises: 3d6fae8b70b0 +Create Date: 2013-09-24 05:33:54.602618 + +""" + +# revision identifiers, used by Alembic. 
+revision = '1064e98b7917' +down_revision = '3d6fae8b70b0' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.plugins.nec.nec_plugin.NECPluginV2' +] + +from alembic import op +import sqlalchemy as sa + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.alter_column('packetfilters', 'in_port', + existing_type=sa.String(length=36), + nullable=True) + op.create_foreign_key( + 'packetfilters_ibfk_2', + source='packetfilters', referent='ports', + local_cols=['in_port'], remote_cols=['id'], + ondelete='CASCADE') + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.drop_constraint('packetfilters_ibfk_2', 'packetfilters', 'foreignkey') + op.alter_column('packetfilters', 'in_port', + existing_type=sa.String(length=36), + nullable=False) diff --git a/neutron/db/migration/alembic_migrations/versions/10cd28e692e9_nuage_extraroute.py b/neutron/db/migration/alembic_migrations/versions/10cd28e692e9_nuage_extraroute.py new file mode 100644 index 000000000..2949813bb --- /dev/null +++ b/neutron/db/migration/alembic_migrations/versions/10cd28e692e9_nuage_extraroute.py @@ -0,0 +1,68 @@ +# Copyright 2014 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +"""nuage_extraroute + +Revision ID: 10cd28e692e9 +Revises: 1b837a7125a9 +Create Date: 2014-05-14 14:47:53.148132 + +""" + +# revision identifiers, used by Alembic. +revision = '10cd28e692e9' +down_revision = '1b837a7125a9' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.plugins.nuage.plugin.NuagePlugin' +] + +from alembic import op +import sqlalchemy as sa + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.create_table( + 'routerroutes_mapping', + sa.Column('router_id', sa.String(length=36), nullable=False), + sa.Column('nuage_route_id', sa.String(length=36), nullable=True), + sa.ForeignKeyConstraint(['router_id'], ['routers.id'], + ondelete='CASCADE'), + ) + op.create_table( + 'routerroutes', + sa.Column('destination', sa.String(length=64), nullable=False), + sa.Column('nexthop', sa.String(length=64), nullable=False), + sa.Column('router_id', sa.String(length=36), nullable=False), + sa.ForeignKeyConstraint(['router_id'], ['routers.id'], + ondelete='CASCADE'), + sa.PrimaryKeyConstraint('destination', 'nexthop', + 'router_id'), + ) + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.drop_table('routerroutes') + op.drop_table('routerroutes_mapping') diff --git a/neutron/db/migration/alembic_migrations/versions/1149d7de0cfa_port_security.py b/neutron/db/migration/alembic_migrations/versions/1149d7de0cfa_port_security.py new file mode 100644 index 000000000..8929fc920 --- /dev/null +++ b/neutron/db/migration/alembic_migrations/versions/1149d7de0cfa_port_security.py @@ -0,0 +1,84 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the 
License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""initial port security + +Revision ID: 1149d7de0cfa +Revises: 1b693c095aa3 +Create Date: 2013-01-22 14:05:20.696502 + +""" + +# revision identifiers, used by Alembic. +revision = '1149d7de0cfa' +down_revision = '1b693c095aa3' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.plugins.nicira.NeutronPlugin.NvpPluginV2', + 'neutron.plugins.nicira.NeutronServicePlugin.NvpAdvancedPlugin', + 'neutron.plugins.vmware.plugin.NsxPlugin', + 'neutron.plugins.vmware.plugin.NsxServicePlugin' +] + +from alembic import op +import sqlalchemy as sa + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + ### commands auto generated by Alembic - please adjust! 
### + op.create_table('networksecuritybindings', + sa.Column('network_id', sa.String(length=36), + nullable=False), + sa.Column('port_security_enabled', sa.Boolean(), + nullable=False), + sa.ForeignKeyConstraint(['network_id'], ['networks.id'], + ondelete='CASCADE'), + sa.PrimaryKeyConstraint('network_id')) + op.create_table('portsecuritybindings', + sa.Column('port_id', sa.String(length=36), + nullable=False), + sa.Column('port_security_enabled', sa.Boolean(), + nullable=False), + sa.ForeignKeyConstraint(['port_id'], ['ports.id'], + ondelete='CASCADE'), + sa.PrimaryKeyConstraint('port_id')) + ### end Alembic commands ### + + # Copy network and port ids over to network|port(securitybindings) table + # and set port_security_enabled to false as ip address pairs were not + # configured in NVP/NSX originally. + op.execute("INSERT INTO networksecuritybindings SELECT id as " + "network_id, False as port_security_enabled from networks") + op.execute("INSERT INTO portsecuritybindings SELECT id as port_id, " + "False as port_security_enabled from ports") + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + ### commands auto generated by Alembic - please adjust! ### + op.drop_table('portsecuritybindings') + op.drop_table('networksecuritybindings') + ### end Alembic commands ### diff --git a/neutron/db/migration/alembic_migrations/versions/117643811bca_nec_delete_ofc_mapping.py b/neutron/db/migration/alembic_migrations/versions/117643811bca_nec_delete_ofc_mapping.py new file mode 100644 index 000000000..8db943492 --- /dev/null +++ b/neutron/db/migration/alembic_migrations/versions/117643811bca_nec_delete_ofc_mapping.py @@ -0,0 +1,208 @@ +# Copyright 2014 NEC Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""nec: delete old ofc mapping tables + +Revision ID: 117643811bca +Revises: 81c553f3776c +Create Date: 2014-03-02 05:26:47.073318 + +""" + +# revision identifiers, used by Alembic. +revision = '117643811bca' +down_revision = '81c553f3776c' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.plugins.nec.nec_plugin.NECPluginV2' +] + +from alembic import op +import sqlalchemy as sa +from sqlalchemy.ext import compiler as sa_compiler +from sqlalchemy.sql import expression as sa_expr + +from neutron.db import migration + + +# sqlalchemy does not support the expression: +# INSERT INTO (, ...) (SELECT ...) +# The following class is to support this expression. +# Reference: http://docs.sqlalchemy.org/en/rel_0_9/core/compiler.html +# section: "Compiling sub-elements of a custom expression construct" + +class InsertFromSelect(sa_expr.Executable, sa_expr.ClauseElement): + _execution_options = (sa_expr.Executable._execution_options. 
+ union({'autocommit': True})) + + def __init__(self, insert_spec, select): + self.insert_spec = insert_spec + self.select = select + + +@sa_compiler.compiles(InsertFromSelect) +def visit_insert_from_select(element, compiler, **kw): + if type(element.insert_spec) == list: + columns = [] + for column in element.insert_spec: + columns.append(column.name) + table = compiler.process(element.insert_spec[0].table, asfrom=True) + columns = ", ".join(columns) + sql = ("INSERT INTO %s (%s) (%s)" % + (table, columns, compiler.process(element.select))) + else: + sql = ("INSERT INTO %s (%s)" % + (compiler.process(element.insert_spec, asfrom=True), + compiler.process(element.select))) + return sql + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + # Table definitions below are only used for sqlalchemy to generate + # SQL statements, so in networks/ports tables only required field + # are declared. Note that 'quantum_id' in OFC ID mapping tables + # will be renamed in a later patch (bug 1287432). 
+ + ofctenants = sa_expr.table( + 'ofctenants', + sa_expr.column('id'), + sa_expr.column('quantum_id')) + ofcnetworks = sa_expr.table( + 'ofcnetworks', + sa_expr.column('id'), + sa_expr.column('quantum_id')) + ofcports = sa_expr.table( + 'ofcports', + sa_expr.column('id'), + sa_expr.column('quantum_id')) + ofcfilters = sa_expr.table( + 'ofcfilters', + sa_expr.column('id'), + sa_expr.column('quantum_id')) + + ofctenantmappings = sa_expr.table( + 'ofctenantmappings', + sa_expr.column('ofc_id'), + sa_expr.column('quantum_id')) + ofcnetworkmappings = sa_expr.table( + 'ofcnetworkmappings', + sa_expr.column('ofc_id'), + sa_expr.column('quantum_id')) + ofcportmappings = sa_expr.table( + 'ofcportmappings', + sa_expr.column('ofc_id'), + sa_expr.column('quantum_id')) + ofcfiltermappings = sa_expr.table( + 'ofcfiltermappings', + sa_expr.column('ofc_id'), + sa_expr.column('quantum_id')) + + networks = sa_expr.table( + 'networks', + sa_expr.column('id'), + sa_expr.column('tenant_id')) + ports = sa_expr.table( + 'ports', + sa_expr.column('id'), + sa_expr.column('network_id')) + + # ofctenants -> ofctenantmappings + select_obj = sa.select([ofctenants.c.quantum_id, + op.inline_literal('/tenants/') + ofctenants.c.id]) + stmt = InsertFromSelect([ofctenantmappings.c.quantum_id, + ofctenantmappings.c.ofc_id], + select_obj) + op.execute(stmt) + + # ofcnetworks -> ofcnetworkmappings + select_obj = ofcnetworks.join( + networks, + ofcnetworks.c.quantum_id == networks.c.id) + select_obj = select_obj.join( + ofctenantmappings, + ofctenantmappings.c.quantum_id == networks.c.tenant_id) + select_obj = sa.select( + [ofcnetworks.c.quantum_id, + (ofctenantmappings.c.ofc_id + + op.inline_literal('/networks/') + ofcnetworks.c.id)], + from_obj=select_obj) + stmt = InsertFromSelect([ofcnetworkmappings.c.quantum_id, + ofcnetworkmappings.c.ofc_id], + select_obj) + op.execute(stmt) + + # ofcports -> ofcportmappings + select_obj = ofcports.join(ports, ofcports.c.quantum_id == ports.c.id) + select_obj = 
select_obj.join( + ofcnetworkmappings, + ofcnetworkmappings.c.quantum_id == ports.c.network_id) + select_obj = sa.select( + [ofcports.c.quantum_id, + (ofcnetworkmappings.c.ofc_id + + op.inline_literal('/ports/') + ofcports.c.id)], + from_obj=select_obj) + stmt = InsertFromSelect([ofcportmappings.c.quantum_id, + ofcportmappings.c.ofc_id], + select_obj) + op.execute(stmt) + + # ofcfilters -> ofcfiltermappings + select_obj = sa.select([ofcfilters.c.quantum_id, + op.inline_literal('/filters/') + ofcfilters.c.id]) + stmt = InsertFromSelect([ofcfiltermappings.c.quantum_id, + ofcfiltermappings.c.ofc_id], + select_obj) + op.execute(stmt) + + # drop old mapping tables + op.drop_table('ofctenants') + op.drop_table('ofcnetworks') + op.drop_table('ofcports') + op.drop_table('ofcfilters') + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.create_table( + 'ofctenants', + sa.Column('id', sa.String(length=36), nullable=False), + sa.Column('quantum_id', sa.String(length=36), nullable=False), + sa.PrimaryKeyConstraint('id') + ) + op.create_table( + 'ofcnetworks', + sa.Column('id', sa.String(length=36), nullable=False), + sa.Column('quantum_id', sa.String(length=36), nullable=False), + sa.PrimaryKeyConstraint('id') + ) + op.create_table( + 'ofcports', + sa.Column('id', sa.String(length=36), nullable=False), + sa.Column('quantum_id', sa.String(length=36), nullable=False), + sa.PrimaryKeyConstraint('id') + ) + op.create_table( + 'ofcfilters', + sa.Column('id', sa.String(length=36), nullable=False), + sa.Column('quantum_id', sa.String(length=36), nullable=False), + sa.PrimaryKeyConstraint('id') + ) diff --git a/neutron/db/migration/alembic_migrations/versions/11c6e18605c8_pool_monitor_status_.py b/neutron/db/migration/alembic_migrations/versions/11c6e18605c8_pool_monitor_status_.py new file mode 100644 index 000000000..598f2ab32 --- /dev/null +++ 
b/neutron/db/migration/alembic_migrations/versions/11c6e18605c8_pool_monitor_status_.py @@ -0,0 +1,62 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""Pool Monitor status field + +Revision ID: 11c6e18605c8 +Revises: 52ff27f7567a +Create Date: 2013-07-10 06:07:20.878520 + +""" + +# revision identifiers, used by Alembic. +revision = '11c6e18605c8' +down_revision = '52ff27f7567a' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.services.loadbalancer.plugin.LoadBalancerPlugin', +] + +from alembic import op +import sqlalchemy as sa + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.add_column('poolmonitorassociations', sa.Column('status', + sa.String(16), + server_default='', + nullable=False)) + op.add_column('poolmonitorassociations', sa.Column('status_description', + sa.String(255))) + + # Set status to ACTIVE for existing associations + op.execute("UPDATE poolmonitorassociations SET status='ACTIVE'") + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.drop_column('poolmonitorassociations', 'status') + op.drop_column('poolmonitorassociations', 'status_description') diff --git 
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

"""ext_gw_mode

Revision ID: 128e042a2b68
Revises: 32b517556ec9
Create Date: 2013-03-27 00:35:17.323280

"""

# revision identifiers, used by Alembic.
revision = '128e042a2b68'
down_revision = '32b517556ec9'

# Change to ['*'] if this migration applies to all plugins
migration_for_plugins = [
    'neutron.plugins.hyperv.hyperv_neutron_plugin.HyperVNeutronPlugin',
    'neutron.plugins.linuxbridge.lb_neutron_plugin.LinuxBridgePluginV2',
    'neutron.plugins.metaplugin.meta_neutron_plugin.MetaPluginV2',
    'neutron.plugins.ml2.plugin.Ml2Plugin',
    'neutron.plugins.nec.nec_plugin.NECPluginV2',
    'neutron.plugins.nicira.NeutronPlugin.NvpPluginV2',
    'neutron.plugins.nicira.NeutronServicePlugin.NvpAdvancedPlugin',
    'neutron.plugins.openvswitch.ovs_neutron_plugin.OVSNeutronPluginV2',
    'neutron.plugins.ryu.ryu_neutron_plugin.RyuNeutronPluginV2',
    'neutron.plugins.vmware.plugin.NsxPlugin',
    'neutron.plugins.vmware.plugin.NsxServicePlugin',
    'neutron.plugins.embrane.plugins.embrane_ovs_plugin.EmbraneOvsPlugin',
    'neutron.plugins.ibm.sdnve_neutron_plugin.SdnvePluginV2',
    'neutron.plugins.oneconvergence.plugin.OneConvergencePluginV2',
    'neutron.plugins.cisco.network_plugin.PluginV2',
]

from alembic import op
import sqlalchemy as sa


from neutron.db import migration


def upgrade(active_plugins=None, options=None):
    """Add routers.enable_snat, defaulting existing routers to True."""
    if not migration.should_run(active_plugins, migration_for_plugins):
        return

    # NOTE: a Python-side ``default`` is never applied by ALTER TABLE, so
    # adding a NOT NULL column with only ``default=True`` can fail on
    # backends that validate the constraint against pre-existing rows.
    # A server_default makes the DDL itself backfill True.
    op.add_column('routers', sa.Column('enable_snat', sa.Boolean(),
                                       nullable=False,
                                       server_default=sa.sql.true()))
    # Kept from the original migration for belt-and-braces parity: make
    # absolutely sure existing routers end up with enable_snat=True.
    op.execute("UPDATE routers SET enable_snat=True")


def downgrade(active_plugins=None, options=None):
    """Drop the enable_snat column added by upgrade()."""
    if not migration.should_run(active_plugins, migration_for_plugins):
        return

    op.drop_column('routers', 'enable_snat')
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

"""nvp_net_binding

Revision ID: 1341ed32cc1e
Revises: 4692d074d587
Create Date: 2013-02-26 01:28:29.182195

"""

# revision identifiers, used by Alembic.
revision = '1341ed32cc1e'
down_revision = '4692d074d587'

# Change to ['*'] if this migration applies to all plugins
migration_for_plugins = [
    'neutron.plugins.nicira.NeutronPlugin.NvpPluginV2',
    'neutron.plugins.nicira.NeutronServicePlugin.NvpAdvancedPlugin',
    'neutron.plugins.vmware.plugin.NsxPlugin',
    'neutron.plugins.vmware.plugin.NsxServicePlugin'
]

from alembic import op
import sqlalchemy as sa


from neutron.db import migration

# Upgrade widens the binding_type enum with 'l3_ext'; downgrade narrows it.
new_type = sa.Enum('flat', 'vlan', 'stt', 'gre', 'l3_ext',
                   name='nvp_network_bindings_binding_type')
old_type = sa.Enum('flat', 'vlan', 'stt', 'gre',
                   name='nvp_network_bindings_binding_type')


def _rename_and_retype(current_name, target_name, target_enum):
    """Rename the uuid column and swap the binding_type enum in one pass."""
    op.alter_column('nvp_network_bindings', current_name,
                    name=target_name,
                    existing_type=sa.String(36),
                    existing_nullable=True)
    migration.alter_enum('nvp_network_bindings', 'binding_type', target_enum,
                         nullable=False)


def upgrade(active_plugins=None, options=None):
    """Rename tz_uuid to phy_uuid and extend the binding-type enum."""
    if not migration.should_run(active_plugins, migration_for_plugins):
        return
    _rename_and_retype('tz_uuid', 'phy_uuid', new_type)


def downgrade(active_plugins=None, options=None):
    """Revert the column rename and restore the original enum values."""
    if not migration.should_run(active_plugins, migration_for_plugins):
        return
    _rename_and_retype('phy_uuid', 'tz_uuid', old_type)
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

"""nec_add_pf_name

Revision ID: 13de305df56e
Revises: b7a8863760e
Create Date: 2013-07-06 00:42:26.991175

"""

# revision identifiers, used by Alembic.
revision = '13de305df56e'
down_revision = 'b7a8863760e'

# Change to ['*'] if this migration applies to all plugins
migration_for_plugins = [
    'neutron.plugins.nec.nec_plugin.NECPluginV2'
]

from alembic import op
import sqlalchemy as sa


from neutron.db import migration

# Table and column touched by this migration.
_TABLE = 'packetfilters'
_COLUMN = 'name'


def upgrade(active_plugins=None, options=None):
    """Add an optional human-readable name to NEC packet filters."""
    if not migration.should_run(active_plugins, migration_for_plugins):
        return

    op.add_column(_TABLE,
                  sa.Column(_COLUMN, sa.String(length=255), nullable=True))


def downgrade(active_plugins=None, options=None):
    """Remove the packet-filter name column."""
    if not migration.should_run(active_plugins, migration_for_plugins):
        return

    op.drop_column(_TABLE, _COLUMN)
+# + +"""NSX DHCP/metadata support + +Revision ID: 1421183d533f +Revises: 8f682276ee4 +Create Date: 2013-10-11 14:33:37.303215 + +""" + +revision = '1421183d533f' +down_revision = '8f682276ee4' + +migration_for_plugins = [ + 'neutron.plugins.nicira.NeutronPlugin.NvpPluginV2', + 'neutron.plugins.nicira.NeutronServicePlugin.NvpAdvancedPlugin', + 'neutron.plugins.vmware.plugin.NsxPlugin', + 'neutron.plugins.vmware.plugin.NsxServicePlugin' +] + +from alembic import op +import sqlalchemy as sa + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.create_table( + 'lsn', + sa.Column('net_id', + sa.String(length=36), nullable=False), + sa.Column('lsn_id', + sa.String(length=36), nullable=False), + sa.PrimaryKeyConstraint('lsn_id')) + + op.create_table( + 'lsn_port', + sa.Column('lsn_port_id', + sa.String(length=36), nullable=False), + sa.Column('lsn_id', + sa.String(length=36), nullable=False), + sa.Column('sub_id', + sa.String(length=36), nullable=False, unique=True), + sa.Column('mac_addr', + sa.String(length=32), nullable=False, unique=True), + sa.ForeignKeyConstraint(['lsn_id'], ['lsn.lsn_id'], + ondelete='CASCADE'), + sa.PrimaryKeyConstraint('lsn_port_id')) + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.drop_table('lsn_port') + op.drop_table('lsn') diff --git a/neutron/db/migration/alembic_migrations/versions/14f24494ca31_arista_ml2.py b/neutron/db/migration/alembic_migrations/versions/14f24494ca31_arista_ml2.py new file mode 100644 index 000000000..4ce63f9c0 --- /dev/null +++ b/neutron/db/migration/alembic_migrations/versions/14f24494ca31_arista_ml2.py @@ -0,0 +1,78 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file 
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

"""DB Migration for Arista ml2 mechanism driver

Revision ID: 14f24494ca31
Revises: 2a3bae1ceb8
Create Date: 2013-08-15 18:54:16.083640

"""

# revision identifiers, used by Alembic.
revision = '14f24494ca31'
down_revision = '2a3bae1ceb8'

# Change to ['*'] if this migration applies to all plugins
migration_for_plugins = [
    'neutron.plugins.ml2.plugin.Ml2Plugin'
]

from alembic import op
import sqlalchemy as sa

from neutron.db import migration

# Creation order; downgrade drops them in reverse.
_TABLES = ('arista_provisioned_nets',
           'arista_provisioned_vms',
           'arista_provisioned_tenants')


def _create_provisioning_table(name, extra_columns):
    """Create one Arista provisioning table: tenant_id/id plus extras."""
    columns = [
        sa.Column('tenant_id', sa.String(length=255), nullable=True),
        sa.Column('id', sa.String(length=36), nullable=False),
    ]
    columns.extend(extra_columns)
    columns.append(sa.PrimaryKeyConstraint('id'))
    op.create_table(name, *columns)


def upgrade(active_plugins=None, options=None):
    """Create the tables tracking state provisioned on Arista hardware."""
    if not migration.should_run(active_plugins, migration_for_plugins):
        return

    _create_provisioning_table(
        'arista_provisioned_nets',
        [sa.Column('network_id', sa.String(length=36), nullable=True),
         sa.Column('segmentation_id', sa.Integer(),
                   autoincrement=False, nullable=True)])

    _create_provisioning_table(
        'arista_provisioned_vms',
        [sa.Column('vm_id', sa.String(length=255), nullable=True),
         sa.Column('host_id', sa.String(length=255), nullable=True),
         sa.Column('port_id', sa.String(length=36), nullable=True),
         sa.Column('network_id', sa.String(length=36), nullable=True)])

    _create_provisioning_table('arista_provisioned_tenants', [])


def downgrade(active_plugins=None, options=None):
    """Drop the Arista provisioning tables."""
    if not migration.should_run(active_plugins, migration_for_plugins):
        return

    for table_name in reversed(_TABLES):
        op.drop_table(table_name)
# Copyright 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

"""ml2 binding:profile

Revision ID: 157a5d299379
Revises: 50d5ba354c23
Create Date: 2014-02-13 23:48:25.147279

"""

# revision identifiers, used by Alembic.
revision = '157a5d299379'
down_revision = '50d5ba354c23'

# Change to ['*'] if this migration applies to all plugins
migration_for_plugins = [
    'neutron.plugins.ml2.plugin.Ml2Plugin'
]

from alembic import op
import sqlalchemy as sa

from neutron.db import migration


def upgrade(active_plugins=None, options=None):
    """Add the binding:profile storage column to ml2_port_bindings."""
    if not migration.should_run(active_plugins, migration_for_plugins):
        return

    # server_default='' lets the NOT NULL constraint apply to existing rows.
    profile_column = sa.Column('profile', sa.String(length=4095),
                               nullable=False, server_default='')
    op.add_column('ml2_port_bindings', profile_column)


def downgrade(active_plugins=None, options=None):
    """Remove the binding:profile column."""
    if not migration.should_run(active_plugins, migration_for_plugins):
        return

    op.drop_column('ml2_port_bindings', 'profile')
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

"""Add portbindings db

Revision ID: 176a85fc7d79
Revises: f489cf14a79c
Create Date: 2013-03-21 14:59:53.052600

"""

# revision identifiers, used by Alembic.
revision = '176a85fc7d79'
down_revision = 'f489cf14a79c'

# Change to ['*'] if this migration applies to all plugins
migration_for_plugins = [
    'neutron.plugins.bigswitch.plugin.NeutronRestProxyV2',
    'neutron.plugins.linuxbridge.lb_neutron_plugin.LinuxBridgePluginV2',
    'neutron.plugins.openvswitch.ovs_neutron_plugin.OVSNeutronPluginV2',
    'neutron.plugins.nicira.NeutronPlugin.NvpPluginV2',
    'neutron.plugins.nicira.NeutronServicePlugin.NvpAdvancedPlugin',
    'neutron.plugins.vmware.plugin.NsxPlugin',
    'neutron.plugins.vmware.plugin.NsxServicePlugin',
    'neutron.plugins.ibm.sdnve_neutron_plugin.SdnvePluginV2',
    'neutron.plugins.oneconvergence.plugin.OneConvergencePluginV2',
]

from alembic import op
import sqlalchemy as sa

from neutron.db import migration


def upgrade(active_plugins=None, options=None):
    """Create the port-to-host binding table.

    Rows follow their port's lifetime via ON DELETE CASCADE.
    """
    if not migration.should_run(active_plugins, migration_for_plugins):
        return

    op.create_table(
        'portbindingports',
        sa.Column('port_id', sa.String(length=36), nullable=False),
        sa.Column('host', sa.String(length=255), nullable=False),
        sa.ForeignKeyConstraint(['port_id'], ['ports.id'],
                                ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('port_id')
    )


def downgrade(active_plugins=None, options=None):
    """Drop the port-binding table."""
    if not migration.should_run(active_plugins, migration_for_plugins):
        return
    op.drop_table('portbindingports')
# Copyright 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

"""nsx_gw_devices

Revision ID: 19180cf98af6
Revises: 117643811bca
Create Date: 2014-02-26 02:46:26.151741

"""

# revision identifiers, used by Alembic.
revision = '19180cf98af6'
down_revision = '117643811bca'

# Change to ['*'] if this migration applies to all plugins
migration_for_plugins = [
    'neutron.plugins.nicira.NeutronPlugin.NvpPluginV2',
    'neutron.plugins.nicira.NeutronServicePlugin.NvpAdvancedPlugin',
    'neutron.plugins.vmware.plugin.NsxPlugin',
    'neutron.plugins.vmware.plugin.NsxServicePlugin'
]

from alembic import op
import sqlalchemy as sa

from neutron.db import migration


def upgrade(active_plugins=None, options=None):
    """Split gateway-device data into references plus a device table.

    The old networkgatewaydevices rows become
    networkgatewaydevicereferences rows, and a new per-tenant
    networkgatewaydevices table is created and seeded from them.
    """
    if not migration.should_run(active_plugins, migration_for_plugins):
        return

    op.create_table(
        'networkgatewaydevicereferences',
        sa.Column('id', sa.String(length=36), nullable=False),
        sa.Column('network_gateway_id', sa.String(length=36), nullable=True),
        sa.Column('interface_name', sa.String(length=64), nullable=True),
        sa.ForeignKeyConstraint(['network_gateway_id'], ['networkgateways.id'],
                                ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('id', 'network_gateway_id', 'interface_name'),
        mysql_engine='InnoDB')
    # Preserve the old device rows as references before dropping the table.
    op.execute("INSERT INTO networkgatewaydevicereferences SELECT "
               "id, network_gateway_id, interface_name FROM "
               "networkgatewaydevices")
    op.drop_table('networkgatewaydevices')

    # Re-create networkgatewaydevices with its new, tenant-scoped schema.
    op.create_table(
        'networkgatewaydevices',
        sa.Column('tenant_id', sa.String(length=255), nullable=True),
        sa.Column('id', sa.String(length=36), nullable=False),
        sa.Column('nsx_id', sa.String(length=36), nullable=True),
        sa.Column('name', sa.String(length=255), nullable=True),
        sa.Column('connector_type', sa.String(length=10), nullable=True),
        sa.Column('connector_ip', sa.String(length=64), nullable=True),
        sa.Column('status', sa.String(length=16), nullable=True),
        sa.PrimaryKeyConstraint('id'),
        mysql_engine='InnoDB')
    # Seed one device per existing reference; for those rows the NSX
    # identifier equals the Neutron identifier.  Connector details are
    # unknown for pre-existing devices and are deliberately left NULL.
    op.execute("INSERT INTO networkgatewaydevices (id, nsx_id, tenant_id) "
               "SELECT gw_dev_ref.id, gw_dev_ref.id as nsx_id, tenant_id "
               "FROM networkgatewaydevicereferences AS gw_dev_ref "
               "INNER JOIN networkgateways AS net_gw ON "
               "gw_dev_ref.network_gateway_id=net_gw.id")


def downgrade(active_plugins=None, options=None):
    """Restore the original single networkgatewaydevices table."""
    if not migration.should_run(active_plugins, migration_for_plugins):
        return

    op.drop_table('networkgatewaydevices')
    # Previous schema of networkgatewaydevices.
    op.create_table(
        'networkgatewaydevices',
        sa.Column('id', sa.String(length=36), nullable=False),
        sa.Column('network_gateway_id', sa.String(length=36), nullable=True),
        sa.Column('interface_name', sa.String(length=64), nullable=True),
        sa.ForeignKeyConstraint(['network_gateway_id'], ['networkgateways.id'],
                                ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('id'),
        mysql_engine='InnoDB')
    # Move the reference rows back, then drop the reference table.
    op.execute("INSERT INTO networkgatewaydevices SELECT "
               "id, network_gateway_id, interface_name FROM "
               "networkgatewaydevicereferences")
    op.drop_table('networkgatewaydevicereferences')
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

"""nsx_sec_group_mapping

Revision ID: 1b2580001654
Revises: abc88c33f74f
Create Date: 2013-12-27 13:02:42.894648

"""

# revision identifiers, used by Alembic.
revision = '1b2580001654'
down_revision = 'abc88c33f74f'

# Change to ['*'] if this migration applies to all plugins
migration_for_plugins = [
    'neutron.plugins.nicira.NeutronPlugin.NvpPluginV2',
    'neutron.plugins.nicira.NeutronServicePlugin.NvpAdvancedPlugin',
    'neutron.plugins.vmware.plugin.NsxPlugin',
    'neutron.plugins.vmware.plugin.NsxServicePlugin'
]

from alembic import op
import sqlalchemy as sa

from neutron.db import migration


def upgrade(active_plugins=None, options=None):
    """Create and seed the Neutron-to-NSX security-group mapping table."""
    if not migration.should_run(active_plugins, migration_for_plugins):
        return

    # Mapping rows are removed together with their security group
    # (ON DELETE CASCADE on neutron_id).
    op.create_table(
        'neutron_nsx_security_group_mappings',
        sa.Column('neutron_id', sa.String(length=36), nullable=False),
        sa.Column('nsx_id', sa.String(length=36), nullable=False),
        sa.ForeignKeyConstraint(['neutron_id'], ['securitygroups.id'],
                                ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('neutron_id', 'nsx_id'))
    # Seed one identity mapping per existing security group.
    op.execute("INSERT INTO neutron_nsx_security_group_mappings SELECT id,id "
               "from securitygroups")


def downgrade(active_plugins=None, options=None):
    """Drop the security-group mapping table."""
    if not migration.should_run(active_plugins, migration_for_plugins):
        return
    op.drop_table('neutron_nsx_security_group_mappings')
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

"""Quota ext support added in Grizzly

Revision ID: 1b693c095aa3
Revises: 2a6d0b51f4bb
Create Date: 2013-01-19 02:58:17.667524

"""
# NOTE(review): the docstring above previously said "Revises: 1d76643bcec4",
# which contradicted the down_revision below; the doc is now aligned with
# the value Alembic actually uses.

# revision identifiers, used by Alembic.
revision = '1b693c095aa3'
down_revision = '2a6d0b51f4bb'

# Change to ['*'] if this migration applies to all plugins
migration_for_plugins = [
    'neutron.plugins.nec.nec_plugin.NECPluginV2'
]

from alembic import op
import sqlalchemy as sa

from neutron.db import migration


def upgrade(active_plugins=None, options=None):
    """Create the per-tenant quotas table."""
    if not migration.should_run(active_plugins, migration_for_plugins):
        return

    op.create_table(
        'quotas',
        sa.Column('id', sa.String(length=36), nullable=False),
        sa.Column('tenant_id', sa.String(length=255), nullable=True),
        sa.Column('resource', sa.String(length=255), nullable=True),
        sa.Column('limit', sa.Integer(), nullable=True),
        sa.PrimaryKeyConstraint('id')
    )


def downgrade(active_plugins=None, options=None):
    """Drop the quotas table."""
    if not migration.should_run(active_plugins, migration_for_plugins):
        return

    op.drop_table('quotas')
# Copyright 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

"""Cisco APIC Mechanism Driver

Revision ID: 1b837a7125a9
Revises: 6be312499f9
Create Date: 2014-02-13 09:35:19.147619

"""

# revision identifiers, used by Alembic.
revision = '1b837a7125a9'
down_revision = '6be312499f9'

migration_for_plugins = [
    'neutron.plugins.ml2.plugin.Ml2Plugin'
]

from alembic import op
import sqlalchemy as sa

from neutron.db import migration

# Creation order; downgrade drops in reverse.
_TABLES = ('cisco_ml2_apic_epgs',
           'cisco_ml2_apic_port_profiles',
           'cisco_ml2_apic_contracts')


def upgrade(active_plugins=None, options=None):
    """Create the APIC mechanism-driver bookkeeping tables."""
    if not migration.should_run(active_plugins, migration_for_plugins):
        return

    # Endpoint groups, one row per network.
    op.create_table(
        'cisco_ml2_apic_epgs',
        sa.Column('network_id', sa.String(length=255), nullable=False),
        sa.Column('epg_id', sa.String(length=64), nullable=False),
        sa.Column('segmentation_id', sa.String(length=64), nullable=False),
        sa.Column('provider', sa.Boolean(), default=False, nullable=False),
        sa.PrimaryKeyConstraint('network_id'))

    # Switch port profiles, one row per node.
    op.create_table(
        'cisco_ml2_apic_port_profiles',
        sa.Column('node_id', sa.String(length=255), nullable=False),
        sa.Column('profile_id', sa.String(length=64), nullable=False),
        sa.Column('hpselc_id', sa.String(length=64), nullable=False),
        sa.Column('module', sa.String(length=10), nullable=False),
        sa.Column('from_port', sa.Integer(), nullable=False),
        sa.Column('to_port', sa.Integer(), nullable=False),
        sa.PrimaryKeyConstraint('node_id'))

    # Contracts, one row per tenant.
    op.create_table(
        'cisco_ml2_apic_contracts',
        sa.Column('tenant_id', sa.String(length=255), nullable=False),
        sa.Column('contract_id', sa.String(length=64), nullable=False),
        sa.Column('filter_id', sa.String(length=64), nullable=False),
        sa.PrimaryKeyConstraint('tenant_id'))


def downgrade(active_plugins=None, options=None):
    """Drop the APIC tables."""
    if not migration.should_run(active_plugins, migration_for_plugins):
        return

    for table_name in reversed(_TABLES):
        op.drop_table(table_name)
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

"""Support routing table configuration on Router

Revision ID: 1c33fa3cd1a1
Revises: 45680af419f9
Create Date: 2013-01-17 14:35:09.386975

"""

# revision identifiers, used by Alembic.
revision = '1c33fa3cd1a1'
down_revision = '45680af419f9'

# Change to ['*'] if this migration applies to all plugins
migration_for_plugins = [
    'neutron.plugins.openvswitch.ovs_neutron_plugin.OVSNeutronPluginV2',
    'neutron.plugins.linuxbridge.lb_neutron_plugin.LinuxBridgePluginV2',
    'neutron.plugins.metaplugin.meta_neutron_plugin.MetaPluginV2',
    'neutron.plugins.ml2.plugin.Ml2Plugin',
    'neutron.plugins.nec.nec_plugin.NECPluginV2',
    'neutron.plugins.nicira.NeutronPlugin.NvpPluginV2',
    'neutron.plugins.nicira.NeutronServicePlugin.NvpAdvancedPlugin',
    'neutron.plugins.ryu.ryu_neutron_plugin.RyuNeutronPluginV2',
    'neutron.plugins.vmware.plugin.NsxPlugin',
    'neutron.plugins.vmware.plugin.NsxServicePlugin',
    'neutron.plugins.oneconvergence.plugin.OneConvergencePluginV2',
    'neutron.plugins.cisco.network_plugin.PluginV2',
    'neutron.plugins.bigswitch.plugin.NeutronRestProxyV2'
]

from alembic import op
import sqlalchemy as sa

from neutron.db import migration


def upgrade(active_plugins=None, options=None):
    """Rename routes to subnetroutes and add the routerroutes table."""
    if not migration.should_run(active_plugins, migration_for_plugins):
        return

    # Free up the generic 'routes' name for subnet host routes.
    op.rename_table('routes', 'subnetroutes')
    # Per-router extra routes; rows vanish with their router (CASCADE).
    op.create_table(
        'routerroutes',
        sa.Column('destination', sa.String(length=64), nullable=False),
        sa.Column('nexthop', sa.String(length=64), nullable=False),
        sa.Column('router_id', sa.String(length=36), nullable=False),
        sa.ForeignKeyConstraint(['router_id'], ['routers.id'],
                                ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('destination', 'nexthop', 'router_id')
    )


def downgrade(active_plugins=None, options=None):
    """Undo the table rename and drop the router-routes table."""
    if not migration.should_run(active_plugins, migration_for_plugins):
        return

    op.rename_table('subnetroutes', 'routes')
    op.drop_table('routerroutes')
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

"""nvp_netbinding

Revision ID: 1d76643bcec4
Revises: 3cb5d900c5de
Create Date: 2013-01-15 07:36:10.024346

"""

# revision identifiers, used by Alembic.
revision = '1d76643bcec4'
down_revision = '3cb5d900c5de'

# Change to ['*'] if this migration applies to all plugins
migration_for_plugins = [
    'neutron.plugins.nicira.NeutronPlugin.NvpPluginV2',
    'neutron.plugins.nicira.NeutronServicePlugin.NvpAdvancedPlugin',
    'neutron.plugins.vmware.plugin.NsxPlugin',
    'neutron.plugins.vmware.plugin.NsxServicePlugin'
]

from alembic import op
import sqlalchemy as sa

from neutron.db import migration


def upgrade(active_plugins=None, options=None):
    """Create the per-network NVP transport binding table."""
    if not migration.should_run(active_plugins, migration_for_plugins):
        return

    binding_type = sa.Enum('flat', 'vlan', 'stt', 'gre',
                           name='nvp_network_bindings_binding_type')
    # Bindings are removed together with their network (CASCADE).
    op.create_table(
        'nvp_network_bindings',
        sa.Column('network_id', sa.String(length=36), nullable=False),
        sa.Column('binding_type', binding_type, nullable=False),
        sa.Column('tz_uuid', sa.String(length=36), nullable=True),
        sa.Column('vlan_id', sa.Integer(), nullable=True),
        sa.ForeignKeyConstraint(['network_id'], ['networks.id'],
                                ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('network_id'))


def downgrade(active_plugins=None, options=None):
    """Drop the NVP network binding table."""
    if not migration.should_run(active_plugins, migration_for_plugins):
        return

    op.drop_table('nvp_network_bindings')
+revision = '1e5dd1d09b22' +down_revision = '54f7549a0e5f' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.services.loadbalancer.plugin.LoadBalancerPlugin' +] + +from alembic import op +import sqlalchemy as sa + + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.alter_column('poolstatisticss', 'bytes_in', nullable=False, + existing_type=sa.BigInteger()) + op.alter_column('poolstatisticss', 'bytes_out', nullable=False, + existing_type=sa.BigInteger()) + op.alter_column('poolstatisticss', 'active_connections', nullable=False, + existing_type=sa.BigInteger()) + op.alter_column('poolstatisticss', 'total_connections', nullable=False, + existing_type=sa.BigInteger()) + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.alter_column('poolstatisticss', 'bytes_in', nullable=True, + existing_type=sa.BigInteger()) + op.alter_column('poolstatisticss', 'bytes_out', nullable=True, + existing_type=sa.BigInteger()) + op.alter_column('poolstatisticss', 'active_connections', nullable=True, + existing_type=sa.BigInteger()) + op.alter_column('poolstatisticss', 'total_connections', nullable=True, + existing_type=sa.BigInteger()) diff --git a/neutron/db/migration/alembic_migrations/versions/1efb85914233_allowedaddresspairs.py b/neutron/db/migration/alembic_migrations/versions/1efb85914233_allowedaddresspairs.py new file mode 100644 index 000000000..e1882d059 --- /dev/null +++ b/neutron/db/migration/alembic_migrations/versions/1efb85914233_allowedaddresspairs.py @@ -0,0 +1,67 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""allowedaddresspairs + +Revision ID: 1efb85914233 +Revises: 51b4de912379 +Create Date: 2013-07-23 12:56:00.402855 + +""" + +# revision identifiers, used by Alembic. +revision = '1efb85914233' +down_revision = '51b4de912379' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.plugins.cisco.network_plugin.PluginV2', + 'neutron.plugins.nicira.NeutronPlugin.NvpPluginV2', + 'neutron.plugins.nicira.NeutronServicePlugin.NvpAdvancedPlugin', + 'neutron.plugins.ml2.plugin.Ml2Plugin', + 'neutron.plugins.nec.nec_plugin.NECPluginV2', + 'neutron.plugins.openvswitch.ovs_neutron_plugin.OVSNeutronPluginV2', + 'neutron.plugins.vmware.plugin.NsxPlugin', + 'neutron.plugins.vmware.plugin.NsxServicePlugin', +] + +from alembic import op +import sqlalchemy as sa + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.create_table( + 'allowedaddresspairs', + sa.Column('port_id', sa.String(length=36), nullable=False), + sa.Column('mac_address', sa.String(length=32), nullable=False), + sa.Column('ip_address', sa.String(length=64), nullable=False), + sa.ForeignKeyConstraint(['port_id'], ['ports.id'], ondelete='CASCADE'), + sa.PrimaryKeyConstraint('port_id', 'mac_address', 'ip_address'), + ) + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.drop_table('allowedaddresspairs') diff --git 
a/neutron/db/migration/alembic_migrations/versions/1fcfc149aca4_agents_unique_by_type_and_host.py b/neutron/db/migration/alembic_migrations/versions/1fcfc149aca4_agents_unique_by_type_and_host.py new file mode 100644 index 000000000..e74a8be42 --- /dev/null +++ b/neutron/db/migration/alembic_migrations/versions/1fcfc149aca4_agents_unique_by_type_and_host.py @@ -0,0 +1,73 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""Add a unique constraint on (agent_type, host) columns to prevent a race +condition when an agent entry is 'upserted'. 
+ +Revision ID: 1fcfc149aca4 +Revises: e197124d4b9 +Create Date: 2013-11-27 18:35:28.148680 + +""" + +revision = '1fcfc149aca4' +down_revision = 'e197124d4b9' + +migration_for_plugins = [ + 'neutron.plugins.brocade.NeutronPlugin.BrocadePluginV2', + 'neutron.plugins.openvswitch.ovs_neutron_plugin.OVSNeutronPluginV2', + 'neutron.plugins.linuxbridge.lb_neutron_plugin.LinuxBridgePluginV2', + 'neutron.plugins.nec.nec_plugin.NECPluginV2', + 'neutron.plugins.nicira.NeutronPlugin.NvpPluginV2', + 'neutron.plugins.nicira.NeutronServicePlugin.NvpAdvancedPlugin', + 'neutron.plugins.vmware.plugin.NsxPlugin', + 'neutron.plugins.vmware.plugin.NsxServicePlugin', + 'neutron.services.loadbalancer.plugin.LoadBalancerPlugin', + 'neutron.plugins.ibm.sdnve_neutron_plugin.SdnvePluginV2', + 'neutron.plugins.oneconvergence.plugin.OneConvergencePluginV2', + 'neutron.plugins.mlnx.mlnx_plugin.MellanoxEswitchPlugin', +] + +from alembic import op + +from neutron.db import migration + + +TABLE_NAME = 'agents' +UC_NAME = 'uniq_agents0agent_type0host' + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.create_unique_constraint( + name=UC_NAME, + source=TABLE_NAME, + local_cols=['agent_type', 'host'] + ) + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.drop_constraint( + name=UC_NAME, + table_name=TABLE_NAME, + type_='unique' + ) diff --git a/neutron/db/migration/alembic_migrations/versions/2032abe8edac_lbaas_add_status_des.py b/neutron/db/migration/alembic_migrations/versions/2032abe8edac_lbaas_add_status_des.py new file mode 100644 index 000000000..813f93e96 --- /dev/null +++ b/neutron/db/migration/alembic_migrations/versions/2032abe8edac_lbaas_add_status_des.py @@ -0,0 +1,57 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the 
"License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""LBaaS add status description + +Revision ID: 2032abe8edac +Revises: 477a4488d3f4 +Create Date: 2013-06-24 06:51:47.308545 + +""" + +# revision identifiers, used by Alembic. +revision = '2032abe8edac' +down_revision = '477a4488d3f4' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.services.loadbalancer.plugin.LoadBalancerPlugin', +] + +from alembic import op +import sqlalchemy as sa + +from neutron.db import migration + +ENTITIES = ['vips', 'pools', 'members', 'healthmonitors'] + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + for entity in ENTITIES: + op.add_column(entity, sa.Column('status_description', sa.String(255))) + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + for entity in ENTITIES: + op.drop_column(entity, 'status_description') diff --git a/neutron/db/migration/alembic_migrations/versions/20ae61555e95_ml2_gre_type_driver.py b/neutron/db/migration/alembic_migrations/versions/20ae61555e95_ml2_gre_type_driver.py new file mode 100644 index 000000000..e63668c25 --- /dev/null +++ b/neutron/db/migration/alembic_migrations/versions/20ae61555e95_ml2_gre_type_driver.py @@ -0,0 +1,66 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you 
may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""DB Migration for ML2 GRE Type Driver + +Revision ID: 20ae61555e95 +Revises: 13de305df56e +Create Date: 2013-07-10 17:19:03.021937 + +""" + +# revision identifiers, used by Alembic. +revision = '20ae61555e95' +down_revision = '13de305df56e' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.plugins.ml2.plugin.Ml2Plugin' +] + +from alembic import op +import sqlalchemy as sa + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.create_table( + 'ml2_gre_allocations', + sa.Column('gre_id', sa.Integer, nullable=False, + autoincrement=False), + sa.Column('allocated', sa.Boolean, nullable=False), + sa.PrimaryKeyConstraint('gre_id') + ) + + op.create_table( + 'ml2_gre_endpoints', + sa.Column('ip_address', sa.String(length=64)), + sa.PrimaryKeyConstraint('ip_address') + ) + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.drop_table('ml2_gre_allocations') + op.drop_table('ml2_gre_endpoints') diff --git a/neutron/db/migration/alembic_migrations/versions/2447ad0e9585_add_ipv6_mode_props.py b/neutron/db/migration/alembic_migrations/versions/2447ad0e9585_add_ipv6_mode_props.py new file mode 100644 index 000000000..f3b444a22 --- /dev/null +++ b/neutron/db/migration/alembic_migrations/versions/2447ad0e9585_add_ipv6_mode_props.py @@ -0,0 +1,81 
@@ +# Copyright 2014 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author Sean M. Collins (Comcast) + +"""Add IPv6 Subnet properties + +Revision ID: 2447ad0e9585 +Revises: 33dd0a9fa487 +Create Date: 2013-10-23 16:36:44.188904 + +""" + +# revision identifiers, used by Alembic. +revision = '2447ad0e9585' +down_revision = '33dd0a9fa487' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + '*' +] + +from alembic import op +import sqlalchemy as sa + + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + # Workaround for Alembic bug #89 + # https://bitbucket.org/zzzeek/alembic/issue/89 + context = op.get_context() + if context.bind.dialect.name == 'postgresql': + op.execute("CREATE TYPE ipv6_ra_modes AS ENUM ('%s', '%s', '%s')" + % ('slaac', 'dhcpv6-stateful', 'dhcpv6-stateless')) + op.execute("CREATE TYPE ipv6_address_modes AS ENUM ('%s', '%s', '%s')" + % ('slaac', 'dhcpv6-stateful', 'dhcpv6-stateless')) + op.add_column('subnets', + sa.Column('ipv6_ra_mode', + sa.Enum('slaac', + 'dhcpv6-stateful', + 'dhcpv6-stateless', + name='ipv6_ra_modes'), + nullable=True) + ) + op.add_column('subnets', + sa.Column('ipv6_address_mode', + sa.Enum('slaac', + 'dhcpv6-stateful', + 'dhcpv6-stateless', + name='ipv6_address_modes'), + nullable=True) + ) + + +def downgrade(active_plugins=None, options=None): + 
if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.drop_column('subnets', 'ipv6_ra_mode') + op.drop_column('subnets', 'ipv6_address_mode') + context = op.get_context() + if context.bind.dialect.name == 'postgresql': + op.execute('DROP TYPE ipv6_ra_modes') + op.execute('DROP TYPE ipv6_address_modes') diff --git a/neutron/db/migration/alembic_migrations/versions/24c7ea5160d7_cisco_csr_vpnaas.py b/neutron/db/migration/alembic_migrations/versions/24c7ea5160d7_cisco_csr_vpnaas.py new file mode 100644 index 000000000..f7cdfd913 --- /dev/null +++ b/neutron/db/migration/alembic_migrations/versions/24c7ea5160d7_cisco_csr_vpnaas.py @@ -0,0 +1,60 @@ +# Copyright 2014 Cisco Systems, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Cisco CSR VPNaaS + + Revision ID: 24c7ea5160d7 + Revises: 492a106273f8 + Create Date: 2014-02-03 13:06:50.407601 +""" + +# revision identifiers, used by Alembic. 
+revision = '24c7ea5160d7' +down_revision = '492a106273f8' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.services.vpn.plugin.VPNDriverPlugin', +] + +from alembic import op +import sqlalchemy as sa + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.create_table( + 'cisco_csr_identifier_map', + sa.Column('tenant_id', sa.String(length=255), nullable=True), + sa.Column('ipsec_site_conn_id', sa.String(length=64), + primary_key=True), + sa.Column('csr_tunnel_id', sa.Integer(), nullable=False), + sa.Column('csr_ike_policy_id', sa.Integer(), nullable=False), + sa.Column('csr_ipsec_policy_id', sa.Integer(), nullable=False), + sa.ForeignKeyConstraint(['ipsec_site_conn_id'], + ['ipsec_site_connections.id'], + ondelete='CASCADE') + ) + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.drop_table('cisco_csr_identifier_map') diff --git a/neutron/db/migration/alembic_migrations/versions/2528ceb28230_nec_pf_netid_fix.py b/neutron/db/migration/alembic_migrations/versions/2528ceb28230_nec_pf_netid_fix.py new file mode 100644 index 000000000..c2f130109 --- /dev/null +++ b/neutron/db/migration/alembic_migrations/versions/2528ceb28230_nec_pf_netid_fix.py @@ -0,0 +1,61 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""NEC PacketFilter network_id nullable fix + +Revision ID: 2528ceb28230 +Revises: 1064e98b7917 +Create Date: 2013-09-24 12:07:43.124256 + +""" + +# revision identifiers, used by Alembic. +revision = '2528ceb28230' +down_revision = '1064e98b7917' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.plugins.nec.nec_plugin.NECPluginV2' +] + +from alembic import op +import sqlalchemy as sa + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.alter_column('packetfilters', 'network_id', + existing_type=sa.String(length=36), + nullable=False) + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + # NOTE(amotoki): There is a bug that nullable of network_id is + # set to True by mistake in folsom_initial (bug 1229508). + # To make sure nullable=False in any revision, nullable is set + # to False in both upgrade and downgrade. + op.alter_column('packetfilters', 'network_id', + existing_type=sa.String(length=36), + nullable=False) diff --git a/neutron/db/migration/alembic_migrations/versions/263772d65691_cisco_db_cleanup_2.py b/neutron/db/migration/alembic_migrations/versions/263772d65691_cisco_db_cleanup_2.py new file mode 100644 index 000000000..4f3adc447 --- /dev/null +++ b/neutron/db/migration/alembic_migrations/versions/263772d65691_cisco_db_cleanup_2.py @@ -0,0 +1,66 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""Cisco plugin db cleanup part II + +Revision ID: 263772d65691 +Revises: 35c7c198ddea +Create Date: 2013-07-29 02:31:26.646343 + +""" + +# revision identifiers, used by Alembic. +revision = '263772d65691' +down_revision = '35c7c198ddea' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.plugins.cisco.network_plugin.PluginV2' +] + +from alembic import op +import sqlalchemy as sa + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.rename_table('credentials', 'cisco_credentials') + op.rename_table('nexusport_bindings', 'cisco_nexusport_bindings') + op.rename_table('qoss', 'cisco_qos_policies') + + op.drop_table('cisco_vlan_ids') + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.create_table( + 'cisco_vlan_ids', + sa.Column('vlan_id', sa.Integer, nullable=False), + sa.Column('vlan_used', sa.Boolean), + sa.PrimaryKeyConstraint('vlan_id'), + ) + + op.rename_table('cisco_credentials', 'credentials') + op.rename_table('cisco_nexusport_bindings', 'nexusport_bindings') + op.rename_table('cisco_qos_policies', 'qoss') diff --git a/neutron/db/migration/alembic_migrations/versions/27cc183af192_ml2_vnic_type.py b/neutron/db/migration/alembic_migrations/versions/27cc183af192_ml2_vnic_type.py new file mode 100644 index 000000000..db38e65a2 --- /dev/null +++ 
b/neutron/db/migration/alembic_migrations/versions/27cc183af192_ml2_vnic_type.py @@ -0,0 +1,54 @@ +# Copyright 2014 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""ml2_vnic_type + +Revision ID: 27cc183af192 +Revises: 4ca36cfc898c +Create Date: 2014-02-09 12:19:21.362967 + +""" + +# revision identifiers, used by Alembic. +revision = '27cc183af192' +down_revision = '4ca36cfc898c' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.plugins.ml2.plugin.Ml2Plugin' +] + +from alembic import op +import sqlalchemy as sa + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.add_column('ml2_port_bindings', + sa.Column('vnic_type', sa.String(length=64), + nullable=False, + server_default='normal')) + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.drop_column('ml2_port_bindings', 'vnic_type') diff --git a/neutron/db/migration/alembic_migrations/versions/27ef74513d33_quota_in_plumgrid_pl.py b/neutron/db/migration/alembic_migrations/versions/27ef74513d33_quota_in_plumgrid_pl.py new file mode 100644 index 000000000..df447813b --- /dev/null +++ b/neutron/db/migration/alembic_migrations/versions/27ef74513d33_quota_in_plumgrid_pl.py @@ -0,0 +1,65 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# 
Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""quota_in_plumgrid_plugin + +Revision ID: 27ef74513d33 +Revises: 3a520dd165d0 +Create Date: 2013-10-08 10:59:19.860397 + +""" + +# revision identifiers, used by Alembic. +revision = '27ef74513d33' +down_revision = '3a520dd165d0' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.plugins.plumgrid.plumgrid_plugin.plumgrid_plugin.' + 'NeutronPluginPLUMgridV2' +] + +from alembic import op +import sqlalchemy as sa + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + ### commands auto generated by Alembic - please adjust! ### + op.create_table( + 'quotas', + sa.Column('id', sa.String(length=36), nullable=False), + sa.Column('tenant_id', sa.String(length=255), nullable=True), + sa.Column('resource', sa.String(length=255), nullable=True), + sa.Column('limit', sa.Integer(), nullable=True), + sa.PrimaryKeyConstraint('id') + ) + ### end Alembic commands ### + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + ### commands auto generated by Alembic - please adjust! 
### + op.drop_table('quotas') + ### end Alembic commands ### diff --git a/neutron/db/migration/alembic_migrations/versions/2a3bae1ceb8_nec_port_binding.py b/neutron/db/migration/alembic_migrations/versions/2a3bae1ceb8_nec_port_binding.py new file mode 100644 index 000000000..943317d21 --- /dev/null +++ b/neutron/db/migration/alembic_migrations/versions/2a3bae1ceb8_nec_port_binding.py @@ -0,0 +1,65 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""NEC Port Binding + +Revision ID: 2a3bae1ceb8 +Revises: 46a0efbd8f0 +Create Date: 2013-08-22 11:09:19.955386 + +""" + +# revision identifiers, used by Alembic. 
+revision = '2a3bae1ceb8' +down_revision = '46a0efbd8f0' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.plugins.nec.nec_plugin.NECPluginV2' +] + +from alembic import op +import sqlalchemy as sa + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.create_table( + 'portbindingports', + sa.Column('port_id', sa.String(length=36), nullable=False), + sa.Column('host', sa.String(length=255), nullable=False), + sa.ForeignKeyConstraint(['port_id'], ['ports.id'], ondelete='CASCADE'), + sa.PrimaryKeyConstraint('port_id') + ) + op.create_foreign_key( + 'portinfos_ibfk_1', + source='portinfos', referent='ports', + local_cols=['id'], remote_cols=['id'], + ondelete='CASCADE') + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.drop_constraint('portinfos_ibfk_1', 'portinfos', 'foreignkey') + op.drop_table('portbindingports') diff --git a/neutron/db/migration/alembic_migrations/versions/2a6d0b51f4bb_cisco_plugin_cleanup.py b/neutron/db/migration/alembic_migrations/versions/2a6d0b51f4bb_cisco_plugin_cleanup.py new file mode 100644 index 000000000..905536ceb --- /dev/null +++ b/neutron/db/migration/alembic_migrations/versions/2a6d0b51f4bb_cisco_plugin_cleanup.py @@ -0,0 +1,88 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""cisco plugin cleanup + +Revision ID: 2a6d0b51f4bb +Revises: 1d76643bcec4 +Create Date: 2013-01-17 22:24:37.730466 + +""" + +# revision identifiers, used by Alembic. +revision = '2a6d0b51f4bb' +down_revision = '1d76643bcec4' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.plugins.cisco.network_plugin.PluginV2' +] + +from alembic import op +import sqlalchemy as sa + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.drop_table(u'portprofile_bindings') + op.drop_table(u'portprofiles') + op.drop_table(u'port_bindings') + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.create_table( + u'port_bindings', + sa.Column(u'id', sa.Integer(), autoincrement=True, + nullable=False), + sa.Column(u'port_id', sa.String(255), nullable=False), + sa.Column(u'blade_intf_dn', sa.String(255), nullable=False), + sa.Column(u'portprofile_name', sa.String(255), + nullable=True), + sa.Column(u'vlan_name', sa.String(255), nullable=True), + sa.Column(u'vlan_id', sa.Integer(), nullable=True), + sa.Column(u'qos', sa.String(255), nullable=True), + sa.Column(u'tenant_id', sa.String(255), nullable=True), + sa.Column(u'vif_id', sa.String(255), nullable=True), + sa.PrimaryKeyConstraint(u'id') + ) + op.create_table( + u'portprofiles', + sa.Column(u'uuid', sa.String(255), nullable=False), + sa.Column(u'name', sa.String(255), nullable=True), + sa.Column(u'vlan_id', sa.Integer(), nullable=True), + sa.Column(u'qos', sa.String(255), nullable=True), + sa.PrimaryKeyConstraint(u'uuid') + ) + op.create_table( + u'portprofile_bindings', + sa.Column(u'id', sa.String(255), nullable=False), + sa.Column(u'tenant_id', sa.String(255), 
nullable=True), + sa.Column(u'port_id', sa.String(255), nullable=True), + sa.Column(u'portprofile_id', sa.String(255), nullable=True), + sa.Column(u'default', sa.Boolean(), nullable=False), + sa.ForeignKeyConstraint(['portprofile_id'], ['portprofiles.uuid'], ), + sa.ForeignKeyConstraint(['port_id'], ['ports.id'], ), + sa.PrimaryKeyConstraint(u'id') + ) diff --git a/neutron/db/migration/alembic_migrations/versions/2c4af419145b_l3_support.py b/neutron/db/migration/alembic_migrations/versions/2c4af419145b_l3_support.py new file mode 100644 index 000000000..82aa30602 --- /dev/null +++ b/neutron/db/migration/alembic_migrations/versions/2c4af419145b_l3_support.py @@ -0,0 +1,56 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""l3_support + +Revision ID: 2c4af419145b +Revises: folsom +Create Date: 2013-03-11 19:26:45.697774 + +""" + +# revision identifiers, used by Alembic. 
+revision = '2c4af419145b' +down_revision = 'folsom' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.plugins.bigswitch.plugin.NeutronRestProxyV2', + 'neutron.plugins.hyperv.hyperv_neutron_plugin.HyperVNeutronPlugin', + 'neutron.plugins.midonet.plugin.MidonetPluginV2', + 'neutron.plugins.nicira.NeutronPlugin.NvpPluginV2', + 'neutron.plugins.nicira.NeutronServicePlugin.NvpAdvancedPlugin', + 'neutron.plugins.vmware.plugin.NsxPlugin', + 'neutron.plugins.vmware.plugin.NsxServicePlugin', + 'neutron.plugins.embrane.plugins.embrane_ovs_plugin.EmbraneOvsPlugin', +] + +from neutron.db import migration +from neutron.db.migration.alembic_migrations import common_ext_ops + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + common_ext_ops.upgrade_l3() + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + common_ext_ops.downgrade_l3() diff --git a/neutron/db/migration/alembic_migrations/versions/2db5203cb7a9_nuage_floatingip.py b/neutron/db/migration/alembic_migrations/versions/2db5203cb7a9_nuage_floatingip.py new file mode 100644 index 000000000..57876d096 --- /dev/null +++ b/neutron/db/migration/alembic_migrations/versions/2db5203cb7a9_nuage_floatingip.py @@ -0,0 +1,83 @@ +# Copyright 2014 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +"""nuage_floatingip + +Revision ID: 2db5203cb7a9 +Revises: 10cd28e692e9 +Create Date: 2014-05-19 16:39:42.048125 + +""" + +# revision identifiers, used by Alembic. +revision = '2db5203cb7a9' +down_revision = '10cd28e692e9' + +migration_for_plugins = [ + 'neutron.plugins.nuage.plugin.NuagePlugin' +] + +from alembic import op +import sqlalchemy as sa + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.create_table( + 'nuage_floatingip_pool_mapping', + sa.Column('fip_pool_id', sa.String(length=36), nullable=False), + sa.Column('net_id', sa.String(length=36), nullable=True), + sa.Column('router_id', sa.String(length=36), nullable=True), + sa.ForeignKeyConstraint(['net_id'], ['networks.id'], + ondelete='CASCADE'), + sa.ForeignKeyConstraint(['router_id'], ['routers.id'], + ondelete='CASCADE'), + sa.PrimaryKeyConstraint('fip_pool_id'), + ) + op.create_table( + 'nuage_floatingip_mapping', + sa.Column('fip_id', sa.String(length=36), nullable=False), + sa.Column('router_id', sa.String(length=36), nullable=True), + sa.Column('nuage_fip_id', sa.String(length=36), nullable=True), + sa.ForeignKeyConstraint(['fip_id'], ['floatingips.id'], + ondelete='CASCADE'), + sa.PrimaryKeyConstraint('fip_id'), + ) + op.rename_table('net_partitions', 'nuage_net_partitions') + op.rename_table('net_partition_router_mapping', + 'nuage_net_partition_router_mapping') + op.rename_table('router_zone_mapping', 'nuage_router_zone_mapping') + op.rename_table('subnet_l2dom_mapping', 'nuage_subnet_l2dom_mapping') + op.rename_table('port_mapping', 'nuage_port_mapping') + op.rename_table('routerroutes_mapping', 'nuage_routerroutes_mapping') + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.drop_table('nuage_floatingip_mapping') + op.drop_table('nuage_floatingip_pool_mapping') + 
op.rename_table('nuage_net_partitions', 'net_partitions') + op.rename_table('nuage_net_partition_router_mapping', + 'net_partition_router_mapping') + op.rename_table('nuage_router_zone_mapping', 'router_zone_mapping') + op.rename_table('nuage_subnet_l2dom_mapping', 'subnet_l2dom_mapping') + op.rename_table('nuage_port_mapping', 'port_mapping') + op.rename_table('nuage_routerroutes_mapping', 'routerroutes_mapping') diff --git a/neutron/db/migration/alembic_migrations/versions/2eeaf963a447_floatingip_status.py b/neutron/db/migration/alembic_migrations/versions/2eeaf963a447_floatingip_status.py new file mode 100644 index 000000000..25c40276e --- /dev/null +++ b/neutron/db/migration/alembic_migrations/versions/2eeaf963a447_floatingip_status.py @@ -0,0 +1,81 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2014 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""floatingip_status + +Revision ID: 2eeaf963a447 +Revises: f44ab9871cd6 +Create Date: 2014-01-14 11:58:13.754747 + +""" + +# revision identifiers, used by Alembic. 
+revision = '2eeaf963a447' +down_revision = 'f44ab9871cd6' + +# This migration is applied to all L3 capable plugins + +migration_for_plugins = [ + 'neutron.plugins.bigswitch.plugin.NeutronRestProxyV2', + 'neutron.plugins.brocade.NeutronPlugin.BrocadePluginV2', + 'neutron.plugins.cisco.network_plugin.PluginV2', + 'neutron.plugins.cisco.n1kv.n1kv_neutron_plugin.N1kvNeutronPluginV2', + 'neutron.plugins.embrane.plugins.embrane_ovs_plugin.EmbraneOvsPlugin', + 'neutron.plugins.hyperv.hyperv_neutron_plugin.HyperVNeutronPlugin', + 'neutron.plugins.ibm.sdnve_neutron_plugin.SdnvePluginV2', + 'neutron.plugins.linuxbridge.lb_neutron_plugin.LinuxBridgePluginV2', + 'neutron.plugins.metaplugin.meta_neutron_plugin.MetaPluginV2', + 'neutron.plugins.mlnx.mlnx_plugin.MellanoxEswitchPlugin', + 'neutron.plugins.midonet.plugin.MidonetPluginV2', + 'neutron.plugins.ml2.plugin.Ml2Plugin', + 'neutron.plugins.nec.nec_plugin.NECPluginV2', + 'neutron.plugins.nicira.NeutronPlugin.NvpPluginV2', + 'neutron.plugins.nicira.NeutronServicePlugin.NvpAdvancedPlugin', + 'neutron.plugins.nuage.plugin.NuagePlugin', + 'neutron.plugins.oneconvergence.plugin.OneConvergencePluginV2', + 'neutron.plugins.openvswitch.ovs_neutron_plugin.OVSNeutronPluginV2', + 'neutron.plugins.plumgrid.plumgrid_plugin.plumgrid_plugin.' 
+ 'NeutronPluginPLUMgridV2', + 'neutron.plugins.ryu.ryu_neutron_plugin.RyuNeutronPluginV2', + 'neutron.plugins.vmware.plugin.NsxPlugin', + 'neutron.plugins.vmware.plugin.NsxServicePlugin', +] + +from alembic import op +import sqlalchemy as sa + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + op.add_column('floatingips', + sa.Column('last_known_router_id', + sa.String(length=36), + nullable=True)) + op.add_column('floatingips', + sa.Column('status', + sa.String(length=16), + nullable=True)) + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + op.drop_column('floatingips', 'last_known_router_id') + op.drop_column('floatingips', 'status') diff --git a/neutron/db/migration/alembic_migrations/versions/32a65f71af51_ml2_portbinding.py b/neutron/db/migration/alembic_migrations/versions/32a65f71af51_ml2_portbinding.py new file mode 100644 index 000000000..6256186f4 --- /dev/null +++ b/neutron/db/migration/alembic_migrations/versions/32a65f71af51_ml2_portbinding.py @@ -0,0 +1,70 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""ml2 portbinding + +Revision ID: 32a65f71af51 +Revises: 14f24494ca31 +Create Date: 2013-09-03 08:40:22.706651 + +""" + +# revision identifiers, used by Alembic. 
+revision = '32a65f71af51' +down_revision = '14f24494ca31' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.plugins.ml2.plugin.Ml2Plugin' +] + +from alembic import op +import sqlalchemy as sa + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.create_table( + 'ml2_port_bindings', + sa.Column('port_id', sa.String(length=36), nullable=False), + sa.Column('host', sa.String(length=255), nullable=False), + sa.Column('vif_type', sa.String(length=64), nullable=False), + sa.Column('cap_port_filter', sa.Boolean(), nullable=False), + sa.Column('driver', sa.String(length=64), nullable=True), + sa.Column('segment', sa.String(length=36), nullable=True), + sa.ForeignKeyConstraint(['port_id'], ['ports.id'], + ondelete='CASCADE'), + sa.ForeignKeyConstraint(['segment'], ['ml2_network_segments.id'], + ondelete='SET NULL'), + sa.PrimaryKeyConstraint('port_id') + ) + + # Note that 176a85fc7d79_add_portbindings_db.py was never enabled + # for ml2, so there is no need to drop the portbindingports table + # that is no longer used. + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.drop_table('ml2_port_bindings') diff --git a/neutron/db/migration/alembic_migrations/versions/32b517556ec9_remove_tunnelip_mode.py b/neutron/db/migration/alembic_migrations/versions/32b517556ec9_remove_tunnelip_mode.py new file mode 100644 index 000000000..4c5c1e98b --- /dev/null +++ b/neutron/db/migration/alembic_migrations/versions/32b517556ec9_remove_tunnelip_mode.py @@ -0,0 +1,58 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""remove TunnelIP model + +Revision ID: 32b517556ec9 +Revises: 176a85fc7d79 +Create Date: 2013-05-23 06:46:57.390838 + +""" + +# revision identifiers, used by Alembic. +revision = '32b517556ec9' +down_revision = '176a85fc7d79' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.plugins.openvswitch.ovs_neutron_plugin.OVSNeutronPluginV2' +] + +from alembic import op +import sqlalchemy as sa + + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.drop_table('ovs_tunnel_ips') + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.create_table( + 'ovs_tunnel_ips', + sa.Column('ip_address', sa.String(length=255), nullable=False), + sa.PrimaryKeyConstraint('ip_address') + ) diff --git a/neutron/db/migration/alembic_migrations/versions/338d7508968c_vpnaas_peer_address_.py b/neutron/db/migration/alembic_migrations/versions/338d7508968c_vpnaas_peer_address_.py new file mode 100644 index 000000000..9675559c4 --- /dev/null +++ b/neutron/db/migration/alembic_migrations/versions/338d7508968c_vpnaas_peer_address_.py @@ -0,0 +1,55 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""vpnaas peer_address size increase + +Revision ID: 338d7508968c +Revises: 4a666eb208c2 +Create Date: 2013-09-16 11:31:39.410189 + +""" + +# revision identifiers, used by Alembic. +revision = '338d7508968c' +down_revision = '4a666eb208c2' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.services.vpn.plugin.VPNDriverPlugin' +] + +from alembic import op +import sqlalchemy as sa + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.alter_column('ipsec_site_connections', 'peer_address', + type_=sa.String(255), existing_type=sa.String(64)) + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.alter_column('ipsec_site_connections', 'peer_address', + type_=sa.String(64), existing_type=sa.String(255)) diff --git a/neutron/db/migration/alembic_migrations/versions/33c3db036fe4_set_length_of_description_field_metering.py b/neutron/db/migration/alembic_migrations/versions/33c3db036fe4_set_length_of_description_field_metering.py new file mode 100644 index 000000000..0882aa7f4 --- /dev/null +++ b/neutron/db/migration/alembic_migrations/versions/33c3db036fe4_set_length_of_description_field_metering.py @@ -0,0 +1,58 @@ +# Copyright 2014 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""set_length_of_description_field_metering + +Revision ID: 33c3db036fe4 +Revises: b65aa907aec +Create Date: 2014-03-25 11:04:27.341830 + +""" + +# revision identifiers, used by Alembic. +revision = '33c3db036fe4' +down_revision = 'b65aa907aec' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.services.metering.metering_plugin.MeteringPlugin' +] + +from alembic import op +import sqlalchemy as sa + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.execute("CREATE TABLE IF NOT EXISTS meteringlabels( " + "tenant_id VARCHAR(255) NULL, " + "id VARCHAR(36) PRIMARY KEY NOT NULL, " + "name VARCHAR(255) NULL, " + "description VARCHAR(255) NULL)") + + op.alter_column('meteringlabels', 'description', type_=sa.String(1024), + existing_nullable=True) + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + pass diff --git a/neutron/db/migration/alembic_migrations/versions/33dd0a9fa487_embrane_lbaas_driver.py b/neutron/db/migration/alembic_migrations/versions/33dd0a9fa487_embrane_lbaas_driver.py new file mode 100644 index 000000000..aadf02d26 --- /dev/null +++ b/neutron/db/migration/alembic_migrations/versions/33dd0a9fa487_embrane_lbaas_driver.py @@ -0,0 +1,61 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2014 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the 
"License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""embrane_lbaas_driver + +Revision ID: 33dd0a9fa487 +Revises: 19180cf98af6 +Create Date: 2014-02-25 00:15:35.567111 + +""" + +# revision identifiers, used by Alembic. +revision = '33dd0a9fa487' +down_revision = '19180cf98af6' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.services.loadbalancer.plugin.LoadBalancerPlugin' +] + +from alembic import op +import sqlalchemy as sa + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.create_table( + u'embrane_pool_port', + sa.Column(u'pool_id', sa.String(length=36), nullable=False), + sa.Column(u'port_id', sa.String(length=36), nullable=False), + sa.ForeignKeyConstraint(['pool_id'], [u'pools.id'], + name=u'embrane_pool_port_ibfk_1'), + sa.ForeignKeyConstraint(['port_id'], [u'ports.id'], + name=u'embrane_pool_port_ibfk_2'), + sa.PrimaryKeyConstraint(u'pool_id')) + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.drop_table(u'embrane_pool_port') diff --git a/neutron/db/migration/alembic_migrations/versions/35c7c198ddea_lbaas_healthmon_del_status.py b/neutron/db/migration/alembic_migrations/versions/35c7c198ddea_lbaas_healthmon_del_status.py new file mode 100644 index 000000000..574172694 --- /dev/null +++ 
b/neutron/db/migration/alembic_migrations/versions/35c7c198ddea_lbaas_healthmon_del_status.py @@ -0,0 +1,58 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""remove status from HealthMonitor + +Revision ID: 35c7c198ddea +Revises: 11c6e18605c8 +Create Date: 2013-08-02 23:14:54.037976 + +""" + +# revision identifiers, used by Alembic. +revision = '35c7c198ddea' +down_revision = '11c6e18605c8' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.services.loadbalancer.plugin.LoadBalancerPlugin', +] + +from alembic import op +import sqlalchemy as sa + + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + op.drop_column('healthmonitors', 'status') + op.drop_column('healthmonitors', 'status_description') + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.add_column('healthmonitors', sa.Column('status', + sa.String(16), + nullable=False)) + op.add_column('healthmonitors', sa.Column('status_description', + sa.String(255))) diff --git a/neutron/db/migration/alembic_migrations/versions/363468ac592c_nvp_network_gw.py b/neutron/db/migration/alembic_migrations/versions/363468ac592c_nvp_network_gw.py new file mode 100644 index 
000000000..e2727e60e --- /dev/null +++ b/neutron/db/migration/alembic_migrations/versions/363468ac592c_nvp_network_gw.py @@ -0,0 +1,100 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""nvp_network_gw + +Revision ID: 363468ac592c +Revises: 1c33fa3cd1a1 +Create Date: 2013-02-07 03:19:14.455372 + +""" + +# revision identifiers, used by Alembic. +revision = '363468ac592c' +down_revision = '1c33fa3cd1a1' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.plugins.nicira.NeutronPlugin.NvpPluginV2', + 'neutron.plugins.nicira.NeutronServicePlugin.NvpAdvancedPlugin', + 'neutron.plugins.vmware.plugin.NsxPlugin', + 'neutron.plugins.vmware.plugin.NsxServicePlugin' +] + +from alembic import op +import sqlalchemy as sa + + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + op.create_table('networkgateways', + sa.Column('id', sa.String(length=36), nullable=False), + sa.Column('name', sa.String(length=255), nullable=True), + sa.Column('tenant_id', sa.String(length=36), + nullable=True), + sa.Column('default', sa.Boolean(), nullable=True), + sa.PrimaryKeyConstraint('id')) + op.create_table('networkgatewaydevices', + sa.Column('id', sa.String(length=36), nullable=False), + sa.Column('network_gateway_id', sa.String(length=36), 
+ nullable=True), + sa.Column('interface_name', sa.String(length=64), + nullable=True), + sa.ForeignKeyConstraint(['network_gateway_id'], + ['networkgateways.id'], + ondelete='CASCADE'), + sa.PrimaryKeyConstraint('id')) + op.create_table('networkconnections', + sa.Column('tenant_id', sa.String(length=255), + nullable=True), + sa.Column('network_gateway_id', sa.String(length=36), + nullable=True), + sa.Column('network_id', sa.String(length=36), + nullable=True), + sa.Column('segmentation_type', + sa.Enum('flat', 'vlan', + name="net_conn_seg_type"), + nullable=True), + sa.Column('segmentation_id', sa.Integer(), + nullable=True), + sa.Column('port_id', sa.String(length=36), + nullable=False), + sa.ForeignKeyConstraint(['network_gateway_id'], + ['networkgateways.id'], + ondelete='CASCADE'), + sa.ForeignKeyConstraint(['network_id'], ['networks.id'], + ondelete='CASCADE'), + sa.ForeignKeyConstraint(['port_id'], ['ports.id'], + ondelete='CASCADE'), + sa.PrimaryKeyConstraint('port_id'), + sa.UniqueConstraint('network_gateway_id', + 'segmentation_type', + 'segmentation_id')) + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.drop_table('networkconnections') + op.drop_table('networkgatewaydevices') + op.drop_table('networkgateways') diff --git a/neutron/db/migration/alembic_migrations/versions/38335592a0dc_nvp_portmap.py b/neutron/db/migration/alembic_migrations/versions/38335592a0dc_nvp_portmap.py new file mode 100644 index 000000000..a5304ec22 --- /dev/null +++ b/neutron/db/migration/alembic_migrations/versions/38335592a0dc_nvp_portmap.py @@ -0,0 +1,62 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""nvp_portmap + +Revision ID: 38335592a0dc +Revises: 49332180ca96 +Create Date: 2013-01-15 06:04:56.328991 + +""" + +# revision identifiers, used by Alembic. +revision = '38335592a0dc' +down_revision = '49332180ca96' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.plugins.nicira.NeutronPlugin.NvpPluginV2', + 'neutron.plugins.nicira.NeutronServicePlugin.NvpAdvancedPlugin', + 'neutron.plugins.vmware.plugin.NsxPlugin', + 'neutron.plugins.vmware.plugin.NsxServicePlugin' +] + +from alembic import op +import sqlalchemy as sa + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.create_table( + 'quantum_nvp_port_mapping', + sa.Column('quantum_id', sa.String(length=36), nullable=False), + sa.Column('nvp_id', sa.String(length=36), nullable=True), + sa.ForeignKeyConstraint(['quantum_id'], ['ports.id'], + ondelete='CASCADE'), + sa.PrimaryKeyConstraint('quantum_id')) + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.drop_table('quantum_nvp_port_mapping') diff --git a/neutron/db/migration/alembic_migrations/versions/38fc1f6789f8_cisco_n1kv_overlay.py b/neutron/db/migration/alembic_migrations/versions/38fc1f6789f8_cisco_n1kv_overlay.py new file mode 100644 index 000000000..8d1178ce0 --- /dev/null +++ b/neutron/db/migration/alembic_migrations/versions/38fc1f6789f8_cisco_n1kv_overlay.py @@ -0,0 +1,57 @@ 
+# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""Cisco N1KV overlay support + +Revision ID: 38fc1f6789f8 +Revises: 1efb85914233 +Create Date: 2013-08-20 18:31:16.158387 + +""" + +revision = '38fc1f6789f8' +down_revision = '1efb85914233' + +migration_for_plugins = [ + 'neutron.plugins.cisco.network_plugin.PluginV2' +] + +import sqlalchemy as sa + +from neutron.db import migration + + +new_type = sa.Enum('vlan', 'overlay', 'trunk', 'multi-segment', + name='vlan_type') +old_type = sa.Enum('vlan', 'vxlan', 'trunk', 'multi-segment', + name='vlan_type') + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + migration.alter_enum('cisco_network_profiles', 'segment_type', new_type, + nullable=False) + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + migration.alter_enum('cisco_network_profiles', 'segment_type', old_type, + nullable=False) diff --git a/neutron/db/migration/alembic_migrations/versions/39cf3f799352_fwaas_havana_2_model.py b/neutron/db/migration/alembic_migrations/versions/39cf3f799352_fwaas_havana_2_model.py new file mode 100644 index 000000000..a114df1f8 --- /dev/null +++ b/neutron/db/migration/alembic_migrations/versions/39cf3f799352_fwaas_havana_2_model.py @@ -0,0 +1,109 @@ +# vim: tabstop=4 
shiftwidth=4 softtabstop=4 +# +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""FWaaS Havana-2 model + +Revision ID: 39cf3f799352 +Revises: e6b16a30d97 +Create Date: 2013-07-10 16:16:51.302943 + +""" + +# revision identifiers, used by Alembic. +revision = '39cf3f799352' +down_revision = 'e6b16a30d97' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.services.firewall.fwaas_plugin.FirewallPlugin', +] + +from alembic import op +import sqlalchemy as sa + +from neutron.db import migration + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.drop_table('firewall_rules') + op.drop_table('firewalls') + op.drop_table('firewall_policies') + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.create_table( + 'firewall_policies', + sa.Column('tenant_id', sa.String(length=255), nullable=True), + sa.Column('id', sa.String(length=36), nullable=False), + sa.Column('name', sa.String(length=255), nullable=True), + sa.Column('description', sa.String(length=1024), nullable=True), + sa.Column('shared', sa.Boolean(), autoincrement=False, nullable=True), + sa.Column('audited', sa.Boolean(), autoincrement=False, + nullable=True), + sa.PrimaryKeyConstraint('id')) + op.create_table( + 'firewalls', sa.Column('tenant_id', 
sa.String(length=255), + nullable=True), + sa.Column('id', sa.String(length=36), nullable=False), + sa.Column('name', sa.String(length=255), nullable=True), + sa.Column('description', sa.String(length=1024), nullable=True), + sa.Column('shared', sa.Boolean(), autoincrement=False, nullable=True), + sa.Column('admin_state_up', sa.Boolean(), autoincrement=False, + nullable=True), + sa.Column('status', sa.String(length=16), nullable=True), + sa.Column('firewall_policy_id', sa.String(length=36), nullable=True), + sa.ForeignKeyConstraint(['firewall_policy_id'], + ['firewall_policies.id'], + name='firewalls_ibfk_1'), + sa.PrimaryKeyConstraint('id')) + op.create_table( + 'firewall_rules', + sa.Column('tenant_id', sa.String(length=255), nullable=True), + sa.Column('id', sa.String(length=36), nullable=False), + sa.Column('name', sa.String(length=255), nullable=True), + sa.Column('description', sa.String(length=1024), nullable=True), + sa.Column('firewall_policy_id', sa.String(length=36), nullable=True), + sa.Column('shared', sa.Boolean(), autoincrement=False, + nullable=True), + sa.Column('protocol', sa.String(length=24), nullable=True), + sa.Column('ip_version', sa.Integer(), autoincrement=False, + nullable=False), + sa.Column('source_ip_address', sa.String(length=46), nullable=True), + sa.Column('destination_ip_address', sa.String(length=46), + nullable=True), + sa.Column('source_port_range_min', sa.Integer(), nullable=True), + sa.Column('source_port_range_max', sa.Integer(), nullable=True), + sa.Column('destination_port_range_min', sa.Integer(), nullable=True), + sa.Column('destination_port_range_max', sa.Integer(), nullable=True), + sa.Column('action', + sa.Enum('allow', 'deny', name='firewallrules_action'), + nullable=True), + sa.Column('enabled', sa.Boolean(), autoincrement=False, + nullable=True), + sa.Column('position', sa.Integer(), autoincrement=False, + nullable=True), + sa.ForeignKeyConstraint(['firewall_policy_id'], + ['firewall_policies.id'], + 
name='firewall_rules_ibfk_1'), + sa.PrimaryKeyConstraint('id')) diff --git a/neutron/db/migration/alembic_migrations/versions/3a520dd165d0_cisco_nexus_multi_switch.py b/neutron/db/migration/alembic_migrations/versions/3a520dd165d0_cisco_nexus_multi_switch.py new file mode 100644 index 000000000..7d13e742d --- /dev/null +++ b/neutron/db/migration/alembic_migrations/versions/3a520dd165d0_cisco_nexus_multi_switch.py @@ -0,0 +1,59 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""Cisco Nexus multi-switch + +Revision ID: 3a520dd165d0 +Revises: 2528ceb28230 +Create Date: 2013-09-28 15:23:38.872682 + +""" + +# revision identifiers, used by Alembic. 
+revision = '3a520dd165d0' +down_revision = '2528ceb28230' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.plugins.cisco.network_plugin.PluginV2' +] + +from alembic import op +import sqlalchemy as sa + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.add_column( + 'cisco_nexusport_bindings', + sa.Column('instance_id', sa.String(length=255), nullable=False)) + op.add_column( + 'cisco_nexusport_bindings', + sa.Column('switch_ip', sa.String(length=255), nullable=False)) + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.drop_column('cisco_nexusport_bindings', 'switch_ip') + op.drop_column('cisco_nexusport_bindings', 'instance_id') diff --git a/neutron/db/migration/alembic_migrations/versions/3b54bf9e29f7_nec_plugin_sharednet.py b/neutron/db/migration/alembic_migrations/versions/3b54bf9e29f7_nec_plugin_sharednet.py new file mode 100644 index 000000000..fb2b776f5 --- /dev/null +++ b/neutron/db/migration/alembic_migrations/versions/3b54bf9e29f7_nec_plugin_sharednet.py @@ -0,0 +1,84 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +"""NEC plugin sharednet + +Revision ID: 3b54bf9e29f7 +Revises: 511471cc46b +Create Date: 2013-02-17 09:21:48.287134 + +""" + +# revision identifiers, used by Alembic. +revision = '3b54bf9e29f7' +down_revision = '511471cc46b' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.plugins.nec.nec_plugin.NECPluginV2' +] + +from alembic import op +import sqlalchemy as sa + + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.create_table( + 'ofctenantmappings', + sa.Column('ofc_id', sa.String(length=255), nullable=False), + sa.Column('quantum_id', sa.String(length=36), nullable=False), + sa.PrimaryKeyConstraint('quantum_id'), + sa.UniqueConstraint('ofc_id') + ) + op.create_table( + 'ofcnetworkmappings', + sa.Column('ofc_id', sa.String(length=255), nullable=False), + sa.Column('quantum_id', sa.String(length=36), nullable=False), + sa.PrimaryKeyConstraint('quantum_id'), + sa.UniqueConstraint('ofc_id') + ) + op.create_table( + 'ofcportmappings', + sa.Column('ofc_id', sa.String(length=255), nullable=False), + sa.Column('quantum_id', sa.String(length=36), nullable=False), + sa.PrimaryKeyConstraint('quantum_id'), + sa.UniqueConstraint('ofc_id') + ) + op.create_table( + 'ofcfiltermappings', + sa.Column('ofc_id', sa.String(length=255), nullable=False), + sa.Column('quantum_id', sa.String(length=36), nullable=False), + sa.PrimaryKeyConstraint('quantum_id'), + sa.UniqueConstraint('ofc_id') + ) + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.drop_table('ofcfiltermappings') + op.drop_table('ofcportmappings') + op.drop_table('ofcnetworkmappings') + op.drop_table('ofctenantmappings') diff --git a/neutron/db/migration/alembic_migrations/versions/3c6e57a23db4_add_multiprovider.py 
b/neutron/db/migration/alembic_migrations/versions/3c6e57a23db4_add_multiprovider.py new file mode 100644 index 000000000..70f66ac61 --- /dev/null +++ b/neutron/db/migration/alembic_migrations/versions/3c6e57a23db4_add_multiprovider.py @@ -0,0 +1,103 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""add multiprovider + +Revision ID: 3c6e57a23db4 +Revises: 86cf4d88bd3 +Create Date: 2013-07-10 12:43:35.769283 + +""" + +# revision identifiers, used by Alembic. 
+revision = '3c6e57a23db4' +down_revision = '86cf4d88bd3' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.plugins.nicira.NeutronPlugin.NvpPluginV2', + 'neutron.plugins.nicira.NeutronServicePlugin.NvpAdvancedPlugin', + 'neutron.plugins.vmware.plugin.NsxPlugin', + 'neutron.plugins.vmware.plugin.NsxServicePlugin' +] + +from alembic import op +import sqlalchemy as sa + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.create_table( + 'nvp_multi_provider_networks', + sa.Column('network_id', sa.String(length=36), nullable=False), + sa.ForeignKeyConstraint(['network_id'], ['networks.id'], + ondelete='CASCADE'), + sa.PrimaryKeyConstraint('network_id'), + mysql_engine='InnoDB' + ) + op.create_table('rename_nvp_network_bindings', + sa.Column('network_id', sa.String(length=36), + primary_key=True), + sa.Column('binding_type', + sa.Enum( + 'flat', 'vlan', 'stt', 'gre', 'l3_ext', + name=( + 'nvp_network_bindings_binding_type')), + nullable=False, primary_key=True), + sa.Column('phy_uuid', sa.String(36), primary_key=True, + nullable=True), + sa.Column('vlan_id', sa.Integer, primary_key=True, + nullable=True, autoincrement=False)) + # copy data from nvp_network_bindings into rename_nvp_network_bindings + op.execute("INSERT INTO rename_nvp_network_bindings SELECT network_id, " + "binding_type, phy_uuid, vlan_id from nvp_network_bindings") + + op.drop_table('nvp_network_bindings') + op.rename_table('rename_nvp_network_bindings', 'nvp_network_bindings') + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + # Delete the multi_provider_network entries from nvp_network_bindings + op.execute("DELETE from nvp_network_bindings WHERE network_id IN " + "(SELECT network_id from nvp_multi_provider_networks)") + + # create table with 
previous contains + op.create_table( + 'rename_nvp_network_bindings', + sa.Column('network_id', sa.String(length=36), primary_key=True), + sa.Column('binding_type', + sa.Enum('flat', 'vlan', 'stt', 'gre', 'l3_ext', + name=('nvp_network_bindings_binding_type')), + nullable=False), + sa.Column('phy_uuid', sa.String(36), nullable=True), + sa.Column('vlan_id', sa.Integer, nullable=True, autoincrement=False)) + + # copy data from nvp_network_bindings into rename_nvp_network_bindings + op.execute("INSERT INTO rename_nvp_network_bindings SELECT network_id, " + "binding_type, phy_uuid, vlan_id from nvp_network_bindings") + + op.drop_table('nvp_network_bindings') + op.rename_table('rename_nvp_network_bindings', 'nvp_network_bindings') + op.drop_table('nvp_multi_provider_networks') diff --git a/neutron/db/migration/alembic_migrations/versions/3cabb850f4a5_table_to_track_port_.py b/neutron/db/migration/alembic_migrations/versions/3cabb850f4a5_table_to_track_port_.py new file mode 100644 index 000000000..dbfdc5953 --- /dev/null +++ b/neutron/db/migration/alembic_migrations/versions/3cabb850f4a5_table_to_track_port_.py @@ -0,0 +1,63 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""Table to track port to host associations + +Revision ID: 3cabb850f4a5 +Revises: 5918cbddab04 +Create Date: 2013-06-24 14:30:33.533562 + +""" + +# revision identifiers, used by Alembic. 
+revision = '3cabb850f4a5' +down_revision = '5918cbddab04' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.plugins.bigswitch.plugin.NeutronRestProxyV2' +] + +from alembic import op +import sqlalchemy as sa + + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + ### commands auto generated by Alembic - please adjust! ### + op.create_table('portlocations', + sa.Column('port_id', sa.String(length=255), + primary_key=True, nullable=False), + sa.Column('host_id', + sa.String(length=255), nullable=False) + ) + ### end Alembic commands ### + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + ### commands auto generated by Alembic - please adjust! ### + op.drop_table('portlocations') + ### end Alembic commands ### diff --git a/neutron/db/migration/alembic_migrations/versions/3cb5d900c5de_security_groups.py b/neutron/db/migration/alembic_migrations/versions/3cb5d900c5de_security_groups.py new file mode 100644 index 000000000..057a360aa --- /dev/null +++ b/neutron/db/migration/alembic_migrations/versions/3cb5d900c5de_security_groups.py @@ -0,0 +1,103 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +"""security_groups + +Revision ID: 3cb5d900c5de +Revises: 48b6f43f7471 +Create Date: 2013-01-08 00:13:43.051078 + +""" + +# revision identifiers, used by Alembic. +revision = '3cb5d900c5de' +down_revision = '48b6f43f7471' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.plugins.linuxbridge.lb_neutron_plugin.LinuxBridgePluginV2', + 'neutron.plugins.nicira.NeutronPlugin.NvpPluginV2', + 'neutron.plugins.nicira.NeutronServicePlugin.NvpAdvancedPlugin', + 'neutron.plugins.openvswitch.ovs_neutron_plugin.OVSNeutronPluginV2', + 'neutron.plugins.nec.nec_plugin.NECPluginV2', + 'neutron.plugins.ryu.ryu_neutron_plugin.RyuNeutronPluginV2', + 'neutron.plugins.vmware.plugin.NsxPlugin', + 'neutron.plugins.vmware.plugin.NsxServicePlugin', + 'neutron.plugins.oneconvergence.plugin.OneConvergencePluginV2', +] + +from alembic import op +import sqlalchemy as sa + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + ### commands auto generated by Alembic - please adjust! 
### + op.create_table( + 'securitygroups', + sa.Column('tenant_id', sa.String(length=255), nullable=True), + sa.Column('id', sa.String(length=36), nullable=False), + sa.Column('name', sa.String(length=255), nullable=True), + sa.Column('description', sa.String(length=255), nullable=True), + sa.PrimaryKeyConstraint('id') + ) + op.create_table( + 'securitygrouprules', + sa.Column('tenant_id', sa.String(length=255), nullable=True), + sa.Column('id', sa.String(length=36), nullable=False), + sa.Column('security_group_id', sa.String(length=36), nullable=False), + sa.Column('remote_group_id', sa.String(length=36), nullable=True), + sa.Column('direction', + sa.Enum('ingress', 'egress', + name='securitygrouprules_direction'), + nullable=True), + sa.Column('ethertype', sa.String(length=40), nullable=True), + sa.Column('protocol', sa.String(length=40), nullable=True), + sa.Column('port_range_min', sa.Integer(), nullable=True), + sa.Column('port_range_max', sa.Integer(), nullable=True), + sa.Column('remote_ip_prefix', sa.String(length=255), nullable=True), + sa.ForeignKeyConstraint(['security_group_id'], ['securitygroups.id'], + ondelete='CASCADE'), + sa.ForeignKeyConstraint(['remote_group_id'], ['securitygroups.id'], + ondelete='CASCADE'), + sa.PrimaryKeyConstraint('id') + ) + op.create_table( + 'securitygroupportbindings', + sa.Column('port_id', sa.String(length=36), nullable=False), + sa.Column('security_group_id', sa.String(length=36), nullable=False), + sa.ForeignKeyConstraint(['port_id'], ['ports.id'], ondelete='CASCADE'), + sa.ForeignKeyConstraint(['security_group_id'], ['securitygroups.id']), + sa.PrimaryKeyConstraint('port_id', 'security_group_id') + ) + ### end Alembic commands ### + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + ### commands auto generated by Alembic - please adjust! 
### + op.drop_table('securitygroupportbindings') + op.drop_table('securitygrouprules') + op.drop_table('securitygroups') + ### end Alembic commands ### diff --git a/neutron/db/migration/alembic_migrations/versions/3cbf70257c28_nvp_mac_learning.py b/neutron/db/migration/alembic_migrations/versions/3cbf70257c28_nvp_mac_learning.py new file mode 100644 index 000000000..b62ebcab2 --- /dev/null +++ b/neutron/db/migration/alembic_migrations/versions/3cbf70257c28_nvp_mac_learning.py @@ -0,0 +1,63 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""nvp_mac_learning + +Revision ID: 3cbf70257c28 +Revises: 5ac71e65402c +Create Date: 2013-05-15 10:15:50.875314 + +""" + +# revision identifiers, used by Alembic. 
+revision = '3cbf70257c28' +down_revision = '5ac71e65402c' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.plugins.nicira.NeutronPlugin.NvpPluginV2', + 'neutron.plugins.nicira.NeutronServicePlugin.NvpAdvancedPlugin', + 'neutron.plugins.vmware.plugin.NsxPlugin', + 'neutron.plugins.vmware.plugin.NsxServicePlugin' +] + +from alembic import op +import sqlalchemy as sa + + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.create_table( + 'maclearningstates', + sa.Column('port_id', sa.String(length=36), nullable=False), + sa.Column('mac_learning_enabled', sa.Boolean(), nullable=False), + sa.ForeignKeyConstraint( + ['port_id'], ['ports.id'], ondelete='CASCADE'), + sa.PrimaryKeyConstraint('port_id')) + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.drop_table('maclearningstates') diff --git a/neutron/db/migration/alembic_migrations/versions/3d2585038b95_vmware_nsx.py b/neutron/db/migration/alembic_migrations/versions/3d2585038b95_vmware_nsx.py new file mode 100644 index 000000000..c51f02a3d --- /dev/null +++ b/neutron/db/migration/alembic_migrations/versions/3d2585038b95_vmware_nsx.py @@ -0,0 +1,65 @@ +# Copyright 2014 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +"""VMware NSX rebranding + +Revision ID: 3d2585038b95 +Revises: 157a5d299379 +Create Date: 2014-02-11 18:18:34.319031 + +""" + +# revision identifiers, used by Alembic. +revision = '3d2585038b95' +down_revision = '157a5d299379' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.plugins.nicira.NeutronPlugin.NvpPluginV2', + 'neutron.plugins.nicira.NeutronServicePlugin.NvpAdvancedPlugin', + 'neutron.plugins.vmware.plugin.NsxPlugin', + 'neutron.plugins.vmware.plugin.NsxServicePlugin' +] + +from alembic import op + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.rename_table('nvp_network_bindings', 'tz_network_bindings') + op.rename_table('nvp_multi_provider_networks', 'multi_provider_networks') + + engine = op.get_bind().engine + if engine.name == 'postgresql': + op.execute("ALTER TYPE nvp_network_bindings_binding_type " + "RENAME TO tz_network_bindings_binding_type;") + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + engine = op.get_bind().engine + if engine.name == 'postgresql': + op.execute("ALTER TYPE tz_network_bindings_binding_type " + "RENAME TO nvp_network_bindings_binding_type;") + + op.rename_table('multi_provider_networks', 'nvp_multi_provider_networks') + op.rename_table('tz_network_bindings', 'nvp_network_bindings') diff --git a/neutron/db/migration/alembic_migrations/versions/3d3cb89d84ee_nsx_switch_mappings.py b/neutron/db/migration/alembic_migrations/versions/3d3cb89d84ee_nsx_switch_mappings.py new file mode 100644 index 000000000..2b4da4891 --- /dev/null +++ b/neutron/db/migration/alembic_migrations/versions/3d3cb89d84ee_nsx_switch_mappings.py @@ -0,0 +1,61 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the 
License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""nsx_switch_mappings + +Revision ID: 3d3cb89d84ee +Revises: 1421183d533f +Create Date: 2014-01-07 15:37:41.323020 + +""" + +# revision identifiers, used by Alembic. +revision = '3d3cb89d84ee' +down_revision = '1421183d533f' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.plugins.nicira.NeutronPlugin.NvpPluginV2', + 'neutron.plugins.nicira.NeutronServicePlugin.NvpAdvancedPlugin', + 'neutron.plugins.vmware.plugin.NsxPlugin', + 'neutron.plugins.vmware.plugin.NsxServicePlugin' +] + +from alembic import op +import sqlalchemy as sa + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + # Create table for network mappings + op.create_table( + 'neutron_nsx_network_mappings', + sa.Column('neutron_id', sa.String(length=36), nullable=False), + sa.Column('nsx_id', sa.String(length=36), nullable=True), + sa.ForeignKeyConstraint(['neutron_id'], ['networks.id'], + ondelete='CASCADE'), + # There might be multiple switches for a neutron network + sa.PrimaryKeyConstraint('neutron_id', 'nsx_id'), + ) + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.drop_table('neutron_nsx_network_mappings') diff --git a/neutron/db/migration/alembic_migrations/versions/3d6fae8b70b0_nvp_lbaas_plugin.py b/neutron/db/migration/alembic_migrations/versions/3d6fae8b70b0_nvp_lbaas_plugin.py new file mode 100644 index 
000000000..cc4807fa1 --- /dev/null +++ b/neutron/db/migration/alembic_migrations/versions/3d6fae8b70b0_nvp_lbaas_plugin.py @@ -0,0 +1,82 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""nvp lbaas plugin + +Revision ID: 3d6fae8b70b0 +Revises: 3ed8f075e38a +Create Date: 2013-09-13 19:34:41.522665 + +""" + +# revision identifiers, used by Alembic. +revision = '3d6fae8b70b0' +down_revision = '3ed8f075e38a' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.plugins.nicira.NeutronServicePlugin.NvpAdvancedPlugin' +] + +from alembic import op +import sqlalchemy as sa + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.create_table( + 'vcns_edge_pool_bindings', + sa.Column('pool_id', sa.String(length=36), nullable=False), + sa.Column('edge_id', sa.String(length=36), nullable=False), + sa.Column('pool_vseid', sa.String(length=36), nullable=True), + sa.ForeignKeyConstraint(['pool_id'], ['pools.id'], + ondelete='CASCADE'), + sa.PrimaryKeyConstraint('pool_id', 'edge_id') + ) + op.create_table( + 'vcns_edge_monitor_bindings', + sa.Column('monitor_id', sa.String(length=36), nullable=False), + sa.Column('edge_id', sa.String(length=36), nullable=False), + sa.Column('monitor_vseid', sa.String(length=36), nullable=True), 
+ sa.ForeignKeyConstraint(['monitor_id'], ['healthmonitors.id'], + ondelete='CASCADE'), + sa.PrimaryKeyConstraint('monitor_id', 'edge_id') + ) + op.create_table( + 'vcns_edge_vip_bindings', + sa.Column('vip_id', sa.String(length=36), nullable=False), + sa.Column('edge_id', sa.String(length=36), nullable=True), + sa.Column('vip_vseid', sa.String(length=36), nullable=True), + sa.Column('app_profileid', sa.String(length=36), nullable=True), + sa.ForeignKeyConstraint(['vip_id'], ['vips.id'], + ondelete='CASCADE'), + sa.PrimaryKeyConstraint('vip_id') + ) + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.drop_table('vcns_edge_vip_bindings') + op.drop_table('vcns_edge_monitor_bindings') + op.drop_table('vcns_edge_pool_bindings') diff --git a/neutron/db/migration/alembic_migrations/versions/3ed8f075e38a_nvp_fwaas_plugin.py b/neutron/db/migration/alembic_migrations/versions/3ed8f075e38a_nvp_fwaas_plugin.py new file mode 100755 index 000000000..19c08a7f2 --- /dev/null +++ b/neutron/db/migration/alembic_migrations/versions/3ed8f075e38a_nvp_fwaas_plugin.py @@ -0,0 +1,60 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""nvp fwaas plugin + +Revision ID: 3ed8f075e38a +Revises: 338d7508968c +Create Date: 2013-09-13 19:14:25.509033 + +""" + +# revision identifiers, used by Alembic. 
+revision = '3ed8f075e38a' +down_revision = '338d7508968c' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.plugins.nicira.NeutronServicePlugin.NvpAdvancedPlugin' +] + +from alembic import op +import sqlalchemy as sa + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.create_table( + 'vcns_firewall_rule_bindings', + sa.Column('rule_id', sa.String(length=36), nullable=False), + sa.Column('edge_id', sa.String(length=36), nullable=False), + sa.Column('rule_vseid', sa.String(length=36), nullable=True), + sa.ForeignKeyConstraint(['rule_id'], ['firewall_rules.id'], ), + sa.PrimaryKeyConstraint('rule_id', 'edge_id') + ) + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.drop_table('vcns_firewall_rule_bindings') diff --git a/neutron/db/migration/alembic_migrations/versions/40b0aff0302e_mlnx_initial.py b/neutron/db/migration/alembic_migrations/versions/40b0aff0302e_mlnx_initial.py new file mode 100644 index 000000000..7da75d927 --- /dev/null +++ b/neutron/db/migration/alembic_migrations/versions/40b0aff0302e_mlnx_initial.py @@ -0,0 +1,194 @@ +# Copyright 2014 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +"""mlnx_initial + +Revision ID: 40b0aff0302e +Revises: 49f5e553f61f +Create Date: 2014-01-12 14:51:49.273105 + +""" + +# revision identifiers, used by Alembic. +revision = '40b0aff0302e' +down_revision = '49f5e553f61f' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.plugins.mlnx.mlnx_plugin.MellanoxEswitchPlugin' +] + +from alembic import op +import sqlalchemy as sa + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.create_table( + 'securitygroups', + sa.Column('tenant_id', sa.String(length=255), nullable=True), + sa.Column('id', sa.String(length=36), nullable=False), + sa.Column('name', sa.String(length=255), nullable=True), + sa.Column('description', sa.String(length=255), nullable=True), + sa.PrimaryKeyConstraint('id'), + ) + + op.create_table( + 'segmentation_id_allocation', + sa.Column('physical_network', sa.String(length=64), nullable=False), + sa.Column('segmentation_id', sa.Integer(), autoincrement=False, + nullable=False), + sa.Column('allocated', sa.Boolean(), nullable=False), + sa.PrimaryKeyConstraint('physical_network', 'segmentation_id'), + ) + + op.create_table( + 'quotas', + sa.Column('id', sa.String(length=36), nullable=False), + sa.Column('tenant_id', sa.String(255), index=True), + sa.Column('resource', sa.String(255)), + sa.Column('limit', sa.Integer()), + sa.PrimaryKeyConstraint('id') + ) + + op.create_table( + 'mlnx_network_bindings', + sa.Column('network_id', sa.String(length=36), nullable=False), + sa.Column('network_type', sa.String(length=32), nullable=False), + sa.Column('physical_network', sa.String(length=64), nullable=True), + sa.Column('segmentation_id', sa.Integer(), nullable=False), + sa.ForeignKeyConstraint(['network_id'], ['networks.id'], + ondelete='CASCADE'), + sa.PrimaryKeyConstraint('network_id'), + ) + + op.create_table( + 
'networkdhcpagentbindings', + sa.Column('network_id', sa.String(length=36), nullable=False), + sa.Column('dhcp_agent_id', sa.String(length=36), nullable=False), + sa.ForeignKeyConstraint(['dhcp_agent_id'], ['agents.id'], + ondelete='CASCADE'), + sa.ForeignKeyConstraint(['network_id'], ['networks.id'], + ondelete='CASCADE'), + sa.PrimaryKeyConstraint('network_id', 'dhcp_agent_id'), + ) + + op.create_table( + 'securitygrouprules', + sa.Column('tenant_id', sa.String(length=255), nullable=True), + sa.Column('id', sa.String(length=36), nullable=False), + sa.Column('security_group_id', sa.String(length=36), nullable=False), + sa.Column('remote_group_id', sa.String(length=36), nullable=True), + sa.Column('direction', + sa.Enum('ingress', 'egress', + name='securitygrouprules_direction'), + nullable=True), + sa.Column('ethertype', sa.String(length=40), nullable=True), + sa.Column('protocol', sa.String(length=40), nullable=True), + sa.Column('port_range_min', sa.Integer(), nullable=True), + sa.Column('port_range_max', sa.Integer(), nullable=True), + sa.Column('remote_ip_prefix', sa.String(length=255), nullable=True), + sa.ForeignKeyConstraint(['remote_group_id'], ['securitygroups.id'], + ondelete='CASCADE'), + sa.ForeignKeyConstraint(['security_group_id'], ['securitygroups.id'], + ondelete='CASCADE'), + sa.PrimaryKeyConstraint('id'), + ) + + op.create_table( + 'port_profile', + sa.Column('port_id', sa.String(length=36), nullable=False), + sa.Column('vnic_type', sa.String(length=32), nullable=False), + sa.ForeignKeyConstraint(['port_id'], ['ports.id'], + ondelete='CASCADE'), + sa.PrimaryKeyConstraint('port_id'), + ) + + op.add_column('routers', sa.Column('enable_snat', sa.Boolean(), + nullable=False, default=True)) + op.create_table( + 'securitygroupportbindings', + sa.Column('port_id', sa.String(length=36), nullable=False), + sa.Column('security_group_id', sa.String(length=36), nullable=False), + sa.ForeignKeyConstraint(['port_id'], ['ports.id'], + ondelete='CASCADE'), + 
sa.ForeignKeyConstraint(['security_group_id'], ['securitygroups.id'],), + sa.PrimaryKeyConstraint('port_id', 'security_group_id'), + ) + + op.create_table( + 'portbindingports', + sa.Column('port_id', sa.String(length=36), nullable=False), + sa.Column('host', sa.String(length=255), nullable=False), + sa.ForeignKeyConstraint(['port_id'], ['ports.id'], + ondelete='CASCADE'), + sa.PrimaryKeyConstraint('port_id'), + ) + + op.rename_table( + 'routes', + 'subnetroutes', + ) + + op.create_table( + 'routerl3agentbindings', + sa.Column('id', sa.String(length=36), nullable=False), + sa.Column('router_id', sa.String(length=36), nullable=True), + sa.Column('l3_agent_id', sa.String(length=36), nullable=True), + sa.ForeignKeyConstraint(['l3_agent_id'], ['agents.id'], + ondelete='CASCADE'), + + sa.ForeignKeyConstraint(['router_id'], ['routers.id'], + ondelete='CASCADE'), + sa.PrimaryKeyConstraint('id'), + ) + + op.create_table( + 'routerroutes', + sa.Column('destination', sa.String(length=64), nullable=False), + sa.Column('nexthop', sa.String(length=64), nullable=False), + sa.Column('router_id', sa.String(length=36), nullable=False), + sa.ForeignKeyConstraint(['router_id'], ['routers.id'], + ondelete='CASCADE'), + sa.PrimaryKeyConstraint('destination', 'nexthop', 'router_id'), + ) + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.rename_table( + 'subnetroutes', + 'routes', + ) + + op.drop_table('routerroutes') + op.drop_table('routerl3agentbindings') + op.drop_table('portbindingports') + op.drop_table('securitygroupportbindings') + op.drop_column('routers', 'enable_snat') + op.drop_table('port_profile') + op.drop_table('securitygrouprules') + op.drop_table('networkdhcpagentbindings') + op.drop_table('mlnx_network_bindings') + op.drop_table('quotas') + op.drop_table('segmentation_id_allocation') + op.drop_table('securitygroups') diff --git 
a/neutron/db/migration/alembic_migrations/versions/40dffbf4b549_nvp_dist_router.py b/neutron/db/migration/alembic_migrations/versions/40dffbf4b549_nvp_dist_router.py new file mode 100644 index 000000000..2bb3e83ef --- /dev/null +++ b/neutron/db/migration/alembic_migrations/versions/40dffbf4b549_nvp_dist_router.py @@ -0,0 +1,63 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""nvp_dist_router + +Revision ID: 40dffbf4b549 +Revises: 63afba73813 +Create Date: 2013-08-21 18:00:26.214923 + +""" + +# revision identifiers, used by Alembic. 
+revision = '40dffbf4b549' +down_revision = '63afba73813' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.plugins.nicira.NeutronPlugin.NvpPluginV2', + 'neutron.plugins.nicira.NeutronServicePlugin.NvpAdvancedPlugin', + 'neutron.plugins.vmware.plugin.NsxPlugin', + 'neutron.plugins.vmware.plugin.NsxServicePlugin' +] + +from alembic import op +import sqlalchemy as sa + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.create_table( + 'nsxrouterextattributess', + sa.Column('router_id', sa.String(length=36), nullable=False), + sa.Column('distributed', sa.Boolean(), nullable=False), + sa.ForeignKeyConstraint( + ['router_id'], ['routers.id'], ondelete='CASCADE'), + sa.PrimaryKeyConstraint('router_id') + ) + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.drop_table('nsxrouterextattributess') diff --git a/neutron/db/migration/alembic_migrations/versions/45680af419f9_nvp_qos.py b/neutron/db/migration/alembic_migrations/versions/45680af419f9_nvp_qos.py new file mode 100644 index 000000000..bf59288ab --- /dev/null +++ b/neutron/db/migration/alembic_migrations/versions/45680af419f9_nvp_qos.py @@ -0,0 +1,94 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""nvp_qos + +Revision ID: 45680af419f9 +Revises: 54c2c487e913 +Create Date: 2013-02-17 13:27:57.999631 + +""" + +# revision identifiers, used by Alembic. +revision = '45680af419f9' +down_revision = '54c2c487e913' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.plugins.nicira.NeutronPlugin.NvpPluginV2', + 'neutron.plugins.nicira.NeutronServicePlugin.NvpAdvancedPlugin', + 'neutron.plugins.vmware.plugin.NsxPlugin', + 'neutron.plugins.vmware.plugin.NsxServicePlugin' +] + +from alembic import op +import sqlalchemy as sa + + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + ### commands auto generated by Alembic - please adjust! ### + op.create_table( + 'qosqueues', + sa.Column('tenant_id', sa.String(length=255), nullable=True), + sa.Column('id', sa.String(length=36), nullable=False), + sa.Column('name', sa.String(length=255), nullable=True), + sa.Column('default', sa.Boolean(), nullable=True), + sa.Column('min', sa.Integer(), nullable=False), + sa.Column('max', sa.Integer(), nullable=True), + sa.Column('qos_marking', sa.Enum('untrusted', 'trusted', + name='qosqueues_qos_marking'), + nullable=True), + sa.Column('dscp', sa.Integer(), nullable=True), + sa.PrimaryKeyConstraint('id') + ) + op.create_table( + 'networkqueuemappings', + sa.Column('network_id', sa.String(length=36), nullable=False), + sa.Column('queue_id', sa.String(length=36), nullable=True), + sa.ForeignKeyConstraint(['network_id'], ['networks.id'], + ondelete='CASCADE'), + sa.ForeignKeyConstraint(['queue_id'], ['qosqueues.id'], + ondelete='CASCADE'), + sa.PrimaryKeyConstraint('network_id') + ) + op.create_table( + 'portqueuemappings', + sa.Column('port_id', sa.String(length=36), nullable=False), + sa.Column('queue_id', 
sa.String(length=36), nullable=False), + sa.ForeignKeyConstraint(['port_id'], ['ports.id'], ondelete='CASCADE'), + sa.ForeignKeyConstraint(['queue_id'], ['qosqueues.id'], ), + sa.PrimaryKeyConstraint('port_id', 'queue_id') + ) + ### end Alembic commands ### + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + ### commands auto generated by Alembic - please adjust! ### + op.drop_table('portqueuemappings') + op.drop_table('networkqueuemappings') + op.drop_table('qosqueues') + ### end Alembic commands ### diff --git a/neutron/db/migration/alembic_migrations/versions/4692d074d587_agent_scheduler.py b/neutron/db/migration/alembic_migrations/versions/4692d074d587_agent_scheduler.py new file mode 100644 index 000000000..9338b0890 --- /dev/null +++ b/neutron/db/migration/alembic_migrations/versions/4692d074d587_agent_scheduler.py @@ -0,0 +1,89 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""agent scheduler + +Revision ID: 4692d074d587 +Revises: 3b54bf9e29f7 +Create Date: 2013-02-21 23:01:50.370306 + +""" + +# revision identifiers, used by Alembic. 
+revision = '4692d074d587' +down_revision = '3b54bf9e29f7' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.plugins.brocade.NeutronPlugin.BrocadePluginV2', + 'neutron.plugins.linuxbridge.lb_neutron_plugin.LinuxBridgePluginV2', + 'neutron.plugins.openvswitch.ovs_neutron_plugin.OVSNeutronPluginV2', + 'neutron.plugins.nicira.NeutronPlugin.NvpPluginV2', + 'neutron.plugins.nicira.NeutronServicePlugin.NvpAdvancedPlugin', + 'neutron.plugins.nec.nec_plugin.NECPluginV2', + 'neutron.plugins.vmware.plugin.NsxPlugin', + 'neutron.plugins.vmware.plugin.NsxServicePlugin', + 'neutron.plugins.oneconvergence.plugin.OneConvergencePluginV2', + 'neutron.plugins.ml2.plugin.Ml2Plugin', + 'neutron.plugins.bigswitch.plugin.NeutronRestProxyV2' +] + +from alembic import op +import sqlalchemy as sa + + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + ### commands auto generated by Alembic - please adjust! 
### + op.create_table( + 'networkdhcpagentbindings', + sa.Column('network_id', sa.String(length=36), nullable=False), + sa.Column('dhcp_agent_id', sa.String(length=36), nullable=False), + sa.ForeignKeyConstraint(['dhcp_agent_id'], ['agents.id'], + ondelete='CASCADE'), + sa.ForeignKeyConstraint(['network_id'], ['networks.id'], + ondelete='CASCADE'), + sa.PrimaryKeyConstraint('network_id', 'dhcp_agent_id') + ) + op.create_table( + 'routerl3agentbindings', + sa.Column('id', sa.String(length=36), nullable=False), + sa.Column('router_id', sa.String(length=36), nullable=True), + sa.Column('l3_agent_id', sa.String(length=36), nullable=True), + sa.ForeignKeyConstraint(['l3_agent_id'], ['agents.id'], + ondelete='CASCADE'), + sa.ForeignKeyConstraint(['router_id'], ['routers.id'], + ondelete='CASCADE'), + sa.PrimaryKeyConstraint('id') + ) + ### end Alembic commands ### + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + ### commands auto generated by Alembic - please adjust! ### + op.drop_table('routerl3agentbindings') + op.drop_table('networkdhcpagentbindings') + ### end Alembic commands ### diff --git a/neutron/db/migration/alembic_migrations/versions/46a0efbd8f0_cisco_n1kv_multisegm.py b/neutron/db/migration/alembic_migrations/versions/46a0efbd8f0_cisco_n1kv_multisegm.py new file mode 100644 index 000000000..1c55ce7f2 --- /dev/null +++ b/neutron/db/migration/alembic_migrations/versions/46a0efbd8f0_cisco_n1kv_multisegm.py @@ -0,0 +1,80 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""cisco_n1kv_multisegment_trunk + +Revision ID: 46a0efbd8f0 +Revises: 53bbd27ec841 +Create Date: 2013-08-20 20:44:08.711110 + +""" + +revision = '46a0efbd8f0' +down_revision = '53bbd27ec841' + +migration_for_plugins = [ + 'neutron.plugins.cisco.network_plugin.PluginV2' +] + +from alembic import op +import sqlalchemy as sa + +from neutron.db import migration + +new_type = sa.Enum('vlan', 'vxlan', 'trunk', 'multi-segment', name='vlan_type') +old_type = sa.Enum('vlan', 'vxlan', name='vlan_type') + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.create_table( + 'cisco_n1kv_trunk_segments', + sa.Column('trunk_segment_id', sa.String(length=36), nullable=False), + sa.Column('segment_id', sa.String(length=36), nullable=False), + sa.Column('dot1qtag', sa.String(length=36), nullable=False), + sa.ForeignKeyConstraint(['trunk_segment_id'], ['networks.id'], + ondelete='CASCADE'), + sa.PrimaryKeyConstraint('trunk_segment_id', 'segment_id', 'dot1qtag') + ) + op.create_table( + 'cisco_n1kv_multi_segments', + sa.Column('multi_segment_id', sa.String(length=36), nullable=False), + sa.Column('segment1_id', sa.String(length=36), nullable=False), + sa.Column('segment2_id', sa.String(length=36), nullable=False), + sa.Column('encap_profile_name', sa.String(length=36), nullable=True), + sa.ForeignKeyConstraint(['multi_segment_id'], ['networks.id'], + ondelete='CASCADE'), + sa.PrimaryKeyConstraint('multi_segment_id', 'segment1_id', + 'segment2_id') + ) + 
migration.alter_enum('cisco_network_profiles', 'segment_type', new_type, + nullable=False) + op.add_column('cisco_network_profiles', + sa.Column('sub_type', sa.String(length=255), nullable=True)) + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.drop_table('cisco_n1kv_trunk_segments') + op.drop_table('cisco_n1kv_multi_segments') + migration.alter_enum('cisco_network_profiles', 'segment_type', old_type, + nullable=False) + op.drop_column('cisco_network_profiles', 'sub_type') diff --git a/neutron/db/migration/alembic_migrations/versions/477a4488d3f4_ml2_vxlan_type_driver.py b/neutron/db/migration/alembic_migrations/versions/477a4488d3f4_ml2_vxlan_type_driver.py new file mode 100644 index 000000000..eeb28539d --- /dev/null +++ b/neutron/db/migration/alembic_migrations/versions/477a4488d3f4_ml2_vxlan_type_driver.py @@ -0,0 +1,69 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""DB Migration for ML2 VXLAN Type Driver + +Revision ID: 477a4488d3f4 +Revises: 20ae61555e95 +Create Date: 2013-07-09 14:14:33.158502 + +""" + +# revision identifiers, used by Alembic. 
+revision = '477a4488d3f4' +down_revision = '20ae61555e95' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.plugins.ml2.plugin.Ml2Plugin' +] + +from alembic import op +import sqlalchemy as sa + + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.create_table( + 'ml2_vxlan_allocations', + sa.Column('vxlan_vni', sa.Integer, nullable=False, + autoincrement=False), + sa.Column('allocated', sa.Boolean, nullable=False), + sa.PrimaryKeyConstraint('vxlan_vni') + ) + + op.create_table( + 'ml2_vxlan_endpoints', + sa.Column('ip_address', sa.String(length=64)), + sa.Column('udp_port', sa.Integer(), nullable=False, + autoincrement=False), + sa.PrimaryKeyConstraint('ip_address', 'udp_port') + ) + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.drop_table('ml2_vxlan_allocations') + op.drop_table('ml2_vxlan_endpoints') diff --git a/neutron/db/migration/alembic_migrations/versions/48b6f43f7471_service_type.py b/neutron/db/migration/alembic_migrations/versions/48b6f43f7471_service_type.py new file mode 100644 index 000000000..83f3e726b --- /dev/null +++ b/neutron/db/migration/alembic_migrations/versions/48b6f43f7471_service_type.py @@ -0,0 +1,76 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""DB support for service types + +Revision ID: 48b6f43f7471 +Revises: 5a875d0e5c +Create Date: 2013-01-07 13:47:29.093160 + +""" + +# revision identifiers, used by Alembic. +revision = '48b6f43f7471' +down_revision = '5a875d0e5c' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + '*' +] + +from alembic import op +import sqlalchemy as sa + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.create_table( + u'servicetypes', + sa.Column(u'tenant_id', sa.String(255), nullable=True), + sa.Column(u'id', sa.String(36), nullable=False), + sa.Column(u'name', sa.String(255), nullable=True), + sa.Column(u'description', sa.String(255), nullable=True), + sa.Column(u'default', sa.Boolean(), + autoincrement=False, nullable=False), + sa.Column(u'num_instances', sa.Integer(), + autoincrement=False, nullable=True), + sa.PrimaryKeyConstraint(u'id')) + op.create_table( + u'servicedefinitions', + sa.Column(u'id', sa.String(36), nullable=False), + sa.Column(u'service_class', sa.String(length=255), + nullable=False), + sa.Column(u'plugin', sa.String(255), nullable=True), + sa.Column(u'driver', sa.String(255), nullable=True), + sa.Column(u'service_type_id', sa.String(36), + nullable=False), + sa.ForeignKeyConstraint(['service_type_id'], [u'servicetypes.id'], + name=u'servicedefinitions_ibfk_1'), + sa.PrimaryKeyConstraint(u'id', u'service_class', u'service_type_id')) + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.drop_table(u'servicedefinitions') + op.drop_table(u'servicetypes') diff --git a/neutron/db/migration/alembic_migrations/versions/492a106273f8_brocade_ml2_mech_dri.py 
b/neutron/db/migration/alembic_migrations/versions/492a106273f8_brocade_ml2_mech_dri.py new file mode 100644 index 000000000..f8bf7995b --- /dev/null +++ b/neutron/db/migration/alembic_migrations/versions/492a106273f8_brocade_ml2_mech_dri.py @@ -0,0 +1,70 @@ +# Copyright 2014 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""Brocade ML2 Mech. Driver + +Revision ID: 492a106273f8 +Revises: fcac4c42e2cc +Create Date: 2014-03-03 15:35:46.974523 + +""" + +# revision identifiers, used by Alembic. 
+revision = '492a106273f8' +down_revision = 'fcac4c42e2cc' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.plugins.ml2.plugin.Ml2Plugin' +] + +from alembic import op +import sqlalchemy as sa + + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.create_table( + 'ml2_brocadenetworks', + sa.Column('id', sa.String(length=36), nullable=False), + sa.Column('vlan', sa.String(length=10), nullable=True), + sa.Column('segment_id', sa.String(length=36), nullable=True), + sa.Column('network_type', sa.String(length=10), nullable=True), + sa.Column('tenant_id', sa.String(length=255), nullable=True), + sa.PrimaryKeyConstraint('id')) + + op.create_table( + 'ml2_brocadeports', + sa.Column('id', sa.String(length=36), nullable=False), + sa.Column('network_id', sa.String(length=36), nullable=False), + sa.Column('admin_state_up', sa.Boolean()), + sa.Column('physical_interface', sa.String(length=36), nullable=True), + sa.Column('vlan_id', sa.String(length=36), nullable=True), + sa.Column('tenant_id', sa.String(length=255), nullable=True), + sa.PrimaryKeyConstraint('id')) + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.drop_table('ml2_brocadenetworks') + op.drop_table('ml2_brocadeports') diff --git a/neutron/db/migration/alembic_migrations/versions/49332180ca96_ryu_plugin_update.py b/neutron/db/migration/alembic_migrations/versions/49332180ca96_ryu_plugin_update.py new file mode 100644 index 000000000..d44449962 --- /dev/null +++ b/neutron/db/migration/alembic_migrations/versions/49332180ca96_ryu_plugin_update.py @@ -0,0 +1,59 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in 
compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""ryu plugin update + +Revision ID: 49332180ca96 +Revises: 1149d7de0cfa +Create Date: 2013-01-30 07:52:58.472885 + +""" + +# revision identifiers, used by Alembic. +revision = '49332180ca96' +down_revision = '1149d7de0cfa' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.plugins.ryu.ryu_neutron_plugin.RyuNeutronPluginV2' +] + +from alembic import op +import sqlalchemy as sa + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.drop_table('ofp_server') + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.create_table( + 'ofp_server', + sa.Column('id', sa.Integer(), autoincrement=True, nullable=False), + sa.Column('address', sa.String(length=255)), + sa.Column('host_type', sa.String(length=255)), + sa.PrimaryKeyConstraint(u'id') + ) diff --git a/neutron/db/migration/alembic_migrations/versions/49f5e553f61f_ml2_security_groups.py b/neutron/db/migration/alembic_migrations/versions/49f5e553f61f_ml2_security_groups.py new file mode 100644 index 000000000..b7dd042d2 --- /dev/null +++ b/neutron/db/migration/alembic_migrations/versions/49f5e553f61f_ml2_security_groups.py @@ -0,0 +1,95 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file 
except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""security_groups + +Revision ID: 49f5e553f61f +Revises: 27ef74513d33 +Create Date: 2013-12-21 19:58:17.071412 + +""" + +# revision identifiers, used by Alembic. +revision = '49f5e553f61f' +down_revision = '27ef74513d33' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.plugins.ml2.plugin.Ml2Plugin' +] + +from alembic import op +import sqlalchemy as sa + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + ### commands auto generated by Alembic - please adjust! 
### + op.create_table( + 'securitygroups', + sa.Column('tenant_id', sa.String(length=255), nullable=True), + sa.Column('id', sa.String(length=36), nullable=False), + sa.Column('name', sa.String(length=255), nullable=True), + sa.Column('description', sa.String(length=255), nullable=True), + sa.PrimaryKeyConstraint('id') + ) + op.create_table( + 'securitygrouprules', + sa.Column('tenant_id', sa.String(length=255), nullable=True), + sa.Column('id', sa.String(length=36), nullable=False), + sa.Column('security_group_id', sa.String(length=36), nullable=False), + sa.Column('remote_group_id', sa.String(length=36), nullable=True), + sa.Column('direction', + sa.Enum('ingress', 'egress', + name='securitygrouprules_direction'), + nullable=True), + sa.Column('ethertype', sa.String(length=40), nullable=True), + sa.Column('protocol', sa.String(length=40), nullable=True), + sa.Column('port_range_min', sa.Integer(), nullable=True), + sa.Column('port_range_max', sa.Integer(), nullable=True), + sa.Column('remote_ip_prefix', sa.String(length=255), nullable=True), + sa.ForeignKeyConstraint(['security_group_id'], ['securitygroups.id'], + ondelete='CASCADE'), + sa.ForeignKeyConstraint(['remote_group_id'], ['securitygroups.id'], + ondelete='CASCADE'), + sa.PrimaryKeyConstraint('id') + ) + op.create_table( + 'securitygroupportbindings', + sa.Column('port_id', sa.String(length=36), nullable=False), + sa.Column('security_group_id', sa.String(length=36), nullable=False), + sa.ForeignKeyConstraint(['port_id'], ['ports.id'], ondelete='CASCADE'), + sa.ForeignKeyConstraint(['security_group_id'], ['securitygroups.id']), + sa.PrimaryKeyConstraint('port_id', 'security_group_id') + ) + ### end Alembic commands ### + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + ### commands auto generated by Alembic - please adjust! 
### + op.drop_table('securitygroupportbindings') + op.drop_table('securitygrouprules') + op.drop_table('securitygroups') + ### end Alembic commands ### diff --git a/neutron/db/migration/alembic_migrations/versions/4a666eb208c2_service_router.py b/neutron/db/migration/alembic_migrations/versions/4a666eb208c2_service_router.py new file mode 100644 index 000000000..ad3356b45 --- /dev/null +++ b/neutron/db/migration/alembic_migrations/versions/4a666eb208c2_service_router.py @@ -0,0 +1,70 @@ +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""service router + +Revision ID: 4a666eb208c2 +Revises: 38fc1f6789f8 +Create Date: 2013-09-03 01:55:57.799217 + +""" + +# revision identifiers, used by Alembic. 
+revision = '4a666eb208c2' +down_revision = '38fc1f6789f8' + +# Change to ['*'] if this migration applies to all plugins +# This migration must apply to both NVP/NSX plugins as it alters a table +# used by both of them + +migration_for_plugins = [ + 'neutron.plugins.nicira.NeutronPlugin.NvpPluginV2', + 'neutron.plugins.nicira.NeutronServicePlugin.NvpAdvancedPlugin', + 'neutron.plugins.vmware.plugin.NsxPlugin', + 'neutron.plugins.vmware.plugin.NsxServicePlugin' +] + +from alembic import op +import sqlalchemy as sa + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.create_table( + 'vcns_router_bindings', + sa.Column('status', sa.String(length=16), nullable=False), + sa.Column('status_description', sa.String(length=255), nullable=True), + sa.Column('router_id', sa.String(length=36), nullable=False), + sa.Column('edge_id', sa.String(length=16), nullable=True), + sa.Column('lswitch_id', sa.String(length=36), nullable=False), + sa.PrimaryKeyConstraint('router_id'), + mysql_engine='InnoDB' + ) + op.add_column( + u'nsxrouterextattributess', + sa.Column('service_router', sa.Boolean(), nullable=False)) + op.execute("UPDATE nsxrouterextattributess set service_router=False") + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.drop_column(u'nsxrouterextattributess', 'service_router') + op.drop_table('vcns_router_bindings') diff --git a/neutron/db/migration/alembic_migrations/versions/4ca36cfc898c_nsx_router_mappings.py b/neutron/db/migration/alembic_migrations/versions/4ca36cfc898c_nsx_router_mappings.py new file mode 100644 index 000000000..c9784a890 --- /dev/null +++ b/neutron/db/migration/alembic_migrations/versions/4ca36cfc898c_nsx_router_mappings.py @@ -0,0 +1,64 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file 
except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""nsx_router_mappings + +Revision ID: 4ca36cfc898c +Revises: 3d3cb89d84ee +Create Date: 2014-01-08 10:41:43.373031 + +""" + +# revision identifiers, used by Alembic. +revision = '4ca36cfc898c' +down_revision = '3d3cb89d84ee' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.plugins.nicira.NeutronPlugin.NvpPluginV2', + 'neutron.plugins.nicira.NeutronServicePlugin.NvpAdvancedPlugin', + 'neutron.plugins.vmware.plugin.NsxPlugin', + 'neutron.plugins.vmware.plugin.NsxServicePlugin' +] + +from alembic import op +import sqlalchemy as sa + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + # Create table for router/lrouter mappings + op.create_table( + 'neutron_nsx_router_mappings', + sa.Column('neutron_id', sa.String(length=36), nullable=False), + sa.Column('nsx_id', sa.String(length=36), nullable=True), + sa.ForeignKeyConstraint(['neutron_id'], ['routers.id'], + ondelete='CASCADE'), + sa.PrimaryKeyConstraint('neutron_id'), + ) + # Execute statement to a record in nsx_router_mappings for + # each record in routers + op.execute("INSERT INTO neutron_nsx_router_mappings SELECT id,id " + "from routers") + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.drop_table('neutron_nsx_router_mappings') diff --git 
a/neutron/db/migration/alembic_migrations/versions/4eca4a84f08a_remove_ml2_cisco_cred_db.py b/neutron/db/migration/alembic_migrations/versions/4eca4a84f08a_remove_ml2_cisco_cred_db.py new file mode 100644 index 000000000..10bc6fee8 --- /dev/null +++ b/neutron/db/migration/alembic_migrations/versions/4eca4a84f08a_remove_ml2_cisco_cred_db.py @@ -0,0 +1,59 @@ +# Copyright 2014 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""Remove ML2 Cisco Credentials DB + +Revision ID: 4eca4a84f08a +Revises: 33c3db036fe4 +Create Date: 2014-04-10 19:32:46.697189 + +""" + +# revision identifiers, used by Alembic. 
+revision = '4eca4a84f08a' +down_revision = '33c3db036fe4' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.plugins.ml2.plugin.Ml2Plugin' +] + +from alembic import op +import sqlalchemy as sa + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.drop_table('cisco_ml2_credentials') + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.create_table( + 'cisco_ml2_credentials', + sa.Column('credential_id', sa.String(length=255), nullable=True), + sa.Column('tenant_id', sa.String(length=255), nullable=False), + sa.Column('credential_name', sa.String(length=255), nullable=False), + sa.Column('user_name', sa.String(length=255), nullable=True), + sa.Column('password', sa.String(length=255), nullable=True), + sa.PrimaryKeyConstraint('tenant_id', 'credential_name') + ) diff --git a/neutron/db/migration/alembic_migrations/versions/50d5ba354c23_ml2_binding_vif_details.py b/neutron/db/migration/alembic_migrations/versions/50d5ba354c23_ml2_binding_vif_details.py new file mode 100644 index 000000000..4182eea1a --- /dev/null +++ b/neutron/db/migration/alembic_migrations/versions/50d5ba354c23_ml2_binding_vif_details.py @@ -0,0 +1,99 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2014 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""ml2 binding:vif_details + +Revision ID: 50d5ba354c23 +Revises: 27cc183af192 +Create Date: 2014-02-11 23:21:59.577972 + +""" + +# revision identifiers, used by Alembic. +revision = '50d5ba354c23' +down_revision = '27cc183af192' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.plugins.ml2.plugin.Ml2Plugin' +] + +from alembic import op +import sqlalchemy as sa + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.add_column('ml2_port_bindings', + sa.Column('vif_details', sa.String(length=4095), + nullable=False, server_default='')) + if op.get_bind().engine.name == 'ibm_db_sa': + op.execute( + "UPDATE ml2_port_bindings SET" + " vif_details = '{\"port_filter\": true}'" + " WHERE cap_port_filter = 1") + op.execute( + "UPDATE ml2_port_bindings SET" + " vif_details = '{\"port_filter\": false}'" + " WHERE cap_port_filter = 0") + else: + op.execute( + "UPDATE ml2_port_bindings SET" + " vif_details = '{\"port_filter\": true}'" + " WHERE cap_port_filter = true") + op.execute( + "UPDATE ml2_port_bindings SET" + " vif_details = '{\"port_filter\": false}'" + " WHERE cap_port_filter = false") + op.drop_column('ml2_port_bindings', 'cap_port_filter') + if op.get_bind().engine.name == 'ibm_db_sa': + op.execute("CALL SYSPROC.ADMIN_CMD('REORG TABLE ml2_port_bindings')") + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + if op.get_bind().engine.name == 'ibm_db_sa': + # Note(xuhanp): DB2 doesn't allow nullable=False Column with + # "DEFAULT" clause not specified. So server_default is used. + # Using sa.text will result "DEFAULT 0" for cap_port_filter. 
+ op.add_column('ml2_port_bindings', + sa.Column('cap_port_filter', sa.Boolean(), + nullable=False, + server_default=sa.text("0"))) + op.execute( + "UPDATE ml2_port_bindings SET" + " cap_port_filter = 1" + " WHERE vif_details LIKE '%\"port_filter\": true%'") + else: + op.add_column('ml2_port_bindings', + sa.Column('cap_port_filter', sa.Boolean(), + nullable=False, + server_default=sa.text("false"))) + op.execute( + "UPDATE ml2_port_bindings SET" + " cap_port_filter = true" + " WHERE vif_details LIKE '%\"port_filter\": true%'") + op.drop_column('ml2_port_bindings', 'vif_details') + if op.get_bind().engine.name == 'ibm_db_sa': + op.execute("CALL SYSPROC.ADMIN_CMD('REORG TABLE ml2_port_bindings')") diff --git a/neutron/db/migration/alembic_migrations/versions/50e86cb2637a_nsx_mappings.py b/neutron/db/migration/alembic_migrations/versions/50e86cb2637a_nsx_mappings.py new file mode 100644 index 000000000..e571f378a --- /dev/null +++ b/neutron/db/migration/alembic_migrations/versions/50e86cb2637a_nsx_mappings.py @@ -0,0 +1,82 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""nsx_mappings + +Revision ID: 50e86cb2637a +Revises: 1fcfc149aca4 +Create Date: 2013-10-26 14:37:30.012149 + +""" + +# revision identifiers, used by Alembic. 
+revision = '50e86cb2637a' +down_revision = '1fcfc149aca4' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.plugins.nicira.NeutronPlugin.NvpPluginV2', + 'neutron.plugins.nicira.NeutronServicePlugin.NvpAdvancedPlugin', + 'neutron.plugins.vmware.plugin.NsxPlugin', + 'neutron.plugins.vmware.plugin.NsxServicePlugin' +] + +from alembic import op +import sqlalchemy as sa + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.create_table('neutron_nsx_port_mappings', + sa.Column('neutron_id', sa.String(length=36), + nullable=False), + sa.Column('nsx_port_id', sa.String(length=36), + nullable=False), + sa.Column('nsx_switch_id', sa.String(length=36), + nullable=True), + sa.ForeignKeyConstraint(['neutron_id'], ['ports.id'], + ondelete='CASCADE'), + sa.PrimaryKeyConstraint('neutron_id')) + + op.execute("INSERT INTO neutron_nsx_port_mappings SELECT quantum_id as " + "neutron_id, nvp_id as nsx_port_id, null as nsx_switch_id from" + " quantum_nvp_port_mapping") + op.drop_table('quantum_nvp_port_mapping') + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + # Restore table to pre-icehouse version + op.create_table('quantum_nvp_port_mapping', + sa.Column('quantum_id', sa.String(length=36), + nullable=False), + sa.Column('nvp_id', sa.String(length=36), + nullable=False), + sa.ForeignKeyConstraint(['quantum_id'], ['ports.id'], + ondelete='CASCADE'), + sa.PrimaryKeyConstraint('quantum_id')) + op.execute("INSERT INTO quantum_nvp_port_mapping SELECT neutron_id as " + "quantum_id, nsx_port_id as nvp_id from" + " neutron_nsx_port_mappings") + op.drop_table('neutron_nsx_port_mappings') diff --git a/neutron/db/migration/alembic_migrations/versions/511471cc46b_agent_ext_model_supp.py 
b/neutron/db/migration/alembic_migrations/versions/511471cc46b_agent_ext_model_supp.py new file mode 100644 index 000000000..ee14c1b92 --- /dev/null +++ b/neutron/db/migration/alembic_migrations/versions/511471cc46b_agent_ext_model_supp.py @@ -0,0 +1,84 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""Add agent management extension model support + +Revision ID: 511471cc46b +Revises: 363468ac592c +Create Date: 2013-02-18 05:09:32.523460 + +""" + +# revision identifiers, used by Alembic. 
+revision = '511471cc46b' +down_revision = '363468ac592c' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.plugins.brocade.NeutronPlugin.BrocadePluginV2', + 'neutron.plugins.linuxbridge.lb_neutron_plugin.LinuxBridgePluginV2', + 'neutron.plugins.openvswitch.ovs_neutron_plugin.OVSNeutronPluginV2', + 'neutron.plugins.nec.nec_plugin.NECPluginV2', + 'neutron.plugins.nicira.NeutronPlugin.NvpPluginV2', + 'neutron.plugins.nicira.NeutronServicePlugin.NvpAdvancedPlugin', + 'neutron.plugins.vmware.plugin.NsxPlugin', + 'neutron.plugins.vmware.plugin.NsxServicePlugin', + 'neutron.services.loadbalancer.plugin.LoadBalancerPlugin', + 'neutron.plugins.ibm.sdnve_neutron_plugin.SdnvePluginV2', + 'neutron.plugins.oneconvergence.plugin.OneConvergencePluginV2', + 'neutron.plugins.mlnx.mlnx_plugin.MellanoxEswitchPlugin', + 'neutron.plugins.ml2.plugin.Ml2Plugin', +] + +from alembic import op +import sqlalchemy as sa + + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + ### commands auto generated by Alembic - please adjust! 
### + op.create_table( + 'agents', + sa.Column('id', sa.String(length=36), nullable=False), + sa.Column('agent_type', sa.String(length=255), nullable=False), + sa.Column('binary', sa.String(length=255), nullable=False), + sa.Column('topic', sa.String(length=255), nullable=False), + sa.Column('host', sa.String(length=255), nullable=False), + sa.Column('admin_state_up', sa.Boolean(), nullable=False), + sa.Column('created_at', sa.DateTime(), nullable=False), + sa.Column('started_at', sa.DateTime(), nullable=False), + sa.Column('heartbeat_timestamp', sa.DateTime(), nullable=False), + sa.Column('description', sa.String(length=255), nullable=True), + sa.Column('configurations', sa.String(length=4095), nullable=False), + sa.PrimaryKeyConstraint('id') + ) + ### end Alembic commands ### + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + ### commands auto generated by Alembic - please adjust! ### + op.drop_table('agents') + ### end Alembic commands ### diff --git a/neutron/db/migration/alembic_migrations/versions/51b4de912379_cisco_nexus_ml2_mech.py b/neutron/db/migration/alembic_migrations/versions/51b4de912379_cisco_nexus_ml2_mech.py new file mode 100755 index 000000000..f6038fd3e --- /dev/null +++ b/neutron/db/migration/alembic_migrations/versions/51b4de912379_cisco_nexus_ml2_mech.py @@ -0,0 +1,68 @@ +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +"""Cisco Nexus ML2 mechanism driver + +Revision ID: 51b4de912379 +Revises: 66a59a7f516 +Create Date: 2013-08-20 15:31:40.553634 + +""" + +# revision identifiers, used by Alembic. +revision = '51b4de912379' +down_revision = '66a59a7f516' + +migration_for_plugins = [ + 'neutron.plugins.ml2.plugin.Ml2Plugin' +] + +from alembic import op +import sqlalchemy as sa + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.create_table( + 'cisco_ml2_nexusport_bindings', + sa.Column('binding_id', sa.Integer(), nullable=False), + sa.Column('port_id', sa.String(length=255), nullable=True), + sa.Column('vlan_id', sa.Integer(), autoincrement=False, + nullable=False), + sa.Column('switch_ip', sa.String(length=255), nullable=True), + sa.Column('instance_id', sa.String(length=255), nullable=True), + sa.PrimaryKeyConstraint('binding_id'), + ) + op.create_table( + 'cisco_ml2_credentials', + sa.Column('credential_id', sa.String(length=255), nullable=True), + sa.Column('tenant_id', sa.String(length=255), nullable=False), + sa.Column('credential_name', sa.String(length=255), nullable=False), + sa.Column('user_name', sa.String(length=255), nullable=True), + sa.Column('password', sa.String(length=255), nullable=True), + sa.PrimaryKeyConstraint('tenant_id', 'credential_name'), + ) + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.drop_table('cisco_ml2_credentials') + op.drop_table('cisco_ml2_nexusport_bindings') diff --git a/neutron/db/migration/alembic_migrations/versions/52c5e4a18807_lbaas_pool_scheduler.py b/neutron/db/migration/alembic_migrations/versions/52c5e4a18807_lbaas_pool_scheduler.py new file mode 100644 index 000000000..345955a0f --- /dev/null +++ b/neutron/db/migration/alembic_migrations/versions/52c5e4a18807_lbaas_pool_scheduler.py @@ -0,0 +1,63 @@ +# vim: 
tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""LBaaS Pool scheduler + +Revision ID: 52c5e4a18807 +Revises: 2032abe8edac +Create Date: 2013-06-14 03:23:47.815865 + +""" + +# revision identifiers, used by Alembic. +revision = '52c5e4a18807' +down_revision = '2032abe8edac' + +migration_for_plugins = [ + 'neutron.services.loadbalancer.plugin.LoadBalancerPlugin', +] + +from alembic import op +import sqlalchemy as sa + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + ### commands auto generated by Alembic - please adjust! ### + op.create_table( + 'poolloadbalanceragentbindings', + sa.Column('pool_id', sa.String(length=36), nullable=False), + sa.Column('agent_id', sa.String(length=36), + nullable=False), + sa.ForeignKeyConstraint(['agent_id'], ['agents.id'], + ondelete='CASCADE'), + sa.ForeignKeyConstraint(['pool_id'], ['pools.id'], + ondelete='CASCADE'), + sa.PrimaryKeyConstraint('pool_id') + ) + ### end Alembic commands ### + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + ### commands auto generated by Alembic - please adjust! 
### + op.drop_table('poolloadbalanceragentbindings') + ### end Alembic commands ### diff --git a/neutron/db/migration/alembic_migrations/versions/52ff27f7567a_support_for_vpnaas.py b/neutron/db/migration/alembic_migrations/versions/52ff27f7567a_support_for_vpnaas.py new file mode 100644 index 000000000..c9f61ff91 --- /dev/null +++ b/neutron/db/migration/alembic_migrations/versions/52ff27f7567a_support_for_vpnaas.py @@ -0,0 +1,183 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""Support for VPNaaS + +Revision ID: 52ff27f7567a +Revises: 39cf3f799352 +Create Date: 2013-07-14 23:04:13.395955 + +""" + +# revision identifiers, used by Alembic. 
+revision = '52ff27f7567a' +down_revision = '39cf3f799352' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.services.vpn.plugin.VPNDriverPlugin', +] + +from alembic import op +import sqlalchemy as sa + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.create_table( + 'ikepolicies', + sa.Column('tenant_id', sa.String(length=255), nullable=True), + sa.Column('id', sa.String(length=36), nullable=False), + sa.Column('name', sa.String(length=255), nullable=True), + sa.Column('description', sa.String(length=255), nullable=True), + sa.Column( + 'auth_algorithm', + sa.Enum('sha1', name='vpn_auth_algorithms'), nullable=False), + sa.Column( + 'encryption_algorithm', + sa.Enum('3des', 'aes-128', 'aes-256', 'aes-192', + name='vpn_encrypt_algorithms'), nullable=False), + sa.Column( + 'phase1_negotiation_mode', + sa.Enum('main', name='ike_phase1_mode'), nullable=False), + sa.Column( + 'lifetime_units', + sa.Enum('seconds', 'kilobytes', name='vpn_lifetime_units'), + nullable=False), + sa.Column('lifetime_value', sa.Integer(), nullable=False), + sa.Column( + 'ike_version', + sa.Enum('v1', 'v2', name='ike_versions'), nullable=False), + sa.Column( + 'pfs', + sa.Enum('group2', 'group5', 'group14', name='vpn_pfs'), + nullable=False), + sa.PrimaryKeyConstraint('id') + ) + op.create_table( + 'ipsecpolicies', + sa.Column('tenant_id', sa.String(length=255), nullable=True), + sa.Column('id', sa.String(length=36), nullable=False), + sa.Column('name', sa.String(length=255), nullable=True), + sa.Column('description', sa.String(length=255), nullable=True), + sa.Column( + 'transform_protocol', + sa.Enum('esp', 'ah', 'ah-esp', name='ipsec_transform_protocols'), + nullable=False), + sa.Column( + 'auth_algorithm', + sa.Enum('sha1', name='vpn_auth_algorithms'), nullable=False), + sa.Column( + 'encryption_algorithm', + 
sa.Enum( + '3des', 'aes-128', + 'aes-256', 'aes-192', name='vpn_encrypt_algorithms'), + nullable=False), + sa.Column( + 'encapsulation_mode', + sa.Enum('tunnel', 'transport', name='ipsec_encapsulations'), + nullable=False), + sa.Column( + 'lifetime_units', + sa.Enum( + 'seconds', 'kilobytes', + name='vpn_lifetime_units'), nullable=False), + sa.Column( + 'lifetime_value', sa.Integer(), nullable=False), + sa.Column( + 'pfs', + sa.Enum( + 'group2', 'group5', 'group14', name='vpn_pfs'), + nullable=False), + sa.PrimaryKeyConstraint('id') + ) + op.create_table( + 'vpnservices', + sa.Column('tenant_id', sa.String(length=255), nullable=True), + sa.Column('id', sa.String(length=36), nullable=False), + sa.Column('name', sa.String(length=255), nullable=True), + sa.Column('description', sa.String(length=255), nullable=True), + sa.Column('status', sa.String(length=16), nullable=False), + sa.Column('admin_state_up', sa.Boolean(), nullable=False), + sa.Column('subnet_id', sa.String(length=36), nullable=False), + sa.Column('router_id', sa.String(length=36), nullable=False), + sa.ForeignKeyConstraint(['router_id'], ['routers.id'], ), + sa.ForeignKeyConstraint(['subnet_id'], ['subnets.id'], ), + sa.PrimaryKeyConstraint('id') + ) + op.create_table( + 'ipsec_site_connections', + sa.Column('tenant_id', sa.String(length=255), nullable=True), + sa.Column('id', sa.String(length=36), nullable=False), + sa.Column('name', sa.String(length=255), nullable=True), + sa.Column('description', sa.String(length=255), nullable=True), + sa.Column('peer_address', sa.String(length=64), nullable=False), + sa.Column('peer_id', sa.String(length=255), nullable=False), + sa.Column('route_mode', sa.String(length=8), nullable=False), + sa.Column('mtu', sa.Integer(), nullable=False), + sa.Column( + 'initiator', + sa.Enum( + 'bi-directional', 'response-only', name='vpn_initiators'), + nullable=False), + sa.Column('auth_mode', sa.String(length=16), nullable=False), + sa.Column('psk', sa.String(length=255), 
nullable=False), + sa.Column( + 'dpd_action', + sa.Enum( + 'hold', 'clear', 'restart', + 'disabled', 'restart-by-peer', name='vpn_dpd_actions'), + nullable=False), + sa.Column('dpd_interval', sa.Integer(), nullable=False), + sa.Column('dpd_timeout', sa.Integer(), nullable=False), + sa.Column('status', sa.String(length=16), nullable=False), + sa.Column('admin_state_up', sa.Boolean(), nullable=False), + sa.Column('vpnservice_id', sa.String(length=36), nullable=False), + sa.Column('ipsecpolicy_id', sa.String(length=36), nullable=False), + sa.Column('ikepolicy_id', sa.String(length=36), nullable=False), + sa.ForeignKeyConstraint(['ikepolicy_id'], ['ikepolicies.id']), + sa.ForeignKeyConstraint(['ipsecpolicy_id'], ['ipsecpolicies.id']), + sa.ForeignKeyConstraint(['vpnservice_id'], ['vpnservices.id']), + sa.PrimaryKeyConstraint('id') + ) + op.create_table( + 'ipsecpeercidrs', + sa.Column('cidr', sa.String(length=32), nullable=False), + sa.Column('ipsec_site_connection_id', + sa.String(length=36), + nullable=False), + sa.ForeignKeyConstraint(['ipsec_site_connection_id'], + ['ipsec_site_connections.id'], + ondelete='CASCADE'), + sa.PrimaryKeyConstraint('cidr', 'ipsec_site_connection_id') + ) + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.drop_table('ipsecpeercidrs') + op.drop_table('ipsec_site_connections') + op.drop_table('vpnservices') + op.drop_table('ipsecpolicies') + op.drop_table('ikepolicies') diff --git a/neutron/db/migration/alembic_migrations/versions/538732fa21e1_nec_rename_quantum_id_to_neutron_id.py b/neutron/db/migration/alembic_migrations/versions/538732fa21e1_nec_rename_quantum_id_to_neutron_id.py new file mode 100644 index 000000000..0dd6ca149 --- /dev/null +++ b/neutron/db/migration/alembic_migrations/versions/538732fa21e1_nec_rename_quantum_id_to_neutron_id.py @@ -0,0 +1,65 @@ +# Copyright 2014 OpenStack Foundation +# +# Licensed under the Apache License, Version 
2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""NEC Rename quantum_id to neutron_id + +Revision ID: 538732fa21e1 +Revises: 2447ad0e9585 +Create Date: 2014-03-04 05:43:33.660601 + +""" + +# revision identifiers, used by Alembic. +revision = '538732fa21e1' +down_revision = '2447ad0e9585' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.plugins.nec.nec_plugin.NECPluginV2' +] + +from alembic import op +import sqlalchemy as sa + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + for table in ['ofctenantmappings', 'ofcnetworkmappings', + 'ofcportmappings', 'ofcfiltermappings', + 'ofcroutermappings', + ]: + op.alter_column(table, 'quantum_id', + new_column_name='neutron_id', + existing_type=sa.String(length=36), + existing_nullable=False) + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + for table in ['ofctenantmappings', 'ofcnetworkmappings', + 'ofcportmappings', 'ofcfiltermappings', + 'ofcroutermappings', + ]: + op.alter_column(table, 'neutron_id', + new_column_name='quantum_id', + existing_type=sa.String(length=36), + existing_nullable=False) diff --git a/neutron/db/migration/alembic_migrations/versions/53bbd27ec841_extra_dhcp_opts_supp.py b/neutron/db/migration/alembic_migrations/versions/53bbd27ec841_extra_dhcp_opts_supp.py new file mode 100644 index 
000000000..ef903425d --- /dev/null +++ b/neutron/db/migration/alembic_migrations/versions/53bbd27ec841_extra_dhcp_opts_supp.py @@ -0,0 +1,66 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""Extra dhcp opts support + +Revision ID: 53bbd27ec841 +Revises: 40dffbf4b549 +Create Date: 2013-05-09 15:36:50.485036 + +""" + +# revision identifiers, used by Alembic. +revision = '53bbd27ec841' +down_revision = '40dffbf4b549' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.plugins.openvswitch.ovs_neutron_plugin.OVSNeutronPluginV2', + 'neutron.plugins.ml2.plugin.Ml2Plugin', + 'neutron.plugins.bigswitch.plugin.NeutronRestProxyV2' +] + +from alembic import op +import sqlalchemy as sa + + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.create_table( + 'extradhcpopts', + sa.Column('id', sa.String(length=36), nullable=False), + sa.Column('port_id', sa.String(length=36), nullable=False), + sa.Column('opt_name', sa.String(length=64), nullable=False), + sa.Column('opt_value', sa.String(length=255), nullable=False), + sa.ForeignKeyConstraint(['port_id'], ['ports.id'], ondelete='CASCADE'), + sa.PrimaryKeyConstraint('id'), + sa.UniqueConstraint('port_id', 'opt_name', name='uidx_portid_optname')) + + +def 
downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + ### commands auto generated by Alembic - please adjust! ### + op.drop_table('extradhcpopts') + ### end Alembic commands ### diff --git a/neutron/db/migration/alembic_migrations/versions/54c2c487e913_lbaas.py b/neutron/db/migration/alembic_migrations/versions/54c2c487e913_lbaas.py new file mode 100644 index 000000000..f40daf5e6 --- /dev/null +++ b/neutron/db/migration/alembic_migrations/versions/54c2c487e913_lbaas.py @@ -0,0 +1,163 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""DB support for load balancing service

Revision ID: 54c2c487e913 +Revises: 38335592a0dc +Create Date: 2013-02-04 16:32:32.048731 + +""" + +# revision identifiers, used by Alembic. +revision = '54c2c487e913' +down_revision = '38335592a0dc' + +# We need migration_for_plugins to be an empty list to avoid creating tables, +# if there's no plugin that implements the LBaaS extension. 
+ +migration_for_plugins = [] + +from alembic import op +import sqlalchemy as sa + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.create_table( + u'vips', + sa.Column(u'tenant_id', sa.String(255), nullable=True), + sa.Column(u'id', sa.String(36), nullable=False), + sa.Column(u'name', sa.String(255), nullable=True), + sa.Column(u'description', sa.String(255), nullable=True), + sa.Column(u'port_id', sa.String(36), nullable=True), + sa.Column(u'protocol_port', sa.Integer(), nullable=False), + sa.Column(u'protocol', + sa.Enum("HTTP", "HTTPS", "TCP", name="lb_protocols"), + nullable=False), + sa.Column(u'pool_id', sa.String(36), nullable=False), + sa.Column(u'status', sa.String(16), nullable=False), + sa.Column(u'admin_state_up', sa.Boolean(), nullable=False), + sa.Column(u'connection_limit', sa.Integer(), nullable=True), + sa.ForeignKeyConstraint(['port_id'], ['ports.id'], ), + sa.UniqueConstraint('pool_id'), + sa.PrimaryKeyConstraint(u'id') + ) + op.create_table( + u'poolmonitorassociations', + sa.Column(u'pool_id', sa.String(36), nullable=False), + sa.Column(u'monitor_id', sa.String(36), nullable=False), + sa.ForeignKeyConstraint(['monitor_id'], [u'healthmonitors.id'], ), + sa.ForeignKeyConstraint(['pool_id'], [u'pools.id'], ), + sa.PrimaryKeyConstraint(u'pool_id', u'monitor_id') + ) + op.create_table( + u'sessionpersistences', + sa.Column(u'vip_id', sa.String(36), nullable=False), + sa.Column(u'type', + sa.Enum("SOURCE_IP", + "HTTP_COOKIE", + "APP_COOKIE", + name="sesssionpersistences_type"), + nullable=False), + sa.Column(u'cookie_name', sa.String(1024), nullable=True), + sa.ForeignKeyConstraint(['vip_id'], [u'vips.id'], ), + sa.PrimaryKeyConstraint(u'vip_id') + ) + op.create_table( + u'pools', + sa.Column(u'tenant_id', sa.String(255), nullable=True), + sa.Column(u'id', sa.String(36), nullable=False), + sa.Column(u'vip_id', sa.String(36), 
nullable=True), + sa.Column(u'name', sa.String(255), nullable=True), + sa.Column(u'description', sa.String(255), nullable=True), + sa.Column(u'subnet_id', sa.String(36), nullable=False), + sa.Column(u'protocol', + sa.Enum("HTTP", "HTTPS", "TCP", name="lb_protocols"), + nullable=False), + sa.Column(u'lb_method', + sa.Enum("ROUND_ROBIN", + "LEAST_CONNECTIONS", + "SOURCE_IP", + name="pools_lb_method"), + nullable=False), + sa.Column(u'status', sa.String(16), nullable=False), + sa.Column(u'admin_state_up', sa.Boolean(), nullable=False), + sa.ForeignKeyConstraint(['vip_id'], [u'vips.id'], ), + sa.PrimaryKeyConstraint(u'id') + ) + op.create_table( + u'healthmonitors', + sa.Column(u'tenant_id', sa.String(255), nullable=True), + sa.Column(u'id', sa.String(36), nullable=False), + sa.Column(u'type', + sa.Enum("PING", + "TCP", + "HTTP", + "HTTPS", + name="healthmontiors_type"), + nullable=False), + sa.Column(u'delay', sa.Integer(), nullable=False), + sa.Column(u'timeout', sa.Integer(), nullable=False), + sa.Column(u'max_retries', sa.Integer(), nullable=False), + sa.Column(u'http_method', sa.String(16), nullable=True), + sa.Column(u'url_path', sa.String(255), nullable=True), + sa.Column(u'expected_codes', sa.String(64), nullable=True), + sa.Column(u'status', sa.String(16), nullable=False), + sa.Column(u'admin_state_up', sa.Boolean(), nullable=False), + sa.PrimaryKeyConstraint(u'id') + ) + op.create_table( + u'members', + sa.Column(u'tenant_id', sa.String(255), nullable=True), + sa.Column(u'id', sa.String(36), nullable=False), + sa.Column(u'pool_id', sa.String(36), nullable=False), + sa.Column(u'address', sa.String(64), nullable=False), + sa.Column(u'protocol_port', sa.Integer(), nullable=False), + sa.Column(u'weight', sa.Integer(), nullable=False), + sa.Column(u'status', sa.String(16), nullable=False), + sa.Column(u'admin_state_up', sa.Boolean(), nullable=False), + sa.ForeignKeyConstraint(['pool_id'], [u'pools.id'], ), + sa.PrimaryKeyConstraint(u'id') + ) + op.create_table( + 
u'poolstatisticss', + sa.Column(u'pool_id', sa.String(36), nullable=False), + sa.Column(u'bytes_in', sa.Integer(), nullable=False), + sa.Column(u'bytes_out', sa.Integer(), nullable=False), + sa.Column(u'active_connections', sa.Integer(), nullable=False), + sa.Column(u'total_connections', sa.Integer(), nullable=False), + sa.ForeignKeyConstraint(['pool_id'], [u'pools.id'], ), + sa.PrimaryKeyConstraint(u'pool_id') + ) + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.drop_table(u'poolstatisticss') + op.drop_table(u'members') + op.drop_table(u'healthmonitors') + op.drop_table(u'pools') + op.drop_table(u'sessionpersistences') + op.drop_table(u'poolmonitorassociations') + op.drop_table(u'vips') diff --git a/neutron/db/migration/alembic_migrations/versions/54f7549a0e5f_set_not_null_peer_address.py b/neutron/db/migration/alembic_migrations/versions/54f7549a0e5f_set_not_null_peer_address.py new file mode 100644 index 000000000..626c26fb7 --- /dev/null +++ b/neutron/db/migration/alembic_migrations/versions/54f7549a0e5f_set_not_null_peer_address.py @@ -0,0 +1,54 @@ +# Copyright 2014 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""set_not_null_peer_address + +Revision ID: 54f7549a0e5f +Revises: 33dd0a9fa487 +Create Date: 2014-03-17 11:00:17.539028 + +""" + +# revision identifiers, used by Alembic. 
+revision = '54f7549a0e5f' +down_revision = 'icehouse' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.services.vpn.plugin.VPNDriverPlugin' +] + +from alembic import op +import sqlalchemy as sa + + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.alter_column('ipsec_site_connections', 'peer_address', + existing_type=sa.String(255), nullable=False) + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.alter_column('ipsec_site_connections', 'peer_address', nullable=True, + existing_type=sa.String(255)) diff --git a/neutron/db/migration/alembic_migrations/versions/557edfc53098_new_service_types.py b/neutron/db/migration/alembic_migrations/versions/557edfc53098_new_service_types.py new file mode 100644 index 000000000..81fe08b32 --- /dev/null +++ b/neutron/db/migration/alembic_migrations/versions/557edfc53098_new_service_types.py @@ -0,0 +1,81 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""New service types framework (service providers) + +Revision ID: 557edfc53098 +Revises: 52c5e4a18807 +Create Date: 2013-06-29 21:10:41.283358 + +""" + +# revision identifiers, used by Alembic. 
+revision = '557edfc53098' +down_revision = '52c5e4a18807' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.services.loadbalancer.plugin.LoadBalancerPlugin', +] + +from alembic import op +import sqlalchemy as sa + + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + op.create_table( + 'providerresourceassociations', + sa.Column('provider_name', sa.String(length=255), nullable=False), + sa.Column('resource_id', sa.String(length=36), + nullable=False, unique=True), + ) + + for table in ('servicedefinitions', 'servicetypes'): + op.execute("DROP TABLE IF EXISTS %s" % table) + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + op.create_table( + 'servicetypes', + sa.Column('id', sa.String(length=36), nullable=False), + sa.Column('tenant_id', sa.String(length=255)), + sa.Column('name', sa.String(255)), + sa.Column('description', sa.String(255)), + sa.Column('default', sa.Boolean(), nullable=False, default=False), + sa.Column('num_instances', sa.Integer, default=0), + sa.PrimaryKeyConstraint('id') + ) + op.create_table( + 'servicedefinitions', + sa.Column('id', sa.String(length=36), nullable=False), + sa.Column('service_class', sa.String(255)), + sa.Column('plugin', sa.String(255)), + sa.Column('driver', sa.String(255)), + sa.Column('service_type_id', sa.String(36), + sa.ForeignKey('servicetypes.id', + ondelete='CASCADE')), + sa.PrimaryKeyConstraint('id', 'service_class') + ) + op.drop_table('providerresourceassociations') diff --git a/neutron/db/migration/alembic_migrations/versions/569e98a8132b_metering.py b/neutron/db/migration/alembic_migrations/versions/569e98a8132b_metering.py new file mode 100644 index 000000000..7ec917368 --- /dev/null +++ b/neutron/db/migration/alembic_migrations/versions/569e98a8132b_metering.py 
@@ -0,0 +1,77 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""metering + +Revision ID: 569e98a8132b +Revises: f9263d6df56 +Create Date: 2013-07-17 15:38:36.254595 + +""" + +# revision identifiers, used by Alembic. +revision = '569e98a8132b' +down_revision = 'f9263d6df56' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = ['neutron.services.metering.metering_plugin.'
+ 'MeteringPlugin'] + +from alembic import op +import sqlalchemy as sa + +from neutron.db import migration + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.drop_table('meteringlabelrules') + op.drop_table('meteringlabels') + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.create_table('meteringlabels', + sa.Column('tenant_id', sa.String(length=255), + nullable=True), + sa.Column('id', sa.String(length=36), nullable=False), + sa.Column('name', sa.String(length=255), + nullable=True), + sa.Column('description', sa.String(length=255), + nullable=True), + sa.PrimaryKeyConstraint('id')) + op.create_table('meteringlabelrules', + sa.Column('id', sa.String(length=36), nullable=False), + sa.Column('direction', + sa.Enum('ingress', 'egress', + name='meteringlabels_direction'), + nullable=True), + sa.Column('remote_ip_prefix', sa.String(length=64), + nullable=True), + sa.Column('metering_label_id', sa.String(length=36), + nullable=False), + sa.Column('excluded', sa.Boolean(), + autoincrement=False, nullable=True), + sa.ForeignKeyConstraint(['metering_label_id'], + ['meteringlabels.id'], + name='meteringlabelrules_ibfk_1'), + sa.PrimaryKeyConstraint('id')) diff --git a/neutron/db/migration/alembic_migrations/versions/5918cbddab04_add_tables_for_route.py b/neutron/db/migration/alembic_migrations/versions/5918cbddab04_add_tables_for_route.py new file mode 100644 index 000000000..946f9c08e --- /dev/null +++ b/neutron/db/migration/alembic_migrations/versions/5918cbddab04_add_tables_for_route.py @@ -0,0 +1,71 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""add tables for router rules support + +Revision ID: 5918cbddab04 +Revises: 3cbf70257c28 +Create Date: 2013-06-16 02:20:07.024752 + +""" + +# revision identifiers, used by Alembic. +revision = '5918cbddab04' +down_revision = '3cbf70257c28' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.plugins.bigswitch.plugin.NeutronRestProxyV2' +] + +from alembic import op +import sqlalchemy as sa + + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.create_table('routerrules', + sa.Column('id', sa.Integer(), nullable=False), + sa.Column('source', sa.String(length=64), nullable=False), + sa.Column('destination', sa.String(length=64), + nullable=False), + sa.Column('action', sa.String(length=10), nullable=False), + sa.Column('router_id', sa.String(length=36), + nullable=True), + sa.ForeignKeyConstraint(['router_id'], ['routers.id'], + ondelete='CASCADE'), + sa.PrimaryKeyConstraint('id')) + op.create_table('nexthops', + sa.Column('rule_id', sa.Integer(), nullable=False), + sa.Column('nexthop', sa.String(length=64), nullable=False), + sa.ForeignKeyConstraint(['rule_id'], ['routerrules.id'], + ondelete='CASCADE'), + sa.PrimaryKeyConstraint('rule_id', 'nexthop')) + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.drop_table('nexthops') + op.drop_table('routerrules') diff --git 
a/neutron/db/migration/alembic_migrations/versions/5a875d0e5c_ryu.py b/neutron/db/migration/alembic_migrations/versions/5a875d0e5c_ryu.py new file mode 100644 index 000000000..8eee238a1 --- /dev/null +++ b/neutron/db/migration/alembic_migrations/versions/5a875d0e5c_ryu.py @@ -0,0 +1,74 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2012 New Dream Network, LLC (DreamHost) +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Mark McClain, DreamHost + +"""ryu + +This retroactively provides migration support for +https://review.openstack.org/#/c/11204/ + +Revision ID: 5a875d0e5c +Revises: 2c4af419145b +Create Date: 2012-12-18 12:32:04.482477 + +""" + + +# revision identifiers, used by Alembic. 
+revision = '5a875d0e5c' +down_revision = '2c4af419145b' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.plugins.ryu.ryu_neutron_plugin.RyuNeutronPluginV2' +] + +from alembic import op +import sqlalchemy as sa + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.create_table( + 'tunnelkeys', + sa.Column('network_id', sa.String(length=36), nullable=False), + sa.Column('tunnel_key', sa.Integer(), autoincrement=False, + nullable=False), + sa.ForeignKeyConstraint(['network_id'], ['networks.id'], + ondelete='CASCADE'), + sa.PrimaryKeyConstraint('tunnel_key') + ) + + op.create_table( + 'tunnelkeylasts', + sa.Column('last_key', sa.Integer(), autoincrement=False, + nullable=False), + sa.PrimaryKeyConstraint('last_key') + ) + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.drop_table('tunnelkeylasts') + op.drop_table('tunnelkeys') diff --git a/neutron/db/migration/alembic_migrations/versions/5ac1c354a051_n1kv_segment_alloc.py b/neutron/db/migration/alembic_migrations/versions/5ac1c354a051_n1kv_segment_alloc.py new file mode 100644 index 000000000..516faaf78 --- /dev/null +++ b/neutron/db/migration/alembic_migrations/versions/5ac1c354a051_n1kv_segment_alloc.py @@ -0,0 +1,83 @@ +# Copyright 2014 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""n1kv segment allocs for cisco n1kv plugin + +Revision ID: 5ac1c354a051 +Revises: 538732fa21e1 +Create Date: 2014-03-05 17:36:52.952608 + +""" + +# revision identifiers, used by Alembic. +revision = '5ac1c354a051' +down_revision = '538732fa21e1' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.plugins.cisco.network_plugin.PluginV2' +] + +from alembic import op +import sqlalchemy as sa + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.add_column( + 'cisco_n1kv_vlan_allocations', + sa.Column('network_profile_id', + sa.String(length=36), + nullable=False) + ) + op.create_foreign_key( + 'cisco_n1kv_vlan_allocations_ibfk_1', + source='cisco_n1kv_vlan_allocations', + referent='cisco_network_profiles', + local_cols=['network_profile_id'], remote_cols=['id'], + ondelete='CASCADE' + ) + op.add_column( + 'cisco_n1kv_vxlan_allocations', + sa.Column('network_profile_id', + sa.String(length=36), + nullable=False) + ) + op.create_foreign_key( + 'cisco_n1kv_vxlan_allocations_ibfk_1', + source='cisco_n1kv_vxlan_allocations', + referent='cisco_network_profiles', + local_cols=['network_profile_id'], remote_cols=['id'], + ondelete='CASCADE' + ) + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.drop_constraint('cisco_n1kv_vxlan_allocations_ibfk_1', + 'cisco_n1kv_vxlan_allocations', + 'foreignkey') + op.drop_column('cisco_n1kv_vxlan_allocations', 'network_profile_id') + op.drop_constraint('cisco_n1kv_vlan_allocations_ibfk_1', + 'cisco_n1kv_vlan_allocations', + 'foreignkey') + op.drop_column('cisco_n1kv_vlan_allocations', 'network_profile_id') diff --git 
a/neutron/db/migration/alembic_migrations/versions/5ac71e65402c_ml2_initial.py b/neutron/db/migration/alembic_migrations/versions/5ac71e65402c_ml2_initial.py new file mode 100644 index 000000000..9227197c7 --- /dev/null +++ b/neutron/db/migration/alembic_migrations/versions/5ac71e65402c_ml2_initial.py @@ -0,0 +1,84 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""ml2_initial + +Revision ID: 5ac71e65402c +Revises: 128e042a2b68 +Create Date: 2013-05-27 16:08:40.853821 + +""" + +# revision identifiers, used by Alembic. +revision = '5ac71e65402c' +down_revision = '128e042a2b68' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.plugins.ml2.plugin.Ml2Plugin' +] + +from alembic import op +import sqlalchemy as sa + + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + ### commands auto generated by Alembic - please adjust! 
### + op.create_table( + 'ml2_network_segments', + sa.Column('id', sa.String(length=36), nullable=False), + sa.Column('network_id', sa.String(length=36), nullable=False), + sa.Column('network_type', sa.String(length=32), nullable=False), + sa.Column('physical_network', sa.String(length=64), nullable=True), + sa.Column('segmentation_id', sa.Integer(), nullable=True), + sa.ForeignKeyConstraint(['network_id'], ['networks.id'], + ondelete='CASCADE'), + sa.PrimaryKeyConstraint('id') + ) + op.create_table( + 'ml2_vlan_allocations', + sa.Column('physical_network', sa.String(length=64), nullable=False), + sa.Column('vlan_id', sa.Integer(), autoincrement=False, + nullable=False), + sa.Column('allocated', sa.Boolean(), autoincrement=False, + nullable=False), + sa.PrimaryKeyConstraint('physical_network', 'vlan_id') + ) + op.create_table( + 'ml2_flat_allocations', + sa.Column('physical_network', sa.String(length=64), nullable=False), + sa.PrimaryKeyConstraint('physical_network') + ) + ### end Alembic commands ### + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + ### commands auto generated by Alembic - please adjust! ### + op.drop_table('ml2_network_segments') + op.drop_table('ml2_flat_allocations') + op.drop_table('ml2_vlan_allocations') + ### end Alembic commands ### diff --git a/neutron/db/migration/alembic_migrations/versions/63afba73813_ovs_tunnelendpoints_id_unique.py b/neutron/db/migration/alembic_migrations/versions/63afba73813_ovs_tunnelendpoints_id_unique.py new file mode 100644 index 000000000..c5f255059 --- /dev/null +++ b/neutron/db/migration/alembic_migrations/versions/63afba73813_ovs_tunnelendpoints_id_unique.py @@ -0,0 +1,64 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""Add unique constraint for id column of TunnelEndpoint + +Revision ID: 63afba73813 +Revises: 3c6e57a23db4 +Create Date: 2013-04-30 13:53:31.717450 + +""" + +# revision identifiers, used by Alembic. +revision = '63afba73813' +down_revision = '3c6e57a23db4' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.plugins.openvswitch.ovs_neutron_plugin.OVSNeutronPluginV2', +] + +from alembic import op + +from neutron.db import migration + + +CONSTRAINT_NAME = 'uniq_ovs_tunnel_endpoints0id' +TABLE_NAME = 'ovs_tunnel_endpoints' + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.create_unique_constraint( + name=CONSTRAINT_NAME, + source=TABLE_NAME, + local_cols=['id'] + ) + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.drop_constraint( + CONSTRAINT_NAME, + TABLE_NAME, + type_='unique' + ) diff --git a/neutron/db/migration/alembic_migrations/versions/66a59a7f516_nec_openflow_router.py b/neutron/db/migration/alembic_migrations/versions/66a59a7f516_nec_openflow_router.py new file mode 100644 index 000000000..ffead1147 --- /dev/null +++ b/neutron/db/migration/alembic_migrations/versions/66a59a7f516_nec_openflow_router.py @@ -0,0 +1,68 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in 
compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""NEC OpenFlow Router + +Revision ID: 66a59a7f516 +Revises: 32a65f71af51 +Create Date: 2013-09-03 22:16:31.446031 + +""" + +# revision identifiers, used by Alembic. +revision = '66a59a7f516' +down_revision = '32a65f71af51' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.plugins.nec.nec_plugin.NECPluginV2' +] + +from alembic import op +import sqlalchemy as sa + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.create_table( + 'ofcroutermappings', + sa.Column('ofc_id', sa.String(length=255), nullable=False), + sa.Column('quantum_id', sa.String(length=36), nullable=False), + sa.PrimaryKeyConstraint('quantum_id'), + sa.UniqueConstraint('ofc_id'), + ) + op.create_table( + 'routerproviders', + sa.Column('provider', sa.String(length=255), nullable=True), + sa.Column('router_id', sa.String(length=36), nullable=False), + sa.ForeignKeyConstraint(['router_id'], ['routers.id'], + ondelete='CASCADE'), + sa.PrimaryKeyConstraint('router_id'), + ) + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.drop_table('routerproviders') + op.drop_table('ofcroutermappings') diff --git a/neutron/db/migration/alembic_migrations/versions/6be312499f9_set_not_null_vlan_id_cisco.py b/neutron/db/migration/alembic_migrations/versions/6be312499f9_set_not_null_vlan_id_cisco.py new file mode 
100644 index 000000000..e304fdc24 --- /dev/null +++ b/neutron/db/migration/alembic_migrations/versions/6be312499f9_set_not_null_vlan_id_cisco.py @@ -0,0 +1,54 @@ +# Copyright 2014 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""set_not_null_vlan_id_cisco + +Revision ID: 6be312499f9 +Revises: d06e871c0d5 +Create Date: 2014-03-27 14:38:12.571173 + +""" + +# revision identifiers, used by Alembic. +revision = '6be312499f9' +down_revision = 'd06e871c0d5' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.plugins.cisco.network_plugin.PluginV2' +] + +from alembic import op +import sqlalchemy as sa + + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.alter_column('cisco_nexusport_bindings', 'vlan_id', nullable=False, + existing_type=sa.Integer) + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.alter_column('cisco_nexusport_bindings', 'vlan_id', nullable=True, + existing_type=sa.Integer) diff --git a/neutron/db/migration/alembic_migrations/versions/81c553f3776c_bsn_consistencyhashes.py b/neutron/db/migration/alembic_migrations/versions/81c553f3776c_bsn_consistencyhashes.py new file mode 100644 index 000000000..0300c2475 --- /dev/null +++ 
b/neutron/db/migration/alembic_migrations/versions/81c553f3776c_bsn_consistencyhashes.py @@ -0,0 +1,56 @@ +# Copyright 2014 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""bsn_consistencyhashes + +Revision ID: 81c553f3776c +Revises: 24c7ea5160d7 +Create Date: 2014-02-26 18:56:00.402855 + +""" + +# revision identifiers, used by Alembic. +revision = '81c553f3776c' +down_revision = '24c7ea5160d7' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.plugins.bigswitch.plugin.NeutronRestProxyV2', + 'neutron.plugins.ml2.plugin.Ml2Plugin' +] + +from alembic import op +import sqlalchemy as sa + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.create_table( + 'consistencyhashes', + sa.Column('hash_id', sa.String(255), primary_key=True), + sa.Column('hash', sa.String(255), nullable=False) + ) + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.drop_table('consistencyhashes') diff --git a/neutron/db/migration/alembic_migrations/versions/86cf4d88bd3_remove_bigswitch_por.py b/neutron/db/migration/alembic_migrations/versions/86cf4d88bd3_remove_bigswitch_por.py new file mode 100644 index 000000000..7d91893e2 --- /dev/null +++ 
b/neutron/db/migration/alembic_migrations/versions/86cf4d88bd3_remove_bigswitch_por.py @@ -0,0 +1,59 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""remove bigswitch port tracking table + +Revision ID: 86cf4d88bd3 +Revises: 569e98a8132b +Create Date: 2013-08-13 21:59:04.373496 + +""" + +# revision identifiers, used by Alembic. +revision = '86cf4d88bd3' +down_revision = '569e98a8132b' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.plugins.bigswitch.plugin.NeutronRestProxyV2' +] + +from alembic import op +import sqlalchemy as sa + + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.drop_table('portlocations') + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.create_table('portlocations', + sa.Column('port_id', sa.String(length=255), + primary_key=True, nullable=False), + sa.Column('host_id', + sa.String(length=255), nullable=False) + ) diff --git a/neutron/db/migration/alembic_migrations/versions/8f682276ee4_ryu_plugin_quota.py b/neutron/db/migration/alembic_migrations/versions/8f682276ee4_ryu_plugin_quota.py new file mode 100644 index 000000000..110f206e3 --- /dev/null +++ 
b/neutron/db/migration/alembic_migrations/versions/8f682276ee4_ryu_plugin_quota.py @@ -0,0 +1,61 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2014 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""ryu plugin quota + +Revision ID: 8f682276ee4 +Revises: ed93525fd003 +Create Date: 2014-01-07 15:47:17.349425 + +""" + +# revision identifiers, used by Alembic. +revision = '8f682276ee4' +down_revision = 'ed93525fd003' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.plugins.ryu.ryu_neutron_plugin.RyuNeutronPluginV2' +] + +from alembic import op +import sqlalchemy as sa + + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.create_table( + 'quotas', + sa.Column('id', sa.String(length=36), nullable=False), + sa.Column('tenant_id', sa.String(length=255), nullable=True), + sa.Column('resource', sa.String(length=255), nullable=True), + sa.Column('limit', sa.Integer(), nullable=True), + sa.PrimaryKeyConstraint('id') + ) + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.drop_table('quotas') diff --git a/neutron/db/migration/alembic_migrations/versions/HEAD b/neutron/db/migration/alembic_migrations/versions/HEAD new file mode 100644 index 000000000..2431c7d6b --- /dev/null +++ 
b/neutron/db/migration/alembic_migrations/versions/HEAD @@ -0,0 +1 @@ +2db5203cb7a9 \ No newline at end of file diff --git a/neutron/db/migration/alembic_migrations/versions/README b/neutron/db/migration/alembic_migrations/versions/README new file mode 100644 index 000000000..1067a3876 --- /dev/null +++ b/neutron/db/migration/alembic_migrations/versions/README @@ -0,0 +1,5 @@ +This directory contains the migration scripts for the Neutron project. Please +see the README in neutron/db/migration on how to use and generate new +migrations. + + diff --git a/neutron/db/migration/alembic_migrations/versions/abc88c33f74f_lb_stats_needs_bigint.py b/neutron/db/migration/alembic_migrations/versions/abc88c33f74f_lb_stats_needs_bigint.py new file mode 100644 index 000000000..79f39dde3 --- /dev/null +++ b/neutron/db/migration/alembic_migrations/versions/abc88c33f74f_lb_stats_needs_bigint.py @@ -0,0 +1,67 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2014 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""lb stats + +Revision ID: abc88c33f74f +Revises: 3d2585038b95 +Create Date: 2014-02-24 20:14:59.577972 + +""" + +# revision identifiers, used by Alembic. 
+revision = 'abc88c33f74f' +down_revision = '3d2585038b95' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.services.loadbalancer.plugin.LoadBalancerPlugin' +] + +from alembic import op +import sqlalchemy as sa + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.alter_column('poolstatisticss', 'bytes_in', + type_=sa.BigInteger(), existing_type=sa.Integer()) + op.alter_column('poolstatisticss', 'bytes_out', + type_=sa.BigInteger(), existing_type=sa.Integer()) + op.alter_column('poolstatisticss', 'active_connections', + type_=sa.BigInteger(), existing_type=sa.Integer()) + op.alter_column('poolstatisticss', 'total_connections', + type_=sa.BigInteger(), existing_type=sa.Integer()) + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.alter_column('poolstatisticss', 'bytes_in', + type_=sa.Integer(), existing_type=sa.BigInteger()) + op.alter_column('poolstatisticss', 'bytes_out', + type_=sa.Integer(), existing_type=sa.BigInteger()) + op.alter_column('poolstatisticss', 'active_connections', + type_=sa.Integer(), existing_type=sa.BigInteger()) + op.alter_column('poolstatisticss', 'total_connections', + type_=sa.Integer(), existing_type=sa.BigInteger()) diff --git a/neutron/db/migration/alembic_migrations/versions/b65aa907aec_set_length_of_protocol_field.py b/neutron/db/migration/alembic_migrations/versions/b65aa907aec_set_length_of_protocol_field.py new file mode 100644 index 000000000..de82ce505 --- /dev/null +++ b/neutron/db/migration/alembic_migrations/versions/b65aa907aec_set_length_of_protocol_field.py @@ -0,0 +1,52 @@ +# Copyright 2014 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""set_length_of_protocol_field + +Revision ID: b65aa907aec +Revises: 2447ad0e9585 +Create Date: 2014-03-21 16:30:10.626649 + +""" + +# revision identifiers, used by Alembic. +revision = 'b65aa907aec' +down_revision = '1e5dd1d09b22' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.services.firewall.fwaas_plugin.FirewallPlugin' +] + +from alembic import op +import sqlalchemy as sa + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.alter_column('firewall_rules', 'protocol', type_=sa.String(40), + existing_nullable=True) + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + pass diff --git a/neutron/db/migration/alembic_migrations/versions/b7a8863760e_rm_cisco_vlan_bindin.py b/neutron/db/migration/alembic_migrations/versions/b7a8863760e_rm_cisco_vlan_bindin.py new file mode 100644 index 000000000..dcda4e686 --- /dev/null +++ b/neutron/db/migration/alembic_migrations/versions/b7a8863760e_rm_cisco_vlan_bindin.py @@ -0,0 +1,60 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""Remove cisco_vlan_bindings table + +Revision ID: b7a8863760e +Revises: 3cabb850f4a5 +Create Date: 2013-07-03 19:15:19.143175 + +""" + +# revision identifiers, used by Alembic. +revision = 'b7a8863760e' +down_revision = '3cabb850f4a5' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.plugins.cisco.network_plugin.PluginV2' +] + +from alembic import op +import sqlalchemy as sa + + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.drop_table('cisco_vlan_bindings') + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.create_table( + 'cisco_vlan_bindings', + sa.Column('vlan_id', sa.Integer(display_width=11), nullable=False), + sa.Column('vlan_name', sa.String(length=255), nullable=True), + sa.Column('network_id', sa.String(length=255), nullable=False), + sa.PrimaryKeyConstraint('vlan_id') + ) diff --git a/neutron/db/migration/alembic_migrations/versions/c88b6b5fea3_cisco_n1kv_tables.py b/neutron/db/migration/alembic_migrations/versions/c88b6b5fea3_cisco_n1kv_tables.py new file mode 100644 index 000000000..380a236ed --- /dev/null +++ b/neutron/db/migration/alembic_migrations/versions/c88b6b5fea3_cisco_n1kv_tables.py @@ -0,0 +1,150 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may 
+# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""Cisco N1KV tables + +Revision ID: c88b6b5fea3 +Revises: 263772d65691 +Create Date: 2013-08-06 15:08:32.651975 + +""" + +# revision identifiers, used by Alembic. +revision = 'c88b6b5fea3' +down_revision = '263772d65691' + +migration_for_plugins = [ + 'neutron.plugins.cisco.network_plugin.PluginV2' +] + +from alembic import op +import sqlalchemy as sa + +from neutron.db import migration + +vlan_type = sa.Enum('vlan', 'vxlan', name='vlan_type') +network_type = sa.Enum('network', 'policy', name='network_type') + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.drop_column('cisco_credentials', 'tenant_id') + op.add_column( + 'cisco_credentials', + sa.Column('type', sa.String(length=255), nullable=True) + ) + op.create_table( + 'cisco_policy_profiles', + sa.Column('id', sa.String(length=36), nullable=False), + sa.Column('name', sa.String(length=255), nullable=True), + sa.PrimaryKeyConstraint('id') + ) + op.create_table( + 'cisco_n1kv_vmnetworks', + sa.Column('name', sa.String(length=80), nullable=False), + sa.Column('profile_id', sa.String(length=36), nullable=True), + sa.Column('network_id', sa.String(length=36), nullable=True), + sa.Column('port_count', sa.Integer(), autoincrement=False, + nullable=True), + sa.ForeignKeyConstraint(['profile_id'], ['cisco_policy_profiles.id']), + sa.PrimaryKeyConstraint('name') + ) + op.create_table( + 'cisco_n1kv_vxlan_allocations', + sa.Column('vxlan_id', sa.Integer(), 
autoincrement=False, + nullable=False), + sa.Column('allocated', sa.Boolean(), autoincrement=False, + nullable=False), + sa.PrimaryKeyConstraint('vxlan_id') + ) + op.create_table( + 'cisco_network_profiles', + sa.Column('id', sa.String(length=36), nullable=False), + sa.Column('name', sa.String(length=255), nullable=True), + sa.Column('segment_type', vlan_type, nullable=False), + sa.Column('segment_range', sa.String(length=255), nullable=True), + sa.Column('multicast_ip_index', sa.Integer(), autoincrement=False, + nullable=True), + sa.Column('multicast_ip_range', sa.String(length=255), nullable=True), + sa.Column('physical_network', sa.String(length=255), nullable=True), + sa.PrimaryKeyConstraint('id') + ) + op.create_table( + 'cisco_n1kv_profile_bindings', + sa.Column('profile_type', network_type, nullable=True), + sa.Column('tenant_id', sa.String(length=36), nullable=False), + sa.Column('profile_id', sa.String(length=36), nullable=False), + sa.PrimaryKeyConstraint('tenant_id', 'profile_id') + ) + op.create_table( + 'cisco_n1kv_port_bindings', + sa.Column('port_id', sa.String(length=36), nullable=False), + sa.Column('profile_id', sa.String(length=36), nullable=True), + sa.ForeignKeyConstraint(['port_id'], ['ports.id'], ondelete='CASCADE'), + sa.ForeignKeyConstraint(['profile_id'], ['cisco_policy_profiles.id']), + sa.PrimaryKeyConstraint('port_id') + ) + op.create_table( + 'cisco_n1kv_vlan_allocations', + sa.Column('physical_network', sa.String(length=64), nullable=False), + sa.Column('vlan_id', + sa.Integer(), + autoincrement=False, + nullable=False), + sa.Column('allocated', + sa.Boolean(), + autoincrement=False, + nullable=False), + sa.PrimaryKeyConstraint('physical_network', 'vlan_id') + ) + op.create_table( + 'cisco_n1kv_network_bindings', + sa.Column('network_id', sa.String(length=36), nullable=False), + sa.Column('network_type', sa.String(length=32), nullable=False), + sa.Column('physical_network', sa.String(length=64), nullable=True), + 
sa.Column('segmentation_id', sa.Integer(), autoincrement=False, + nullable=True), + sa.Column('multicast_ip', sa.String(length=32), nullable=True), + sa.Column('profile_id', sa.String(length=36), nullable=True), + sa.ForeignKeyConstraint(['network_id'], ['networks.id'], + ondelete='CASCADE'), + sa.ForeignKeyConstraint(['profile_id'], ['cisco_network_profiles.id']), + sa.PrimaryKeyConstraint('network_id') + ) + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.drop_table('cisco_n1kv_network_bindings') + op.drop_table('cisco_n1kv_vlan_allocations') + op.drop_table('cisco_n1kv_port_bindings') + op.drop_table('cisco_n1kv_profile_bindings') + network_type.drop(op.get_bind(), checkfirst=False) + op.drop_table('cisco_network_profiles') + vlan_type.drop(op.get_bind(), checkfirst=False) + op.drop_table('cisco_n1kv_vxlan_allocations') + op.drop_table('cisco_n1kv_vmnetworks') + op.drop_table('cisco_policy_profiles') + op.drop_column('cisco_credentials', 'type') + op.add_column( + 'cisco_credentials', + sa.Column('tenant_id', sa.String(length=255), nullable=False) + ) diff --git a/neutron/db/migration/alembic_migrations/versions/d06e871c0d5_set_admin_state_up_not_null_ml2.py b/neutron/db/migration/alembic_migrations/versions/d06e871c0d5_set_admin_state_up_not_null_ml2.py new file mode 100644 index 000000000..be99747c5 --- /dev/null +++ b/neutron/db/migration/alembic_migrations/versions/d06e871c0d5_set_admin_state_up_not_null_ml2.py @@ -0,0 +1,54 @@ +# Copyright 2014 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""set_admin_state_up_not_null_ml2 + +Revision ID: d06e871c0d5 +Revises: 2447ad0e9585 +Create Date: 2014-03-21 17:22:20.545186 + +""" + +# revision identifiers, used by Alembic. +revision = 'd06e871c0d5' +down_revision = '4eca4a84f08a' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.plugins.ml2.plugin.Ml2Plugin' +] + +from alembic import op +import sqlalchemy as sa + + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.alter_column('ml2_brocadeports', 'admin_state_up', nullable=False, + existing_type=sa.Boolean) + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.alter_column('ml2_brocadeports', 'admin_state_up', nullable=True, + existing_type=sa.Boolean) diff --git a/neutron/db/migration/alembic_migrations/versions/e197124d4b9_add_unique_constrain.py b/neutron/db/migration/alembic_migrations/versions/e197124d4b9_add_unique_constrain.py new file mode 100644 index 000000000..b2f4b5a87 --- /dev/null +++ b/neutron/db/migration/alembic_migrations/versions/e197124d4b9_add_unique_constrain.py @@ -0,0 +1,65 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""add unique constraint to members + +Revision ID: e197124d4b9 +Revises: havana +Create Date: 2013-11-17 10:09:37.728903 + +""" + +# revision identifiers, used by Alembic. +revision = 'e197124d4b9' +down_revision = 'havana' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.services.loadbalancer.plugin.LoadBalancerPlugin', + 'neutron.plugins.nicira.NeutronServicePlugin.NvpAdvancedPlugin', +] + +from alembic import op + +from neutron.db import migration + + +CONSTRAINT_NAME = 'uniq_member0pool_id0address0port' +TABLE_NAME = 'members' + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.create_unique_constraint( + name=CONSTRAINT_NAME, + source=TABLE_NAME, + local_cols=['pool_id', 'address', 'protocol_port'] + ) + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.drop_constraint( + CONSTRAINT_NAME, + TABLE_NAME, + type_='unique' + ) diff --git a/neutron/db/migration/alembic_migrations/versions/e6b16a30d97_cisco_provider_nets.py b/neutron/db/migration/alembic_migrations/versions/e6b16a30d97_cisco_provider_nets.py new file mode 100644 index 000000000..8671f77e9 --- /dev/null +++ b/neutron/db/migration/alembic_migrations/versions/e6b16a30d97_cisco_provider_nets.py @@ -0,0 +1,62 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the 
"License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""Add cisco_provider_networks table + +Revision ID: e6b16a30d97 +Revises: 557edfc53098 +Create Date: 2013-07-18 21:46:12.792504 + +""" + +# revision identifiers, used by Alembic. +revision = 'e6b16a30d97' +down_revision = '557edfc53098' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.plugins.cisco.network_plugin.PluginV2' +] + +from alembic import op +import sqlalchemy as sa + + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.create_table( + 'cisco_provider_networks', + sa.Column('network_id', sa.String(length=36), nullable=False), + sa.Column('network_type', sa.String(length=255), nullable=False), + sa.Column('segmentation_id', sa.Integer(), nullable=False), + sa.ForeignKeyConstraint(['network_id'], ['networks.id'], + ondelete='CASCADE'), + sa.PrimaryKeyConstraint('network_id') + ) + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.drop_table('cisco_provider_networks') diff --git a/neutron/db/migration/alembic_migrations/versions/e766b19a3bb_nuage_initial.py b/neutron/db/migration/alembic_migrations/versions/e766b19a3bb_nuage_initial.py new file mode 100644 index 000000000..3562c7d94 --- /dev/null +++ b/neutron/db/migration/alembic_migrations/versions/e766b19a3bb_nuage_initial.py @@ -0,0 +1,120 @@ +# 
Copyright 2014 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""nuage_initial + +Revision ID: e766b19a3bb +Revises: 1b2580001654 +Create Date: 2014-02-14 18:03:14.841064 + +""" + +# revision identifiers, used by Alembic. +revision = 'e766b19a3bb' +down_revision = '1b2580001654' + +migration_for_plugins = [ + 'neutron.plugins.nuage.plugin.NuagePlugin' +] + +from alembic import op +import sqlalchemy as sa + +from neutron.db import migration +from neutron.db.migration.alembic_migrations import common_ext_ops + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + common_ext_ops.upgrade_l3() + + op.create_table( + 'quotas', + sa.Column('id', sa.String(length=36), nullable=False), + sa.Column('tenant_id', sa.String(length=255), nullable=True), + sa.Column('resource', sa.String(length=255), nullable=True), + sa.Column('limit', sa.Integer(), nullable=True), + sa.PrimaryKeyConstraint('id'), + ) + op.create_table( + 'net_partitions', + sa.Column('id', sa.String(length=36), nullable=False), + sa.Column('name', sa.String(length=64), nullable=True), + sa.Column('l3dom_tmplt_id', sa.String(length=36), nullable=True), + sa.Column('l2dom_tmplt_id', sa.String(length=36), nullable=True), + sa.PrimaryKeyConstraint('id'), + ) + op.create_table( + 'port_mapping', + sa.Column('port_id', sa.String(length=36), nullable=False), + sa.Column('nuage_vport_id', sa.String(length=36), 
nullable=True), + sa.Column('nuage_vif_id', sa.String(length=36), nullable=True), + sa.Column('static_ip', sa.Boolean(), nullable=True), + sa.ForeignKeyConstraint(['port_id'], ['ports.id'], + ondelete='CASCADE'), + sa.PrimaryKeyConstraint('port_id'), + ) + op.create_table( + 'subnet_l2dom_mapping', + sa.Column('subnet_id', sa.String(length=36), nullable=False), + sa.Column('net_partition_id', sa.String(length=36), nullable=True), + sa.Column('nuage_subnet_id', sa.String(length=36), nullable=True), + sa.Column('nuage_l2dom_tmplt_id', sa.String(length=36), + nullable=True), + sa.Column('nuage_user_id', sa.String(length=36), nullable=True), + sa.Column('nuage_group_id', sa.String(length=36), nullable=True), + sa.ForeignKeyConstraint(['net_partition_id'], ['net_partitions.id'], + ondelete='CASCADE'), + sa.ForeignKeyConstraint(['subnet_id'], ['subnets.id'], + ondelete='CASCADE'), + sa.PrimaryKeyConstraint('subnet_id'), + ) + op.create_table( + 'net_partition_router_mapping', + sa.Column('net_partition_id', sa.String(length=36), nullable=False), + sa.Column('router_id', sa.String(length=36), nullable=False), + sa.Column('nuage_router_id', sa.String(length=36), nullable=True), + sa.ForeignKeyConstraint(['net_partition_id'], ['net_partitions.id'], + ondelete='CASCADE'), + sa.ForeignKeyConstraint(['router_id'], ['routers.id'], + ondelete='CASCADE'), + sa.PrimaryKeyConstraint('router_id'), + ) + op.create_table( + 'router_zone_mapping', + sa.Column('router_id', sa.String(length=36), nullable=False), + sa.Column('nuage_zone_id', sa.String(length=36), nullable=True), + sa.Column('nuage_user_id', sa.String(length=36), nullable=True), + sa.Column('nuage_group_id', sa.String(length=36), nullable=True), + sa.ForeignKeyConstraint(['router_id'], ['routers.id'], + ondelete='CASCADE'), + sa.PrimaryKeyConstraint('router_id'), + ) + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + 
op.drop_table('router_zone_mapping') + op.drop_table('net_partition_router_mapping') + op.drop_table('subnet_l2dom_mapping') + op.drop_table('port_mapping') + op.drop_table('net_partitions') + op.drop_table('quotas') + + common_ext_ops.downgrade_l3() diff --git a/neutron/db/migration/alembic_migrations/versions/ed93525fd003_bigswitch_quota.py b/neutron/db/migration/alembic_migrations/versions/ed93525fd003_bigswitch_quota.py new file mode 100644 index 000000000..3baa247d4 --- /dev/null +++ b/neutron/db/migration/alembic_migrations/versions/ed93525fd003_bigswitch_quota.py @@ -0,0 +1,64 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2014 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""bigswitch_quota + +Revision ID: ed93525fd003 +Revises: 50e86cb2637a +Create Date: 2014-01-05 10:59:19.860397 + +""" + +# revision identifiers, used by Alembic. +revision = 'ed93525fd003' +down_revision = '50e86cb2637a' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.plugins.bigswitch.plugin.NeutronRestProxyV2' +] + +from alembic import op +import sqlalchemy as sa + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + ### commands auto generated by Alembic - please adjust! 
### + op.create_table( + 'quotas', + sa.Column('id', sa.String(length=36), nullable=False), + sa.Column('tenant_id', sa.String(length=255), nullable=True), + sa.Column('resource', sa.String(length=255), nullable=True), + sa.Column('limit', sa.Integer(), nullable=True), + sa.PrimaryKeyConstraint('id') + ) + ### end Alembic commands ### + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + ### commands auto generated by Alembic - please adjust! ### + op.drop_table('quotas') + ### end Alembic commands ### diff --git a/neutron/db/migration/alembic_migrations/versions/f44ab9871cd6_bsn_security_groups.py b/neutron/db/migration/alembic_migrations/versions/f44ab9871cd6_bsn_security_groups.py new file mode 100644 index 000000000..b75bf4201 --- /dev/null +++ b/neutron/db/migration/alembic_migrations/versions/f44ab9871cd6_bsn_security_groups.py @@ -0,0 +1,95 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2014 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""bsn_security_groups + +Revision ID: f44ab9871cd6 +Revises: e766b19a3bb +Create Date: 2014-02-26 17:43:43.051078 + +""" + +# revision identifiers, used by Alembic. 
+revision = 'f44ab9871cd6' +down_revision = 'e766b19a3bb' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.plugins.bigswitch.plugin.NeutronRestProxyV2', +] + +from alembic import op +import sqlalchemy as sa + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + ### commands auto generated by Alembic - please adjust! ### + op.create_table( + 'securitygroups', + sa.Column('tenant_id', sa.String(length=255), nullable=True), + sa.Column('id', sa.String(length=36), nullable=False), + sa.Column('name', sa.String(length=255), nullable=True), + sa.Column('description', sa.String(length=255), nullable=True), + sa.PrimaryKeyConstraint('id') + ) + op.create_table( + 'securitygrouprules', + sa.Column('tenant_id', sa.String(length=255), nullable=True), + sa.Column('id', sa.String(length=36), nullable=False), + sa.Column('security_group_id', sa.String(length=36), nullable=False), + sa.Column('remote_group_id', sa.String(length=36), nullable=True), + sa.Column('direction', + sa.Enum('ingress', 'egress', + name='securitygrouprules_direction'), + nullable=True), + sa.Column('ethertype', sa.String(length=40), nullable=True), + sa.Column('protocol', sa.String(length=40), nullable=True), + sa.Column('port_range_min', sa.Integer(), nullable=True), + sa.Column('port_range_max', sa.Integer(), nullable=True), + sa.Column('remote_ip_prefix', sa.String(length=255), nullable=True), + sa.ForeignKeyConstraint(['security_group_id'], ['securitygroups.id'], + ondelete='CASCADE'), + sa.ForeignKeyConstraint(['remote_group_id'], ['securitygroups.id'], + ondelete='CASCADE'), + sa.PrimaryKeyConstraint('id') + ) + op.create_table( + 'securitygroupportbindings', + sa.Column('port_id', sa.String(length=36), nullable=False), + sa.Column('security_group_id', sa.String(length=36), nullable=False), + sa.ForeignKeyConstraint(['port_id'], 
['ports.id'], ondelete='CASCADE'), + sa.ForeignKeyConstraint(['security_group_id'], ['securitygroups.id']), + sa.PrimaryKeyConstraint('port_id', 'security_group_id') + ) + ### end Alembic commands ### + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + ### commands auto generated by Alembic - please adjust! ### + op.drop_table('securitygroupportbindings') + op.drop_table('securitygrouprules') + op.drop_table('securitygroups') + ### end Alembic commands ### diff --git a/neutron/db/migration/alembic_migrations/versions/f489cf14a79c_lbaas_havana.py b/neutron/db/migration/alembic_migrations/versions/f489cf14a79c_lbaas_havana.py new file mode 100644 index 000000000..b2d07e607 --- /dev/null +++ b/neutron/db/migration/alembic_migrations/versions/f489cf14a79c_lbaas_havana.py @@ -0,0 +1,162 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""DB support for load balancing service (havana) + +Revision ID: f489cf14a79c +Revises: grizzly +Create Date: 2013-02-04 16:32:32.048731 + +""" + +# revision identifiers, used by Alembic. 
# revision identifiers, used by Alembic.
revision = 'f489cf14a79c'
down_revision = 'grizzly'

# Only executed when the LBaaS service plugin is active.
migration_for_plugins = [
    'neutron.services.loadbalancer.plugin.LoadBalancerPlugin',
]

from alembic import op
import sqlalchemy as sa

from neutron.db import migration


def upgrade(active_plugins=None, options=None):
    """Create the core LBaaS schema.

    Tables: vips, sessionpersistences, pools, healthmonitors,
    poolmonitorassociations, members, poolstatisticss.  Creation order
    matters: tables with foreign keys follow their referents.
    """
    if not migration.should_run(active_plugins, migration_for_plugins):
        return

    op.create_table(
        u'vips',
        sa.Column(u'tenant_id', sa.String(255), nullable=True),
        sa.Column(u'id', sa.String(36), nullable=False),
        sa.Column(u'name', sa.String(255), nullable=True),
        sa.Column(u'description', sa.String(255), nullable=True),
        sa.Column(u'port_id', sa.String(36), nullable=True),
        sa.Column(u'protocol_port', sa.Integer(), nullable=False),
        sa.Column(u'protocol',
                  sa.Enum("HTTP", "HTTPS", "TCP", name="lb_protocols"),
                  nullable=False),
        sa.Column(u'pool_id', sa.String(36), nullable=False),
        sa.Column(u'status', sa.String(16), nullable=False),
        sa.Column(u'admin_state_up', sa.Boolean(), nullable=False),
        sa.Column(u'connection_limit', sa.Integer(), nullable=True),
        sa.ForeignKeyConstraint(['port_id'], ['ports.id'], ),
        # A pool can back at most one VIP.
        sa.UniqueConstraint('pool_id'),
        sa.PrimaryKeyConstraint(u'id')
    )
    op.create_table(
        u'sessionpersistences',
        sa.Column(u'vip_id', sa.String(36), nullable=False),
        sa.Column(u'type',
                  # NOTE(review): enum type name contains a typo
                  # ('sesssion'); preserved because it is part of the
                  # deployed schema and must not be altered here.
                  sa.Enum("SOURCE_IP",
                          "HTTP_COOKIE",
                          "APP_COOKIE",
                          name="sesssionpersistences_type"),
                  nullable=False),
        sa.Column(u'cookie_name', sa.String(1024), nullable=True),
        sa.ForeignKeyConstraint(['vip_id'], [u'vips.id'], ),
        sa.PrimaryKeyConstraint(u'vip_id')
    )
    op.create_table(
        u'pools',
        sa.Column(u'tenant_id', sa.String(255), nullable=True),
        sa.Column(u'id', sa.String(36), nullable=False),
        sa.Column(u'vip_id', sa.String(36), nullable=True),
        sa.Column(u'name', sa.String(255), nullable=True),
        sa.Column(u'description', sa.String(255), nullable=True),
        sa.Column(u'subnet_id', sa.String(36), nullable=False),
        sa.Column(u'protocol',
                  sa.Enum("HTTP", "HTTPS", "TCP", name="lb_protocols"),
                  nullable=False),
        sa.Column(u'lb_method',
                  sa.Enum("ROUND_ROBIN",
                          "LEAST_CONNECTIONS",
                          "SOURCE_IP",
                          name="pools_lb_method"),
                  nullable=False),
        sa.Column(u'status', sa.String(16), nullable=False),
        sa.Column(u'admin_state_up', sa.Boolean(), nullable=False),
        sa.ForeignKeyConstraint(['vip_id'], [u'vips.id'], ),
        sa.PrimaryKeyConstraint(u'id')
    )
    op.create_table(
        u'healthmonitors',
        sa.Column(u'tenant_id', sa.String(255), nullable=True),
        sa.Column(u'id', sa.String(36), nullable=False),
        sa.Column(u'type',
                  # NOTE(review): 'healthmontiors_type' is a typo but is
                  # the historical enum type name; do not "fix" it.
                  sa.Enum("PING",
                          "TCP",
                          "HTTP",
                          "HTTPS",
                          name="healthmontiors_type"),
                  nullable=False),
        sa.Column(u'delay', sa.Integer(), nullable=False),
        sa.Column(u'timeout', sa.Integer(), nullable=False),
        sa.Column(u'max_retries', sa.Integer(), nullable=False),
        sa.Column(u'http_method', sa.String(16), nullable=True),
        sa.Column(u'url_path', sa.String(255), nullable=True),
        sa.Column(u'expected_codes', sa.String(64), nullable=True),
        sa.Column(u'status', sa.String(16), nullable=False),
        sa.Column(u'admin_state_up', sa.Boolean(), nullable=False),
        sa.PrimaryKeyConstraint(u'id')
    )
    op.create_table(
        u'poolmonitorassociations',
        sa.Column(u'pool_id', sa.String(36), nullable=False),
        sa.Column(u'monitor_id', sa.String(36), nullable=False),
        sa.ForeignKeyConstraint(['monitor_id'], [u'healthmonitors.id'], ),
        sa.ForeignKeyConstraint(['pool_id'], [u'pools.id'], ),
        sa.PrimaryKeyConstraint(u'pool_id', u'monitor_id')
    )
    op.create_table(
        u'members',
        sa.Column(u'tenant_id', sa.String(255), nullable=True),
        sa.Column(u'id', sa.String(36), nullable=False),
        sa.Column(u'pool_id', sa.String(36), nullable=False),
        sa.Column(u'address', sa.String(64), nullable=False),
        sa.Column(u'protocol_port', sa.Integer(), nullable=False),
        sa.Column(u'weight', sa.Integer(), nullable=False),
        sa.Column(u'status', sa.String(16), nullable=False),
        sa.Column(u'admin_state_up', sa.Boolean(), nullable=False),
        sa.ForeignKeyConstraint(['pool_id'], [u'pools.id'], ),
        sa.PrimaryKeyConstraint(u'id')
    )
    op.create_table(
        # NOTE(review): double 's' in the table name is intentional
        # historical naming; preserved as-is.
        u'poolstatisticss',
        sa.Column(u'pool_id', sa.String(36), nullable=False),
        sa.Column(u'bytes_in', sa.Integer(), nullable=False),
        sa.Column(u'bytes_out', sa.Integer(), nullable=False),
        sa.Column(u'active_connections', sa.Integer(), nullable=False),
        sa.Column(u'total_connections', sa.Integer(), nullable=False),
        sa.ForeignKeyConstraint(['pool_id'], [u'pools.id'], ),
        sa.PrimaryKeyConstraint(u'pool_id')
    )


def downgrade(active_plugins=None, options=None):
    """Drop the LBaaS tables in reverse creation order (FK-safe)."""
    if not migration.should_run(active_plugins, migration_for_plugins):
        return

    op.drop_table(u'poolstatisticss')
    op.drop_table(u'members')
    op.drop_table(u'poolmonitorassociations')
    op.drop_table(u'healthmonitors')
    op.drop_table(u'pools')
    op.drop_table(u'sessionpersistences')
    op.drop_table(u'vips')
#

"""remove_dhcp_lease

Revision ID: f9263d6df56
Revises: c88b6b5fea3
Create Date: 2013-07-17 12:31:33.731197

"""

# revision identifiers, used by Alembic.
revision = 'f9263d6df56'
down_revision = 'c88b6b5fea3'

# Change to ['*'] if this migration applies to all plugins

migration_for_plugins = [
    '*'
]

from alembic import op
import sqlalchemy as sa


def upgrade(active_plugins=None, options=None):
    # Drop the unused DHCP lease expiration timestamp from ipallocations.
    op.drop_column('ipallocations', u'expiration')


def downgrade(active_plugins=None, options=None):
    # Restore the column shape; the dropped data itself is not recoverable.
    restored = sa.Column(u'expiration', sa.DateTime(), nullable=True)
    op.add_column('ipallocations', restored)
revision = 'fcac4c42e2cc'
down_revision = '2eeaf963a447'

# Change to ['*'] if this migration applies to all plugins

# Only executed for the Big Switch REST proxy plugin.
migration_for_plugins = [
    'neutron.plugins.bigswitch.plugin.NeutronRestProxyV2'
]

from alembic import op
import sqlalchemy as sa

from neutron.db import migration


def upgrade(active_plugins=None, options=None):
    """Create the allowedaddresspairs table for the Big Switch plugin."""
    if not migration.should_run(active_plugins, migration_for_plugins):
        return

    op.create_table(
        'allowedaddresspairs',
        sa.Column('port_id', sa.String(length=36), nullable=False),
        sa.Column('mac_address', sa.String(length=32), nullable=False),
        sa.Column('ip_address', sa.String(length=64), nullable=False),
        # Address pairs are removed automatically with their port.
        sa.ForeignKeyConstraint(['port_id'], ['ports.id'], ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('port_id', 'mac_address', 'ip_address'),
    )


def downgrade(active_plugins=None, options=None):
    """Drop the allowedaddresspairs table."""
    if not migration.should_run(active_plugins, migration_for_plugins):
        return

    op.drop_table('allowedaddresspairs')
#
# @author Mark McClain (DreamHost)

"""folsom initial database

Revision ID: folsom
Revises: None
Create Date: 2012-12-03 09:14:50.579765

"""

# Short alias -> fully qualified plugin class; used below to decide
# which plugin-specific tables to create/drop.
PLUGINS = {
    'bigswitch': 'neutron.plugins.bigswitch.plugin.NeutronRestProxyV2',
    'brocade': 'neutron.plugins.brocade.NeutronPlugin.BrocadePluginV2',
    'cisco': 'neutron.plugins.cisco.network_plugin.PluginV2',
    'lbr': 'neutron.plugins.linuxbridge.lb_neutron_plugin.LinuxBridgePluginV2',
    'meta': 'neutron.plugins.metaplugin.meta_neutron_plugin.MetaPluginV2',
    'ml2': 'neutron.plugins.ml2.plugin.Ml2Plugin',
    'mlnx': 'neutron.plugins.mlnx.mlnx_plugin.MellanoxEswitchPlugin',
    'nec': 'neutron.plugins.nec.nec_plugin.NECPluginV2',
    'nvp': 'neutron.plugins.nicira.NeutronPlugin.NvpPluginV2',
    'ocnvsd': 'neutron.plugins.oneconvergence.plugin.OneConvergencePluginV2',
    'ovs': 'neutron.plugins.openvswitch.ovs_neutron_plugin.OVSNeutronPluginV2',
    'plumgrid': 'neutron.plugins.plumgrid.plumgrid_plugin.plumgrid_plugin.'
                'NeutronPluginPLUMgridV2',
    'ryu': 'neutron.plugins.ryu.ryu_neutron_plugin.RyuNeutronPluginV2',
    'ibm': 'neutron.plugins.ibm.sdnve_neutron_plugin.SdnvePluginV2',
}

# Plugins that receive the l3 extension tables.
L3_CAPABLE = [
    PLUGINS['lbr'],
    PLUGINS['meta'],
    PLUGINS['ml2'],
    PLUGINS['mlnx'],
    PLUGINS['nec'],
    PLUGINS['ocnvsd'],
    PLUGINS['ovs'],
    PLUGINS['ryu'],
    PLUGINS['brocade'],
    PLUGINS['plumgrid'],
    PLUGINS['ibm'],
]

# Plugins that had quota support in the Folsom release.
FOLSOM_QUOTA = [
    PLUGINS['lbr'],
    PLUGINS['ml2'],
    PLUGINS['nvp'],
    PLUGINS['ocnvsd'],
    PLUGINS['ovs'],
]


# revision identifiers, used by Alembic.
revision = 'folsom'
down_revision = None

from alembic import op
import sqlalchemy as sa

from neutron.db import migration
from neutron.db.migration.alembic_migrations import common_ext_ops
# NOTE: This is a special migration that creates a Folsom compatible database.


def upgrade(active_plugins=None, options=None):
    """Create the Folsom-era schema for whichever plugin is active."""
    # general model
    upgrade_base()

    if migration.should_run(active_plugins, L3_CAPABLE):
        common_ext_ops.upgrade_l3()

    if migration.should_run(active_plugins, FOLSOM_QUOTA):
        common_ext_ops.upgrade_quota(options)

    if PLUGINS['lbr'] in active_plugins:
        upgrade_linuxbridge()
    elif PLUGINS['ovs'] in active_plugins:
        upgrade_ovs()
    elif PLUGINS['cisco'] in active_plugins:
        upgrade_cisco()
        # Cisco plugin imports OVS models too
        upgrade_ovs()
    elif PLUGINS['meta'] in active_plugins:
        upgrade_meta()
    elif PLUGINS['nec'] in active_plugins:
        upgrade_nec()
    elif PLUGINS['ryu'] in active_plugins:
        upgrade_ryu()
    elif PLUGINS['brocade'] in active_plugins:
        upgrade_brocade()
        # Brocade plugin imports linux bridge models too
        upgrade_linuxbridge()


def upgrade_base():
    """Create the plugin-independent core tables (networks, subnets, ...)."""
    op.create_table(
        'networks',
        sa.Column('tenant_id', sa.String(length=255), nullable=True),
        sa.Column('id', sa.String(length=36), nullable=False),
        sa.Column('name', sa.String(length=255), nullable=True),
        sa.Column('status', sa.String(length=16), nullable=True),
        sa.Column('admin_state_up', sa.Boolean(), nullable=True),
        sa.Column('shared', sa.Boolean(), nullable=True),
        sa.PrimaryKeyConstraint('id')
    )

    op.create_table(
        'subnets',
        sa.Column('tenant_id', sa.String(length=255), nullable=True),
        sa.Column('id', sa.String(length=36), nullable=False),
        sa.Column('name', sa.String(length=255), nullable=True),
        sa.Column('network_id', sa.String(length=36), nullable=True),
        sa.Column('ip_version', sa.Integer(), nullable=False),
        sa.Column('cidr', sa.String(length=64), nullable=False),
        sa.Column('gateway_ip', sa.String(length=64), nullable=True),
        sa.Column('enable_dhcp', sa.Boolean(), nullable=True),
        sa.Column('shared', sa.Boolean(), nullable=True),
        sa.ForeignKeyConstraint(['network_id'], ['networks.id'], ),
        sa.PrimaryKeyConstraint('id')
    )

    op.create_table(
        'ports',
        sa.Column('tenant_id', sa.String(length=255), nullable=True),
        sa.Column('id', sa.String(length=36), nullable=False),
        sa.Column('name', sa.String(length=255), nullable=True),
        sa.Column('network_id', sa.String(length=36), nullable=False),
        sa.Column('mac_address', sa.String(length=32), nullable=False),
        sa.Column('admin_state_up', sa.Boolean(), nullable=False),
        sa.Column('status', sa.String(length=16), nullable=False),
        sa.Column('device_id', sa.String(length=255), nullable=False),
        sa.Column('device_owner', sa.String(length=255), nullable=False),
        sa.ForeignKeyConstraint(['network_id'], ['networks.id'], ),
        sa.PrimaryKeyConstraint('id')
    )

    op.create_table(
        'dnsnameservers',
        sa.Column('address', sa.String(length=128), nullable=False),
        sa.Column('subnet_id', sa.String(length=36), nullable=False),
        sa.ForeignKeyConstraint(['subnet_id'], ['subnets.id'],
                                ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('address', 'subnet_id')
    )

    op.create_table(
        'ipallocations',
        sa.Column('port_id', sa.String(length=36), nullable=True),
        sa.Column('ip_address', sa.String(length=64), nullable=False),
        sa.Column('subnet_id', sa.String(length=36), nullable=False),
        sa.Column('network_id', sa.String(length=36), nullable=False),
        sa.Column('expiration', sa.DateTime(), nullable=True),
        sa.ForeignKeyConstraint(['network_id'], ['networks.id'],
                                ondelete='CASCADE'),
        sa.ForeignKeyConstraint(['port_id'], ['ports.id'],
                                ondelete='CASCADE'),
        sa.ForeignKeyConstraint(['subnet_id'], ['subnets.id'],
                                ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('ip_address', 'subnet_id', 'network_id')
    )

    op.create_table(
        'routes',
        sa.Column('destination', sa.String(length=64), nullable=False),
        sa.Column('nexthop', sa.String(length=64), nullable=False),
        sa.Column('subnet_id', sa.String(length=36), nullable=False),
        sa.ForeignKeyConstraint(['subnet_id'], ['subnets.id'],
                                ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('destination', 'nexthop', 'subnet_id')
    )

    op.create_table(
        'ipallocationpools',
        sa.Column('id', sa.String(length=36), nullable=False),
        sa.Column('subnet_id', sa.String(length=36), nullable=True),
        sa.Column('first_ip', sa.String(length=64), nullable=False),
        sa.Column('last_ip', sa.String(length=64), nullable=False),
        sa.ForeignKeyConstraint(['subnet_id'], ['subnets.id'],
                                ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('id')
    )

    op.create_table(
        'ipavailabilityranges',
        sa.Column('allocation_pool_id', sa.String(length=36), nullable=False),
        sa.Column('first_ip', sa.String(length=64), nullable=False),
        sa.Column('last_ip', sa.String(length=64), nullable=False),
        sa.ForeignKeyConstraint(['allocation_pool_id'],
                                ['ipallocationpools.id'],
                                ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('allocation_pool_id', 'first_ip', 'last_ip')
    )


def upgrade_linuxbridge():
    """Tables specific to the linuxbridge plugin."""
    op.create_table(
        'network_states',
        sa.Column('physical_network', sa.String(length=64), nullable=False),
        sa.Column('vlan_id', sa.Integer(), autoincrement=False,
                  nullable=False),
        sa.Column('allocated', sa.Boolean(), nullable=False),
        sa.PrimaryKeyConstraint('physical_network', 'vlan_id')
    )

    op.create_table(
        'network_bindings',
        sa.Column('network_id', sa.String(length=36), nullable=False),
        sa.Column('physical_network', sa.String(length=64), nullable=True),
        sa.Column('vlan_id', sa.Integer(), autoincrement=False,
                  nullable=False),
        sa.ForeignKeyConstraint(['network_id'], ['networks.id'],
                                ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('network_id')
    )


def upgrade_ovs():
    """Tables specific to the openvswitch plugin."""
    op.create_table(
        'ovs_tunnel_endpoints',
        sa.Column('ip_address', sa.String(length=64), nullable=False),
        sa.Column('id', sa.Integer(), nullable=False),
        sa.PrimaryKeyConstraint('ip_address')
    )

    op.create_table(
        'ovs_tunnel_ips',
        sa.Column('ip_address', sa.String(length=255), nullable=False),
        sa.PrimaryKeyConstraint('ip_address')
    )

    op.create_table(
        'ovs_vlan_allocations',
        sa.Column('physical_network', sa.String(length=64), nullable=False),
        sa.Column('vlan_id', sa.Integer(), autoincrement=False,
                  nullable=False),
        sa.Column('allocated', sa.Boolean(), nullable=False),
        sa.PrimaryKeyConstraint('physical_network', 'vlan_id')
    )

    op.create_table(
        'ovs_tunnel_allocations',
        sa.Column('tunnel_id', sa.Integer(), autoincrement=False,
                  nullable=False),
        sa.Column('allocated', sa.Boolean(), nullable=False),
        sa.PrimaryKeyConstraint('tunnel_id')
    )

    op.create_table(
        'ovs_network_bindings',
        sa.Column('network_id', sa.String(length=36), nullable=False),
        sa.Column('network_type', sa.String(length=32), nullable=False),
        sa.Column('physical_network', sa.String(length=64), nullable=True),
        sa.Column('segmentation_id', sa.Integer(), nullable=True),
        sa.ForeignKeyConstraint(['network_id'], ['networks.id'],
                                ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('network_id')
    )


def upgrade_meta():
    """Tables specific to the metaplugin."""
    op.create_table(
        'networkflavors',
        sa.Column('flavor', sa.String(length=255)),
        sa.Column('network_id', sa.String(length=36), nullable=False),
        sa.ForeignKeyConstraint(['network_id'], ['networks.id'],
                                ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('network_id')
    )

    op.create_table(
        'routerflavors',
        sa.Column('flavor', sa.String(length=255)),
        sa.Column('router_id', sa.String(length=36), nullable=False),
        # NOTE(review): 'routers' is created by common_ext_ops.upgrade_l3(),
        # which runs earlier in upgrade() for l3-capable plugins.
        sa.ForeignKeyConstraint(['router_id'], ['routers.id'],
                                ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('router_id')
    )


def upgrade_nec():
    """Tables specific to the NEC plugin."""
    op.create_table(
        'ofctenants',
        sa.Column('id', sa.String(length=36), nullable=False),
        sa.Column('quantum_id', sa.String(length=36), nullable=False),
        sa.PrimaryKeyConstraint('id')
    )

    op.create_table(
        'ofcnetworks',
        sa.Column('id', sa.String(length=36), nullable=False),
        sa.Column('quantum_id', sa.String(length=36), nullable=False),
        sa.PrimaryKeyConstraint('id')
    )

    op.create_table(
        'ofcports',
        sa.Column('id', sa.String(length=36), nullable=False),
        sa.Column('quantum_id', sa.String(length=36), nullable=False),
        sa.PrimaryKeyConstraint('id')
    )

    op.create_table(
        'ofcfilters',
        sa.Column('id', sa.String(length=36), nullable=False),
        sa.Column('quantum_id', sa.String(length=36), nullable=False),
        sa.PrimaryKeyConstraint('id')
    )

    op.create_table(
        'portinfos',
        sa.Column('id', sa.String(length=36), nullable=False),
        sa.Column('datapath_id', sa.String(length=36), nullable=False),
        sa.Column('port_no', sa.Integer(), nullable=False),
        sa.Column('vlan_id', sa.Integer(), nullable=False),
        sa.Column('mac', sa.String(length=32), nullable=False),
        sa.PrimaryKeyConstraint('id')
    )

    op.create_table(
        'packetfilters',
        sa.Column('tenant_id', sa.String(length=255), nullable=True),
        sa.Column('id', sa.String(length=36), nullable=False),
        sa.Column('network_id', sa.String(length=36), nullable=False),
        sa.Column('priority', sa.Integer(), nullable=False),
        sa.Column('action', sa.String(16), nullable=False),
        sa.Column('in_port', sa.String(36), nullable=False),
        sa.Column('src_mac', sa.String(32), nullable=False),
        sa.Column('dst_mac', sa.String(32), nullable=False),
        sa.Column('eth_type', sa.Integer(), nullable=False),
        sa.Column('src_cidr', sa.String(64), nullable=False),
        sa.Column('dst_cidr', sa.String(64), nullable=False),
        sa.Column('protocol', sa.String(16), nullable=False),
        sa.Column('src_port', sa.Integer(), nullable=False),
        sa.Column('dst_port', sa.Integer(), nullable=False),
        sa.Column('admin_state_up', sa.Boolean(), nullable=False),
        sa.Column('status', sa.String(16), nullable=False),
        sa.ForeignKeyConstraint(['network_id'], ['networks.id'],
                                ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('id')
    )


def upgrade_ryu():
    """Table specific to the Ryu plugin."""
    op.create_table(
        'ofp_server',
        sa.Column('id', sa.Integer(), autoincrement=False, nullable=False),
        sa.Column('address', sa.String(255)),
        sa.Column('host_type', sa.String(255)),
        sa.PrimaryKeyConstraint('id')
    )


def upgrade_brocade():
    """Tables specific to the Brocade plugin."""
    op.create_table(
        'brocadenetworks',
        sa.Column('id', sa.Integer(), autoincrement=False, nullable=False),
        sa.Column('vlan', sa.String(10)),
        sa.PrimaryKeyConstraint('id')
    )

    op.create_table(
        'brocadeports',
        sa.Column('port_id', sa.String(36), nullable=False),
        sa.Column('network_id', sa.String(36)),
        sa.Column('admin_state_up', sa.Boolean()),
        sa.Column('physical_interface', sa.String(36)),
        sa.Column('vlan_id', sa.String(10)),
        sa.Column('tenant_id', sa.String(36)),
        sa.PrimaryKeyConstraint('port_id')
    )


def upgrade_cisco():
    """Tables specific to the Cisco plugin."""
    op.create_table(
        'cisco_vlan_ids',
        sa.Column('vlan_id', sa.Integer(), autoincrement=True),
        sa.Column('vlan_used', sa.Boolean()),
        sa.PrimaryKeyConstraint('vlan_id')
    )

    op.create_table(
        'cisco_vlan_bindings',
        sa.Column('vlan_id', sa.Integer(), autoincrement=True),
        sa.Column('vlan_name', sa.String(255)),
        sa.Column('network_id', sa.String(255), nullable=False),
        sa.PrimaryKeyConstraint('vlan_id')
    )

    op.create_table(
        'portprofiles',
        sa.Column('uuid', sa.String(255), nullable=False),
        sa.Column('name', sa.String(255)),
        sa.Column('vlan_id', sa.Integer()),
        sa.Column('qos', sa.String(255)),
        sa.PrimaryKeyConstraint('uuid')
    )

    op.create_table(
        'portprofile_bindings',
        sa.Column('id', sa.Integer(), autoincrement=True),
        sa.Column('tenant_id', sa.String(255)),
        sa.Column('port_id', sa.String(255), nullable=False),
        sa.Column('portprofile_id', sa.String(255), nullable=False),
        sa.Column('default', sa.Boolean()),
        sa.PrimaryKeyConstraint('id'),
        sa.ForeignKeyConstraint(['port_id'], ['ports.id'], ),
        sa.ForeignKeyConstraint(['portprofile_id'], ['portprofiles.uuid'], ),
    )

    op.create_table(
        'qoss',  # yes two S's
        sa.Column('qos_id', sa.String(255)),
        sa.Column('tenant_id', sa.String(255)),
        sa.Column('qos_name', sa.String(255)),
        sa.Column('qos_desc', sa.String(255)),
        sa.PrimaryKeyConstraint('tenant_id', 'qos_name')
    )

    op.create_table(
        'credentials',
        sa.Column('credential_id', sa.String(255)),
        sa.Column('tenant_id', sa.String(255)),
        sa.Column('credential_name', sa.String(255)),
        sa.Column('user_name', sa.String(255)),
        sa.Column('password', sa.String(255)),
        sa.PrimaryKeyConstraint('tenant_id', 'credential_name')
    )

    op.create_table(
        'port_bindings',
        sa.Column('id', sa.Integer(), autoincrement=True),
        sa.Column('port_id', sa.String(255), nullable=False),
        sa.Column('blade_intf_dn', sa.String(255), nullable=False),
        sa.Column('portprofile_name', sa.String(255)),
        sa.Column('vlan_name', sa.String(255)),
        sa.Column('vlan_id', sa.Integer()),
        sa.Column('qos', sa.String(255)),
        sa.Column('tenant_id', sa.String(255)),
        sa.Column('instance_id', sa.String(255)),
        sa.Column('vif_id', sa.String(255)),
        sa.PrimaryKeyConstraint('id')
    )

    op.create_table(
        'nexusport_bindings',
        sa.Column('id', sa.Integer(), primary_key=True, autoincrement=True),
        sa.Column('port_id', sa.String(255)),
        sa.Column('vlan_id', sa.Integer()),
        sa.PrimaryKeyConstraint('id')
    )


def downgrade(active_plugins=None, options=None):
    """Tear down the Folsom schema, mirroring upgrade() in reverse."""
    if PLUGINS['lbr'] in active_plugins:
        downgrade_linuxbridge()
    elif PLUGINS['ovs'] in active_plugins:
        downgrade_ovs()
    elif PLUGINS['cisco'] in active_plugins:
        # Cisco plugin imports OVS models too
        downgrade_ovs()
        downgrade_cisco()
    elif PLUGINS['meta'] in active_plugins:
        downgrade_meta()
    elif PLUGINS['nec'] in active_plugins:
        downgrade_nec()
    elif PLUGINS['ryu'] in active_plugins:
        downgrade_ryu()
    elif PLUGINS['brocade'] in active_plugins:
        # Brocade plugin imports linux bridge models too
        downgrade_brocade()
        downgrade_linuxbridge()

    if migration.should_run(active_plugins, FOLSOM_QUOTA):
        common_ext_ops.downgrade_quota(options)

    if migration.should_run(active_plugins, L3_CAPABLE):
        common_ext_ops.downgrade_l3()

    downgrade_base()


def downgrade_base():
    # Reverse creation order so foreign-key dependents go first.
    drop_tables(
        'ipavailabilityranges',
        'ipallocationpools',
        'routes',
        'ipallocations',
        'dnsnameservers',
        'ports',
        'subnets',
        'networks'
    )


def downgrade_linuxbridge():
    drop_tables('network_bindings', 'network_states')


def downgrade_ovs():
    drop_tables(
        'ovs_network_bindings',
        'ovs_tunnel_allocations',
        'ovs_vlan_allocations',
        'ovs_tunnel_ips',
        'ovs_tunnel_endpoints'
    )


def downgrade_meta():
    drop_tables('routerflavors', 'networkflavors')


def downgrade_nec():
    drop_tables(
        'packetfilters',
        'portinfos',
        'ofcfilters',
        'ofcports',
        'ofcnetworks',
        'ofctenants'
    )


def downgrade_ryu():
    op.drop_table('ofp_server')


def downgrade_brocade():
    op.drop_table('brocadenetworks')
    op.drop_table('brocadeports')


def downgrade_cisco():
    drop_tables(
        'nexusport_bindings',
        'port_bindings',
        'credentials',
        'qoss',
        'portprofile_bindings',
        'portprofiles',
        'cisco_vlan_bindings',
        'cisco_vlan_ids'
    )


def drop_tables(*tables):
    """Drop each named table in the order given."""
    for table in tables:
        op.drop_table(table)
#

"""grizzly

Revision ID: grizzly
Revises: 1341ed32cc1e
Create Date: 2013-03-12 23:59:59.000000

"""

# revision identifiers, used by Alembic.
revision = 'grizzly'
down_revision = '1341ed32cc1e'

# This marker migration applies to every plugin.
migration_for_plugins = ['*']


def upgrade(active_plugins=None, options=None):
    """A no-op migration for marking the Grizzly release."""
    return


def downgrade(active_plugins=None, options=None):
    """A no-op migration for marking the Grizzly release."""
    return
# revision identifiers, used by Alembic.
revision = 'havana'
down_revision = '40b0aff0302e'

# This marker migration applies to every plugin.
migration_for_plugins = ['*']


def upgrade(active_plugins=None, options=None):
    """A no-op migration for marking the Havana release."""
    return


def downgrade(active_plugins=None, options=None):
    """A no-op migration for marking the Havana release."""
    return
# revision identifiers, used by Alembic.
revision = 'icehouse'
down_revision = '5ac1c354a051'

# This marker migration applies to every plugin.
migration_for_plugins = ['*']


def upgrade(active_plugins=None, options=None):
    """A no-op migration for marking the Icehouse release."""
    return


def downgrade(active_plugins=None, options=None):
    """A no-op migration for marking the Icehouse release."""
    return
#
# @author: Mark McClain, DreamHost

import os

from alembic import command as alembic_command
from alembic import config as alembic_config
from alembic import script as alembic_script
from alembic import util as alembic_util
from oslo.config import cfg


# Name of the file in the versions directory recording the expected head.
HEAD_FILENAME = 'HEAD'


_core_opts = [
    cfg.StrOpt('core_plugin',
               default='',
               help=_('Neutron plugin provider module')),
    cfg.ListOpt('service_plugins',
                default=[],
                help=_("The service plugins Neutron will use")),
]

_quota_opts = [
    cfg.StrOpt('quota_driver',
               default='',
               help=_('Neutron quota driver class')),
]

_db_opts = [
    cfg.StrOpt('connection',
               deprecated_name='sql_connection',
               default='',
               secret=True,
               help=_('URL to database')),
    cfg.StrOpt('engine',
               default='',
               help=_('Database engine')),
]

CONF = cfg.ConfigOpts()
CONF.register_cli_opts(_core_opts)
CONF.register_cli_opts(_db_opts, 'database')
CONF.register_opts(_quota_opts, 'QUOTAS')


def do_alembic_command(config, cmd, *args, **kwargs):
    """Dispatch *cmd* to alembic, converting CommandError to a CLI error."""
    try:
        getattr(alembic_command, cmd)(config, *args, **kwargs)
    except alembic_util.CommandError as e:
        alembic_util.err(str(e))


def do_check_migration(config, cmd):
    """Verify there is a single head and that it matches the HEAD file."""
    do_alembic_command(config, 'branches')
    validate_head_file(config)


def do_upgrade_downgrade(config, cmd):
    """Run an upgrade or downgrade to a revision or by a relative delta."""
    if not CONF.command.revision and not CONF.command.delta:
        raise SystemExit(_('You must provide a revision or relative delta'))

    # FIX(review): the original assigned revision = CONF.command.revision
    # before this if/else as well; that first assignment was dead code and
    # has been removed.
    if CONF.command.delta:
        # Relative migration: alembic expects e.g. '+2' or '-1'.
        sign = '+' if CONF.command.name == 'upgrade' else '-'
        revision = sign + str(CONF.command.delta)
    else:
        revision = CONF.command.revision

    do_alembic_command(config, cmd, revision, sql=CONF.command.sql)


def do_stamp(config, cmd):
    """Stamp the database with a revision without running migrations."""
    do_alembic_command(config, cmd,
                       CONF.command.revision,
                       sql=CONF.command.sql)


def do_revision(config, cmd):
    """Create a new migration script and refresh the HEAD file."""
    do_alembic_command(config, cmd,
                       message=CONF.command.message,
                       autogenerate=CONF.command.autogenerate,
                       sql=CONF.command.sql)
    update_head_file(config)


def validate_head_file(config):
    """Error out unless the HEAD file matches the single migration head."""
    script = alembic_script.ScriptDirectory.from_config(config)
    if len(script.get_heads()) > 1:
        alembic_util.err(_('Timeline branches unable to generate timeline'))

    head_path = os.path.join(script.versions, HEAD_FILENAME)
    # FIX(review): the original leaked the file handle from a bare
    # open().read(); use a context manager instead.
    if os.path.isfile(head_path):
        with open(head_path) as f:
            if f.read().strip() == script.get_current_head():
                return
    alembic_util.err(_('HEAD file does not match migration timeline head'))


def update_head_file(config):
    """Rewrite the HEAD file with the current (single) migration head."""
    script = alembic_script.ScriptDirectory.from_config(config)
    if len(script.get_heads()) > 1:
        alembic_util.err(_('Timeline branches unable to generate timeline'))

    head_path = os.path.join(script.versions, HEAD_FILENAME)
    with open(head_path, 'w+') as f:
        f.write(script.get_current_head())


def add_command_parsers(subparsers):
    """Register every neutron-db-manage sub-command with oslo.config."""
    for name in ['current', 'history', 'branches']:
        parser = subparsers.add_parser(name)
        parser.set_defaults(func=do_alembic_command)

    parser = subparsers.add_parser('check_migration')
    parser.set_defaults(func=do_check_migration)

    for name in ['upgrade', 'downgrade']:
        parser = subparsers.add_parser(name)
        parser.add_argument('--delta', type=int)
        parser.add_argument('--sql', action='store_true')
        parser.add_argument('revision', nargs='?')
        parser.set_defaults(func=do_upgrade_downgrade)

    parser = subparsers.add_parser('stamp')
    parser.add_argument('--sql', action='store_true')
    parser.add_argument('revision')
    parser.set_defaults(func=do_stamp)

    parser = subparsers.add_parser('revision')
    parser.add_argument('-m', '--message')
    parser.add_argument('--autogenerate', action='store_true')
    parser.add_argument('--sql', action='store_true')
    parser.set_defaults(func=do_revision)


command_opt = cfg.SubCommandOpt('command',
                                title='Command',
                                help=_('Available commands'),
                                handler=add_command_parsers)

CONF.register_cli_opt(command_opt)


def main():
    """Entry point: build the alembic config and run the chosen command."""
    config = alembic_config.Config(
        os.path.join(os.path.dirname(__file__), 'alembic.ini')
    )
    config.set_main_option('script_location',
                           'neutron.db.migration:alembic_migrations')
    # attach the Neutron conf to the Alembic conf
    config.neutron_config = CONF

    CONF()
    #TODO(gongysh) enable logging
    CONF.command.func(config, CONF.command.name)
+ +To manually test migration from ovs to ml2 with devstack: + + - stack with Q_PLUGIN=openvswitch + - boot an instance and validate connectivity + - stop the neutron service and all agents + - run the neutron-migrate-to-ml2 script + - update /etc/neutron/neutron.conf as follows: + + core_plugin = neutron.plugins.ml2.plugin.Ml2Plugin + + - Create /etc/neutron/plugins/ml2/ml2_conf.ini and ensure that: + - ml2.mechanism_drivers includes 'openvswitch' + - ovs.local_ip is set correctly + - database.connection is set correctly + - Start the neutron service with the ml2 config file created in + the previous step in place of the openvswitch config file + - Start all the agents + - verify that the booted instance still has connectivity + - boot a second instance and validate connectivity +""" + +import argparse + +import sqlalchemy as sa + +from neutron.extensions import portbindings +from neutron.openstack.common import uuidutils +from neutron.plugins.common import constants as p_const +from neutron.plugins.ml2.drivers import type_vxlan + + +# Migration targets +LINUXBRIDGE = 'linuxbridge' +OPENVSWITCH = 'openvswitch' + +# Releases +ICEHOUSE = 'icehouse' + + +SUPPORTED_SCHEMA_VERSIONS = [ICEHOUSE] + + +def check_db_schema_version(engine, metadata): + """Check that current version of the db schema is supported.""" + version_table = sa.Table( + 'alembic_version', metadata, autoload=True, autoload_with=engine) + versions = [v[0] for v in engine.execute(version_table.select())] + if not versions: + raise ValueError(_("Missing version in alembic_versions table")) + elif len(versions) > 1: + raise ValueError(_("Multiple versions in alembic_versions table: %s") + % versions) + current_version = versions[0] + if current_version not in SUPPORTED_SCHEMA_VERSIONS: + raise SystemError(_("Unsupported database schema %(current)s. 
# Duplicated from neutron.plugins.linuxbridge.common.constants to
# avoid having any dependency on the linuxbridge plugin being
# installed.
def interpret_vlan_id(vlan_id):
    """Return (network_type, segmentation_id) tuple for encoded vlan_id.

    The linuxbridge plugin encodes flat and local networks with the
    sentinel vlan ids -1 and -2 respectively; any other value is a
    real VLAN tag.
    """
    FLAT_VLAN_ID = -1
    LOCAL_VLAN_ID = -2
    sentinels = {
        LOCAL_VLAN_ID: (p_const.TYPE_LOCAL, None),
        FLAT_VLAN_ID: (p_const.TYPE_FLAT, None),
    }
    return sentinels.get(vlan_id, (p_const.TYPE_VLAN, vlan_id))
+ sa.Table('ports', metadata, autoload=True, autoload_with=engine) + metadata.create_all(engine) + + self.migrate_network_segments(engine, metadata) + if tunnel_type: + self.migrate_tunnels(engine, tunnel_type, vxlan_udp_port) + self.migrate_vlan_allocations(engine) + self.migrate_port_bindings(engine, metadata) + + self.drop_old_tables(engine, save_tables) + + def migrate_segment_dict(self, binding): + binding['id'] = uuidutils.generate_uuid() + + def migrate_network_segments(self, engine, metadata): + # Migrating network segments requires loading the data to python + # so that a uuid can be generated for each segment. + source_table = sa.Table(self.segment_table_name, metadata, + autoload=True, autoload_with=engine) + source_segments = engine.execute(source_table.select()) + ml2_segments = [dict(x) for x in source_segments] + for segment in ml2_segments: + self.migrate_segment_dict(segment) + if ml2_segments: + ml2_network_segments = metadata.tables['ml2_network_segments'] + engine.execute(ml2_network_segments.insert(), ml2_segments) + + def migrate_tunnels(self, engine, tunnel_type, vxlan_udp_port=None): + """Override this method to perform plugin-specific tunnel migration.""" + pass + + def migrate_vlan_allocations(self, engine): + engine.execute((""" + INSERT INTO ml2_vlan_allocations + SELECT physical_network, vlan_id, allocated + FROM %(source_table)s + WHERE allocated = 1 + """) % {'source_table': self.vlan_allocation_table_name}) + + def get_port_segment_map(self, engine): + """Retrieve a mapping of port id to segment id. + + The monolithic plugins only support a single segment per + network, so the segment id can be uniquely identified by + the network associated with a given port. 
+ + """ + port_segments = engine.execute(""" + SELECT ports_network.port_id, ml2_network_segments.id AS segment_id + FROM ml2_network_segments, ( + SELECT portbindingports.port_id, ports.network_id + FROM portbindingports, ports + WHERE portbindingports.port_id = ports.id + ) AS ports_network + WHERE ml2_network_segments.network_id = ports_network.network_id + """) + return dict(x for x in port_segments) + + def migrate_port_bindings(self, engine, metadata): + port_segment_map = self.get_port_segment_map(engine) + + port_binding_ports = sa.Table('portbindingports', metadata, + autoload=True, autoload_with=engine) + source_bindings = engine.execute(port_binding_ports.select()) + ml2_bindings = [dict(x) for x in source_bindings] + for binding in ml2_bindings: + binding['vif_type'] = self.vif_type + binding['driver'] = self.driver_type + segment = port_segment_map.get(binding['port_id']) + if segment: + binding['segment'] = segment + if ml2_bindings: + ml2_port_bindings = metadata.tables['ml2_port_bindings'] + engine.execute(ml2_port_bindings.insert(), ml2_bindings) + + def drop_old_tables(self, engine, save_tables=False): + if save_tables: + return + old_tables = self.old_tables + [self.vlan_allocation_table_name, + self.segment_table_name] + for table_name in old_tables: + engine.execute('DROP TABLE %s' % table_name) + + def define_ml2_tables(self, metadata): + + sa.Table( + 'arista_provisioned_nets', metadata, + sa.Column('tenant_id', sa.String(length=255), nullable=True), + sa.Column('id', sa.String(length=36), nullable=False), + sa.Column('network_id', sa.String(length=36), nullable=True), + sa.Column('segmentation_id', sa.Integer(), + autoincrement=False, nullable=True), + sa.PrimaryKeyConstraint('id'), + ) + + sa.Table( + 'arista_provisioned_vms', metadata, + sa.Column('tenant_id', sa.String(length=255), nullable=True), + sa.Column('id', sa.String(length=36), nullable=False), + sa.Column('vm_id', sa.String(length=255), nullable=True), + sa.Column('host_id', 
sa.String(length=255), nullable=True), + sa.Column('port_id', sa.String(length=36), nullable=True), + sa.Column('network_id', sa.String(length=36), nullable=True), + sa.PrimaryKeyConstraint('id'), + ) + + sa.Table( + 'arista_provisioned_tenants', metadata, + sa.Column('tenant_id', sa.String(length=255), nullable=True), + sa.Column('id', sa.String(length=36), nullable=False), + sa.PrimaryKeyConstraint('id'), + ) + + sa.Table( + 'cisco_ml2_nexusport_bindings', metadata, + sa.Column('binding_id', sa.Integer(), nullable=False), + sa.Column('port_id', sa.String(length=255), nullable=True), + sa.Column('vlan_id', sa.Integer(), autoincrement=False, + nullable=False), + sa.Column('switch_ip', sa.String(length=255), nullable=True), + sa.Column('instance_id', sa.String(length=255), nullable=True), + sa.PrimaryKeyConstraint('binding_id'), + ) + + sa.Table( + 'cisco_ml2_credentials', metadata, + sa.Column('credential_id', sa.String(length=255), nullable=True), + sa.Column('tenant_id', sa.String(length=255), nullable=False), + sa.Column('credential_name', sa.String(length=255), + nullable=False), + sa.Column('user_name', sa.String(length=255), nullable=True), + sa.Column('password', sa.String(length=255), nullable=True), + sa.PrimaryKeyConstraint('tenant_id', 'credential_name'), + ) + + sa.Table( + 'ml2_flat_allocations', metadata, + sa.Column('physical_network', sa.String(length=64), + nullable=False), + sa.PrimaryKeyConstraint('physical_network'), + ) + + sa.Table( + 'ml2_gre_allocations', metadata, + sa.Column('gre_id', sa.Integer, nullable=False, + autoincrement=False), + sa.Column('allocated', sa.Boolean, nullable=False), + sa.PrimaryKeyConstraint('gre_id'), + ) + + sa.Table( + 'ml2_gre_endpoints', metadata, + sa.Column('ip_address', sa.String(length=64)), + sa.PrimaryKeyConstraint('ip_address'), + ) + + sa.Table( + 'ml2_network_segments', metadata, + sa.Column('id', sa.String(length=36), nullable=False), + sa.Column('network_id', sa.String(length=36), nullable=False), + 
sa.Column('network_type', sa.String(length=32), nullable=False), + sa.Column('physical_network', sa.String(length=64), nullable=True), + sa.Column('segmentation_id', sa.Integer(), nullable=True), + sa.ForeignKeyConstraint(['network_id'], ['networks.id'], + ondelete='CASCADE'), + sa.PrimaryKeyConstraint('id'), + ) + + sa.Table( + 'ml2_port_bindings', metadata, + sa.Column('port_id', sa.String(length=36), nullable=False), + sa.Column('host', sa.String(length=255), nullable=False), + sa.Column('vif_type', sa.String(length=64), nullable=False), + sa.Column('driver', sa.String(length=64), nullable=True), + sa.Column('segment', sa.String(length=36), nullable=True), + sa.Column('vnic_type', sa.String(length=64), nullable=False, + server_default='normal'), + sa.Column('vif_details', sa.String(4095), nullable=False, + server_default=''), + sa.Column('profile', sa.String(4095), nullable=False, + server_default=''), + sa.ForeignKeyConstraint(['port_id'], ['ports.id'], + ondelete='CASCADE'), + sa.ForeignKeyConstraint(['segment'], ['ml2_network_segments.id'], + ondelete='SET NULL'), + sa.PrimaryKeyConstraint('port_id'), + ) + + sa.Table( + 'ml2_vlan_allocations', metadata, + sa.Column('physical_network', sa.String(length=64), + nullable=False), + sa.Column('vlan_id', sa.Integer(), autoincrement=False, + nullable=False), + sa.Column('allocated', sa.Boolean(), autoincrement=False, + nullable=False), + sa.PrimaryKeyConstraint('physical_network', 'vlan_id'), + ) + + sa.Table( + 'ml2_vxlan_allocations', metadata, + sa.Column('vxlan_vni', sa.Integer, nullable=False, + autoincrement=False), + sa.Column('allocated', sa.Boolean, nullable=False), + sa.PrimaryKeyConstraint('vxlan_vni'), + ) + + sa.Table( + 'ml2_vxlan_endpoints', metadata, + sa.Column('ip_address', sa.String(length=64)), + sa.Column('udp_port', sa.Integer(), nullable=False, + autoincrement=False), + sa.PrimaryKeyConstraint('ip_address', 'udp_port'), + ) + + +class 
MigrateLinuxBridgeToMl2_Icehouse(BaseMigrateToMl2_Icehouse): + + def __init__(self): + super(MigrateLinuxBridgeToMl2_Icehouse, self).__init__( + vif_type=portbindings.VIF_TYPE_BRIDGE, + driver_type=LINUXBRIDGE, + segment_table_name='network_bindings', + vlan_allocation_table_name='network_states', + old_tables=['portbindingports']) + + def migrate_segment_dict(self, binding): + super(MigrateLinuxBridgeToMl2_Icehouse, self).migrate_segment_dict( + binding) + vlan_id = binding.pop('vlan_id') + network_type, segmentation_id = interpret_vlan_id(vlan_id) + binding['network_type'] = network_type + binding['segmentation_id'] = segmentation_id + + +class MigrateOpenvswitchToMl2_Icehouse(BaseMigrateToMl2_Icehouse): + + def __init__(self): + super(MigrateOpenvswitchToMl2_Icehouse, self).__init__( + vif_type=portbindings.VIF_TYPE_OVS, + driver_type=OPENVSWITCH, + segment_table_name='ovs_network_bindings', + vlan_allocation_table_name='ovs_vlan_allocations', + old_tables=[ + 'ovs_tunnel_allocations', + 'ovs_tunnel_endpoints', + 'portbindingports', + ]) + + def migrate_tunnels(self, engine, tunnel_type, vxlan_udp_port=None): + if tunnel_type == p_const.TYPE_GRE: + engine.execute(""" + INSERT INTO ml2_gre_allocations + SELECT tunnel_id as gre_id, allocated + FROM ovs_tunnel_allocations + WHERE allocated = 1 + """) + engine.execute(""" + INSERT INTO ml2_gre_endpoints + SELECT ip_address + FROM ovs_tunnel_endpoints + """) + elif tunnel_type == p_const.TYPE_VXLAN: + if not vxlan_udp_port: + vxlan_udp_port = type_vxlan.VXLAN_UDP_PORT + engine.execute(""" + INSERT INTO ml2_vxlan_allocations + SELECT tunnel_id as vxlan_vni, allocated + FROM ovs_tunnel_allocations + WHERE allocated = 1 + """) + engine.execute(sa.text(""" + INSERT INTO ml2_vxlan_endpoints + SELECT ip_address, :udp_port as udp_port + FROM ovs_tunnel_endpoints + """), udp_port=vxlan_udp_port) + else: + raise ValueError(_('Unknown tunnel type: %s') % tunnel_type) + + +migrate_map = { + ICEHOUSE: { + OPENVSWITCH: 
MigrateOpenvswitchToMl2_Icehouse, + LINUXBRIDGE: MigrateLinuxBridgeToMl2_Icehouse, + }, +} + + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument('plugin', choices=[OPENVSWITCH, LINUXBRIDGE], + help=_('The plugin type whose database will be ' + 'migrated')) + parser.add_argument('connection', + help=_('The connection url for the target db')) + parser.add_argument('--tunnel-type', choices=[p_const.TYPE_GRE, + p_const.TYPE_VXLAN], + help=_('The %s tunnel type to migrate from') % + OPENVSWITCH) + parser.add_argument('--vxlan-udp-port', default=None, type=int, + help=_('The UDP port to use for VXLAN tunnels.')) + parser.add_argument('--release', default=ICEHOUSE, choices=[ICEHOUSE]) + parser.add_argument('--save-tables', default=False, action='store_true', + help=_("Retain the old plugin's tables")) + #TODO(marun) Provide a verbose option + args = parser.parse_args() + + if args.plugin == LINUXBRIDGE and (args.tunnel_type or + args.vxlan_udp_port): + msg = _('Tunnel args (tunnel-type and vxlan-udp-port) are not valid ' + 'for the %s plugin') + parser.error(msg % LINUXBRIDGE) + + try: + migrate_func = migrate_map[args.release][args.plugin]() + except KeyError: + msg = _('Support for migrating %(plugin)s for release ' + '%(release)s is not yet implemented') + parser.error(msg % {'plugin': args.plugin, 'release': args.release}) + else: + migrate_func(args.connection, args.save_tables, args.tunnel_type, + args.vxlan_udp_port) + + +if __name__ == '__main__': + main() diff --git a/neutron/db/model_base.py b/neutron/db/model_base.py new file mode 100644 index 000000000..7f8f051f9 --- /dev/null +++ b/neutron/db/model_base.py @@ -0,0 +1,52 @@ +# Copyright (c) 2012 OpenStack Foundation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
from sqlalchemy.ext import declarative
from sqlalchemy import orm

from neutron.openstack.common.db.sqlalchemy import models


class NeutronBase(models.ModelBase):
    """Base class for Neutron Models.

    Adds (column name, value) iteration and a debugging __repr__ on top
    of the common ModelBase.
    """

    __table_args__ = {'mysql_engine': 'InnoDB'}

    def __iter__(self):
        # NOTE: the column iterator is stored on the instance, so nested
        # or concurrent iteration over the same object is not supported.
        self._i = iter(orm.object_mapper(self).columns)
        return self

    def next(self):
        """Return the next (column name, value) pair.

        Uses the builtin next() instead of the Python-2-only .next()
        method so the protocol works under both Python 2 and 3.
        """
        n = next(self._i).name
        return n, getattr(self, n)

    # Python 3 iterator protocol alias.
    __next__ = next

    def __repr__(self):
        """sqlalchemy based automatic __repr__ method."""
        items = ['%s=%r' % (col.name, getattr(self, col.name))
                 for col in self.__table__.columns]
        return "<%s.%s[object at %x] {%s}>" % (self.__class__.__module__,
                                               self.__class__.__name__,
                                               id(self), ', '.join(items))


class NeutronBaseV2(NeutronBase):

    @declarative.declared_attr
    def __tablename__(cls):
        # NOTE(jkoelker) use the pluralized name of the class as the table
        return cls.__name__.lower() + 's'


BASEV2 = declarative.declarative_base(cls=NeutronBaseV2)
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import sqlalchemy as sa +from sqlalchemy import orm + +from neutron.common import constants +from neutron.db import model_base +from neutron.openstack.common import uuidutils + + +class HasTenant(object): + """Tenant mixin, add to subclasses that have a tenant.""" + + # NOTE(jkoelker) tenant_id is just a free form string ;( + tenant_id = sa.Column(sa.String(255)) + + +class HasId(object): + """id mixin, add to subclasses that have an id.""" + + id = sa.Column(sa.String(36), + primary_key=True, + default=uuidutils.generate_uuid) + + +class HasStatusDescription(object): + """Status with description mixin.""" + + status = sa.Column(sa.String(16), nullable=False) + status_description = sa.Column(sa.String(255)) + + +class IPAvailabilityRange(model_base.BASEV2): + """Internal representation of available IPs for Neutron subnets. + + Allocation - first entry from the range will be allocated. + If the first entry is equal to the last entry then this row + will be deleted. + Recycling ips involves reading the IPAllocationPool and IPAllocation tables + and inserting ranges representing available ips. This happens after the + final allocation is pulled from this table and a new ip allocation is + requested. Any contiguous ranges of available ips will be inserted as a + single range. 
+ """ + + allocation_pool_id = sa.Column(sa.String(36), + sa.ForeignKey('ipallocationpools.id', + ondelete="CASCADE"), + nullable=False, + primary_key=True) + first_ip = sa.Column(sa.String(64), nullable=False, primary_key=True) + last_ip = sa.Column(sa.String(64), nullable=False, primary_key=True) + + def __repr__(self): + return "%s - %s" % (self.first_ip, self.last_ip) + + +class IPAllocationPool(model_base.BASEV2, HasId): + """Representation of an allocation pool in a Neutron subnet.""" + + subnet_id = sa.Column(sa.String(36), sa.ForeignKey('subnets.id', + ondelete="CASCADE"), + nullable=True) + first_ip = sa.Column(sa.String(64), nullable=False) + last_ip = sa.Column(sa.String(64), nullable=False) + available_ranges = orm.relationship(IPAvailabilityRange, + backref='ipallocationpool', + lazy="joined", + cascade='all, delete-orphan') + + def __repr__(self): + return "%s - %s" % (self.first_ip, self.last_ip) + + +class IPAllocation(model_base.BASEV2): + """Internal representation of allocated IP addresses in a Neutron subnet. 
+ """ + + port_id = sa.Column(sa.String(36), sa.ForeignKey('ports.id', + ondelete="CASCADE"), + nullable=True) + ip_address = sa.Column(sa.String(64), nullable=False, primary_key=True) + subnet_id = sa.Column(sa.String(36), sa.ForeignKey('subnets.id', + ondelete="CASCADE"), + nullable=False, primary_key=True) + network_id = sa.Column(sa.String(36), sa.ForeignKey("networks.id", + ondelete="CASCADE"), + nullable=False, primary_key=True) + + +class Route(object): + """mixin of a route.""" + + destination = sa.Column(sa.String(64), nullable=False, primary_key=True) + nexthop = sa.Column(sa.String(64), nullable=False, primary_key=True) + + +class SubnetRoute(model_base.BASEV2, Route): + + subnet_id = sa.Column(sa.String(36), + sa.ForeignKey('subnets.id', + ondelete="CASCADE"), + primary_key=True) + + +class Port(model_base.BASEV2, HasId, HasTenant): + """Represents a port on a Neutron v2 network.""" + + name = sa.Column(sa.String(255)) + network_id = sa.Column(sa.String(36), sa.ForeignKey("networks.id"), + nullable=False) + fixed_ips = orm.relationship(IPAllocation, backref='ports', lazy='joined') + mac_address = sa.Column(sa.String(32), nullable=False) + admin_state_up = sa.Column(sa.Boolean(), nullable=False) + status = sa.Column(sa.String(16), nullable=False) + device_id = sa.Column(sa.String(255), nullable=False) + device_owner = sa.Column(sa.String(255), nullable=False) + + def __init__(self, id=None, tenant_id=None, name=None, network_id=None, + mac_address=None, admin_state_up=None, status=None, + device_id=None, device_owner=None, fixed_ips=None): + self.id = id + self.tenant_id = tenant_id + self.name = name + self.network_id = network_id + self.mac_address = mac_address + self.admin_state_up = admin_state_up + self.device_owner = device_owner + self.device_id = device_id + # Since this is a relationship only set it if one is passed in. + if fixed_ips: + self.fixed_ips = fixed_ips + + # NOTE(arosen): status must be set last as an event is triggered on! 
+ self.status = status + + +class DNSNameServer(model_base.BASEV2): + """Internal representation of a DNS nameserver.""" + + address = sa.Column(sa.String(128), nullable=False, primary_key=True) + subnet_id = sa.Column(sa.String(36), + sa.ForeignKey('subnets.id', + ondelete="CASCADE"), + primary_key=True) + + +class Subnet(model_base.BASEV2, HasId, HasTenant): + """Represents a neutron subnet. + + When a subnet is created the first and last entries will be created. These + are used for the IP allocation. + """ + + name = sa.Column(sa.String(255)) + network_id = sa.Column(sa.String(36), sa.ForeignKey('networks.id')) + ip_version = sa.Column(sa.Integer, nullable=False) + cidr = sa.Column(sa.String(64), nullable=False) + gateway_ip = sa.Column(sa.String(64)) + allocation_pools = orm.relationship(IPAllocationPool, + backref='subnet', + lazy="joined", + cascade='delete') + enable_dhcp = sa.Column(sa.Boolean()) + dns_nameservers = orm.relationship(DNSNameServer, + backref='subnet', + cascade='all, delete, delete-orphan') + routes = orm.relationship(SubnetRoute, + backref='subnet', + cascade='all, delete, delete-orphan') + shared = sa.Column(sa.Boolean) + ipv6_ra_mode = sa.Column(sa.Enum(constants.IPV6_SLAAC, + constants.DHCPV6_STATEFUL, + constants.DHCPV6_STATELESS, + name='ipv6_ra_modes'), nullable=True) + ipv6_address_mode = sa.Column(sa.Enum(constants.IPV6_SLAAC, + constants.DHCPV6_STATEFUL, + constants.DHCPV6_STATELESS, + name='ipv6_address_modes'), nullable=True) + + +class Network(model_base.BASEV2, HasId, HasTenant): + """Represents a v2 neutron network.""" + + name = sa.Column(sa.String(255)) + ports = orm.relationship(Port, backref='networks') + subnets = orm.relationship(Subnet, backref='networks', + lazy="joined") + status = sa.Column(sa.String(16)) + admin_state_up = sa.Column(sa.Boolean) + shared = sa.Column(sa.Boolean) diff --git a/neutron/db/portbindings_base.py b/neutron/db/portbindings_base.py new file mode 100644 index 000000000..045b7e3f2 --- /dev/null 
+++ b/neutron/db/portbindings_base.py @@ -0,0 +1,41 @@ +# Copyright 2013 UnitedStack Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# @author: Yong Sheng Gong, UnitedStack Inc. + +from neutron.api.v2 import attributes +from neutron.db import db_base_plugin_v2 + + +class PortBindingBaseMixin(object): + base_binding_dict = None + + def _process_portbindings_create_and_update(self, context, port_data, + port): + self.extend_port_dict_binding(port, None) + + def extend_port_dict_binding(self, port_res, port_db): + if self.base_binding_dict: + port_res.update(self.base_binding_dict) + + +def _extend_port_dict_binding(plugin, port_res, port_db): + if not isinstance(plugin, PortBindingBaseMixin): + return + plugin.extend_port_dict_binding(port_res, port_db) + + +def register_port_dict_function(): + db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs( + attributes.PORTS, [_extend_port_dict_binding]) diff --git a/neutron/db/portbindings_db.py b/neutron/db/portbindings_db.py new file mode 100644 index 000000000..1f94f8397 --- /dev/null +++ b/neutron/db/portbindings_db.py @@ -0,0 +1,121 @@ +# Copyright 2013 IBM Corp. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# @author: Yong Sheng Gong, IBM, Corp. + +import sqlalchemy as sa +from sqlalchemy import orm + +from neutron.api.v2 import attributes +from neutron.db import db_base_plugin_v2 +from neutron.db import model_base +from neutron.db import models_v2 +from neutron.db import portbindings_base +from neutron.extensions import portbindings + + +class PortBindingPort(model_base.BASEV2): + port_id = sa.Column(sa.String(36), + sa.ForeignKey('ports.id', ondelete="CASCADE"), + primary_key=True) + host = sa.Column(sa.String(255), nullable=False) + port = orm.relationship( + models_v2.Port, + backref=orm.backref("portbinding", + lazy='joined', uselist=False, + cascade='delete')) + + +class PortBindingMixin(portbindings_base.PortBindingBaseMixin): + extra_binding_dict = None + + def _port_model_hook(self, context, original_model, query): + query = query.outerjoin(PortBindingPort, + (original_model.id == + PortBindingPort.port_id)) + return query + + def _port_result_filter_hook(self, query, filters): + values = filters and filters.get(portbindings.HOST_ID, []) + if not values: + return query + if len(values) == 1: + query = query.filter(PortBindingPort.host == values[0]) + else: + query = query.filter(PortBindingPort.host.in_(values)) + return query + + db_base_plugin_v2.NeutronDbPluginV2.register_model_query_hook( + models_v2.Port, + "portbindings_port", + '_port_model_hook', + None, + '_port_result_filter_hook') + + def _process_portbindings_create_and_update(self, context, port_data, + port): + binding_profile = port.get(portbindings.PROFILE) + binding_profile_set = 
attributes.is_attr_set(binding_profile) + if not binding_profile_set and binding_profile is not None: + del port[portbindings.PROFILE] + + binding_vnic = port.get(portbindings.VNIC_TYPE) + binding_vnic_set = attributes.is_attr_set(binding_vnic) + if not binding_vnic_set and binding_vnic is not None: + del port[portbindings.VNIC_TYPE] + # REVISIT(irenab) Add support for vnic_type for plugins that + # can handle more than one type. + # Currently implemented for ML2 plugin that does not use + # PortBindingMixin. + + host = port_data.get(portbindings.HOST_ID) + host_set = attributes.is_attr_set(host) + with context.session.begin(subtransactions=True): + bind_port = context.session.query( + PortBindingPort).filter_by(port_id=port['id']).first() + if host_set: + if not bind_port: + context.session.add(PortBindingPort(port_id=port['id'], + host=host)) + else: + bind_port.host = host + else: + host = (bind_port and bind_port.host or None) + self._extend_port_dict_binding_host(port, host) + + def get_port_host(self, context, port_id): + with context.session.begin(subtransactions=True): + bind_port = context.session.query( + PortBindingPort).filter_by(port_id=port_id).first() + return bind_port and bind_port.host or None + + def _extend_port_dict_binding_host(self, port_res, host): + super(PortBindingMixin, self).extend_port_dict_binding( + port_res, None) + port_res[portbindings.HOST_ID] = host + + def extend_port_dict_binding(self, port_res, port_db): + host = (port_db.portbinding and port_db.portbinding.host or None) + self._extend_port_dict_binding_host(port_res, host) + + +def _extend_port_dict_binding(plugin, port_res, port_db): + if not isinstance(plugin, PortBindingMixin): + return + plugin.extend_port_dict_binding(port_res, port_db) + + +# Register dict extend functions for ports +db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs( + attributes.PORTS, [_extend_port_dict_binding]) diff --git a/neutron/db/portsecurity_db.py b/neutron/db/portsecurity_db.py 
new file mode 100644 index 000000000..d01eecd24 --- /dev/null +++ b/neutron/db/portsecurity_db.py @@ -0,0 +1,185 @@ +# Copyright 2013 VMware, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import sqlalchemy as sa +from sqlalchemy import orm +from sqlalchemy.orm import exc + +from neutron.api.v2 import attributes as attrs +from neutron.db import db_base_plugin_v2 +from neutron.db import model_base +from neutron.db import models_v2 +from neutron.extensions import portsecurity as psec +from neutron.openstack.common import log as logging + +LOG = logging.getLogger(__name__) + + +class PortSecurityBinding(model_base.BASEV2): + port_id = sa.Column(sa.String(36), + sa.ForeignKey('ports.id', ondelete="CASCADE"), + primary_key=True) + port_security_enabled = sa.Column(sa.Boolean(), nullable=False) + + # Add a relationship to the Port model in order to be to able to + # instruct SQLAlchemy to eagerly load port security binding + port = orm.relationship( + models_v2.Port, + backref=orm.backref("port_security", uselist=False, + cascade='delete', lazy='joined')) + + +class NetworkSecurityBinding(model_base.BASEV2): + network_id = sa.Column(sa.String(36), + sa.ForeignKey('networks.id', ondelete="CASCADE"), + primary_key=True) + port_security_enabled = sa.Column(sa.Boolean(), nullable=False) + + # Add a relationship to the Port model in order to be able to instruct + # SQLAlchemy to eagerly load default port security setting for ports + # on this network 
class PortSecurityDbMixin(object):
    """Mixin class to add port security support.

    Persists a per-network and a per-port ``port_security_enabled`` flag
    (NetworkSecurityBinding / PortSecurityBinding rows) and provides
    helpers to create, read, and update those bindings and to decide the
    effective port-security value for a port.
    """

    def _process_network_port_security_create(
        self, context, network_req, network_res):
        """Create the network-level port security binding row.

        Mirrors the requested value into ``network_res`` and returns the
        binding as a dict.
        """
        with context.session.begin(subtransactions=True):
            db = NetworkSecurityBinding(
                network_id=network_res['id'],
                port_security_enabled=network_req[psec.PORTSECURITY])
            context.session.add(db)
        network_res[psec.PORTSECURITY] = network_req[psec.PORTSECURITY]
        return self._make_network_port_security_dict(db)

    def _process_port_port_security_create(
        self, context, port_req, port_res):
        """Create the port-level port security binding row.

        Mirrors the requested value into ``port_res`` and returns the
        binding as a dict.
        """
        with context.session.begin(subtransactions=True):
            db = PortSecurityBinding(
                port_id=port_res['id'],
                port_security_enabled=port_req[psec.PORTSECURITY])
            context.session.add(db)
        port_res[psec.PORTSECURITY] = port_req[psec.PORTSECURITY]
        return self._make_port_security_dict(db)

    def _extend_port_security_dict(self, response_data, db_data):
        """Add the port-security attribute to an API response dict.

        Only exposed when the plugin advertises the 'port-security'
        extension alias.
        """
        if ('port-security' in
            getattr(self, 'supported_extension_aliases', [])):
            psec_value = db_data['port_security'][psec.PORTSECURITY]
            response_data[psec.PORTSECURITY] = psec_value

    def _get_network_security_binding(self, context, network_id):
        """Return port_security_enabled for a network.

        :raises: psec.PortSecurityBindingNotFound when no binding exists.
        """
        try:
            query = self._model_query(context, NetworkSecurityBinding)
            binding = query.filter(
                NetworkSecurityBinding.network_id == network_id).one()
        except exc.NoResultFound:
            raise psec.PortSecurityBindingNotFound()
        return binding[psec.PORTSECURITY]

    def _get_port_security_binding(self, context, port_id):
        """Return port_security_enabled for a port.

        :raises: psec.PortSecurityBindingNotFound when no binding exists.
        """
        try:
            query = self._model_query(context, PortSecurityBinding)
            binding = query.filter(
                PortSecurityBinding.port_id == port_id).one()
        except exc.NoResultFound:
            raise psec.PortSecurityBindingNotFound()
        return binding[psec.PORTSECURITY]

    def _process_port_port_security_update(
        self, context, port_req, port_res):
        """Update the port binding when the request carries the attribute.

        No-op when the request does not contain the port-security key.
        :raises: psec.PortSecurityBindingNotFound when no binding exists.
        """
        # Guard clause instead of the original if/else-return shape.
        if psec.PORTSECURITY not in port_req:
            return
        port_security_enabled = port_req[psec.PORTSECURITY]
        try:
            query = self._model_query(context, PortSecurityBinding)
            port_id = port_res['id']
            binding = query.filter(
                PortSecurityBinding.port_id == port_id).one()

            binding.port_security_enabled = port_security_enabled
            port_res[psec.PORTSECURITY] = port_security_enabled
        except exc.NoResultFound:
            raise psec.PortSecurityBindingNotFound()

    def _process_network_port_security_update(
        self, context, network_req, network_res):
        """Update the network binding when the request carries the attribute.

        No-op when the request does not contain the port-security key.
        :raises: psec.PortSecurityBindingNotFound when no binding exists.
        """
        if psec.PORTSECURITY not in network_req:
            return
        port_security_enabled = network_req[psec.PORTSECURITY]
        try:
            query = self._model_query(context, NetworkSecurityBinding)
            network_id = network_res['id']
            binding = query.filter(
                NetworkSecurityBinding.network_id == network_id).one()

            binding.port_security_enabled = port_security_enabled
            network_res[psec.PORTSECURITY] = port_security_enabled
        except exc.NoResultFound:
            raise psec.PortSecurityBindingNotFound()

    def _make_network_port_security_dict(self, port_security, fields=None):
        """Build the API dict for a network port-security binding."""
        res = {'network_id': port_security['network_id'],
               psec.PORTSECURITY: port_security[psec.PORTSECURITY]}
        return self._fields(res, fields)

    def _determine_port_security_and_has_ip(self, context, port):
        """Returns a tuple of booleans (port_security_enabled, has_ip).

        Port_security is the value associated with the port if one is present
        otherwise the value associated with the network is returned. has_ip is
        if the port is associated with an ip or not.
        """
        has_ip = self._ip_on_port(port)
        # we don't apply security groups for dhcp, router
        if (port.get('device_owner') and
            port['device_owner'].startswith('network:')):
            return (False, has_ip)

        if (psec.PORTSECURITY in port and
            isinstance(port[psec.PORTSECURITY], bool)):
            port_security_enabled = port[psec.PORTSECURITY]

        # If port has an ip and security_groups are passed in
        # conveniently set port_security_enabled to true this way
        # user doesn't also have to pass in port_security_enabled=True
        # when creating ports.
        # BUG FIX: the original passed the literal string
        # 'security_groups' to is_attr_set(); a non-empty string is
        # always "set", so this branch fired even when no security
        # groups were supplied. Test the actual attribute value instead.
        elif (has_ip and attrs.is_attr_set(port.get('security_groups'))):
            port_security_enabled = True
        else:
            port_security_enabled = self._get_network_security_binding(
                context, port['network_id'])

        return (port_security_enabled, has_ip)

    def _make_port_security_dict(self, port, fields=None):
        """Build the API dict for a port port-security binding."""
        res = {'port_id': port['port_id'],
               psec.PORTSECURITY: port[psec.PORTSECURITY]}
        return self._fields(res, fields)

    def _ip_on_port(self, port):
        """Return True when the port dict carries any fixed IPs."""
        return bool(port.get('fixed_ips'))

    # Register dict extend functions for ports and networks
    db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs(
        attrs.NETWORKS, ['_extend_port_security_dict'])
    db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs(
        attrs.PORTS, ['_extend_port_security_dict'])
class Quota(model_base.BASEV2, models_v2.HasId):
    """Represent a single quota override for a tenant.

    If there is no row for a given tenant id and resource, then the
    default for the quota class is used.
    """
    # Indexed because every quota lookup filters on the tenant.
    tenant_id = sa.Column(sa.String(255), index=True)
    # Resource name, e.g. 'port' or 'network'.
    resource = sa.Column(sa.String(255))
    # Negative limits are treated as unlimited by limit_check() below.
    limit = sa.Column(sa.Integer)


class DbQuotaDriver(object):
    """Driver to perform necessary checks to enforce quotas and obtain quota
    information.

    The default driver utilizes the local database.
    """

    @staticmethod
    def get_tenant_quotas(context, resources, tenant_id):
        """Given a list of resources, retrieve the quotas for the given
        tenant.

        :param context: The request context, for access checks.
        :param resources: A dictionary of the registered resource keys.
        :param tenant_id: The ID of the tenant to return quotas for.
        :return dict: from resource name to dict of name and limit
        """

        # init with defaults
        tenant_quota = dict((key, resource.default)
                            for key, resource in resources.items())

        # update with tenant specific limits
        q_qry = context.session.query(Quota).filter_by(tenant_id=tenant_id)
        tenant_quota.update((q['resource'], q['limit']) for q in q_qry)

        return tenant_quota

    @staticmethod
    def delete_tenant_quota(context, tenant_id):
        """Delete the quota entries for a given tenant_id.

        After deletion, this tenant will use default quota values in conf.
        """
        with context.session.begin():
            tenant_quotas = context.session.query(Quota)
            tenant_quotas = tenant_quotas.filter_by(tenant_id=tenant_id)
            tenant_quotas.delete()

    @staticmethod
    def get_all_quotas(context, resources):
        """Given a list of resources, retrieve the quotas for the all tenants.

        :param context: The request context, for access checks.
        :param resources: A dictionary of the registered resource keys.
        :return quotas: list of dict of tenant_id:, resourcekey1:
        resourcekey2: ...
        """
        tenant_default = dict((key, resource.default)
                              for key, resource in resources.items())

        all_tenant_quotas = {}

        for quota in context.session.query(Quota):
            tenant_id = quota['tenant_id']

            # avoid setdefault() because only want to copy when actually req'd
            tenant_quota = all_tenant_quotas.get(tenant_id)
            if tenant_quota is None:
                tenant_quota = tenant_default.copy()
                tenant_quota['tenant_id'] = tenant_id
                all_tenant_quotas[tenant_id] = tenant_quota

            tenant_quota[quota['resource']] = quota['limit']

        return all_tenant_quotas.values()

    @staticmethod
    def update_quota_limit(context, tenant_id, resource, limit):
        """Set (create or overwrite) one tenant's limit for one resource."""
        with context.session.begin():
            tenant_quota = context.session.query(Quota).filter_by(
                tenant_id=tenant_id, resource=resource).first()

            if tenant_quota:
                tenant_quota.update({'limit': limit})
            else:
                tenant_quota = Quota(tenant_id=tenant_id,
                                     resource=resource,
                                     limit=limit)
                context.session.add(tenant_quota)

    def _get_quotas(self, context, tenant_id, resources, keys):
        """Retrieves the quotas for specific resources.

        A helper method which retrieves the quotas for the specific
        resources identified by keys, and which apply to the current
        context.

        :param context: The request context, for access checks.
        :param tenant_id: the tenant_id to check quota.
        :param resources: A dictionary of the registered resources.
        :param keys: A list of the desired quotas to retrieve.
        :raises: exceptions.QuotaResourceUnknown for unregistered keys.
        """
        desired = set(keys)
        sub_resources = dict((k, v) for k, v in resources.items()
                             if k in desired)

        # Make sure we accounted for all of them...
        if len(keys) != len(sub_resources):
            unknown = desired - set(sub_resources.keys())
            raise exceptions.QuotaResourceUnknown(unknown=sorted(unknown))

        # Grab and return the quotas (without usages).
        # get_tenant_quotas() builds a fresh dict on every call, so the
        # defensive dict((k, v) ...) copy the original made here was
        # redundant and has been dropped.
        return DbQuotaDriver.get_tenant_quotas(
            context, sub_resources, tenant_id)

    def limit_check(self, context, tenant_id, resources, values):
        """Check simple quota limits.

        For limits--those quotas for which there is no usage
        synchronization function--this method checks that a set of
        proposed values are permitted by the limit restriction.

        This method will raise a QuotaResourceUnknown exception if a
        given resource is unknown or if it is not a simple limit
        resource.

        If any of the proposed values is over the defined quota, an
        OverQuota exception will be raised with the sorted list of the
        resources which are too high. Otherwise, the method returns
        nothing.

        :param context: The request context, for access checks.
        :param tenant_id: The tenant_id to check the quota.
        :param resources: A dictionary of the registered resources.
        :param values: A dictionary of the values to check against the
        quota.
        """

        # Ensure no value is less than zero
        unders = [key for key, val in values.items() if val < 0]
        if unders:
            raise exceptions.InvalidQuotaValue(unders=sorted(unders))

        # Get the applicable quotas
        quotas = self._get_quotas(context, tenant_id, resources, values.keys())

        # Check the quotas and construct a list of the resources that
        # would be put over limit by the desired values. Negative
        # quotas mean "unlimited" and are never flagged.
        overs = [key for key, val in values.items()
                 if 0 <= quotas[key] < val]
        if overs:
            raise exceptions.OverQuota(overs=sorted(overs))
class ServiceRouterBinding(model_base.BASEV2):
    """Binds a service resource (by id and type) to a router."""
    resource_id = sa.Column(sa.String(36),
                            primary_key=True)
    # Holds the resource model's table name; length-guarded by the
    # 'set' event listener below.
    resource_type = sa.Column(sa.String(36),
                              primary_key=True)
    router_id = sa.Column(sa.String(36),
                          sa.ForeignKey('routers.id'),
                          nullable=False)


class AttributeException(qexception.NeutronException):
    """Raised when a resource type string overflows its column."""
    message = _("Resource type '%(resource_type)s' is longer "
                "than %(maxlen)d characters")


@event.listens_for(ServiceRouterBinding.resource_type, 'set', retval=True)
def validate_resource_type(target, value, oldvalue, initiator):
    """Make sure the resource type fits the resource_type column."""
    maxlen = ServiceRouterBinding.resource_type.property.columns[0].type.length
    if len(value) > maxlen:
        raise AttributeException(resource_type=value, maxlen=maxlen)
    return value


class RoutedServiceInsertionDbMixin(object):
    """Mixin class to add router service insertion."""

    def _process_create_resource_router_id(self, context, resource, model):
        """Persist the resource/router binding and return it as a dict."""
        with context.session.begin(subtransactions=True):
            db = ServiceRouterBinding(
                resource_id=resource['id'],
                resource_type=model.__tablename__,
                router_id=resource[rsi.ROUTER_ID])
            context.session.add(db)
        return self._make_resource_router_id_dict(db, model)

    def _extend_resource_router_id_dict(self, context, resource, model):
        """Populate resource[ROUTER_ID] from the stored binding.

        BUG FIX: the original call passed positional arguments as
        (context, resource['resource_id'], model), which bound the
        resource id string to the ``model`` parameter of
        _get_resource_router_id_binding and the model to ``resource_id``.
        Pass them in the order the signature declares.
        """
        binding = self._get_resource_router_id_binding(
            context, model, resource_id=resource['resource_id'])
        resource[rsi.ROUTER_ID] = binding['router_id']

    def _get_resource_router_id_binding(self, context, model,
                                        resource_id=None,
                                        router_id=None):
        """Return the first binding matching the optional filters, or None."""
        query = self._model_query(context, ServiceRouterBinding)
        query = query.filter(
            ServiceRouterBinding.resource_type == model.__tablename__)
        if resource_id:
            query = query.filter(
                ServiceRouterBinding.resource_id == resource_id)
        if router_id:
            query = query.filter(
                ServiceRouterBinding.router_id == router_id)
        return query.first()

    def _get_resource_router_id_bindings(self, context, model,
                                         resource_ids=None,
                                         router_ids=None):
        """Return all bindings matching the optional id collections."""
        query = self._model_query(context, ServiceRouterBinding)
        query = query.filter(
            ServiceRouterBinding.resource_type == model.__tablename__)
        if resource_ids:
            query = query.filter(
                ServiceRouterBinding.resource_id.in_(resource_ids))
        if router_ids:
            query = query.filter(
                ServiceRouterBinding.router_id.in_(router_ids))
        return query.all()

    def _make_resource_router_id_dict(self, resource_router_binding, model,
                                      fields=None):
        """Build the API dict for a resource/router binding."""
        resource = {'resource_id': resource_router_binding['resource_id'],
                    'resource_type': model.__tablename__,
                    rsi.ROUTER_ID: resource_router_binding[rsi.ROUTER_ID]}
        return self._fields(resource, fields)

    def _delete_resource_router_id_binding(self, context, resource_id, model):
        """Delete the binding for resource_id if one exists (no-op if not)."""
        with context.session.begin(subtransactions=True):
            binding = self._get_resource_router_id_binding(
                context, model, resource_id=resource_id)
            if binding:
                context.session.delete(binding)
class RouterServiceTypeBinding(model_base.BASEV2):
    """Associates a router with a service type id."""
    router_id = sa.Column(sa.String(36),
                          sa.ForeignKey('routers.id', ondelete="CASCADE"),
                          primary_key=True)
    service_type_id = sa.Column(sa.String(36),
                                nullable=False)


class RouterServiceTypeDbMixin(object):
    """Mixin class to add router service type."""

    def _process_create_router_service_type_id(self, context, router):
        """Persist the router/service-type binding and return it as a dict."""
        with context.session.begin(subtransactions=True):
            db = RouterServiceTypeBinding(
                router_id=router['id'],
                service_type_id=router[rst.SERVICE_TYPE_ID])
            context.session.add(db)
        return self._make_router_service_type_id_dict(db)

    def _extend_router_service_type_id_dict(self, context, router):
        """Populate router[SERVICE_TYPE_ID] from the stored binding, if any."""
        rsbind = self._get_router_service_type_id_binding(
            context, router['id'])
        if rsbind:
            router[rst.SERVICE_TYPE_ID] = rsbind['service_type_id']

    def _get_router_service_type_id_binding(self, context, router_id):
        """Return the binding row for router_id, or None."""
        query = self._model_query(context, RouterServiceTypeBinding)
        query = query.filter(
            RouterServiceTypeBinding.router_id == router_id)
        return query.first()

    def _make_router_service_type_id_dict(self, router_service_type,
                                          fields=None):
        """Build the API dict for a router/service-type binding.

        ``fields`` was added (default None preserves the original
        behavior) for consistency with the other _make_*_dict helpers
        in this module family, which all accept a field filter.
        """
        res = {'router_id': router_service_type['router_id'],
               'service_type_id': router_service_type[rst.SERVICE_TYPE_ID]}
        return self._fields(res, fields)


class SecurityGroup(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant):
    """Represents a v2 neutron security group."""

    name = sa.Column(sa.String(255))
    description = sa.Column(sa.String(255))
class SecurityGroupRule(model_base.BASEV2, models_v2.HasId,
                        models_v2.HasTenant):
    """Represents a v2 neutron security group rule."""

    # Owning group; deleting the group cascades to its rules (DB level).
    security_group_id = sa.Column(sa.String(36),
                                  sa.ForeignKey("securitygroups.id",
                                                ondelete="CASCADE"),
                                  nullable=False)

    # Optional remote group for group-to-group rules; NULL when the rule
    # targets a CIDR (remote_ip_prefix) instead.
    remote_group_id = sa.Column(sa.String(36),
                                sa.ForeignKey("securitygroups.id",
                                              ondelete="CASCADE"),
                                nullable=True)

    direction = sa.Column(sa.Enum('ingress', 'egress',
                                  name='securitygrouprules_direction'))
    ethertype = sa.Column(sa.String(40))
    protocol = sa.Column(sa.String(40))
    port_range_min = sa.Column(sa.Integer)
    port_range_max = sa.Column(sa.Integer)
    remote_ip_prefix = sa.Column(sa.String(255))
    # Two relationships to the same SecurityGroup table, so each needs an
    # explicit primaryjoin to disambiguate which foreign key it follows.
    security_group = orm.relationship(
        SecurityGroup,
        backref=orm.backref('rules', cascade='all,delete'),
        primaryjoin="SecurityGroup.id==SecurityGroupRule.security_group_id")
    source_group = orm.relationship(
        SecurityGroup,
        backref=orm.backref('source_rules', cascade='all,delete'),
        primaryjoin="SecurityGroup.id==SecurityGroupRule.remote_group_id")
+ """ + s = security_group['security_group'] + tenant_id = self._get_tenant_id_for_create(context, s) + + if not default_sg: + self._ensure_default_security_group(context, tenant_id) + + with context.session.begin(subtransactions=True): + security_group_db = SecurityGroup(id=s.get('id') or ( + uuidutils.generate_uuid()), + description=s['description'], + tenant_id=tenant_id, + name=s['name']) + context.session.add(security_group_db) + for ethertype in ext_sg.sg_supported_ethertypes: + if s.get('name') == 'default': + # Allow intercommunication + ingress_rule = SecurityGroupRule( + id=uuidutils.generate_uuid(), tenant_id=tenant_id, + security_group=security_group_db, + direction='ingress', + ethertype=ethertype, + source_group=security_group_db) + context.session.add(ingress_rule) + + egress_rule = SecurityGroupRule( + id=uuidutils.generate_uuid(), tenant_id=tenant_id, + security_group=security_group_db, + direction='egress', + ethertype=ethertype) + context.session.add(egress_rule) + + return self._make_security_group_dict(security_group_db) + + def get_security_groups(self, context, filters=None, fields=None, + sorts=None, limit=None, + marker=None, page_reverse=False, default_sg=False): + + # If default_sg is True do not call _ensure_default_security_group() + # so this can be done recursively. Context.tenant_id is checked + # because all the unit tests do not explicitly set the context on + # GETS. TODO(arosen) context handling can probably be improved here. 
+ if not default_sg and context.tenant_id: + self._ensure_default_security_group(context, context.tenant_id) + marker_obj = self._get_marker_obj(context, 'security_group', limit, + marker) + return self._get_collection(context, + SecurityGroup, + self._make_security_group_dict, + filters=filters, fields=fields, + sorts=sorts, + limit=limit, marker_obj=marker_obj, + page_reverse=page_reverse) + + def get_security_groups_count(self, context, filters=None): + return self._get_collection_count(context, SecurityGroup, + filters=filters) + + def get_security_group(self, context, id, fields=None, tenant_id=None): + """Tenant id is given to handle the case when creating a security + group rule on behalf of another use. + """ + + if tenant_id: + tmp_context_tenant_id = context.tenant_id + context.tenant_id = tenant_id + + try: + with context.session.begin(subtransactions=True): + ret = self._make_security_group_dict(self._get_security_group( + context, id), fields) + ret['security_group_rules'] = self.get_security_group_rules( + context, {'security_group_id': [id]}) + finally: + if tenant_id: + context.tenant_id = tmp_context_tenant_id + return ret + + def _get_security_group(self, context, id): + try: + query = self._model_query(context, SecurityGroup) + sg = query.filter(SecurityGroup.id == id).one() + + except exc.NoResultFound: + raise ext_sg.SecurityGroupNotFound(id=id) + return sg + + def delete_security_group(self, context, id): + filters = {'security_group_id': [id]} + ports = self._get_port_security_group_bindings(context, filters) + if ports: + raise ext_sg.SecurityGroupInUse(id=id) + # confirm security group exists + sg = self._get_security_group(context, id) + + if sg['name'] == 'default' and not context.is_admin: + raise ext_sg.SecurityGroupCannotRemoveDefault() + with context.session.begin(subtransactions=True): + context.session.delete(sg) + + def update_security_group(self, context, id, security_group): + s = security_group['security_group'] + with 
context.session.begin(subtransactions=True): + sg = self._get_security_group(context, id) + if sg['name'] == 'default' and 'name' in s: + raise ext_sg.SecurityGroupCannotUpdateDefault() + sg.update(s) + return self._make_security_group_dict(sg) + + def _make_security_group_dict(self, security_group, fields=None): + res = {'id': security_group['id'], + 'name': security_group['name'], + 'tenant_id': security_group['tenant_id'], + 'description': security_group['description']} + res['security_group_rules'] = [self._make_security_group_rule_dict(r) + for r in security_group.rules] + return self._fields(res, fields) + + def _make_security_group_binding_dict(self, security_group, fields=None): + res = {'port_id': security_group['port_id'], + 'security_group_id': security_group['security_group_id']} + return self._fields(res, fields) + + def _create_port_security_group_binding(self, context, port_id, + security_group_id): + with context.session.begin(subtransactions=True): + db = SecurityGroupPortBinding(port_id=port_id, + security_group_id=security_group_id) + context.session.add(db) + + def _get_port_security_group_bindings(self, context, + filters=None, fields=None): + return self._get_collection(context, + SecurityGroupPortBinding, + self._make_security_group_binding_dict, + filters=filters, fields=fields) + + def _delete_port_security_group_bindings(self, context, port_id): + query = self._model_query(context, SecurityGroupPortBinding) + bindings = query.filter( + SecurityGroupPortBinding.port_id == port_id) + with context.session.begin(subtransactions=True): + for binding in bindings: + context.session.delete(binding) + + def create_security_group_rule_bulk(self, context, security_group_rule): + return self._create_bulk('security_group_rule', context, + security_group_rule) + + def create_security_group_rule_bulk_native(self, context, + security_group_rule): + r = security_group_rule['security_group_rules'] + + scoped_session(context.session) + security_group_id = 
self._validate_security_group_rules( + context, security_group_rule) + with context.session.begin(subtransactions=True): + if not self.get_security_group(context, security_group_id): + raise ext_sg.SecurityGroupNotFound(id=security_group_id) + + self._check_for_duplicate_rules(context, r) + ret = [] + for rule_dict in r: + rule = rule_dict['security_group_rule'] + tenant_id = self._get_tenant_id_for_create(context, rule) + db = SecurityGroupRule( + id=uuidutils.generate_uuid(), tenant_id=tenant_id, + security_group_id=rule['security_group_id'], + direction=rule['direction'], + remote_group_id=rule.get('remote_group_id'), + ethertype=rule['ethertype'], + protocol=rule['protocol'], + port_range_min=rule['port_range_min'], + port_range_max=rule['port_range_max'], + remote_ip_prefix=rule.get('remote_ip_prefix')) + context.session.add(db) + ret.append(self._make_security_group_rule_dict(db)) + return ret + + def create_security_group_rule(self, context, security_group_rule): + bulk_rule = {'security_group_rules': [security_group_rule]} + return self.create_security_group_rule_bulk_native(context, + bulk_rule)[0] + + def _get_ip_proto_number(self, protocol): + if protocol is None: + return + return IP_PROTOCOL_MAP.get(protocol, protocol) + + def _validate_port_range(self, rule): + """Check that port_range is valid.""" + if (rule['port_range_min'] is None and + rule['port_range_max'] is None): + return + if not rule['protocol']: + raise ext_sg.SecurityGroupProtocolRequiredWithPorts() + ip_proto = self._get_ip_proto_number(rule['protocol']) + if ip_proto in [constants.PROTO_NUM_TCP, constants.PROTO_NUM_UDP]: + if (rule['port_range_min'] is not None and + rule['port_range_min'] <= rule['port_range_max']): + pass + else: + raise ext_sg.SecurityGroupInvalidPortRange() + elif ip_proto == constants.PROTO_NUM_ICMP: + for attr, field in [('port_range_min', 'type'), + ('port_range_max', 'code')]: + if rule[attr] > 255: + raise ext_sg.SecurityGroupInvalidIcmpValue( + field=field, 
attr=attr, value=rule[attr]) + if (rule['port_range_min'] is None and + rule['port_range_max']): + raise ext_sg.SecurityGroupMissingIcmpType( + value=rule['port_range_max']) + + def _validate_security_group_rules(self, context, security_group_rule): + """Check that rules being installed. + + Check that all rules belong to the same security + group, remote_group_id/security_group_id belong to the same tenant, + and rules are valid. + """ + new_rules = set() + tenant_ids = set() + for rules in security_group_rule['security_group_rules']: + rule = rules.get('security_group_rule') + new_rules.add(rule['security_group_id']) + + self._validate_port_range(rule) + self._validate_ip_prefix(rule) + + if rule['remote_ip_prefix'] and rule['remote_group_id']: + raise ext_sg.SecurityGroupRemoteGroupAndRemoteIpPrefix() + + if rule['tenant_id'] not in tenant_ids: + tenant_ids.add(rule['tenant_id']) + remote_group_id = rule.get('remote_group_id') + # Check that remote_group_id exists for tenant + if remote_group_id: + self.get_security_group(context, remote_group_id, + tenant_id=rule['tenant_id']) + if len(new_rules) > 1: + raise ext_sg.SecurityGroupNotSingleGroupRules() + security_group_id = new_rules.pop() + + # Confirm single tenant and that the tenant has permission + # to add rules to this security group. 
+ if len(tenant_ids) > 1: + raise ext_sg.SecurityGroupRulesNotSingleTenant() + for tenant_id in tenant_ids: + self.get_security_group(context, security_group_id, + tenant_id=tenant_id) + return security_group_id + + def _make_security_group_rule_dict(self, security_group_rule, fields=None): + res = {'id': security_group_rule['id'], + 'tenant_id': security_group_rule['tenant_id'], + 'security_group_id': security_group_rule['security_group_id'], + 'ethertype': security_group_rule['ethertype'], + 'direction': security_group_rule['direction'], + 'protocol': security_group_rule['protocol'], + 'port_range_min': security_group_rule['port_range_min'], + 'port_range_max': security_group_rule['port_range_max'], + 'remote_ip_prefix': security_group_rule['remote_ip_prefix'], + 'remote_group_id': security_group_rule['remote_group_id']} + + return self._fields(res, fields) + + def _make_security_group_rule_filter_dict(self, security_group_rule): + sgr = security_group_rule['security_group_rule'] + res = {'tenant_id': [sgr['tenant_id']], + 'security_group_id': [sgr['security_group_id']], + 'direction': [sgr['direction']]} + + include_if_present = ['protocol', 'port_range_max', 'port_range_min', + 'ethertype', 'remote_ip_prefix', + 'remote_group_id'] + for key in include_if_present: + value = sgr.get(key) + if value: + res[key] = [value] + return res + + def _check_for_duplicate_rules(self, context, security_group_rules): + for i in security_group_rules: + found_self = False + for j in security_group_rules: + if i['security_group_rule'] == j['security_group_rule']: + if found_self: + raise ext_sg.DuplicateSecurityGroupRuleInPost(rule=i) + found_self = True + + # Check in database if rule exists + filters = self._make_security_group_rule_filter_dict(i) + db_rules = self.get_security_group_rules(context, filters) + # Note(arosen): the call to get_security_group_rules wildcards + # values in the filter that have a value of [None]. 
For + # example, filters = {'remote_group_id': [None]} will return + # all security group rules regardless of their value of + # remote_group_id. Therefore it is not possible to do this + # query unless the behavior of _get_collection() + # is changed which cannot be because other methods are already + # relying on this behavor. Therefore, we do the filtering + # below to check for these corner cases. + for db_rule in db_rules: + # need to remove id from db_rule for matching + id = db_rule.pop('id') + if (i['security_group_rule'] == db_rule): + raise ext_sg.SecurityGroupRuleExists(id=id) + + def _validate_ip_prefix(self, rule): + """Check that a valid cidr was specified as remote_ip_prefix + + No need to check that it is in fact an IP address as this is already + validated by attribute validators. + Check that rule ethertype is consistent with remote_ip_prefix ip type. + Add mask to ip_prefix if absent (192.168.1.10 -> 192.168.1.10/32). + """ + input_prefix = rule['remote_ip_prefix'] + if input_prefix: + addr = netaddr.IPNetwork(input_prefix) + # set input_prefix to always include the netmask: + rule['remote_ip_prefix'] = str(addr) + # check consistency of ethertype with addr version + if rule['ethertype'] != "IPv%d" % (addr.version): + raise ext_sg.SecurityGroupRuleParameterConflict( + ethertype=rule['ethertype'], cidr=input_prefix) + + def get_security_group_rules(self, context, filters=None, fields=None, + sorts=None, limit=None, marker=None, + page_reverse=False): + marker_obj = self._get_marker_obj(context, 'security_group_rule', + limit, marker) + return self._get_collection(context, + SecurityGroupRule, + self._make_security_group_rule_dict, + filters=filters, fields=fields, + sorts=sorts, + limit=limit, marker_obj=marker_obj, + page_reverse=page_reverse) + + def get_security_group_rules_count(self, context, filters=None): + return self._get_collection_count(context, SecurityGroupRule, + filters=filters) + + def get_security_group_rule(self, context, id, 
fields=None): + security_group_rule = self._get_security_group_rule(context, id) + return self._make_security_group_rule_dict(security_group_rule, fields) + + def _get_security_group_rule(self, context, id): + try: + query = self._model_query(context, SecurityGroupRule) + sgr = query.filter(SecurityGroupRule.id == id).one() + except exc.NoResultFound: + raise ext_sg.SecurityGroupRuleNotFound(id=id) + return sgr + + def delete_security_group_rule(self, context, id): + with context.session.begin(subtransactions=True): + rule = self._get_security_group_rule(context, id) + context.session.delete(rule) + + def _extend_port_dict_security_group(self, port_res, port_db): + # Security group bindings will be retrieved from the sqlalchemy + # model. As they're loaded eagerly with ports because of the + # joined load they will not cause an extra query. + security_group_ids = [sec_group_mapping['security_group_id'] for + sec_group_mapping in port_db.security_groups] + port_res[ext_sg.SECURITYGROUPS] = security_group_ids + return port_res + + # Register dict extend functions for ports + db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs( + attr.PORTS, ['_extend_port_dict_security_group']) + + def _process_port_create_security_group(self, context, port, + security_group_ids): + if attr.is_attr_set(security_group_ids): + for security_group_id in security_group_ids: + self._create_port_security_group_binding(context, port['id'], + security_group_id) + # Convert to list as a set might be passed here and + # this has to be serialized + port[ext_sg.SECURITYGROUPS] = (security_group_ids and + list(security_group_ids) or []) + + def _ensure_default_security_group(self, context, tenant_id): + """Create a default security group if one doesn't exist. + + :returns: the default security group id. 
+ """ + filters = {'name': ['default'], 'tenant_id': [tenant_id]} + default_group = self.get_security_groups(context, filters, + default_sg=True) + if not default_group: + security_group = {'security_group': {'name': 'default', + 'tenant_id': tenant_id, + 'description': 'default'}} + ret = self.create_security_group(context, security_group, True) + return ret['id'] + else: + return default_group[0]['id'] + + def _get_security_groups_on_port(self, context, port): + """Check that all security groups on port belong to tenant. + + :returns: all security groups IDs on port belonging to tenant. + """ + p = port['port'] + if not attr.is_attr_set(p.get(ext_sg.SECURITYGROUPS)): + return + if p.get('device_owner') and p['device_owner'].startswith('network:'): + return + + port_sg = p.get(ext_sg.SECURITYGROUPS, []) + valid_groups = set(g['id'] for g in + self.get_security_groups(context, fields=['id'], + filters={'id': port_sg})) + + requested_groups = set(port_sg) + port_sg_missing = requested_groups - valid_groups + if port_sg_missing: + raise ext_sg.SecurityGroupNotFound(id=str(port_sg_missing[0])) + + return requested_groups + + def _ensure_default_security_group_on_port(self, context, port): + # we don't apply security groups for dhcp, router + if (port['port'].get('device_owner') and + port['port']['device_owner'].startswith('network:')): + return + tenant_id = self._get_tenant_id_for_create(context, + port['port']) + default_sg = self._ensure_default_security_group(context, tenant_id) + if attr.is_attr_set(port['port'].get(ext_sg.SECURITYGROUPS)): + sgids = port['port'].get(ext_sg.SECURITYGROUPS) + else: + sgids = [default_sg] + port['port'][ext_sg.SECURITYGROUPS] = sgids + + def _check_update_deletes_security_groups(self, port): + """Return True if port has as a security group and it's value + is either [] or not is_attr_set, otherwise return False + """ + if (ext_sg.SECURITYGROUPS in port['port'] and + not (attr.is_attr_set(port['port'][ext_sg.SECURITYGROUPS]) + and 
port['port'][ext_sg.SECURITYGROUPS] != [])): + return True + return False + + def _check_update_has_security_groups(self, port): + """Return True if port has as a security group and False if the + security_group field is is_attr_set or []. + """ + if (ext_sg.SECURITYGROUPS in port['port'] and + (attr.is_attr_set(port['port'][ext_sg.SECURITYGROUPS]) and + port['port'][ext_sg.SECURITYGROUPS] != [])): + return True + return False diff --git a/neutron/db/securitygroups_rpc_base.py b/neutron/db/securitygroups_rpc_base.py new file mode 100644 index 000000000..b9db8b394 --- /dev/null +++ b/neutron/db/securitygroups_rpc_base.py @@ -0,0 +1,374 @@ +# Copyright 2012, Nachi Ueno, NTT MCL, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import netaddr +from sqlalchemy.orm import exc + +from neutron.common import constants as q_const +from neutron.common import ipv6_utils as ipv6 +from neutron.common import utils +from neutron.db import models_v2 +from neutron.db import securitygroups_db as sg_db +from neutron.extensions import securitygroup as ext_sg +from neutron.openstack.common import log as logging + +LOG = logging.getLogger(__name__) + + +IP_MASK = {q_const.IPv4: 32, + q_const.IPv6: 128} + + +DIRECTION_IP_PREFIX = {'ingress': 'source_ip_prefix', + 'egress': 'dest_ip_prefix'} + + +class SecurityGroupServerRpcMixin(sg_db.SecurityGroupDbMixin): + + def create_security_group_rule(self, context, security_group_rule): + bulk_rule = {'security_group_rules': [security_group_rule]} + rule = self.create_security_group_rule_bulk_native(context, + bulk_rule)[0] + sgids = [rule['security_group_id']] + self.notifier.security_groups_rule_updated(context, sgids) + return rule + + def create_security_group_rule_bulk(self, context, + security_group_rule): + rules = super(SecurityGroupServerRpcMixin, + self).create_security_group_rule_bulk_native( + context, security_group_rule) + sgids = set([r['security_group_id'] for r in rules]) + self.notifier.security_groups_rule_updated(context, list(sgids)) + return rules + + def delete_security_group_rule(self, context, sgrid): + rule = self.get_security_group_rule(context, sgrid) + super(SecurityGroupServerRpcMixin, + self).delete_security_group_rule(context, sgrid) + self.notifier.security_groups_rule_updated(context, + [rule['security_group_id']]) + + def update_security_group_on_port(self, context, id, port, + original_port, updated_port): + """Update security groups on port. + + This method returns a flag which indicates request notification + is required and does not perform notification itself. + It is because another changes for the port may require notification. 
+ """ + need_notify = False + port_updates = port['port'] + if (ext_sg.SECURITYGROUPS in port_updates and + not utils.compare_elements( + original_port.get(ext_sg.SECURITYGROUPS), + port_updates[ext_sg.SECURITYGROUPS])): + # delete the port binding and read it with the new rules + port_updates[ext_sg.SECURITYGROUPS] = ( + self._get_security_groups_on_port(context, port)) + self._delete_port_security_group_bindings(context, id) + self._process_port_create_security_group( + context, + updated_port, + port_updates[ext_sg.SECURITYGROUPS]) + need_notify = True + else: + updated_port[ext_sg.SECURITYGROUPS] = ( + original_port[ext_sg.SECURITYGROUPS]) + return need_notify + + def is_security_group_member_updated(self, context, + original_port, updated_port): + """Check security group member updated or not. + + This method returns a flag which indicates request notification + is required and does not perform notification itself. + It is because another changes for the port may require notification. + """ + need_notify = False + if (original_port['fixed_ips'] != updated_port['fixed_ips'] or + not utils.compare_elements( + original_port.get(ext_sg.SECURITYGROUPS), + updated_port.get(ext_sg.SECURITYGROUPS))): + need_notify = True + return need_notify + + def notify_security_groups_member_updated(self, context, port): + """Notify update event of security group members. + + The agent setups the iptables rule to allow + ingress packet from the dhcp server (as a part of provider rules), + so we need to notify an update of dhcp server ip + address to the plugin agent. + security_groups_provider_updated() just notifies that an event + occurs and the plugin agent fetches the update provider + rule in the other RPC call (security_group_rules_for_devices). 
+ """ + if port['device_owner'] == q_const.DEVICE_OWNER_DHCP: + self.notifier.security_groups_provider_updated(context) + else: + self.notifier.security_groups_member_updated( + context, port.get(ext_sg.SECURITYGROUPS)) + + +class SecurityGroupServerRpcCallbackMixin(object): + """A mix-in that enable SecurityGroup agent support in plugin + implementations. + """ + + def security_group_rules_for_devices(self, context, **kwargs): + """Return security group rules for each port. + + also convert remote_group_id rule + to source_ip_prefix and dest_ip_prefix rule + + :params devices: list of devices + :returns: port correspond to the devices with security group rules + """ + devices = kwargs.get('devices') + + ports = {} + for device in devices: + port = self.get_port_from_device(device) + if not port: + continue + if port['device_owner'].startswith('network:'): + continue + ports[port['id']] = port + return self._security_group_rules_for_ports(context, ports) + + def _select_rules_for_ports(self, context, ports): + if not ports: + return [] + sg_binding_port = sg_db.SecurityGroupPortBinding.port_id + sg_binding_sgid = sg_db.SecurityGroupPortBinding.security_group_id + + sgr_sgid = sg_db.SecurityGroupRule.security_group_id + + query = context.session.query(sg_db.SecurityGroupPortBinding, + sg_db.SecurityGroupRule) + query = query.join(sg_db.SecurityGroupRule, + sgr_sgid == sg_binding_sgid) + query = query.filter(sg_binding_port.in_(ports.keys())) + return query.all() + + def _select_ips_for_remote_group(self, context, remote_group_ids): + ips_by_group = {} + if not remote_group_ids: + return ips_by_group + for remote_group_id in remote_group_ids: + ips_by_group[remote_group_id] = [] + + ip_port = models_v2.IPAllocation.port_id + sg_binding_port = sg_db.SecurityGroupPortBinding.port_id + sg_binding_sgid = sg_db.SecurityGroupPortBinding.security_group_id + + query = context.session.query(sg_binding_sgid, + models_v2.Port, + models_v2.IPAllocation.ip_address) + query = 
query.join(models_v2.IPAllocation, + ip_port == sg_binding_port) + query = query.join(models_v2.Port, + ip_port == models_v2.Port.id) + query = query.filter(sg_binding_sgid.in_(remote_group_ids)) + for security_group_id, port, ip_address in query: + ips_by_group[security_group_id].append(ip_address) + # if there are allowed_address_pairs add them + if getattr(port, 'allowed_address_pairs', None): + for address_pair in port.allowed_address_pairs: + ips_by_group[security_group_id].append( + address_pair['ip_address']) + return ips_by_group + + def _select_remote_group_ids(self, ports): + remote_group_ids = [] + for port in ports.values(): + for rule in port.get('security_group_rules'): + remote_group_id = rule.get('remote_group_id') + if remote_group_id: + remote_group_ids.append(remote_group_id) + return remote_group_ids + + def _select_network_ids(self, ports): + return set((port['network_id'] for port in ports.values())) + + def _select_dhcp_ips_for_network_ids(self, context, network_ids): + if not network_ids: + return {} + query = context.session.query(models_v2.Port, + models_v2.IPAllocation.ip_address) + query = query.join(models_v2.IPAllocation) + query = query.filter(models_v2.Port.network_id.in_(network_ids)) + owner = q_const.DEVICE_OWNER_DHCP + query = query.filter(models_v2.Port.device_owner == owner) + ips = {} + + for network_id in network_ids: + ips[network_id] = [] + + for port, ip in query: + ips[port['network_id']].append(ip) + return ips + + def _select_ra_ips_for_network_ids(self, context, network_ids): + """Select IP addresses to allow sending router advertisement from. + + If OpenStack dnsmasq sends RA, get link local address of + gateway and allow RA from this Link Local address. + The gateway port link local address will only be obtained + when router is created before VM instance is booted and + subnet is attached to router. + + If OpenStack doesn't send RA, allow RA from gateway IP. 
+ Currently, the gateway IP needs to be link local to be able + to send RA to VM. + """ + if not network_ids: + return {} + ips = {} + for network_id in network_ids: + ips[network_id] = set([]) + query = context.session.query(models_v2.Subnet) + subnets = query.filter(models_v2.Subnet.network_id.in_(network_ids)) + for subnet in subnets: + gateway_ip = subnet['gateway_ip'] + if subnet['ip_version'] != 6 or not gateway_ip: + continue + # TODO(xuhanp): Figure out how to call the following code + # each time router is created or updated. + if not netaddr.IPAddress(gateway_ip).is_link_local(): + if subnet['ipv6_ra_mode']: + gateway_ip = self._get_lla_gateway_ip_for_subnet(context, + subnet) + else: + # TODO(xuhanp):Figure out how to allow gateway IP from + # existing device to be global address and figure out the + # link local address by other method. + continue + if gateway_ip: + ips[subnet['network_id']].add(gateway_ip) + + return ips + + def _get_lla_gateway_ip_for_subnet(self, context, subnet): + query = context.session.query(models_v2.Port) + query = query.join(models_v2.IPAllocation) + query = query.filter( + models_v2.IPAllocation.subnet_id == subnet['id']) + query = query.filter( + models_v2.IPAllocation.ip_address == subnet['gateway_ip']) + query = query.filter(models_v2.Port.device_owner == + q_const.DEVICE_OWNER_ROUTER_INTF) + try: + gateway_port = query.one() + except (exc.NoResultFound, exc.MultipleResultsFound): + LOG.warn(_('No valid gateway port on subnet %s is ' + 'found for IPv6 RA'), subnet['id']) + return + mac_address = gateway_port['mac_address'] + lla_ip = str(ipv6.get_ipv6_addr_by_EUI64( + q_const.IPV6_LLA_PREFIX, + mac_address)) + return lla_ip + + def _convert_remote_group_id_to_ip_prefix(self, context, ports): + remote_group_ids = self._select_remote_group_ids(ports) + ips = self._select_ips_for_remote_group(context, remote_group_ids) + for port in ports.values(): + updated_rule = [] + for rule in port.get('security_group_rules'): + 
remote_group_id = rule.get('remote_group_id') + direction = rule.get('direction') + direction_ip_prefix = DIRECTION_IP_PREFIX[direction] + if not remote_group_id: + updated_rule.append(rule) + continue + + port['security_group_source_groups'].append(remote_group_id) + base_rule = rule + for ip in ips[remote_group_id]: + if ip in port.get('fixed_ips', []): + continue + ip_rule = base_rule.copy() + version = netaddr.IPNetwork(ip).version + ethertype = 'IPv%s' % version + if base_rule['ethertype'] != ethertype: + continue + ip_rule[direction_ip_prefix] = str( + netaddr.IPNetwork(ip).cidr) + updated_rule.append(ip_rule) + port['security_group_rules'] = updated_rule + return ports + + def _add_ingress_dhcp_rule(self, port, ips): + dhcp_ips = ips.get(port['network_id']) + for dhcp_ip in dhcp_ips: + if not netaddr.IPAddress(dhcp_ip).version == 4: + return + + dhcp_rule = {'direction': 'ingress', + 'ethertype': q_const.IPv4, + 'protocol': 'udp', + 'port_range_min': 68, + 'port_range_max': 68, + 'source_port_range_min': 67, + 'source_port_range_max': 67} + dhcp_rule['source_ip_prefix'] = "%s/%s" % (dhcp_ip, + IP_MASK[q_const.IPv4]) + port['security_group_rules'].append(dhcp_rule) + + def _add_ingress_ra_rule(self, port, ips): + ra_ips = ips.get(port['network_id']) + for ra_ip in ra_ips: + if not netaddr.IPAddress(ra_ip).version == 6: + return + + ra_rule = {'direction': 'ingress', + 'ethertype': q_const.IPv6, + 'protocol': q_const.PROTO_NAME_ICMP_V6, + 'source_ip_prefix': ra_ip, + 'source_port_range_min': q_const.ICMPV6_TYPE_RA} + port['security_group_rules'].append(ra_rule) + + def _apply_provider_rule(self, context, ports): + network_ids = self._select_network_ids(ports) + ips_dhcp = self._select_dhcp_ips_for_network_ids(context, network_ids) + ips_ra = self._select_ra_ips_for_network_ids(context, network_ids) + for port in ports.values(): + self._add_ingress_ra_rule(port, ips_ra) + self._add_ingress_dhcp_rule(port, ips_dhcp) + + def _security_group_rules_for_ports(self, 
context, ports): + rules_in_db = self._select_rules_for_ports(context, ports) + for (binding, rule_in_db) in rules_in_db: + port_id = binding['port_id'] + port = ports[port_id] + direction = rule_in_db['direction'] + rule_dict = { + 'security_group_id': rule_in_db['security_group_id'], + 'direction': direction, + 'ethertype': rule_in_db['ethertype'], + } + for key in ('protocol', 'port_range_min', 'port_range_max', + 'remote_ip_prefix', 'remote_group_id'): + if rule_in_db.get(key): + if key == 'remote_ip_prefix': + direction_ip_prefix = DIRECTION_IP_PREFIX[direction] + rule_dict[direction_ip_prefix] = rule_in_db[key] + continue + rule_dict[key] = rule_in_db[key] + port['security_group_rules'].append(rule_dict) + self._apply_provider_rule(context, ports) + return self._convert_remote_group_id_to_ip_prefix(context, ports) diff --git a/neutron/db/servicetype_db.py b/neutron/db/servicetype_db.py new file mode 100644 index 000000000..3e9ad15e0 --- /dev/null +++ b/neutron/db/servicetype_db.py @@ -0,0 +1,99 @@ +# Copyright 2013 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# +# @author: Salvatore Orlando, VMware +# + +import sqlalchemy as sa + +from neutron.db import api as db +from neutron.db import model_base +from neutron.db import models_v2 +from neutron.openstack.common import log as logging +from neutron.services import provider_configuration as pconf + +LOG = logging.getLogger(__name__) + + +class ProviderResourceAssociation(model_base.BASEV2): + provider_name = sa.Column(sa.String(255), + nullable=False, primary_key=True) + # should be manualy deleted on resource deletion + resource_id = sa.Column(sa.String(36), nullable=False, primary_key=True, + unique=True) + + +class ServiceTypeManager(object): + """Manage service type objects in Neutron.""" + + _instance = None + + @classmethod + def get_instance(cls): + if cls._instance is None: + cls._instance = cls() + return cls._instance + + def __init__(self): + self._initialize_db() + self._load_conf() + + def _initialize_db(self): + db.configure_db() + db.register_models(models_v2.model_base.BASEV2) + + def _load_conf(self): + self.conf = pconf.ProviderConfiguration( + pconf.parse_service_provider_opt()) + + def get_service_providers(self, context, filters=None, fields=None): + return self.conf.get_service_providers(filters, fields) + + def get_default_service_provider(self, context, service_type): + """Return the default provider for a given service type.""" + filters = {'service_type': [service_type], + 'default': [True]} + providers = self.get_service_providers(context, filters=filters) + # By construction we expect at most a single item in provider + if not providers: + raise pconf.DefaultServiceProviderNotFound( + service_type=service_type + ) + return providers[0] + + def add_resource_association(self, context, service_type, provider_name, + resource_id): + r = self.conf.get_service_providers( + filters={'service_type': [service_type], 'name': [provider_name]}) + if not r: + raise pconf.ServiceProviderNotFound(provider=provider_name, + service_type=service_type) + + with 
context.session.begin(subtransactions=True): + # we don't actually need service type for association. + # resource_id is unique and belongs to specific service + # which knows its type + assoc = ProviderResourceAssociation(provider_name=provider_name, + resource_id=resource_id) + context.session.add(assoc) + + def del_resource_associations(self, context, resource_ids): + if not resource_ids: + return + with context.session.begin(subtransactions=True): + (context.session.query(ProviderResourceAssociation). + filter( + ProviderResourceAssociation.resource_id.in_(resource_ids)). + delete(synchronize_session='fetch')) diff --git a/neutron/db/sqlalchemyutils.py b/neutron/db/sqlalchemyutils.py new file mode 100644 index 000000000..adf034432 --- /dev/null +++ b/neutron/db/sqlalchemyutils.py @@ -0,0 +1,107 @@ +# Copyright 2011 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from six import moves +import sqlalchemy +from sqlalchemy.orm.properties import RelationshipProperty + +from neutron.common import exceptions as n_exc +from neutron.openstack.common import log as logging + + +LOG = logging.getLogger(__name__) + + +def paginate_query(query, model, limit, sorts, marker_obj=None): + """Returns a query with sorting / pagination criteria added. + + Pagination works by requiring a unique sort key, specified by sorts. + (If sort keys is not unique, then we risk looping through values.) 
+ We use the last row in the previous page as the 'marker' for pagination. + So we must return values that follow the passed marker in the order. + With a single-valued sort key, this would be easy: sort_key > X. + With a compound-values sort key, (k1, k2, k3) we must do this to repeat + the lexicographical ordering: + (k1 > X1) or (k1 == X1 && k2 > X2) or (k1 == X1 && k2 == X2 && k3 > X3) + The reason of didn't use OFFSET clause was it don't scale, please refer + discussion at https://lists.launchpad.net/openstack/msg02547.html + + We also have to cope with different sort directions. + + Typically, the id of the last row is used as the client-facing pagination + marker, then the actual marker object must be fetched from the db and + passed in to us as marker. + + :param query: the query object to which we should add paging/sorting + :param model: the ORM model class + :param limit: maximum number of items to return + :param sorts: array of attributes and direction by which results should + be sorted + :param marker: the last item of the previous page; we returns the next + results after this value. + :rtype: sqlalchemy.orm.query.Query + :return: The query with sorting/pagination added. + """ + if not sorts: + return query + + # A primary key must be specified in sort keys + assert not (limit and + len(set(dict(sorts).keys()) & + set(model.__table__.primary_key.columns.keys())) == 0) + + # Add sorting + for sort_key, sort_direction in sorts: + sort_dir_func = sqlalchemy.asc if sort_direction else sqlalchemy.desc + try: + sort_key_attr = getattr(model, sort_key) + except AttributeError: + # Extension attribute doesn't support for sorting. 
Because it + # existed in attr_info, it will be catched at here + msg = _("%s is invalid attribute for sort_key") % sort_key + raise n_exc.BadRequest(resource=model.__tablename__, msg=msg) + if isinstance(sort_key_attr.property, RelationshipProperty): + msg = _("The attribute '%(attr)s' is reference to other " + "resource, can't used by sort " + "'%(resource)s'") % {'attr': sort_key, + 'resource': model.__tablename__} + raise n_exc.BadRequest(resource=model.__tablename__, msg=msg) + query = query.order_by(sort_dir_func(sort_key_attr)) + + # Add pagination + if marker_obj: + marker_values = [getattr(marker_obj, sort[0]) for sort in sorts] + + # Build up an array of sort criteria as in the docstring + criteria_list = [] + for i, sort in enumerate(sorts): + crit_attrs = [(getattr(model, sorts[j][0]) == marker_values[j]) + for j in moves.xrange(i)] + model_attr = getattr(model, sort[0]) + if sort[1]: + crit_attrs.append((model_attr > marker_values[i])) + else: + crit_attrs.append((model_attr < marker_values[i])) + + criteria = sqlalchemy.sql.and_(*crit_attrs) + criteria_list.append(criteria) + + f = sqlalchemy.sql.or_(*criteria_list) + query = query.filter(f) + + if limit: + query = query.limit(limit) + + return query diff --git a/neutron/db/vpn/__init__.py b/neutron/db/vpn/__init__.py new file mode 100644 index 000000000..7f4f3b9f8 --- /dev/null +++ b/neutron/db/vpn/__init__.py @@ -0,0 +1,18 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# (c) Copyright 2013 Hewlett-Packard Development Company, L.P. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Swaminathan Vasudevan, Hewlett-Packard. diff --git a/neutron/db/vpn/vpn_db.py b/neutron/db/vpn/vpn_db.py new file mode 100644 index 000000000..a434b26ba --- /dev/null +++ b/neutron/db/vpn/vpn_db.py @@ -0,0 +1,691 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# (c) Copyright 2013 Hewlett-Packard Development Company, L.P. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Swaminathan Vasudevan, Hewlett-Packard. 
+ +import netaddr +import sqlalchemy as sa +from sqlalchemy import orm +from sqlalchemy.orm import exc + +from neutron.common import constants as n_constants +from neutron.db import api as qdbapi +from neutron.db import db_base_plugin_v2 as base_db +from neutron.db import l3_agentschedulers_db as l3_agent_db +from neutron.db import l3_db +from neutron.db import model_base +from neutron.db import models_v2 +from neutron.extensions import vpnaas +from neutron import manager +from neutron.openstack.common import excutils +from neutron.openstack.common import log as logging +from neutron.openstack.common import uuidutils +from neutron.plugins.common import constants +from neutron.plugins.common import utils + +LOG = logging.getLogger(__name__) + +IP_MIN_MTU = {4: 68, 6: 1280} + + +class IPsecPeerCidr(model_base.BASEV2): + """Internal representation of a IPsec Peer Cidrs.""" + + cidr = sa.Column(sa.String(32), nullable=False, primary_key=True) + ipsec_site_connection_id = sa.Column( + sa.String(36), + sa.ForeignKey('ipsec_site_connections.id', + ondelete="CASCADE"), + primary_key=True) + + +class IPsecPolicy(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant): + """Represents a v2 IPsecPolicy Object.""" + __tablename__ = 'ipsecpolicies' + name = sa.Column(sa.String(255)) + description = sa.Column(sa.String(255)) + transform_protocol = sa.Column(sa.Enum("esp", "ah", "ah-esp", + name="ipsec_transform_protocols"), + nullable=False) + auth_algorithm = sa.Column(sa.Enum("sha1", + name="vpn_auth_algorithms"), + nullable=False) + encryption_algorithm = sa.Column(sa.Enum("3des", "aes-128", + "aes-256", "aes-192", + name="vpn_encrypt_algorithms"), + nullable=False) + encapsulation_mode = sa.Column(sa.Enum("tunnel", "transport", + name="ipsec_encapsulations"), + nullable=False) + lifetime_units = sa.Column(sa.Enum("seconds", "kilobytes", + name="vpn_lifetime_units"), + nullable=False) + lifetime_value = sa.Column(sa.Integer, nullable=False) + pfs = 
sa.Column(sa.Enum("group2", "group5", "group14", + name="vpn_pfs"), nullable=False) + + +class IKEPolicy(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant): + """Represents a v2 IKEPolicy Object.""" + __tablename__ = 'ikepolicies' + name = sa.Column(sa.String(255)) + description = sa.Column(sa.String(255)) + auth_algorithm = sa.Column(sa.Enum("sha1", + name="vpn_auth_algorithms"), + nullable=False) + encryption_algorithm = sa.Column(sa.Enum("3des", "aes-128", + "aes-256", "aes-192", + name="vpn_encrypt_algorithms"), + nullable=False) + phase1_negotiation_mode = sa.Column(sa.Enum("main", + name="ike_phase1_mode"), + nullable=False) + lifetime_units = sa.Column(sa.Enum("seconds", "kilobytes", + name="vpn_lifetime_units"), + nullable=False) + lifetime_value = sa.Column(sa.Integer, nullable=False) + ike_version = sa.Column(sa.Enum("v1", "v2", name="ike_versions"), + nullable=False) + pfs = sa.Column(sa.Enum("group2", "group5", "group14", + name="vpn_pfs"), nullable=False) + + +class IPsecSiteConnection(model_base.BASEV2, + models_v2.HasId, models_v2.HasTenant): + """Represents a IPsecSiteConnection Object.""" + __tablename__ = 'ipsec_site_connections' + name = sa.Column(sa.String(255)) + description = sa.Column(sa.String(255)) + peer_address = sa.Column(sa.String(255), nullable=False) + peer_id = sa.Column(sa.String(255), nullable=False) + route_mode = sa.Column(sa.String(8), nullable=False) + mtu = sa.Column(sa.Integer, nullable=False) + initiator = sa.Column(sa.Enum("bi-directional", "response-only", + name="vpn_initiators"), nullable=False) + auth_mode = sa.Column(sa.String(16), nullable=False) + psk = sa.Column(sa.String(255), nullable=False) + dpd_action = sa.Column(sa.Enum("hold", "clear", + "restart", "disabled", + "restart-by-peer", name="vpn_dpd_actions"), + nullable=False) + dpd_interval = sa.Column(sa.Integer, nullable=False) + dpd_timeout = sa.Column(sa.Integer, nullable=False) + status = sa.Column(sa.String(16), nullable=False) + admin_state_up = 
sa.Column(sa.Boolean(), nullable=False) + vpnservice_id = sa.Column(sa.String(36), + sa.ForeignKey('vpnservices.id'), + nullable=False) + ipsecpolicy_id = sa.Column(sa.String(36), + sa.ForeignKey('ipsecpolicies.id'), + nullable=False) + ikepolicy_id = sa.Column(sa.String(36), + sa.ForeignKey('ikepolicies.id'), + nullable=False) + ipsecpolicy = orm.relationship( + IPsecPolicy, backref='ipsec_site_connection') + ikepolicy = orm.relationship(IKEPolicy, backref='ipsec_site_connection') + peer_cidrs = orm.relationship(IPsecPeerCidr, + backref='ipsec_site_connection', + lazy='joined', + cascade='all, delete, delete-orphan') + + +class VPNService(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant): + """Represents a v2 VPNService Object.""" + name = sa.Column(sa.String(255)) + description = sa.Column(sa.String(255)) + status = sa.Column(sa.String(16), nullable=False) + admin_state_up = sa.Column(sa.Boolean(), nullable=False) + subnet_id = sa.Column(sa.String(36), sa.ForeignKey('subnets.id'), + nullable=False) + router_id = sa.Column(sa.String(36), sa.ForeignKey('routers.id'), + nullable=False) + subnet = orm.relationship(models_v2.Subnet) + router = orm.relationship(l3_db.Router) + ipsec_site_connections = orm.relationship( + IPsecSiteConnection, + backref='vpnservice', + cascade="all, delete-orphan") + + +class VPNPluginDb(vpnaas.VPNPluginBase, base_db.CommonDbMixin): + """VPN plugin database class using SQLAlchemy models.""" + + def __init__(self): + """Do the initialization for the vpn service plugin here.""" + qdbapi.register_models() + + def update_status(self, context, model, v_id, status): + with context.session.begin(subtransactions=True): + v_db = self._get_resource(context, model, v_id) + v_db.update({'status': status}) + + def _get_resource(self, context, model, v_id): + try: + r = self._get_by_id(context, model, v_id) + except exc.NoResultFound: + with excutils.save_and_reraise_exception(reraise=False) as ctx: + if issubclass(model, IPsecSiteConnection): 
+ raise vpnaas.IPsecSiteConnectionNotFound( + ipsec_site_conn_id=v_id + ) + elif issubclass(model, IKEPolicy): + raise vpnaas.IKEPolicyNotFound(ikepolicy_id=v_id) + elif issubclass(model, IPsecPolicy): + raise vpnaas.IPsecPolicyNotFound(ipsecpolicy_id=v_id) + elif issubclass(model, VPNService): + raise vpnaas.VPNServiceNotFound(vpnservice_id=v_id) + ctx.reraise = True + return r + + def assert_update_allowed(self, obj): + status = getattr(obj, 'status', None) + _id = getattr(obj, 'id', None) + if utils.in_pending_status(status): + raise vpnaas.VPNStateInvalidToUpdate(id=_id, state=status) + + def _make_ipsec_site_connection_dict(self, ipsec_site_conn, fields=None): + + res = {'id': ipsec_site_conn['id'], + 'tenant_id': ipsec_site_conn['tenant_id'], + 'name': ipsec_site_conn['name'], + 'description': ipsec_site_conn['description'], + 'peer_address': ipsec_site_conn['peer_address'], + 'peer_id': ipsec_site_conn['peer_id'], + 'route_mode': ipsec_site_conn['route_mode'], + 'mtu': ipsec_site_conn['mtu'], + 'auth_mode': ipsec_site_conn['auth_mode'], + 'psk': ipsec_site_conn['psk'], + 'initiator': ipsec_site_conn['initiator'], + 'dpd': { + 'action': ipsec_site_conn['dpd_action'], + 'interval': ipsec_site_conn['dpd_interval'], + 'timeout': ipsec_site_conn['dpd_timeout'] + }, + 'admin_state_up': ipsec_site_conn['admin_state_up'], + 'status': ipsec_site_conn['status'], + 'vpnservice_id': ipsec_site_conn['vpnservice_id'], + 'ikepolicy_id': ipsec_site_conn['ikepolicy_id'], + 'ipsecpolicy_id': ipsec_site_conn['ipsecpolicy_id'], + 'peer_cidrs': [pcidr['cidr'] + for pcidr in ipsec_site_conn['peer_cidrs']] + } + + return self._fields(res, fields) + + def create_ipsec_site_connection(self, context, ipsec_site_connection): + ipsec_sitecon = ipsec_site_connection['ipsec_site_connection'] + dpd = ipsec_sitecon['dpd'] + ipsec_sitecon['dpd_action'] = dpd.get('action', 'hold') + ipsec_sitecon['dpd_interval'] = dpd.get('interval', 30) + ipsec_sitecon['dpd_timeout'] = dpd.get('timeout', 
120) + tenant_id = self._get_tenant_id_for_create(context, ipsec_sitecon) + self._check_dpd(ipsec_sitecon) + with context.session.begin(subtransactions=True): + #Check permissions + self._get_resource(context, + VPNService, + ipsec_sitecon['vpnservice_id']) + self._get_resource(context, + IKEPolicy, + ipsec_sitecon['ikepolicy_id']) + self._get_resource(context, + IPsecPolicy, + ipsec_sitecon['ipsecpolicy_id']) + self._check_mtu(context, + ipsec_sitecon['mtu'], + ipsec_sitecon['vpnservice_id']) + ipsec_site_conn_db = IPsecSiteConnection( + id=uuidutils.generate_uuid(), + tenant_id=tenant_id, + name=ipsec_sitecon['name'], + description=ipsec_sitecon['description'], + peer_address=ipsec_sitecon['peer_address'], + peer_id=ipsec_sitecon['peer_id'], + route_mode='static', + mtu=ipsec_sitecon['mtu'], + auth_mode='psk', + psk=ipsec_sitecon['psk'], + initiator=ipsec_sitecon['initiator'], + dpd_action=ipsec_sitecon['dpd_action'], + dpd_interval=ipsec_sitecon['dpd_interval'], + dpd_timeout=ipsec_sitecon['dpd_timeout'], + admin_state_up=ipsec_sitecon['admin_state_up'], + status=constants.PENDING_CREATE, + vpnservice_id=ipsec_sitecon['vpnservice_id'], + ikepolicy_id=ipsec_sitecon['ikepolicy_id'], + ipsecpolicy_id=ipsec_sitecon['ipsecpolicy_id'] + ) + context.session.add(ipsec_site_conn_db) + for cidr in ipsec_sitecon['peer_cidrs']: + peer_cidr_db = IPsecPeerCidr( + cidr=cidr, + ipsec_site_connection_id=ipsec_site_conn_db['id'] + ) + context.session.add(peer_cidr_db) + return self._make_ipsec_site_connection_dict(ipsec_site_conn_db) + + def _check_dpd(self, ipsec_sitecon): + if ipsec_sitecon['dpd_timeout'] <= ipsec_sitecon['dpd_interval']: + raise vpnaas.IPsecSiteConnectionDpdIntervalValueError( + attr='dpd_timeout') + + def _check_mtu(self, context, mtu, vpnservice_id): + vpn_service_db = self._get_vpnservice(context, vpnservice_id) + subnet = vpn_service_db.subnet['cidr'] + version = netaddr.IPNetwork(subnet).version + if mtu < IP_MIN_MTU[version]: + raise 
vpnaas.IPsecSiteConnectionMtuError(mtu=mtu, version=version) + + def update_ipsec_site_connection( + self, context, + ipsec_site_conn_id, ipsec_site_connection): + conn = ipsec_site_connection['ipsec_site_connection'] + changed_peer_cidrs = False + with context.session.begin(subtransactions=True): + ipsec_site_conn_db = self._get_resource( + context, + IPsecSiteConnection, + ipsec_site_conn_id) + dpd = conn.get('dpd', {}) + if dpd.get('action'): + conn['dpd_action'] = dpd.get('action') + if dpd.get('interval') or dpd.get('timeout'): + conn['dpd_interval'] = dpd.get( + 'interval', ipsec_site_conn_db.dpd_interval) + conn['dpd_timeout'] = dpd.get( + 'timeout', ipsec_site_conn_db.dpd_timeout) + self._check_dpd(conn) + + if 'mtu' in conn: + self._check_mtu(context, + conn['mtu'], + ipsec_site_conn_db.vpnservice_id) + + self.assert_update_allowed(ipsec_site_conn_db) + + if "peer_cidrs" in conn: + changed_peer_cidrs = True + old_peer_cidr_list = ipsec_site_conn_db['peer_cidrs'] + old_peer_cidr_dict = dict( + (peer_cidr['cidr'], peer_cidr) + for peer_cidr in old_peer_cidr_list) + new_peer_cidr_set = set(conn["peer_cidrs"]) + old_peer_cidr_set = set(old_peer_cidr_dict) + + new_peer_cidrs = list(new_peer_cidr_set) + for peer_cidr in old_peer_cidr_set - new_peer_cidr_set: + context.session.delete(old_peer_cidr_dict[peer_cidr]) + for peer_cidr in new_peer_cidr_set - old_peer_cidr_set: + pcidr = IPsecPeerCidr( + cidr=peer_cidr, + ipsec_site_connection_id=ipsec_site_conn_id) + context.session.add(pcidr) + del conn["peer_cidrs"] + if conn: + ipsec_site_conn_db.update(conn) + result = self._make_ipsec_site_connection_dict(ipsec_site_conn_db) + if changed_peer_cidrs: + result['peer_cidrs'] = new_peer_cidrs + return result + + def delete_ipsec_site_connection(self, context, ipsec_site_conn_id): + with context.session.begin(subtransactions=True): + ipsec_site_conn_db = self._get_resource( + context, IPsecSiteConnection, ipsec_site_conn_id + ) + 
context.session.delete(ipsec_site_conn_db) + + def _get_ipsec_site_connection( + self, context, ipsec_site_conn_id): + return self._get_resource( + context, IPsecSiteConnection, ipsec_site_conn_id) + + def get_ipsec_site_connection(self, context, + ipsec_site_conn_id, fields=None): + ipsec_site_conn_db = self._get_ipsec_site_connection( + context, ipsec_site_conn_id) + return self._make_ipsec_site_connection_dict( + ipsec_site_conn_db, fields) + + def get_ipsec_site_connections(self, context, filters=None, fields=None): + return self._get_collection(context, IPsecSiteConnection, + self._make_ipsec_site_connection_dict, + filters=filters, fields=fields) + + def update_ipsec_site_conn_status(self, context, conn_id, new_status): + with context.session.begin(): + self._update_connection_status(context, conn_id, new_status, True) + + def _update_connection_status(self, context, conn_id, new_status, + updated_pending): + """Update the connection status, if changed. + + If the connection is not in a pending state, unconditionally update + the status. Likewise, if in a pending state, and have an indication + that the status has changed, then update the database. 
+ """ + try: + conn_db = self._get_ipsec_site_connection(context, conn_id) + except vpnaas.IPsecSiteConnectionNotFound: + return + if not utils.in_pending_status(conn_db.status) or updated_pending: + conn_db.status = new_status + + def _make_ikepolicy_dict(self, ikepolicy, fields=None): + res = {'id': ikepolicy['id'], + 'tenant_id': ikepolicy['tenant_id'], + 'name': ikepolicy['name'], + 'description': ikepolicy['description'], + 'auth_algorithm': ikepolicy['auth_algorithm'], + 'encryption_algorithm': ikepolicy['encryption_algorithm'], + 'phase1_negotiation_mode': ikepolicy['phase1_negotiation_mode'], + 'lifetime': { + 'units': ikepolicy['lifetime_units'], + 'value': ikepolicy['lifetime_value'], + }, + 'ike_version': ikepolicy['ike_version'], + 'pfs': ikepolicy['pfs'] + } + + return self._fields(res, fields) + + def create_ikepolicy(self, context, ikepolicy): + ike = ikepolicy['ikepolicy'] + tenant_id = self._get_tenant_id_for_create(context, ike) + lifetime_info = ike.get('lifetime', []) + lifetime_units = lifetime_info.get('units', 'seconds') + lifetime_value = lifetime_info.get('value', 3600) + + with context.session.begin(subtransactions=True): + ike_db = IKEPolicy( + id=uuidutils.generate_uuid(), + tenant_id=tenant_id, + name=ike['name'], + description=ike['description'], + auth_algorithm=ike['auth_algorithm'], + encryption_algorithm=ike['encryption_algorithm'], + phase1_negotiation_mode=ike['phase1_negotiation_mode'], + lifetime_units=lifetime_units, + lifetime_value=lifetime_value, + ike_version=ike['ike_version'], + pfs=ike['pfs'] + ) + + context.session.add(ike_db) + return self._make_ikepolicy_dict(ike_db) + + def update_ikepolicy(self, context, ikepolicy_id, ikepolicy): + ike = ikepolicy['ikepolicy'] + with context.session.begin(subtransactions=True): + ikepolicy = context.session.query(IPsecSiteConnection).filter_by( + ikepolicy_id=ikepolicy_id).first() + if ikepolicy: + raise vpnaas.IKEPolicyInUse(ikepolicy_id=ikepolicy_id) + ike_db = 
self._get_resource(context, IKEPolicy, ikepolicy_id) + if ike: + lifetime_info = ike.get('lifetime') + if lifetime_info: + if lifetime_info.get('units'): + ike['lifetime_units'] = lifetime_info['units'] + if lifetime_info.get('value'): + ike['lifetime_value'] = lifetime_info['value'] + ike_db.update(ike) + return self._make_ikepolicy_dict(ike_db) + + def delete_ikepolicy(self, context, ikepolicy_id): + with context.session.begin(subtransactions=True): + ikepolicy = context.session.query(IPsecSiteConnection).filter_by( + ikepolicy_id=ikepolicy_id).first() + if ikepolicy: + raise vpnaas.IKEPolicyInUse(ikepolicy_id=ikepolicy_id) + ike_db = self._get_resource(context, IKEPolicy, ikepolicy_id) + context.session.delete(ike_db) + + def get_ikepolicy(self, context, ikepolicy_id, fields=None): + ike_db = self._get_resource(context, IKEPolicy, ikepolicy_id) + return self._make_ikepolicy_dict(ike_db, fields) + + def get_ikepolicies(self, context, filters=None, fields=None): + return self._get_collection(context, IKEPolicy, + self._make_ikepolicy_dict, + filters=filters, fields=fields) + + def _make_ipsecpolicy_dict(self, ipsecpolicy, fields=None): + + res = {'id': ipsecpolicy['id'], + 'tenant_id': ipsecpolicy['tenant_id'], + 'name': ipsecpolicy['name'], + 'description': ipsecpolicy['description'], + 'transform_protocol': ipsecpolicy['transform_protocol'], + 'auth_algorithm': ipsecpolicy['auth_algorithm'], + 'encryption_algorithm': ipsecpolicy['encryption_algorithm'], + 'encapsulation_mode': ipsecpolicy['encapsulation_mode'], + 'lifetime': { + 'units': ipsecpolicy['lifetime_units'], + 'value': ipsecpolicy['lifetime_value'], + }, + 'pfs': ipsecpolicy['pfs'] + } + + return self._fields(res, fields) + + def create_ipsecpolicy(self, context, ipsecpolicy): + ipsecp = ipsecpolicy['ipsecpolicy'] + tenant_id = self._get_tenant_id_for_create(context, ipsecp) + lifetime_info = ipsecp['lifetime'] + lifetime_units = lifetime_info.get('units', 'seconds') + lifetime_value = 
lifetime_info.get('value', 3600) + + with context.session.begin(subtransactions=True): + ipsecp_db = IPsecPolicy(id=uuidutils.generate_uuid(), + tenant_id=tenant_id, + name=ipsecp['name'], + description=ipsecp['description'], + transform_protocol=ipsecp['transform_' + 'protocol'], + auth_algorithm=ipsecp['auth_algorithm'], + encryption_algorithm=ipsecp['encryption_' + 'algorithm'], + encapsulation_mode=ipsecp['encapsulation_' + 'mode'], + lifetime_units=lifetime_units, + lifetime_value=lifetime_value, + pfs=ipsecp['pfs']) + context.session.add(ipsecp_db) + return self._make_ipsecpolicy_dict(ipsecp_db) + + def update_ipsecpolicy(self, context, ipsecpolicy_id, ipsecpolicy): + ipsecp = ipsecpolicy['ipsecpolicy'] + with context.session.begin(subtransactions=True): + ipsecpolicy = context.session.query(IPsecSiteConnection).filter_by( + ipsecpolicy_id=ipsecpolicy_id).first() + if ipsecpolicy: + raise vpnaas.IPsecPolicyInUse(ipsecpolicy_id=ipsecpolicy_id) + ipsecp_db = self._get_resource(context, + IPsecPolicy, + ipsecpolicy_id) + if ipsecp: + lifetime_info = ipsecp.get('lifetime') + if lifetime_info: + if lifetime_info.get('units'): + ipsecp['lifetime_units'] = lifetime_info['units'] + if lifetime_info.get('value'): + ipsecp['lifetime_value'] = lifetime_info['value'] + ipsecp_db.update(ipsecp) + return self._make_ipsecpolicy_dict(ipsecp_db) + + def delete_ipsecpolicy(self, context, ipsecpolicy_id): + with context.session.begin(subtransactions=True): + ipsecpolicy = context.session.query(IPsecSiteConnection).filter_by( + ipsecpolicy_id=ipsecpolicy_id).first() + if ipsecpolicy: + raise vpnaas.IPsecPolicyInUse(ipsecpolicy_id=ipsecpolicy_id) + ipsec_db = self._get_resource(context, IPsecPolicy, ipsecpolicy_id) + context.session.delete(ipsec_db) + + def get_ipsecpolicy(self, context, ipsecpolicy_id, fields=None): + ipsec_db = self._get_resource(context, IPsecPolicy, ipsecpolicy_id) + return self._make_ipsecpolicy_dict(ipsec_db, fields) + + def get_ipsecpolicies(self, context, 
filters=None, fields=None): + return self._get_collection(context, IPsecPolicy, + self._make_ipsecpolicy_dict, + filters=filters, fields=fields) + + def _make_vpnservice_dict(self, vpnservice, fields=None): + res = {'id': vpnservice['id'], + 'name': vpnservice['name'], + 'description': vpnservice['description'], + 'tenant_id': vpnservice['tenant_id'], + 'subnet_id': vpnservice['subnet_id'], + 'router_id': vpnservice['router_id'], + 'admin_state_up': vpnservice['admin_state_up'], + 'status': vpnservice['status']} + return self._fields(res, fields) + + def _check_router(self, context, router_id): + l3_plugin = manager.NeutronManager.get_service_plugins().get( + constants.L3_ROUTER_NAT) + router = l3_plugin.get_router(context, router_id) + if not router.get(l3_db.EXTERNAL_GW_INFO): + raise vpnaas.RouterIsNotExternal(router_id=router_id) + + def _check_subnet_id(self, context, router_id, subnet_id): + core_plugin = manager.NeutronManager.get_plugin() + ports = core_plugin.get_ports( + context, + filters={ + 'fixed_ips': {'subnet_id': [subnet_id]}, + 'device_id': [router_id]}) + if not ports: + raise vpnaas.SubnetIsNotConnectedToRouter( + subnet_id=subnet_id, + router_id=router_id) + + def create_vpnservice(self, context, vpnservice): + vpns = vpnservice['vpnservice'] + tenant_id = self._get_tenant_id_for_create(context, vpns) + self._check_router(context, vpns['router_id']) + self._check_subnet_id(context, vpns['router_id'], vpns['subnet_id']) + with context.session.begin(subtransactions=True): + vpnservice_db = VPNService(id=uuidutils.generate_uuid(), + tenant_id=tenant_id, + name=vpns['name'], + description=vpns['description'], + subnet_id=vpns['subnet_id'], + router_id=vpns['router_id'], + admin_state_up=vpns['admin_state_up'], + status=constants.PENDING_CREATE) + context.session.add(vpnservice_db) + return self._make_vpnservice_dict(vpnservice_db) + + def update_vpnservice(self, context, vpnservice_id, vpnservice): + vpns = vpnservice['vpnservice'] + with 
context.session.begin(subtransactions=True): + vpns_db = self._get_resource(context, VPNService, vpnservice_id) + self.assert_update_allowed(vpns_db) + if vpns: + vpns_db.update(vpns) + return self._make_vpnservice_dict(vpns_db) + + def delete_vpnservice(self, context, vpnservice_id): + with context.session.begin(subtransactions=True): + if context.session.query(IPsecSiteConnection).filter_by( + vpnservice_id=vpnservice_id + ).first(): + raise vpnaas.VPNServiceInUse(vpnservice_id=vpnservice_id) + vpns_db = self._get_resource(context, VPNService, vpnservice_id) + context.session.delete(vpns_db) + + def _get_vpnservice(self, context, vpnservice_id): + return self._get_resource(context, VPNService, vpnservice_id) + + def get_vpnservice(self, context, vpnservice_id, fields=None): + vpns_db = self._get_resource(context, VPNService, vpnservice_id) + return self._make_vpnservice_dict(vpns_db, fields) + + def get_vpnservices(self, context, filters=None, fields=None): + return self._get_collection(context, VPNService, + self._make_vpnservice_dict, + filters=filters, fields=fields) + + def check_router_in_use(self, context, router_id): + vpnservices = self.get_vpnservices( + context, filters={'router_id': [router_id]}) + if vpnservices: + raise vpnaas.RouterInUseByVPNService( + router_id=router_id, + vpnservice_id=vpnservices[0]['id']) + + +class VPNPluginRpcDbMixin(): + def _get_agent_hosting_vpn_services(self, context, host): + + plugin = manager.NeutronManager.get_plugin() + agent = plugin._get_agent_by_type_and_host( + context, n_constants.AGENT_TYPE_L3, host) + if not agent.admin_state_up: + return [] + query = context.session.query(VPNService) + query = query.join(IPsecSiteConnection) + query = query.join(IKEPolicy) + query = query.join(IPsecPolicy) + query = query.join(IPsecPeerCidr) + query = query.join(l3_agent_db.RouterL3AgentBinding, + l3_agent_db.RouterL3AgentBinding.router_id == + VPNService.router_id) + query = query.filter( + 
l3_agent_db.RouterL3AgentBinding.l3_agent_id == agent.id) + return query + + def update_status_by_agent(self, context, service_status_info_list): + """Updating vpnservice and vpnconnection status. + + :param context: context variable + :param service_status_info_list: list of status + The structure is + [{id: vpnservice_id, + status: ACTIVE|DOWN|ERROR, + updated_pending_status: True|False + ipsec_site_connections: { + ipsec_site_connection_id: { + status: ACTIVE|DOWN|ERROR, + updated_pending_status: True|False + } + }] + The agent will set updated_pending_status as True, + when agent update any pending status. + """ + with context.session.begin(subtransactions=True): + for vpnservice in service_status_info_list: + try: + vpnservice_db = self._get_vpnservice( + context, vpnservice['id']) + except vpnaas.VPNServiceNotFound: + LOG.warn(_('vpnservice %s in db is already deleted'), + vpnservice['id']) + continue + + if (not utils.in_pending_status(vpnservice_db.status) + or vpnservice['updated_pending_status']): + vpnservice_db.status = vpnservice['status'] + for conn_id, conn in vpnservice[ + 'ipsec_site_connections'].items(): + self._update_connection_status( + context, conn_id, conn['status'], + conn['updated_pending_status']) diff --git a/neutron/debug/README b/neutron/debug/README new file mode 100644 index 000000000..181cb4fab --- /dev/null +++ b/neutron/debug/README @@ -0,0 +1,38 @@ +Debug Helper Script for Neutron + +- Configure +export NEUTRON_TEST_CONFIG_FILE=/etc/neutron/debug.ini +or +export NEUTRON_TEST_CONFIG_FILE=/etc/neutron/l3_agent.ini + +you can also specify config file by --config-file option + +- Usage +neutron-debug commands + +probe-create + Create probe port - create port and interface, then plug it in. + This commands returns a port id of a probe port. A probe port is a port which is used to test. + The port id is probe id. + We can have multiple probe probes in a network, in order to check connectivity between ports. 
+ + neutron-debug probe-exec probe_id_1 'nc -l 192.168.100.3 22' + neutron-debug probe-exec probe_id_2 'nc -vz 192.168.100.4 22' + + Note: You should use a user and a tenant who has permission to + modify network and subnet if you want to probe. For example, you need to be admin user if you + want to probe external network. + +probe-delete Delete probe - delete port then uplug +probe-exec 'command' Exec commands on the namespace of the probe +`probe-exec ` 'interactive command' Exec interactive command (eg, ssh) + +probe-list List probes +probe-clear Clear All probes + +ping-all --id --timeout 1 (optional) + ping-all is all-in-one command to ping all fixed ip's in all network or a specified network. + In the command probe is automatically created if needed. + +neutron-debug extends the shell of neutronclient, so you can use all the commands of neutron + diff --git a/neutron/debug/__init__.py b/neutron/debug/__init__.py new file mode 100644 index 000000000..1854ca9aa --- /dev/null +++ b/neutron/debug/__init__.py @@ -0,0 +1,16 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2012, Nachi Ueno, NTT MCL, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/neutron/debug/commands.py b/neutron/debug/commands.py new file mode 100644 index 000000000..8a0173e2b --- /dev/null +++ b/neutron/debug/commands.py @@ -0,0 +1,157 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012, Nachi Ueno, NTT MCL, Inc. 
+# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from cliff import lister +from neutronclient.common import utils +from neutronclient.neutron import v2_0 as client +from neutronclient.neutron.v2_0 import port + +from neutron.openstack.common import log as logging + + +class ProbeCommand(client.NeutronCommand): + log = logging.getLogger(__name__ + '.ProbeCommand') + + def get_debug_agent(self): + return self.app.debug_agent + + def run(self, parsed_args): + self.log.debug('run(%s)', parsed_args) + self.log.info(_('Unimplemented commands')) + + +class CreateProbe(ProbeCommand): + """Create probe port and interface, then plug it in.""" + + log = logging.getLogger(__name__ + '.CreateProbe') + + def get_parser(self, prog_name): + parser = super(CreateProbe, self).get_parser(prog_name) + parser.add_argument( + 'id', metavar='network_id', + help=_('ID of network to probe')) + parser.add_argument( + '--device-owner', + default='network', choices=['network', 'compute'], + help=_('Owner type of the device: network/compute')) + return parser + + def run(self, parsed_args): + self.log.debug('run(%s)' % parsed_args) + debug_agent = self.get_debug_agent() + probe_port = debug_agent.create_probe(parsed_args.id, + parsed_args.device_owner) + self.log.info(_('Probe created : %s '), probe_port.id) + + +class DeleteProbe(ProbeCommand): + """Delete probe - delete port then uplug.""" + + log = logging.getLogger(__name__ + '.DeleteProbe') + + def get_parser(self, 
prog_name): + parser = super(DeleteProbe, self).get_parser(prog_name) + parser.add_argument( + 'id', metavar='port_id', + help=_('ID of probe port to delete')) + return parser + + def run(self, parsed_args): + self.log.debug('run(%s)' % parsed_args) + debug_agent = self.get_debug_agent() + debug_agent.delete_probe(parsed_args.id) + self.log.info(_('Probe %s deleted'), parsed_args.id) + + +class ListProbe(client.NeutronCommand, lister.Lister): + """List probes.""" + + log = logging.getLogger(__name__ + '.ListProbe') + _formatters = {'fixed_ips': port._format_fixed_ips, } + + def get_debug_agent(self): + return self.app.debug_agent + + def get_data(self, parsed_args): + + debug_agent = self.get_debug_agent() + info = debug_agent.list_probes() + columns = len(info) > 0 and sorted(info[0].keys()) or [] + return (columns, (utils.get_item_properties( + s, columns, formatters=self._formatters, ) + for s in info), ) + + +class ClearProbe(ProbeCommand): + """Clear All probes.""" + + log = logging.getLogger(__name__ + '.ClearProbe') + + def run(self, parsed_args): + self.log.debug('run(%s)' % parsed_args) + debug_agent = self.get_debug_agent() + debug_agent.clear_probe() + self.log.info(_('All Probes deleted ')) + + +class ExecProbe(ProbeCommand): + """Exec commands on the namespace of the probe.""" + + log = logging.getLogger(__name__ + '.ExecProbe') + + def get_parser(self, prog_name): + parser = super(ExecProbe, self).get_parser(prog_name) + parser.add_argument( + 'id', metavar='port_id', + help=_('ID of probe port to execute command')) + parser.add_argument( + 'command', metavar='command', + nargs='?', + default=None, + help=_('Command to execute')) + return parser + + def run(self, parsed_args): + self.log.debug('run(%s)' % parsed_args) + debug_agent = self.get_debug_agent() + result = debug_agent.exec_command(parsed_args.id, parsed_args.command) + self.app.stdout.write(result + '\n') + + +class PingAll(ProbeCommand): + """Ping all fixed_ip.""" + + log = 
logging.getLogger(__name__ + '.ExecProbe') + + def get_parser(self, prog_name): + parser = super(PingAll, self).get_parser(prog_name) + parser.add_argument( + '--timeout', metavar='', + default=10, + help=_('Ping timeout')) + parser.add_argument( + '--id', metavar='network_id', + default=None, + help=_('ID of network')) + return parser + + def run(self, parsed_args): + self.log.debug('run(%s)' % parsed_args) + debug_agent = self.get_debug_agent() + result = debug_agent.ping_all(parsed_args.id, + timeout=parsed_args.timeout) + self.app.stdout.write(result + '\n') diff --git a/neutron/debug/debug_agent.py b/neutron/debug/debug_agent.py new file mode 100644 index 000000000..d6465ab52 --- /dev/null +++ b/neutron/debug/debug_agent.py @@ -0,0 +1,198 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012, Nachi Ueno, NTT MCL, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import shlex +import socket + +import netaddr +from oslo.config import cfg + +from neutron.agent.common import config +from neutron.agent.linux import dhcp +from neutron.agent.linux import ip_lib +from neutron.agent.linux import utils +from neutron.openstack.common import log as logging + + +LOG = logging.getLogger(__name__) + +DEVICE_OWNER_NETWORK_PROBE = 'network:probe' + +DEVICE_OWNER_COMPUTE_PROBE = 'compute:probe' + + +class NeutronDebugAgent(): + + OPTS = [ + # Needed for drivers + cfg.StrOpt('external_network_bridge', default='br-ex', + help=_("Name of bridge used for external network " + "traffic.")), + ] + + def __init__(self, conf, client, driver): + self.conf = conf + self.root_helper = config.get_root_helper(conf) + self.client = client + self.driver = driver + + def _get_namespace(self, port): + return "qprobe-%s" % port.id + + def create_probe(self, network_id, device_owner='network'): + network = self._get_network(network_id) + bridge = None + if network.external: + bridge = self.conf.external_network_bridge + + port = self._create_port(network, device_owner) + interface_name = self.driver.get_device_name(port) + namespace = None + if self.conf.use_namespaces: + namespace = self._get_namespace(port) + + if ip_lib.device_exists(interface_name, self.root_helper, namespace): + LOG.debug(_('Reusing existing device: %s.'), interface_name) + else: + self.driver.plug(network.id, + port.id, + interface_name, + port.mac_address, + bridge=bridge, + namespace=namespace) + ip_cidrs = [] + for fixed_ip in port.fixed_ips: + subnet = fixed_ip.subnet + net = netaddr.IPNetwork(subnet.cidr) + ip_cidr = '%s/%s' % (fixed_ip.ip_address, net.prefixlen) + ip_cidrs.append(ip_cidr) + self.driver.init_l3(interface_name, ip_cidrs, namespace=namespace) + return port + + def _get_subnet(self, subnet_id): + subnet_dict = self.client.show_subnet(subnet_id)['subnet'] + return dhcp.DictModel(subnet_dict) + + def _get_network(self, network_id): + network_dict = 
self.client.show_network(network_id)['network'] + network = dhcp.DictModel(network_dict) + network.external = network_dict.get('router:external') + obj_subnet = [self._get_subnet(s_id) for s_id in network.subnets] + network.subnets = obj_subnet + return network + + def clear_probe(self): + ports = self.client.list_ports( + device_id=socket.gethostname(), + device_owner=[DEVICE_OWNER_NETWORK_PROBE, + DEVICE_OWNER_COMPUTE_PROBE]) + info = ports['ports'] + for port in info: + self.delete_probe(port['id']) + + def delete_probe(self, port_id): + port = dhcp.DictModel(self.client.show_port(port_id)['port']) + network = self._get_network(port.network_id) + bridge = None + if network.external: + bridge = self.conf.external_network_bridge + ip = ip_lib.IPWrapper(self.root_helper) + namespace = self._get_namespace(port) + if self.conf.use_namespaces and ip.netns.exists(namespace): + self.driver.unplug(self.driver.get_device_name(port), + bridge=bridge, + namespace=namespace) + try: + ip.netns.delete(namespace) + except Exception: + LOG.warn(_('Failed to delete namespace %s'), namespace) + else: + self.driver.unplug(self.driver.get_device_name(port), + bridge=bridge) + self.client.delete_port(port.id) + + def list_probes(self): + ports = self.client.list_ports( + device_owner=[DEVICE_OWNER_NETWORK_PROBE, + DEVICE_OWNER_COMPUTE_PROBE]) + info = ports['ports'] + for port in info: + port['device_name'] = self.driver.get_device_name( + dhcp.DictModel(port)) + return info + + def exec_command(self, port_id, command=None): + port = dhcp.DictModel(self.client.show_port(port_id)['port']) + ip = ip_lib.IPWrapper(self.root_helper) + namespace = self._get_namespace(port) + if self.conf.use_namespaces: + if not command: + return "sudo ip netns exec %s" % self._get_namespace(port) + namespace = ip.ensure_namespace(namespace) + return namespace.netns.execute(shlex.split(command)) + else: + return utils.execute(shlex.split(command)) + + def ensure_probe(self, network_id): + ports = 
self.client.list_ports(network_id=network_id, + device_id=socket.gethostname(), + device_owner=DEVICE_OWNER_NETWORK_PROBE) + info = ports.get('ports', []) + if info: + return dhcp.DictModel(info[0]) + else: + return self.create_probe(network_id) + + def ping_all(self, network_id=None, timeout=1): + if network_id: + ports = self.client.list_ports(network_id=network_id)['ports'] + else: + ports = self.client.list_ports()['ports'] + result = "" + for port in ports: + probe = self.ensure_probe(port['network_id']) + if port['device_owner'] == DEVICE_OWNER_NETWORK_PROBE: + continue + for fixed_ip in port['fixed_ips']: + address = fixed_ip['ip_address'] + subnet = self._get_subnet(fixed_ip['subnet_id']) + if subnet.ip_version == 4: + ping_command = 'ping' + else: + ping_command = 'ping6' + result += self.exec_command(probe.id, + '%s -c 1 -w %s %s' % (ping_command, + timeout, + address)) + return result + + def _create_port(self, network, device_owner): + host = self.conf.host + body = {'port': {'admin_state_up': True, + 'network_id': network.id, + 'device_id': '%s' % socket.gethostname(), + 'device_owner': '%s:probe' % device_owner, + 'tenant_id': network.tenant_id, + 'binding:host_id': host, + 'fixed_ips': [dict(subnet_id=s.id) + for s in network.subnets]}} + port_dict = self.client.create_port(body)['port'] + port = dhcp.DictModel(port_dict) + port.network = network + for fixed_ip in port.fixed_ips: + fixed_ip.subnet = self._get_subnet(fixed_ip.subnet_id) + return port diff --git a/neutron/debug/shell.py b/neutron/debug/shell.py new file mode 100644 index 000000000..a175f1a95 --- /dev/null +++ b/neutron/debug/shell.py @@ -0,0 +1,90 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2012, Nachi Ueno, NTT MCL, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import sys + +from oslo.config import cfg + +from neutron.agent.common import config +from neutron.agent.linux import interface +from neutron.debug import debug_agent +from neutron.openstack.common import importutils +from neutronclient.common import exceptions as exc +from neutronclient.common import utils +from neutronclient import shell + +COMMAND_V2 = { + 'probe-create': utils.import_class( + 'neutron.debug.commands.CreateProbe'), + 'probe-delete': utils.import_class( + 'neutron.debug.commands.DeleteProbe'), + 'probe-list': utils.import_class( + 'neutron.debug.commands.ListProbe'), + 'probe-clear': utils.import_class( + 'neutron.debug.commands.ClearProbe'), + 'probe-exec': utils.import_class( + 'neutron.debug.commands.ExecProbe'), + 'ping-all': utils.import_class( + 'neutron.debug.commands.PingAll'), + #TODO(nati) ping, netcat , nmap, bench +} +COMMANDS = {'2.0': COMMAND_V2} + + +class NeutronDebugShell(shell.NeutronShell): + def __init__(self, api_version): + super(NeutronDebugShell, self).__init__(api_version) + for k, v in COMMANDS[api_version].items(): + self.command_manager.add_command(k, v) + + def build_option_parser(self, description, version): + parser = super(NeutronDebugShell, self).build_option_parser( + description, version) + default = ( + shell.env('NEUTRON_TEST_CONFIG_FILE') or + shell.env('QUANTUM_TEST_CONFIG_FILE') + ) + parser.add_argument( + '--config-file', + default=default, + help=_('Config file for interface driver ' + '(You may also use l3_agent.ini)')) + return parser + + def initialize_app(self, argv): + 
super(NeutronDebugShell, self).initialize_app(argv) + if not self.options.config_file: + raise exc.CommandError( + _("You must provide a config file for bridge -" + " either --config-file or env[NEUTRON_TEST_CONFIG_FILE]")) + client = self.client_manager.neutron + cfg.CONF.register_opts(interface.OPTS) + cfg.CONF.register_opts(debug_agent.NeutronDebugAgent.OPTS) + config.register_interface_driver_opts_helper(cfg.CONF) + config.register_use_namespaces_opts_helper(cfg.CONF) + config.register_root_helper(cfg.CONF) + cfg.CONF(['--config-file', self.options.config_file]) + config.setup_logging(cfg.CONF) + driver = importutils.import_object(cfg.CONF.interface_driver, cfg.CONF) + self.debug_agent = debug_agent.NeutronDebugAgent(cfg.CONF, + client, + driver) + + +def main(argv=None): + return NeutronDebugShell(shell.NEUTRON_API_VERSION).run( + argv or sys.argv[1:]) diff --git a/neutron/extensions/__init__.py b/neutron/extensions/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/neutron/extensions/agent.py b/neutron/extensions/agent.py new file mode 100644 index 000000000..0dc0acd05 --- /dev/null +++ b/neutron/extensions/agent.py @@ -0,0 +1,163 @@ +# Copyright (c) 2013 OpenStack Foundation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import abc + +from neutron.api import extensions +from neutron.api.v2 import attributes as attr +from neutron.api.v2 import base +from neutron.common import exceptions +from neutron import manager + + +# Attribute Map +RESOURCE_NAME = 'agent' +RESOURCE_ATTRIBUTE_MAP = { + RESOURCE_NAME + 's': { + 'id': {'allow_post': False, 'allow_put': False, + 'validate': {'type:uuid': None}, + 'is_visible': True}, + 'agent_type': {'allow_post': False, 'allow_put': False, + 'is_visible': True}, + 'binary': {'allow_post': False, 'allow_put': False, + 'is_visible': True}, + 'topic': {'allow_post': False, 'allow_put': False, + 'is_visible': True}, + 'host': {'allow_post': False, 'allow_put': False, + 'is_visible': True}, + 'admin_state_up': {'allow_post': False, 'allow_put': True, + 'convert_to': attr.convert_to_boolean, + 'is_visible': True}, + 'created_at': {'allow_post': False, 'allow_put': False, + 'is_visible': True}, + 'started_at': {'allow_post': False, 'allow_put': False, + 'is_visible': True}, + 'heartbeat_timestamp': {'allow_post': False, 'allow_put': False, + 'is_visible': True}, + 'alive': {'allow_post': False, 'allow_put': False, + 'is_visible': True}, + 'configurations': {'allow_post': False, 'allow_put': False, + 'is_visible': True}, + 'description': {'allow_post': False, 'allow_put': True, + 'is_visible': True, + 'validate': {'type:string': None}}, + }, +} + + +class AgentNotFound(exceptions.NotFound): + message = _("Agent %(id)s could not be found") + + +class AgentNotFoundByTypeHost(exceptions.NotFound): + message = _("Agent with agent_type=%(agent_type)s and host=%(host)s " + "could not be found") + + +class MultipleAgentFoundByTypeHost(exceptions.Conflict): + message = _("Multiple agents with agent_type=%(agent_type)s and " + "host=%(host)s found") + + +class Agent(object): + """Agent management extension.""" + + @classmethod + def get_name(cls): + return "agent" + + @classmethod + def get_alias(cls): + return "agent" + + @classmethod + def 
get_description(cls): + return "The agent management extension." + + @classmethod + def get_namespace(cls): + return "http://docs.openstack.org/ext/agent/api/v2.0" + + @classmethod + def get_updated(cls): + return "2013-02-03T10:00:00-00:00" + + @classmethod + def get_resources(cls): + """Returns Ext Resources.""" + my_plurals = [(key, key[:-1]) for key in RESOURCE_ATTRIBUTE_MAP.keys()] + attr.PLURALS.update(dict(my_plurals)) + plugin = manager.NeutronManager.get_plugin() + params = RESOURCE_ATTRIBUTE_MAP.get(RESOURCE_NAME + 's') + controller = base.create_resource(RESOURCE_NAME + 's', + RESOURCE_NAME, + plugin, params + ) + + ex = extensions.ResourceExtension(RESOURCE_NAME + 's', + controller) + + return [ex] + + def get_extended_resources(self, version): + if version == "2.0": + return RESOURCE_ATTRIBUTE_MAP + else: + return {} + + +class AgentPluginBase(object): + """REST API to operate the Agent. + + All of method must be in an admin context. + """ + + def create_agent(self, context, agent): + """Create agent. + + This operation is not allow in REST API. + @raise exceptions.BadRequest: + """ + raise exceptions.BadRequest + + @abc.abstractmethod + def delete_agent(self, context, id): + """Delete agent. + + Agents register themselves on reporting state. + But if a agent does not report its status + for a long time (for example, it is dead for ever. ), + admin can remove it. Agents must be disabled before + being removed. + """ + pass + + @abc.abstractmethod + def update_agent(self, context, agent): + """Disable or Enable the agent. + + Discription also can be updated. Some agents cannot be disabled, such + as plugins, services. An error code should be reported in this case. 
+ @raise exceptions.BadRequest: + """ + pass + + @abc.abstractmethod + def get_agents(self, context, filters=None, fields=None): + pass + + @abc.abstractmethod + def get_agent(self, context, id, fields=None): + pass diff --git a/neutron/extensions/allowedaddresspairs.py b/neutron/extensions/allowedaddresspairs.py new file mode 100644 index 000000000..a9328aaa4 --- /dev/null +++ b/neutron/extensions/allowedaddresspairs.py @@ -0,0 +1,116 @@ +# Copyright 2013 VMware, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import webob.exc + +from neutron.api.v2 import attributes as attr +from neutron.common import exceptions as nexception + + +class AllowedAddressPairsMissingIP(nexception.InvalidInput): + message = _("AllowedAddressPair must contain ip_address") + + +class AddressPairAndPortSecurityRequired(nexception.Conflict): + message = _("Port Security must be enabled in order to have allowed " + "address pairs on a port.") + + +class DuplicateAddressPairInRequest(nexception.InvalidInput): + message = _("Request contains duplicate address pair: " + "mac_address %(mac_address)s ip_address %(ip_address)s.") + + +def _validate_allowed_address_pairs(address_pairs, valid_values=None): + unique_check = {} + for address_pair in address_pairs: + # mac_address is optional, if not set we use the mac on the port + if 'mac_address' in address_pair: + msg = attr._validate_mac_address(address_pair['mac_address']) + if msg: + raise webob.exc.HTTPBadRequest(msg) + if 'ip_address' not in address_pair: + raise AllowedAddressPairsMissingIP() + + mac = address_pair.get('mac_address') + ip_address = address_pair['ip_address'] + if (mac, ip_address) not in unique_check: + unique_check[(mac, ip_address)] = None + else: + raise DuplicateAddressPairInRequest(mac_address=mac, + ip_address=ip_address) + + invalid_attrs = set(address_pair.keys()) - set(['mac_address', + 'ip_address']) + if invalid_attrs: + msg = (_("Unrecognized attribute(s) '%s'") % + ', '.join(set(address_pair.keys()) - + set(['mac_address', 'ip_address']))) + raise webob.exc.HTTPBadRequest(msg) + + if '/' in ip_address: + msg = attr._validate_subnet(ip_address) + else: + msg = attr._validate_ip_address(ip_address) + if msg: + raise webob.exc.HTTPBadRequest(msg) + +attr.validators['type:validate_allowed_address_pairs'] = ( + _validate_allowed_address_pairs) + +ADDRESS_PAIRS = 'allowed_address_pairs' +EXTENDED_ATTRIBUTES_2_0 = { + 'ports': { + ADDRESS_PAIRS: {'allow_post': True, 'allow_put': True, + 'convert_list_to': + 
attr.convert_kvp_list_to_dict, + 'validate': {'type:validate_allowed_address_pairs': + None}, + 'enforce_policy': True, + 'default': attr.ATTR_NOT_SPECIFIED, + 'is_visible': True}, + } +} + + +class Allowedaddresspairs(object): + """Extension class supporting allowed address pairs.""" + + @classmethod + def get_name(cls): + return "Allowed Address Pairs" + + @classmethod + def get_alias(cls): + return "allowed-address-pairs" + + @classmethod + def get_description(cls): + return "Provides allowed address pairs" + + @classmethod + def get_namespace(cls): + return "http://docs.openstack.org/ext/allowedaddresspairs/api/v2.0" + + @classmethod + def get_updated(cls): + return "2013-07-23T10:00:00-00:00" + + def get_extended_resources(self, version): + if version == "2.0": + attr.PLURALS.update({'allowed_address_pairs': + 'allowed_address_pair'}) + return EXTENDED_ATTRIBUTES_2_0 + else: + return {} diff --git a/neutron/extensions/dhcpagentscheduler.py b/neutron/extensions/dhcpagentscheduler.py new file mode 100644 index 000000000..d86ba614f --- /dev/null +++ b/neutron/extensions/dhcpagentscheduler.py @@ -0,0 +1,152 @@ +# Copyright (c) 2013 OpenStack Foundation. +# All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import abc + +from neutron.api import extensions +from neutron.api.v2 import base +from neutron.api.v2 import resource +from neutron.common import constants +from neutron.common import exceptions +from neutron.extensions import agent +from neutron import manager +from neutron import policy +from neutron import wsgi + +DHCP_NET = 'dhcp-network' +DHCP_NETS = DHCP_NET + 's' +DHCP_AGENT = 'dhcp-agent' +DHCP_AGENTS = DHCP_AGENT + 's' + + +class NetworkSchedulerController(wsgi.Controller): + def index(self, request, **kwargs): + plugin = manager.NeutronManager.get_plugin() + policy.enforce(request.context, + "get_%s" % DHCP_NETS, + {}) + return plugin.list_networks_on_dhcp_agent( + request.context, kwargs['agent_id']) + + def create(self, request, body, **kwargs): + plugin = manager.NeutronManager.get_plugin() + policy.enforce(request.context, + "create_%s" % DHCP_NET, + {}) + return plugin.add_network_to_dhcp_agent( + request.context, kwargs['agent_id'], body['network_id']) + + def delete(self, request, id, **kwargs): + plugin = manager.NeutronManager.get_plugin() + policy.enforce(request.context, + "delete_%s" % DHCP_NET, + {}) + return plugin.remove_network_from_dhcp_agent( + request.context, kwargs['agent_id'], id) + + +class DhcpAgentsHostingNetworkController(wsgi.Controller): + def index(self, request, **kwargs): + plugin = manager.NeutronManager.get_plugin() + policy.enforce(request.context, + "get_%s" % DHCP_AGENTS, + {}) + return plugin.list_dhcp_agents_hosting_network( + request.context, kwargs['network_id']) + + +class Dhcpagentscheduler(extensions.ExtensionDescriptor): + """Extension class supporting dhcp agent scheduler. 
+ """ + + @classmethod + def get_name(cls): + return "DHCP Agent Scheduler" + + @classmethod + def get_alias(cls): + return constants.DHCP_AGENT_SCHEDULER_EXT_ALIAS + + @classmethod + def get_description(cls): + return "Schedule networks among dhcp agents" + + @classmethod + def get_namespace(cls): + return "http://docs.openstack.org/ext/dhcp_agent_scheduler/api/v1.0" + + @classmethod + def get_updated(cls): + return "2013-02-07T10:00:00-00:00" + + @classmethod + def get_resources(cls): + """Returns Ext Resources.""" + exts = [] + parent = dict(member_name="agent", + collection_name="agents") + controller = resource.Resource(NetworkSchedulerController(), + base.FAULT_MAP) + exts.append(extensions.ResourceExtension( + DHCP_NETS, controller, parent)) + + parent = dict(member_name="network", + collection_name="networks") + + controller = resource.Resource(DhcpAgentsHostingNetworkController(), + base.FAULT_MAP) + exts.append(extensions.ResourceExtension( + DHCP_AGENTS, controller, parent)) + return exts + + def get_extended_resources(self, version): + return {} + + +class InvalidDHCPAgent(agent.AgentNotFound): + message = _("Agent %(id)s is not a valid DHCP Agent or has been disabled") + + +class NetworkHostedByDHCPAgent(exceptions.Conflict): + message = _("The network %(network_id)s has been already hosted" + " by the DHCP Agent %(agent_id)s.") + + +class NetworkNotHostedByDhcpAgent(exceptions.Conflict): + message = _("The network %(network_id)s is not hosted" + " by the DHCP agent %(agent_id)s.") + + +class DhcpAgentSchedulerPluginBase(object): + """REST API to operate the DHCP agent scheduler. + + All of method must be in an admin context. 
+ """ + + @abc.abstractmethod + def add_network_to_dhcp_agent(self, context, id, network_id): + pass + + @abc.abstractmethod + def remove_network_from_dhcp_agent(self, context, id, network_id): + pass + + @abc.abstractmethod + def list_networks_on_dhcp_agent(self, context, id): + pass + + @abc.abstractmethod + def list_dhcp_agents_hosting_network(self, context, network_id): + pass diff --git a/neutron/extensions/external_net.py b/neutron/extensions/external_net.py new file mode 100644 index 000000000..6e50e93cb --- /dev/null +++ b/neutron/extensions/external_net.py @@ -0,0 +1,68 @@ +# Copyright (c) 2013 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from neutron.api import extensions +from neutron.api.v2 import attributes as attr +from neutron.common import exceptions as qexception +from neutron.extensions import l3 + + +class ExternalNetworkInUse(qexception.InUse): + message = _("External network %(net_id)s cannot be updated to be made " + "non-external, since it has existing gateway ports") + + +# For backward compatibility the 'router' prefix is kept. 
+EXTERNAL = 'router:external' +EXTENDED_ATTRIBUTES_2_0 = { + 'networks': {EXTERNAL: {'allow_post': True, + 'allow_put': True, + 'default': attr.ATTR_NOT_SPECIFIED, + 'is_visible': True, + 'convert_to': attr.convert_to_boolean, + 'enforce_policy': True, + 'required_by_policy': True}}} + + +class External_net(extensions.ExtensionDescriptor): + + @classmethod + def get_name(cls): + return "Neutron external network" + + @classmethod + def get_alias(cls): + return "external-net" + + @classmethod + def get_description(cls): + return _("Adds external network attribute to network resource.") + + @classmethod + def get_namespace(cls): + return "http://docs.openstack.org/ext/neutron/external_net/api/v1.0" + + @classmethod + def get_updated(cls): + return "2013-01-14T10:00:00-00:00" + + def get_extended_resources(self, version): + if version == "2.0": + return EXTENDED_ATTRIBUTES_2_0 + else: + return {} + + def get_alias_namespace_compatibility_map(self): + return {l3.L3.get_alias(): l3.L3.get_namespace()} diff --git a/neutron/extensions/extra_dhcp_opt.py b/neutron/extensions/extra_dhcp_opt.py new file mode 100644 index 000000000..59a3a0c59 --- /dev/null +++ b/neutron/extensions/extra_dhcp_opt.py @@ -0,0 +1,91 @@ +# Copyright (c) 2013 OpenStack Foundation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# @Author Don Kehn, dekehn@gmail.com + +from neutron.api import extensions +from neutron.api.v2 import attributes as attr +from neutron.common import exceptions + + +# ExtraDHcpOpts Exceptions +class ExtraDhcpOptNotFound(exceptions.NotFound): + message = _("ExtraDhcpOpt %(id)s could not be found") + + +class ExtraDhcpOptBadData(exceptions.InvalidInput): + message = _("Invalid data format for extra-dhcp-opt: %(data)s") + + +def _validate_list_of_dict_or_none(data, key_specs=None): + if data is not None: + if not isinstance(data, list): + raise ExtraDhcpOptBadData(data=data) + for d in data: + msg = attr._validate_dict(d, key_specs) + if msg: + raise ExtraDhcpOptBadData(data=msg) + + +attr.validators['type:list_of_dict_or_none'] = _validate_list_of_dict_or_none + +# Attribute Map +EXTRADHCPOPTS = 'extra_dhcp_opts' + +EXTENDED_ATTRIBUTES_2_0 = { + 'ports': { + EXTRADHCPOPTS: + {'allow_post': True, + 'allow_put': True, + 'is_visible': True, + 'default': None, + 'validate': { + 'type:list_of_dict_or_none': { + 'id': {'type:uuid': None, 'required': False}, + 'opt_name': {'type:not_empty_string': None, + 'required': True}, + 'opt_value': {'type:not_empty_string_or_none': None, + 'required': True}}}}}} + + +class Extra_dhcp_opt(extensions.ExtensionDescriptor): + @classmethod + def get_name(cls): + return "Neutron Extra DHCP opts" + + @classmethod + def get_alias(cls): + return "extra_dhcp_opt" + + @classmethod + def get_description(cls): + return ("Extra options configuration for DHCP. " + "For example PXE boot options to DHCP clients can " + "be specified (e.g. 
tftp-server, server-ip-address, " + "bootfile-name)") + + @classmethod + def get_namespace(cls): + return "http://docs.openstack.org/ext/neutron/extra_dhcp_opt/api/v1.0" + + @classmethod + def get_updated(cls): + return "2013-03-17T12:00:00-00:00" + + def get_extended_resources(self, version): + if version == "2.0": + return EXTENDED_ATTRIBUTES_2_0 + else: + return {} diff --git a/neutron/extensions/extraroute.py b/neutron/extensions/extraroute.py new file mode 100644 index 000000000..7c63baa22 --- /dev/null +++ b/neutron/extensions/extraroute.py @@ -0,0 +1,74 @@ +# Copyright 2013, Nachi Ueno, NTT MCL, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +from neutron.api.v2 import attributes as attr +from neutron.common import exceptions as qexception + + +# Extra Routes Exceptions +class InvalidRoutes(qexception.InvalidInput): + message = _("Invalid format for routes: %(routes)s, %(reason)s") + + +class RouterInterfaceInUseByRoute(qexception.InUse): + message = _("Router interface for subnet %(subnet_id)s on router " + "%(router_id)s cannot be deleted, as it is required " + "by one or more routes.") + + +class RoutesExhausted(qexception.BadRequest): + message = _("Unable to complete operation for %(router_id)s. 
" + "The number of routes exceeds the maximum %(quota)s.") + +# Attribute Map +EXTENDED_ATTRIBUTES_2_0 = { + 'routers': { + 'routes': {'allow_post': False, 'allow_put': True, + 'validate': {'type:hostroutes': None}, + 'convert_to': attr.convert_none_to_empty_list, + 'is_visible': True, 'default': attr.ATTR_NOT_SPECIFIED}, + } +} + + +class Extraroute(): + + @classmethod + def get_name(cls): + return "Neutron Extra Route" + + @classmethod + def get_alias(cls): + return "extraroute" + + @classmethod + def get_description(cls): + return "Extra routes configuration for L3 router" + + @classmethod + def get_namespace(cls): + return "http://docs.openstack.org/ext/neutron/extraroutes/api/v1.0" + + @classmethod + def get_updated(cls): + return "2013-02-01T10:00:00-00:00" + + def get_extended_resources(self, version): + if version == "2.0": + attr.PLURALS.update({'routes': 'route'}) + return EXTENDED_ATTRIBUTES_2_0 + else: + return {} diff --git a/neutron/extensions/firewall.py b/neutron/extensions/firewall.py new file mode 100644 index 000000000..bbb5d163e --- /dev/null +++ b/neutron/extensions/firewall.py @@ -0,0 +1,431 @@ +# Copyright 2013 Big Switch Networks, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Sumit Naiksatam, sumitnaiksatam@gmail.com, Big Switch Networks, Inc. 
+ +import abc + +from oslo.config import cfg +import six + +from neutron.api import extensions +from neutron.api.v2 import attributes as attr +from neutron.api.v2 import resource_helper +from neutron.common import exceptions as qexception +from neutron.openstack.common import log as logging +from neutron.plugins.common import constants +from neutron.services import service_base + + +LOG = logging.getLogger(__name__) + + +# Firewall Exceptions +class FirewallNotFound(qexception.NotFound): + message = _("Firewall %(firewall_id)s could not be found.") + + +class FirewallInUse(qexception.InUse): + message = _("Firewall %(firewall_id)s is still active.") + + +class FirewallInPendingState(qexception.Conflict): + message = _("Operation cannot be performed since associated Firewall " + "%(firewall_id)s is in %(pending_state)s.") + + +class FirewallPolicyNotFound(qexception.NotFound): + message = _("Firewall Policy %(firewall_policy_id)s could not be found.") + + +class FirewallPolicyInUse(qexception.InUse): + message = _("Firewall Policy %(firewall_policy_id)s is being used.") + + +class FirewallRuleNotFound(qexception.NotFound): + message = _("Firewall Rule %(firewall_rule_id)s could not be found.") + + +class FirewallRuleInUse(qexception.InUse): + message = _("Firewall Rule %(firewall_rule_id)s is being used.") + + +class FirewallRuleNotAssociatedWithPolicy(qexception.InvalidInput): + message = _("Firewall Rule %(firewall_rule_id)s is not associated " + " with Firewall Policy %(firewall_policy_id)s.") + + +class FirewallRuleInvalidProtocol(qexception.InvalidInput): + message = _("Firewall Rule protocol %(protocol)s is not supported. " + "Only protocol values %(values)s and their integer " + "representation (0 to 255) are supported.") + + +class FirewallRuleInvalidAction(qexception.InvalidInput): + message = _("Firewall rule action %(action)s is not supported. 
" + "Only action values %(values)s are supported.") + + +class FirewallRuleInvalidICMPParameter(qexception.InvalidInput): + message = _("%(param)s are not allowed when protocol " + "is set to ICMP.") + + +class FirewallInvalidPortValue(qexception.InvalidInput): + message = _("Invalid value for port %(port)s.") + + +class FirewallRuleInfoMissing(qexception.InvalidInput): + message = _("Missing rule info argument for insert/remove " + "rule operation.") + + +class FirewallInternalDriverError(qexception.NeutronException): + """Fwaas exception for all driver errors. + + On any failure or exception in the driver, driver should log it and + raise this exception to the agent + """ + message = _("%(driver)s: Internal driver error.") + + +fw_valid_protocol_values = [None, constants.TCP, constants.UDP, constants.ICMP] +fw_valid_action_values = [constants.FWAAS_ALLOW, constants.FWAAS_DENY] + + +def convert_protocol(value): + if value is None: + return + if value.isdigit(): + val = int(value) + if 0 <= val <= 255: + return val + else: + raise FirewallRuleInvalidProtocol(protocol=value, + values= + fw_valid_protocol_values) + elif value.lower() in fw_valid_protocol_values: + return value.lower() + else: + raise FirewallRuleInvalidProtocol(protocol=value, + values= + fw_valid_protocol_values) + + +def convert_action_to_case_insensitive(value): + if value is None: + return + else: + return value.lower() + + +def convert_port_to_string(value): + if value is None: + return + else: + return str(value) + + +def _validate_port_range(data, key_specs=None): + if data is None: + return + data = str(data) + ports = data.split(':') + for p in ports: + try: + val = int(p) + except (ValueError, TypeError): + msg = _("Port '%s' is not a valid number") % p + LOG.debug(msg) + return msg + if val <= 0 or val > 65535: + msg = _("Invalid port '%s'") % p + LOG.debug(msg) + return msg + + +def _validate_ip_or_subnet_or_none(data, valid_values=None): + if data is None: + return None + msg_ip = 
attr._validate_ip_address(data, valid_values) + if not msg_ip: + return + msg_subnet = attr._validate_subnet(data, valid_values) + if not msg_subnet: + return + return _("%(msg_ip)s and %(msg_subnet)s") % {'msg_ip': msg_ip, + 'msg_subnet': msg_subnet} + + +attr.validators['type:port_range'] = _validate_port_range +attr.validators['type:ip_or_subnet_or_none'] = _validate_ip_or_subnet_or_none + + +RESOURCE_ATTRIBUTE_MAP = { + 'firewall_rules': { + 'id': {'allow_post': False, 'allow_put': False, + 'validate': {'type:uuid': None}, + 'is_visible': True, 'primary_key': True}, + 'tenant_id': {'allow_post': True, 'allow_put': False, + 'required_by_policy': True, + 'is_visible': True}, + 'name': {'allow_post': True, 'allow_put': True, + 'validate': {'type:string': None}, + 'is_visible': True, 'default': ''}, + 'description': {'allow_post': True, 'allow_put': True, + 'validate': {'type:string': None}, + 'is_visible': True, 'default': ''}, + 'firewall_policy_id': {'allow_post': False, 'allow_put': False, + 'validate': {'type:uuid_or_none': None}, + 'is_visible': True}, + 'shared': {'allow_post': True, 'allow_put': True, + 'default': False, 'convert_to': attr.convert_to_boolean, + 'is_visible': True, 'required_by_policy': True, + 'enforce_policy': True}, + 'protocol': {'allow_post': True, 'allow_put': True, + 'is_visible': True, 'default': None, + 'convert_to': convert_protocol, + 'validate': {'type:values': fw_valid_protocol_values}}, + 'ip_version': {'allow_post': True, 'allow_put': True, + 'default': 4, 'convert_to': attr.convert_to_int, + 'validate': {'type:values': [4, 6]}, + 'is_visible': True}, + 'source_ip_address': {'allow_post': True, 'allow_put': True, + 'validate': {'type:ip_or_subnet_or_none': None}, + 'is_visible': True, 'default': None}, + 'destination_ip_address': {'allow_post': True, 'allow_put': True, + 'validate': {'type:ip_or_subnet_or_none': + None}, + 'is_visible': True, 'default': None}, + 'source_port': {'allow_post': True, 'allow_put': True, + 
'validate': {'type:port_range': None}, + 'convert_to': convert_port_to_string, + 'default': None, 'is_visible': True}, + 'destination_port': {'allow_post': True, 'allow_put': True, + 'validate': {'type:port_range': None}, + 'convert_to': convert_port_to_string, + 'default': None, 'is_visible': True}, + 'position': {'allow_post': False, 'allow_put': False, + 'default': None, 'is_visible': True}, + 'action': {'allow_post': True, 'allow_put': True, + 'convert_to': convert_action_to_case_insensitive, + 'validate': {'type:values': fw_valid_action_values}, + 'is_visible': True, 'default': 'deny'}, + 'enabled': {'allow_post': True, 'allow_put': True, + 'default': True, 'convert_to': attr.convert_to_boolean, + 'is_visible': True}, + }, + 'firewall_policies': { + 'id': {'allow_post': False, 'allow_put': False, + 'validate': {'type:uuid': None}, + 'is_visible': True, + 'primary_key': True}, + 'tenant_id': {'allow_post': True, 'allow_put': False, + 'required_by_policy': True, + 'is_visible': True}, + 'name': {'allow_post': True, 'allow_put': True, + 'validate': {'type:string': None}, + 'is_visible': True, 'default': ''}, + 'description': {'allow_post': True, 'allow_put': True, + 'validate': {'type:string': None}, + 'is_visible': True, 'default': ''}, + 'shared': {'allow_post': True, 'allow_put': True, + 'default': False, 'convert_to': attr.convert_to_boolean, + 'is_visible': True, 'required_by_policy': True, + 'enforce_policy': True}, + 'firewall_rules': {'allow_post': True, 'allow_put': True, + 'validate': {'type:uuid_list': None}, + 'convert_to': attr.convert_none_to_empty_list, + 'default': None, 'is_visible': True}, + 'audited': {'allow_post': True, 'allow_put': True, + 'default': False, 'convert_to': attr.convert_to_boolean, + 'is_visible': True}, + }, + 'firewalls': { + 'id': {'allow_post': False, 'allow_put': False, + 'validate': {'type:uuid': None}, + 'is_visible': True, + 'primary_key': True}, + 'tenant_id': {'allow_post': True, 'allow_put': False, + 
'required_by_policy': True, + 'is_visible': True}, + 'name': {'allow_post': True, 'allow_put': True, + 'validate': {'type:string': None}, + 'is_visible': True, 'default': ''}, + 'description': {'allow_post': True, 'allow_put': True, + 'validate': {'type:string': None}, + 'is_visible': True, 'default': ''}, + 'admin_state_up': {'allow_post': True, 'allow_put': True, + 'default': True, + 'convert_to': attr.convert_to_boolean, + 'is_visible': True}, + 'status': {'allow_post': False, 'allow_put': False, + 'is_visible': True}, + 'shared': {'allow_post': True, 'allow_put': True, + 'default': False, 'convert_to': attr.convert_to_boolean, + 'is_visible': False, 'required_by_policy': True, + 'enforce_policy': True}, + 'firewall_policy_id': {'allow_post': True, 'allow_put': True, + 'validate': {'type:uuid_or_none': None}, + 'is_visible': True}, + }, +} + +firewall_quota_opts = [ + cfg.IntOpt('quota_firewall', + default=1, + help=_('Number of firewalls allowed per tenant. ' + 'A negative value means unlimited.')), + cfg.IntOpt('quota_firewall_policy', + default=1, + help=_('Number of firewall policies allowed per tenant. ' + 'A negative value means unlimited.')), + cfg.IntOpt('quota_firewall_rule', + default=-1, + help=_('Number of firewall rules allowed per tenant. 
cfg.CONF.register_opts(firewall_quota_opts, 'QUOTAS')


class Firewall(extensions.ExtensionDescriptor):
    """API extension descriptor for Firewall-as-a-Service (FWaaS)."""

    @classmethod
    def get_name(cls):
        return "Firewall service"

    @classmethod
    def get_alias(cls):
        return "fwaas"

    @classmethod
    def get_description(cls):
        return "Extension for Firewall service"

    @classmethod
    def get_namespace(cls):
        return "http://wiki.openstack.org/Neutron/FWaaS/API_1.0"

    @classmethod
    def get_updated(cls):
        return "2013-02-25T10:00:00-00:00"

    @classmethod
    def get_resources(cls):
        """Build API resources for firewalls, firewall rules and policies."""
        # 'firewall_policies' does not pluralize regularly; map it by hand.
        irregular_plurals = {'firewall_policies': 'firewall_policy'}
        plurals = resource_helper.build_plural_mappings(
            irregular_plurals, RESOURCE_ATTRIBUTE_MAP)
        attr.PLURALS.update(plurals)
        # Rule (de)association on a policy is exposed as PUT member actions.
        policy_actions = {'insert_rule': 'PUT', 'remove_rule': 'PUT'}
        return resource_helper.build_resource_info(
            plurals,
            RESOURCE_ATTRIBUTE_MAP,
            constants.FIREWALL,
            action_map={'firewall_policy': policy_actions})

    @classmethod
    def get_plugin_interface(cls):
        return FirewallPluginBase

    def update_attributes_map(self, attributes):
        super(Firewall, self).update_attributes_map(
            attributes, extension_attrs_map=RESOURCE_ATTRIBUTE_MAP)

    def get_extended_resources(self, version):
        # Only API v2.0 carries the FWaaS resources.
        return RESOURCE_ATTRIBUTE_MAP if version == "2.0" else {}


@six.add_metaclass(abc.ABCMeta)
class FirewallPluginBase(service_base.ServicePluginBase):
    """Abstract CRUD interface an FWaaS service plugin must implement."""

    def get_plugin_name(self):
        return constants.FIREWALL

    def get_plugin_type(self):
        return constants.FIREWALL

    def get_plugin_description(self):
        return 'Firewall service plugin'

    # -- firewalls ----------------------------------------------------

    @abc.abstractmethod
    def get_firewalls(self, context, filters=None, fields=None):
        pass

    @abc.abstractmethod
    def get_firewall(self, context, id, fields=None):
        pass

    @abc.abstractmethod
    def create_firewall(self, context, firewall):
        pass

    @abc.abstractmethod
    def update_firewall(self, context, id, firewall):
        pass

    @abc.abstractmethod
    def delete_firewall(self, context, id):
        pass

    # -- firewall rules -----------------------------------------------

    @abc.abstractmethod
    def get_firewall_rules(self, context, filters=None, fields=None):
        pass

    @abc.abstractmethod
    def get_firewall_rule(self, context, id, fields=None):
        pass

    @abc.abstractmethod
    def create_firewall_rule(self, context, firewall_rule):
        pass

    @abc.abstractmethod
    def update_firewall_rule(self, context, id, firewall_rule):
        pass

    @abc.abstractmethod
    def delete_firewall_rule(self, context, id):
        pass

    # -- firewall policies --------------------------------------------

    @abc.abstractmethod
    def get_firewall_policy(self, context, id, fields=None):
        pass

    @abc.abstractmethod
    def get_firewall_policies(self, context, filters=None, fields=None):
        pass

    @abc.abstractmethod
    def create_firewall_policy(self, context, firewall_policy):
        pass

    @abc.abstractmethod
    def update_firewall_policy(self, context, id, firewall_policy):
        pass

    @abc.abstractmethod
    def delete_firewall_policy(self, context, id):
        pass

    # Rule insertion/removal on a policy (the PUT member actions above).

    @abc.abstractmethod
    def insert_rule(self, context, id, rule_info):
        pass

    @abc.abstractmethod
    def remove_rule(self, context, id, rule_info):
        pass
from neutron.api import extensions
from neutron.api.v2 import attributes
from neutron.openstack.common import log as logging


LOG = logging.getLogger(__name__)

# Attribute names carried on networks and routers when this
# extension is loaded.
FLAVOR_NETWORK = 'flavor:network'
FLAVOR_ROUTER = 'flavor:router'

# Both resources share the same attribute shape: settable on POST only,
# visible, and unset by default.
_FLAVOR_ATTR_SPEC = {'allow_post': True,
                     'allow_put': False,
                     'is_visible': True,
                     'default': attributes.ATTR_NOT_SPECIFIED}

FLAVOR_ATTRIBUTE = {
    'networks': {FLAVOR_NETWORK: dict(_FLAVOR_ATTR_SPEC)},
    'routers': {FLAVOR_ROUTER: dict(_FLAVOR_ATTR_SPEC)},
}


class Flavor(extensions.ExtensionDescriptor):
    """Extension adding a provider 'flavor' attribute to networks/routers."""

    @classmethod
    def get_name(cls):
        return "Flavor support for network and router"

    @classmethod
    def get_alias(cls):
        return "flavor"

    @classmethod
    def get_description(cls):
        return "Flavor"

    @classmethod
    def get_namespace(cls):
        return "http://docs.openstack.org/ext/flavor/api/v1.0"

    @classmethod
    def get_updated(cls):
        return "2012-07-20T10:00:00-00:00"

    def get_extended_resources(self, version):
        # Flavor attributes exist only for API v2.0.
        return FLAVOR_ATTRIBUTE if version == "2.0" else {}
import abc

from oslo.config import cfg
import six

from neutron.api import extensions
from neutron.api.v2 import attributes as attr
from neutron.api.v2 import resource_helper
from neutron.common import exceptions as qexception
from neutron.plugins.common import constants


# L3 Exceptions
class RouterNotFound(qexception.NotFound):
    message = _("Router %(router_id)s could not be found")


class RouterInUse(qexception.InUse):
    message = _("Router %(router_id)s still has ports")


class RouterInterfaceNotFound(qexception.NotFound):
    message = _("Router %(router_id)s does not have "
                "an interface with id %(port_id)s")


class RouterInterfaceNotFoundForSubnet(qexception.NotFound):
    message = _("Router %(router_id)s has no interface "
                "on subnet %(subnet_id)s")


class RouterInterfaceInUseByFloatingIP(qexception.InUse):
    message = _("Router interface for subnet %(subnet_id)s on router "
                "%(router_id)s cannot be deleted, as it is required "
                "by one or more floating IPs.")


class FloatingIPNotFound(qexception.NotFound):
    message = _("Floating IP %(floatingip_id)s could not be found")


class ExternalGatewayForFloatingIPNotFound(qexception.NotFound):
    message = _("External network %(external_network_id)s is not reachable "
                "from subnet %(subnet_id)s.  Therefore, cannot associate "
                "Port %(port_id)s with a Floating IP.")


class FloatingIPPortAlreadyAssociated(qexception.InUse):
    message = _("Cannot associate floating IP %(floating_ip_address)s "
                "(%(fip_id)s) with port %(port_id)s "
                "using fixed IP %(fixed_ip)s, as that fixed IP already "
                "has a floating IP on external network %(net_id)s.")


class L3PortInUse(qexception.InUse):
    message = _("Port %(port_id)s has owner %(device_owner)s and therefore"
                " cannot be deleted directly via the port API.")


class RouterExternalGatewayInUseByFloatingIp(qexception.InUse):
    message = _("Gateway cannot be updated for router %(router_id)s, since a "
                "gateway to external network %(net_id)s is required by one or "
                "more floating IPs.")

ROUTERS = 'routers'
EXTERNAL_GW_INFO = 'external_gateway_info'

# REST attribute maps for the router and floatingip resources.
RESOURCE_ATTRIBUTE_MAP = {
    ROUTERS: {
        'id': {'allow_post': False, 'allow_put': False,
               'validate': {'type:uuid': None},
               'is_visible': True,
               'primary_key': True},
        'name': {'allow_post': True, 'allow_put': True,
                 'validate': {'type:string': None},
                 'is_visible': True, 'default': ''},
        'admin_state_up': {'allow_post': True, 'allow_put': True,
                           'default': True,
                           'convert_to': attr.convert_to_boolean,
                           'is_visible': True},
        'status': {'allow_post': False, 'allow_put': False,
                   'is_visible': True},
        'tenant_id': {'allow_post': True, 'allow_put': False,
                      'required_by_policy': True,
                      'validate': {'type:string': None},
                      'is_visible': True},
        EXTERNAL_GW_INFO: {'allow_post': True, 'allow_put': True,
                           'is_visible': True, 'default': None,
                           'enforce_policy': True}
    },
    'floatingips': {
        'id': {'allow_post': False, 'allow_put': False,
               'validate': {'type:uuid': None},
               'is_visible': True,
               'primary_key': True},
        'floating_ip_address': {'allow_post': False, 'allow_put': False,
                                'validate': {'type:ip_address_or_none': None},
                                'is_visible': True},
        'floating_network_id': {'allow_post': True, 'allow_put': False,
                                'validate': {'type:uuid': None},
                                'is_visible': True},
        'router_id': {'allow_post': False, 'allow_put': False,
                      'validate': {'type:uuid_or_none': None},
                      'is_visible': True, 'default': None},
        'port_id': {'allow_post': True, 'allow_put': True,
                    'validate': {'type:uuid_or_none': None},
                    'is_visible': True, 'default': None,
                    'required_by_policy': True},
        'fixed_ip_address': {'allow_post': True, 'allow_put': True,
                             'validate': {'type:ip_address_or_none': None},
                             'is_visible': True, 'default': None},
        'tenant_id': {'allow_post': True, 'allow_put': False,
                      'required_by_policy': True,
                      'validate': {'type:string': None},
                      'is_visible': True},
        'status': {'allow_post': False, 'allow_put': False,
                   'is_visible': True},
    },
}

l3_quota_opts = [
    cfg.IntOpt('quota_router',
               default=10,
               help=_('Number of routers allowed per tenant. '
                      'A negative value means unlimited.')),
    cfg.IntOpt('quota_floatingip',
               default=50,
               help=_('Number of floating IPs allowed per tenant. '
                      'A negative value means unlimited.')),
]
cfg.CONF.register_opts(l3_quota_opts, 'QUOTAS')


class L3(extensions.ExtensionDescriptor):
    """API extension descriptor for L3 routing and floating IPs."""

    @classmethod
    def get_name(cls):
        return "Neutron L3 Router"

    @classmethod
    def get_alias(cls):
        return "router"

    @classmethod
    def get_description(cls):
        return ("Router abstraction for basic L3 forwarding"
                " between L2 Neutron networks and access to external"
                " networks via a NAT gateway.")

    @classmethod
    def get_namespace(cls):
        return "http://docs.openstack.org/ext/neutron/router/api/v1.0"

    @classmethod
    def get_updated(cls):
        return "2012-07-20T10:00:00-00:00"

    @classmethod
    def get_resources(cls):
        """Returns Ext Resources."""
        plural_mappings = resource_helper.build_plural_mappings(
            {}, RESOURCE_ATTRIBUTE_MAP)
        attr.PLURALS.update(plural_mappings)
        # Interface attach/detach are PUT member actions on a router.
        action_map = {'router': {'add_router_interface': 'PUT',
                                 'remove_router_interface': 'PUT'}}
        return resource_helper.build_resource_info(plural_mappings,
                                                   RESOURCE_ATTRIBUTE_MAP,
                                                   constants.L3_ROUTER_NAT,
                                                   action_map=action_map,
                                                   register_quota=True)

    def update_attributes_map(self, attributes):
        super(L3, self).update_attributes_map(
            attributes, extension_attrs_map=RESOURCE_ATTRIBUTE_MAP)

    def get_extended_resources(self, version):
        if version == "2.0":
            return RESOURCE_ATTRIBUTE_MAP
        else:
            return {}


# Fix: without an ABCMeta metaclass the @abc.abstractmethod decorators
# below were inert and incomplete plugins could be instantiated silently.
# six.add_metaclass keeps this consistent with the other service plugin
# bases in this tree (e.g. FirewallPluginBase, LoadBalancerPluginBase).
@six.add_metaclass(abc.ABCMeta)
class RouterPluginBase(object):
    """Abstract interface an L3 routing service plugin must implement."""

    @abc.abstractmethod
    def create_router(self, context, router):
        pass

    @abc.abstractmethod
    def update_router(self, context, id, router):
        pass

    @abc.abstractmethod
    def get_router(self, context, id, fields=None):
        pass

    @abc.abstractmethod
    def delete_router(self, context, id):
        pass

    @abc.abstractmethod
    def get_routers(self, context, filters=None, fields=None,
                    sorts=None, limit=None, marker=None, page_reverse=False):
        pass

    @abc.abstractmethod
    def add_router_interface(self, context, router_id, interface_info):
        pass

    @abc.abstractmethod
    def remove_router_interface(self, context, router_id, interface_info):
        pass

    @abc.abstractmethod
    def create_floatingip(self, context, floatingip):
        pass

    @abc.abstractmethod
    def update_floatingip(self, context, id, floatingip):
        pass

    @abc.abstractmethod
    def get_floatingip(self, context, id, fields=None):
        pass

    @abc.abstractmethod
    def delete_floatingip(self, context, id):
        pass

    @abc.abstractmethod
    def get_floatingips(self, context, filters=None, fields=None,
                        sorts=None, limit=None, marker=None,
                        page_reverse=False):
        pass

    # Optional: plugins may override the counters; the API layer falls
    # back gracefully when NotImplementedError is raised.
    def get_routers_count(self, context, filters=None):
        raise NotImplementedError()

    def get_floatingips_count(self, context, filters=None):
        raise NotImplementedError()
from neutron.api import extensions
from neutron.api.v2 import attributes as attrs
from neutron.extensions import l3


# Re-declares the router's external_gateway_info attribute, adding
# validation that accepts an optional 'enable_snat' flag alongside the
# mandatory 'network_id'.
EXTENDED_ATTRIBUTES_2_0 = {
    'routers': {
        l3.EXTERNAL_GW_INFO: {
            'allow_post': True,
            'allow_put': True,
            'is_visible': True,
            'default': None,
            'enforce_policy': True,
            'validate': {
                'type:dict_or_nodata': {
                    'network_id': {'type:uuid': None, 'required': True},
                    'enable_snat': {'type:boolean': None, 'required': False,
                                    'convert_to': attrs.convert_to_boolean},
                }
            },
        }
    }
}


class L3_ext_gw_mode(extensions.ExtensionDescriptor):
    """Extension descriptor for configurable SNAT on router gateways."""

    @classmethod
    def get_name(cls):
        return "Neutron L3 Configurable external gateway mode"

    @classmethod
    def get_alias(cls):
        return "ext-gw-mode"

    @classmethod
    def get_description(cls):
        return ("Extension of the router abstraction for specifying whether "
                "SNAT should occur on the external gateway")

    @classmethod
    def get_namespace(cls):
        return "http://docs.openstack.org/ext/neutron/ext-gw-mode/api/v1.0"

    @classmethod
    def get_updated(cls):
        return "2013-03-28T10:00:00-00:00"

    def get_required_extensions(self):
        # This extension only makes sense on top of the base router API.
        return ["router"]

    def get_extended_resources(self, version):
        if version != "2.0":
            return {}
        # Hand out a fresh top-level dict so callers cannot mutate ours.
        return dict(EXTENDED_ATTRIBUTES_2_0.items())
import abc

import webob.exc

from neutron.api import extensions
from neutron.api.v2 import base
from neutron.api.v2 import resource
from neutron.common import constants
from neutron.common import exceptions
from neutron.extensions import agent
from neutron import manager
from neutron.openstack.common import log as logging
from neutron.plugins.common import constants as service_constants
from neutron import policy
from neutron import wsgi


LOG = logging.getLogger(__name__)


L3_ROUTER = 'l3-router'
L3_ROUTERS = L3_ROUTER + 's'
L3_AGENT = 'l3-agent'
L3_AGENTS = L3_AGENT + 's'


def _get_l3_plugin():
    """Return the registered L3 routing service plugin.

    Raises webob.exc.HTTPNotFound when no L3 plugin is loaded, so the
    API replies 404 instead of crashing on a None plugin.
    """
    plugin = manager.NeutronManager.get_service_plugins().get(
        service_constants.L3_ROUTER_NAT)
    if not plugin:
        LOG.error(_('No plugin for L3 routing registered to handle '
                    'router scheduling'))
        msg = _('The resource could not be found.')
        raise webob.exc.HTTPNotFound(msg)
    return plugin


class RouterSchedulerController(wsgi.Controller):
    """Handles /agents/<agent_id>/l3-routers requests."""

    def get_plugin(self):
        # Shared lookup with missing-plugin handling (see _get_l3_plugin).
        return _get_l3_plugin()

    def index(self, request, **kwargs):
        plugin = self.get_plugin()
        policy.enforce(request.context,
                       "get_%s" % L3_ROUTERS,
                       {})
        return plugin.list_routers_on_l3_agent(
            request.context, kwargs['agent_id'])

    def create(self, request, body, **kwargs):
        plugin = self.get_plugin()
        policy.enforce(request.context,
                       "create_%s" % L3_ROUTER,
                       {})
        return plugin.add_router_to_l3_agent(
            request.context,
            kwargs['agent_id'],
            body['router_id'])

    def delete(self, request, id, **kwargs):
        plugin = self.get_plugin()
        policy.enforce(request.context,
                       "delete_%s" % L3_ROUTER,
                       {})
        return plugin.remove_router_from_l3_agent(
            request.context, kwargs['agent_id'], id)


class L3AgentsHostingRouterController(wsgi.Controller):
    """Handles /routers/<router_id>/l3-agents requests."""

    def get_plugin(self):
        return _get_l3_plugin()

    def index(self, request, **kwargs):
        # Fix: go through get_plugin() so a missing L3 plugin yields a
        # 404; the previous direct lookup could return None and raise
        # AttributeError below.
        plugin = self.get_plugin()
        policy.enforce(request.context,
                       "get_%s" % L3_AGENTS,
                       {})
        return plugin.list_l3_agents_hosting_router(
            request.context, kwargs['router_id'])


class L3agentscheduler(extensions.ExtensionDescriptor):
    """Extension class supporting l3 agent scheduler.
    """

    @classmethod
    def get_name(cls):
        return "L3 Agent Scheduler"

    @classmethod
    def get_alias(cls):
        return constants.L3_AGENT_SCHEDULER_EXT_ALIAS

    @classmethod
    def get_description(cls):
        return "Schedule routers among l3 agents"

    @classmethod
    def get_namespace(cls):
        return "http://docs.openstack.org/ext/l3_agent_scheduler/api/v1.0"

    @classmethod
    def get_updated(cls):
        return "2013-02-07T10:00:00-00:00"

    @classmethod
    def get_resources(cls):
        """Returns Ext Resources."""
        exts = []

        # Routers hosted by a given agent: /agents/<id>/l3-routers.
        parent = dict(member_name="agent",
                      collection_name="agents")
        controller = resource.Resource(RouterSchedulerController(),
                                       base.FAULT_MAP)
        exts.append(extensions.ResourceExtension(
            L3_ROUTERS, controller, parent))

        # Agents hosting a given router: /routers/<id>/l3-agents.
        parent = dict(member_name="router",
                      collection_name="routers")
        controller = resource.Resource(L3AgentsHostingRouterController(),
                                       base.FAULT_MAP)
        exts.append(extensions.ResourceExtension(
            L3_AGENTS, controller, parent))
        return exts

    def get_extended_resources(self, version):
        return {}


class InvalidL3Agent(agent.AgentNotFound):
    message = _("Agent %(id)s is not a L3 Agent or has been disabled")


class RouterHostedByL3Agent(exceptions.Conflict):
    message = _("The router %(router_id)s has been already hosted"
                " by the L3 Agent %(agent_id)s.")


class RouterSchedulingFailed(exceptions.Conflict):
    message = _("Failed scheduling router %(router_id)s to"
                " the L3 Agent %(agent_id)s.")


class RouterReschedulingFailed(exceptions.Conflict):
    message = _("Failed rescheduling router %(router_id)s: "
                "no eligible l3 agent found.")


class RouterNotHostedByL3Agent(exceptions.Conflict):
    message = _("The router %(router_id)s is not hosted"
                " by L3 agent %(agent_id)s.")


class L3AgentSchedulerPluginBase(object):
    """REST API to operate the l3 agent scheduler.

    All of method must be in an admin context.
    """

    # NOTE(review): @abc.abstractmethod is not enforced here because the
    # class has no ABCMeta metaclass — confirm whether that is intended.

    @abc.abstractmethod
    def add_router_to_l3_agent(self, context, id, router_id):
        pass

    @abc.abstractmethod
    def remove_router_from_l3_agent(self, context, id, router_id):
        pass

    @abc.abstractmethod
    def list_routers_on_l3_agent(self, context, id):
        pass

    @abc.abstractmethod
    def list_l3_agents_hosting_router(self, context, router_id):
        pass
import abc

from neutron.api import extensions
from neutron.api.v2 import base
from neutron.api.v2 import resource
from neutron.common import constants
from neutron.extensions import agent
from neutron.extensions import loadbalancer
from neutron import manager
from neutron.plugins.common import constants as plugin_const
from neutron import policy
from neutron import wsgi

LOADBALANCER_POOL = 'loadbalancer-pool'
LOADBALANCER_POOLS = LOADBALANCER_POOL + 's'
LOADBALANCER_AGENT = 'loadbalancer-agent'


class PoolSchedulerController(wsgi.Controller):
    """Handles /agents/<agent_id>/loadbalancer-pools requests."""

    def index(self, request, **kwargs):
        plugin = manager.NeutronManager.get_service_plugins().get(
            plugin_const.LOADBALANCER)
        # Without an LBaaS plugin there is nothing to list; reply with
        # an empty collection rather than an error.
        if not plugin:
            return {'pools': []}

        policy.enforce(request.context,
                       "get_%s" % LOADBALANCER_POOLS,
                       {},
                       plugin=plugin)
        return plugin.list_pools_on_lbaas_agent(
            request.context, kwargs['agent_id'])


class LbaasAgentHostingPoolController(wsgi.Controller):
    """Handles /pools/<pool_id>/loadbalancer-agent requests."""

    def index(self, request, **kwargs):
        plugin = manager.NeutronManager.get_service_plugins().get(
            plugin_const.LOADBALANCER)
        # No LBaaS plugin registered: nothing to report.
        if not plugin:
            return

        policy.enforce(request.context,
                       "get_%s" % LOADBALANCER_AGENT,
                       {},
                       plugin=plugin)
        return plugin.get_lbaas_agent_hosting_pool(
            request.context, kwargs['pool_id'])


class Lbaas_agentscheduler(extensions.ExtensionDescriptor):
    """Extension class supporting LBaaS agent scheduler.
    """

    @classmethod
    def get_name(cls):
        return "Loadbalancer Agent Scheduler"

    @classmethod
    def get_alias(cls):
        return constants.LBAAS_AGENT_SCHEDULER_EXT_ALIAS

    @classmethod
    def get_description(cls):
        return "Schedule pools among lbaas agents"

    @classmethod
    def get_namespace(cls):
        return "http://docs.openstack.org/ext/lbaas_agent_scheduler/api/v1.0"

    @classmethod
    def get_updated(cls):
        return "2013-02-07T10:00:00-00:00"

    @classmethod
    def get_resources(cls):
        """Returns Ext Resources."""
        exts = []

        # Pools hosted by an agent: /agents/<id>/loadbalancer-pools.
        agent_parent = dict(member_name="agent",
                            collection_name="agents")
        pools_ctrl = resource.Resource(PoolSchedulerController(),
                                       base.FAULT_MAP)
        exts.append(extensions.ResourceExtension(
            LOADBALANCER_POOLS, pools_ctrl, agent_parent))

        # Agent hosting a pool, nested under the LBaaS URI prefix.
        pool_parent = dict(member_name="pool",
                           collection_name="pools")
        agent_ctrl = resource.Resource(LbaasAgentHostingPoolController(),
                                       base.FAULT_MAP)
        exts.append(extensions.ResourceExtension(
            LOADBALANCER_AGENT, agent_ctrl, pool_parent,
            path_prefix=plugin_const.
            COMMON_PREFIXES[plugin_const.LOADBALANCER]))
        return exts

    def get_extended_resources(self, version):
        return {}


class NoEligibleLbaasAgent(loadbalancer.NoEligibleBackend):
    message = _("No eligible loadbalancer agent found "
                "for pool %(pool_id)s.")


class NoActiveLbaasAgent(agent.AgentNotFound):
    message = _("No active loadbalancer agent found "
                "for pool %(pool_id)s.")


class LbaasAgentSchedulerPluginBase(object):
    """REST API to operate the lbaas agent scheduler.

    All of method must be in an admin context.
    """

    @abc.abstractmethod
    def list_pools_on_lbaas_agent(self, context, id):
        pass

    @abc.abstractmethod
    def get_lbaas_agent_hosting_pool(self, context, pool_id):
        pass
import abc

from oslo.config import cfg
import six

from neutron.api import extensions
from neutron.api.v2 import attributes as attr
from neutron.api.v2 import base
from neutron.api.v2 import resource_helper
from neutron.common import exceptions as qexception
from neutron import manager
from neutron.plugins.common import constants
from neutron.services import service_base


# Loadbalancer Exceptions
class DelayOrTimeoutInvalid(qexception.BadRequest):
    message = _("Delay must be greater than or equal to timeout")


class NoEligibleBackend(qexception.NotFound):
    message = _("No eligible backend for pool %(pool_id)s")


class VipNotFound(qexception.NotFound):
    message = _("Vip %(vip_id)s could not be found")


class VipExists(qexception.NeutronException):
    message = _("Another Vip already exists for pool %(pool_id)s")


class PoolNotFound(qexception.NotFound):
    message = _("Pool %(pool_id)s could not be found")


class MemberNotFound(qexception.NotFound):
    message = _("Member %(member_id)s could not be found")


class HealthMonitorNotFound(qexception.NotFound):
    message = _("Health_monitor %(monitor_id)s could not be found")


class PoolMonitorAssociationNotFound(qexception.NotFound):
    message = _("Monitor %(monitor_id)s is not associated "
                "with Pool %(pool_id)s")


class PoolMonitorAssociationExists(qexception.Conflict):
    message = _('health_monitor %(monitor_id)s is already associated '
                'with pool %(pool_id)s')


class StateInvalid(qexception.NeutronException):
    message = _("Invalid state %(state)s of Loadbalancer resource %(id)s")


class PoolInUse(qexception.InUse):
    message = _("Pool %(pool_id)s is still in use")


class HealthMonitorInUse(qexception.InUse):
    message = _("Health monitor %(monitor_id)s still has associations with "
                "pools")


class PoolStatsNotFound(qexception.NotFound):
    message = _("Statistics of Pool %(pool_id)s could not be found")


class ProtocolMismatch(qexception.BadRequest):
    message = _("Protocol %(vip_proto)s does not match "
                "pool protocol %(pool_proto)s")


class MemberExists(qexception.NeutronException):
    message = _("Member with address %(address)s and port %(port)s "
                "already present in pool %(pool)s")


# REST attribute maps for the LBaaS v1 resources.
RESOURCE_ATTRIBUTE_MAP = {
    'vips': {
        'id': {'allow_post': False, 'allow_put': False,
               'validate': {'type:uuid': None},
               'is_visible': True,
               'primary_key': True},
        'tenant_id': {'allow_post': True, 'allow_put': False,
                      'validate': {'type:string': None},
                      'required_by_policy': True,
                      'is_visible': True},
        'name': {'allow_post': True, 'allow_put': True,
                 'validate': {'type:string': None},
                 'default': '',
                 'is_visible': True},
        'description': {'allow_post': True, 'allow_put': True,
                        'validate': {'type:string': None},
                        'is_visible': True, 'default': ''},
        'subnet_id': {'allow_post': True, 'allow_put': False,
                      'validate': {'type:uuid': None},
                      'is_visible': True},
        'address': {'allow_post': True, 'allow_put': False,
                    'default': attr.ATTR_NOT_SPECIFIED,
                    'validate': {'type:ip_address_or_none': None},
                    'is_visible': True},
        'port_id': {'allow_post': False, 'allow_put': False,
                    'validate': {'type:uuid': None},
                    'is_visible': True},
        'protocol_port': {'allow_post': True, 'allow_put': False,
                          'validate': {'type:range': [0, 65535]},
                          'convert_to': attr.convert_to_int,
                          'is_visible': True},
        'protocol': {'allow_post': True, 'allow_put': False,
                     'validate': {'type:values': ['TCP', 'HTTP', 'HTTPS']},
                     'is_visible': True},
        'pool_id': {'allow_post': True, 'allow_put': True,
                    'validate': {'type:uuid': None},
                    'is_visible': True},
        'session_persistence': {'allow_post': True, 'allow_put': True,
                                'convert_to': attr.convert_none_to_empty_dict,
                                'default': {},
                                'validate': {
                                    'type:dict_or_empty': {
                                        'type': {'type:values': ['APP_COOKIE',
                                                                 'HTTP_COOKIE',
                                                                 'SOURCE_IP'],
                                                 'required': True},
                                        'cookie_name': {'type:string': None,
                                                        'required': False}}},
                                'is_visible': True},
        'connection_limit': {'allow_post': True, 'allow_put': True,
                             'default': -1,
                             'convert_to': attr.convert_to_int,
                             'is_visible': True},
        'admin_state_up': {'allow_post': True, 'allow_put': True,
                           'default': True,
                           'convert_to': attr.convert_to_boolean,
                           'is_visible': True},
        'status': {'allow_post': False, 'allow_put': False,
                   'is_visible': True},
        'status_description': {'allow_post': False, 'allow_put': False,
                               'is_visible': True}
    },
    'pools': {
        'id': {'allow_post': False, 'allow_put': False,
               'validate': {'type:uuid': None},
               'is_visible': True,
               'primary_key': True},
        'tenant_id': {'allow_post': True, 'allow_put': False,
                      'validate': {'type:string': None},
                      'required_by_policy': True,
                      'is_visible': True},
        'vip_id': {'allow_post': False, 'allow_put': False,
                   'is_visible': True},
        'name': {'allow_post': True, 'allow_put': True,
                 'validate': {'type:string': None},
                 'default': '',
                 'is_visible': True},
        'description': {'allow_post': True, 'allow_put': True,
                        'validate': {'type:string': None},
                        'is_visible': True, 'default': ''},
        'subnet_id': {'allow_post': True, 'allow_put': False,
                      'validate': {'type:uuid': None},
                      'is_visible': True},
        'protocol': {'allow_post': True, 'allow_put': False,
                     'validate': {'type:values': ['TCP', 'HTTP', 'HTTPS']},
                     'is_visible': True},
        'provider': {'allow_post': True, 'allow_put': False,
                     'validate': {'type:string': None},
                     'is_visible': True, 'default': attr.ATTR_NOT_SPECIFIED},
        'lb_method': {'allow_post': True, 'allow_put': True,
                      'validate': {'type:string': None},
                      'is_visible': True},
        'members': {'allow_post': False, 'allow_put': False,
                    'is_visible': True},
        'health_monitors': {'allow_post': True, 'allow_put': True,
                            'default': None,
                            'validate': {'type:uuid_list': None},
                            'convert_to': attr.convert_to_list,
                            'is_visible': True},
        'health_monitors_status': {'allow_post': False, 'allow_put': False,
                                   'is_visible': True},
        'admin_state_up': {'allow_post': True, 'allow_put': True,
                           'default': True,
                           'convert_to': attr.convert_to_boolean,
                           'is_visible': True},
        'status': {'allow_post': False, 'allow_put': False,
                   'is_visible': True},
        'status_description': {'allow_post': False, 'allow_put': False,
                               'is_visible': True}
    },
    'members': {
        'id': {'allow_post': False, 'allow_put': False,
               'validate': {'type:uuid': None},
               'is_visible': True,
               'primary_key': True},
        'tenant_id': {'allow_post': True, 'allow_put': False,
                      'validate': {'type:string': None},
                      'required_by_policy': True,
                      'is_visible': True},
        'pool_id': {'allow_post': True, 'allow_put': True,
                    'validate': {'type:uuid': None},
                    'is_visible': True},
        'address': {'allow_post': True, 'allow_put': False,
                    'validate': {'type:ip_address': None},
                    'is_visible': True},
        'protocol_port': {'allow_post': True, 'allow_put': False,
                          'validate': {'type:range': [0, 65535]},
                          'convert_to': attr.convert_to_int,
                          'is_visible': True},
        'weight': {'allow_post': True, 'allow_put': True,
                   'default': 1,
                   'validate': {'type:range': [0, 256]},
                   'convert_to': attr.convert_to_int,
                   'is_visible': True},
        'admin_state_up': {'allow_post': True, 'allow_put': True,
                           'default': True,
                           'convert_to': attr.convert_to_boolean,
                           'is_visible': True},
        'status': {'allow_post': False, 'allow_put': False,
                   'is_visible': True},
        'status_description': {'allow_post': False, 'allow_put': False,
                               'is_visible': True}
    },
    'health_monitors': {
        'id': {'allow_post': False, 'allow_put': False,
               'validate': {'type:uuid': None},
               'is_visible': True,
               'primary_key': True},
        'tenant_id': {'allow_post': True, 'allow_put': False,
                      'validate': {'type:string': None},
                      'required_by_policy': True,
                      'is_visible': True},
        'type': {'allow_post': True, 'allow_put': False,
                 'validate': {'type:values': ['PING', 'TCP', 'HTTP',
                                              'HTTPS']},
                 'is_visible': True},
        'delay': {'allow_post': True, 'allow_put': True,
                  'validate': {'type:non_negative': None},
                  'convert_to': attr.convert_to_int,
                  'is_visible': True},
        'timeout': {'allow_post': True, 'allow_put': True,
                    'validate': {'type:non_negative': None},
                    'convert_to': attr.convert_to_int,
                    'is_visible': True},
        'max_retries': {'allow_post': True, 'allow_put': True,
                        'validate': {'type:range': [1, 10]},
                        'convert_to': attr.convert_to_int,
                        'is_visible': True},
        'http_method': {'allow_post': True, 'allow_put': True,
                        'validate': {'type:string': None},
                        'default': 'GET',
                        'is_visible': True},
        'url_path': {'allow_post': True, 'allow_put': True,
                     'validate': {'type:string': None},
                     'default': '/',
                     'is_visible': True},
        'expected_codes': {'allow_post': True, 'allow_put': True,
                           'validate': {
                               'type:regex':
                               '^(\d{3}(\s*,\s*\d{3})*)$|^(\d{3}-\d{3})$'},
                           'default': '200',
                           'is_visible': True},
        'admin_state_up': {'allow_post': True, 'allow_put': True,
                           'default': True,
                           'convert_to': attr.convert_to_boolean,
                           'is_visible': True},
        'status': {'allow_post': False, 'allow_put': False,
                   'is_visible': True},
        'status_description': {'allow_post': False, 'allow_put': False,
                               'is_visible': True},
        'pools': {'allow_post': False, 'allow_put': False,
                  'is_visible': True}
    }
}

# Pool/health-monitor association, exposed as a sub-resource of pools.
SUB_RESOURCE_ATTRIBUTE_MAP = {
    'health_monitors': {
        'parent': {'collection_name': 'pools',
                   'member_name': 'pool'},
        'parameters': {'id': {'allow_post': True, 'allow_put': False,
                              'validate': {'type:uuid': None},
                              'is_visible': True},
                       'tenant_id': {'allow_post': True, 'allow_put': False,
                                     'validate': {'type:string': None},
                                     'required_by_policy': True,
                                     'is_visible': True},
                       }
    }
}
# Per-tenant quota options for LBaaS resources, registered in the
# [QUOTAS] config section so the common quota engine enforces them.
lbaas_quota_opts = [
    cfg.IntOpt('quota_vip',
               default=10,
               help=_('Number of vips allowed per tenant. '
                      'A negative value means unlimited.')),
    cfg.IntOpt('quota_pool',
               default=10,
               help=_('Number of pools allowed per tenant. '
                      'A negative value means unlimited.')),
    cfg.IntOpt('quota_member',
               default=-1,
               help=_('Number of pool members allowed per tenant. '
                      'A negative value means unlimited.')),
    cfg.IntOpt('quota_health_monitor',
               default=-1,
               help=_('Number of health monitors allowed per tenant. '
                      'A negative value means unlimited.')),
]
cfg.CONF.register_opts(lbaas_quota_opts, 'QUOTAS')


class Loadbalancer(extensions.ExtensionDescriptor):
    """API extension descriptor for the LBaaS v1 service.

    Exposes vips, pools, members and health_monitors as top-level
    resources, plus health-monitor associations as a sub-resource of
    pools.
    """

    @classmethod
    def get_name(cls):
        return "LoadBalancing service"

    @classmethod
    def get_alias(cls):
        return "lbaas"

    @classmethod
    def get_description(cls):
        return "Extension for LoadBalancing service"

    @classmethod
    def get_namespace(cls):
        return "http://wiki.openstack.org/neutron/LBaaS/API_1.0"

    @classmethod
    def get_updated(cls):
        return "2012-10-07T10:00:00-00:00"

    @classmethod
    def get_resources(cls):
        """Build top-level LBaaS resources and pool sub-resources."""
        mappings = resource_helper.build_plural_mappings(
            {}, RESOURCE_ATTRIBUTE_MAP)
        # Not derivable from the attribute map, so added by hand.
        mappings['health_monitors_status'] = 'health_monitor_status'
        attr.PLURALS.update(mappings)
        # Pools additionally expose a GET /stats member action.
        pool_actions = {'pool': {'stats': 'GET'}}
        resources = resource_helper.build_resource_info(
            mappings,
            RESOURCE_ATTRIBUTE_MAP,
            constants.LOADBALANCER,
            action_map=pool_actions,
            register_quota=True)
        plugin = manager.NeutronManager.get_service_plugins()[
            constants.LOADBALANCER]
        for collection_name in SUB_RESOURCE_ATTRIBUTE_MAP:
            # Naive singularization: strip the trailing 's'
            # (e.g. health_monitors -> health_monitor).  NOTE(review):
            # collections ending in 'ies'/'y' would need special
            # handling, as the original comment hinted.
            resource_name = collection_name[:-1]
            sub_spec = SUB_RESOURCE_ATTRIBUTE_MAP[collection_name]
            parent = sub_spec.get('parent')
            params = sub_spec.get('parameters')

            sub_controller = base.create_resource(collection_name,
                                                  resource_name,
                                                  plugin, params,
                                                  allow_bulk=True,
                                                  parent=parent)
            resources.append(extensions.ResourceExtension(
                collection_name,
                sub_controller,
                parent,
                path_prefix=constants.COMMON_PREFIXES[constants.LOADBALANCER],
                attr_map=params))

        return resources

    @classmethod
    def get_plugin_interface(cls):
        return LoadBalancerPluginBase

    def update_attributes_map(self, attributes):
        super(Loadbalancer, self).update_attributes_map(
            attributes, extension_attrs_map=RESOURCE_ATTRIBUTE_MAP)

    def get_extended_resources(self, version):
        return RESOURCE_ATTRIBUTE_MAP if version == "2.0" else {}


@six.add_metaclass(abc.ABCMeta)
class LoadBalancerPluginBase(service_base.ServicePluginBase):
    """Abstract contract every LBaaS service plugin must implement."""

    def get_plugin_name(self):
        return constants.LOADBALANCER

    def get_plugin_type(self):
        return constants.LOADBALANCER

    def get_plugin_description(self):
        return 'LoadBalancer service plugin'

    # --- VIPs ---------------------------------------------------------
    @abc.abstractmethod
    def get_vips(self, context, filters=None, fields=None):
        """Return all VIPs matching the given filters."""

    @abc.abstractmethod
    def get_vip(self, context, id, fields=None):
        """Return a single VIP by id."""

    @abc.abstractmethod
    def create_vip(self, context, vip):
        """Create a VIP from the request body dict."""

    @abc.abstractmethod
    def update_vip(self, context, id, vip):
        """Update an existing VIP."""

    @abc.abstractmethod
    def delete_vip(self, context, id):
        """Delete a VIP."""

    # --- Pools --------------------------------------------------------
    @abc.abstractmethod
    def get_pools(self, context, filters=None, fields=None):
        """Return all pools matching the given filters."""

    @abc.abstractmethod
    def get_pool(self, context, id, fields=None):
        """Return a single pool by id."""

    @abc.abstractmethod
    def create_pool(self, context, pool):
        """Create a pool from the request body dict."""

    @abc.abstractmethod
    def update_pool(self, context, id, pool):
        """Update an existing pool."""

    @abc.abstractmethod
    def delete_pool(self, context, id):
        """Delete a pool."""

    @abc.abstractmethod
    def stats(self, context, pool_id):
        """Return traffic statistics for a pool."""

    # --- Pool / health-monitor associations ---------------------------
    @abc.abstractmethod
    def create_pool_health_monitor(self, context, health_monitor, pool_id):
        """Associate an existing health monitor with a pool."""

    @abc.abstractmethod
    def get_pool_health_monitor(self, context, id, pool_id, fields=None):
        """Return one pool/health-monitor association."""

    @abc.abstractmethod
    def delete_pool_health_monitor(self, context, id, pool_id):
        """Remove a health monitor from a pool."""

    # --- Members ------------------------------------------------------
    @abc.abstractmethod
    def get_members(self, context, filters=None, fields=None):
        """Return all members matching the given filters."""

    @abc.abstractmethod
    def get_member(self, context, id, fields=None):
        """Return a single member by id."""

    @abc.abstractmethod
    def create_member(self, context, member):
        """Create a member from the request body dict."""

    @abc.abstractmethod
    def update_member(self, context, id, member):
        """Update an existing member."""

    @abc.abstractmethod
    def delete_member(self, context, id):
        """Delete a member."""

    # --- Health monitors ----------------------------------------------
    @abc.abstractmethod
    def get_health_monitors(self, context, filters=None, fields=None):
        """Return all health monitors matching the given filters."""

    @abc.abstractmethod
    def get_health_monitor(self, context, id, fields=None):
        """Return a single health monitor by id."""

    @abc.abstractmethod
    def create_health_monitor(self, context, health_monitor):
        """Create a health monitor from the request body dict."""

    @abc.abstractmethod
    def update_health_monitor(self, context, id, health_monitor):
        """Update an existing health monitor."""

    @abc.abstractmethod
    def delete_health_monitor(self, context, id):
        """Delete a health monitor."""
import abc

import six

from neutron.api import extensions
from neutron.api.v2 import attributes as attr
from neutron.api.v2 import resource_helper
from neutron.common import exceptions as qexception
from neutron.openstack.common import log as logging
from neutron.plugins.common import constants
from neutron.services import service_base

LOG = logging.getLogger(__name__)


class MeteringLabelNotFound(qexception.NotFound):
    """Raised when the referenced metering label does not exist."""
    message = _("Metering label %(label_id)s does not exist")


class DuplicateMeteringRuleInPost(qexception.InUse):
    """Raised when a POST carries the same rule more than once."""
    message = _("Duplicate Metering Rule in POST.")


class MeteringLabelRuleNotFound(qexception.NotFound):
    """Raised when the referenced metering label rule does not exist."""
    message = _("Metering label rule %(rule_id)s does not exist")


class MeteringLabelRuleOverlaps(qexception.NotFound):
    """Raised when a rule's remote_ip_prefix overlaps an existing one."""
    message = _("Metering label rule with remote_ip_prefix "
                "%(remote_ip_prefix)s overlaps another")


# REST attribute maps for the two metering resources.
RESOURCE_ATTRIBUTE_MAP = {
    'metering_labels': {
        'id': {'allow_post': False, 'allow_put': False,
               'is_visible': True,
               'primary_key': True},
        'name': {'allow_post': True, 'allow_put': True,
                 'is_visible': True, 'default': ''},
        'description': {'allow_post': True, 'allow_put': True,
                        'is_visible': True, 'default': ''},
        'tenant_id': {'allow_post': True, 'allow_put': False,
                      'required_by_policy': True,
                      'is_visible': True},
    },
    'metering_label_rules': {
        'id': {'allow_post': False, 'allow_put': False,
               'is_visible': True,
               'primary_key': True},
        'metering_label_id': {'allow_post': True, 'allow_put': False,
                              'validate': {'type:uuid': None},
                              'is_visible': True,
                              'required_by_policy': True},
        'direction': {'allow_post': True, 'allow_put': True,
                      'is_visible': True,
                      'validate': {'type:values': ['ingress', 'egress']}},
        'excluded': {'allow_post': True, 'allow_put': True,
                     'is_visible': True, 'default': False,
                     'convert_to': attr.convert_to_boolean},
        'remote_ip_prefix': {'allow_post': True, 'allow_put': False,
                             'is_visible': True,
                             'required_by_policy': True,
                             'validate': {'type:subnet': None}},
        'tenant_id': {'allow_post': True, 'allow_put': False,
                      'required_by_policy': True,
                      'is_visible': True},
    }
}


class Metering(extensions.ExtensionDescriptor):
    """API extension descriptor for bandwidth metering."""

    @classmethod
    def get_name(cls):
        return "Neutron Metering"

    @classmethod
    def get_alias(cls):
        return "metering"

    @classmethod
    def get_description(cls):
        return "Neutron Metering extension."

    @classmethod
    def get_namespace(cls):
        return "http://wiki.openstack.org/wiki/Neutron/Metering/Bandwidth#API"

    @classmethod
    def get_updated(cls):
        return "2013-06-12T10:00:00-00:00"

    @classmethod
    def get_plugin_interface(cls):
        return MeteringPluginBase

    @classmethod
    def get_resources(cls):
        """Returns Ext Resources."""
        plural_mappings = resource_helper.build_plural_mappings(
            {}, RESOURCE_ATTRIBUTE_MAP)
        attr.PLURALS.update(plural_mappings)
        # NOTE(review): pagination/sorting are left to the
        # build_resource_info defaults; the original author questioned
        # whether these should instead be driven by config options.
        return resource_helper.build_resource_info(plural_mappings,
                                                   RESOURCE_ATTRIBUTE_MAP,
                                                   constants.METERING,
                                                   translate_name=True,
                                                   allow_bulk=True)

    def update_attributes_map(self, attributes):
        super(Metering, self).update_attributes_map(
            attributes, extension_attrs_map=RESOURCE_ATTRIBUTE_MAP)

    def get_extended_resources(self, version):
        return RESOURCE_ATTRIBUTE_MAP if version == "2.0" else {}


@six.add_metaclass(abc.ABCMeta)
class MeteringPluginBase(service_base.ServicePluginBase):
    """Abstract contract every metering service plugin must implement."""

    def get_plugin_name(self):
        return constants.METERING

    def get_plugin_description(self):
        # NOTE(review): returns the plugin-type constant rather than a
        # human-readable sentence (cf. lbaas's 'LoadBalancer service
        # plugin') — preserved as-is; confirm whether this is intended.
        return constants.METERING

    def get_plugin_type(self):
        return constants.METERING

    @abc.abstractmethod
    def create_metering_label(self, context, metering_label):
        """Create a metering label."""

    @abc.abstractmethod
    def delete_metering_label(self, context, label_id):
        """Delete a metering label."""

    @abc.abstractmethod
    def get_metering_label(self, context, label_id, fields=None):
        """Get a metering label."""

    @abc.abstractmethod
    def get_metering_labels(self, context, filters=None, fields=None,
                            sorts=None, limit=None, marker=None,
                            page_reverse=False):
        """List all metering labels."""

    @abc.abstractmethod
    def create_metering_label_rule(self, context, metering_label_rule):
        """Create a metering label rule."""

    @abc.abstractmethod
    def get_metering_label_rule(self, context, rule_id, fields=None):
        """Get a metering label rule."""

    @abc.abstractmethod
    def delete_metering_label_rule(self, context, rule_id):
        """Delete a metering label rule."""

    @abc.abstractmethod
    def get_metering_label_rules(self, context, filters=None, fields=None,
                                 sorts=None, limit=None, marker=None,
                                 page_reverse=False):
        """List all metering label rules."""
import webob.exc

from neutron.api import extensions
from neutron.api.v2 import attributes as attr
from neutron.common import exceptions as qexception
from neutron.extensions import providernet as pnet

SEGMENTS = 'segments'


class SegmentsSetInConjunctionWithProviders(qexception.InvalidInput):
    """Raised when both 'segments' and provider:* attributes are given."""
    message = _("Segments and provider values cannot both be set.")


class SegmentsContainDuplicateEntry(qexception.InvalidInput):
    """Raised when the same segment appears twice in one request."""
    message = _("Duplicate segment entry in request.")


def _convert_and_validate_segments(segments, valid_values=None):
    """Normalize each segment dict in place and reject bad input.

    Every segment ends up with exactly the three provider keys
    (network_type, physical_network, segmentation_id), missing values
    filled with ATTR_NOT_SPECIFIED.  Raises HTTPBadRequest on unknown
    keys and SegmentsContainDuplicateEntry on duplicated segments.
    """
    seen = set()
    for segment in segments:
        # Record the segment as originally supplied, before defaults
        # are filled in, so duplicate detection sees the raw request.
        seen.add(tuple(segment.iteritems()))
        segment[pnet.NETWORK_TYPE] = segment.get(pnet.NETWORK_TYPE,
                                                 attr.ATTR_NOT_SPECIFIED)
        segment[pnet.PHYSICAL_NETWORK] = segment.get(pnet.PHYSICAL_NETWORK,
                                                     attr.ATTR_NOT_SPECIFIED)
        seg_id = segment.get(pnet.SEGMENTATION_ID)
        if seg_id:
            segment[pnet.SEGMENTATION_ID] = attr.convert_to_int(seg_id)
        else:
            segment[pnet.SEGMENTATION_ID] = attr.ATTR_NOT_SPECIFIED
        # After normalization exactly three keys are legal; anything
        # extra came from the caller.
        if len(segment.keys()) != 3:
            unknown = (set(segment.keys()) -
                       set([pnet.NETWORK_TYPE,
                            pnet.PHYSICAL_NETWORK,
                            pnet.SEGMENTATION_ID]))
            msg = _("Unrecognized attribute(s) '%s'") % ', '.join(unknown)
            raise webob.exc.HTTPBadRequest(msg)
    if len(seen) != len(segments):
        raise SegmentsContainDuplicateEntry()


attr.validators['type:convert_segments'] = (
    _convert_and_validate_segments)


EXTENDED_ATTRIBUTES_2_0 = {
    'networks': {
        SEGMENTS: {'allow_post': True, 'allow_put': True,
                   'validate': {'type:convert_segments': None},
                   'convert_list_to': attr.convert_kvp_list_to_dict,
                   'default': attr.ATTR_NOT_SPECIFIED,
                   'enforce_policy': True,
                   'is_visible': True},
    }
}


class Multiprovidernet(extensions.ExtensionDescriptor):
    """Extension class supporting multiple provider networks.

    Defines no new resources; instead the existing network resource's
    request and response messages are extended with a 'segments'
    attribute, visible (with admin rights) in returned network dicts.
    """

    @classmethod
    def get_name(cls):
        return "Multi Provider Network"

    @classmethod
    def get_alias(cls):
        return "multi-provider"

    @classmethod
    def get_description(cls):
        return ("Expose mapping of virtual networks to multiple physical "
                "networks")

    @classmethod
    def get_namespace(cls):
        return "http://docs.openstack.org/ext/multi-provider/api/v1.0"

    @classmethod
    def get_updated(cls):
        return "2013-06-27T10:00:00-00:00"

    def get_extended_resources(self, version):
        return EXTENDED_ATTRIBUTES_2_0 if version == "2.0" else {}
from neutron.api import extensions
from neutron.api.v2 import attributes

# Attribute names added to the port resource by this extension.
# The type of vnic that this port should be attached to.
VNIC_TYPE = 'binding:vnic_type'
# The vif type bound to the port, reported by the service.
VIF_TYPE = 'binding:vif_type'
# Optional dictionary of extra information for the interface driver;
# its contents may depend on the value of VIF_TYPE.
VIF_DETAILS = 'binding:vif_details'
# The host on which the port will be allocated, for deployments where
# different implementations run on different hosts.
HOST_ID = 'binding:host_id'
# Dictionary used by the application on a specific host to exchange
# vif-port-specific information with the plugin.
PROFILE = 'binding:profile'

# Keys used inside VIF_DETAILS to convey information to the VIF driver.
#
# TODO(rkukura): Replace CAP_PORT_FILTER, which nova no longer
# understands, with the new set of VIF security details to be used in
# the VIF_DETAILS attribute.
#
# - port_filter: Neutron provides port filtering features such as
#   security group and anti MAC/IP spoofing.
# - ovs_hybrid_plug: tells Nova to use the hybrid plugging strategy
#   for OVS.
CAP_PORT_FILTER = 'port_filter'
OVS_HYBRID_PLUG = 'ovs_hybrid_plug'

VIF_TYPE_UNBOUND = 'unbound'
VIF_TYPE_BINDING_FAILED = 'binding_failed'
VIF_TYPE_IOVISOR = 'iovisor'
VIF_TYPE_OVS = 'ovs'
VIF_TYPE_IVS = 'ivs'
VIF_TYPE_BRIDGE = 'bridge'
VIF_TYPE_802_QBG = '802.1qbg'
VIF_TYPE_802_QBH = '802.1qbh'
VIF_TYPE_HYPERV = 'hyperv'
VIF_TYPE_MIDONET = 'midonet'
VIF_TYPE_MLNX_DIRECT = 'mlnx_direct'
VIF_TYPE_MLNX_HOSTDEV = 'hostdev'
VIF_TYPE_OTHER = 'other'
# NOTE(review): VIF_TYPE_IOVISOR is defined above but deliberately(?)
# absent from this list — preserved as in the original; confirm.
VIF_TYPES = [VIF_TYPE_UNBOUND, VIF_TYPE_BINDING_FAILED, VIF_TYPE_OVS,
             VIF_TYPE_IVS, VIF_TYPE_BRIDGE, VIF_TYPE_802_QBG,
             VIF_TYPE_802_QBH, VIF_TYPE_HYPERV, VIF_TYPE_MIDONET,
             VIF_TYPE_MLNX_DIRECT, VIF_TYPE_MLNX_HOSTDEV, VIF_TYPE_OTHER]

VNIC_NORMAL = 'normal'
VNIC_DIRECT = 'direct'
VNIC_MACVTAP = 'macvtap'
VNIC_TYPES = [VNIC_NORMAL, VNIC_DIRECT, VNIC_MACVTAP]

EXTENDED_ATTRIBUTES_2_0 = {
    'ports': {
        VIF_TYPE: {'allow_post': False, 'allow_put': False,
                   'default': attributes.ATTR_NOT_SPECIFIED,
                   'enforce_policy': True,
                   'is_visible': True},
        VIF_DETAILS: {'allow_post': False, 'allow_put': False,
                      'default': attributes.ATTR_NOT_SPECIFIED,
                      'enforce_policy': True,
                      'is_visible': True},
        VNIC_TYPE: {'allow_post': True, 'allow_put': True,
                    'default': VNIC_NORMAL,
                    'is_visible': True,
                    'validate': {'type:values': VNIC_TYPES},
                    'enforce_policy': True},
        HOST_ID: {'allow_post': True, 'allow_put': True,
                  'default': attributes.ATTR_NOT_SPECIFIED,
                  'is_visible': True,
                  'enforce_policy': True},
        PROFILE: {'allow_post': True, 'allow_put': True,
                  'default': attributes.ATTR_NOT_SPECIFIED,
                  'enforce_policy': True,
                  'validate': {'type:dict_or_none': None},
                  'is_visible': True},
    }
}


class Portbindings(extensions.ExtensionDescriptor):
    """Extension class supporting port bindings.

    Makes binding metadata on ports available to external applications;
    admin rights are required to read and update the values.
    """

    @classmethod
    def get_name(cls):
        return "Port Binding"

    @classmethod
    def get_alias(cls):
        return "binding"

    @classmethod
    def get_description(cls):
        return "Expose port bindings of a virtual port to external application"

    @classmethod
    def get_namespace(cls):
        return "http://docs.openstack.org/ext/binding/api/v1.0"

    @classmethod
    def get_updated(cls):
        return "2014-02-03T10:00:00-00:00"

    def get_extended_resources(self, version):
        return EXTENDED_ATTRIBUTES_2_0 if version == "2.0" else {}
class PortSecurityPortHasSecurityGroup(qexception.InUse):
    """Raised when disabling port security on a port that still has SGs."""
    message = _("Port has security group associated. Cannot disable port "
                "security or ip address until security group is removed")


class PortSecurityAndIPRequiredForSecurityGroups(qexception.InvalidInput):
    """Raised when SGs are requested without port security + an IP."""
    message = _("Port security must be enabled and port must have an IP"
                " address in order to use security groups.")


class PortSecurityBindingNotFound(qexception.InvalidExtensionEnv):
    """Raised when a port lacks its port-security binding record."""
    message = _("Port does not have port security binding.")


# Attribute name added to both networks and ports.
PORTSECURITY = 'port_security_enabled'

EXTENDED_ATTRIBUTES_2_0 = {
    'networks': {
        # Network-level value acts as the default for new ports.
        PORTSECURITY: {'allow_post': True, 'allow_put': True,
                       'convert_to': attributes.convert_to_boolean,
                       'enforce_policy': True,
                       'default': True,
                       'is_visible': True},
    },
    'ports': {
        # Unspecified on a port means "inherit from the network".
        PORTSECURITY: {'allow_post': True, 'allow_put': True,
                       'convert_to': attributes.convert_to_boolean,
                       'default': attributes.ATTR_NOT_SPECIFIED,
                       'enforce_policy': True,
                       'is_visible': True},
    }
}


class Portsecurity(object):
    """Extension class supporting port security."""

    @classmethod
    def get_name(cls):
        return "Port Security"

    @classmethod
    def get_alias(cls):
        return "port-security"

    @classmethod
    def get_description(cls):
        return "Provides port security"

    @classmethod
    def get_namespace(cls):
        return "http://docs.openstack.org/ext/portsecurity/api/v1.0"

    @classmethod
    def get_updated(cls):
        return "2012-07-23T10:00:00-00:00"

    def get_extended_resources(self, version):
        return EXTENDED_ATTRIBUTES_2_0 if version == "2.0" else {}
from neutron.api import extensions
from neutron.api.v2 import attributes
from neutron.common import exceptions as n_exc


# Provider attributes added to the network resource.
NETWORK_TYPE = 'provider:network_type'
PHYSICAL_NETWORK = 'provider:physical_network'
SEGMENTATION_ID = 'provider:segmentation_id'

EXTENDED_ATTRIBUTES_2_0 = {
    'networks': {
        NETWORK_TYPE: {'allow_post': True, 'allow_put': True,
                       'validate': {'type:string': None},
                       'default': attributes.ATTR_NOT_SPECIFIED,
                       'enforce_policy': True,
                       'is_visible': True},
        PHYSICAL_NETWORK: {'allow_post': True, 'allow_put': True,
                           'validate': {'type:string': None},
                           'default': attributes.ATTR_NOT_SPECIFIED,
                           'enforce_policy': True,
                           'is_visible': True},
        # NOTE(review): uses the bare 'int' builtin as converter rather
        # than attributes.convert_to_int — preserved as-is; confirm.
        SEGMENTATION_ID: {'allow_post': True, 'allow_put': True,
                          'convert_to': int,
                          'enforce_policy': True,
                          'default': attributes.ATTR_NOT_SPECIFIED,
                          'is_visible': True},
    }
}


def _raise_if_updates_provider_attributes(attrs):
    """Raise exception if provider attributes are present.

    This method is used for plugins that do not support
    updating provider networks.
    """
    immutable = (NETWORK_TYPE, PHYSICAL_NETWORK, SEGMENTATION_ID)
    if any(attributes.is_attr_set(attrs.get(a)) for a in immutable):
        msg = _("Plugin does not support updating provider attributes")
        raise n_exc.InvalidInput(error_message=msg)


class Providernet(extensions.ExtensionDescriptor):
    """Extension class supporting provider networks.

    Defines no new resources; the existing network resource's request
    and response messages are extended with the provider:* attributes.
    With admin rights, returned network dicts include these attributes.
    """

    @classmethod
    def get_name(cls):
        return "Provider Network"

    @classmethod
    def get_alias(cls):
        return "provider"

    @classmethod
    def get_description(cls):
        return "Expose mapping of virtual networks to physical networks"

    @classmethod
    def get_namespace(cls):
        return "http://docs.openstack.org/ext/provider/api/v1.0"

    @classmethod
    def get_updated(cls):
        return "2012-09-07T10:00:00-00:00"

    def get_extended_resources(self, version):
        return EXTENDED_ATTRIBUTES_2_0 if version == "2.0" else {}
import sys

from oslo.config import cfg
import webob

from neutron.api import extensions
from neutron.api.v2 import attributes
from neutron.api.v2 import base
from neutron.api.v2 import resource
from neutron.common import exceptions as n_exc
from neutron import manager
from neutron.openstack.common import importutils
from neutron import quota
from neutron import wsgi


RESOURCE_NAME = 'quota'
RESOURCE_COLLECTION = RESOURCE_NAME + "s"
QUOTAS = quota.QUOTAS
DB_QUOTA_DRIVER = 'neutron.db.quota_db.DbQuotaDriver'
# Populated lazily with one attribute per registered quota resource;
# see QuotaSetsController._update_attributes.
EXTENDED_ATTRIBUTES_2_0 = {
    RESOURCE_COLLECTION: {}
}


class QuotaSetsController(wsgi.Controller):
    """WSGI controller for per-tenant quota CRUD."""

    def __init__(self, plugin):
        self._resource_name = RESOURCE_NAME
        self._plugin = plugin
        self._driver = importutils.import_class(
            cfg.CONF.QUOTAS.quota_driver
        )
        # Attribute map is filled in on first update(), once all quota
        # resources have been registered.
        self._update_extended_attributes = True

    def _update_attributes(self):
        """Mirror every registered quota resource into the attr map."""
        attr_dict = EXTENDED_ATTRIBUTES_2_0[RESOURCE_COLLECTION]
        for quota_resource in QUOTAS.resources.iterkeys():
            attr_dict[quota_resource] = {
                'allow_post': False,
                'allow_put': True,
                'convert_to': attributes.convert_to_int,
                # -1 means "unlimited".
                'validate': {'type:range': [-1, sys.maxsize]},
                'is_visible': True}
        self._update_extended_attributes = False

    def _get_quotas(self, request, tenant_id):
        return self._driver.get_tenant_quotas(
            request.context, QUOTAS.resources, tenant_id)

    def create(self, request, body=None):
        # Quotas exist implicitly per tenant; they cannot be created.
        msg = _('POST requests are not supported on this resource.')
        raise webob.exc.HTTPNotImplemented(msg)

    def index(self, request):
        context = request.context
        self._check_admin(context)
        return {self._resource_name + "s":
                self._driver.get_all_quotas(context, QUOTAS.resources)}

    def tenant(self, request):
        """Retrieve the tenant info in context."""
        context = request.context
        if not context.tenant_id:
            raise n_exc.QuotaMissingTenant()
        return {'tenant': {'tenant_id': context.tenant_id}}

    def show(self, request, id):
        # Non-admins may only look at their own quotas.
        if id != request.context.tenant_id:
            self._check_admin(request.context,
                              reason=_("Only admin is authorized "
                                       "to access quotas for another tenant"))
        return {self._resource_name: self._get_quotas(request, id)}

    def _check_admin(self, context,
                     reason=_("Only admin can view or configure quota")):
        if not context.is_admin:
            raise n_exc.AdminRequired(reason=reason)

    def delete(self, request, id):
        self._check_admin(request.context)
        self._driver.delete_tenant_quota(request.context, id)

    def update(self, request, id, body=None):
        self._check_admin(request.context)
        if self._update_extended_attributes:
            self._update_attributes()
        body = base.Controller.prepare_request_body(
            request.context, body, False, self._resource_name,
            EXTENDED_ATTRIBUTES_2_0[RESOURCE_COLLECTION])
        for key, value in body[self._resource_name].items():
            self._driver.update_quota_limit(request.context, id, key, value)
        return {self._resource_name: self._get_quotas(request, id)}


class Quotasv2(extensions.ExtensionDescriptor):
    """Quotas management support."""

    @classmethod
    def get_name(cls):
        return "Quota management support"

    @classmethod
    def get_alias(cls):
        return RESOURCE_COLLECTION

    @classmethod
    def get_description(cls):
        description = 'Expose functions for quotas management'
        # Only the DB driver supports distinct per-tenant quotas.
        if cfg.CONF.QUOTAS.quota_driver == DB_QUOTA_DRIVER:
            description += ' per tenant'
        return description

    @classmethod
    def get_namespace(cls):
        return "http://docs.openstack.org/network/ext/quotas-sets/api/v2.0"

    @classmethod
    def get_updated(cls):
        return "2012-07-29T10:00:00-00:00"

    @classmethod
    def get_resources(cls):
        """Returns Ext Resources."""
        controller = resource.Resource(
            QuotaSetsController(manager.NeutronManager.get_plugin()),
            faults=base.FAULT_MAP)
        return [extensions.ResourceExtension(
            Quotasv2.get_alias(),
            controller,
            collection_actions={'tenant': 'GET'})]

    def get_extended_resources(self, version):
        return EXTENDED_ATTRIBUTES_2_0 if version == "2.0" else {}
# Attribute added to several advanced-service resources so they can be
# pinned to a specific router at creation time.
ROUTER_ID = 'router_id'


def _router_id_attr():
    """Return a fresh router_id attribute spec (create-only, optional)."""
    return {
        ROUTER_ID: {'allow_post': True, 'allow_put': False,
                    'validate': {'type:uuid_or_none': None},
                    'default': None, 'is_visible': True},
    }


EXTENDED_ATTRIBUTES_2_0 = {
    'vips': _router_id_attr(),
    'pools': _router_id_attr(),
    'health_monitors': _router_id_attr(),
    'firewalls': _router_id_attr(),
}


class Routedserviceinsertion(object):
    """Extension class supporting routed service type."""

    @classmethod
    def get_name(cls):
        return "Routed Service Insertion"

    @classmethod
    def get_alias(cls):
        return "routed-service-insertion"

    @classmethod
    def get_description(cls):
        return "Provides routed service type"

    @classmethod
    def get_namespace(cls):
        return ""

    @classmethod
    def get_updated(cls):
        return "2013-01-29T00:00:00-00:00"

    def get_extended_resources(self, version):
        return EXTENDED_ATTRIBUTES_2_0 if version == "2.0" else {}
SERVICE_TYPE_ID = 'service_type_id'

# Extends the core 'routers' resource with an optional, create-only
# 'service_type_id' attribute.
EXTENDED_ATTRIBUTES_2_0 = {
    'routers': {
        SERVICE_TYPE_ID: {'allow_post': True, 'allow_put': False,
                          'validate': {'type:uuid_or_none': None},
                          'default': None, 'is_visible': True},
    }
}


class Routerservicetype(object):
    """Extension class supporting router service type."""

    @classmethod
    def get_name(cls):
        """Human-readable extension name."""
        return "Router Service Type"

    @classmethod
    def get_alias(cls):
        """Short alias used in API requests."""
        return "router-service-type"

    @classmethod
    def get_description(cls):
        return "Provides router service type"

    @classmethod
    def get_namespace(cls):
        # No namespace document exists for this extension.
        return ""

    @classmethod
    def get_updated(cls):
        """Timestamp of the last interface change."""
        return "2013-01-29T00:00:00-00:00"

    def get_extended_resources(self, version):
        """Return the attribute extensions for API version 2.0."""
        return EXTENDED_ATTRIBUTES_2_0 if version == "2.0" else {}
import abc
import netaddr

from oslo.config import cfg
import six

from neutron.api import extensions
from neutron.api.v2 import attributes as attr
from neutron.api.v2 import base
from neutron.common import constants as const
from neutron.common import exceptions as qexception
from neutron import manager
from neutron.openstack.common import uuidutils
from neutron import quota


# Security group Exceptions
class SecurityGroupInvalidPortRange(qexception.InvalidInput):
    message = _("For TCP/UDP protocols, port_range_min must be "
                "<= port_range_max")


class SecurityGroupInvalidPortValue(qexception.InvalidInput):
    message = _("Invalid value for port %(port)s")


class SecurityGroupInvalidIcmpValue(qexception.InvalidInput):
    message = _("Invalid value for ICMP %(field)s (%(attr)s) "
                "%(value)s. It must be 0 to 255.")


class SecurityGroupMissingIcmpType(qexception.InvalidInput):
    message = _("ICMP code (port-range-max) %(value)s is provided"
                " but ICMP type (port-range-min) is missing.")


class SecurityGroupInUse(qexception.InUse):
    message = _("Security Group %(id)s in use.")


class SecurityGroupCannotRemoveDefault(qexception.InUse):
    message = _("Removing default security group not allowed.")


class SecurityGroupCannotUpdateDefault(qexception.InUse):
    message = _("Updating default security group not allowed.")


class SecurityGroupDefaultAlreadyExists(qexception.InUse):
    message = _("Default security group already exists.")


class SecurityGroupRuleInvalidProtocol(qexception.InvalidInput):
    message = _("Security group rule protocol %(protocol)s not supported. "
                "Only protocol values %(values)s and their integer "
                "representation (0 to 255) are supported.")


class SecurityGroupRulesNotSingleTenant(qexception.InvalidInput):
    message = _("Multiple tenant_ids in bulk security group rule create"
                " not allowed")


class SecurityGroupRemoteGroupAndRemoteIpPrefix(qexception.InvalidInput):
    message = _("Only remote_ip_prefix or remote_group_id may "
                "be provided.")


class SecurityGroupProtocolRequiredWithPorts(qexception.InvalidInput):
    # Typo fix: "specifiy" -> "specify" in the user-facing message.
    message = _("Must also specify protocol if port range is given.")


class SecurityGroupNotSingleGroupRules(qexception.InvalidInput):
    message = _("Only allowed to update rules for "
                "one security profile at a time")


class SecurityGroupNotFound(qexception.NotFound):
    message = _("Security group %(id)s does not exist")


class SecurityGroupRuleNotFound(qexception.NotFound):
    message = _("Security group rule %(id)s does not exist")


class DuplicateSecurityGroupRuleInPost(qexception.InUse):
    message = _("Duplicate Security Group Rule in POST.")


class SecurityGroupRuleExists(qexception.InUse):
    message = _("Security group rule already exists. Group id is %(id)s.")


class SecurityGroupRuleParameterConflict(qexception.InvalidInput):
    message = _("Conflicting value ethertype %(ethertype)s for CIDR %(cidr)s")


def convert_protocol(value):
    """Normalize a rule protocol to its canonical form.

    Accepts None, an integer 0-255 (or its string form), or a protocol
    name from sg_supported_protocols (case-insensitive).

    :returns: None, the integer value, or the lower-cased name.
    :raises SecurityGroupRuleInvalidProtocol: for anything else.

    Bug fix vs. previous version: an exception raised inside an
    ``except`` clause is not handled by a sibling ``except`` clause, so
    non-string, non-numeric input (e.g. a list) used to leak an
    AttributeError instead of raising the API error below.
    """
    if value is None:
        return
    try:
        val = int(value)
    except (ValueError, TypeError):
        val = None
    else:
        if val >= 0 and val <= 255:
            return val
        raise SecurityGroupRuleInvalidProtocol(
            protocol=value, values=sg_supported_protocols)
    try:
        name = value.lower()
    except AttributeError:
        # Not a string either: report it as an unsupported protocol.
        raise SecurityGroupRuleInvalidProtocol(
            protocol=value, values=sg_supported_protocols)
    if name in sg_supported_protocols:
        return name
    raise SecurityGroupRuleInvalidProtocol(
        protocol=value, values=sg_supported_protocols)


def convert_ethertype_to_case_insensitive(value):
    """Map an ethertype string to its canonical capitalization.

    Returns None (unchanged behavior) when the value does not match any
    supported ethertype; the 'validate' spec rejects it afterwards.
    """
    if isinstance(value, basestring):
        for ethertype in sg_supported_ethertypes:
            if ethertype.lower() == value.lower():
                return ethertype


def convert_validate_port_value(port):
    """Validate that port is None or an integer in [0, 65535].

    :raises SecurityGroupInvalidPortValue: otherwise.
    """
    if port is None:
        return port
    try:
        val = int(port)
    except (ValueError, TypeError):
        raise SecurityGroupInvalidPortValue(port=port)

    if val >= 0 and val <= 65535:
        return val
    else:
        raise SecurityGroupInvalidPortValue(port=port)


def convert_to_uuid_list_or_none(value_list):
    """Validate that every element of value_list is UUID-like."""
    if value_list is None:
        return
    for sg_id in value_list:
        if not uuidutils.is_uuid_like(sg_id):
            msg = _("'%s' is not an integer or uuid") % sg_id
            raise qexception.InvalidInput(error_message=msg)
    return value_list


def convert_ip_prefix_to_cidr(ip_prefix):
    """Normalize an IP prefix to CIDR notation (e.g. '10.0.0.1/32')."""
    if not ip_prefix:
        return
    try:
        cidr = netaddr.IPNetwork(ip_prefix)
        return str(cidr)
    except (ValueError, TypeError, netaddr.AddrFormatError):
        raise qexception.InvalidCIDR(input=ip_prefix)


def _validate_name_not_default(data, valid_values=None):
    """Reject the reserved security group name 'default'."""
    if data == "default":
        raise SecurityGroupDefaultAlreadyExists()


# Register the custom validator used by the 'name' attribute below.
attr.validators['type:name_not_default'] = _validate_name_not_default

sg_supported_protocols = [None, const.PROTO_NAME_TCP,
                          const.PROTO_NAME_UDP,
                          const.PROTO_NAME_ICMP]
sg_supported_ethertypes = ['IPv4', 'IPv6']

# Attribute Map
# Schemas for the two REST resources exposed by this extension; consumed
# by the generic v2 controller machinery in get_resources() below.
RESOURCE_ATTRIBUTE_MAP = {
    'security_groups': {
        'id': {'allow_post': False, 'allow_put': False,
               'validate': {'type:uuid': None},
               'is_visible': True,
               'primary_key': True},
        # The name 'default' is reserved; the custom validator
        # registered by this module rejects it on create/update.
        'name': {'allow_post': True, 'allow_put': True,
                 'is_visible': True, 'default': '',
                 'validate': {'type:name_not_default': None}},
        'description': {'allow_post': True, 'allow_put': True,
                        'is_visible': True, 'default': ''},
        'tenant_id': {'allow_post': True, 'allow_put': False,
                      'required_by_policy': True,
                      'is_visible': True},
        # Read-only view of the group's rules.
        'security_group_rules': {'allow_post': False, 'allow_put': False,
                                 'is_visible': True},
    },
    'security_group_rules': {
        'id': {'allow_post': False, 'allow_put': False,
               'validate': {'type:uuid': None},
               'is_visible': True,
               'primary_key': True},
        'security_group_id': {'allow_post': True, 'allow_put': False,
                              'is_visible': True, 'required_by_policy': True},
        'remote_group_id': {'allow_post': True, 'allow_put': False,
                            'default': None, 'is_visible': True},
        'direction': {'allow_post': True, 'allow_put': True,
                      'is_visible': True,
                      'validate': {'type:values': ['ingress', 'egress']}},
        'protocol': {'allow_post': True, 'allow_put': False,
                     'is_visible': True, 'default': None,
                     'convert_to': convert_protocol},
        'port_range_min': {'allow_post': True, 'allow_put': False,
                           'convert_to': convert_validate_port_value,
                           'default': None, 'is_visible': True},
        'port_range_max': {'allow_post': True, 'allow_put': False,
                           'convert_to': convert_validate_port_value,
                           'default': None, 'is_visible': True},
        'ethertype': {'allow_post': True, 'allow_put': False,
                      'is_visible': True, 'default': 'IPv4',
                      'convert_to': convert_ethertype_to_case_insensitive,
                      'validate': {'type:values': sg_supported_ethertypes}},
        'remote_ip_prefix': {'allow_post': True, 'allow_put': False,
                             'default': None, 'is_visible': True,
                             'convert_to': convert_ip_prefix_to_cidr},
        'tenant_id': {'allow_post': True, 'allow_put': False,
                      'required_by_policy': True,
                      'is_visible': True},
    }
}


SECURITYGROUPS = 'security_groups'
# Extends the core 'ports' resource with a 'security_groups' attribute.
EXTENDED_ATTRIBUTES_2_0 = {
    'ports': {SECURITYGROUPS: {'allow_post': True,
                               'allow_put': True,
                               'is_visible': True,
                               'convert_to': convert_to_uuid_list_or_none,
                               'default': attr.ATTR_NOT_SPECIFIED}}}
# Per-tenant quota options; negative values disable the limit.
security_group_quota_opts = [
    cfg.IntOpt('quota_security_group',
               default=10,
               help=_('Number of security groups allowed per tenant. '
                      'A negative value means unlimited.')),
    cfg.IntOpt('quota_security_group_rule',
               default=100,
               help=_('Number of security rules allowed per tenant. '
                      'A negative value means unlimited.')),
]
cfg.CONF.register_opts(security_group_quota_opts, 'QUOTAS')


class Securitygroup(extensions.ExtensionDescriptor):
    """Security group extension."""

    @classmethod
    def get_name(cls):
        return "security-group"

    @classmethod
    def get_alias(cls):
        return "security-group"

    @classmethod
    def get_description(cls):
        return "The security groups extension."

    @classmethod
    def get_namespace(cls):
        # TODO: publish a stable namespace document for this extension.
        return "http://docs.openstack.org/ext/securitygroups/api/v2.0"

    @classmethod
    def get_updated(cls):
        return "2012-10-05T10:00:00-00:00"

    @classmethod
    def get_resources(cls):
        """Returns Ext Resources."""
        # Register plural->singular mappings so request bodies parse.
        my_plurals = [(key, key[:-1]) for key in RESOURCE_ATTRIBUTE_MAP.keys()]
        attr.PLURALS.update(dict(my_plurals))
        exts = []
        plugin = manager.NeutronManager.get_plugin()
        for resource_name in ['security_group', 'security_group_rule']:
            collection_name = resource_name.replace('_', '-') + "s"
            params = RESOURCE_ATTRIBUTE_MAP.get(resource_name + "s", dict())
            # Make the resource quota-enforceable under its own name.
            quota.QUOTAS.register_resource_by_name(resource_name)
            controller = base.create_resource(collection_name,
                                              resource_name,
                                              plugin, params, allow_bulk=True,
                                              allow_pagination=True,
                                              allow_sorting=True)

            ex = extensions.ResourceExtension(collection_name,
                                              controller,
                                              attr_map=params)
            exts.append(ex)

        return exts

    def get_extended_resources(self, version):
        # For v2.0 expose both the new resources and the port extension.
        if version == "2.0":
            return dict(EXTENDED_ATTRIBUTES_2_0.items() +
                        RESOURCE_ATTRIBUTE_MAP.items())
        else:
            return {}
@six.add_metaclass(abc.ABCMeta)
class SecurityGroupPluginBase(object):
    """Abstract CRUD interface a plugin must implement to support the
    security-group extension."""

    @abc.abstractmethod
    def create_security_group(self, context, security_group):
        pass

    @abc.abstractmethod
    def update_security_group(self, context, id, security_group):
        pass

    @abc.abstractmethod
    def delete_security_group(self, context, id):
        pass

    @abc.abstractmethod
    def get_security_groups(self, context, filters=None, fields=None,
                            sorts=None, limit=None, marker=None,
                            page_reverse=False):
        pass

    @abc.abstractmethod
    def get_security_group(self, context, id, fields=None):
        pass

    @abc.abstractmethod
    def create_security_group_rule(self, context, security_group_rule):
        pass

    @abc.abstractmethod
    def delete_security_group_rule(self, context, id):
        pass

    @abc.abstractmethod
    def get_security_group_rules(self, context, filters=None, fields=None,
                                 sorts=None, limit=None, marker=None,
                                 page_reverse=False):
        pass

    @abc.abstractmethod
    def get_security_group_rule(self, context, id, fields=None):
        pass
# Copyright 2013 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Salvatore Orlando, VMware
#

from neutron.api import extensions
from neutron.api.v2 import attributes
from neutron.api.v2 import base
from neutron.db import servicetype_db
from neutron.openstack.common import log as logging

LOG = logging.getLogger(__name__)

RESOURCE_NAME = "service_provider"
COLLECTION_NAME = "%ss" % RESOURCE_NAME
SERVICE_ATTR = 'service_type'
PLUGIN_ATTR = 'plugin'
DRIVER_ATTR = 'driver'
EXT_ALIAS = 'service-type'

# Attribute Map for Service Provider Resource
# Allow read-only access
RESOURCE_ATTRIBUTE_MAP = {
    COLLECTION_NAME: {
        'service_type': {'allow_post': False, 'allow_put': False,
                         'is_visible': True},
        'name': {'allow_post': False, 'allow_put': False,
                 'is_visible': True},
        # Whether this provider is the default for its service type.
        'default': {'allow_post': False, 'allow_put': False,
                    'is_visible': True},
    }
}


class Servicetype(extensions.ExtensionDescriptor):
    """Read-only extension listing configured service providers."""

    @classmethod
    def get_name(cls):
        return _("Neutron Service Type Management")

    @classmethod
    def get_alias(cls):
        return EXT_ALIAS

    @classmethod
    def get_description(cls):
        return _("API for retrieving service providers for "
                 "Neutron advanced services")

    @classmethod
    def get_namespace(cls):
        return "http://docs.openstack.org/ext/neutron/service-type/api/v1.0"

    @classmethod
    def get_updated(cls):
        return "2013-01-20T00:00:00-00:00"

    @classmethod
    def get_resources(cls):
        """Returns Extended Resource for service type management."""
        my_plurals = [(key, key[:-1]) for key in RESOURCE_ATTRIBUTE_MAP.keys()]
        attributes.PLURALS.update(dict(my_plurals))
        attr_map = RESOURCE_ATTRIBUTE_MAP[COLLECTION_NAME]
        collection_name = COLLECTION_NAME.replace('_', '-')
        # The DB-backed manager acts directly as the "plugin" here.
        controller = base.create_resource(
            collection_name,
            RESOURCE_NAME,
            servicetype_db.ServiceTypeManager.get_instance(),
            attr_map)
        return [extensions.ResourceExtension(collection_name,
                                             controller,
                                             attr_map=attr_map)]

    def get_extended_resources(self, version):
        if version == "2.0":
            return RESOURCE_ATTRIBUTE_MAP
        else:
            return {}
# (c) Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Swaminathan Vasudevan, Hewlett-Packard.
import abc

import six

from neutron.api import extensions
from neutron.api.v2 import attributes as attr
from neutron.api.v2 import resource_helper
from neutron.common import exceptions as qexception
from neutron.plugins.common import constants
from neutron.services import service_base


# VPNaaS API exceptions, mapped to HTTP errors by the API layer via
# their qexception base classes (NotFound, InvalidInput, InUse, ...).
class VPNServiceNotFound(qexception.NotFound):
    message = _("VPNService %(vpnservice_id)s could not be found")


class IPsecSiteConnectionNotFound(qexception.NotFound):
    message = _("ipsec_site_connection %(ipsecsite_conn_id)s not found")


class IPsecSiteConnectionDpdIntervalValueError(qexception.InvalidInput):
    # Raised when a DPD timeout does not exceed the DPD interval.
    message = _("ipsec_site_connection %(attr)s is "
                "equal to or less than dpd_interval")


class IPsecSiteConnectionMtuError(qexception.InvalidInput):
    message = _("ipsec_site_connection MTU %(mtu)d is too small "
                "for ipv%(version)s")


class IKEPolicyNotFound(qexception.NotFound):
    message = _("IKEPolicy %(ikepolicy_id)s could not be found")


class IPsecPolicyNotFound(qexception.NotFound):
    message = _("IPsecPolicy %(ipsecpolicy_id)s could not be found")


class IKEPolicyInUse(qexception.InUse):
    message = _("IKEPolicy %(ikepolicy_id)s is in use by existing "
                "IPsecSiteConnection and can't be updated or deleted")


class VPNServiceInUse(qexception.InUse):
    message = _("VPNService %(vpnservice_id)s is still in use")


class RouterInUseByVPNService(qexception.InUse):
    message = _("Router %(router_id)s is used by VPNService %(vpnservice_id)s")


class VPNStateInvalidToUpdate(qexception.BadRequest):
    message = _("Invalid state %(state)s of vpnaas resource %(id)s"
                " for updating")


class IPsecPolicyInUse(qexception.InUse):
    message = _("IPsecPolicy %(ipsecpolicy_id)s is in use by existing "
                "IPsecSiteConnection and can't be updated or deleted")


class DeviceDriverImportError(qexception.NeutronException):
    # NOTE(review): message has a misplaced colon ("driver :%s");
    # cosmetic only — left as-is since it is a runtime string.
    message = _("Can not load driver :%(device_driver)s")


class SubnetIsNotConnectedToRouter(qexception.BadRequest):
    message = _("Subnet %(subnet_id)s is not "
                "connected to Router %(router_id)s")
class RouterIsNotExternal(qexception.BadRequest):
    message = _("Router %(router_id)s has no external network gateway set")


# Allowed values for the various VPN attributes below.
vpn_supported_initiators = ['bi-directional', 'response-only']
vpn_supported_encryption_algorithms = ['3des', 'aes-128',
                                       'aes-192', 'aes-256']
vpn_dpd_supported_actions = [
    'hold', 'clear', 'restart', 'restart-by-peer', 'disabled'
]
vpn_supported_transform_protocols = ['esp', 'ah', 'ah-esp']
vpn_supported_encapsulation_mode = ['tunnel', 'transport']
# TODO(nati): add kilobytes when we support it
vpn_supported_lifetime_units = ['seconds']
vpn_supported_pfs = ['group2', 'group5', 'group14']
vpn_supported_ike_versions = ['v1', 'v2']
vpn_supported_auth_mode = ['psk']
vpn_supported_auth_algorithms = ['sha1']
vpn_supported_phase1_negotiation_mode = ['main']

# (min, max) ranges used with the 'type:range' validator.
vpn_lifetime_limits = (60, attr.UNLIMITED)
positive_int = (0, attr.UNLIMITED)

RESOURCE_ATTRIBUTE_MAP = {

    'vpnservices': {
        'id': {'allow_post': False, 'allow_put': False,
               'validate': {'type:uuid': None},
               'is_visible': True,
               'primary_key': True},
        'tenant_id': {'allow_post': True, 'allow_put': False,
                      'validate': {'type:string': None},
                      'required_by_policy': True,
                      'is_visible': True},
        'name': {'allow_post': True, 'allow_put': True,
                 'validate': {'type:string': None},
                 'is_visible': True, 'default': ''},
        'description': {'allow_post': True, 'allow_put': True,
                        'validate': {'type:string': None},
                        'is_visible': True, 'default': ''},
        'subnet_id': {'allow_post': True, 'allow_put': False,
                      'validate': {'type:uuid': None},
                      'is_visible': True},
        'router_id': {'allow_post': True, 'allow_put': False,
                      'validate': {'type:uuid': None},
                      'is_visible': True},
        'admin_state_up': {'allow_post': True, 'allow_put': True,
                           'default': True,
                           'convert_to': attr.convert_to_boolean,
                           'is_visible': True},
        'status': {'allow_post': False, 'allow_put': False,
                   'is_visible': True}
    },

    'ipsec_site_connections': {
        'id': {'allow_post': False, 'allow_put': False,
               'validate': {'type:uuid': None},
               'is_visible': True,
               'primary_key': True},
        'tenant_id': {'allow_post': True, 'allow_put': False,
                      'validate': {'type:string': None},
                      'required_by_policy': True,
                      'is_visible': True},
        'name': {'allow_post': True, 'allow_put': True,
                 'validate': {'type:string': None},
                 'is_visible': True, 'default': ''},
        'description': {'allow_post': True, 'allow_put': True,
                        'validate': {'type:string': None},
                        'is_visible': True, 'default': ''},
        'peer_address': {'allow_post': True, 'allow_put': True,
                         'validate': {'type:string': None},
                         'is_visible': True},
        'peer_id': {'allow_post': True, 'allow_put': True,
                    'validate': {'type:string': None},
                    'is_visible': True},
        'peer_cidrs': {'allow_post': True, 'allow_put': True,
                       'convert_to': attr.convert_to_list,
                       'validate': {'type:subnet_list': None},
                       'is_visible': True},
        # Only static routing is implemented, hence read-only.
        'route_mode': {'allow_post': False, 'allow_put': False,
                       'default': 'static',
                       'is_visible': True},
        'mtu': {'allow_post': True, 'allow_put': True,
                'default': '1500',
                'validate': {'type:range': positive_int},
                'convert_to': attr.convert_to_int,
                'is_visible': True},
        'initiator': {'allow_post': True, 'allow_put': True,
                      'default': 'bi-directional',
                      'validate': {'type:values': vpn_supported_initiators},
                      'is_visible': True},
        'auth_mode': {'allow_post': False, 'allow_put': False,
                      'default': 'psk',
                      'validate': {'type:values': vpn_supported_auth_mode},
                      'is_visible': True},
        'psk': {'allow_post': True, 'allow_put': True,
                'validate': {'type:string': None},
                'is_visible': True},
        # Dead Peer Detection sub-dict.
        # NOTE(review): validator key is 'actions' while the DPD field
        # used elsewhere appears to be singular 'action' — confirm
        # against the vpn_db consumers before relying on validation.
        'dpd': {'allow_post': True, 'allow_put': True,
                'convert_to': attr.convert_none_to_empty_dict,
                'is_visible': True,
                'default': {},
                'validate': {
                    'type:dict_or_empty': {
                        'actions': {
                            'type:values': vpn_dpd_supported_actions,
                        },
                        'interval': {
                            'type:range': positive_int
                        },
                        'timeout': {
                            'type:range': positive_int
                        }}}},
        'admin_state_up': {'allow_post': True, 'allow_put': True,
                           'default': True,
                           'convert_to': attr.convert_to_boolean,
                           'is_visible': True},
        'status': {'allow_post': False, 'allow_put': False,
                   'is_visible': True},
        'vpnservice_id': {'allow_post': True, 'allow_put': False,
                          'validate': {'type:uuid': None},
                          'is_visible': True},
        'ikepolicy_id': {'allow_post': True, 'allow_put': False,
                         'validate': {'type:uuid': None},
                         'is_visible': True},
        'ipsecpolicy_id': {'allow_post': True, 'allow_put': False,
                           'validate': {'type:uuid': None},
                           'is_visible': True}
    },

    'ipsecpolicies': {
        'id': {'allow_post': False, 'allow_put': False,
               'validate': {'type:uuid': None},
               'is_visible': True,
               'primary_key': True},
        'tenant_id': {'allow_post': True, 'allow_put': False,
                      'validate': {'type:string': None},
                      'required_by_policy': True,
                      'is_visible': True},
        'name': {'allow_post': True, 'allow_put': True,
                 'validate': {'type:string': None},
                 'is_visible': True, 'default': ''},
        'description': {'allow_post': True, 'allow_put': True,
                        'validate': {'type:string': None},
                        'is_visible': True, 'default': ''},
        'transform_protocol': {
            'allow_post': True,
            'allow_put': True,
            'default': 'esp',
            'validate': {
                'type:values': vpn_supported_transform_protocols},
            'is_visible': True},
        'auth_algorithm': {
            'allow_post': True,
            'allow_put': True,
            'default': 'sha1',
            'validate': {
                'type:values': vpn_supported_auth_algorithms
            },
            'is_visible': True},
        'encryption_algorithm': {
            'allow_post': True,
            'allow_put': True,
            'default': 'aes-128',
            'validate': {
                'type:values': vpn_supported_encryption_algorithms
            },
            'is_visible': True},
        'encapsulation_mode': {
            'allow_post': True,
            'allow_put': True,
            'default': 'tunnel',
            'validate': {
                'type:values': vpn_supported_encapsulation_mode
            },
            'is_visible': True},
        'lifetime': {'allow_post': True, 'allow_put': True,
                     'convert_to': attr.convert_none_to_empty_dict,
                     'default': {},
                     'validate': {
                         'type:dict_or_empty': {
                             'units': {
                                 'type:values': vpn_supported_lifetime_units,
                             },
                             'value': {
                                 'type:range': vpn_lifetime_limits
                             }}},
                     'is_visible': True},
        'pfs': {'allow_post': True, 'allow_put': True,
                'default': 'group5',
                'validate': {'type:values': vpn_supported_pfs},
                'is_visible': True}
    },

    'ikepolicies': {
        'id': {'allow_post': False, 'allow_put': False,
               'validate': {'type:uuid': None},
               'is_visible': True,
               'primary_key': True},
        'tenant_id': {'allow_post': True, 'allow_put': False,
                      'validate': {'type:string': None},
                      'required_by_policy': True,
                      'is_visible': True},
        'name': {'allow_post': True, 'allow_put': True,
                 'validate': {'type:string': None},
                 'is_visible': True, 'default': ''},
        'description': {'allow_post': True, 'allow_put': True,
                        'validate': {'type:string': None},
                        'is_visible': True, 'default': ''},
        'auth_algorithm': {'allow_post': True, 'allow_put': True,
                           'default': 'sha1',
                           'validate': {
                               'type:values': vpn_supported_auth_algorithms},
                           'is_visible': True},
        'encryption_algorithm': {
            'allow_post': True, 'allow_put': True,
            'default': 'aes-128',
            'validate': {'type:values': vpn_supported_encryption_algorithms},
            'is_visible': True},
        'phase1_negotiation_mode': {
            'allow_post': True, 'allow_put': True,
            'default': 'main',
            'validate': {
                'type:values': vpn_supported_phase1_negotiation_mode
            },
            'is_visible': True},
        'lifetime': {'allow_post': True, 'allow_put': True,
                     'convert_to': attr.convert_none_to_empty_dict,
                     'default': {},
                     'validate': {
                         'type:dict_or_empty': {
                             'units': {
                                 'type:values': vpn_supported_lifetime_units,
                             },
                             'value': {
                                 'type:range': vpn_lifetime_limits,
                             }}},
                     'is_visible': True},
        'ike_version': {'allow_post': True, 'allow_put': True,
                        'default': 'v1',
                        'validate': {
                            'type:values': vpn_supported_ike_versions},
                        'is_visible': True},
        'pfs': {'allow_post': True, 'allow_put': True,
                'default': 'group5',
                'validate': {'type:values': vpn_supported_pfs},
                'is_visible': True}
    }
}
class Vpnaas(extensions.ExtensionDescriptor):
    """VPN-as-a-Service API extension descriptor."""

    @classmethod
    def get_name(cls):
        return "VPN service"

    @classmethod
    def get_alias(cls):
        return "vpnaas"

    @classmethod
    def get_description(cls):
        return "Extension for VPN service"

    @classmethod
    def get_namespace(cls):
        return "https://wiki.openstack.org/Neutron/VPNaaS"

    @classmethod
    def get_updated(cls):
        return "2013-05-29T10:00:00-00:00"

    @classmethod
    def get_resources(cls):
        # Irregular plurals that key[:-1] would get wrong.
        special_mappings = {'ikepolicies': 'ikepolicy',
                            'ipsecpolicies': 'ipsecpolicy'}
        plural_mappings = resource_helper.build_plural_mappings(
            special_mappings, RESOURCE_ATTRIBUTE_MAP)
        # 'peer_cidrs' is a list attribute, not a resource collection.
        plural_mappings['peer_cidrs'] = 'peer_cidr'
        attr.PLURALS.update(plural_mappings)
        return resource_helper.build_resource_info(plural_mappings,
                                                   RESOURCE_ATTRIBUTE_MAP,
                                                   constants.VPN,
                                                   register_quota=True,
                                                   translate_name=True)

    @classmethod
    def get_plugin_interface(cls):
        return VPNPluginBase

    def update_attributes_map(self, attributes):
        super(Vpnaas, self).update_attributes_map(
            attributes, extension_attrs_map=RESOURCE_ATTRIBUTE_MAP)

    def get_extended_resources(self, version):
        if version == "2.0":
            return RESOURCE_ATTRIBUTE_MAP
        else:
            return {}
@six.add_metaclass(abc.ABCMeta)
class VPNPluginBase(service_base.ServicePluginBase):
    """Abstract CRUD interface a VPNaaS service plugin must implement."""

    def get_plugin_name(self):
        return constants.VPN

    def get_plugin_type(self):
        return constants.VPN

    def get_plugin_description(self):
        return 'VPN service plugin'

    @abc.abstractmethod
    def get_vpnservices(self, context, filters=None, fields=None):
        pass

    @abc.abstractmethod
    def get_vpnservice(self, context, vpnservice_id, fields=None):
        pass

    @abc.abstractmethod
    def create_vpnservice(self, context, vpnservice):
        pass

    @abc.abstractmethod
    def update_vpnservice(self, context, vpnservice_id, vpnservice):
        pass

    @abc.abstractmethod
    def delete_vpnservice(self, context, vpnservice_id):
        pass

    @abc.abstractmethod
    def get_ipsec_site_connections(self, context, filters=None, fields=None):
        pass

    @abc.abstractmethod
    def get_ipsec_site_connection(self, context,
                                  ipsecsite_conn_id, fields=None):
        pass

    @abc.abstractmethod
    def create_ipsec_site_connection(self, context, ipsec_site_connection):
        pass

    @abc.abstractmethod
    def update_ipsec_site_connection(self, context,
                                     ipsecsite_conn_id, ipsec_site_connection):
        pass

    @abc.abstractmethod
    def delete_ipsec_site_connection(self, context, ipsecsite_conn_id):
        pass

    @abc.abstractmethod
    def get_ikepolicy(self, context, ikepolicy_id, fields=None):
        pass

    @abc.abstractmethod
    def get_ikepolicies(self, context, filters=None, fields=None):
        pass

    @abc.abstractmethod
    def create_ikepolicy(self, context, ikepolicy):
        pass

    @abc.abstractmethod
    def update_ikepolicy(self, context, ikepolicy_id, ikepolicy):
        pass

    @abc.abstractmethod
    def delete_ikepolicy(self, context, ikepolicy_id):
        pass

    @abc.abstractmethod
    def get_ipsecpolicies(self, context, filters=None, fields=None):
        pass

    @abc.abstractmethod
    def get_ipsecpolicy(self, context, ipsecpolicy_id, fields=None):
        pass

    @abc.abstractmethod
    def create_ipsecpolicy(self, context, ipsecpolicy):
        pass

    @abc.abstractmethod
    def update_ipsecpolicy(self, context, ipsecpolicy_id, ipsecpolicy):
        pass

    @abc.abstractmethod
    def delete_ipsecpolicy(self, context, ipsecpolicy_id):
        pass
import re

import pep8

# NOTE: this string is informational only — placed after the imports it
# is a plain expression statement, not the module docstring.
"""
Guidelines for writing new hacking checks

 - Use only for Neutron specific tests. OpenStack general tests
   should be submitted to the common 'hacking' module.
 - Pick numbers in the range N3xx. Find the current test with
   the highest allocated number and then pick the next value.
 - Keep the test method code in the source file ordered based
   on the N3xx value.
 - List the new rule in the top level HACKING.rst file
 - Add test cases for each new rule to
   neutron/tests/unit/test_hacking.py

"""

# Matches a LOG.<level>( call whose first argument starts with a bare
# string literal (i.e. not wrapped in a translation function).
log_translation = re.compile(
    r"(.)*LOG\.(audit|error|info|warn|warning|critical|exception)\(\s*('|\")")


def validate_log_translations(logical_line, physical_line, filename):
    """N320: flag LOG messages that are not marked for translation.

    Yields (offset, message) tuples in the flake8 check convention.
    """
    # Translations are not required in the test directory
    if "neutron/tests" in filename:
        return
    # Honor inline '# noqa' suppressions.
    if pep8.noqa(physical_line):
        return
    msg = "N320: Log messages require translations!"
    if log_translation.match(logical_line):
        yield (0, msg)


def factory(register):
    """Entry point called by hacking to register local checks."""
    register(validate_log_translations)
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import sys + + +def setup_hook(config): + """Filter config parsed from a setup.cfg to inject our defaults.""" + metadata = config['metadata'] + requires = metadata.get('requires_dist', '').split('\n') + if sys.platform == 'win32': + requires.append('pywin32') + requires.append('wmi') + metadata['requires_dist'] = "\n".join(requires) + config['metadata'] = metadata diff --git a/neutron/locale/de/LC_MESSAGES/neutron-log-error.po b/neutron/locale/de/LC_MESSAGES/neutron-log-error.po new file mode 100644 index 000000000..48d2edcb4 --- /dev/null +++ b/neutron/locale/de/LC_MESSAGES/neutron-log-error.po @@ -0,0 +1,170 @@ +# Translations template for neutron. +# Copyright (C) 2014 ORGANIZATION +# This file is distributed under the same license as the neutron project. 
+# +# Translators: +msgid "" +msgstr "" +"Project-Id-Version: Neutron\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2014-06-09 06:08+0000\n" +"PO-Revision-Date: 2014-05-29 07:49+0000\n" +"Last-Translator: FULL NAME \n" +"Language-Team: German (http://www.transifex.com/projects/p/neutron/language/" +"de/)\n" +"Language: de\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 1.3\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +#: neutron/openstack/common/excutils.py:76 +#, python-format +msgid "Original exception being dropped: %s" +msgstr "Ursprüngliche Ausnahme wird gelöscht: %s" + +#: neutron/openstack/common/excutils.py:105 +#, python-format +msgid "Unexpected exception occurred %d time(s)... retrying." +msgstr "Unerwartete Ausnahme %d mal(e) aufgetreten... Neuversuch." + +#: neutron/openstack/common/periodic_task.py:179 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "" + +#: neutron/openstack/common/service.py:188 +msgid "Exception during rpc cleanup." +msgstr "" + +#: neutron/openstack/common/service.py:276 +msgid "Unhandled exception" +msgstr "Nicht behandelte Ausnahme" + +#: neutron/openstack/common/db/api.py:72 +msgid "DB exceeded retry limit." +msgstr "" + +#: neutron/openstack/common/db/api.py:76 +msgid "DB connection error." +msgstr "" + +#: neutron/openstack/common/db/sqlalchemy/session.py:460 +msgid "DB exception wrapped." +msgstr "Datenbankausnahme eingeschlossen." 
+ +#: neutron/openstack/common/middleware/notifier.py:40 +#, python-format +msgid "An exception occurred processing the API call: %s " +msgstr "" + +#: neutron/openstack/common/rpc/amqp.py:480 +#: neutron/openstack/common/rpc/impl_zmq.py:286 +msgid "Exception during message handling" +msgstr "Ausnahme bei Nachrichtenbehandlung" + +#: neutron/openstack/common/rpc/common.py:88 +msgid "Exception in string format operation" +msgstr "Ausnahme bei Zeichenfolgeformatoperation" + +#: neutron/openstack/common/rpc/common.py:292 +#, python-format +msgid "Returning exception %s to caller" +msgstr "Ausnahme %s wird an Aufrufenden zurückgegeben" + +#: neutron/openstack/common/rpc/impl_kombu.py:156 +msgid "Failed to process message ... skipping it." +msgstr "" + +#: neutron/openstack/common/rpc/impl_kombu.py:160 +msgid "Failed to process message ... will requeue." +msgstr "" + +#: neutron/openstack/common/rpc/impl_kombu.py:571 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" +"AMQP-Server auf %(hostname)s:%(port)d ist nicht erreichbar: %(err_str)s. " +"Erneuter Versuch in %(sleep_time)d Sekunden." 
+ +#: neutron/openstack/common/rpc/impl_kombu.py:625 +#: neutron/openstack/common/rpc/impl_qpid.py:575 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "" +"Fehler beim Deklarieren von Consumer für Topic '%(topic)s': %(err_str)s" + +#: neutron/openstack/common/rpc/impl_kombu.py:647 +#: neutron/openstack/common/rpc/impl_qpid.py:594 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "Nachricht aus Warteschlange wurde nicht verarbeitet: %s" + +#: neutron/openstack/common/rpc/impl_kombu.py:686 +#: neutron/openstack/common/rpc/impl_qpid.py:629 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "" +"Fehler beim Veröffentlichen von Nachricht zu Topic '%(topic)s': %(err_str)s" + +#: neutron/openstack/common/rpc/impl_qpid.py:191 +msgid "Failed to process message... skipping it." +msgstr "Nachricht wurde nicht verarbeitet und wird übersprungen." + +#: neutron/openstack/common/rpc/impl_qpid.py:517 +#, python-format +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" +msgstr "" +"Verbindung zu AMQP-Server kann nicht hergestellt werden: %(e)s. %(delay)s " +"Sekunden Ruhemodus" + +#: neutron/openstack/common/rpc/impl_qpid.py:602 +msgid "Error processing message. Skipping it." +msgstr "Fehler beim Verarbeiten der Nachricht. Wird übersprungen." + +#: neutron/openstack/common/rpc/impl_zmq.py:96 +msgid "JSON serialization failed." +msgstr "JSON-Serialisierung fehlgeschlagen." + +#: neutron/openstack/common/rpc/impl_zmq.py:195 +msgid "ZeroMQ socket could not be closed." +msgstr "" + +#: neutron/openstack/common/rpc/impl_zmq.py:339 +msgid "RPC message did not include method." +msgstr "RPC-Nachricht hat keine Methode enthalten." + +#: neutron/openstack/common/rpc/impl_zmq.py:476 +msgid "Topic socket file creation failed." +msgstr "Erstellung von Topicsocketdatei fehlgeschlagen." 
+ +#: neutron/openstack/common/rpc/impl_zmq.py:482 +#, python-format +msgid "" +"Local per-topic backlog buffer full for topic %(topic)s. Dropping message." +msgstr "" +"Lokaler topicbezogener Rückstandspuffer für Topic %(topic)s voll. Nachricht " +"wird gelöscht." + +#: neutron/openstack/common/rpc/impl_zmq.py:498 +#, python-format +msgid "Required IPC directory does not exist at %s" +msgstr "das benötigte IPC-Verzeichnis existiert nicht unter %s" + +#: neutron/openstack/common/rpc/impl_zmq.py:507 +#, python-format +msgid "Permission denied to IPC directory at %s" +msgstr "Zugriff verweigert zum IPC Verzeichnis %s" + +#: neutron/openstack/common/rpc/impl_zmq.py:510 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." +msgstr "" +"ZeroMQ-Empfängerdämon konnte nicht erstellt werden. Socket ist " +"möglicherweise bereits belegt." + +#: neutron/openstack/common/rpc/impl_zmq.py:563 +msgid "ZMQ Envelope version unsupported or unknown." +msgstr "ZMQ-Umschlagsversion nicht unterstützt oder unbekannt." diff --git a/neutron/locale/de/LC_MESSAGES/neutron-log-info.po b/neutron/locale/de/LC_MESSAGES/neutron-log-info.po new file mode 100644 index 000000000..338a6e23d --- /dev/null +++ b/neutron/locale/de/LC_MESSAGES/neutron-log-info.po @@ -0,0 +1,131 @@ +# Translations template for neutron. +# Copyright (C) 2014 ORGANIZATION +# This file is distributed under the same license as the neutron project. 
+# +# Translators: +# Carsten Duch , 2014 +msgid "" +msgstr "" +"Project-Id-Version: Neutron\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2014-06-16 06:08+0000\n" +"PO-Revision-Date: 2014-06-14 19:31+0000\n" +"Last-Translator: Carsten Duch \n" +"Language-Team: German (http://www.transifex.com/projects/p/neutron/language/" +"de/)\n" +"Language: de\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 1.3\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +#: neutron/openstack/common/periodic_task.py:125 +#, python-format +msgid "Skipping periodic task %(task)s because its interval is negative" +msgstr "" + +#: neutron/openstack/common/periodic_task.py:130 +#, python-format +msgid "Skipping periodic task %(task)s because it is disabled" +msgstr "" + +#: neutron/openstack/common/service.py:176 +#, python-format +msgid "Caught %s, exiting" +msgstr "%s abgefangen. Vorgang wird beendet" + +#: neutron/openstack/common/service.py:240 +msgid "Parent process has died unexpectedly, exiting" +msgstr "" +"Übergeordneter Prozess wurde unerwartet abgebrochen. 
Vorgang wird beendet" + +#: neutron/openstack/common/service.py:271 +#, python-format +msgid "Child caught %s, exiting" +msgstr "" + +#: neutron/openstack/common/service.py:310 +msgid "Forking too fast, sleeping" +msgstr "Verzweigung zu schnell; im Ruhemodus" + +#: neutron/openstack/common/service.py:329 +#, python-format +msgid "Started child %d" +msgstr "Untergeordnetes Element %d gestartet" + +#: neutron/openstack/common/service.py:339 +#, python-format +msgid "Starting %d workers" +msgstr "Starten von %d Workers" + +#: neutron/openstack/common/service.py:356 +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" +msgstr "Untergeordnetes Element %(pid)d durch Signal %(sig)d abgebrochen" + +#: neutron/openstack/common/service.py:360 +#, python-format +msgid "Child %(pid)s exited with status %(code)d" +msgstr "Untergeordnete %(pid)s mit Status %(code)d beendet" + +#: neutron/openstack/common/service.py:399 +#, python-format +msgid "Caught %s, stopping children" +msgstr "%s abgefangen, untergeordnete Elemente werden gestoppt" + +#: neutron/openstack/common/service.py:408 +msgid "Wait called after thread killed. Cleaning up." +msgstr "Warten aufgerufen nach dem der Thread abgebrochen wurde. Bereinige." 
+ +#: neutron/openstack/common/service.py:424 +#, python-format +msgid "Waiting on %d children to exit" +msgstr "Warten auf Beenden von %d untergeordneten Elementen" + +#: neutron/openstack/common/db/sqlalchemy/utils.py:379 +#, python-format +msgid "Deleting duplicated row with id: %(id)s from table: %(table)s" +msgstr "Lösche doppelte Zeile mit der ID %(id)s aus der Tabelle %(table)s" + +#: neutron/openstack/common/rpc/impl_kombu.py:498 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "" +"Wiederherstellung der Verbindung zu AMQP-Server auf %(hostname)s:%(port)d" + +#: neutron/openstack/common/rpc/impl_kombu.py:520 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "Verbunden mit AMQP-Server auf %(hostname)s:%(port)d" + +#: neutron/openstack/common/rpc/impl_qpid.py:523 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "Verbunden mit AMQP-Server auf %s" + +#: neutron/openstack/common/rpc/impl_zmq.py:371 +msgid "Registering reactor" +msgstr "Registrieren von Reaktor" + +#: neutron/openstack/common/rpc/impl_zmq.py:383 +msgid "In reactor registered" +msgstr "Eingangsreaktor registriert" + +#: neutron/openstack/common/rpc/impl_zmq.py:388 +msgid "Consuming socket" +msgstr "Socketverwendung" + +#: neutron/openstack/common/rpc/impl_zmq.py:438 +#, python-format +msgid "Creating proxy for topic: %s" +msgstr "Erstellen von Proxy für Topic: %s" + +#: neutron/openstack/common/rpc/impl_zmq.py:591 +msgid "Skipping topic registration. Already registered." +msgstr "Überspringen von Topicregistrierung. Bereits registriert." 
+ +#: neutron/openstack/common/rpc/matchmaker.py:216 +#, python-format +msgid "Matchmaker unregistered: %(key)s, %(host)s" +msgstr "MatchMaker nicht registriert: %(key)s, %(host)s" diff --git a/neutron/locale/de/LC_MESSAGES/neutron-log-warning.po b/neutron/locale/de/LC_MESSAGES/neutron-log-warning.po new file mode 100644 index 000000000..71ff0eb13 --- /dev/null +++ b/neutron/locale/de/LC_MESSAGES/neutron-log-warning.po @@ -0,0 +1,57 @@ +# Translations template for neutron. +# Copyright (C) 2014 ORGANIZATION +# This file is distributed under the same license as the neutron project. +# +# Translators: +msgid "" +msgstr "" +"Project-Id-Version: Neutron\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2014-06-09 06:08+0000\n" +"PO-Revision-Date: 2014-05-29 07:49+0000\n" +"Last-Translator: FULL NAME \n" +"Language-Team: German (http://www.transifex.com/projects/p/neutron/language/" +"de/)\n" +"Language: de\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 1.3\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +#: neutron/openstack/common/service.py:363 +#, python-format +msgid "pid %d not in child list" +msgstr "PID %d nicht in Liste untergeordneter Elemente" + +#: neutron/openstack/common/db/sqlalchemy/session.py:506 +#, python-format +msgid "Database server has gone away: %s" +msgstr "Datenbankserver ist nicht mehr vorhanden: %s" + +#: neutron/openstack/common/db/sqlalchemy/session.py:559 +msgid "Unable to detect effective SQL mode" +msgstr "Unfähig geltenden SQL Modus zu erkennen." + +#: neutron/openstack/common/db/sqlalchemy/session.py:567 +#, python-format +msgid "" +"MySQL SQL mode is '%s', consider enabling TRADITIONAL or STRICT_ALL_TABLES" +msgstr "" +"MySQL SQL Modus ist '%s', erwägen Sie TRADITIONAL oder STRICT_ALL_TABLES zu " +"aktivieren" + +#: neutron/openstack/common/db/sqlalchemy/session.py:673 +#, python-format +msgid "SQL connection failed. 
%s attempts left." +msgstr "SQL-Verbindung fehlgeschlagen. Noch %s weitere Versuche übrig." + +#: neutron/openstack/common/db/sqlalchemy/utils.py:97 +msgid "Id not in sort_keys; is sort_keys unique?" +msgstr "ID ist nicht in 'sort_keys' enthalten; ist 'sort_keys' eindeutig?" + +#: neutron/openstack/common/rpc/matchmaker_ring.py:75 +#: neutron/openstack/common/rpc/matchmaker_ring.py:93 +#, python-format +msgid "No key defining hosts for topic '%s', see ringfile" +msgstr "Keine schlüsseldefinierenden Hosts für Topic '%s', siehe Ringdatei" diff --git a/neutron/locale/en_AU/LC_MESSAGES/neutron-log-error.po b/neutron/locale/en_AU/LC_MESSAGES/neutron-log-error.po new file mode 100644 index 000000000..e8e05cae2 --- /dev/null +++ b/neutron/locale/en_AU/LC_MESSAGES/neutron-log-error.po @@ -0,0 +1,163 @@ +# Translations template for neutron. +# Copyright (C) 2014 ORGANIZATION +# This file is distributed under the same license as the neutron project. +# +# Translators: +msgid "" +msgstr "" +"Project-Id-Version: Neutron\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2014-06-09 06:08+0000\n" +"PO-Revision-Date: 2014-05-29 07:49+0000\n" +"Last-Translator: FULL NAME \n" +"Language-Team: English (Australia) (http://www.transifex.com/projects/p/" +"neutron/language/en_AU/)\n" +"Language: en_AU\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 1.3\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +#: neutron/openstack/common/excutils.py:76 +#, python-format +msgid "Original exception being dropped: %s" +msgstr "Original exception being dropped: %s" + +#: neutron/openstack/common/excutils.py:105 +#, python-format +msgid "Unexpected exception occurred %d time(s)... retrying." +msgstr "Unexpected exception occurred %d time(s)... retrying." 
+ +#: neutron/openstack/common/periodic_task.py:179 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "" + +#: neutron/openstack/common/service.py:188 +msgid "Exception during rpc cleanup." +msgstr "Exception during rpc cleanup." + +#: neutron/openstack/common/service.py:276 +msgid "Unhandled exception" +msgstr "Unhandled exception" + +#: neutron/openstack/common/db/api.py:72 +msgid "DB exceeded retry limit." +msgstr "" + +#: neutron/openstack/common/db/api.py:76 +msgid "DB connection error." +msgstr "" + +#: neutron/openstack/common/db/sqlalchemy/session.py:460 +msgid "DB exception wrapped." +msgstr "DB exception wrapped." + +#: neutron/openstack/common/middleware/notifier.py:40 +#, python-format +msgid "An exception occurred processing the API call: %s " +msgstr "" + +#: neutron/openstack/common/rpc/amqp.py:480 +#: neutron/openstack/common/rpc/impl_zmq.py:286 +msgid "Exception during message handling" +msgstr "Exception during message handling" + +#: neutron/openstack/common/rpc/common.py:88 +msgid "Exception in string format operation" +msgstr "Exception in string format operation" + +#: neutron/openstack/common/rpc/common.py:292 +#, python-format +msgid "Returning exception %s to caller" +msgstr "Returning exception %s to caller" + +#: neutron/openstack/common/rpc/impl_kombu.py:156 +msgid "Failed to process message ... skipping it." +msgstr "Failed to process message ... skipping it." + +#: neutron/openstack/common/rpc/impl_kombu.py:160 +msgid "Failed to process message ... will requeue." +msgstr "Failed to process message ... will requeue." + +#: neutron/openstack/common/rpc/impl_kombu.py:571 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." 
+ +#: neutron/openstack/common/rpc/impl_kombu.py:625 +#: neutron/openstack/common/rpc/impl_qpid.py:575 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "Failed to declare consumer for topic '%(topic)s': %(err_str)s" + +#: neutron/openstack/common/rpc/impl_kombu.py:647 +#: neutron/openstack/common/rpc/impl_qpid.py:594 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "Failed to consume message from queue: %s" + +#: neutron/openstack/common/rpc/impl_kombu.py:686 +#: neutron/openstack/common/rpc/impl_qpid.py:629 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "Failed to publish message to topic '%(topic)s': %(err_str)s" + +#: neutron/openstack/common/rpc/impl_qpid.py:191 +msgid "Failed to process message... skipping it." +msgstr "Failed to process message... skipping it." + +#: neutron/openstack/common/rpc/impl_qpid.py:517 +#, python-format +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" +msgstr "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" + +#: neutron/openstack/common/rpc/impl_qpid.py:602 +msgid "Error processing message. Skipping it." +msgstr "Error processing message. Skipping it." + +#: neutron/openstack/common/rpc/impl_zmq.py:96 +msgid "JSON serialization failed." +msgstr "JSON serialization failed." + +#: neutron/openstack/common/rpc/impl_zmq.py:195 +msgid "ZeroMQ socket could not be closed." +msgstr "ZeroMQ socket could not be closed." + +#: neutron/openstack/common/rpc/impl_zmq.py:339 +msgid "RPC message did not include method." +msgstr "RPC message did not include method." + +#: neutron/openstack/common/rpc/impl_zmq.py:476 +msgid "Topic socket file creation failed." +msgstr "Topic socket file creation failed." + +#: neutron/openstack/common/rpc/impl_zmq.py:482 +#, python-format +msgid "" +"Local per-topic backlog buffer full for topic %(topic)s. Dropping message." 
+msgstr "" +"Local per-topic backlog buffer full for topic %(topic)s. Dropping message." + +#: neutron/openstack/common/rpc/impl_zmq.py:498 +#, python-format +msgid "Required IPC directory does not exist at %s" +msgstr "Required IPC directory does not exist at %s" + +#: neutron/openstack/common/rpc/impl_zmq.py:507 +#, python-format +msgid "Permission denied to IPC directory at %s" +msgstr "Permission denied to IPC directory at %s" + +#: neutron/openstack/common/rpc/impl_zmq.py:510 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." +msgstr "Could not create ZeroMQ receiver daemon. Socket may already be in use." + +#: neutron/openstack/common/rpc/impl_zmq.py:563 +msgid "ZMQ Envelope version unsupported or unknown." +msgstr "ZMQ Envelope version unsupported or unknown." diff --git a/neutron/locale/en_AU/LC_MESSAGES/neutron-log-info.po b/neutron/locale/en_AU/LC_MESSAGES/neutron-log-info.po new file mode 100644 index 000000000..1c16092b3 --- /dev/null +++ b/neutron/locale/en_AU/LC_MESSAGES/neutron-log-info.po @@ -0,0 +1,128 @@ +# Translations template for neutron. +# Copyright (C) 2014 ORGANIZATION +# This file is distributed under the same license as the neutron project. 
+# +# Translators: +msgid "" +msgstr "" +"Project-Id-Version: Neutron\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2014-06-16 06:08+0000\n" +"PO-Revision-Date: 2014-05-29 07:49+0000\n" +"Last-Translator: FULL NAME \n" +"Language-Team: English (Australia) (http://www.transifex.com/projects/p/" +"neutron/language/en_AU/)\n" +"Language: en_AU\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 1.3\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +#: neutron/openstack/common/periodic_task.py:125 +#, python-format +msgid "Skipping periodic task %(task)s because its interval is negative" +msgstr "" + +#: neutron/openstack/common/periodic_task.py:130 +#, python-format +msgid "Skipping periodic task %(task)s because it is disabled" +msgstr "" + +#: neutron/openstack/common/service.py:176 +#, python-format +msgid "Caught %s, exiting" +msgstr "Caught %s, exiting" + +#: neutron/openstack/common/service.py:240 +msgid "Parent process has died unexpectedly, exiting" +msgstr "Parent process has died unexpectedly, exiting" + +#: neutron/openstack/common/service.py:271 +#, python-format +msgid "Child caught %s, exiting" +msgstr "" + +#: neutron/openstack/common/service.py:310 +msgid "Forking too fast, sleeping" +msgstr "Forking too fast, sleeping" + +#: neutron/openstack/common/service.py:329 +#, python-format +msgid "Started child %d" +msgstr "Started child %d" + +#: neutron/openstack/common/service.py:339 +#, python-format +msgid "Starting %d workers" +msgstr "Starting %d workers" + +#: neutron/openstack/common/service.py:356 +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" +msgstr "Child %(pid)d killed by signal %(sig)d" + +#: neutron/openstack/common/service.py:360 +#, python-format +msgid "Child %(pid)s exited with status %(code)d" +msgstr "Child %(pid)s exited with status %(code)d" + +#: neutron/openstack/common/service.py:399 +#, python-format +msgid "Caught 
%s, stopping children" +msgstr "Caught %s, stopping children" + +#: neutron/openstack/common/service.py:408 +msgid "Wait called after thread killed. Cleaning up." +msgstr "" + +#: neutron/openstack/common/service.py:424 +#, python-format +msgid "Waiting on %d children to exit" +msgstr "Waiting on %d children to exit" + +#: neutron/openstack/common/db/sqlalchemy/utils.py:379 +#, python-format +msgid "Deleting duplicated row with id: %(id)s from table: %(table)s" +msgstr "Deleting duplicated row with id: %(id)s from table: %(table)s" + +#: neutron/openstack/common/rpc/impl_kombu.py:498 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "Reconnecting to AMQP server on %(hostname)s:%(port)d" + +#: neutron/openstack/common/rpc/impl_kombu.py:520 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "Connected to AMQP server on %(hostname)s:%(port)d" + +#: neutron/openstack/common/rpc/impl_qpid.py:523 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "Connected to AMQP server on %s" + +#: neutron/openstack/common/rpc/impl_zmq.py:371 +msgid "Registering reactor" +msgstr "Registering reactor" + +#: neutron/openstack/common/rpc/impl_zmq.py:383 +msgid "In reactor registered" +msgstr "In reactor registered" + +#: neutron/openstack/common/rpc/impl_zmq.py:388 +msgid "Consuming socket" +msgstr "Consuming socket" + +#: neutron/openstack/common/rpc/impl_zmq.py:438 +#, python-format +msgid "Creating proxy for topic: %s" +msgstr "Creating proxy for topic: %s" + +#: neutron/openstack/common/rpc/impl_zmq.py:591 +msgid "Skipping topic registration. Already registered." +msgstr "Skipping topic registration. Already registered." 
+ +#: neutron/openstack/common/rpc/matchmaker.py:216 +#, python-format +msgid "Matchmaker unregistered: %(key)s, %(host)s" +msgstr "Matchmaker unregistered: %(key)s, %(host)s" diff --git a/neutron/locale/en_GB/LC_MESSAGES/neutron-log-error.po b/neutron/locale/en_GB/LC_MESSAGES/neutron-log-error.po new file mode 100644 index 000000000..75ea2a0bc --- /dev/null +++ b/neutron/locale/en_GB/LC_MESSAGES/neutron-log-error.po @@ -0,0 +1,163 @@ +# Translations template for neutron. +# Copyright (C) 2014 ORGANIZATION +# This file is distributed under the same license as the neutron project. +# +# Translators: +msgid "" +msgstr "" +"Project-Id-Version: Neutron\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2014-06-09 06:08+0000\n" +"PO-Revision-Date: 2014-05-29 07:49+0000\n" +"Last-Translator: FULL NAME \n" +"Language-Team: English (United Kingdom) (http://www.transifex.com/projects/p/" +"neutron/language/en_GB/)\n" +"Language: en_GB\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 1.3\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +#: neutron/openstack/common/excutils.py:76 +#, python-format +msgid "Original exception being dropped: %s" +msgstr "Original exception being dropped: %s" + +#: neutron/openstack/common/excutils.py:105 +#, python-format +msgid "Unexpected exception occurred %d time(s)... retrying." +msgstr "Unexpected exception occurred %d time(s)... retrying." + +#: neutron/openstack/common/periodic_task.py:179 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "" + +#: neutron/openstack/common/service.py:188 +msgid "Exception during rpc cleanup." +msgstr "Exception during rpc cleanup." + +#: neutron/openstack/common/service.py:276 +msgid "Unhandled exception" +msgstr "Unhandled exception" + +#: neutron/openstack/common/db/api.py:72 +msgid "DB exceeded retry limit." 
+msgstr "" + +#: neutron/openstack/common/db/api.py:76 +msgid "DB connection error." +msgstr "" + +#: neutron/openstack/common/db/sqlalchemy/session.py:460 +msgid "DB exception wrapped." +msgstr "DB exception wrapped." + +#: neutron/openstack/common/middleware/notifier.py:40 +#, python-format +msgid "An exception occurred processing the API call: %s " +msgstr "" + +#: neutron/openstack/common/rpc/amqp.py:480 +#: neutron/openstack/common/rpc/impl_zmq.py:286 +msgid "Exception during message handling" +msgstr "Exception during message handling" + +#: neutron/openstack/common/rpc/common.py:88 +msgid "Exception in string format operation" +msgstr "Exception in string format operation" + +#: neutron/openstack/common/rpc/common.py:292 +#, python-format +msgid "Returning exception %s to caller" +msgstr "Returning exception %s to caller" + +#: neutron/openstack/common/rpc/impl_kombu.py:156 +msgid "Failed to process message ... skipping it." +msgstr "Failed to process message ... skipping it." + +#: neutron/openstack/common/rpc/impl_kombu.py:160 +msgid "Failed to process message ... will requeue." +msgstr "Failed to process message ... will requeue." + +#: neutron/openstack/common/rpc/impl_kombu.py:571 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." 
+ +#: neutron/openstack/common/rpc/impl_kombu.py:625 +#: neutron/openstack/common/rpc/impl_qpid.py:575 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "Failed to declare consumer for topic '%(topic)s': %(err_str)s" + +#: neutron/openstack/common/rpc/impl_kombu.py:647 +#: neutron/openstack/common/rpc/impl_qpid.py:594 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "Failed to consume message from queue: %s" + +#: neutron/openstack/common/rpc/impl_kombu.py:686 +#: neutron/openstack/common/rpc/impl_qpid.py:629 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "Failed to publish message to topic '%(topic)s': %(err_str)s" + +#: neutron/openstack/common/rpc/impl_qpid.py:191 +msgid "Failed to process message... skipping it." +msgstr "Failed to process message... skipping it." + +#: neutron/openstack/common/rpc/impl_qpid.py:517 +#, python-format +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" +msgstr "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" + +#: neutron/openstack/common/rpc/impl_qpid.py:602 +msgid "Error processing message. Skipping it." +msgstr "Error processing message. Skipping it." + +#: neutron/openstack/common/rpc/impl_zmq.py:96 +msgid "JSON serialization failed." +msgstr "JSON serialization failed." + +#: neutron/openstack/common/rpc/impl_zmq.py:195 +msgid "ZeroMQ socket could not be closed." +msgstr "" + +#: neutron/openstack/common/rpc/impl_zmq.py:339 +msgid "RPC message did not include method." +msgstr "RPC message did not include method." + +#: neutron/openstack/common/rpc/impl_zmq.py:476 +msgid "Topic socket file creation failed." +msgstr "Topic socket file creation failed." + +#: neutron/openstack/common/rpc/impl_zmq.py:482 +#, python-format +msgid "" +"Local per-topic backlog buffer full for topic %(topic)s. Dropping message." 
+msgstr "" +"Local per-topic backlog buffer full for topic %(topic)s. Dropping message." + +#: neutron/openstack/common/rpc/impl_zmq.py:498 +#, python-format +msgid "Required IPC directory does not exist at %s" +msgstr "Required IPC directory does not exist at %s" + +#: neutron/openstack/common/rpc/impl_zmq.py:507 +#, python-format +msgid "Permission denied to IPC directory at %s" +msgstr "Permission denied to IPC directory at %s" + +#: neutron/openstack/common/rpc/impl_zmq.py:510 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." +msgstr "Could not create ZeroMQ receiver daemon. Socket may already be in use." + +#: neutron/openstack/common/rpc/impl_zmq.py:563 +msgid "ZMQ Envelope version unsupported or unknown." +msgstr "ZMQ Envelope version unsupported or unknown." diff --git a/neutron/locale/en_GB/LC_MESSAGES/neutron-log-info.po b/neutron/locale/en_GB/LC_MESSAGES/neutron-log-info.po new file mode 100644 index 000000000..9586f9ea5 --- /dev/null +++ b/neutron/locale/en_GB/LC_MESSAGES/neutron-log-info.po @@ -0,0 +1,128 @@ +# Translations template for neutron. +# Copyright (C) 2014 ORGANIZATION +# This file is distributed under the same license as the neutron project. 
+# +# Translators: +msgid "" +msgstr "" +"Project-Id-Version: Neutron\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2014-06-16 06:08+0000\n" +"PO-Revision-Date: 2014-05-29 07:49+0000\n" +"Last-Translator: FULL NAME \n" +"Language-Team: English (United Kingdom) (http://www.transifex.com/projects/p/" +"neutron/language/en_GB/)\n" +"Language: en_GB\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 1.3\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +#: neutron/openstack/common/periodic_task.py:125 +#, python-format +msgid "Skipping periodic task %(task)s because its interval is negative" +msgstr "" + +#: neutron/openstack/common/periodic_task.py:130 +#, python-format +msgid "Skipping periodic task %(task)s because it is disabled" +msgstr "" + +#: neutron/openstack/common/service.py:176 +#, python-format +msgid "Caught %s, exiting" +msgstr "Caught %s, exiting" + +#: neutron/openstack/common/service.py:240 +msgid "Parent process has died unexpectedly, exiting" +msgstr "Parent process has died unexpectedly, exiting" + +#: neutron/openstack/common/service.py:271 +#, python-format +msgid "Child caught %s, exiting" +msgstr "" + +#: neutron/openstack/common/service.py:310 +msgid "Forking too fast, sleeping" +msgstr "Forking too fast, sleeping" + +#: neutron/openstack/common/service.py:329 +#, python-format +msgid "Started child %d" +msgstr "Started child %d" + +#: neutron/openstack/common/service.py:339 +#, python-format +msgid "Starting %d workers" +msgstr "Starting %d workers" + +#: neutron/openstack/common/service.py:356 +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" +msgstr "Child %(pid)d killed by signal %(sig)d" + +#: neutron/openstack/common/service.py:360 +#, python-format +msgid "Child %(pid)s exited with status %(code)d" +msgstr "Child %(pid)s exited with status %(code)d" + +#: neutron/openstack/common/service.py:399 +#, python-format +msgid 
"Caught %s, stopping children" +msgstr "Caught %s, stopping children" + +#: neutron/openstack/common/service.py:408 +msgid "Wait called after thread killed. Cleaning up." +msgstr "" + +#: neutron/openstack/common/service.py:424 +#, python-format +msgid "Waiting on %d children to exit" +msgstr "Waiting on %d children to exit" + +#: neutron/openstack/common/db/sqlalchemy/utils.py:379 +#, python-format +msgid "Deleting duplicated row with id: %(id)s from table: %(table)s" +msgstr "Deleting duplicated row with id: %(id)s from table: %(table)s" + +#: neutron/openstack/common/rpc/impl_kombu.py:498 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "Reconnecting to AMQP server on %(hostname)s:%(port)d" + +#: neutron/openstack/common/rpc/impl_kombu.py:520 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "Connected to AMQP server on %(hostname)s:%(port)d" + +#: neutron/openstack/common/rpc/impl_qpid.py:523 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "Connected to AMQP server on %s" + +#: neutron/openstack/common/rpc/impl_zmq.py:371 +msgid "Registering reactor" +msgstr "Registering reactor" + +#: neutron/openstack/common/rpc/impl_zmq.py:383 +msgid "In reactor registered" +msgstr "In reactor registered" + +#: neutron/openstack/common/rpc/impl_zmq.py:388 +msgid "Consuming socket" +msgstr "Consuming socket" + +#: neutron/openstack/common/rpc/impl_zmq.py:438 +#, python-format +msgid "Creating proxy for topic: %s" +msgstr "Creating proxy for topic: %s" + +#: neutron/openstack/common/rpc/impl_zmq.py:591 +msgid "Skipping topic registration. Already registered." +msgstr "Skipping topic registration. Already registered." 
+ +#: neutron/openstack/common/rpc/matchmaker.py:216 +#, python-format +msgid "Matchmaker unregistered: %(key)s, %(host)s" +msgstr "Matchmaker unregistered: %(key)s, %(host)s" diff --git a/neutron/locale/en_US/LC_MESSAGES/neutron.po b/neutron/locale/en_US/LC_MESSAGES/neutron.po new file mode 100644 index 000000000..f2085bffa --- /dev/null +++ b/neutron/locale/en_US/LC_MESSAGES/neutron.po @@ -0,0 +1,16163 @@ +# English (United States) translations for neutron. +# Copyright (C) 2013 ORGANIZATION +# This file is distributed under the same license as the neutron project. +# +# Translators: +msgid "" +msgstr "" +"Project-Id-Version: Neutron\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2014-06-21 06:08+0000\n" +"PO-Revision-Date: 2013-01-28 21:54+0000\n" +"Last-Translator: FULL NAME \n" +"Language-Team: English (United States) " +"(http://www.transifex.com/projects/p/openstack/language/en_US/)\n" +"Plural-Forms: nplurals=2; plural=(n != 1)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 1.3\n" + +#: neutron/auth.py:37 +msgid "X_USER_ID is not found in request" +msgstr "" + +#: neutron/context.py:83 +#, python-format +msgid "Arguments dropped when creating context: %s" +msgstr "" + +#: neutron/context.py:111 +#, python-format +msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" +msgstr "" + +#: neutron/manager.py:71 +#, python-format +msgid "dhcp_agents_per_network must be >= 1. '%s' is invalid." +msgstr "" + +#: neutron/manager.py:83 +msgid "Neutron core_plugin not configured!" +msgstr "" + +#: neutron/manager.py:112 +#, python-format +msgid "Loading core plugin: %s" +msgstr "" + +#: neutron/manager.py:137 +#, python-format +msgid "Error loading plugin by name, %s" +msgstr "" + +#: neutron/manager.py:138 +#, python-format +msgid "Error loading plugin by class, %s" +msgstr "" + +#: neutron/manager.py:139 +msgid "Plugin not found." 
+msgstr "" + +#: neutron/manager.py:144 +msgid "Loading services supported by the core plugin" +msgstr "" + +#: neutron/manager.py:152 +#, python-format +msgid "Service %s is supported by the core plugin" +msgstr "" + +#: neutron/manager.py:165 +#, python-format +msgid "Loading service plugins: %s" +msgstr "" + +#: neutron/manager.py:170 +#, python-format +msgid "Loading Plugin: %s" +msgstr "" + +#: neutron/manager.py:178 +#, python-format +msgid "Multiple plugins for service %s were configured" +msgstr "" + +#: neutron/manager.py:190 +#, python-format +msgid "Successfully loaded %(type)s plugin. Description: %(desc)s" +msgstr "" + +#: neutron/policy.py:88 +#, python-format +msgid "Loading policies from file: %s" +msgstr "" + +#: neutron/policy.py:95 +#, python-format +msgid "" +"Found deprecated policy rule:%s. Please consider upgrading your policy " +"configuration file" +msgstr "" + +#: neutron/policy.py:107 +#, python-format +msgid "" +"Inserting policy:%(new_policy)s in place of deprecated " +"policy:%(old_policy)s" +msgstr "" + +#: neutron/policy.py:115 +#, python-format +msgid "" +"Backward compatibility unavailable for deprecated policy %s. The policy " +"will not be enforced" +msgstr "" + +#: neutron/policy.py:137 +#, python-format +msgid "Unable to find data type descriptor for attribute %s" +msgstr "" + +#: neutron/policy.py:142 +#, python-format +msgid "" +"Attribute type descriptor is not a dict. Unable to generate any sub-attr " +"policy rule for %s." +msgstr "" + +#: neutron/policy.py:215 +#, python-format +msgid "" +"Unable to identify a target field from:%s.match should be in the form " +"%%()s" +msgstr "" + +#: neutron/policy.py:241 +#, python-format +msgid "Unable to find ':' as separator in %s." 
+msgstr "" + +#: neutron/policy.py:245 +#, python-format +msgid "Unable to find resource name in %s" +msgstr "" + +#: neutron/policy.py:254 +#, python-format +msgid "" +"Unable to verify match:%(match)s as the parent resource: %(res)s was not " +"found" +msgstr "" + +#: neutron/policy.py:280 +#, python-format +msgid "Policy check error while calling %s!" +msgstr "" + +#: neutron/policy.py:311 +#, python-format +msgid "Unable to find requested field: %(field)s in target: %(target_dict)s" +msgstr "" + +#: neutron/policy.py:369 +#, python-format +msgid "Failed policy check for '%s'" +msgstr "" + +#: neutron/quota.py:36 +msgid "Resource name(s) that are supported in quota features" +msgstr "" + +#: neutron/quota.py:40 +msgid "" +"Default number of resource allowed per tenant. A negative value means " +"unlimited." +msgstr "" + +#: neutron/quota.py:44 +msgid "Number of networks allowed per tenant.A negative value means unlimited." +msgstr "" + +#: neutron/quota.py:48 +msgid "Number of subnets allowed per tenant, A negative value means unlimited." +msgstr "" + +#: neutron/quota.py:52 +msgid "Number of ports allowed per tenant. A negative value means unlimited." +msgstr "" + +#: neutron/quota.py:56 +msgid "Default driver to use for quota checks" +msgstr "" + +#: neutron/quota.py:150 neutron/quota.py:155 +msgid "Access to this resource was denied." +msgstr "" + +#: neutron/quota.py:228 +msgid "" +"ConfDriver is used as quota_driver because the loaded plugin does not " +"support 'quotas' table." +msgstr "" + +#: neutron/quota.py:233 +#, python-format +msgid "Loaded quota_driver: %s." +msgstr "" + +#: neutron/quota.py:242 +#, python-format +msgid "%s is already registered." 
+msgstr "" + +#: neutron/service.py:40 +msgid "Seconds between running periodic tasks" +msgstr "" + +#: neutron/service.py:43 +msgid "Number of separate worker processes for service" +msgstr "" + +#: neutron/service.py:46 +msgid "Number of RPC worker processes for service" +msgstr "" + +#: neutron/service.py:49 +msgid "" +"Range of seconds to randomly delay when starting the periodic task " +"scheduler to reduce stampeding. (Disable by setting to 0)" +msgstr "" + +#: neutron/service.py:105 neutron/service.py:163 +msgid "Unrecoverable error: please check log for details." +msgstr "" + +#: neutron/service.py:144 +msgid "Active plugin doesn't implement start_rpc_listeners" +msgstr "" + +#: neutron/service.py:146 +#, python-format +msgid "'rpc_workers = %d' ignored because start_rpc_listeners is not implemented." +msgstr "" + +#: neutron/service.py:170 +msgid "No known API applications configured." +msgstr "" + +#: neutron/service.py:177 +#, python-format +msgid "Neutron service started, listening on %(host)s:%(port)s" +msgstr "" + +#: neutron/service.py:278 +msgid "Exception occurs when timer stops" +msgstr "" + +#: neutron/service.py:288 +msgid "Exception occurs when waiting for timer" +msgstr "" + +#: neutron/wsgi.py:53 +msgid "Number of backlog requests to configure the socket with" +msgstr "" + +#: neutron/wsgi.py:57 +msgid "" +"Sets the value of TCP_KEEPIDLE in seconds for each server socket. Not " +"supported on OS X." 
+msgstr "" + +#: neutron/wsgi.py:61 +msgid "Number of seconds to keep retrying to listen" +msgstr "" + +#: neutron/wsgi.py:64 +msgid "Max header line to accommodate large tokens" +msgstr "" + +#: neutron/wsgi.py:67 +msgid "Enable SSL on the API server" +msgstr "" + +#: neutron/wsgi.py:69 +msgid "CA certificate file to use to verify connecting clients" +msgstr "" + +#: neutron/wsgi.py:72 +msgid "Certificate file to use when starting the server securely" +msgstr "" + +#: neutron/wsgi.py:75 +msgid "Private key file to use when starting the server securely" +msgstr "" + +#: neutron/wsgi.py:134 +#, python-format +msgid "Unable to listen on %(host)s:%(port)s" +msgstr "" + +#: neutron/wsgi.py:140 +#, python-format +msgid "Unable to find ssl_cert_file : %s" +msgstr "" + +#: neutron/wsgi.py:146 +#, python-format +msgid "Unable to find ssl_key_file : %s" +msgstr "" + +#: neutron/wsgi.py:151 +#, python-format +msgid "Unable to find ssl_ca_file : %s" +msgstr "" + +#: neutron/wsgi.py:184 +#, python-format +msgid "Could not bind to %(host)s:%(port)s after trying for %(time)d seconds" +msgstr "" + +#: neutron/wsgi.py:344 +msgid "Missing Content-Type" +msgstr "" + +#: neutron/wsgi.py:533 +#, python-format +msgid "Data %(data)s type is %(type)s" +msgstr "" + +#: neutron/wsgi.py:616 +msgid "Cannot understand JSON" +msgstr "" + +#: neutron/wsgi.py:629 neutron/wsgi.py:632 +msgid "Inline DTD forbidden" +msgstr "" + +#: neutron/wsgi.py:713 +msgid "Cannot understand XML" +msgstr "" + +#: neutron/wsgi.py:822 +msgid "Unrecognized Content-Type provided in request" +msgstr "" + +#: neutron/wsgi.py:826 +msgid "No Content-Type provided in request" +msgstr "" + +#: neutron/wsgi.py:830 +msgid "Empty body provided in request" +msgstr "" + +#: neutron/wsgi.py:837 +msgid "Unable to deserialize body as provided Content-Type" +msgstr "" + +#: neutron/wsgi.py:933 +msgid "You must implement __call__" +msgstr "" + +#: neutron/wsgi.py:1026 neutron/api/v2/base.py:192 neutron/api/v2/base.py:333 +#: 
neutron/api/v2/base.py:473 neutron/api/v2/base.py:524 +#: neutron/extensions/l3agentscheduler.py:49 +#: neutron/extensions/l3agentscheduler.py:87 +msgid "The resource could not be found." +msgstr "" + +#: neutron/wsgi.py:1073 +#, python-format +msgid "%(method)s %(url)s" +msgstr "" + +#: neutron/wsgi.py:1079 +msgid "Unsupported Content-Type" +msgstr "" + +#: neutron/wsgi.py:1080 +#, python-format +msgid "InvalidContentType: %s" +msgstr "" + +#: neutron/wsgi.py:1084 +msgid "Malformed request body" +msgstr "" + +#: neutron/wsgi.py:1085 +#, python-format +msgid "MalformedRequestBody: %s" +msgstr "" + +#: neutron/wsgi.py:1092 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "" + +#: neutron/wsgi.py:1097 +msgid "Internal error" +msgstr "" + +#: neutron/wsgi.py:1112 neutron/wsgi.py:1214 +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "" + +#: neutron/wsgi.py:1115 +#, python-format +msgid "%(url)s returned a fault: %(exception)s" +msgstr "" + +#: neutron/wsgi.py:1233 +#, python-format +msgid "The requested content type %s is invalid." +msgstr "" + +#: neutron/wsgi.py:1292 +msgid "Could not deserialize data" +msgstr "" + +#: neutron/agent/dhcp_agent.py:53 +msgid "Interval to resync." +msgstr "" + +#: neutron/agent/dhcp_agent.py:56 +msgid "The driver used to manage the DHCP server." +msgstr "" + +#: neutron/agent/dhcp_agent.py:58 +msgid "Support Metadata requests on isolated networks." +msgstr "" + +#: neutron/agent/dhcp_agent.py:60 +msgid "" +"Allows for serving metadata requests from a dedicated network. Requires " +"enable_isolated_metadata = True" +msgstr "" + +#: neutron/agent/dhcp_agent.py:64 +msgid "Number of threads to use during sync process." 
+msgstr "" + +#: neutron/agent/dhcp_agent.py:67 neutron/agent/l3_agent.py:190 +#: neutron/agent/metadata/namespace_proxy.py:167 +msgid "Location of Metadata Proxy UNIX domain socket" +msgstr "" + +#: neutron/agent/dhcp_agent.py:104 +#, python-format +msgid "" +"The '%s' DHCP-driver does not support retrieving of a list of existing " +"networks" +msgstr "" + +#: neutron/agent/dhcp_agent.py:111 neutron/agent/dhcp_agent.py:600 +msgid "DHCP agent started" +msgstr "" + +#: neutron/agent/dhcp_agent.py:120 +#, python-format +msgid "Calling driver for network: %(net)s action: %(action)s" +msgstr "" + +#: neutron/agent/dhcp_agent.py:136 +#, python-format +msgid "" +"Unable to %(action)s dhcp for %(net_id)s: there is a conflict with its " +"current state; please check that the network and/or its subnet(s) still " +"exist." +msgstr "" + +#: neutron/agent/dhcp_agent.py:145 neutron/agent/dhcp_agent.py:203 +#, python-format +msgid "Network %s has been deleted." +msgstr "" + +#: neutron/agent/dhcp_agent.py:147 +#, python-format +msgid "Unable to %(action)s dhcp for %(net_id)s." +msgstr "" + +#: neutron/agent/dhcp_agent.py:157 +msgid "Synchronizing state" +msgstr "" + +#: neutron/agent/dhcp_agent.py:169 +#, python-format +msgid "Unable to sync network state on deleted network %s" +msgstr "" + +#: neutron/agent/dhcp_agent.py:175 +msgid "Synchronizing state complete" +msgstr "" + +#: neutron/agent/dhcp_agent.py:179 +msgid "Unable to sync network state." +msgstr "" + +#: neutron/agent/dhcp_agent.py:191 +#, python-format +msgid "resync: %(reason)s" +msgstr "" + +#: neutron/agent/dhcp_agent.py:207 +#, python-format +msgid "Network %s info call failed." +msgstr "" + +#: neutron/agent/dhcp_agent.py:219 +#, python-format +msgid "" +"Network %s may have been deleted and its resources may have already been " +"disposed." +msgstr "" + +#: neutron/agent/dhcp_agent.py:344 +#, python-format +msgid "" +"%(port_num)d router ports found on the metadata access network. 
Only the " +"port %(port_id)s, for router %(router_id)s will be considered" +msgstr "" + +#: neutron/agent/dhcp_agent.py:582 neutron/agent/l3_agent.py:961 +#: neutron/agent/metadata/agent.py:364 +#: neutron/services/metering/agents/metering_agent.py:273 +msgid "" +"Neutron server does not support state report. State report for this agent" +" will be disabled." +msgstr "" + +#: neutron/agent/dhcp_agent.py:588 neutron/agent/l3_agent.py:966 +#: neutron/agent/metadata/agent.py:369 +#: neutron/plugins/ibm/agent/sdnve_neutron_agent.py:111 +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:798 +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:250 +#: neutron/plugins/nec/agent/nec_neutron_agent.py:182 +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:265 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:240 +#: neutron/services/loadbalancer/agent/agent_manager.py:123 +#: neutron/services/metering/agents/metering_agent.py:278 +msgid "Failed reporting state!" +msgstr "" + +#: neutron/agent/dhcp_agent.py:595 +#, python-format +msgid "Agent updated: %(payload)s" +msgstr "" + +#: neutron/agent/dhcp_agent.py:597 neutron/agent/l3_agent.py:971 +#: neutron/services/metering/agents/metering_agent.py:281 +#, python-format +msgid "agent_updated by server side %s!" +msgstr "" + +#: neutron/agent/l3_agent.py:164 neutron/debug/debug_agent.py:43 +msgid "Name of bridge used for external network traffic." +msgstr "" + +#: neutron/agent/l3_agent.py:168 +msgid "TCP Port used by Neutron metadata namespace proxy." +msgstr "" + +#: neutron/agent/l3_agent.py:172 +msgid "" +"Send this many gratuitous ARPs for HA setup, if less than or equal to 0, " +"the feature is disabled" +msgstr "" + +#: neutron/agent/l3_agent.py:175 +msgid "" +"If namespaces is disabled, the l3 agent can only configure a router that " +"has the matching router ID." 
+msgstr "" + +#: neutron/agent/l3_agent.py:180 +msgid "Agent should implement routers with no gateway" +msgstr "" + +#: neutron/agent/l3_agent.py:182 +msgid "UUID of external network for routers implemented by the agents." +msgstr "" + +#: neutron/agent/l3_agent.py:185 +msgid "Allow running metadata proxy." +msgstr "" + +#: neutron/agent/l3_agent.py:187 +msgid "Delete namespace after removing a router." +msgstr "" + +#: neutron/agent/l3_agent.py:210 +#, python-format +msgid "Error importing interface driver '%s'" +msgstr "" + +#: neutron/agent/l3_agent.py:238 neutron/agent/linux/dhcp.py:716 +#: neutron/services/metering/drivers/iptables/iptables_driver.py:89 +msgid "An interface driver must be specified" +msgstr "" + +#: neutron/agent/l3_agent.py:243 +msgid "Router id is required if not using namespaces." +msgstr "" + +#: neutron/agent/l3_agent.py:264 +msgid "RuntimeError in obtaining router list for namespace cleanup." +msgstr "" + +#: neutron/agent/l3_agent.py:284 +#, python-format +msgid "Failed to destroy stale router namespace %s" +msgstr "" + +#: neutron/agent/l3_agent.py:305 neutron/agent/linux/dhcp.py:227 +#, python-format +msgid "Failed trying to delete namespace: %s" +msgstr "" + +#: neutron/agent/l3_agent.py:335 +msgid "" +"The 'gateway_external_network_id' option must be configured for this " +"agent as Neutron has more than one external network." +msgstr "" + +#: neutron/agent/l3_agent.py:359 +#, python-format +msgid "Info for router %s were not found. 
Skipping router removal" +msgstr "" + +#: neutron/agent/l3_agent.py:408 +#: neutron/services/firewall/agents/varmour/varmour_router.py:104 +#, python-format +msgid "Router port %s has no IP address" +msgstr "" + +#: neutron/agent/l3_agent.py:410 neutron/db/l3_db.py:973 +#: neutron/services/firewall/agents/varmour/varmour_router.py:107 +#, python-format +msgid "Ignoring multiple IPs on router port %s" +msgstr "" + +#: neutron/agent/l3_agent.py:450 +#, python-format +msgid "Deleting stale internal router device: %s" +msgstr "" + +#: neutron/agent/l3_agent.py:479 +#, python-format +msgid "Deleting stale external router device: %s" +msgstr "" + +#: neutron/agent/l3_agent.py:598 +#, python-format +msgid "Unable to configure IP address for floating IP: %s" +msgstr "" + +#: neutron/agent/l3_agent.py:628 +#, python-format +msgid "Failed sending gratuitous ARP: %s" +msgstr "" + +#: neutron/agent/l3_agent.py:737 +#, python-format +msgid "Got router deleted notification for %s" +msgstr "" + +#: neutron/agent/l3_agent.py:742 +#, python-format +msgid "Got routers updated notification :%s" +msgstr "" + +#: neutron/agent/l3_agent.py:750 +#, python-format +msgid "Got router removed from agent :%r" +msgstr "" + +#: neutron/agent/l3_agent.py:754 +#, python-format +msgid "Got router added to agent :%r" +msgstr "" + +#: neutron/agent/l3_agent.py:761 +#, python-format +msgid "The external network bridge '%s' does not exist" +msgstr "" + +#: neutron/agent/l3_agent.py:811 +#, python-format +msgid "Starting RPC loop for %d updated routers" +msgstr "" + +#: neutron/agent/l3_agent.py:829 +msgid "RPC loop successfully completed" +msgstr "" + +#: neutron/agent/l3_agent.py:831 neutron/agent/l3_agent.py:869 +#: neutron/services/metering/agents/metering_agent.py:62 +msgid "Failed synchronizing routers" +msgstr "" + +#: neutron/agent/l3_agent.py:849 +#, python-format +msgid "Starting _sync_routers_task - fullsync:%s" +msgstr "" + +#: neutron/agent/l3_agent.py:860 +#, python-format +msgid 
"Processing :%r" +msgstr "" + +#: neutron/agent/l3_agent.py:863 +msgid "_sync_routers_task successfully completed" +msgstr "" + +#: neutron/agent/l3_agent.py:865 +msgid "Failed synchronizing routers due to RPC error" +msgstr "" + +#: neutron/agent/l3_agent.py:878 +msgid "L3 agent started" +msgstr "" + +#: neutron/agent/l3_agent.py:893 +#, python-format +msgid "Added route entry is '%s'" +msgstr "" + +#: neutron/agent/l3_agent.py:901 +#, python-format +msgid "Removed route entry is '%s'" +msgstr "" + +#: neutron/agent/l3_agent.py:934 +msgid "Report state task started" +msgstr "" + +#: neutron/agent/l3_agent.py:958 +msgid "Report state task successfully completed" +msgstr "" + +#: neutron/agent/netns_cleanup_util.py:61 +msgid "Delete the namespace by removing all devices." +msgstr "" + +#: neutron/agent/netns_cleanup_util.py:118 +#, python-format +msgid "Unable to find bridge for device: %s" +msgstr "" + +#: neutron/agent/netns_cleanup_util.py:142 +#, python-format +msgid "Error unable to destroy namespace: %s" +msgstr "" + +#: neutron/agent/ovs_cleanup_util.py:41 +msgid "" +"True to delete all ports on all the OpenvSwitch bridges. False to delete " +"ports created by Neutron on integration and external network bridges." +msgstr "" + +#: neutron/agent/ovs_cleanup_util.py:75 +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:668 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:278 +#, python-format +msgid "Delete %s" +msgstr "" + +#: neutron/agent/ovs_cleanup_util.py:105 +#, python-format +msgid "Cleaning %s" +msgstr "" + +#: neutron/agent/ovs_cleanup_util.py:112 +msgid "OVS cleanup completed successfully" +msgstr "" + +#: neutron/agent/securitygroups_rpc.py:31 +msgid "Driver for security groups firewall in the L2 agent" +msgstr "" + +#: neutron/agent/securitygroups_rpc.py:35 +msgid "" +"Controls whether the neutron security group API is enabled in the server." 
+" It should be false when using no security groups or using the nova " +"security group API." +msgstr "" + +#: neutron/agent/securitygroups_rpc.py:58 +#: neutron/agent/securitygroups_rpc.py:142 +msgid "Driver configuration doesn't match with enable_security_group" +msgstr "" + +#: neutron/agent/securitygroups_rpc.py:71 +msgid "Disabled security-group extension." +msgstr "" + +#: neutron/agent/securitygroups_rpc.py:73 +msgid "Disabled allowed-address-pairs extension." +msgstr "" + +#: neutron/agent/securitygroups_rpc.py:80 +#, python-format +msgid "Get security group rules for devices via rpc %r" +msgstr "" + +#: neutron/agent/securitygroups_rpc.py:97 +msgid "" +"Security group agent binding currently not set. This should be set by the" +" end of the init process." +msgstr "" + +#: neutron/agent/securitygroups_rpc.py:108 +#, python-format +msgid "Security group rule updated on remote: %s" +msgstr "" + +#: neutron/agent/securitygroups_rpc.py:120 +#, python-format +msgid "Security group member updated on remote: %s" +msgstr "" + +#: neutron/agent/securitygroups_rpc.py:127 +#: neutron/agent/securitygroups_rpc.py:196 +msgid "Provider rule updated" +msgstr "" + +#: neutron/agent/securitygroups_rpc.py:140 +#, python-format +msgid "Init firewall settings (driver=%s)" +msgstr "" + +#: neutron/agent/securitygroups_rpc.py:159 +#, python-format +msgid "Preparing filters for devices %s" +msgstr "" + +#: neutron/agent/securitygroups_rpc.py:167 +#, python-format +msgid "Security group rule updated %r" +msgstr "" + +#: neutron/agent/securitygroups_rpc.py:174 +#, python-format +msgid "Security group member updated %r" +msgstr "" + +#: neutron/agent/securitygroups_rpc.py:188 +#, python-format +msgid "" +"Adding %s devices to the list of devices for which firewall needs to be " +"refreshed" +msgstr "" + +#: neutron/agent/securitygroups_rpc.py:208 +#, python-format +msgid "Remove device filter for %r" +msgstr "" + +#: neutron/agent/securitygroups_rpc.py:217 +msgid "Refresh firewall 
rules" +msgstr "" + +#: neutron/agent/securitygroups_rpc.py:221 +msgid "No ports here to refresh firewall" +msgstr "" + +#: neutron/agent/securitygroups_rpc.py:227 +#, python-format +msgid "Update port filter for %s" +msgstr "" + +#: neutron/agent/securitygroups_rpc.py:245 +#, python-format +msgid "Preparing device filters for %d new devices" +msgstr "" + +#: neutron/agent/securitygroups_rpc.py:258 +msgid "Refreshing firewall for all filtered devices" +msgstr "" + +#: neutron/agent/securitygroups_rpc.py:266 +#, python-format +msgid "Refreshing firewall for %d devices" +msgstr "" + +#: neutron/agent/common/config.py:31 +msgid "Root helper application." +msgstr "" + +#: neutron/agent/common/config.py:36 +msgid "" +"Seconds between nodes reporting state to server; should be less than " +"agent_down_time, best if it is half or less than agent_down_time." +msgstr "" + +#: neutron/agent/common/config.py:43 +msgid "The driver used to manage the virtual interface." +msgstr "" + +#: neutron/agent/common/config.py:48 +msgid "Allow overlapping IP." +msgstr "" + +#: neutron/agent/common/config.py:104 +msgid "" +"DEFAULT.root_helper is deprecated! Please move root_helper configuration " +"to [AGENT] section." +msgstr "" + +#: neutron/agent/common/config.py:115 +msgid "Top-level directory for maintaining dhcp state" +msgstr "" + +#: neutron/agent/linux/async_process.py:68 +msgid "respawn_interval must be >= 0 if provided." +msgstr "" + +#: neutron/agent/linux/async_process.py:82 +msgid "Process is already started" +msgstr "" + +#: neutron/agent/linux/async_process.py:84 +#, python-format +msgid "Launching async process [%s]." +msgstr "" + +#: neutron/agent/linux/async_process.py:90 +#, python-format +msgid "Halting async process [%s]." +msgstr "" + +#: neutron/agent/linux/async_process.py:93 +msgid "Process is not running." +msgstr "" + +#: neutron/agent/linux/async_process.py:165 +#, python-format +msgid "An error occurred while killing [%s]." 
+msgstr "" + +#: neutron/agent/linux/async_process.py:172 +#, python-format +msgid "Halting async process [%s] in response to an error." +msgstr "" + +#: neutron/agent/linux/async_process.py:178 +#, python-format +msgid "Respawning async process [%s]." +msgstr "" + +#: neutron/agent/linux/async_process.py:187 +#, python-format +msgid "An error occurred while communicating with async process [%s]." +msgstr "" + +#: neutron/agent/linux/daemon.py:39 +#, python-format +msgid "Error while handling pidfile: %s" +msgstr "" + +#: neutron/agent/linux/daemon.py:47 +msgid "Unable to unlock pid file" +msgstr "" + +#: neutron/agent/linux/daemon.py:96 +msgid "Fork failed" +msgstr "" + +#: neutron/agent/linux/daemon.py:138 +#, python-format +msgid "Pidfile %s already exist. Daemon already running?" +msgstr "" + +#: neutron/agent/linux/dhcp.py:45 +msgid "Location to store DHCP server config files" +msgstr "" + +#: neutron/agent/linux/dhcp.py:48 neutron/plugins/vmware/dhcp_meta/nsx.py:44 +msgid "Domain to use for building the hostnames" +msgstr "" + +#: neutron/agent/linux/dhcp.py:51 +msgid "Override the default dnsmasq settings with this file" +msgstr "" + +#: neutron/agent/linux/dhcp.py:53 +msgid "Comma-separated list of the DNS servers which will be used as forwarders." +msgstr "" + +#: neutron/agent/linux/dhcp.py:57 +msgid "Delete namespace after removing a dhcp server." +msgstr "" + +#: neutron/agent/linux/dhcp.py:61 +msgid "Limit number of leases to prevent a denial-of-service." 
+msgstr "" + +#: neutron/agent/linux/dhcp.py:209 +#, python-format +msgid "" +"DHCP for %(net_id)s is stale, pid %(pid)d does not exist, performing " +"cleanup" +msgstr "" + +#: neutron/agent/linux/dhcp.py:216 +#, python-format +msgid "No DHCP started for %s" +msgstr "" + +#: neutron/agent/linux/dhcp.py:248 neutron/agent/linux/external_process.py:80 +#, python-format +msgid "Error while reading %s" +msgstr "" + +#: neutron/agent/linux/dhcp.py:255 neutron/agent/linux/external_process.py:88 +#, python-format +msgid "Unable to convert value in %s" +msgstr "" + +#: neutron/agent/linux/dhcp.py:257 neutron/agent/linux/external_process.py:86 +#, python-format +msgid "Unable to access %s" +msgstr "" + +#: neutron/agent/linux/dhcp.py:318 +#, python-format +msgid "" +"FAILED VERSION REQUIREMENT FOR DNSMASQ. DHCP AGENT MAY NOT RUN CORRECTLY!" +" Please ensure that its version is %s or above!" +msgstr "" + +#: neutron/agent/linux/dhcp.py:323 +#, python-format +msgid "" +"Unable to determine dnsmasq version. Please ensure that its version is %s" +" or above!" 
+msgstr "" + +#: neutron/agent/linux/dhcp.py:421 +#, python-format +msgid "Killing dhcpmasq for network since all subnets have turned off DHCP: %s" +msgstr "" + +#: neutron/agent/linux/dhcp.py:433 +#, python-format +msgid "Pid %d is stale, relaunching dnsmasq" +msgstr "" + +#: neutron/agent/linux/dhcp.py:434 +#, python-format +msgid "Reloading allocations for network: %s" +msgstr "" + +#: neutron/agent/linux/dhcp.py:474 +#, python-format +msgid "Building host file: %s" +msgstr "" + +#: neutron/agent/linux/dhcp.py:484 +#, python-format +msgid "Adding %(mac)s : %(name)s : %(ip)s" +msgstr "" + +#: neutron/agent/linux/dhcp.py:500 +#, python-format +msgid "Done building host file %s" +msgstr "" + +#: neutron/agent/linux/dhcp.py:723 +#, python-format +msgid "Error importing interface driver '%(driver)s': %(inner)s" +msgstr "" + +#: neutron/agent/linux/dhcp.py:763 +#, python-format +msgid "Setting gateway for dhcp netns on net %(n)s to %(ip)s" +msgstr "" + +#: neutron/agent/linux/dhcp.py:773 +#, python-format +msgid "Removing gateway for dhcp netns on net %s" +msgstr "" + +#: neutron/agent/linux/dhcp.py:817 +#, python-format +msgid "" +"DHCP port %(device_id)s on network %(network_id)s does not yet exist. " +"Checking for a reserved port." +msgstr "" + +#: neutron/agent/linux/dhcp.py:831 +#, python-format +msgid "DHCP port %(device_id)s on network %(network_id)s does not yet exist." +msgstr "" + +#: neutron/agent/linux/dhcp.py:866 neutron/debug/debug_agent.py:69 +#, python-format +msgid "Reusing existing device: %s." 
+msgstr "" + +#: neutron/agent/linux/external_process.py:32 +msgid "Location to store child pid files" +msgstr "" + +#: neutron/agent/linux/external_process.py:63 +#, python-format +msgid "Process for %(uuid)s pid %(pid)d is stale, ignoring command" +msgstr "" + +#: neutron/agent/linux/external_process.py:66 +#, python-format +msgid "No process started for %s" +msgstr "" + +#: neutron/agent/linux/interface.py:39 +msgid "Name of Open vSwitch bridge to use" +msgstr "" + +#: neutron/agent/linux/interface.py:42 +msgid "Uses veth for an interface or not" +msgstr "" + +#: neutron/agent/linux/interface.py:44 +msgid "MTU setting for device." +msgstr "" + +#: neutron/agent/linux/interface.py:46 +msgid "Mapping between flavor and LinuxInterfaceDriver" +msgstr "" + +#: neutron/agent/linux/interface.py:48 +msgid "Admin username" +msgstr "" + +#: neutron/agent/linux/interface.py:50 neutron/agent/metadata/agent.py:56 +#: neutron/plugins/metaplugin/common/config.py:67 +msgid "Admin password" +msgstr "" + +#: neutron/agent/linux/interface.py:53 neutron/agent/metadata/agent.py:59 +#: neutron/plugins/metaplugin/common/config.py:70 +msgid "Admin tenant name" +msgstr "" + +#: neutron/agent/linux/interface.py:55 neutron/agent/metadata/agent.py:61 +#: neutron/plugins/metaplugin/common/config.py:72 +msgid "Authentication URL" +msgstr "" + +#: neutron/agent/linux/interface.py:57 neutron/agent/metadata/agent.py:63 +#: neutron/common/config.py:47 neutron/plugins/metaplugin/common/config.py:74 +msgid "The type of authentication to use" +msgstr "" + +#: neutron/agent/linux/interface.py:59 neutron/agent/metadata/agent.py:65 +#: neutron/plugins/metaplugin/common/config.py:76 +msgid "Authentication region" +msgstr "" + +#: neutron/agent/linux/interface.py:216 neutron/agent/linux/interface.py:270 +#: neutron/agent/linux/interface.py:332 neutron/agent/linux/interface.py:381 +#, python-format +msgid "Device %s already exists" +msgstr "" + +#: neutron/agent/linux/interface.py:234 
neutron/agent/linux/interface.py:281 +#: neutron/agent/linux/interface.py:344 neutron/agent/linux/interface.py:388 +#, python-format +msgid "Unplugged interface '%s'" +msgstr "" + +#: neutron/agent/linux/interface.py:236 neutron/agent/linux/interface.py:280 +#: neutron/agent/linux/interface.py:346 neutron/agent/linux/interface.py:390 +#, python-format +msgid "Failed unplugging interface '%s'" +msgstr "" + +#: neutron/agent/linux/interface.py:448 +#, python-format +msgid "Driver location: %s" +msgstr "" + +#: neutron/agent/linux/ip_lib.py:27 +msgid "Force ip_lib calls to use the root helper" +msgstr "" + +#: neutron/agent/linux/iptables_firewall.py:58 +#, python-format +msgid "Preparing device (%s) filter" +msgstr "" + +#: neutron/agent/linux/iptables_firewall.py:66 +#, python-format +msgid "Updating device (%s) filter" +msgstr "" + +#: neutron/agent/linux/iptables_firewall.py:68 +#, python-format +msgid "Attempted to update port filter which is not filtered %s" +msgstr "" + +#: neutron/agent/linux/iptables_firewall.py:77 +#, python-format +msgid "Removing device (%s) filter" +msgstr "" + +#: neutron/agent/linux/iptables_firewall.py:79 +#, python-format +msgid "Attempted to remove port filter which is not filtered %r" +msgstr "" + +#: neutron/agent/linux/iptables_manager.py:159 +#, python-format +msgid "Attempted to remove chain %s which does not exist" +msgstr "" + +#: neutron/agent/linux/iptables_manager.py:201 +#, python-format +msgid "Unknown chain: %r" +msgstr "" + +#: neutron/agent/linux/iptables_manager.py:236 +#, python-format +msgid "" +"Tried to remove rule that was not there: %(chain)r %(rule)r %(wrap)r " +"%(top)r" +msgstr "" + +#: neutron/agent/linux/iptables_manager.py:374 +#, python-format +msgid "Got semaphore / lock \"%s\"" +msgstr "" + +#: neutron/agent/linux/iptables_manager.py:377 +#, python-format +msgid "Semaphore / lock released \"%s\"" +msgstr "" + +#: neutron/agent/linux/iptables_manager.py:426 +#: 
neutron/tests/unit/test_iptables_manager.py:560 +#: neutron/tests/unit/test_iptables_manager.py:594 +#, python-format +msgid "" +"IPTablesManager.apply failed to apply the following set of iptables " +"rules:\n" +"%s" +msgstr "" + +#: neutron/agent/linux/iptables_manager.py:429 +msgid "IPTablesManager.apply completed with success" +msgstr "" + +#: neutron/agent/linux/iptables_manager.py:439 +#, python-format +msgid "Unable to find table %s" +msgstr "" + +#: neutron/agent/linux/iptables_manager.py:639 +#, python-format +msgid "Attempted to get traffic counters of chain %s which does not exist" +msgstr "" + +#: neutron/agent/linux/ovs_lib.py:34 +msgid "Timeout in seconds for ovs-vsctl commands" +msgstr "" + +#: neutron/agent/linux/ovs_lib.py:68 neutron/agent/linux/ovs_lib.py:168 +#: neutron/agent/linux/ovs_lib.py:315 +#: neutron/plugins/bigswitch/agent/restproxy_agent.py:55 +#, python-format +msgid "Unable to execute %(cmd)s. Exception: %(exception)s" +msgstr "" + +#: neutron/agent/linux/ovs_lib.py:223 +msgid "defer_apply_on" +msgstr "" + +#: neutron/agent/linux/ovs_lib.py:227 +msgid "defer_apply_off" +msgstr "" + +#: neutron/agent/linux/ovs_lib.py:237 +#, python-format +msgid "Applying following deferred flows to bridge %s" +msgstr "" + +#: neutron/agent/linux/ovs_lib.py:240 +#, python-format +msgid "%(action)s: %(flow)s" +msgstr "" + +#: neutron/agent/linux/ovs_lib.py:266 +msgid "" +"Unable to create VXLAN tunnel port. Please ensure that an openvswitch " +"version that supports VXLAN is installed." 
+msgstr "" + +#: neutron/agent/linux/ovs_lib.py:363 +#, python-format +msgid "Found not yet ready openvswitch port: %s" +msgstr "" + +#: neutron/agent/linux/ovs_lib.py:378 +#, python-format +msgid "Found failed openvswitch port: %s" +msgstr "" + +#: neutron/agent/linux/ovs_lib.py:433 +#, python-format +msgid "Port: %(port_name)s is on %(switch)s, not on %(br_name)s" +msgstr "" + +#: neutron/agent/linux/ovs_lib.py:441 +#, python-format +msgid "ofport: %(ofport)s for VIF: %(vif)s is not a positive integer" +msgstr "" + +#: neutron/agent/linux/ovs_lib.py:451 +#, python-format +msgid "Unable to parse interface details. Exception: %s" +msgstr "" + +#: neutron/agent/linux/ovs_lib.py:469 +#, python-format +msgid "Unable to determine mac address for %s" +msgstr "" + +#: neutron/agent/linux/ovs_lib.py:486 +#, python-format +msgid "Interface %s not found." +msgstr "" + +#: neutron/agent/linux/ovs_lib.py:497 +#, python-format +msgid "Unable to retrieve bridges. Exception: %s" +msgstr "" + +#: neutron/agent/linux/ovs_lib.py:506 +#, python-format +msgid "Bridge %s not found." 
+msgstr "" + +#: neutron/agent/linux/ovs_lib.py:522 +msgid "Cannot match priority on flow deletion or modification" +msgstr "" + +#: neutron/agent/linux/ovs_lib.py:527 +msgid "Must specify one or more actions on flow addition or modification" +msgstr "" + +#: neutron/agent/linux/ovsdb_monitor.py:46 +#, python-format +msgid "Output received from ovsdb monitor: %s" +msgstr "" + +#: neutron/agent/linux/ovsdb_monitor.py:52 +#, python-format +msgid "Error received from ovsdb monitor: %s" +msgstr "" + +#: neutron/agent/linux/utils.py:48 +#, python-format +msgid "Running command: %s" +msgstr "" + +#: neutron/agent/linux/utils.py:71 +#, python-format +msgid "" +"\n" +"Command: %(cmd)s\n" +"Exit code: %(code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" +msgstr "" + +#: neutron/agent/metadata/agent.py:54 +#: neutron/plugins/metaplugin/common/config.py:65 +msgid "Admin user" +msgstr "" + +#: neutron/agent/metadata/agent.py:68 +msgid "Turn off verification of the certificate for ssl" +msgstr "" + +#: neutron/agent/metadata/agent.py:71 +msgid "Certificate Authority public key (CA cert) file for ssl" +msgstr "" + +#: neutron/agent/metadata/agent.py:75 +msgid "Network service endpoint type to pull from the keystone catalog" +msgstr "" + +#: neutron/agent/metadata/agent.py:78 +msgid "IP address used by Nova metadata server." +msgstr "" + +#: neutron/agent/metadata/agent.py:81 +msgid "TCP Port used by Nova metadata server." +msgstr "" + +#: neutron/agent/metadata/agent.py:84 +#: neutron/plugins/vmware/dhcp_meta/nsx.py:63 +msgid "Shared secret to sign instance-id request" +msgstr "" + +#: neutron/agent/metadata/agent.py:89 +msgid "Protocol to access nova metadata, http or https" +msgstr "" + +#: neutron/agent/metadata/agent.py:91 +msgid "Allow to perform insecure SSL (https) requests to nova metadata" +msgstr "" + +#: neutron/agent/metadata/agent.py:95 +msgid "Client certificate for nova metadata api server." 
+msgstr "" + +#: neutron/agent/metadata/agent.py:98 +msgid "Private key of client certificate." +msgstr "" + +#: neutron/agent/metadata/agent.py:128 +#: neutron/agent/metadata/namespace_proxy.py:70 +#, python-format +msgid "Request: %s" +msgstr "" + +#: neutron/agent/metadata/agent.py:137 +#: neutron/agent/metadata/namespace_proxy.py:78 +msgid "Unexpected error." +msgstr "" + +#: neutron/agent/metadata/agent.py:138 +#: neutron/agent/metadata/namespace_proxy.py:79 +msgid "An unknown error has occurred. Please try your request again." +msgstr "" + +#: neutron/agent/metadata/agent.py:180 +msgid "" +"Either one of parameter network_id or router_id must be passed to " +"_get_ports method." +msgstr "" + +#: neutron/agent/metadata/agent.py:232 +msgid "" +"The remote metadata server responded with Forbidden. This response " +"usually occurs when shared secrets do not match." +msgstr "" + +#: neutron/agent/metadata/agent.py:243 +#: neutron/agent/metadata/namespace_proxy.py:122 +msgid "Remote metadata server experienced an internal server error." +msgstr "" + +#: neutron/agent/metadata/agent.py:249 +#: neutron/agent/metadata/namespace_proxy.py:128 +#, python-format +msgid "Unexpected response code: %s" +msgstr "" + +#: neutron/agent/metadata/agent.py:309 +msgid "Location for Metadata Proxy UNIX domain socket" +msgstr "" + +#: neutron/agent/metadata/agent.py:312 +msgid "Number of separate worker processes for metadata server" +msgstr "" + +#: neutron/agent/metadata/agent.py:316 +msgid "Number of backlog requests to configure the metadata server socket with" +msgstr "" + +#: neutron/agent/metadata/namespace_proxy.py:65 +msgid "network_id and router_id are None. One must be provided." +msgstr "" + +#: neutron/agent/metadata/namespace_proxy.py:151 +msgid "Network that will have instance metadata proxied." +msgstr "" + +#: neutron/agent/metadata/namespace_proxy.py:154 +msgid "Router that will have connected instances' metadata proxied." 
+msgstr "" + +#: neutron/agent/metadata/namespace_proxy.py:157 +msgid "Location of pid file of this process." +msgstr "" + +#: neutron/agent/metadata/namespace_proxy.py:160 +msgid "Run as daemon." +msgstr "" + +#: neutron/agent/metadata/namespace_proxy.py:163 +msgid "TCP Port to listen for metadata server requests." +msgstr "" + +#: neutron/api/api_common.py:101 +#, python-format +msgid "" +"Invalid value for pagination_max_limit: %s. It should be an integer " +"greater to 0" +msgstr "" + +#: neutron/api/api_common.py:115 +#, python-format +msgid "Limit must be an integer 0 or greater and not '%d'" +msgstr "" + +#: neutron/api/api_common.py:132 +msgid "The number of sort_keys and sort_dirs must be same" +msgstr "" + +#: neutron/api/api_common.py:137 +#, python-format +msgid "%s is invalid attribute for sort_keys" +msgstr "" + +#: neutron/api/api_common.py:141 +#, python-format +msgid "" +"%(invalid_dirs)s is invalid value for sort_dirs, valid value is '%(asc)s'" +" and '%(desc)s'" +msgstr "" + +#: neutron/api/api_common.py:315 neutron/api/v2/base.py:594 +#, python-format +msgid "Unable to find '%s' in request body" +msgstr "" + +#: neutron/api/api_common.py:322 +#, python-format +msgid "Failed to parse request. Parameter '%s' not specified" +msgstr "" + +#: neutron/api/extensions.py:253 +#, python-format +msgid "Extension with alias %s does not exist" +msgstr "" + +#: neutron/api/extensions.py:257 neutron/api/extensions.py:261 +msgid "Resource not found." +msgstr "" + +#: neutron/api/extensions.py:283 +#, python-format +msgid "Extended resource: %s" +msgstr "" + +#: neutron/api/extensions.py:305 +#, python-format +msgid "Extended action: %s" +msgstr "" + +#: neutron/api/extensions.py:313 +#, python-format +msgid "Extended request: %s" +msgstr "" + +#: neutron/api/extensions.py:403 +msgid "Initializing extension manager." 
+msgstr "" + +#: neutron/api/extensions.py:486 +#, python-format +msgid "Error fetching extended attributes for extension '%s'" +msgstr "" + +#: neutron/api/extensions.py:492 +#, python-format +msgid "" +"Extension '%s' provides no backward compatibility map for extended " +"attributes" +msgstr "" + +#: neutron/api/extensions.py:502 +#, python-format +msgid "" +"It was impossible to process the following extensions: %s because of " +"missing requirements." +msgstr "" + +#: neutron/api/extensions.py:513 +#, python-format +msgid "Ext name: %s" +msgstr "" + +#: neutron/api/extensions.py:514 +#, python-format +msgid "Ext alias: %s" +msgstr "" + +#: neutron/api/extensions.py:515 +#, python-format +msgid "Ext description: %s" +msgstr "" + +#: neutron/api/extensions.py:516 +#, python-format +msgid "Ext namespace: %s" +msgstr "" + +#: neutron/api/extensions.py:517 +#, python-format +msgid "Ext updated: %s" +msgstr "" + +#: neutron/api/extensions.py:519 +#, python-format +msgid "Exception loading extension: %s" +msgstr "" + +#: neutron/api/extensions.py:537 +#, python-format +msgid "Extension path '%s' doesn't exist!" 
+msgstr "" + +#: neutron/api/extensions.py:545 +#, python-format +msgid "Loading extension file: %s" +msgstr "" + +#: neutron/api/extensions.py:553 +#, python-format +msgid "Did not find expected name \"%(ext_name)s\" in %(file)s" +msgstr "" + +#: neutron/api/extensions.py:561 +#, python-format +msgid "Extension file %(f)s wasn't loaded due to %(exception)s" +msgstr "" + +#: neutron/api/extensions.py:570 +#, python-format +msgid "Loaded extension: %s" +msgstr "" + +#: neutron/api/extensions.py:601 +#, python-format +msgid "Extension %s not supported by any of loaded plugins" +msgstr "" + +#: neutron/api/extensions.py:612 +#, python-format +msgid "Loaded plugins do not implement extension %s interface" +msgstr "" + +#: neutron/api/versions.py:45 +msgid "Unknown API version specified" +msgstr "" + +#: neutron/api/rpc/agentnotifiers/dhcp_rpc_agent_api.py:65 +#, python-format +msgid "" +"Unable to schedule network %s: no agents available; will retry on " +"subsequent port creation events." +msgstr "" + +#: neutron/api/rpc/agentnotifiers/dhcp_rpc_agent_api.py:78 +#, python-format +msgid "" +"Only %(active)d of %(total)d DHCP agents associated with network " +"'%(net_id)s' are marked as active, so notifications may be sent to " +"inactive agents." +msgstr "" + +#: neutron/api/rpc/agentnotifiers/dhcp_rpc_agent_api.py:90 +#, python-format +msgid "" +"Will not send event %(method)s for network %(net_id)s: no agent " +"available. Payload: %(payload)s" +msgstr "" + +#: neutron/api/rpc/agentnotifiers/l3_rpc_agent_api.py:38 +#, python-format +msgid "Nofity agent at %(host)s the message %(method)s" +msgstr "" + +#: neutron/api/rpc/agentnotifiers/l3_rpc_agent_api.py:58 +#, python-format +msgid "Notify agent at %(topic)s.%(host)s the message %(method)s" +msgstr "" + +#: neutron/api/rpc/agentnotifiers/l3_rpc_agent_api.py:74 +#, python-format +msgid "" +"No plugin for L3 routing registered. 
Cannot notify agents with the " +"message %s" +msgstr "" + +#: neutron/api/rpc/agentnotifiers/l3_rpc_agent_api.py:92 +#, python-format +msgid "" +"Fanout notify agent at %(topic)s the message %(method)s on router " +"%(router_id)s" +msgstr "" + +#: neutron/api/rpc/agentnotifiers/metering_rpc_agent_api.py:49 +#, python-format +msgid "Notify metering agent at %(topic)s.%(host)s the message %(method)s" +msgstr "" + +#: neutron/api/rpc/agentnotifiers/metering_rpc_agent_api.py:64 +#, python-format +msgid "" +"Fanout notify metering agent at %(topic)s the message %(method)s on " +"router %(router_id)s" +msgstr "" + +#: neutron/api/v2/attributes.py:46 +#, python-format +msgid "" +"Invalid input. '%(target_dict)s' must be a dictionary with keys: " +"%(expected_keys)s" +msgstr "" + +#: neutron/api/v2/attributes.py:57 +#, python-format +msgid "" +"Validation of dictionary's keys failed.Expected keys: %(expected_keys)s " +"Provided keys: %(provided_keys)s" +msgstr "" + +#: neutron/api/v2/attributes.py:71 +#, python-format +msgid "'%(data)s' is not in %(valid_values)s" +msgstr "" + +#: neutron/api/v2/attributes.py:87 +#, python-format +msgid "'%s' Blank strings are not permitted" +msgstr "" + +#: neutron/api/v2/attributes.py:97 +#, python-format +msgid "'%s' is not a valid string" +msgstr "" + +#: neutron/api/v2/attributes.py:102 +#, python-format +msgid "'%(data)s' exceeds maximum length of %(max_len)s" +msgstr "" + +#: neutron/api/v2/attributes.py:112 +#, python-format +msgid "'%s' is not a valid boolean value" +msgstr "" + +#: neutron/api/v2/attributes.py:131 neutron/api/v2/attributes.py:456 +#, python-format +msgid "'%s' is not an integer" +msgstr "" + +#: neutron/api/v2/attributes.py:135 +#, python-format +msgid "'%(data)s' is too small - must be at least '%(limit)d'" +msgstr "" + +#: neutron/api/v2/attributes.py:140 +#, python-format +msgid "'%(data)s' is too large - must be no larger than '%(limit)d'" +msgstr "" + +#: neutron/api/v2/attributes.py:149 +#, python-format 
+msgid "'%s' contains whitespace" +msgstr "" + +#: neutron/api/v2/attributes.py:166 +#, python-format +msgid "'%s' is not a valid MAC address" +msgstr "" + +#: neutron/api/v2/attributes.py:181 +#, python-format +msgid "'%s' is not a valid IP address" +msgstr "" + +#: neutron/api/v2/attributes.py:192 +#, python-format +msgid "Invalid data format for IP pool: '%s'" +msgstr "" + +#: neutron/api/v2/attributes.py:211 neutron/api/v2/attributes.py:218 +#, python-format +msgid "Invalid data format for fixed IP: '%s'" +msgstr "" + +#: neutron/api/v2/attributes.py:226 +#, python-format +msgid "Duplicate IP address '%s'" +msgstr "" + +#: neutron/api/v2/attributes.py:242 +#, python-format +msgid "Invalid data format for nameserver: '%s'" +msgstr "" + +#: neutron/api/v2/attributes.py:253 +#, python-format +msgid "'%s' is not a valid nameserver" +msgstr "" + +#: neutron/api/v2/attributes.py:257 +#, python-format +msgid "Duplicate nameserver '%s'" +msgstr "" + +#: neutron/api/v2/attributes.py:265 +#, python-format +msgid "Invalid data format for hostroute: '%s'" +msgstr "" + +#: neutron/api/v2/attributes.py:285 +#, python-format +msgid "Duplicate hostroute '%s'" +msgstr "" + +#: neutron/api/v2/attributes.py:302 neutron/tests/unit/test_attributes.py:462 +#: neutron/tests/unit/test_attributes.py:476 +#: neutron/tests/unit/test_attributes.py:484 +#, python-format +msgid "'%(data)s' isn't a recognized IP subnet cidr, '%(cidr)s' is recommended" +msgstr "" + +#: neutron/api/v2/attributes.py:308 +#, python-format +msgid "'%s' is not a valid IP subnet" +msgstr "" + +#: neutron/api/v2/attributes.py:316 neutron/api/v2/attributes.py:369 +#, python-format +msgid "'%s' is not a list" +msgstr "" + +#: neutron/api/v2/attributes.py:321 neutron/api/v2/attributes.py:380 +#, python-format +msgid "Duplicate items in the list: '%s'" +msgstr "" + +#: neutron/api/v2/attributes.py:344 +#, python-format +msgid "'%s' is not a valid input" +msgstr "" + +#: neutron/api/v2/attributes.py:357 +#: 
neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:551 +#, python-format +msgid "'%s' is not a valid UUID" +msgstr "" + +#: neutron/api/v2/attributes.py:400 +#, python-format +msgid "Validator '%s' does not exist." +msgstr "" + +#: neutron/api/v2/attributes.py:410 +#, python-format +msgid "'%s' is not a dictionary" +msgstr "" + +#: neutron/api/v2/attributes.py:461 +#, python-format +msgid "'%s' should be non-negative" +msgstr "" + +#: neutron/api/v2/attributes.py:480 +#, python-format +msgid "'%s' cannot be converted to boolean" +msgstr "" + +#: neutron/api/v2/attributes.py:488 +#: neutron/plugins/nec/extensions/packetfilter.py:77 +#, python-format +msgid "'%s' is not a integer" +msgstr "" + +#: neutron/api/v2/attributes.py:501 +#, python-format +msgid "'%s' is not of the form =[value]" +msgstr "" + +#: neutron/api/v2/base.py:88 +msgid "Native pagination depend on native sorting" +msgstr "" + +#: neutron/api/v2/base.py:91 +msgid "Allow sorting is enabled because native pagination requires native sorting" +msgstr "" + +#: neutron/api/v2/base.py:362 +#, python-format +msgid "Unable to undo add for %(resource)s %(id)s" +msgstr "" + +#: neutron/api/v2/base.py:494 +#, python-format +msgid "Invalid format: %s" +msgstr "" + +#: neutron/api/v2/base.py:547 +msgid "" +"Specifying 'tenant_id' other than authenticated tenant in request " +"requires admin privileges" +msgstr "" + +#: neutron/api/v2/base.py:555 +msgid "Running without keystone AuthN requires that tenant_id is specified" +msgstr "" + +#: neutron/api/v2/base.py:573 +msgid "Resource body required" +msgstr "" + +#: neutron/api/v2/base.py:575 +#, python-format +msgid "Request body: %(body)s" +msgstr "" + +#: neutron/api/v2/base.py:585 +msgid "Bulk operation not supported" +msgstr "" + +#: neutron/api/v2/base.py:589 +msgid "Resources required" +msgstr "" + +#: neutron/api/v2/base.py:605 +#, python-format +msgid "Failed to parse request. 
Required attribute '%s' not specified" +msgstr "" + +#: neutron/api/v2/base.py:612 +#, python-format +msgid "Attribute '%s' not allowed in POST" +msgstr "" + +#: neutron/api/v2/base.py:617 +#, python-format +msgid "Cannot update read-only attribute %s" +msgstr "" + +#: neutron/api/v2/base.py:635 +#, python-format +msgid "Invalid input for %(attr)s. Reason: %(reason)s." +msgstr "" + +#: neutron/api/v2/base.py:644 neutron/extensions/allowedaddresspairs.py:57 +#: neutron/extensions/multiprovidernet.py:51 +#, python-format +msgid "Unrecognized attribute(s) '%s'" +msgstr "" + +#: neutron/api/v2/base.py:663 +#, python-format +msgid "Tenant %(tenant_id)s not allowed to create %(resource)s on this network" +msgstr "" + +#: neutron/api/v2/resource.py:97 +#, python-format +msgid "%(action)s failed (client error): %(exc)s" +msgstr "" + +#: neutron/api/v2/resource.py:100 neutron/api/v2/resource.py:110 +#: neutron/api/v2/resource.py:129 +#, python-format +msgid "%s failed" +msgstr "" + +#: neutron/api/v2/resource.py:131 +#: neutron/tests/unit/test_api_v2_resource.py:277 +#: neutron/tests/unit/test_api_v2_resource.py:293 +msgid "Request Failed: internal server error while processing your request." +msgstr "" + +#: neutron/cmd/sanity_check.py:39 +msgid "" +"Check for Open vSwitch VXLAN support failed. Please ensure that the " +"version of openvswitch being used has VXLAN support." +msgstr "" + +#: neutron/cmd/sanity_check.py:48 +msgid "" +"Check for Open vSwitch patch port support failed. Please ensure that the " +"version of openvswitch being used has patch port support or disable " +"features requiring patch ports (gre/vxlan, etc.)." 
+msgstr "" + +#: neutron/cmd/sanity_check.py:58 +msgid "Check for vxlan support" +msgstr "" + +#: neutron/cmd/sanity_check.py:60 +msgid "Check for patch port support" +msgstr "" + +#: neutron/common/config.py:37 +msgid "The host IP to bind to" +msgstr "" + +#: neutron/common/config.py:39 +msgid "The port to bind to" +msgstr "" + +#: neutron/common/config.py:41 +msgid "The API paste config file to use" +msgstr "" + +#: neutron/common/config.py:43 +msgid "The path for API extensions" +msgstr "" + +#: neutron/common/config.py:45 +msgid "The policy file to use" +msgstr "" + +#: neutron/common/config.py:49 +msgid "The core plugin Neutron will use" +msgstr "" + +#: neutron/common/config.py:51 neutron/db/migration/cli.py:35 +msgid "The service plugins Neutron will use" +msgstr "" + +#: neutron/common/config.py:53 +msgid "The base MAC address Neutron will use for VIFs" +msgstr "" + +#: neutron/common/config.py:55 +msgid "How many times Neutron will retry MAC generation" +msgstr "" + +#: neutron/common/config.py:57 +msgid "Allow the usage of the bulk API" +msgstr "" + +#: neutron/common/config.py:59 +msgid "Allow the usage of the pagination" +msgstr "" + +#: neutron/common/config.py:61 +msgid "Allow the usage of the sorting" +msgstr "" + +#: neutron/common/config.py:63 +msgid "" +"The maximum number of items returned in a single response, value was " +"'infinite' or negative integer means no limit" +msgstr "" + +#: neutron/common/config.py:67 +msgid "Maximum number of DNS nameservers" +msgstr "" + +#: neutron/common/config.py:69 +msgid "Maximum number of host routes per subnet" +msgstr "" + +#: neutron/common/config.py:71 +msgid "Maximum number of fixed ips per port" +msgstr "" + +#: neutron/common/config.py:74 +msgid "" +"DHCP lease duration (in seconds). Use -1 to tell dnsmasq to use infinite " +"lease times." 
+msgstr "" + +#: neutron/common/config.py:77 +msgid "Allow sending resource operation notification to DHCP agent" +msgstr "" + +#: neutron/common/config.py:80 +msgid "Allow overlapping IP support in Neutron" +msgstr "" + +#: neutron/common/config.py:82 +msgid "The hostname Neutron is running on" +msgstr "" + +#: neutron/common/config.py:84 +msgid "Ensure that configured gateway is on subnet" +msgstr "" + +#: neutron/common/config.py:86 +msgid "Send notification to nova when port status changes" +msgstr "" + +#: neutron/common/config.py:88 +msgid "" +"Send notification to nova when port data (fixed_ips/floatingip) changes " +"so nova can update its cache." +msgstr "" + +#: neutron/common/config.py:92 +msgid "URL for connection to nova" +msgstr "" + +#: neutron/common/config.py:94 +msgid "Username for connecting to nova in admin context" +msgstr "" + +#: neutron/common/config.py:96 +msgid "Password for connection to nova in admin context" +msgstr "" + +#: neutron/common/config.py:99 +msgid "The uuid of the admin nova tenant" +msgstr "" + +#: neutron/common/config.py:102 +msgid "Authorization URL for connecting to nova in admin context" +msgstr "" + +#: neutron/common/config.py:105 +msgid "CA file for novaclient to verify server certificates" +msgstr "" + +#: neutron/common/config.py:107 +msgid "If True, ignore any SSL validation issues" +msgstr "" + +#: neutron/common/config.py:109 +msgid "" +"Name of nova region to use. Useful if keystone manages more than one " +"region." +msgstr "" + +#: neutron/common/config.py:112 +msgid "" +"Number of seconds between sending events to nova if there are any events " +"to send." +msgstr "" + +#: neutron/common/config.py:119 +msgid "" +"Where to store Neutron state files. This directory must be writable by " +"the agent." +msgstr "" + +#: neutron/common/config.py:151 +#, python-format +msgid "Base MAC: %s" +msgstr "" + +#: neutron/common/config.py:162 +msgid "Logging enabled!" 
+msgstr "" + +#: neutron/common/config.py:178 +#, python-format +msgid "Config paste file: %s" +msgstr "" + +#: neutron/common/config.py:183 +#, python-format +msgid "Unable to load %(app_name)s from configuration file %(config_path)s." +msgstr "" + +#: neutron/common/exceptions.py:30 +#: neutron/plugins/vmware/api_client/exception.py:27 +msgid "An unknown exception occurred." +msgstr "" + +#: neutron/common/exceptions.py:51 +#, python-format +msgid "Bad %(resource)s request: %(msg)s" +msgstr "" + +#: neutron/common/exceptions.py:63 +msgid "Not authorized." +msgstr "" + +#: neutron/common/exceptions.py:67 +msgid "The service is unavailable" +msgstr "" + +#: neutron/common/exceptions.py:71 +#, python-format +msgid "User does not have admin privileges: %(reason)s" +msgstr "" + +#: neutron/common/exceptions.py:75 +#, python-format +msgid "Policy doesn't allow %(action)s to be performed." +msgstr "" + +#: neutron/common/exceptions.py:79 +#, python-format +msgid "Network %(net_id)s could not be found" +msgstr "" + +#: neutron/common/exceptions.py:83 +#, python-format +msgid "Subnet %(subnet_id)s could not be found" +msgstr "" + +#: neutron/common/exceptions.py:87 +#, python-format +msgid "Port %(port_id)s could not be found" +msgstr "" + +#: neutron/common/exceptions.py:91 +#, python-format +msgid "Port %(port_id)s could not be found on network %(net_id)s" +msgstr "" + +#: neutron/common/exceptions.py:96 +msgid "Policy configuration policy.json could not be found" +msgstr "" + +#: neutron/common/exceptions.py:100 +#, python-format +msgid "Failed to init policy %(policy)s because %(reason)s" +msgstr "" + +#: neutron/common/exceptions.py:104 +#, python-format +msgid "Failed to check policy %(policy)s because %(reason)s" +msgstr "" + +#: neutron/common/exceptions.py:108 +#, python-format +msgid "Unsupported port state: %(port_state)s" +msgstr "" + +#: neutron/common/exceptions.py:112 +msgid "The resource is inuse" +msgstr "" + +#: neutron/common/exceptions.py:116 +#, 
python-format +msgid "" +"Unable to complete operation on network %(net_id)s. There are one or more" +" ports still in use on the network." +msgstr "" + +#: neutron/common/exceptions.py:121 +#, python-format +msgid "" +"Unable to complete operation on subnet %(subnet_id)s. One or more ports " +"have an IP allocation from this subnet." +msgstr "" + +#: neutron/common/exceptions.py:126 +#, python-format +msgid "" +"Unable to complete operation on port %(port_id)s for network %(net_id)s. " +"Port already has an attacheddevice %(device_id)s." +msgstr "" + +#: neutron/common/exceptions.py:132 +#, python-format +msgid "" +"Unable to complete operation for network %(net_id)s. The mac address " +"%(mac)s is in use." +msgstr "" + +#: neutron/common/exceptions.py:138 +#, python-format +msgid "" +"Unable to complete operation for %(subnet_id)s. The number of host routes" +" exceeds the limit %(quota)s." +msgstr "" + +#: neutron/common/exceptions.py:144 +#, python-format +msgid "" +"Unable to complete operation for %(subnet_id)s. The number of DNS " +"nameservers exceeds the limit %(quota)s." +msgstr "" + +#: neutron/common/exceptions.py:149 +#, python-format +msgid "" +"Unable to complete operation for network %(net_id)s. The IP address " +"%(ip_address)s is in use." +msgstr "" + +#: neutron/common/exceptions.py:154 +#, python-format +msgid "" +"Unable to create the network. The VLAN %(vlan_id)s on physical network " +"%(physical_network)s is in use." +msgstr "" + +#: neutron/common/exceptions.py:160 +#, python-format +msgid "" +"Unable to create the flat network. Physical network %(physical_network)s " +"is in use." +msgstr "" + +#: neutron/common/exceptions.py:165 +#, python-format +msgid "Unable to create the network. The tunnel ID %(tunnel_id)s is in use." +msgstr "" + +#: neutron/common/exceptions.py:170 +msgid "Tenant network creation is not enabled." +msgstr "" + +#: neutron/common/exceptions.py:178 +msgid "" +"Unable to create the network. 
No tenant network is available for " +"allocation." +msgstr "" + +#: neutron/common/exceptions.py:183 +#, python-format +msgid "" +"Subnet on port %(port_id)s does not match the requested subnet " +"%(subnet_id)s" +msgstr "" + +#: neutron/common/exceptions.py:188 +#, python-format +msgid "Malformed request body: %(reason)s" +msgstr "" + +#: neutron/common/exceptions.py:198 +#, python-format +msgid "Invalid input for operation: %(error_message)s." +msgstr "" + +#: neutron/common/exceptions.py:202 +#, python-format +msgid "The allocation pool %(pool)s is not valid." +msgstr "" + +#: neutron/common/exceptions.py:206 +#, python-format +msgid "" +"Found overlapping allocation pools:%(pool_1)s %(pool_2)s for subnet " +"%(subnet_cidr)s." +msgstr "" + +#: neutron/common/exceptions.py:211 +#, python-format +msgid "The allocation pool %(pool)s spans beyond the subnet cidr %(subnet_cidr)s." +msgstr "" + +#: neutron/common/exceptions.py:216 +#, python-format +msgid "Unable to generate unique mac on network %(net_id)s." +msgstr "" + +#: neutron/common/exceptions.py:220 +#, python-format +msgid "No more IP addresses available on network %(net_id)s." +msgstr "" + +#: neutron/common/exceptions.py:224 +#, python-format +msgid "Bridge %(bridge)s does not exist." +msgstr "" + +#: neutron/common/exceptions.py:228 +#, python-format +msgid "Creation failed. %(dev_name)s already exists." +msgstr "" + +#: neutron/common/exceptions.py:232 +msgid "Sudo privilege is required to run this command." +msgstr "" + +#: neutron/common/exceptions.py:236 +#, python-format +msgid "Unknown quota resources %(unknown)s." 
+msgstr "" + +#: neutron/common/exceptions.py:240 +#, python-format +msgid "Quota exceeded for resources: %(overs)s" +msgstr "" + +#: neutron/common/exceptions.py:244 +msgid "Tenant-id was missing from Quota request" +msgstr "" + +#: neutron/common/exceptions.py:248 +#, python-format +msgid "" +"Change would make usage less than 0 for the following resources: " +"%(unders)s" +msgstr "" + +#: neutron/common/exceptions.py:253 +#, python-format +msgid "" +"Unable to reconfigure sharing settings for network %(network)s. Multiple " +"tenants are using it" +msgstr "" + +#: neutron/common/exceptions.py:258 +#, python-format +msgid "Invalid extension environment: %(reason)s" +msgstr "" + +#: neutron/common/exceptions.py:262 +#, python-format +msgid "Extensions not found: %(extensions)s" +msgstr "" + +#: neutron/common/exceptions.py:266 +#, python-format +msgid "Invalid content type %(content_type)s" +msgstr "" + +#: neutron/common/exceptions.py:270 +#, python-format +msgid "Unable to find any IP address on external network %(net_id)s." +msgstr "" + +#: neutron/common/exceptions.py:275 +msgid "More than one external network exists" +msgstr "" + +#: neutron/common/exceptions.py:279 +#, python-format +msgid "An invalid value was provided for %(opt_name)s: %(opt_value)s" +msgstr "" + +#: neutron/common/exceptions.py:284 +#, python-format +msgid "Gateway ip %(ip_address)s conflicts with allocation pool %(pool)s" +msgstr "" + +#: neutron/common/exceptions.py:289 +#, python-format +msgid "" +"Current gateway ip %(ip_address)s already in use by port %(port_id)s. " +"Unable to update." +msgstr "" + +#: neutron/common/exceptions.py:294 +#, python-format +msgid "Invalid network VLAN range: '%(vlan_range)s' - '%(error)s'" +msgstr "" + +#: neutron/common/exceptions.py:304 +#, python-format +msgid "Invalid network VXLAN port range: '%(vxlan_range)s'" +msgstr "" + +#: neutron/common/exceptions.py:308 +msgid "VXLAN Network unsupported." 
+msgstr "" + +#: neutron/common/exceptions.py:312 +#, python-format +msgid "Found duplicate extension: %(alias)s" +msgstr "" + +#: neutron/common/exceptions.py:316 +#, python-format +msgid "" +"The following device_id %(device_id)s is not owned by your tenant or " +"matches another tenants router." +msgstr "" + +#: neutron/common/exceptions.py:321 +#, python-format +msgid "Invalid CIDR %(input)s given as IP prefix" +msgstr "" + +#: neutron/common/ipv6_utils.py:27 +msgid "Unable to generate IP address by EUI64 for IPv4 prefix" +msgstr "" + +#: neutron/common/ipv6_utils.py:34 +#, python-format +msgid "" +"Bad prefix or mac format for generating IPv6 address by EUI-64: " +"%(prefix)s, %(mac)s:" +msgstr "" + +#: neutron/common/ipv6_utils.py:38 +#, python-format +msgid "Bad prefix type for generate IPv6 address by EUI-64: %s" +msgstr "" + +#: neutron/common/log.py:32 +#, python-format +msgid "" +"%(class_name)s method %(method_name)s called with arguments %(args)s " +"%(kwargs)s" +msgstr "" + +#: neutron/common/utils.py:68 +#, python-format +msgid "" +"Method %(func_name)s cannot be cached due to unhashable parameters: args:" +" %(args)s, kwargs: %(kwargs)s" +msgstr "" + +#: neutron/common/utils.py:91 +#, python-format +msgid "" +"Instance of class %(module)s.%(class)s doesn't contain attribute _cache " +"therefore results cannot be cached for %(func_name)s." 
+msgstr "" + +#: neutron/common/utils.py:117 neutron/openstack/common/fileutils.py:63 +#, python-format +msgid "Reloading cached file %s" +msgstr "" + +#: neutron/common/utils.py:200 +#, python-format +msgid "Invalid mapping: '%s'" +msgstr "" + +#: neutron/common/utils.py:203 +#, python-format +msgid "Missing key in mapping: '%s'" +msgstr "" + +#: neutron/common/utils.py:206 +#, python-format +msgid "Missing value in mapping: '%s'" +msgstr "" + +#: neutron/common/utils.py:208 +#, python-format +msgid "Key %(key)s in mapping: '%(mapping)s' not unique" +msgstr "" + +#: neutron/common/utils.py:211 +#, python-format +msgid "Value %(value)s in mapping: '%(mapping)s' not unique" +msgstr "" + +#: neutron/db/agents_db.py:36 +msgid "" +"Seconds to regard the agent is down; should be at least twice " +"report_interval, to be sure the agent is down for good." +msgstr "" + +#: neutron/db/agents_db.py:93 +#, python-format +msgid "Configuration for agent %(agent_type)s on host %(host)s is invalid." +msgstr "" + +#: neutron/db/agents_db.py:214 +msgid "Message with invalid timestamp received" +msgstr "" + +#: neutron/db/agentschedulers_db.py:37 +msgid "Driver to use for scheduling network to DHCP agent" +msgstr "" + +#: neutron/db/agentschedulers_db.py:39 +msgid "Allow auto scheduling networks to DHCP agent." +msgstr "" + +#: neutron/db/agentschedulers_db.py:41 +msgid "Number of DHCP agents scheduled to host a network." +msgstr "" + +#: neutron/db/api.py:77 +#, python-format +msgid "Database registration exception: %s" +msgstr "" + +#: neutron/db/api.py:89 +msgid "Database exception" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:148 +msgid "Cannot create resource for another tenant" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:317 +#, python-format +msgid "Generated mac for network %(network_id)s is %(mac_address)s" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:323 +#, python-format +msgid "Generated mac %(mac_address)s exists. Remaining attempts %(max_retries)s." 
+msgstr "" + +#: neutron/db/db_base_plugin_v2.py:327 +#, python-format +msgid "Unable to generate mac address after %s attempts" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:345 +#, python-format +msgid "Delete allocated IP %(ip_address)s (%(network_id)s/%(subnet_id)s)" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:383 +#, python-format +msgid "All IPs from subnet %(subnet_id)s (%(cidr)s) allocated" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:388 +#, python-format +msgid "Allocated IP - %(ip_address)s from %(first_ip)s to %(last_ip)s" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:395 +msgid "No more free IP's in slice. Deleting allocation pool." +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:414 +#, python-format +msgid "Rebuilding availability ranges for subnet %s" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:546 +msgid "IP allocation requires subnet_id or ip_address" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:558 +#, python-format +msgid "IP address %s is not a valid IP for the defined networks subnets" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:564 +#, python-format +msgid "" +"Failed to create port on network %(network_id)s, because fixed_ips " +"included invalid subnet %(subnet_id)s" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:584 +#, python-format +msgid "IP address %s is not a valid IP for the defined subnet" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:593 neutron/db/db_base_plugin_v2.py:626 +msgid "Exceeded maximim amount of fixed ips per port" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:641 +#, python-format +msgid "Port update. Hold %s" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:648 +#, python-format +msgid "Port update. 
Adding %s" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:717 +#, python-format +msgid "" +"Requested subnet with cidr: %(cidr)s for network: %(network_id)s overlaps" +" with another subnet" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:722 +#, python-format +msgid "" +"Validation for CIDR: %(new_cidr)s failed - overlaps with subnet " +"%(subnet_id)s (CIDR: %(cidr)s)" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:742 +msgid "Performing IP validity checks on allocation pools" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:749 +#, python-format +msgid "Found invalid IP address in pool: %(start)s - %(end)s:" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:756 +msgid "Specified IP addresses do not match the subnet IP version" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:760 +#, python-format +msgid "Start IP (%(start)s) is greater than end IP (%(end)s)" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:765 +#, python-format +msgid "Found pool larger than subnet CIDR:%(start)s - %(end)s" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:778 +msgid "Checking for overlaps among allocation pools and gateway ip" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:789 +#, python-format +msgid "Found overlapping ranges: %(l_range)s and %(r_range)s" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:802 neutron/db/db_base_plugin_v2.py:806 +#, python-format +msgid "Invalid route: %s" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:879 +#, python-format +msgid "" +"ipv6_ra_mode set to '%(ra_mode)s' with ipv6_address_mode set to " +"'%(addr_mode)s' is not valid. If both attributes are set, they must be " +"the same value" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:887 +msgid "" +"ipv6_ra_mode or ipv6_address_mode cannot be set when enable_dhcp is set " +"to False." 
+msgstr "" + +#: neutron/db/db_base_plugin_v2.py:893 +msgid "Cannot disable enable_dhcp with ipv6 attributes set" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:983 +#, python-format +msgid "An exception occurred while creating the %(resource)s:%(item)s" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:1080 +#, python-format +msgid "%(name)s '%(addr)s' does not match the ip_version '%(ip_version)s'" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:1104 +msgid "Gateway is not valid on subnet" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:1124 neutron/db/db_base_plugin_v2.py:1138 +msgid "new subnet" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:1131 +#, python-format +msgid "Error parsing dns address %s" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:1147 +msgid "ipv6_ra_mode is not valid when ip_version is 4" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:1151 +msgid "ipv6_address_mode is not valid when ip_version is 4" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:1452 +#, python-format +msgid "Allocated IP %(ip_address)s (%(network_id)s/%(subnet_id)s/%(port_id)s)" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:1531 +#, python-format +msgid "" +"Ignoring PortNotFound when deleting port '%s'. The port has already been " +"deleted." 
+msgstr "" + +#: neutron/db/dhcp_rpc_base.py:58 +msgid "Unrecognized action" +msgstr "" + +#: neutron/db/dhcp_rpc_base.py:75 +#, python-format +msgid "" +"Action %(action)s for network %(net_id)s could not complete successfully:" +" %(reason)s" +msgstr "" + +#: neutron/db/dhcp_rpc_base.py:85 +#, python-format +msgid "get_active_networks requested from %s" +msgstr "" + +#: neutron/db/dhcp_rpc_base.py:92 +#, python-format +msgid "get_active_networks_info from %s" +msgstr "" + +#: neutron/db/dhcp_rpc_base.py:112 +#, python-format +msgid "Network %(network_id)s requested from %(host)s" +msgstr "" + +#: neutron/db/dhcp_rpc_base.py:119 neutron/db/dhcp_rpc_base.py:183 +#, python-format +msgid "Network %s could not be found, it might have been deleted concurrently." +msgstr "" + +#: neutron/db/dhcp_rpc_base.py:141 +#, python-format +msgid "Port %(device_id)s for %(network_id)s requested from %(host)s" +msgstr "" + +#: neutron/db/dhcp_rpc_base.py:175 +#, python-format +msgid "" +"DHCP port %(device_id)s on network %(network_id)s does not exist on " +"%(host)s" +msgstr "" + +#: neutron/db/dhcp_rpc_base.py:215 +#, python-format +msgid "DHCP port deletion for %(network_id)s request from %(host)s" +msgstr "" + +#: neutron/db/dhcp_rpc_base.py:228 +#, python-format +msgid "DHCP port remove fixed_ip for %(subnet_id)s request from %(host)s" +msgstr "" + +#: neutron/db/dhcp_rpc_base.py:252 +#, python-format +msgid "Updating lease expiration is now deprecated. Issued from host %s." +msgstr "" + +#: neutron/db/dhcp_rpc_base.py:263 +#, python-format +msgid "Create dhcp port %(port)s from %(host)s." +msgstr "" + +#: neutron/db/dhcp_rpc_base.py:280 +#, python-format +msgid "Update dhcp port %(port)s from %(host)s." 
+msgstr "" + +#: neutron/db/extraroute_db.py:36 +msgid "Maximum number of routes" +msgstr "" + +#: neutron/db/extraroute_db.py:91 +msgid "the nexthop is not connected with router" +msgstr "" + +#: neutron/db/extraroute_db.py:96 +msgid "the nexthop is used by router" +msgstr "" + +#: neutron/db/extraroute_db.py:125 +#, python-format +msgid "Added routes are %s" +msgstr "" + +#: neutron/db/extraroute_db.py:133 +#, python-format +msgid "Removed routes are %s" +msgstr "" + +#: neutron/db/l3_agentschedulers_db.py:34 +msgid "Driver to use for scheduling router to a default L3 agent" +msgstr "" + +#: neutron/db/l3_agentschedulers_db.py:37 +msgid "Allow auto scheduling of routers to L3 agent." +msgstr "" + +#: neutron/db/l3_db.py:239 +#, python-format +msgid "No eligible l3 agent associated with external network %s found" +msgstr "" + +#: neutron/db/l3_db.py:260 +#, python-format +msgid "No IPs available for external network %s" +msgstr "" + +#: neutron/db/l3_db.py:274 +#, python-format +msgid "Network %s is not an external network" +msgstr "" + +#: neutron/db/l3_db.py:388 +#, python-format +msgid "Router already has a port on subnet %s" +msgstr "" + +#: neutron/db/l3_db.py:402 +#, python-format +msgid "" +"Cidr %(subnet_cidr)s of subnet %(subnet_id)s overlaps with cidr %(cidr)s " +"of subnet %(sub_id)s" +msgstr "" + +#: neutron/db/l3_db.py:416 neutron/db/l3_db.py:542 +#: neutron/plugins/bigswitch/plugin.py:989 +#: neutron/plugins/bigswitch/plugin.py:998 +msgid "Either subnet_id or port_id must be specified" +msgstr "" + +#: neutron/db/l3_db.py:421 +msgid "Cannot specify both subnet-id and port-id" +msgstr "" + +#: neutron/db/l3_db.py:434 +msgid "Router port must have exactly one fixed IP" +msgstr "" + +#: neutron/db/l3_db.py:448 +msgid "Subnet for router interface must have a gateway IP" +msgstr "" + +#: neutron/db/l3_db.py:596 neutron/plugins/nec/nec_router.py:199 +#, python-format +msgid "Cannot add floating IP to port on subnet %s which has no gateway_ip" +msgstr "" + 
+#: neutron/db/l3_db.py:632 +#, python-format +msgid "" +"Port %(port_id)s is associated with a different tenant than Floating IP " +"%(floatingip_id)s and therefore cannot be bound." +msgstr "" + +#: neutron/db/l3_db.py:636 +#, python-format +msgid "" +"Cannot create floating IP and bind it to Port %s, since that port is " +"owned by a different tenant." +msgstr "" + +#: neutron/db/l3_db.py:648 +#, python-format +msgid "Port %(id)s does not have fixed ip %(address)s" +msgstr "" + +#: neutron/db/l3_db.py:655 +#, python-format +msgid "Cannot add floating IP to port %s that hasno fixed IP addresses" +msgstr "" + +#: neutron/db/l3_db.py:659 +#, python-format +msgid "" +"Port %s has multiple fixed IPs. Must provide a specific IP when " +"assigning a floating IP" +msgstr "" + +#: neutron/db/l3_db.py:702 neutron/plugins/vmware/plugins/base.py:1871 +msgid "fixed_ip_address cannot be specified without a port_id" +msgstr "" + +#: neutron/db/l3_db.py:737 +#, python-format +msgid "Network %s is not a valid external network" +msgstr "" + +#: neutron/db/l3_db.py:874 +#, python-format +msgid "" +"Port %(port_id)s has owner %(port_owner)s, but no IP address, so it can " +"be deleted" +msgstr "" + +#: neutron/db/l3_db.py:979 +#, python-format +msgid "Skipping port %s as no IP is configure on it" +msgstr "" + +#: neutron/db/l3_rpc_base.py:50 +msgid "" +"No plugin for L3 routing registered! Will reply to l3 agent with empty " +"router dictionary." 
+msgstr "" + +#: neutron/db/l3_rpc_base.py:64 +#, python-format +msgid "" +"Routers returned to l3 agent:\n" +" %s" +msgstr "" + +#: neutron/db/l3_rpc_base.py:70 +#, python-format +msgid "Checking router: %(id)s for host: %(host)s" +msgstr "" + +#: neutron/db/l3_rpc_base.py:95 +#, python-format +msgid "External network ID returned to l3 agent: %s" +msgstr "" + +#: neutron/db/l3_rpc_base.py:105 +#, python-format +msgid "New status for floating IP %(floatingip_id)s: %(status)s" +msgstr "" + +#: neutron/db/l3_rpc_base.py:113 +#, python-format +msgid "Floating IP: %s no longer present." +msgstr "" + +#: neutron/db/routedserviceinsertion_db.py:36 +#, python-format +msgid "Resource type '%(resource_type)s' is longer than %(maxlen)d characters" +msgstr "" + +#: neutron/db/securitygroups_rpc_base.py:277 +#, python-format +msgid "No valid gateway port on subnet %s is found for IPv6 RA" +msgstr "" + +#: neutron/db/sqlalchemyutils.py:73 +#, python-format +msgid "%s is invalid attribute for sort_key" +msgstr "" + +#: neutron/db/sqlalchemyutils.py:76 +#, python-format +msgid "" +"The attribute '%(attr)s' is reference to other resource, can't used by " +"sort '%(resource)s'" +msgstr "" + +#: neutron/db/firewall/firewall_db.py:241 +#: neutron/plugins/vmware/plugins/service.py:915 +#: neutron/services/firewall/fwaas_plugin.py:229 +msgid "create_firewall() called" +msgstr "" + +#: neutron/db/firewall/firewall_db.py:257 +#: neutron/plugins/vmware/plugins/service.py:942 +#: neutron/services/firewall/fwaas_plugin.py:244 +msgid "update_firewall() called" +msgstr "" + +#: neutron/db/firewall/firewall_db.py:267 +#: neutron/plugins/vmware/plugins/service.py:968 +#: neutron/services/firewall/fwaas_plugin.py:259 +msgid "delete_firewall() called" +msgstr "" + +#: neutron/db/firewall/firewall_db.py:277 +msgid "get_firewall() called" +msgstr "" + +#: neutron/db/firewall/firewall_db.py:282 +msgid "get_firewalls() called" +msgstr "" + +#: neutron/db/firewall/firewall_db.py:288 +msgid 
"get_firewalls_count() called" +msgstr "" + +#: neutron/db/firewall/firewall_db.py:293 +msgid "create_firewall_policy() called" +msgstr "" + +#: neutron/db/firewall/firewall_db.py:309 +#: neutron/plugins/vmware/plugins/service.py:1030 +#: neutron/services/firewall/fwaas_plugin.py:268 +msgid "update_firewall_policy() called" +msgstr "" + +#: neutron/db/firewall/firewall_db.py:321 +msgid "delete_firewall_policy() called" +msgstr "" + +#: neutron/db/firewall/firewall_db.py:333 +msgid "get_firewall_policy() called" +msgstr "" + +#: neutron/db/firewall/firewall_db.py:338 +msgid "get_firewall_policies() called" +msgstr "" + +#: neutron/db/firewall/firewall_db.py:344 +msgid "get_firewall_policies_count() called" +msgstr "" + +#: neutron/db/firewall/firewall_db.py:349 +msgid "create_firewall_rule() called" +msgstr "" + +#: neutron/db/firewall/firewall_db.py:378 +#: neutron/plugins/vmware/plugins/service.py:1004 +#: neutron/services/firewall/fwaas_plugin.py:276 +msgid "update_firewall_rule() called" +msgstr "" + +#: neutron/db/firewall/firewall_db.py:402 +msgid "delete_firewall_rule() called" +msgstr "" + +#: neutron/db/firewall/firewall_db.py:410 +msgid "get_firewall_rule() called" +msgstr "" + +#: neutron/db/firewall/firewall_db.py:415 +msgid "get_firewall_rules() called" +msgstr "" + +#: neutron/db/firewall/firewall_db.py:421 +msgid "get_firewall_rules_count() called" +msgstr "" + +#: neutron/db/firewall/firewall_db.py:430 +#: neutron/plugins/vmware/plugins/service.py:1057 +#: neutron/services/firewall/fwaas_plugin.py:286 +msgid "insert_rule() called" +msgstr "" + +#: neutron/db/firewall/firewall_db.py:470 +#: neutron/plugins/vmware/plugins/service.py:1086 +#: neutron/services/firewall/fwaas_plugin.py:294 +msgid "remove_rule() called" +msgstr "" + +#: neutron/db/loadbalancer/loadbalancer_db.py:70 +#, python-format +msgid "The %(key)s field can not have negative value. Current value is %(value)d." 
+msgstr "" + +#: neutron/db/loadbalancer/loadbalancer_db.py:274 +msgid "'cookie_name' should be specified for this type of session persistence." +msgstr "" + +#: neutron/db/loadbalancer/loadbalancer_db.py:278 +msgid "'cookie_name' is not allowed for this type of session persistence" +msgstr "" + +#: neutron/db/metering/metering_rpc.py:46 +#, python-format +msgid "Unable to find agent %s." +msgstr "" + +#: neutron/db/migration/cli.py:32 +msgid "Neutron plugin provider module" +msgstr "" + +#: neutron/db/migration/cli.py:41 +msgid "Neutron quota driver class" +msgstr "" + +#: neutron/db/migration/cli.py:49 +msgid "URL to database" +msgstr "" + +#: neutron/db/migration/cli.py:52 +msgid "Database engine" +msgstr "" + +#: neutron/db/migration/cli.py:75 +msgid "You must provide a revision or relative delta" +msgstr "" + +#: neutron/db/migration/cli.py:105 neutron/db/migration/cli.py:118 +msgid "Timeline branches unable to generate timeline" +msgstr "" + +#: neutron/db/migration/cli.py:112 +msgid "HEAD file does not match migration timeline head" +msgstr "" + +#: neutron/db/migration/cli.py:154 +msgid "Available commands" +msgstr "" + +#: neutron/db/migration/migrate_to_ml2.py:88 +msgid "Missing version in alembic_versions table" +msgstr "" + +#: neutron/db/migration/migrate_to_ml2.py:90 +#, python-format +msgid "Multiple versions in alembic_versions table: %s" +msgstr "" + +#: neutron/db/migration/migrate_to_ml2.py:94 +#, python-format +msgid "" +"Unsupported database schema %(current)s. 
Please migrate your database to " +"one of following versions: %(supported)s" +msgstr "" + +#: neutron/db/migration/migrate_to_ml2.py:414 +#, python-format +msgid "Unknown tunnel type: %s" +msgstr "" + +#: neutron/db/migration/migrate_to_ml2.py:428 +msgid "The plugin type whose database will be migrated" +msgstr "" + +#: neutron/db/migration/migrate_to_ml2.py:431 +msgid "The connection url for the target db" +msgstr "" + +#: neutron/db/migration/migrate_to_ml2.py:434 +#, python-format +msgid "The %s tunnel type to migrate from" +msgstr "" + +#: neutron/db/migration/migrate_to_ml2.py:437 +#: neutron/plugins/openvswitch/common/config.py:77 +msgid "The UDP port to use for VXLAN tunnels." +msgstr "" + +#: neutron/db/migration/migrate_to_ml2.py:440 +msgid "Retain the old plugin's tables" +msgstr "" + +#: neutron/db/migration/migrate_to_ml2.py:446 +#, python-format +msgid "" +"Tunnel args (tunnel-type and vxlan-udp-port) are not valid for the %s " +"plugin" +msgstr "" + +#: neutron/db/migration/migrate_to_ml2.py:453 +#, python-format +msgid "" +"Support for migrating %(plugin)s for release %(release)s is not yet " +"implemented" +msgstr "" + +#: neutron/db/vpn/vpn_db.py:680 +#, python-format +msgid "vpnservice %s in db is already deleted" +msgstr "" + +#: neutron/debug/commands.py:34 +msgid "Unimplemented commands" +msgstr "" + +#: neutron/debug/commands.py:46 +msgid "ID of network to probe" +msgstr "" + +#: neutron/debug/commands.py:50 +msgid "Owner type of the device: network/compute" +msgstr "" + +#: neutron/debug/commands.py:58 +#, python-format +msgid "Probe created : %s " +msgstr "" + +#: neutron/debug/commands.py:70 +msgid "ID of probe port to delete" +msgstr "" + +#: neutron/debug/commands.py:77 +#, python-format +msgid "Probe %s deleted" +msgstr "" + +#: neutron/debug/commands.py:108 +msgid "All Probes deleted " +msgstr "" + +#: neutron/debug/commands.py:120 +msgid "ID of probe port to execute command" +msgstr "" + +#: neutron/debug/commands.py:125 +msgid 
"Command to execute" +msgstr "" + +#: neutron/debug/commands.py:145 +msgid "Ping timeout" +msgstr "" + +#: neutron/debug/commands.py:149 +msgid "ID of network" +msgstr "" + +#: neutron/debug/debug_agent.py:122 +#, python-format +msgid "Failed to delete namespace %s" +msgstr "" + +#: neutron/debug/shell.py:64 +msgid "Config file for interface driver (You may also use l3_agent.ini)" +msgstr "" + +#: neutron/debug/shell.py:72 +msgid "" +"You must provide a config file for bridge - either --config-file or " +"env[NEUTRON_TEST_CONFIG_FILE]" +msgstr "" + +#: neutron/extensions/agent.py:61 +#, python-format +msgid "Agent %(id)s could not be found" +msgstr "" + +#: neutron/extensions/agent.py:65 +#, python-format +msgid "Agent with agent_type=%(agent_type)s and host=%(host)s could not be found" +msgstr "" + +#: neutron/extensions/agent.py:70 +#, python-format +msgid "Multiple agents with agent_type=%(agent_type)s and host=%(host)s found" +msgstr "" + +#: neutron/extensions/allowedaddresspairs.py:22 +msgid "AllowedAddressPair must contain ip_address" +msgstr "" + +#: neutron/extensions/allowedaddresspairs.py:26 +msgid "" +"Port Security must be enabled in order to have allowed address pairs on a" +" port." +msgstr "" + +#: neutron/extensions/allowedaddresspairs.py:31 +#, python-format +msgid "" +"Request contains duplicate address pair: mac_address %(mac_address)s " +"ip_address %(ip_address)s." +msgstr "" + +#: neutron/extensions/dhcpagentscheduler.py:119 +#, python-format +msgid "Agent %(id)s is not a valid DHCP Agent or has been disabled" +msgstr "" + +#: neutron/extensions/dhcpagentscheduler.py:123 +#, python-format +msgid "" +"The network %(network_id)s has been already hosted by the DHCP Agent " +"%(agent_id)s." +msgstr "" + +#: neutron/extensions/dhcpagentscheduler.py:128 +#, python-format +msgid "The network %(network_id)s is not hosted by the DHCP agent %(agent_id)s." 
+msgstr "" + +#: neutron/extensions/external_net.py:23 +#, python-format +msgid "" +"External network %(net_id)s cannot be updated to be made non-external, " +"since it has existing gateway ports" +msgstr "" + +#: neutron/extensions/external_net.py:51 +msgid "Adds external network attribute to network resource." +msgstr "" + +#: neutron/extensions/extra_dhcp_opt.py:25 +#, python-format +msgid "ExtraDhcpOpt %(id)s could not be found" +msgstr "" + +#: neutron/extensions/extra_dhcp_opt.py:29 +#, python-format +msgid "Invalid data format for extra-dhcp-opt: %(data)s" +msgstr "" + +#: neutron/extensions/extraroute.py:23 +#, python-format +msgid "Invalid format for routes: %(routes)s, %(reason)s" +msgstr "" + +#: neutron/extensions/extraroute.py:27 +#, python-format +msgid "" +"Router interface for subnet %(subnet_id)s on router %(router_id)s cannot " +"be deleted, as it is required by one or more routes." +msgstr "" + +#: neutron/extensions/extraroute.py:33 +#, python-format +msgid "" +"Unable to complete operation for %(router_id)s. The number of routes " +"exceeds the maximum %(quota)s." +msgstr "" + +#: neutron/extensions/firewall.py:37 +#, python-format +msgid "Firewall %(firewall_id)s could not be found." +msgstr "" + +#: neutron/extensions/firewall.py:41 +#, python-format +msgid "Firewall %(firewall_id)s is still active." +msgstr "" + +#: neutron/extensions/firewall.py:45 +#, python-format +msgid "" +"Operation cannot be performed since associated Firewall %(firewall_id)s " +"is in %(pending_state)s." +msgstr "" + +#: neutron/extensions/firewall.py:50 +#, python-format +msgid "Firewall Policy %(firewall_policy_id)s could not be found." +msgstr "" + +#: neutron/extensions/firewall.py:54 +#, python-format +msgid "Firewall Policy %(firewall_policy_id)s is being used." +msgstr "" + +#: neutron/extensions/firewall.py:58 +#, python-format +msgid "Firewall Rule %(firewall_rule_id)s could not be found." 
+msgstr "" + +#: neutron/extensions/firewall.py:62 +#, python-format +msgid "Firewall Rule %(firewall_rule_id)s is being used." +msgstr "" + +#: neutron/extensions/firewall.py:66 +#, python-format +msgid "" +"Firewall Rule %(firewall_rule_id)s is not associated with Firewall " +"Policy %(firewall_policy_id)s." +msgstr "" + +#: neutron/extensions/firewall.py:71 +#, python-format +msgid "" +"Firewall Rule protocol %(protocol)s is not supported. Only protocol " +"values %(values)s and their integer representation (0 to 255) are " +"supported." +msgstr "" + +#: neutron/extensions/firewall.py:77 +#, python-format +msgid "" +"Firewall rule action %(action)s is not supported. Only action values " +"%(values)s are supported." +msgstr "" + +#: neutron/extensions/firewall.py:82 +#, python-format +msgid "%(param)s are not allowed when protocol is set to ICMP." +msgstr "" + +#: neutron/extensions/firewall.py:87 +#, python-format +msgid "Invalid value for port %(port)s." +msgstr "" + +#: neutron/extensions/firewall.py:91 +msgid "Missing rule info argument for insert/remove rule operation." +msgstr "" + +#: neutron/extensions/firewall.py:101 +#, python-format +msgid "%(driver)s: Internal driver error." +msgstr "" + +#: neutron/extensions/firewall.py:150 +#, python-format +msgid "Port '%s' is not a valid number" +msgstr "" + +#: neutron/extensions/firewall.py:154 +#, python-format +msgid "Invalid port '%s'" +msgstr "" + +#: neutron/extensions/firewall.py:168 +#, python-format +msgid "%(msg_ip)s and %(msg_subnet)s" +msgstr "" + +#: neutron/extensions/firewall.py:289 +msgid "Number of firewalls allowed per tenant. A negative value means unlimited." +msgstr "" + +#: neutron/extensions/firewall.py:293 +msgid "" +"Number of firewall policies allowed per tenant. A negative value means " +"unlimited." +msgstr "" + +#: neutron/extensions/firewall.py:297 +msgid "" +"Number of firewall rules allowed per tenant. A negative value means " +"unlimited." 
+msgstr "" + +#: neutron/extensions/l3.py:29 +#, python-format +msgid "Router %(router_id)s could not be found" +msgstr "" + +#: neutron/extensions/l3.py:33 +#, python-format +msgid "Router %(router_id)s still has ports" +msgstr "" + +#: neutron/extensions/l3.py:37 +#, python-format +msgid "Router %(router_id)s does not have an interface with id %(port_id)s" +msgstr "" + +#: neutron/extensions/l3.py:42 +#, python-format +msgid "Router %(router_id)s has no interface on subnet %(subnet_id)s" +msgstr "" + +#: neutron/extensions/l3.py:47 +#, python-format +msgid "" +"Router interface for subnet %(subnet_id)s on router %(router_id)s cannot " +"be deleted, as it is required by one or more floating IPs." +msgstr "" + +#: neutron/extensions/l3.py:53 +#, python-format +msgid "Floating IP %(floatingip_id)s could not be found" +msgstr "" + +#: neutron/extensions/l3.py:57 +#, python-format +msgid "" +"External network %(external_network_id)s is not reachable from subnet " +"%(subnet_id)s. Therefore, cannot associate Port %(port_id)s with a " +"Floating IP." +msgstr "" + +#: neutron/extensions/l3.py:63 +#, python-format +msgid "" +"Cannot associate floating IP %(floating_ip_address)s (%(fip_id)s) with " +"port %(port_id)s using fixed IP %(fixed_ip)s, as that fixed IP already " +"has a floating IP on external network %(net_id)s." +msgstr "" + +#: neutron/extensions/l3.py:70 +#, python-format +msgid "" +"Port %(port_id)s has owner %(device_owner)s and therefore cannot be " +"deleted directly via the port API." +msgstr "" + +#: neutron/extensions/l3.py:75 +#, python-format +msgid "" +"Gateway cannot be updated for router %(router_id)s, since a gateway to " +"external network %(net_id)s is required by one or more floating IPs." +msgstr "" + +#: neutron/extensions/l3.py:138 +msgid "Number of routers allowed per tenant. A negative value means unlimited." +msgstr "" + +#: neutron/extensions/l3.py:142 +msgid "" +"Number of floating IPs allowed per tenant. 
A negative value means " +"unlimited." +msgstr "" + +#: neutron/extensions/l3agentscheduler.py:47 +#: neutron/extensions/l3agentscheduler.py:85 +msgid "No plugin for L3 routing registered to handle router scheduling" +msgstr "" + +#: neutron/extensions/l3agentscheduler.py:151 +#, python-format +msgid "Agent %(id)s is not a L3 Agent or has been disabled" +msgstr "" + +#: neutron/extensions/l3agentscheduler.py:155 +#, python-format +msgid "" +"The router %(router_id)s has been already hosted by the L3 Agent " +"%(agent_id)s." +msgstr "" + +#: neutron/extensions/l3agentscheduler.py:160 +#, python-format +msgid "Failed scheduling router %(router_id)s to the L3 Agent %(agent_id)s." +msgstr "" + +#: neutron/extensions/l3agentscheduler.py:165 +#, python-format +msgid "Failed rescheduling router %(router_id)s: no eligible l3 agent found." +msgstr "" + +#: neutron/extensions/l3agentscheduler.py:170 +#, python-format +msgid "The router %(router_id)s is not hosted by L3 agent %(agent_id)s." +msgstr "" + +#: neutron/extensions/lbaas_agentscheduler.py:116 +#, python-format +msgid "No eligible loadbalancer agent found for pool %(pool_id)s." +msgstr "" + +#: neutron/extensions/lbaas_agentscheduler.py:121 +#, python-format +msgid "No active loadbalancer agent found for pool %(pool_id)s." 
+msgstr "" + +#: neutron/extensions/loadbalancer.py:33 +msgid "Delay must be greater than or equal to timeout" +msgstr "" + +#: neutron/extensions/loadbalancer.py:37 +#, python-format +msgid "No eligible backend for pool %(pool_id)s" +msgstr "" + +#: neutron/extensions/loadbalancer.py:41 +#, python-format +msgid "Vip %(vip_id)s could not be found" +msgstr "" + +#: neutron/extensions/loadbalancer.py:45 +#, python-format +msgid "Another Vip already exists for pool %(pool_id)s" +msgstr "" + +#: neutron/extensions/loadbalancer.py:49 +#, python-format +msgid "Pool %(pool_id)s could not be found" +msgstr "" + +#: neutron/extensions/loadbalancer.py:53 +#, python-format +msgid "Member %(member_id)s could not be found" +msgstr "" + +#: neutron/extensions/loadbalancer.py:57 +#, python-format +msgid "Health_monitor %(monitor_id)s could not be found" +msgstr "" + +#: neutron/extensions/loadbalancer.py:61 +#, python-format +msgid "Monitor %(monitor_id)s is not associated with Pool %(pool_id)s" +msgstr "" + +#: neutron/extensions/loadbalancer.py:66 +#, python-format +msgid "health_monitor %(monitor_id)s is already associated with pool %(pool_id)s" +msgstr "" + +#: neutron/extensions/loadbalancer.py:71 +#, python-format +msgid "Invalid state %(state)s of Loadbalancer resource %(id)s" +msgstr "" + +#: neutron/extensions/loadbalancer.py:75 +#, python-format +msgid "Pool %(pool_id)s is still in use" +msgstr "" + +#: neutron/extensions/loadbalancer.py:79 +#, python-format +msgid "Health monitor %(monitor_id)s still has associations with pools" +msgstr "" + +#: neutron/extensions/loadbalancer.py:84 +#, python-format +msgid "Statistics of Pool %(pool_id)s could not be found" +msgstr "" + +#: neutron/extensions/loadbalancer.py:88 +#, python-format +msgid "Protocol %(vip_proto)s does not match pool protocol %(pool_proto)s" +msgstr "" + +#: neutron/extensions/loadbalancer.py:93 +#, python-format +msgid "" +"Member with address %(address)s and port %(port)s already present in pool" +" 
%(pool)s" +msgstr "" + +#: neutron/extensions/loadbalancer.py:309 +msgid "Number of vips allowed per tenant. A negative value means unlimited." +msgstr "" + +#: neutron/extensions/loadbalancer.py:313 +msgid "Number of pools allowed per tenant. A negative value means unlimited." +msgstr "" + +#: neutron/extensions/loadbalancer.py:317 +msgid "" +"Number of pool members allowed per tenant. A negative value means " +"unlimited." +msgstr "" + +#: neutron/extensions/loadbalancer.py:321 +msgid "" +"Number of health monitors allowed per tenant. A negative value means " +"unlimited." +msgstr "" + +#: neutron/extensions/metering.py:33 +#, python-format +msgid "Metering label %(label_id)s does not exist" +msgstr "" + +#: neutron/extensions/metering.py:37 +msgid "Duplicate Metering Rule in POST." +msgstr "" + +#: neutron/extensions/metering.py:41 +#, python-format +msgid "Metering label rule %(rule_id)s does not exist" +msgstr "" + +#: neutron/extensions/metering.py:45 +#, python-format +msgid "" +"Metering label rule with remote_ip_prefix %(remote_ip_prefix)s overlaps " +"another" +msgstr "" + +#: neutron/extensions/multiprovidernet.py:27 +msgid "Segments and provider values cannot both be set." +msgstr "" + +#: neutron/extensions/multiprovidernet.py:31 +msgid "Duplicate segment entry in request." +msgstr "" + +#: neutron/extensions/portsecurity.py:20 +msgid "" +"Port has security group associated. Cannot disable port security or ip " +"address until security group is removed" +msgstr "" + +#: neutron/extensions/portsecurity.py:25 +msgid "" +"Port security must be enabled and port must have an IP address in order " +"to use security groups." +msgstr "" + +#: neutron/extensions/portsecurity.py:30 +msgid "Port does not have port security binding." 
+msgstr "" + +#: neutron/extensions/providernet.py:54 +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:289 +msgid "Plugin does not support updating provider attributes" +msgstr "" + +#: neutron/extensions/quotasv2.py:67 +msgid "POST requests are not supported on this resource." +msgstr "" + +#: neutron/extensions/quotasv2.py:86 +msgid "Only admin is authorized to access quotas for another tenant" +msgstr "" + +#: neutron/extensions/quotasv2.py:91 +msgid "Only admin can view or configure quota" +msgstr "" + +#: neutron/extensions/securitygroup.py:34 +msgid "For TCP/UDP protocols, port_range_min must be <= port_range_max" +msgstr "" + +#: neutron/extensions/securitygroup.py:39 +#, python-format +msgid "Invalid value for port %(port)s" +msgstr "" + +#: neutron/extensions/securitygroup.py:43 +#, python-format +msgid "" +"Invalid value for ICMP %(field)s (%(attr)s) %(value)s. It must be 0 to " +"255." +msgstr "" + +#: neutron/extensions/securitygroup.py:48 +#, python-format +msgid "" +"ICMP code (port-range-max) %(value)s is provided but ICMP type (port-" +"range-min) is missing." +msgstr "" + +#: neutron/extensions/securitygroup.py:53 +#, python-format +msgid "Security Group %(id)s in use." +msgstr "" + +#: neutron/extensions/securitygroup.py:57 +msgid "Removing default security group not allowed." +msgstr "" + +#: neutron/extensions/securitygroup.py:61 +msgid "Updating default security group not allowed." +msgstr "" + +#: neutron/extensions/securitygroup.py:65 +msgid "Default security group already exists." +msgstr "" + +#: neutron/extensions/securitygroup.py:69 +#, python-format +msgid "" +"Security group rule protocol %(protocol)s not supported. Only protocol " +"values %(values)s and their integer representation (0 to 255) are " +"supported." 
+msgstr "" + +#: neutron/extensions/securitygroup.py:75 +msgid "Multiple tenant_ids in bulk security group rule create not allowed" +msgstr "" + +#: neutron/extensions/securitygroup.py:80 +msgid "Only remote_ip_prefix or remote_group_id may be provided." +msgstr "" + +#: neutron/extensions/securitygroup.py:85 +msgid "Must also specifiy protocol if port range is given." +msgstr "" + +#: neutron/extensions/securitygroup.py:89 +msgid "Only allowed to update rules for one security profile at a time" +msgstr "" + +#: neutron/extensions/securitygroup.py:94 +#, python-format +msgid "Security group %(id)s does not exist" +msgstr "" + +#: neutron/extensions/securitygroup.py:98 +#, python-format +msgid "Security group rule %(id)s does not exist" +msgstr "" + +#: neutron/extensions/securitygroup.py:102 +msgid "Duplicate Security Group Rule in POST." +msgstr "" + +#: neutron/extensions/securitygroup.py:106 +#, python-format +msgid "Security group rule already exists. Group id is %(id)s." +msgstr "" + +#: neutron/extensions/securitygroup.py:110 +#, python-format +msgid "Conflicting value ethertype %(ethertype)s for CIDR %(cidr)s" +msgstr "" + +#: neutron/extensions/securitygroup.py:158 +#, python-format +msgid "'%s' is not an integer or uuid" +msgstr "" + +#: neutron/extensions/securitygroup.py:247 +msgid "" +"Number of security groups allowed per tenant. A negative value means " +"unlimited." +msgstr "" + +#: neutron/extensions/securitygroup.py:251 +msgid "" +"Number of security rules allowed per tenant. A negative value means " +"unlimited." 
+msgstr "" + +#: neutron/extensions/servicetype.py:52 +msgid "Neutron Service Type Management" +msgstr "" + +#: neutron/extensions/servicetype.py:60 +msgid "API for retrieving service providers for Neutron advanced services" +msgstr "" + +#: neutron/extensions/vpnaas.py:31 +#, python-format +msgid "VPNService %(vpnservice_id)s could not be found" +msgstr "" + +#: neutron/extensions/vpnaas.py:35 +#, python-format +msgid "ipsec_site_connection %(ipsecsite_conn_id)s not found" +msgstr "" + +#: neutron/extensions/vpnaas.py:39 +#, python-format +msgid "ipsec_site_connection %(attr)s is equal to or less than dpd_interval" +msgstr "" + +#: neutron/extensions/vpnaas.py:44 +#, python-format +msgid "ipsec_site_connection MTU %(mtu)d is too small for ipv%(version)s" +msgstr "" + +#: neutron/extensions/vpnaas.py:49 +#, python-format +msgid "IKEPolicy %(ikepolicy_id)s could not be found" +msgstr "" + +#: neutron/extensions/vpnaas.py:53 +#, python-format +msgid "IPsecPolicy %(ipsecpolicy_id)s could not be found" +msgstr "" + +#: neutron/extensions/vpnaas.py:57 +#, python-format +msgid "" +"IKEPolicy %(ikepolicy_id)s is in use by existing IPsecSiteConnection and " +"can't be updated or deleted" +msgstr "" + +#: neutron/extensions/vpnaas.py:62 +#, python-format +msgid "VPNService %(vpnservice_id)s is still in use" +msgstr "" + +#: neutron/extensions/vpnaas.py:66 +#, python-format +msgid "Router %(router_id)s is used by VPNService %(vpnservice_id)s" +msgstr "" + +#: neutron/extensions/vpnaas.py:70 +#, python-format +msgid "Invalid state %(state)s of vpnaas resource %(id)s for updating" +msgstr "" + +#: neutron/extensions/vpnaas.py:75 +#, python-format +msgid "" +"IPsecPolicy %(ipsecpolicy_id)s is in use by existing IPsecSiteConnection " +"and can't be updated or deleted" +msgstr "" + +#: neutron/extensions/vpnaas.py:80 +#, python-format +msgid "Can not load driver :%(device_driver)s" +msgstr "" + +#: neutron/extensions/vpnaas.py:84 +#, python-format +msgid "Subnet %(subnet_id)s is 
not connected to Router %(router_id)s" +msgstr "" + +#: neutron/extensions/vpnaas.py:89 +#, python-format +msgid "Router %(router_id)s has no external network gateway set" +msgstr "" + +#: neutron/notifiers/nova.py:165 +msgid "device_id is not set on port yet." +msgstr "" + +#: neutron/notifiers/nova.py:169 +msgid "Port ID not set! Nova will not be notified of port status change." +msgstr "" + +#: neutron/notifiers/nova.py:194 +#, python-format +msgid "" +"Ignoring state change previous_port_status: %(pre_status)s " +"current_port_status: %(cur_status)s port_id %(id)s" +msgstr "" + +#: neutron/notifiers/nova.py:220 +#, python-format +msgid "Sending events: %s" +msgstr "" + +#: neutron/notifiers/nova.py:225 +#, python-format +msgid "Nova returned NotFound for event: %s" +msgstr "" + +#: neutron/notifiers/nova.py:228 +#, python-format +msgid "Failed to notify nova on events: %s" +msgstr "" + +#: neutron/notifiers/nova.py:232 neutron/notifiers/nova.py:248 +#, python-format +msgid "Error response returned from nova: %s" +msgstr "" + +#: neutron/notifiers/nova.py:243 +#, python-format +msgid "Nova event: %s returned with failed status" +msgstr "" + +#: neutron/notifiers/nova.py:246 +#, python-format +msgid "Nova event response: %s" +msgstr "" + +#: neutron/openstack/common/eventlet_backdoor.py:140 +#, python-format +msgid "Eventlet backdoor listening on %(port)s for process %(pid)d" +msgstr "" + +#: neutron/openstack/common/gettextutils.py:320 +msgid "Message objects do not support addition." +msgstr "" + +#: neutron/openstack/common/gettextutils.py:330 +msgid "" +"Message objects do not support str() because they may contain non-ascii " +"characters. Please use unicode() or translate() instead." 
+msgstr "" + +#: neutron/openstack/common/lockutils.py:103 +#, python-format +msgid "Could not release the acquired lock `%s`" +msgstr "" + +#: neutron/openstack/common/lockutils.py:168 +#, python-format +msgid "Got semaphore \"%(lock)s\"" +msgstr "" + +#: neutron/openstack/common/lockutils.py:177 +#, python-format +msgid "Attempting to grab file lock \"%(lock)s\"" +msgstr "" + +#: neutron/openstack/common/lockutils.py:187 +#, python-format +msgid "Created lock path: %s" +msgstr "" + +#: neutron/openstack/common/lockutils.py:205 +#, python-format +msgid "Got file lock \"%(lock)s\" at %(path)s" +msgstr "" + +#: neutron/openstack/common/lockutils.py:209 +#, python-format +msgid "Released file lock \"%(lock)s\" at %(path)s" +msgstr "" + +#: neutron/openstack/common/lockutils.py:247 +#, python-format +msgid "Got semaphore / lock \"%(function)s\"" +msgstr "" + +#: neutron/openstack/common/lockutils.py:251 +#, python-format +msgid "Semaphore / lock released \"%(function)s\"" +msgstr "" + +#: neutron/openstack/common/log.py:301 +#, python-format +msgid "Deprecated: %s" +msgstr "" + +#: neutron/openstack/common/log.py:402 +#, python-format +msgid "Error loading logging config %(log_config)s: %(err_msg)s" +msgstr "" + +#: neutron/openstack/common/log.py:453 +#, python-format +msgid "syslog facility must be one of: %s" +msgstr "" + +#: neutron/openstack/common/log.py:623 +#, python-format +msgid "Fatal call to deprecated config: %(msg)s" +msgstr "" + +#: neutron/openstack/common/loopingcall.py:82 +#, python-format +msgid "task run outlasted interval by %s sec" +msgstr "" + +#: neutron/openstack/common/loopingcall.py:89 +msgid "in fixed duration looping call" +msgstr "" + +#: neutron/openstack/common/loopingcall.py:129 +#, python-format +msgid "Dynamic looping call sleeping for %.02f seconds" +msgstr "" + +#: neutron/openstack/common/loopingcall.py:136 +msgid "in dynamic looping call" +msgstr "" + +#: neutron/openstack/common/periodic_task.py:39 +#, python-format +msgid 
"Unexpected argument for periodic task creation: %(arg)s." +msgstr "" + +#: neutron/openstack/common/policy.py:395 +#, python-format +msgid "Failed to understand rule %(rule)s" +msgstr "" + +#: neutron/openstack/common/policy.py:405 +#, python-format +msgid "No handler for matches of kind %s" +msgstr "" + +#: neutron/openstack/common/policy.py:680 +#, python-format +msgid "Failed to understand rule %(rule)r" +msgstr "" + +#: neutron/openstack/common/processutils.py:130 +#, python-format +msgid "Got unknown keyword args to utils.execute: %r" +msgstr "" + +#: neutron/openstack/common/processutils.py:145 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "" + +#: neutron/openstack/common/processutils.py:169 +#: neutron/openstack/common/processutils.py:241 +#, python-format +msgid "Result was %s" +msgstr "" + +#: neutron/openstack/common/processutils.py:181 +#, python-format +msgid "%r failed. Retrying." +msgstr "" + +#: neutron/openstack/common/processutils.py:220 +#, python-format +msgid "Running cmd (SSH): %s" +msgstr "" + +#: neutron/openstack/common/processutils.py:222 +msgid "Environment not supported over SSH" +msgstr "" + +#: neutron/openstack/common/processutils.py:226 +msgid "process_input not supported over SSH" +msgstr "" + +#: neutron/openstack/common/sslutils.py:50 +#, python-format +msgid "Unable to find cert_file : %s" +msgstr "" + +#: neutron/openstack/common/sslutils.py:53 +#, python-format +msgid "Unable to find ca_file : %s" +msgstr "" + +#: neutron/openstack/common/sslutils.py:56 +#, python-format +msgid "Unable to find key_file : %s" +msgstr "" + +#: neutron/openstack/common/sslutils.py:59 +msgid "" +"When running server in SSL mode, you must specify both a cert_file and " +"key_file option value in your configuration file" +msgstr "" + +#: neutron/openstack/common/sslutils.py:98 +#, python-format +msgid "Invalid SSL version : %s" +msgstr "" + +#: neutron/openstack/common/strutils.py:92 +#, python-format +msgid "Unrecognized value 
'%(val)s', acceptable values are: %(acceptable)s" +msgstr "" + +#: neutron/openstack/common/strutils.py:197 +#, python-format +msgid "Invalid unit system: \"%s\"" +msgstr "" + +#: neutron/openstack/common/strutils.py:206 +#, python-format +msgid "Invalid string format: %s" +msgstr "" + +#: neutron/openstack/common/versionutils.py:69 +#, python-format +msgid "" +"%(what)s is deprecated as of %(as_of)s in favor of %(in_favor_of)s and " +"may be removed in %(remove_in)s." +msgstr "" + +#: neutron/openstack/common/versionutils.py:73 +#, python-format +msgid "" +"%(what)s is deprecated as of %(as_of)s and may be removed in " +"%(remove_in)s. It will not be superseded." +msgstr "" + +#: neutron/openstack/common/db/exception.py:44 +msgid "Invalid Parameter: Unicode is not supported by the current database." +msgstr "" + +#: neutron/openstack/common/db/sqlalchemy/utils.py:58 +msgid "Sort key supplied was not valid." +msgstr "" + +#: neutron/openstack/common/db/sqlalchemy/utils.py:119 +msgid "Unknown sort direction, must be 'desc' or 'asc'" +msgstr "" + +#: neutron/openstack/common/db/sqlalchemy/utils.py:162 +#, python-format +msgid "" +"There is no `deleted` column in `%s` table. Project doesn't use soft-" +"deleted feature." +msgstr "" + +#: neutron/openstack/common/db/sqlalchemy/utils.py:174 +#, python-format +msgid "Unrecognized read_deleted value '%s'" +msgstr "" + +#: neutron/openstack/common/db/sqlalchemy/utils.py:181 +#, python-format +msgid "There is no `project_id` column in `%s` table." +msgstr "" + +#: neutron/openstack/common/db/sqlalchemy/utils.py:243 +msgid "model should be a subclass of ModelBase" +msgstr "" + +#: neutron/openstack/common/db/sqlalchemy/utils.py:286 +#, python-format +msgid "" +"Please specify column %s in col_name_col_instance param. It is required " +"because column has unsupported type by sqlite)." 
+msgstr "" + +#: neutron/openstack/common/db/sqlalchemy/utils.py:292 +#, python-format +msgid "" +"col_name_col_instance param has wrong type of column instance for column " +"%s It should be instance of sqlalchemy.Column." +msgstr "" + +#: neutron/openstack/common/db/sqlalchemy/utils.py:400 +msgid "Unsupported id columns type" +msgstr "" + +#: neutron/openstack/common/middleware/catch_errors.py:40 +#, python-format +msgid "An error occurred during processing the request: %s" +msgstr "" + +#: neutron/openstack/common/middleware/sizelimit.py:55 +#: neutron/openstack/common/middleware/sizelimit.py:64 +#: neutron/openstack/common/middleware/sizelimit.py:75 +msgid "Request is too large." +msgstr "" + +#: neutron/plugins/bigswitch/config.py:33 +msgid "" +"A comma separated list of Big Switch or Floodlight servers and port " +"numbers. The plugin proxies the requests to the Big Switch/Floodlight " +"server, which performs the networking configuration. Only oneserver is " +"needed per deployment, but you may wish todeploy multiple servers to " +"support failover." +msgstr "" + +#: neutron/plugins/bigswitch/config.py:40 +msgid "" +"The username and password for authenticating against the Big Switch or " +"Floodlight controller." +msgstr "" + +#: neutron/plugins/bigswitch/config.py:43 +msgid "" +"If True, Use SSL when connecting to the Big Switch or Floodlight " +"controller." +msgstr "" + +#: neutron/plugins/bigswitch/config.py:46 +msgid "" +"Trust and store the first certificate received for each controller " +"address and use it to validate future connections to that address." +msgstr "" + +#: neutron/plugins/bigswitch/config.py:50 +msgid "Disables SSL certificate validation for controllers" +msgstr "" + +#: neutron/plugins/bigswitch/config.py:52 +msgid "Re-use HTTP/HTTPS connections to the controller." +msgstr "" + +#: neutron/plugins/bigswitch/config.py:55 +msgid "Directory containing ca_certs and host_certs certificate directories." 
+msgstr "" + +#: neutron/plugins/bigswitch/config.py:58 +msgid "Sync data on connect" +msgstr "" + +#: neutron/plugins/bigswitch/config.py:60 +msgid "" +"If neutron fails to create a resource because the backend controller " +"doesn't know of a dependency, the plugin automatically triggers a full " +"data synchronization to the controller." +msgstr "" + +#: neutron/plugins/bigswitch/config.py:65 +msgid "" +"Time between verifications that the backend controller database is " +"consistent with Neutron" +msgstr "" + +#: neutron/plugins/bigswitch/config.py:68 +msgid "" +"Maximum number of seconds to wait for proxy request to connect and " +"complete." +msgstr "" + +#: neutron/plugins/bigswitch/config.py:71 +msgid "" +"Maximum number of threads to spawn to handle large volumes of port " +"creations." +msgstr "" + +#: neutron/plugins/bigswitch/config.py:75 +msgid "User defined identifier for this Neutron deployment" +msgstr "" + +#: neutron/plugins/bigswitch/config.py:77 +msgid "" +"Flag to decide if a route to the metadata server should be injected into " +"the VM" +msgstr "" + +#: neutron/plugins/bigswitch/config.py:82 +msgid "" +"The default router rules installed in new tenant routers. Repeat the " +"config option for each rule. Format is " +"::: Use an * to specify default for " +"all tenants." +msgstr "" + +#: neutron/plugins/bigswitch/config.py:87 +msgid "Maximum number of router rules" +msgstr "" + +#: neutron/plugins/bigswitch/config.py:91 +msgid "Virtual interface type to configure on Nova compute nodes" +msgstr "" + +#: neutron/plugins/bigswitch/config.py:98 +#, python-format +msgid "Nova compute nodes to manually set VIF type to %s" +msgstr "" + +#: neutron/plugins/bigswitch/config.py:105 +msgid "List of allowed vif_type values." +msgstr "" + +#: neutron/plugins/bigswitch/config.py:109 +msgid "" +"Name of integration bridge on compute nodes used for security group " +"insertion." 
+msgstr "" + +#: neutron/plugins/bigswitch/config.py:112 +msgid "Seconds between agent checks for port changes" +msgstr "" + +#: neutron/plugins/bigswitch/config.py:114 +msgid "Virtual switch type." +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:94 +msgid "Syntax error in server config file, aborting plugin" +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:132 neutron/plugins/ml2/db.py:100 +#, python-format +msgid "get_port_and_sgs() called for port_id %s" +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:238 +#, python-format +msgid "Unable to update remote topology: %s" +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:322 +#, python-format +msgid "" +"Setting admin_state_up=False is not supported in this plugin version. " +"Ignoring setting for resource: %s" +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:328 +#, python-format +msgid "" +"Operational status is internally set by the plugin. Ignoring setting " +"status=%s." +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:353 +#, python-format +msgid "Unrecognized vif_type in configuration [%s]. Defaulting to ovs." +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:399 +#: neutron/plugins/ml2/drivers/mech_bigswitch/driver.py:98 +msgid "Iconsistency with backend controller triggering full synchronization." +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:416 +#, python-format +msgid "NeutronRestProxyV2: Unable to create port: %s" +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:467 +#, python-format +msgid "NeutronRestProxy: Starting plugin. 
Version=%s" +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:494 +msgid "NeutronRestProxyV2: initialization done" +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:536 +msgid "NeutronRestProxyV2: create_network() called" +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:578 +msgid "NeutronRestProxyV2.update_network() called" +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:605 +msgid "NeutronRestProxyV2: delete_network() called" +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:642 +msgid "NeutronRestProxyV2: create_port() called" +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:733 +msgid "NeutronRestProxyV2: update_port() called" +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:792 +msgid "NeutronRestProxyV2: delete_port() called" +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:808 +msgid "NeutronRestProxyV2: create_subnet() called" +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:824 +msgid "NeutronRestProxyV2: update_subnet() called" +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:842 +msgid "NeutronRestProxyV2: delete_subnet() called" +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:880 +msgid "NeutronRestProxyV2: create_router() called" +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:902 +msgid "NeutronRestProxyV2.update_router() called" +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:924 +msgid "NeutronRestProxyV2: delete_router() called" +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:953 +msgid "NeutronRestProxyV2: add_router_interface() called" +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:981 +msgid "NeutronRestProxyV2: remove_router_interface() called" +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:1014 +msgid "NeutronRestProxyV2: create_floatingip() called" +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:1031 +#, python-format +msgid "NeutronRestProxyV2: Unable to create remote floating IP: %s" +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:1037 +msgid 
"NeutronRestProxyV2: update_floatingip() called" +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:1053 +msgid "NeutronRestProxyV2: delete_floatingip() called" +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:1068 +msgid "NeutronRestProxyV2: diassociate_floatingips() called" +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:1098 +msgid "NeutronRestProxyV2: too many external networks" +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:1113 +msgid "Adding host route: " +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:1114 +#, python-format +msgid "Destination:%(dst)s nexthop:%(next)s" +msgstr "" + +#: neutron/plugins/bigswitch/routerrule_db.py:77 +msgid "No rules in router" +msgstr "" + +#: neutron/plugins/bigswitch/routerrule_db.py:91 +#, python-format +msgid "Updating router rules to %s" +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:77 +#, python-format +msgid "Error in REST call to remote network controller: %(reason)s" +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:116 +msgid "Couldn't retrieve capabilities. Newer API calls won't be supported." 
+msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:118 +#, python-format +msgid "The following capabilities were received for %(server)s: %(cap)s" +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:142 +#, python-format +msgid "ServerProxy: server=%(server)s, port=%(port)d, ssl=%(ssl)r" +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:145 +#, python-format +msgid "" +"ServerProxy: resource=%(resource)s, data=%(data)r, headers=%(headers)r, " +"action=%(action)s" +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:166 +msgid "ServerProxy: Could not establish HTTPS connection" +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:174 +msgid "ServerProxy: Could not establish HTTP connection" +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:209 +#, python-format +msgid "ServerProxy: %(action)s failure, %(e)r" +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:212 +#, python-format +msgid "" +"ServerProxy: status=%(status)d, reason=%(reason)r, ret=%(ret)s, " +"data=%(data)r" +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:228 +msgid "ServerPool: initializing" +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:254 +msgid "Servers not defined. Aborting server manager." +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:261 +#, python-format +msgid "Servers must be defined as :. Configuration was %s" +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:269 +msgid "ServerPool: initialization done" +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:302 +#, python-format +msgid "ssl_cert_directory [%s] does not exist. Create it or disable ssl." +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:319 +#, python-format +msgid "No certificates were found to verify controller %s" +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:361 +#, python-format +msgid "" +"Could not retrieve initial certificate from controller %(server)s. 
Error " +"details: %(error)s" +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:366 +#, python-format +msgid "Storing to certificate for host %(server)s at %(path)s" +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:406 +msgid "Server requires synchronization, but no topology function was defined." +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:421 +#, python-format +msgid "" +"ServerProxy: %(action)s failure for servers: %(server)r Response: " +"%(response)s" +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:427 +#, python-format +msgid "" +"ServerProxy: Error details: status=%(status)d, reason=%(reason)r, " +"ret=%(ret)s, data=%(data)r" +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:434 +#, python-format +msgid "ServerProxy: %(action)s failure for all servers: %(server)r" +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:457 +#, python-format +msgid "" +"NeutronRestProxyV2: Received and ignored error code %(code)s on " +"%(action)s action to resource %(resource)s" +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:467 +#, python-format +msgid "Unable to create remote router: %s" +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:473 +#, python-format +msgid "Unable to update remote router: %s" +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:478 +#, python-format +msgid "Unable to delete remote router: %s" +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:484 +#, python-format +msgid "Unable to add router interface: %s" +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:489 +#, python-format +msgid "Unable to delete remote intf: %s" +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:495 +#, python-format +msgid "Unable to create remote network: %s" +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:501 +#: neutron/plugins/bigswitch/servermanager.py:506 +#, python-format +msgid "Unable to update remote network: %s" +msgstr "" + +#: 
neutron/plugins/bigswitch/servermanager.py:515 +#, python-format +msgid "No device MAC attached to port %s. Skipping notification to controller." +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:520 +#, python-format +msgid "Unable to create remote port: %s" +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:525 +#, python-format +msgid "Unable to delete remote port: %s" +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:535 +#, python-format +msgid "Unable to create floating IP: %s" +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:540 +#, python-format +msgid "Unable to update floating IP: %s" +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:545 +#, python-format +msgid "Unable to delete floating IP: %s" +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:550 +msgid "Backend server(s) do not support automated consitency checks." +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:562 +msgid "Encountered an error checking controller health." +msgstr "" + +#: neutron/plugins/bigswitch/agent/restproxy_agent.py:116 +#: neutron/plugins/ryu/agent/ryu_neutron_agent.py:235 +msgid "Port update received" +msgstr "" + +#: neutron/plugins/bigswitch/agent/restproxy_agent.py:120 +#, python-format +msgid "Port %s is not present on this host." +msgstr "" + +#: neutron/plugins/bigswitch/agent/restproxy_agent.py:123 +#, python-format +msgid "Port %s found. Refreshing firewall." 
+msgstr "" + +#: neutron/plugins/bigswitch/agent/restproxy_agent.py:151 +#: neutron/plugins/ryu/agent/ryu_neutron_agent.py:268 +msgid "Agent loop has new device" +msgstr "" + +#: neutron/plugins/bigswitch/agent/restproxy_agent.py:155 +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:398 +#: neutron/plugins/nec/agent/nec_neutron_agent.py:225 +#: neutron/plugins/oneconvergence/agent/nvsd_neutron_agent.py:159 +#: neutron/plugins/ryu/agent/ryu_neutron_agent.py:272 +msgid "Error in agent event loop" +msgstr "" + +#: neutron/plugins/bigswitch/agent/restproxy_agent.py:161 +#: neutron/plugins/ibm/agent/sdnve_neutron_agent.py:226 +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:996 +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1365 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1429 +#: neutron/plugins/ryu/agent/ryu_neutron_agent.py:278 +#, python-format +msgid "Loop iteration exceeded interval (%(polling_interval)s vs. %(elapsed)s)!" +msgstr "" + +#: neutron/plugins/bigswitch/db/consistency_db.py:55 +#, python-format +msgid "Consistency hash for group %(hash_id)s updated to %(hash)s" +msgstr "" + +#: neutron/plugins/bigswitch/db/porttracker_db.py:41 +msgid "No host_id in port request to track port location." +msgstr "" + +#: neutron/plugins/bigswitch/db/porttracker_db.py:44 +#, python-format +msgid "Received an empty port ID for host_id '%s'" +msgstr "" + +#: neutron/plugins/bigswitch/db/porttracker_db.py:47 +#, python-format +msgid "Received an empty host_id for port '%s'" +msgstr "" + +#: neutron/plugins/bigswitch/db/porttracker_db.py:49 +#, python-format +msgid "Logging port %(port)s on host_id %(host)s" +msgstr "" + +#: neutron/plugins/bigswitch/extensions/routerrule.py:30 +#, python-format +msgid "Invalid format for router rules: %(rule)s, %(reason)s" +msgstr "" + +#: neutron/plugins/bigswitch/extensions/routerrule.py:34 +#, python-format +msgid "" +"Unable to complete rules update for %(router_id)s. 
The number of rules " +"exceeds the maximum %(quota)s." +msgstr "" + +#: neutron/plugins/bigswitch/extensions/routerrule.py:51 +#, python-format +msgid "Invalid data format for router rule: '%s'" +msgstr "" + +#: neutron/plugins/bigswitch/extensions/routerrule.py:83 +#, python-format +msgid "Duplicate nexthop in rule '%s'" +msgstr "" + +#: neutron/plugins/bigswitch/extensions/routerrule.py:91 +#, python-format +msgid "Action must be either permit or deny. '%s' was provided" +msgstr "" + +#: neutron/plugins/bigswitch/extensions/routerrule.py:103 +#, python-format +msgid "Duplicate router rules (src,dst) found '%s'" +msgstr "" + +#: neutron/plugins/brocade/NeutronPlugin.py:64 +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:34 +msgid "The address of the host to SSH to" +msgstr "" + +#: neutron/plugins/brocade/NeutronPlugin.py:66 +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:36 +msgid "The SSH username to use" +msgstr "" + +#: neutron/plugins/brocade/NeutronPlugin.py:68 +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:38 +msgid "The SSH password to use" +msgstr "" + +#: neutron/plugins/brocade/NeutronPlugin.py:70 +msgid "Currently unused" +msgstr "" + +#: neutron/plugins/brocade/NeutronPlugin.py:74 +msgid "The network interface to use when creatinga port" +msgstr "" + +#: neutron/plugins/brocade/NeutronPlugin.py:124 +#: neutron/plugins/hyperv/rpc_callbacks.py:47 +#: neutron/plugins/linuxbridge/lb_neutron_plugin.py:79 +#: neutron/plugins/mlnx/rpc_callbacks.py:63 +#: neutron/plugins/openvswitch/ovs_neutron_plugin.py:87 +#, python-format +msgid "Device %(device)s details requested from %(agent_id)s" +msgstr "" + +#: neutron/plugins/brocade/NeutronPlugin.py:138 +#: neutron/plugins/brocade/NeutronPlugin.py:155 +#: neutron/plugins/hyperv/rpc_callbacks.py:63 +#: neutron/plugins/hyperv/rpc_callbacks.py:82 +#: neutron/plugins/linuxbridge/lb_neutron_plugin.py:102 +#: neutron/plugins/linuxbridge/lb_neutron_plugin.py:129 +#: 
neutron/plugins/linuxbridge/lb_neutron_plugin.py:152 +#: neutron/plugins/mlnx/rpc_callbacks.py:85 +#: neutron/plugins/mlnx/rpc_callbacks.py:104 +#: neutron/plugins/mlnx/rpc_callbacks.py:119 +#: neutron/plugins/openvswitch/ovs_neutron_plugin.py:105 +#: neutron/plugins/openvswitch/ovs_neutron_plugin.py:132 +#: neutron/plugins/openvswitch/ovs_neutron_plugin.py:155 +#, python-format +msgid "%s can not be found in database" +msgstr "" + +#: neutron/plugins/brocade/NeutronPlugin.py:290 +#: neutron/plugins/brocade/NeutronPlugin.py:334 +#: neutron/plugins/brocade/NeutronPlugin.py:387 +#: neutron/plugins/brocade/NeutronPlugin.py:417 +msgid "Brocade NOS driver error" +msgstr "" + +#: neutron/plugins/brocade/NeutronPlugin.py:291 +#, python-format +msgid "Returning the allocated vlan (%d) to the pool" +msgstr "" + +#: neutron/plugins/brocade/NeutronPlugin.py:294 +#: neutron/plugins/brocade/NeutronPlugin.py:335 +#: neutron/plugins/brocade/NeutronPlugin.py:388 +#: neutron/plugins/brocade/NeutronPlugin.py:419 +msgid "Brocade plugin raised exception, check logs" +msgstr "" + +#: neutron/plugins/brocade/NeutronPlugin.py:300 +#, python-format +msgid "Allocated vlan (%d) from the pool" +msgstr "" + +#: neutron/plugins/brocade/nos/nosdriver.py:71 +#, python-format +msgid "Connect failed to switch: %s" +msgstr "" + +#: neutron/plugins/brocade/nos/nosdriver.py:73 +#: neutron/plugins/ml2/drivers/brocade/nos/nosdriver.py:76 +#, python-format +msgid "Connect success to host %(host)s:%(ssh_port)d" +msgstr "" + +#: neutron/plugins/brocade/nos/nosdriver.py:98 +#: neutron/plugins/brocade/nos/nosdriver.py:112 +#: neutron/plugins/brocade/nos/nosdriver.py:125 +#: neutron/plugins/brocade/nos/nosdriver.py:138 +#, python-format +msgid "NETCONF error: %s" +msgstr "" + +#: neutron/plugins/cisco/network_plugin.py:91 +msgid "Plugin initialization complete" +msgstr "" + +#: neutron/plugins/cisco/network_plugin.py:119 +#, python-format +msgid "'%(model)s' object has no attribute '%(name)s'" +msgstr "" + 
+#: neutron/plugins/cisco/network_plugin.py:136 +#: neutron/plugins/cisco/db/network_db_v2.py:38 +msgid "get_all_qoss() called" +msgstr "" + +#: neutron/plugins/cisco/network_plugin.py:142 +msgid "get_qos_details() called" +msgstr "" + +#: neutron/plugins/cisco/network_plugin.py:147 +msgid "create_qos() called" +msgstr "" + +#: neutron/plugins/cisco/network_plugin.py:153 +msgid "delete_qos() called" +msgstr "" + +#: neutron/plugins/cisco/network_plugin.py:158 +msgid "rename_qos() called" +msgstr "" + +#: neutron/plugins/cisco/network_plugin.py:163 +msgid "get_all_credentials() called" +msgstr "" + +#: neutron/plugins/cisco/network_plugin.py:169 +msgid "get_credential_details() called" +msgstr "" + +#: neutron/plugins/cisco/network_plugin.py:174 +msgid "rename_credential() called" +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:27 +#, python-format +msgid "Segmentation ID for network %(net_id)s is not found." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:32 +msgid "" +"Unable to complete operation. No more dynamic NICs are available in the " +"system." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:38 +#, python-format +msgid "" +"NetworkVlanBinding for %(vlan_id)s and network %(network_id)s already " +"exists." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:44 +#, python-format +msgid "Vlan ID %(vlan_id)s not found." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:49 +msgid "" +"Unable to complete operation. VLAN ID exists outside of the configured " +"network segment range." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:55 +msgid "No Vlan ID available." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:60 +#, python-format +msgid "QoS level %(qos_id)s could not be found for tenant %(tenant_id)s." 
+msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:66 +#, python-format +msgid "QoS level with name %(qos_name)s already exists for tenant %(tenant_id)s." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:72 +#: neutron/plugins/ml2/drivers/cisco/nexus/exceptions.py:23 +#, python-format +msgid "Credential %(credential_id)s could not be found." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:77 +#: neutron/plugins/ml2/drivers/cisco/nexus/exceptions.py:28 +#, python-format +msgid "Credential %(credential_name)s could not be found." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:82 +#, python-format +msgid "Credential %(credential_name)s already exists." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:87 +#, python-format +msgid "Provider network %s already exists" +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:92 +#: neutron/plugins/ml2/drivers/cisco/nexus/exceptions.py:39 +#, python-format +msgid "Connection to %(host)s is not configured." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:97 +#: neutron/plugins/ml2/drivers/cisco/nexus/exceptions.py:44 +#, python-format +msgid "Unable to connect to Nexus %(nexus_host)s. Reason: %(exc)s." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:102 +#: neutron/plugins/ml2/drivers/cisco/nexus/exceptions.py:49 +#, python-format +msgid "Failed to configure Nexus: %(config)s. Reason: %(exc)s." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:107 +#, python-format +msgid "Nexus Port Binding (%(filters)s) is not present." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:116 +#: neutron/plugins/ml2/drivers/cisco/nexus/exceptions.py:69 +msgid "No usable Nexus switch found to create SVI interface." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:121 +#, python-format +msgid "PortVnic Binding %(port_id)s already exists." 
+msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:126 +#, python-format +msgid "PortVnic Binding %(port_id)s is not present." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:131 +#: neutron/plugins/ml2/drivers/cisco/nexus/exceptions.py:74 +msgid "No subnet_id specified for router gateway." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:136 +#: neutron/plugins/ml2/drivers/cisco/nexus/exceptions.py:79 +#, python-format +msgid "Subnet %(subnet_id)s has an interface on %(router_id)s." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:141 +#: neutron/plugins/ml2/drivers/cisco/nexus/exceptions.py:84 +msgid "Nexus hardware router gateway only uses Subnet Ids." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:145 +#, python-format +msgid "" +"Unable to unplug the attachment %(att_id)s from port %(port_id)s for " +"network %(net_id)s. The attachment %(att_id)s does not exist." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:152 +#, python-format +msgid "Policy Profile %(profile_id)s already exists." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:158 +#, python-format +msgid "Policy Profile %(profile_id)s could not be found." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:163 +#, python-format +msgid "Network Profile %(profile_id)s already exists." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:169 +#, python-format +msgid "Network Profile %(profile)s could not be found." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:174 +#, python-format +msgid "" +"One or more network segments belonging to network profile %(profile)s is " +"in use." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:180 +#, python-format +msgid "" +"No more segments available in network segment pool " +"%(network_profile_name)s." 
+msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:186 +#, python-format +msgid "VM Network %(name)s could not be found." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:191 +#, python-format +msgid "Unable to create the network. The VXLAN ID %(vxlan_id)s is in use." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:197 +#, python-format +msgid "Vxlan ID %(vxlan_id)s not found." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:202 +msgid "" +"Unable to complete operation. VXLAN ID exists outside of the configured " +"network segment range." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:208 +#, python-format +msgid "Connection to VSM failed: %(reason)s." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:213 +#, python-format +msgid "Internal VSM Error: %(reason)s." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:218 +#, python-format +msgid "Network Binding for network %(network_id)s could not be found." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:224 +#, python-format +msgid "Port Binding for port %(port_id)s could not be found." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:230 +#, python-format +msgid "Profile-Tenant binding for profile %(profile_id)s could not be found." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:236 +msgid "No service cluster found to perform multi-segment bridging." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_faults.py:74 +msgid "Port not Found" +msgstr "" + +#: neutron/plugins/cisco/common/cisco_faults.py:75 +msgid "Unable to find a port with the specified identifier." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_faults.py:89 +msgid "Credential Not Found" +msgstr "" + +#: neutron/plugins/cisco/common/cisco_faults.py:90 +msgid "Unable to find a Credential with the specified identifier." 
+msgstr "" + +#: neutron/plugins/cisco/common/cisco_faults.py:105 +msgid "QoS Not Found" +msgstr "" + +#: neutron/plugins/cisco/common/cisco_faults.py:106 +msgid "Unable to find a QoS with the specified identifier." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_faults.py:121 +msgid "Nova tenant Not Found" +msgstr "" + +#: neutron/plugins/cisco/common/cisco_faults.py:122 +msgid "Unable to find a Novatenant with the specified identifier." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_faults.py:137 +msgid "Requested State Invalid" +msgstr "" + +#: neutron/plugins/cisco/common/cisco_faults.py:138 +msgid "Unable to update port state with specified value." +msgstr "" + +#: neutron/plugins/cisco/common/config.py:26 +msgid "Virtual Switch to use" +msgstr "" + +#: neutron/plugins/cisco/common/config.py:30 +msgid "Nexus Switch to use" +msgstr "" + +#: neutron/plugins/cisco/common/config.py:35 +#: neutron/plugins/ml2/drivers/cisco/nexus/config.py:21 +msgid "VLAN Name prefix" +msgstr "" + +#: neutron/plugins/cisco/common/config.py:37 +msgid "VLAN Name prefix for provider vlans" +msgstr "" + +#: neutron/plugins/cisco/common/config.py:39 +msgid "Provider VLANs are automatically created as needed on the Nexus switch" +msgstr "" + +#: neutron/plugins/cisco/common/config.py:42 +msgid "" +"Provider VLANs are automatically trunked as needed on the ports of the " +"Nexus switch" +msgstr "" + +#: neutron/plugins/cisco/common/config.py:45 +msgid "Enable L3 support on the Nexus switches" +msgstr "" + +#: neutron/plugins/cisco/common/config.py:47 +#: neutron/plugins/ml2/drivers/cisco/nexus/config.py:23 +msgid "Distribute SVI interfaces over all switches" +msgstr "" + +#: neutron/plugins/cisco/common/config.py:51 +msgid "Model Class" +msgstr "" + +#: neutron/plugins/cisco/common/config.py:55 +msgid "Nexus Driver Name" +msgstr "" + +#: neutron/plugins/cisco/common/config.py:60 +msgid "N1K Integration Bridge" +msgstr "" + +#: neutron/plugins/cisco/common/config.py:62 +msgid "N1K 
Enable Tunneling" +msgstr "" + +#: neutron/plugins/cisco/common/config.py:64 +msgid "N1K Tunnel Bridge" +msgstr "" + +#: neutron/plugins/cisco/common/config.py:66 +msgid "N1K Local IP" +msgstr "" + +#: neutron/plugins/cisco/common/config.py:68 +msgid "N1K Tenant Network Type" +msgstr "" + +#: neutron/plugins/cisco/common/config.py:70 +msgid "N1K Bridge Mappings" +msgstr "" + +#: neutron/plugins/cisco/common/config.py:72 +msgid "N1K VXLAN ID Ranges" +msgstr "" + +#: neutron/plugins/cisco/common/config.py:74 +msgid "N1K Network VLAN Ranges" +msgstr "" + +#: neutron/plugins/cisco/common/config.py:76 +msgid "N1K default network profile" +msgstr "" + +#: neutron/plugins/cisco/common/config.py:78 +msgid "N1K default policy profile" +msgstr "" + +#: neutron/plugins/cisco/common/config.py:80 +msgid "N1K policy profile for network node" +msgstr "" + +#: neutron/plugins/cisco/common/config.py:82 +msgid "N1K Policy profile polling duration in seconds" +msgstr "" + +#: neutron/plugins/cisco/common/config.py:135 +#: neutron/plugins/ml2/drivers/cisco/apic/config.py:68 +#: neutron/plugins/ml2/drivers/cisco/nexus/config.py:58 +msgid "Some config files were not parsed properly" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:331 +#, python-format +msgid "seg_min %(seg_min)s, seg_max %(seg_max)s" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:562 +#, python-format +msgid "Reserving specific vlan %(vlan)s on physical network %(network)s from pool" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:587 +#, python-format +msgid "vlan_id %(vlan)s on physical network %(network)s not found" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:601 +#, python-format +msgid "Unreasonable vxlan ID range %(vxlan_min)s - %(vxlan_max)s" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:643 +#, python-format +msgid "Reserving specific vxlan %s from pool" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:664 +#, python-format +msgid "vxlan_id %s not found" 
+msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:772 +msgid "create_network_profile()" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:794 +msgid "delete_network_profile()" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:808 +msgid "update_network_profile()" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:817 +msgid "get_network_profile()" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:842 +msgid "create_policy_profile()" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:853 +msgid "delete_policy_profile()" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:862 +msgid "update_policy_profile()" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:871 +msgid "get_policy_profile()" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:890 +msgid "Invalid profile type" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:907 +msgid "_profile_binding_exists()" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:915 +msgid "get_profile_binding()" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:925 +msgid "delete_profile_binding()" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:932 +#, python-format +msgid "" +"Profile-Tenant binding missing for profile ID %(profile_id)s and tenant " +"ID %(tenant_id)s" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:946 +msgid "_get_profile_bindings()" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:1096 +msgid "segment_range not required for TRUNK" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:1102 +msgid "multicast_ip_range not required" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:1229 +msgid "Invalid segment range. example range: 500-550" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:1242 +msgid "Invalid multicast ip address range. 
example range: 224.1.1.1-224.1.1.10" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:1249 +#, python-format +msgid "%s is not a valid multicast ip address" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:1253 +#, python-format +msgid "%s is reserved multicast ip address" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:1257 +#, python-format +msgid "%s is not a valid ip address" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:1261 +#, python-format +msgid "" +"Invalid multicast IP range '%(min_ip)s-%(max_ip)s': Range should be from " +"low address to high address" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:1274 +msgid "Arguments segment_type missing for network profile" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:1283 +msgid "segment_type should either be vlan, overlay, multi-segment or trunk" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:1289 +msgid "Argument physical_network missing for network profile" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:1295 +msgid "segment_range not required for trunk" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:1301 +msgid "Argument sub_type missing for network profile" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:1308 +msgid "Argument segment_range missing for network profile" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:1319 +msgid "Argument multicast_ip_range missing for VXLAN multicast network profile" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:1347 +#, python-format +msgid "Segment range is invalid, select from %(min)s-%(nmin)s, %(nmax)s-%(max)s" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:1365 +#, python-format +msgid "segment range is invalid. 
Valid range is : %(min)s-%(max)s" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:1379 +#, python-format +msgid "NetworkProfile name %s already exists" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:1396 +msgid "Segment range overlaps with another profile" +msgstr "" + +#: neutron/plugins/cisco/db/network_db_v2.py:46 +msgid "get_qos() called" +msgstr "" + +#: neutron/plugins/cisco/db/network_db_v2.py:59 +msgid "add_qos() called" +msgstr "" + +#: neutron/plugins/cisco/db/nexus_db_v2.py:34 +#: neutron/plugins/ml2/drivers/cisco/nexus/nexus_db_v2.py:30 +msgid "get_nexusport_binding() called" +msgstr "" + +#: neutron/plugins/cisco/db/nexus_db_v2.py:43 +#: neutron/plugins/ml2/drivers/cisco/nexus/nexus_db_v2.py:39 +msgid "get_nexusvlan_binding() called" +msgstr "" + +#: neutron/plugins/cisco/db/nexus_db_v2.py:49 +#: neutron/plugins/ml2/drivers/cisco/nexus/nexus_db_v2.py:45 +msgid "add_nexusport_binding() called" +msgstr "" + +#: neutron/plugins/cisco/db/nexus_db_v2.py:62 +#: neutron/plugins/ml2/drivers/cisco/nexus/nexus_db_v2.py:58 +msgid "remove_nexusport_binding() called" +msgstr "" + +#: neutron/plugins/cisco/db/nexus_db_v2.py:78 +#: neutron/plugins/ml2/drivers/cisco/nexus/nexus_db_v2.py:74 +msgid "update_nexusport_binding called with no vlan" +msgstr "" + +#: neutron/plugins/cisco/db/nexus_db_v2.py:80 +#: neutron/plugins/ml2/drivers/cisco/nexus/nexus_db_v2.py:76 +msgid "update_nexusport_binding called" +msgstr "" + +#: neutron/plugins/cisco/db/nexus_db_v2.py:91 +msgid "get_nexusvm_binding() called" +msgstr "" + +#: neutron/plugins/cisco/db/nexus_db_v2.py:99 +#: neutron/plugins/ml2/drivers/cisco/nexus/nexus_db_v2.py:94 +msgid "get_port_vlan_switch_binding() called" +msgstr "" + +#: neutron/plugins/cisco/db/nexus_db_v2.py:107 +#: neutron/plugins/ml2/drivers/cisco/nexus/nexus_db_v2.py:102 +#, python-format +msgid "" +"get_port_switch_bindings() called, port:'%(port_id)s', " +"switch:'%(switch_ip)s'" +msgstr "" + +#: 
neutron/plugins/cisco/db/nexus_db_v2.py:119 +msgid "get_nexussvi_bindings() called" +msgstr "" + +#: neutron/plugins/cisco/models/virt_phy_sw_v2.py:74 +#, python-format +msgid "Loaded device plugin %s" +msgstr "" + +#: neutron/plugins/cisco/models/virt_phy_sw_v2.py:91 +#, python-format +msgid "%(module)s.%(name)s init done" +msgstr "" + +#: neutron/plugins/cisco/models/virt_phy_sw_v2.py:138 +#, python-format +msgid "No %s Plugin loaded" +msgstr "" + +#: neutron/plugins/cisco/models/virt_phy_sw_v2.py:139 +#, python-format +msgid "%(plugin_key)s: %(function_name)s with args %(args)s ignored" +msgstr "" + +#: neutron/plugins/cisco/models/virt_phy_sw_v2.py:169 +msgid "create_network() called" +msgstr "" + +#: neutron/plugins/cisco/models/virt_phy_sw_v2.py:182 +#, python-format +msgid "Provider network added to DB: %(network_id)s, %(vlan_id)s" +msgstr "" + +#: neutron/plugins/cisco/models/virt_phy_sw_v2.py:201 +msgid "update_network() called" +msgstr "" + +#: neutron/plugins/cisco/models/virt_phy_sw_v2.py:225 +#, python-format +msgid "Provider network removed from DB: %s" +msgstr "" + +#: neutron/plugins/cisco/models/virt_phy_sw_v2.py:287 +msgid "create_port() called" +msgstr "" + +#: neutron/plugins/cisco/models/virt_phy_sw_v2.py:352 +#, python-format +msgid "" +"tenant_id: %(tid)s, net_id: %(nid)s, old_device_id: %(odi)s, " +"new_device_id: %(ndi)s, old_host_id: %(ohi)s, new_host_id: %(nhi)s, " +"old_device_owner: %(odo)s, new_device_owner: %(ndo)s" +msgstr "" + +#: neutron/plugins/cisco/models/virt_phy_sw_v2.py:381 +msgid "update_port() called" +msgstr "" + +#: neutron/plugins/cisco/models/virt_phy_sw_v2.py:408 +#, python-format +msgid "Unable to update port '%s' on Nexus switch" +msgstr "" + +#: neutron/plugins/cisco/models/virt_phy_sw_v2.py:427 +msgid "delete_port() called" +msgstr "" + +#: neutron/plugins/cisco/models/virt_phy_sw_v2.py:468 +msgid "L3 enabled on Nexus plugin, create SVI on switch" +msgstr "" + +#: neutron/plugins/cisco/models/virt_phy_sw_v2.py:488 
+#: neutron/plugins/cisco/models/virt_phy_sw_v2.py:513 +msgid "L3 disabled or not Nexus plugin, send to vswitch" +msgstr "" + +#: neutron/plugins/cisco/models/virt_phy_sw_v2.py:502 +msgid "L3 enabled on Nexus plugin, delete SVI from switch" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_client.py:224 +msgid "Logical network" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_client.py:249 +msgid "network_segment_pool" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_client.py:294 +msgid "Invalid input for CIDR" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_client.py:434 +#, python-format +msgid "req: %s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_client.py:443 +#, python-format +msgid "status_code %s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_client.py:451 +#, python-format +msgid "VSM: %s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:146 +msgid "_setup_vsm" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:165 +msgid "_populate_policy_profiles" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:192 +msgid "No policy profile populated from VSM" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:229 +#: neutron/plugins/linuxbridge/lb_neutron_plugin.py:336 +#: neutron/plugins/mlnx/mlnx_plugin.py:219 +#: neutron/plugins/openvswitch/ovs_neutron_plugin.py:398 +msgid "provider:network_type required" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:233 +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:247 +#: neutron/plugins/linuxbridge/lb_neutron_plugin.py:346 +#: neutron/plugins/mlnx/mlnx_plugin.py:249 +#: neutron/plugins/openvswitch/ovs_neutron_plugin.py:408 +#: neutron/plugins/openvswitch/ovs_neutron_plugin.py:427 +msgid "provider:segmentation_id required" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:236 +msgid "provider:segmentation_id out of range (1 through 4094)" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:241 
+msgid "provider:physical_network specified for Overlay network" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:250 +msgid "provider:segmentation_id out of range (5000+)" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:254 +#: neutron/plugins/linuxbridge/lb_neutron_plugin.py:368 +#: neutron/plugins/mlnx/mlnx_plugin.py:235 +#: neutron/plugins/openvswitch/ovs_neutron_plugin.py:443 +#, python-format +msgid "provider:network_type %s not supported" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:265 +#: neutron/plugins/linuxbridge/lb_neutron_plugin.py:374 +#: neutron/plugins/mlnx/mlnx_plugin.py:275 +#: neutron/plugins/openvswitch/ovs_neutron_plugin.py:449 +#, python-format +msgid "Unknown provider:physical_network %s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:269 +#: neutron/plugins/linuxbridge/lb_neutron_plugin.py:380 +#: neutron/plugins/mlnx/mlnx_plugin.py:281 +#: neutron/plugins/openvswitch/ovs_neutron_plugin.py:455 +msgid "provider:physical_network required" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:447 +#, python-format +msgid "_populate_member_segments %s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:486 +msgid "Invalid pairing supplied" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:491 +#, python-format +msgid "Invalid UUID supplied in %s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:492 +msgid "Invalid UUID supplied" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:521 +#, python-format +msgid "Cannot add a trunk segment '%s' as a member of another trunk segment" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:526 +#, python-format +msgid "Cannot add vlan segment '%s' as a member of a vxlan trunk segment" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:532 +#, python-format +msgid "Network UUID '%s' belongs to a different physical 
network" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:537 +#, python-format +msgid "Cannot add vxlan segment '%s' as a member of a vlan trunk segment" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:542 +#, python-format +msgid "Vlan tag '%s' is out of range" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:545 +#, python-format +msgid "Vlan tag '%s' is not an integer value" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:550 +#, python-format +msgid "%s is not a valid uuid" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:597 +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:600 +msgid "n1kv:profile_id does not exist" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:612 +msgid "_send_create_logical_network" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:635 +#, python-format +msgid "_send_create_network_profile_request: %s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:645 +#, python-format +msgid "_send_update_network_profile_request: %s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:655 +#, python-format +msgid "_send_delete_network_profile_request: %s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:670 +#, python-format +msgid "_send_create_network_request: %s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:702 +#, python-format +msgid "_send_update_network_request: %s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:724 +#, python-format +msgid "add_segments=%s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:725 +#, python-format +msgid "del_segments=%s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:749 +#, python-format +msgid "_send_delete_network_request: %s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:789 +#, python-format +msgid "_send_create_subnet_request: 
%s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:799 +#, python-format +msgid "_send_update_subnet_request: %s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:810 +#, python-format +msgid "_send_delete_subnet_request: %s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:836 +#, python-format +msgid "_send_create_port_request: %s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:853 +#, python-format +msgid "_send_update_port_request: %s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:869 +#, python-format +msgid "_send_delete_port_request: %s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:900 +#, python-format +msgid "Create network: profile_id=%s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:908 +#, python-format +msgid "" +"Physical_network %(phy_net)s, seg_type %(net_type)s, seg_id %(seg_id)s, " +"multicast_ip %(multicast_ip)s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:920 +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:930 +#, python-format +msgid "Seg list %s " +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:970 +#: neutron/plugins/hyperv/hyperv_neutron_plugin.py:254 +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:198 +#: neutron/plugins/metaplugin/meta_neutron_plugin.py:232 +#: neutron/plugins/mlnx/mlnx_plugin.py:362 +#: neutron/plugins/openvswitch/ovs_neutron_plugin.py:503 +#, python-format +msgid "Created network: %s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:1028 +#, python-format +msgid "Updated network: %s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:1043 +#, python-format +msgid "Cannot delete network '%s' that is member of a trunk segment" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:1047 +#, python-format +msgid "Cannot delete network '%s' that is a member of a multi-segment network" +msgstr "" 
+ +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:1060 +#, python-format +msgid "Deleted network: %s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:1070 +#, python-format +msgid "Get network: %s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:1092 +msgid "Get networks" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:1142 +#, python-format +msgid "Create port: profile_id=%s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:1188 +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:305 +#, python-format +msgid "Created port: %s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:1199 +#, python-format +msgid "Update port: %s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:1246 +#, python-format +msgid "Get port: %s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:1266 +msgid "Get ports" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:1282 +msgid "Create subnet" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:1290 +#, python-format +msgid "Created subnet: %s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:1301 +msgid "Update subnet" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:1316 +#, python-format +msgid "Delete subnet: %s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:1331 +#, python-format +msgid "Get subnet: %s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:1351 +msgid "Get subnets" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:1436 +#, python-format +msgid "Scheduling router %s" +msgstr "" + +#: neutron/plugins/cisco/nexus/cisco_nexus_network_driver_v2.py:159 +#: neutron/plugins/cisco/nexus/cisco_nexus_network_driver_v2.py:167 +#: neutron/plugins/cisco/nexus/cisco_nexus_network_driver_v2.py:189 +#: neutron/plugins/cisco/nexus/cisco_nexus_network_driver_v2.py:195 +#: 
neutron/plugins/ml2/drivers/cisco/nexus/nexus_network_driver.py:113 +#: neutron/plugins/ml2/drivers/cisco/nexus/nexus_network_driver.py:152 +#: neutron/plugins/ml2/drivers/cisco/nexus/nexus_network_driver.py:161 +#, python-format +msgid "NexusDriver: %s" +msgstr "" + +#: neutron/plugins/cisco/nexus/cisco_nexus_network_driver_v2.py:174 +#: neutron/plugins/ml2/drivers/cisco/nexus/nexus_network_driver.py:168 +#, python-format +msgid "NexusDriver created VLAN: %s" +msgstr "" + +#: neutron/plugins/cisco/nexus/cisco_nexus_plugin_v2.py:49 +#, python-format +msgid "Loaded driver %s" +msgstr "" + +#: neutron/plugins/cisco/nexus/cisco_nexus_plugin_v2.py:64 +msgid "NexusPlugin:create_network() called" +msgstr "" + +#: neutron/plugins/cisco/nexus/cisco_nexus_plugin_v2.py:113 +#: neutron/plugins/ml2/drivers/cisco/nexus/mech_cisco_nexus.py:108 +#, python-format +msgid "Nexus: create & trunk vlan %s" +msgstr "" + +#: neutron/plugins/cisco/nexus/cisco_nexus_plugin_v2.py:120 +#, python-format +msgid "Nexus: create vlan %s" +msgstr "" + +#: neutron/plugins/cisco/nexus/cisco_nexus_plugin_v2.py:125 +#: neutron/plugins/ml2/drivers/cisco/nexus/mech_cisco_nexus.py:112 +#, python-format +msgid "Nexus: trunk vlan %s" +msgstr "" + +#: neutron/plugins/cisco/nexus/cisco_nexus_plugin_v2.py:138 +#, python-format +msgid "Nexus: delete & untrunk vlan %s" +msgstr "" + +#: neutron/plugins/cisco/nexus/cisco_nexus_plugin_v2.py:144 +#, python-format +msgid "Nexus: delete vlan %s" +msgstr "" + +#: neutron/plugins/cisco/nexus/cisco_nexus_plugin_v2.py:147 +#, python-format +msgid "Nexus: untrunk vlan %s" +msgstr "" + +#: neutron/plugins/cisco/nexus/cisco_nexus_plugin_v2.py:204 +msgid "Grabbing a switch to create SVI" +msgstr "" + +#: neutron/plugins/cisco/nexus/cisco_nexus_plugin_v2.py:207 +msgid "Using round robin to create SVI" +msgstr "" + +#: neutron/plugins/cisco/nexus/cisco_nexus_plugin_v2.py:226 +msgid "No round robin or zero weights, using first switch" +msgstr "" + +#: 
neutron/plugins/cisco/nexus/cisco_nexus_plugin_v2.py:236 +msgid "NexusPlugin:delete_network() called" +msgstr "" + +#: neutron/plugins/cisco/nexus/cisco_nexus_plugin_v2.py:244 +msgid "NexusPlugin:update_network() called" +msgstr "" + +#: neutron/plugins/cisco/nexus/cisco_nexus_plugin_v2.py:252 +msgid "NexusPlugin:create_port() called" +msgstr "" + +#: neutron/plugins/cisco/nexus/cisco_nexus_plugin_v2.py:260 +msgid "NexusPlugin:delete_port() called" +msgstr "" + +#: neutron/plugins/cisco/nexus/cisco_nexus_plugin_v2.py:272 +#, python-format +msgid "delete_network(): provider vlan %s" +msgstr "" + +#: neutron/plugins/cisco/nexus/cisco_nexus_plugin_v2.py:329 +msgid "NexusPlugin:update_port() called" +msgstr "" + +#: neutron/plugins/cisco/nexus/cisco_nexus_plugin_v2.py:338 +msgid "NexusPlugin:plug_interface() called" +msgstr "" + +#: neutron/plugins/cisco/nexus/cisco_nexus_plugin_v2.py:346 +msgid "NexusPlugin:unplug_interface() called" +msgstr "" + +#: neutron/plugins/common/utils.py:32 +#, python-format +msgid "%s is not a valid VLAN tag" +msgstr "" + +#: neutron/plugins/common/utils.py:36 +msgid "End of VLAN range is less than start of VLAN range" +msgstr "" + +#: neutron/plugins/embrane/base_plugin.py:109 +#: neutron/plugins/embrane/agent/dispatcher.py:134 +#: neutron/services/loadbalancer/drivers/embrane/poller.py:56 +#: neutron/services/loadbalancer/drivers/embrane/agent/dispatcher.py:108 +msgid "Unhandled exception occurred" +msgstr "" + +#: neutron/plugins/embrane/base_plugin.py:174 +#: neutron/plugins/embrane/base_plugin.py:193 +#, python-format +msgid "The following routers have not physical match: %s" +msgstr "" + +#: neutron/plugins/embrane/base_plugin.py:179 +#, python-format +msgid "Requested router: %s" +msgstr "" + +#: neutron/plugins/embrane/base_plugin.py:231 +#, python-format +msgid "Deleting router=%s" +msgstr "" + +#: neutron/plugins/embrane/agent/operations/router_operations.py:99 +#, python-format +msgid "The router %s had no physical 
representation,likely already deleted" +msgstr "" + +#: neutron/plugins/embrane/agent/operations/router_operations.py:128 +#, python-format +msgid "Interface %s not found in the heleos back-end,likely already deleted" +msgstr "" + +#: neutron/plugins/embrane/common/config.py:25 +#: neutron/services/loadbalancer/drivers/embrane/config.py:25 +msgid "ESM management root address" +msgstr "" + +#: neutron/plugins/embrane/common/config.py:27 +#: neutron/services/loadbalancer/drivers/embrane/config.py:27 +msgid "ESM admin username." +msgstr "" + +#: neutron/plugins/embrane/common/config.py:30 +#: neutron/services/loadbalancer/drivers/embrane/config.py:30 +msgid "ESM admin password." +msgstr "" + +#: neutron/plugins/embrane/common/config.py:32 +msgid "Router image id (Embrane FW/VPN)" +msgstr "" + +#: neutron/plugins/embrane/common/config.py:34 +msgid "In band Security Zone id" +msgstr "" + +#: neutron/plugins/embrane/common/config.py:36 +msgid "Out of band Security Zone id" +msgstr "" + +#: neutron/plugins/embrane/common/config.py:38 +msgid "Management Security Zone id" +msgstr "" + +#: neutron/plugins/embrane/common/config.py:40 +msgid "Dummy user traffic Security Zone id" +msgstr "" + +#: neutron/plugins/embrane/common/config.py:42 +#: neutron/services/loadbalancer/drivers/embrane/config.py:42 +msgid "Shared resource pool id" +msgstr "" + +#: neutron/plugins/embrane/common/config.py:44 +#: neutron/services/loadbalancer/drivers/embrane/config.py:49 +msgid "Define if the requests have run asynchronously or not" +msgstr "" + +#: neutron/plugins/embrane/common/constants.py:51 +#: neutron/services/loadbalancer/drivers/embrane/constants.py:50 +#, python-format +msgid "Dva is pending for the following reason: %s" +msgstr "" + +#: neutron/plugins/embrane/common/constants.py:52 +msgid "" +"Dva can't be found to execute the operation, probably was cancelled " +"through the heleos UI" +msgstr "" + +#: neutron/plugins/embrane/common/constants.py:54 +#: 
neutron/services/loadbalancer/drivers/embrane/constants.py:53 +#, python-format +msgid "Dva seems to be broken for reason %s" +msgstr "" + +#: neutron/plugins/embrane/common/constants.py:55 +#, python-format +msgid "Dva interface seems to be broken for reason %s" +msgstr "" + +#: neutron/plugins/embrane/common/constants.py:57 +#: neutron/services/loadbalancer/drivers/embrane/constants.py:54 +#, python-format +msgid "Dva creation failed reason %s" +msgstr "" + +#: neutron/plugins/embrane/common/constants.py:58 +#: neutron/services/loadbalancer/drivers/embrane/constants.py:55 +#, python-format +msgid "Dva creation is in pending state for reason %s" +msgstr "" + +#: neutron/plugins/embrane/common/constants.py:60 +#: neutron/services/loadbalancer/drivers/embrane/constants.py:57 +#, python-format +msgid "Dva configuration failed for reason %s" +msgstr "" + +#: neutron/plugins/embrane/common/constants.py:61 +#, python-format +msgid "" +"Failed to delete the backend router for reason %s. Please remove it " +"manually through the heleos UI" +msgstr "" + +#: neutron/plugins/embrane/common/exceptions.py:24 +#, python-format +msgid "An unexpected error occurred:%(err_msg)s" +msgstr "" + +#: neutron/plugins/embrane/common/exceptions.py:28 +#, python-format +msgid "%(err_msg)s" +msgstr "" + +#: neutron/plugins/embrane/common/utils.py:47 +msgid "No ip allocation set" +msgstr "" + +#: neutron/plugins/embrane/l2base/support_exceptions.py:24 +#, python-format +msgid "Cannot retrieve utif info for the following reason: %(err_msg)s" +msgstr "" + +#: neutron/plugins/embrane/l2base/openvswitch/openvswitch_support.py:46 +msgid "" +"No segmentation_id found for the network, please be sure that " +"tenant_network_type is vlan" +msgstr "" + +#: neutron/plugins/hyperv/db.py:42 +#: neutron/plugins/linuxbridge/db/l2network_db_v2.py:113 +#: neutron/plugins/openvswitch/ovs_db_v2.py:131 +#, python-format +msgid "" +"Reserving vlan %(vlan_id)s on physical network %(physical_network)s from " 
+"pool" +msgstr "" + +#: neutron/plugins/hyperv/db.py:57 +#, python-format +msgid "Reserving flat physical network %(physical_network)s from pool" +msgstr "" + +#: neutron/plugins/hyperv/db.py:80 +#: neutron/plugins/linuxbridge/db/l2network_db_v2.py:136 +#: neutron/plugins/ml2/drivers/type_vlan.py:204 +#: neutron/plugins/openvswitch/ovs_db_v2.py:155 +#, python-format +msgid "" +"Reserving specific vlan %(vlan_id)s on physical network " +"%(physical_network)s from pool" +msgstr "" + +#: neutron/plugins/hyperv/db.py:137 +#, python-format +msgid "Releasing vlan %(vlan_id)s on physical network %(physical_network)s" +msgstr "" + +#: neutron/plugins/hyperv/db.py:142 +#: neutron/plugins/linuxbridge/db/l2network_db_v2.py:177 +#: neutron/plugins/openvswitch/ovs_db_v2.py:196 +#, python-format +msgid "vlan_id %(vlan_id)s on physical network %(physical_network)s not found" +msgstr "" + +#: neutron/plugins/hyperv/db.py:167 neutron/plugins/hyperv/db.py:180 +#: neutron/plugins/linuxbridge/db/l2network_db_v2.py:64 +#: neutron/plugins/linuxbridge/db/l2network_db_v2.py:83 +#: neutron/plugins/ml2/drivers/type_vlan.py:128 +#: neutron/plugins/ml2/drivers/type_vlan.py:149 +#: neutron/plugins/openvswitch/ovs_db_v2.py:87 +#: neutron/plugins/openvswitch/ovs_db_v2.py:105 +#, python-format +msgid "" +"Removing vlan %(vlan_id)s on physical network %(physical_network)s from " +"pool" +msgstr "" + +#: neutron/plugins/hyperv/hyperv_neutron_plugin.py:46 +msgid "Network type for tenant networks (local, flat, vlan or none)" +msgstr "" + +#: neutron/plugins/hyperv/hyperv_neutron_plugin.py:50 +#: neutron/plugins/linuxbridge/common/config.py:35 +#: neutron/plugins/mlnx/common/config.py:32 +#: neutron/plugins/openvswitch/common/config.py:51 +msgid "List of :: or " +msgstr "" + +#: neutron/plugins/hyperv/hyperv_neutron_plugin.py:78 +#: neutron/plugins/hyperv/hyperv_neutron_plugin.py:100 +#, python-format +msgid "segmentation_id specified for %s network" +msgstr "" + +#: 
neutron/plugins/hyperv/hyperv_neutron_plugin.py:85 +#, python-format +msgid "physical_network specified for %s network" +msgstr "" + +#: neutron/plugins/hyperv/hyperv_neutron_plugin.py:127 +msgid "physical_network not provided" +msgstr "" + +#: neutron/plugins/hyperv/hyperv_neutron_plugin.py:180 +#, python-format +msgid "Invalid tenant_network_type: %s. Agent terminated!" +msgstr "" + +#: neutron/plugins/hyperv/hyperv_neutron_plugin.py:203 +#: neutron/plugins/linuxbridge/lb_neutron_plugin.py:297 +#: neutron/plugins/ml2/drivers/type_vlan.py:94 +#: neutron/plugins/mlnx/mlnx_plugin.py:180 +#: neutron/plugins/openvswitch/ovs_neutron_plugin.py:350 +#, python-format +msgid "Network VLAN ranges: %s" +msgstr "" + +#: neutron/plugins/hyperv/hyperv_neutron_plugin.py:228 +#, python-format +msgid "Network type %s not supported" +msgstr "" + +#: neutron/plugins/hyperv/rpc_callbacks.py:71 +#: neutron/plugins/linuxbridge/lb_neutron_plugin.py:112 +#: neutron/plugins/mlnx/rpc_callbacks.py:92 +#: neutron/plugins/openvswitch/ovs_neutron_plugin.py:114 +#, python-format +msgid "Device %(device)s no longer exists on %(agent_id)s" +msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:52 +msgid "" +"List of : where the physical networks can be " +"expressed with wildcards, e.g.: .\"*:external\"" +msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:58 +msgid "Private vswitch name used for local networks" +msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:60 +#: neutron/plugins/linuxbridge/common/config.py:66 +#: neutron/plugins/mlnx/common/config.py:69 +#: neutron/plugins/nec/common/config.py:31 +#: neutron/plugins/oneconvergence/lib/config.py:47 +#: neutron/plugins/openvswitch/common/config.py:63 +#: neutron/plugins/ryu/common/config.py:45 +msgid "" +"The number of seconds the agent will wait between polling for local " +"device changes." 
+msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:64 +msgid "" +"Enables metrics collections for switch ports by using Hyper-V's metric " +"APIs. Collected data can by retrieved by other apps and services, e.g.: " +"Ceilometer. Requires Hyper-V / Windows Server 2012 and above" +msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:71 +msgid "" +"Specifies the maximum number of retries to enable Hyper-V's port metrics " +"collection. The agent will try to enable the feature once every " +"polling_interval period for at most metrics_max_retries or until it " +"succeedes." +msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:153 +#, python-format +msgid "Failed reporting state! %s" +msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:188 +#, python-format +msgid "Invalid physical network mapping: %s" +msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:209 +#, python-format +msgid "network_delete received. Deleting network %s" +msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:215 +#, python-format +msgid "Network %s not defined on agent." 
+msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:218 +msgid "port_delete received" +msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:223 +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:173 +msgid "port_update received" +msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:245 +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:131 +#, python-format +msgid "Provisioning network %s" +msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:258 +#, python-format +msgid "" +"Cannot provision unknown network type %(network_type)s for network " +"%(net_uuid)s" +msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:270 +#, python-format +msgid "Reclaiming local network %s" +msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:278 +#, python-format +msgid "Binding port %s" +msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:291 +#, python-format +msgid "Binding VLAN ID %(segmentation_id)s to switch port %(port_id)s" +msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:304 +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:116 +#, python-format +msgid "Unsupported network type %s" +msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:313 +#, python-format +msgid "Network %s is not avalailable on this agent" +msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:317 +#, python-format +msgid "Unbinding port %s" +msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:330 +#, python-format +msgid "Port metrics enabled for port: %s" +msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:334 +#, python-format +msgid "Port metrics raw enabling for port: %s" +msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:359 +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:211 +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:314 +#, python-format +msgid 
"No port %s defined on agent." +msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:364 +#, python-format +msgid "Adding port %s" +msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:372 +#, python-format +msgid "Unable to get port details for device %(device)s: %(e)s" +msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:379 +#, python-format +msgid "Port %(device)s updated. Details: %(device_details)s" +msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:405 +#, python-format +msgid "Removing port %s" +msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:413 +#, python-format +msgid "Removing port failed for device %(device)s: %(e)s" +msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:438 +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:965 +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:382 +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1267 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1327 +msgid "Agent out of sync with plugin!" +msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:446 +msgid "Agent loop has new devices!" +msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:453 +#, python-format +msgid "Error in agent event loop: %s" +msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:461 +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:405 +#, python-format +msgid "Loop iteration exceeded interval (%(polling_interval)s vs. %(elapsed)s)" +msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:474 +#: neutron/plugins/ibm/agent/sdnve_neutron_agent.py:269 +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:1020 +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:158 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1511 +msgid "Agent initialized successfully, now running... 
" +msgstr "" + +#: neutron/plugins/hyperv/agent/security_groups_driver.py:62 +#, python-format +msgid "Hyper-V Exception: %(hyperv_exeption)s while adding rule: %(rule)s" +msgstr "" + +#: neutron/plugins/hyperv/agent/security_groups_driver.py:72 +#, python-format +msgid "Hyper-V Exception: %(hyperv_exeption)s while removing rule: %(rule)s" +msgstr "" + +#: neutron/plugins/hyperv/agent/security_groups_driver.py:92 +msgid "Aplying port filter." +msgstr "" + +#: neutron/plugins/hyperv/agent/security_groups_driver.py:95 +msgid "Updating port rules." +msgstr "" + +#: neutron/plugins/hyperv/agent/security_groups_driver.py:108 +#, python-format +msgid "Creating %(new)s new rules, removing %(old)s old rules." +msgstr "" + +#: neutron/plugins/hyperv/agent/security_groups_driver.py:119 +msgid "Removing port filter" +msgstr "" + +#: neutron/plugins/hyperv/agent/utils.py:38 +#, python-format +msgid "HyperVException: %(msg)s" +msgstr "" + +#: neutron/plugins/hyperv/agent/utils.py:83 +#, python-format +msgid "Vnic not found: %s" +msgstr "" + +#: neutron/plugins/hyperv/agent/utils.py:118 +#, python-format +msgid "Job failed with error %d" +msgstr "" + +#: neutron/plugins/hyperv/agent/utils.py:137 +#, python-format +msgid "" +"WMI job failed with status %(job_state)d. Error details: %(err_sum_desc)s" +" - %(err_desc)s - Error code: %(err_code)d" +msgstr "" + +#: neutron/plugins/hyperv/agent/utils.py:146 +#, python-format +msgid "WMI job failed with status %(job_state)d. Error details: %(error)s" +msgstr "" + +#: neutron/plugins/hyperv/agent/utils.py:150 +#, python-format +msgid "WMI job failed with status %d. 
No error description available" +msgstr "" + +#: neutron/plugins/hyperv/agent/utils.py:155 +#, python-format +msgid "WMI job succeeded: %(desc)s, Elapsed=%(elap)s" +msgstr "" + +#: neutron/plugins/hyperv/agent/utils.py:169 +#, python-format +msgid "Failed creating port for %s" +msgstr "" + +#: neutron/plugins/hyperv/agent/utils.py:189 +#, python-format +msgid "" +"Failed to disconnect port %(switch_port_name)s from switch " +"%(vswitch_name)s with error %(ret_val)s" +msgstr "" + +#: neutron/plugins/hyperv/agent/utils.py:200 +#, python-format +msgid "" +"Failed to delete port %(switch_port_name)s from switch %(vswitch_name)s " +"with error %(ret_val)s" +msgstr "" + +#: neutron/plugins/hyperv/agent/utils.py:207 +#: neutron/plugins/hyperv/agent/utilsv2.py:137 +#, python-format +msgid "VSwitch not found: %s" +msgstr "" + +#: neutron/plugins/hyperv/agent/utils.py:248 +#: neutron/plugins/hyperv/agent/utils.py:252 +msgid "Metrics collection is not supported on this version of Hyper-V" +msgstr "" + +#: neutron/plugins/hyperv/agent/utilsfactory.py:34 +msgid "Force V1 WMI utility classes" +msgstr "" + +#: neutron/plugins/hyperv/agent/utilsfactory.py:63 +msgid "" +"V1 virtualization namespace no longer supported on Windows Server / " +"Hyper-V Server 2012 R2 or above." 
+msgstr "" + +#: neutron/plugins/hyperv/agent/utilsfactory.py:70 +#, python-format +msgid "Loading class: %(module_name)s.%(class_name)s" +msgstr "" + +#: neutron/plugins/hyperv/agent/utilsv2.py:160 +#: neutron/plugins/hyperv/agent/utilsv2.py:320 +#, python-format +msgid "Port Allocation not found: %s" +msgstr "" + +#: neutron/plugins/hyperv/agent/utilsv2.py:270 +#, python-format +msgid "Cannot get VM summary data for: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_api.py:77 +#, python-format +msgid "The IP addr of available SDN-VE controllers: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_api.py:80 +#, python-format +msgid "The SDN-VE controller IP address: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_api.py:97 +#, python-format +msgid "unable to serialize object type: '%s'" +msgstr "" + +#: neutron/plugins/ibm/sdnve_api.py:164 +#, python-format +msgid "" +"Sending request to SDN-VE. url: %(myurl)s method: %(method)s body: " +"%(body)s header: %(header)s " +msgstr "" + +#: neutron/plugins/ibm/sdnve_api.py:177 +#, python-format +msgid "Error: Could not reach server: %(url)s Exception: %(excp)s." 
+msgstr "" + +#: neutron/plugins/ibm/sdnve_api.py:184 +#, python-format +msgid "Error message: %(reply)s -- Status: %(status)s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_api.py:187 +#, python-format +msgid "Received response status: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_api.py:194 +#, python-format +msgid "Deserialized body: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_api.py:236 +msgid "Bad resource for forming a list request" +msgstr "" + +#: neutron/plugins/ibm/sdnve_api.py:246 +msgid "Bad resource for forming a show request" +msgstr "" + +#: neutron/plugins/ibm/sdnve_api.py:256 +msgid "Bad resource for forming a create request" +msgstr "" + +#: neutron/plugins/ibm/sdnve_api.py:268 +msgid "Bad resource for forming a update request" +msgstr "" + +#: neutron/plugins/ibm/sdnve_api.py:279 +msgid "Bad resource for forming a delete request" +msgstr "" + +#: neutron/plugins/ibm/sdnve_api.py:307 +#, python-format +msgid "Non matching tenant and network types: %(ttype)s %(ntype)s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_api.py:369 +#, python-format +msgid "Did not find tenant: %r" +msgstr "" + +#: neutron/plugins/ibm/sdnve_api_fake.py:32 +msgid "Fake SDNVE controller initialized" +msgstr "" + +#: neutron/plugins/ibm/sdnve_api_fake.py:35 +msgid "Fake SDNVE controller: list" +msgstr "" + +#: neutron/plugins/ibm/sdnve_api_fake.py:39 +msgid "Fake SDNVE controller: show" +msgstr "" + +#: neutron/plugins/ibm/sdnve_api_fake.py:43 +msgid "Fake SDNVE controller: create" +msgstr "" + +#: neutron/plugins/ibm/sdnve_api_fake.py:47 +msgid "Fake SDNVE controller: update" +msgstr "" + +#: neutron/plugins/ibm/sdnve_api_fake.py:51 +msgid "Fake SDNVE controller: delete" +msgstr "" + +#: neutron/plugins/ibm/sdnve_api_fake.py:55 +msgid "Fake SDNVE controller: get tenant by id" +msgstr "" + +#: neutron/plugins/ibm/sdnve_api_fake.py:59 +msgid "Fake SDNVE controller: check and create tenant" +msgstr "" + +#: neutron/plugins/ibm/sdnve_api_fake.py:63 +msgid "Fake SDNVE controller: 
get controller" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:152 +msgid "Set a new controller if needed." +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:158 +#, python-format +msgid "Set the controller to a new controller: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:166 +#, python-format +msgid "Original SDN-VE HTTP request: %(orig)s; New request: %(new)s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:176 +#, python-format +msgid "Create network in progress: %r" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:185 +msgid "Create net failed: no SDN-VE tenant." +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:196 +#, python-format +msgid "Create net failed in SDN-VE: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:203 +#, python-format +msgid "Update network in progress: %r" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:223 +#, python-format +msgid "Update net failed in SDN-VE: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:229 +#, python-format +msgid "Delete network in progress: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:239 +#, python-format +msgid "Delete net failed after deleting the network in DB: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:244 +#, python-format +msgid "Get network in progress: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:250 +msgid "Get networks in progress" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:260 +#, python-format +msgid "Create port in progress: %r" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:276 +msgid "Create port does not have tenant id info" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:282 +#, python-format +msgid "Create port does not have tenant id info; obtained is: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:303 +#, python-format +msgid "Create port failed 
in SDN-VE: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:310 +#, python-format +msgid "Update port in progress: %r" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:337 +#, python-format +msgid "Update port failed in SDN-VE: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:343 +#, python-format +msgid "Delete port in progress: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:356 +#, python-format +msgid "Delete port operation failed in SDN-VE after deleting the port from DB: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:365 +#, python-format +msgid "Create subnet in progress: %r" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:377 +#, python-format +msgid "Create subnet failed in SDN-VE: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:379 +#, python-format +msgid "Subnet created: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:385 +#, python-format +msgid "Update subnet in progress: %r" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:410 +#, python-format +msgid "Update subnet failed in SDN-VE: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:416 +#, python-format +msgid "Delete subnet in progress: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:421 +#, python-format +msgid "" +"Delete subnet operation failed in SDN-VE after deleting the subnet from " +"DB: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:430 +#, python-format +msgid "Create router in progress: %r" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:433 +#, python-format +msgid "Ignoring admin_state_up=False for router=%r. Overriding with True" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:443 +msgid "Create router failed: no SDN-VE tenant." 
+msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:451 +#, python-format +msgid "Create router failed in SDN-VE: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:453 +#, python-format +msgid "Router created: %r" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:458 +#, python-format +msgid "Update router in progress: id=%(id)s router=%(router)r" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:465 +msgid "admin_state_up=False routers are not supported." +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:489 +#, python-format +msgid "Update router failed in SDN-VE: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:495 +#, python-format +msgid "Delete router in progress: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:502 +#, python-format +msgid "" +"Delete router operation failed in SDN-VE after deleting the router in DB:" +" %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:507 +#, python-format +msgid "" +"Add router interface in progress: router_id=%(router_id)s " +"interface_info=%(interface_info)r" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:515 +#, python-format +msgid "SdnvePluginV2.add_router_interface called. Port info: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:529 +#, python-format +msgid "Update router-add-interface failed in SDN-VE: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:532 +#, python-format +msgid "Added router interface: %r" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:536 +#, python-format +msgid "" +"Add router interface only called: router_id=%(router_id)s " +"interface_info=%(interface_info)r" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:546 +msgid "" +"SdnvePluginV2._add_router_interface_only: failed to add the interface in " +"the roll back. 
of a remove_router_interface operation" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:552 +#, python-format +msgid "" +"Remove router interface in progress: router_id=%(router_id)s " +"interface_info=%(interface_info)r" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:561 +msgid "No port ID" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:563 +#, python-format +msgid "SdnvePluginV2.remove_router_interface port: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:567 +msgid "No fixed IP" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:572 +#, python-format +msgid "SdnvePluginV2.remove_router_interface subnet_id: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:595 +#, python-format +msgid "Update router-remove-interface failed SDN-VE: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:616 +#, python-format +msgid "Create floatingip in progress: %r" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:627 +#, python-format +msgid "Creating floating ip operation failed in SDN-VE controller: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:630 +#, python-format +msgid "Created floatingip : %r" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:635 +#, python-format +msgid "Update floatingip in progress: %r" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:655 +#, python-format +msgid "Update floating ip failed in SDN-VE: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:661 +#, python-format +msgid "Delete floatingip in progress: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:666 +#, python-format +msgid "Delete floatingip failed in SDN-VE: %s" +msgstr "" + +#: neutron/plugins/ibm/agent/sdnve_neutron_agent.py:139 +msgid "info_update received" +msgstr "" + +#: neutron/plugins/ibm/agent/sdnve_neutron_agent.py:144 +#, python-format +msgid "info_update received. 
New controlleris to be set to: %s" +msgstr "" + +#: neutron/plugins/ibm/agent/sdnve_neutron_agent.py:150 +msgid "info_update received. New controlleris set to be out of band" +msgstr "" + +#: neutron/plugins/ibm/agent/sdnve_neutron_agent.py:195 +#, python-format +msgid "Mapping physical network %(physical_network)s to interface %(interface)s" +msgstr "" + +#: neutron/plugins/ibm/agent/sdnve_neutron_agent.py:201 +#, python-format +msgid "" +"Interface %(interface)s for physical network %(physical_network)s does " +"not exist. Agent terminated!" +msgstr "" + +#: neutron/plugins/ibm/agent/sdnve_neutron_agent.py:219 +msgid "Agent in the rpc loop." +msgstr "" + +#: neutron/plugins/ibm/agent/sdnve_neutron_agent.py:241 +#, python-format +msgid "Controller IPs: %s" +msgstr "" + +#: neutron/plugins/ibm/agent/sdnve_neutron_agent.py:263 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1498 +#, python-format +msgid "%s Agent terminated!" +msgstr "" + +#: neutron/plugins/ibm/common/config.py:28 +msgid "If set to True uses a fake controller." 
+msgstr "" + +#: neutron/plugins/ibm/common/config.py:30 +msgid "Base URL for SDN-VE controller REST API" +msgstr "" + +#: neutron/plugins/ibm/common/config.py:32 +msgid "List of IP addresses of SDN-VE controller(s)" +msgstr "" + +#: neutron/plugins/ibm/common/config.py:34 +msgid "SDN-VE RPC subject" +msgstr "" + +#: neutron/plugins/ibm/common/config.py:36 +msgid "SDN-VE controller port number" +msgstr "" + +#: neutron/plugins/ibm/common/config.py:38 +msgid "SDN-VE request/response format" +msgstr "" + +#: neutron/plugins/ibm/common/config.py:40 +msgid "SDN-VE administrator user id" +msgstr "" + +#: neutron/plugins/ibm/common/config.py:42 +msgid "SDN-VE administrator password" +msgstr "" + +#: neutron/plugins/ibm/common/config.py:44 +#: neutron/plugins/nec/common/config.py:26 +#: neutron/plugins/openvswitch/common/config.py:30 +#: neutron/plugins/ryu/common/config.py:24 +msgid "Integration bridge to use" +msgstr "" + +#: neutron/plugins/ibm/common/config.py:46 +msgid "Reset the integration bridge before use" +msgstr "" + +#: neutron/plugins/ibm/common/config.py:48 +msgid "Indicating if controller is out of band or not" +msgstr "" + +#: neutron/plugins/ibm/common/config.py:51 +msgid "List of :" +msgstr "" + +#: neutron/plugins/ibm/common/config.py:53 +msgid "Tenant type: OVERLAY (default) or OF" +msgstr "" + +#: neutron/plugins/ibm/common/config.py:55 +msgid "" +"The string in tenant description that indicates the tenant is a OVERLAY " +"tenant" +msgstr "" + +#: neutron/plugins/ibm/common/config.py:58 +msgid "The string in tenant description that indicates the tenant is a OF tenant" +msgstr "" + +#: neutron/plugins/ibm/common/config.py:64 +msgid "Agent polling interval if necessary" +msgstr "" + +#: neutron/plugins/ibm/common/config.py:66 +msgid "Using root helper" +msgstr "" + +#: neutron/plugins/ibm/common/config.py:68 +msgid "Whether using rpc" +msgstr "" + +#: neutron/plugins/ibm/common/exceptions.py:23 +#, python-format +msgid "" +"An unexpected error occurred 
in the SDN-VE Plugin. Here is the error " +"message: %(msg)s" +msgstr "" + +#: neutron/plugins/ibm/common/exceptions.py:28 +#, python-format +msgid "The input does not contain nececessary info: %(msg)s" +msgstr "" + +#: neutron/plugins/linuxbridge/lb_neutron_plugin.py:120 +#: neutron/plugins/linuxbridge/lb_neutron_plugin.py:144 +#: neutron/plugins/ml2/rpc.py:170 neutron/plugins/ml2/rpc.py:192 +#: neutron/plugins/openvswitch/ovs_neutron_plugin.py:122 +#: neutron/plugins/openvswitch/ovs_neutron_plugin.py:147 +#, python-format +msgid "Device %(device)s not bound to the agent host %(host)s" +msgstr "" + +#: neutron/plugins/linuxbridge/lb_neutron_plugin.py:138 +#: neutron/plugins/openvswitch/ovs_neutron_plugin.py:141 +#, python-format +msgid "Device %(device)s up on %(agent_id)s" +msgstr "" + +#: neutron/plugins/linuxbridge/lb_neutron_plugin.py:258 +#: neutron/plugins/mlnx/mlnx_plugin.py:200 +#, python-format +msgid "Invalid tenant_network_type: %s. Service terminated!" +msgstr "" + +#: neutron/plugins/linuxbridge/lb_neutron_plugin.py:269 +msgid "Linux Bridge Plugin initialization complete" +msgstr "" + +#: neutron/plugins/linuxbridge/lb_neutron_plugin.py:295 +#, python-format +msgid "%s. Agent terminated!" 
+msgstr "" + +#: neutron/plugins/linuxbridge/lb_neutron_plugin.py:340 +#: neutron/plugins/mlnx/mlnx_plugin.py:244 +#: neutron/plugins/openvswitch/ovs_neutron_plugin.py:402 +msgid "provider:segmentation_id specified for flat network" +msgstr "" + +#: neutron/plugins/linuxbridge/lb_neutron_plugin.py:349 +#: neutron/plugins/mlnx/mlnx_plugin.py:252 +#: neutron/plugins/openvswitch/ovs_neutron_plugin.py:411 +#, python-format +msgid "provider:segmentation_id out of range (%(min_id)s through %(max_id)s)" +msgstr "" + +#: neutron/plugins/linuxbridge/lb_neutron_plugin.py:356 +#: neutron/plugins/mlnx/mlnx_plugin.py:260 +#: neutron/plugins/openvswitch/ovs_neutron_plugin.py:431 +msgid "provider:physical_network specified for local network" +msgstr "" + +#: neutron/plugins/linuxbridge/lb_neutron_plugin.py:362 +#: neutron/plugins/mlnx/mlnx_plugin.py:264 +#: neutron/plugins/openvswitch/ovs_neutron_plugin.py:437 +msgid "provider:segmentation_id specified for local network" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:85 +msgid "VXLAN is enabled, a valid local_ip must be provided" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:99 +msgid "Invalid Network ID, will lead to incorrect bridgename" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:106 +msgid "Invalid VLAN ID, will lead to incorrect subinterface name" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:113 +msgid "Invalid Interface ID, will lead to incorrect tap device name" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:122 +#, python-format +msgid "Invalid Segmentation ID: %s, will lead to incorrect vxlan device name" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:185 +#, python-format +msgid "Failed creating vxlan interface for %(segmentation_id)s" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:217 
+#, python-format +msgid "" +"Creating subinterface %(interface)s for VLAN %(vlan_id)s on interface " +"%(physical_interface)s" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:230 +#, python-format +msgid "Done creating subinterface %s" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:237 +#, python-format +msgid "Creating vxlan interface %(interface)s for VNI %(segmentation_id)s" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:252 +#, python-format +msgid "Done creating vxlan interface %s" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:305 +#, python-format +msgid "Starting bridge %(bridge_name)s for subinterface %(interface)s" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:320 +#, python-format +msgid "Done starting bridge %(bridge_name)s for subinterface %(interface)s" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:342 +#, python-format +msgid "Unable to add %(interface)s to %(bridge_name)s! Exception: %(e)s" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:355 +#, python-format +msgid "Unable to add vxlan interface for network %s" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:362 +#, python-format +msgid "No mapping for physical network %s" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:371 +#, python-format +msgid "Unknown network_type %(network_type)s for network %(network_id)s." 
+msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:384 +#, python-format +msgid "Tap device: %s does not exist on this host, skipped" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:402 +#, python-format +msgid "Adding device %(tap_device_name)s to bridge %(bridge_name)s" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:411 +#, python-format +msgid "%(tap_device_name)s already exists on bridge %(bridge_name)s" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:449 +#, python-format +msgid "Deleting bridge %s" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:456 +#, python-format +msgid "Done deleting bridge %s" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:459 +#, python-format +msgid "Cannot delete bridge %s, does not exist" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:473 +#, python-format +msgid "Removing device %(interface_name)s from bridge %(bridge_name)s" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:480 +#, python-format +msgid "Done removing device %(interface_name)s from bridge %(bridge_name)s" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:486 +#, python-format +msgid "" +"Cannot remove device %(interface_name)s bridge %(bridge_name)s does not " +"exist" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:494 +#, python-format +msgid "Deleting subinterface %s for vlan" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:501 +#, python-format +msgid "Done deleting subinterface %s" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:505 +#, python-format +msgid "Deleting vxlan interface %s for vlan" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:510 +#, 
python-format +msgid "Done deleting vxlan interface %s" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:524 +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:559 +#, python-format +msgid "" +"Option \"%(option)s\" must be supported by command \"%(command)s\" to " +"enable %(mode)s mode" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:536 +msgid "No valid Segmentation ID to perform UCAST test." +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:553 +msgid "" +"VXLAN muticast group must be provided in vxlan_group option to enable " +"VXLAN MCAST mode" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:578 +msgid "" +"Linux kernel vxlan module and iproute2 3.8 or above are required to " +"enable VXLAN." +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:588 +#, python-format +msgid "Using %s VXLAN mode" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:665 +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:164 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:276 +msgid "network_delete received" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:679 +#, python-format +msgid "port_update RPC received for port: %s" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:682 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:317 +msgid "fdb_add received" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:704 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:345 +msgid "fdb_remove received" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:726 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:424 +msgid "update chg_ip received" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:751 +#: 
neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:447 +msgid "fdb_update received" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:808 +msgid "Unable to obtain MAC address for unique ID. Agent terminated!" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:812 +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:254 +#: neutron/plugins/nec/agent/nec_neutron_agent.py:144 +#: neutron/plugins/oneconvergence/agent/nvsd_neutron_agent.py:109 +#, python-format +msgid "RPC agent_id: %s" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:871 +#, python-format +msgid "Treating added or updated device: %s" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:877 +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1069 +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1108 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1100 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1139 +#, python-format +msgid "Unable to get port details for %(device)s: %(e)s" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:883 +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1075 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1106 +#, python-format +msgid "Port %(device)s updated. 
Details: %(details)s" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:915 +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:936 +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:368 +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1095 +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1157 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1126 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1188 +#, python-format +msgid "Device %s not defined on plugin" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:922 +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1125 +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1142 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1156 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1173 +#, python-format +msgid "Attachment %s removed" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:930 +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1132 +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1149 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1163 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1180 +#, python-format +msgid "port_removed failed for %(device)s: %(e)s" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:934 +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:366 +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1154 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1185 +#, python-format +msgid "Port %s updated." +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:960 +msgid "LinuxBridge Agent RPC Daemon Started!" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:978 +#, python-format +msgid "Agent loop found changes! 
%s" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:984 +#, python-format +msgid "Error in agent loop. Devices info: %s" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:1010 +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:419 +#, python-format +msgid "Parsing physical_interface_mappings failed: %s. Agent terminated!" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:1013 +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:422 +#, python-format +msgid "Interface mappings: %s" +msgstr "" + +#: neutron/plugins/linuxbridge/common/config.py:31 +#: neutron/plugins/mlnx/common/config.py:28 +msgid "Network type for tenant networks (local, vlan, or none)" +msgstr "" + +#: neutron/plugins/linuxbridge/common/config.py:41 +msgid "" +"Enable VXLAN on the agent. Can be enabled when agent is managed by ml2 " +"plugin using linuxbridge mechanism driver" +msgstr "" + +#: neutron/plugins/linuxbridge/common/config.py:45 +msgid "TTL for vxlan interface protocol packets." +msgstr "" + +#: neutron/plugins/linuxbridge/common/config.py:47 +msgid "TOS for vxlan interface protocol packets." +msgstr "" + +#: neutron/plugins/linuxbridge/common/config.py:49 +msgid "Multicast group for vxlan interface." +msgstr "" + +#: neutron/plugins/linuxbridge/common/config.py:51 +msgid "Local IP address of the VXLAN endpoints." +msgstr "" + +#: neutron/plugins/linuxbridge/common/config.py:53 +msgid "" +"Extension to use alongside ml2 plugin's l2population mechanism driver. It" +" enables the plugin to populate VXLAN forwarding table." 
+msgstr "" + +#: neutron/plugins/linuxbridge/common/config.py:61 +#: neutron/plugins/mlnx/common/config.py:47 +msgid "List of :" +msgstr "" + +#: neutron/plugins/linuxbridge/common/config.py:69 +#: neutron/plugins/mlnx/common/config.py:72 +msgid "Enable server RPC compatibility with old agents" +msgstr "" + +#: neutron/plugins/linuxbridge/db/l2network_db_v2.py:142 +#: neutron/plugins/ml2/drivers/type_vlan.py:210 +#: neutron/plugins/openvswitch/ovs_db_v2.py:161 +#, python-format +msgid "" +"Reserving specific vlan %(vlan_id)s on physical network " +"%(physical_network)s outside pool" +msgstr "" + +#: neutron/plugins/linuxbridge/db/l2network_db_v2.py:166 +#: neutron/plugins/ml2/drivers/type_vlan.py:259 +#: neutron/plugins/openvswitch/ovs_db_v2.py:191 +#, python-format +msgid "" +"Releasing vlan %(vlan_id)s on physical network %(physical_network)s to " +"pool" +msgstr "" + +#: neutron/plugins/linuxbridge/db/l2network_db_v2.py:171 +#: neutron/plugins/ml2/drivers/type_vlan.py:254 +#: neutron/plugins/openvswitch/ovs_db_v2.py:186 +#, python-format +msgid "" +"Releasing vlan %(vlan_id)s on physical network %(physical_network)s " +"outside pool" +msgstr "" + +#: neutron/plugins/linuxbridge/db/l2network_db_v2.py:202 +#: neutron/plugins/mlnx/db/mlnx_db_v2.py:214 +msgid "get_port_from_device() called" +msgstr "" + +#: neutron/plugins/linuxbridge/db/l2network_db_v2.py:230 +#, python-format +msgid "set_port_status as %s called" +msgstr "" + +#: neutron/plugins/metaplugin/meta_neutron_plugin.py:62 +#, python-format +msgid "Flavor %(flavor)s could not be found" +msgstr "" + +#: neutron/plugins/metaplugin/meta_neutron_plugin.py:66 +msgid "Failed to add flavor binding" +msgstr "" + +#: neutron/plugins/metaplugin/meta_neutron_plugin.py:75 +msgid "Start initializing metaplugin" +msgstr "" + +#: neutron/plugins/metaplugin/meta_neutron_plugin.py:127 +#, python-format +msgid "default_flavor %s is not plugin list" +msgstr "" + +#: neutron/plugins/metaplugin/meta_neutron_plugin.py:133 +#, 
python-format +msgid "default_l3_flavor %s is not plugin list" +msgstr "" + +#: neutron/plugins/metaplugin/meta_neutron_plugin.py:139 +#, python-format +msgid "rpc_flavor %s is not plugin list" +msgstr "" + +#: neutron/plugins/metaplugin/meta_neutron_plugin.py:171 +#, python-format +msgid "Plugin location: %s" +msgstr "" + +#: neutron/plugins/metaplugin/meta_neutron_plugin.py:222 +#, python-format +msgid "Created network: %(net_id)s with flavor %(flavor)s" +msgstr "" + +#: neutron/plugins/metaplugin/meta_neutron_plugin.py:228 +#: neutron/plugins/metaplugin/meta_neutron_plugin.py:372 +msgid "Failed to add flavor bindings" +msgstr "" + +#: neutron/plugins/metaplugin/meta_neutron_plugin.py:365 +#, python-format +msgid "Created router: %(router_id)s with flavor %(flavor)s" +msgstr "" + +#: neutron/plugins/metaplugin/meta_neutron_plugin.py:376 +#, python-format +msgid "Created router: %s" +msgstr "" + +#: neutron/plugins/metaplugin/proxy_neutron_plugin.py:68 +#, python-format +msgid "Update subnet failed: %s" +msgstr "" + +#: neutron/plugins/metaplugin/proxy_neutron_plugin.py:75 +msgid "Subnet in remote have already deleted" +msgstr "" + +#: neutron/plugins/metaplugin/proxy_neutron_plugin.py:96 +#, python-format +msgid "Update network failed: %s" +msgstr "" + +#: neutron/plugins/metaplugin/proxy_neutron_plugin.py:103 +msgid "Network in remote have already deleted" +msgstr "" + +#: neutron/plugins/metaplugin/proxy_neutron_plugin.py:124 +#, python-format +msgid "Update port failed: %s" +msgstr "" + +#: neutron/plugins/metaplugin/proxy_neutron_plugin.py:135 +msgid "Port in remote have already deleted" +msgstr "" + +#: neutron/plugins/metaplugin/common/config.py:25 +msgid "" +"Comma separated list of flavor:neutron_plugin for plugins to load. " +"Extension method is searched in the list order and the first one is used." 
+msgstr "" + +#: neutron/plugins/metaplugin/common/config.py:31 +msgid "" +"Comma separated list of flavor:neutron_plugin for L3 service plugins to " +"load. This is intended for specifying L2 plugins which support L3 " +"functions. If you use a router service plugin, set this blank." +msgstr "" + +#: neutron/plugins/metaplugin/common/config.py:38 +msgid "" +"Default flavor to use, when flavor:network is not specified at network " +"creation." +msgstr "" + +#: neutron/plugins/metaplugin/common/config.py:43 +msgid "" +"Default L3 flavor to use, when flavor:router is not specified at router " +"creation. Ignored if 'l3_plugin_list' is blank." +msgstr "" + +#: neutron/plugins/metaplugin/common/config.py:49 +msgid "Comma separated list of supported extension aliases." +msgstr "" + +#: neutron/plugins/metaplugin/common/config.py:53 +msgid "" +"Comma separated list of method:flavor to select specific plugin for a " +"method. This has priority over method search order based on " +"'plugin_list'." +msgstr "" + +#: neutron/plugins/metaplugin/common/config.py:59 +msgid "Specifies flavor for plugin to handle 'q-plugin' RPC requests." 
+msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:46 +#, python-format +msgid "MidoNet %(resource_type)s %(id)s could not be found" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:50 +#, python-format +msgid "MidoNet API error: %(msg)s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:84 +#, python-format +msgid "MidoClient.create_bridge called: kwargs=%(kwargs)s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:94 +#, python-format +msgid "MidoClient.delete_bridge called: id=%(id)s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:104 +#, python-format +msgid "MidoClient.get_bridge called: id=%s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:118 +#, python-format +msgid "MidoClient.update_bridge called: id=%(id)s, kwargs=%(kwargs)s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:138 +#, python-format +msgid "" +"MidoClient.create_dhcp called: bridge=%(bridge)s, cidr=%(cidr)s, " +"gateway_ip=%(gateway_ip)s, host_rts=%(host_rts)s, " +"dns_servers=%(dns_servers)s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:156 +#, python-format +msgid "" +"MidoClient.add_dhcp_host called: bridge=%(bridge)s, cidr=%(cidr)s, " +"ip=%(ip)s, mac=%(mac)s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:161 +msgid "Tried to add tonon-existent DHCP" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:175 +#, python-format +msgid "" +"MidoClient.remove_dhcp_host called: bridge=%(bridge)s, cidr=%(cidr)s, " +"ip=%(ip)s, mac=%(mac)s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:180 +msgid "Tried to delete mapping from non-existent subnet" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:185 +#, python-format +msgid "MidoClient.remove_dhcp_host: Deleting %(dh)r" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:198 +#, python-format +msgid "" +"MidoClient.delete_dhcp_host called: bridge_id=%(bridge_id)s, " +"cidr=%(cidr)s, ip=%(ip)s, mac=%(mac)s" +msgstr "" + +#: 
neutron/plugins/midonet/midonet_lib.py:213 +#, python-format +msgid "MidoClient.delete_dhcp called: bridge=%(bridge)s, cidr=%(cidr)s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:220 +msgid "Tried to delete non-existent DHCP" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:232 +#, python-format +msgid "MidoClient.delete_port called: id=%(id)s, delete_chains=%(delete_chains)s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:247 +#, python-format +msgid "MidoClient.get_port called: id=%(id)s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:261 +#, python-format +msgid "MidoClient.add_bridge_port called: bridge=%(bridge)s, kwargs=%(kwargs)s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:273 +#, python-format +msgid "MidoClient.update_port called: id=%(id)s, kwargs=%(kwargs)s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:298 +#, python-format +msgid "MidoClient.create_router called: kwargs=%(kwargs)s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:308 +#, python-format +msgid "MidoClient.delete_router called: id=%(id)s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:318 +#, python-format +msgid "MidoClient.get_router called: id=%(id)s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:332 +#, python-format +msgid "MidoClient.update_router called: id=%(id)s, kwargs=%(kwargs)s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:353 +#, python-format +msgid "" +"MidoClient.add_dhcp_route_option called: bridge=%(bridge)s, " +"cidr=%(cidr)s, gw_ip=%(gw_ip)sdst_ip=%(dst_ip)s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:361 +msgid "Tried to access non-existent DHCP" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:393 +#, python-format +msgid "MidoClient.unlink called: port=%(port)s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:398 +#, python-format +msgid "Attempted to unlink a port that was not linked. 
%s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:404 +#, python-format +msgid "" +"MidoClient.remove_rules_by_property called: tenant_id=%(tenant_id)s, " +"chain_name=%(chain_name)skey=%(key)s, value=%(value)s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:431 +#, python-format +msgid "" +"MidoClient.create_router_chains called: router=%(router)s, " +"inbound_chain_name=%(in_chain)s, outbound_chain_name=%(out_chain)s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:454 +#, python-format +msgid "MidoClient.delete_router_chains called: id=%(id)s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:469 +#, python-format +msgid "MidoClient.delete_port_chains called: id=%(id)s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:481 +#, python-format +msgid "" +"MidoClient.get_link_port called: router=%(router)s, " +"peer_router_id=%(peer_router_id)s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:520 +#, python-format +msgid "" +"MidoClient.add_static_nat called: tenant_id=%(tenant_id)s, " +"chain_name=%(chain_name)s, from_ip=%(from_ip)s, to_ip=%(to_ip)s, " +"port_id=%(port_id)s, nat_type=%(nat_type)s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:528 +#, python-format +msgid "Invalid NAT type passed in %s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:580 +#, python-format +msgid "MidoClient.remote_static_route called: router=%(router)s, ip=%(ip)s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:591 +#, python-format +msgid "" +"MidoClient.update_port_chains called: " +"port=%(port)sinbound_chain_id=%(inbound_chain_id)s, " +"outbound_chain_id=%(outbound_chain_id)s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:602 +#, python-format +msgid "MidoClient.create_chain called: tenant_id=%(tenant_id)s name=%(name)s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:610 +#, python-format +msgid "MidoClient.delete_chain called: id=%(id)s" +msgstr "" + +#: 
neutron/plugins/midonet/midonet_lib.py:616 +#, python-format +msgid "" +"MidoClient.delete_chains_by_names called: tenant_id=%(tenant_id)s " +"names=%(names)s " +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:627 +#, python-format +msgid "" +"MidoClient.get_chain_by_name called: tenant_id=%(tenant_id)s " +"name=%(name)s " +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:638 +#, python-format +msgid "" +"MidoClient.get_port_group_by_name called: tenant_id=%(tenant_id)s " +"name=%(name)s " +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:652 +#, python-format +msgid "MidoClient.create_port_group called: tenant_id=%(tenant_id)s name=%(name)s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:661 +#, python-format +msgid "" +"MidoClient.delete_port_group_by_name called: tenant_id=%(tenant_id)s " +"name=%(name)s " +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:667 +#, python-format +msgid "Deleting pg %(id)s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:673 +#, python-format +msgid "" +"MidoClient.add_port_to_port_group_by_name called: tenant_id=%(tenant_id)s" +" name=%(name)s port_id=%(port_id)s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:687 +#, python-format +msgid "MidoClient.remove_port_from_port_groups called: port_id=%(port_id)s" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:82 +#, python-format +msgid "Invalid nat_type %s" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:139 +#, python-format +msgid "Unrecognized direction %s" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:173 +#, python-format +msgid "There is no %(name)s with ID %(id)s in MidoNet." 
+msgstr "" + +#: neutron/plugins/midonet/plugin.py:185 +#: neutron/plugins/ml2/drivers/mech_arista/exceptions.py:23 +#: neutron/plugins/ml2/drivers/mech_arista/exceptions.py:27 +#, python-format +msgid "%(msg)s" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:217 +msgid "provider_router_id should be configured in the plugin config file" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:387 +#, python-format +msgid "MidonetPluginV2.create_subnet called: subnet=%r" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:419 +#, python-format +msgid "MidonetPluginV2.create_subnet exiting: sn_entry=%r" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:428 +#, python-format +msgid "MidonetPluginV2.delete_subnet called: id=%s" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:447 +msgid "MidonetPluginV2.delete_subnet exiting" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:454 +#, python-format +msgid "MidonetPluginV2.create_network called: network=%r" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:469 +#, python-format +msgid "MidonetPluginV2.create_network exiting: net=%r" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:478 +#, python-format +msgid "MidonetPluginV2.update_network called: id=%(id)r, network=%(network)r" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:487 +#, python-format +msgid "MidonetPluginV2.update_network exiting: net=%r" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:495 +#, python-format +msgid "MidonetPluginV2.get_network called: id=%(id)r, fields=%(fields)r" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:500 +#, python-format +msgid "MidonetPluginV2.get_network exiting: qnet=%r" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:505 +#, python-format +msgid "MidonetPluginV2.delete_network called: id=%r" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:513 +#, python-format +msgid "Failed to delete neutron db, while Midonet bridge=%r had been deleted" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:518 +#, 
python-format +msgid "MidonetPluginV2.create_port called: port=%r" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:579 +#, python-format +msgid "Failed to create a port on network %(net_id)s: %(err)s" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:584 +#, python-format +msgid "MidonetPluginV2.create_port exiting: port=%r" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:589 +#, python-format +msgid "MidonetPluginV2.get_port called: id=%(id)s fields=%(fields)r" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:596 +#, python-format +msgid "There is no port with ID %(id)s in MidoNet." +msgstr "" + +#: neutron/plugins/midonet/plugin.py:600 +#, python-format +msgid "MidonetPluginV2.get_port exiting: port=%r" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:605 +#, python-format +msgid "MidonetPluginV2.get_ports called: filters=%(filters)s fields=%(fields)r" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:614 +#, python-format +msgid "" +"MidonetPluginV2.delete_port called: id=%(id)s " +"l3_port_check=%(l3_port_check)r" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:648 +#, python-format +msgid "Failed to delete DHCP mapping for port %(id)s" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:723 +#, python-format +msgid "MidonetPluginV2.create_router called: router=%(router)s" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:770 +#, python-format +msgid "MidonetPluginV2.create_router exiting: router_data=%(router_data)s." 
+msgstr "" + +#: neutron/plugins/midonet/plugin.py:782 +#, python-format +msgid "" +"MidonetPluginV2.set_router_gateway called: id=%(id)s, " +"gw_router=%(gw_router)s, gw_ip=%(gw_ip)s" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:826 +#, python-format +msgid "MidonetPluginV2.remove_router_gateway called: id=%(id)s" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:846 +#, python-format +msgid "MidonetPluginV2.update_router called: id=%(id)s router=%(router)r" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:886 +#, python-format +msgid "MidonetPluginV2.update_router exiting: router=%r" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:897 +#, python-format +msgid "MidonetPluginV2.delete_router called: id=%s" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:997 +#, python-format +msgid "" +"MidonetPluginV2.add_router_interface called: router_id=%(router_id)s " +"interface_info=%(interface_info)r" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:1021 +msgid "" +"DHCP agent is not working correctly. No port to reach the Metadata server" +" on this network" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:1030 +#, python-format +msgid "" +"Failed to create MidoNet resources to add router interface. 
" +"info=%(info)s, router_id=%(router_id)s" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:1037 +#, python-format +msgid "MidonetPluginV2.add_router_interface exiting: info=%r" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:1077 +#, python-format +msgid "" +"MidonetPluginV2.update_floatingip called: id=%(id)s " +"floatingip=%(floatingip)s " +msgstr "" + +#: neutron/plugins/midonet/plugin.py:1096 +#, python-format +msgid "MidonetPluginV2.update_floating_ip exiting: fip=%s" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:1118 +#, python-format +msgid "" +"MidonetPluginV2.create_security_group called: " +"security_group=%(security_group)s default_sg=%(default_sg)s " +msgstr "" + +#: neutron/plugins/midonet/plugin.py:1147 +#, python-format +msgid "Failed to create MidoNet resources for sg %(sg)r" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:1154 +#, python-format +msgid "MidonetPluginV2.create_security_group exiting: sg=%r" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:1160 +#, python-format +msgid "MidonetPluginV2.delete_security_group called: id=%s" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:1192 +#, python-format +msgid "" +"MidonetPluginV2.create_security_group_rule called: " +"security_group_rule=%(security_group_rule)r" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:1202 +#, python-format +msgid "MidonetPluginV2.create_security_group_rule exiting: rule=%r" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:1212 +#, python-format +msgid "MidonetPluginV2.delete_security_group_rule called: sg_rule_id=%s" +msgstr "" + +#: neutron/plugins/midonet/common/config.py:25 +msgid "MidoNet API server URI." +msgstr "" + +#: neutron/plugins/midonet/common/config.py:27 +msgid "MidoNet admin username." +msgstr "" + +#: neutron/plugins/midonet/common/config.py:30 +msgid "MidoNet admin password." +msgstr "" + +#: neutron/plugins/midonet/common/config.py:33 +msgid "ID of the project that MidoNet admin userbelongs to." 
+msgstr "" + +#: neutron/plugins/midonet/common/config.py:36 +msgid "Virtual provider router ID." +msgstr "" + +#: neutron/plugins/midonet/common/config.py:39 +msgid "Operational mode. Internal dev use only." +msgstr "" + +#: neutron/plugins/midonet/common/config.py:42 +msgid "Path to midonet host uuid file" +msgstr "" + +#: neutron/plugins/ml2/config.py:22 +msgid "" +"List of network type driver entrypoints to be loaded from the " +"neutron.ml2.type_drivers namespace." +msgstr "" + +#: neutron/plugins/ml2/config.py:26 +msgid "Ordered list of network_types to allocate as tenant networks." +msgstr "" + +#: neutron/plugins/ml2/config.py:30 +msgid "" +"An ordered list of networking mechanism driver entrypoints to be loaded " +"from the neutron.ml2.mechanism_drivers namespace." +msgstr "" + +#: neutron/plugins/ml2/db.py:41 +#, python-format +msgid "Added segment %(id)s of type %(network_type)s for network %(network_id)s" +msgstr "" + +#: neutron/plugins/ml2/db.py:85 +#, python-format +msgid "Multiple ports have port_id starting with %s" +msgstr "" + +#: neutron/plugins/ml2/db.py:91 +#, python-format +msgid "get_port_from_device_mac() called for mac %s" +msgstr "" + +#: neutron/plugins/ml2/db.py:133 +#, python-format +msgid "No binding found for port %(port_id)s" +msgstr "" + +#: neutron/plugins/ml2/managers.py:36 +#, python-format +msgid "Configured type driver names: %s" +msgstr "" + +#: neutron/plugins/ml2/managers.py:41 +#, python-format +msgid "Loaded type driver names: %s" +msgstr "" + +#: neutron/plugins/ml2/managers.py:49 +#, python-format +msgid "" +"Type driver '%(new_driver)s' ignored because type driver '%(old_driver)s'" +" is already registered for type '%(type)s'" +msgstr "" + +#: neutron/plugins/ml2/managers.py:57 +#, python-format +msgid "Registered types: %s" +msgstr "" + +#: neutron/plugins/ml2/managers.py:65 +#, python-format +msgid "No type driver for tenant network_type: %s. Service terminated!" 
+msgstr "" + +#: neutron/plugins/ml2/managers.py:69 +#, python-format +msgid "Tenant network_types: %s" +msgstr "" + +#: neutron/plugins/ml2/managers.py:73 +#, python-format +msgid "Initializing driver for type '%s'" +msgstr "" + +#: neutron/plugins/ml2/managers.py:82 +#: neutron/plugins/ml2/drivers/type_tunnel.py:116 +#, python-format +msgid "network_type value '%s' not supported" +msgstr "" + +#: neutron/plugins/ml2/managers.py:108 +#, python-format +msgid "Failed to release segment '%s' because network type is not supported." +msgstr "" + +#: neutron/plugins/ml2/managers.py:124 +#, python-format +msgid "Configured mechanism driver names: %s" +msgstr "" + +#: neutron/plugins/ml2/managers.py:130 +#, python-format +msgid "Loaded mechanism driver names: %s" +msgstr "" + +#: neutron/plugins/ml2/managers.py:142 +#, python-format +msgid "Registered mechanism drivers: %s" +msgstr "" + +#: neutron/plugins/ml2/managers.py:149 +#, python-format +msgid "Initializing mechanism driver '%s'" +msgstr "" + +#: neutron/plugins/ml2/managers.py:171 +#, python-format +msgid "Mechanism driver '%(name)s' failed in %(method)s" +msgstr "" + +#: neutron/plugins/ml2/managers.py:447 +#, python-format +msgid "" +"Attempting to bind port %(port)s on host %(host)s for vnic_type " +"%(vnic_type)s with profile %(profile)s" +msgstr "" + +#: neutron/plugins/ml2/managers.py:458 +#, python-format +msgid "" +"Bound port: %(port)s, host: %(host)s, vnic_type: %(vnic_type)s, profile: " +"%(profile)sdriver: %(driver)s, vif_type: %(vif_type)s, vif_details: " +"%(vif_details)s, segment: %(segment)s" +msgstr "" + +#: neutron/plugins/ml2/managers.py:474 +#, python-format +msgid "Mechanism driver %s failed in bind_port" +msgstr "" + +#: neutron/plugins/ml2/managers.py:478 +#, python-format +msgid "Failed to bind port %(port)s on host %(host)s" +msgstr "" + +#: neutron/plugins/ml2/plugin.py:121 +msgid "Modular L2 Plugin initialization complete" +msgstr "" + +#: neutron/plugins/ml2/plugin.py:152 +msgid 
"network_type required" +msgstr "" + +#: neutron/plugins/ml2/plugin.py:191 +#, python-format +msgid "Network %s has no segments" +msgstr "" + +#: neutron/plugins/ml2/plugin.py:254 +msgid "binding:profile value too large" +msgstr "" + +#: neutron/plugins/ml2/plugin.py:290 +#, python-format +msgid "Serialized vif_details DB value '%(value)s' for port %(port)s is invalid" +msgstr "" + +#: neutron/plugins/ml2/plugin.py:301 +#, python-format +msgid "Serialized profile DB value '%(value)s' for port %(port)s is invalid" +msgstr "" + +#: neutron/plugins/ml2/plugin.py:354 +#, python-format +msgid "" +"In _notify_port_updated(), no bound segment for port %(port_id)s on " +"network %(network_id)s" +msgstr "" + +#: neutron/plugins/ml2/plugin.py:396 +#, python-format +msgid "mechanism_manager.create_network_postcommit failed, deleting network '%s'" +msgstr "" + +#: neutron/plugins/ml2/plugin.py:456 +#, python-format +msgid "Deleting network %s" +msgstr "" + +#: neutron/plugins/ml2/plugin.py:468 +#, python-format +msgid "Ports to auto-delete: %s" +msgstr "" + +#: neutron/plugins/ml2/plugin.py:474 neutron/plugins/ml2/plugin.py:594 +msgid "Tenant-owned ports exist" +msgstr "" + +#: neutron/plugins/ml2/plugin.py:482 +#, python-format +msgid "Subnets to auto-delete: %s" +msgstr "" + +#: neutron/plugins/ml2/plugin.py:493 +#, python-format +msgid "Deleting network record %s" +msgstr "" + +#: neutron/plugins/ml2/plugin.py:501 neutron/plugins/ml2/plugin.py:607 +msgid "Committing transaction" +msgstr "" + +#: neutron/plugins/ml2/plugin.py:507 +msgid "A concurrent port creation has occurred" +msgstr "" + +#: neutron/plugins/ml2/plugin.py:516 +#, python-format +msgid "Exception auto-deleting port %s" +msgstr "" + +#: neutron/plugins/ml2/plugin.py:524 +#, python-format +msgid "Exception auto-deleting subnet %s" +msgstr "" + +#: neutron/plugins/ml2/plugin.py:533 +msgid "mechanism_manager.delete_network_postcommit failed" +msgstr "" + +#: neutron/plugins/ml2/plugin.py:547 +#, python-format 
+msgid "mechanism_manager.create_subnet_postcommit failed, deleting subnet '%s'" +msgstr "" + +#: neutron/plugins/ml2/plugin.py:577 +#, python-format +msgid "Deleting subnet %s" +msgstr "" + +#: neutron/plugins/ml2/plugin.py:588 +#, python-format +msgid "Ports to auto-deallocate: %s" +msgstr "" + +#: neutron/plugins/ml2/plugin.py:603 +msgid "Deleting subnet record" +msgstr "" + +#: neutron/plugins/ml2/plugin.py:623 +#, python-format +msgid "Exception deleting fixed_ip from port %s" +msgstr "" + +#: neutron/plugins/ml2/plugin.py:633 +msgid "mechanism_manager.delete_subnet_postcommit failed" +msgstr "" + +#: neutron/plugins/ml2/plugin.py:662 +#, python-format +msgid "mechanism_manager.create_port_postcommit failed, deleting port '%s'" +msgstr "" + +#: neutron/plugins/ml2/plugin.py:718 +#: neutron/tests/unit/ml2/test_ml2_plugin.py:131 +#, python-format +msgid "Deleting port %s" +msgstr "" + +#: neutron/plugins/ml2/plugin.py:736 +#: neutron/tests/unit/ml2/test_ml2_plugin.py:132 +#, python-format +msgid "The port '%s' was deleted" +msgstr "" + +#: neutron/plugins/ml2/plugin.py:745 +msgid "Calling base delete_port" +msgstr "" + +#: neutron/plugins/ml2/plugin.py:757 +msgid "mechanism_manager.delete_port_postcommit failed" +msgstr "" + +#: neutron/plugins/ml2/plugin.py:769 +#, python-format +msgid "Port %(port)s updated up by agent not found" +msgstr "" + +#: neutron/plugins/ml2/rpc.py:88 +#, python-format +msgid "Device %(device)s details requested by agent %(agent_id)s" +msgstr "" + +#: neutron/plugins/ml2/rpc.py:97 +#, python-format +msgid "Device %(device)s requested by agent %(agent_id)s not found in database" +msgstr "" + +#: neutron/plugins/ml2/rpc.py:104 +#, python-format +msgid "" +"Device %(device)s requested by agent %(agent_id)s has network " +"%(network_id)s with no segments" +msgstr "" + +#: neutron/plugins/ml2/rpc.py:114 +#, python-format +msgid "" +"Device %(device)s requested by agent %(agent_id)s on network " +"%(network_id)s not bound, vif_type: 
%(vif_type)s" +msgstr "" + +#: neutron/plugins/ml2/rpc.py:125 +#, python-format +msgid "" +"Device %(device)s requested by agent %(agent_id)s on network " +"%(network_id)s invalid segment, vif_type: %(vif_type)s" +msgstr "" + +#: neutron/plugins/ml2/rpc.py:149 +#, python-format +msgid "Returning: %s" +msgstr "" + +#: neutron/plugins/ml2/rpc.py:163 +#, python-format +msgid "Device %(device)s no longer exists at agent %(agent_id)s" +msgstr "" + +#: neutron/plugins/ml2/rpc.py:187 +#, python-format +msgid "Device %(device)s up at agent %(agent_id)s" +msgstr "" + +#: neutron/plugins/ml2/common/exceptions.py:23 +#, python-format +msgid "%(method)s failed." +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_agent.py:54 +#: neutron/plugins/ml2/drivers/mechanism_odl.py:345 +#, python-format +msgid "Attempting to bind port %(port)s on network %(network)s" +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_agent.py:61 +#, python-format +msgid "Refusing to bind due to unsupported vnic_type: %s" +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_agent.py:65 +#, python-format +msgid "Checking agent: %s" +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_agent.py:70 +#: neutron/plugins/ml2/drivers/mechanism_odl.py:355 +#, python-format +msgid "Bound using segment: %s" +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_agent.py:73 +#, python-format +msgid "Attempting to bind with dead agent: %s" +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_hyperv.py:44 +#, python-format +msgid "Checking segment: %(segment)s for mappings: %(mappings)s" +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_linuxbridge.py:44 +#: neutron/plugins/ml2/drivers/mech_ofagent.py:50 +#: neutron/plugins/ml2/drivers/mech_openvswitch.py:45 +#, python-format +msgid "" +"Checking segment: %(segment)s for mappings: %(mappings)s with " +"tunnel_types: %(tunnel_types)s" +msgstr "" + +#: neutron/plugins/ml2/drivers/mechanism_fslsdn.py:35 +msgid "CRD service Username" +msgstr "" + +#: 
neutron/plugins/ml2/drivers/mechanism_fslsdn.py:38 +msgid "CRD Service Password" +msgstr "" + +#: neutron/plugins/ml2/drivers/mechanism_fslsdn.py:40 +msgid "CRD Tenant Name" +msgstr "" + +#: neutron/plugins/ml2/drivers/mechanism_fslsdn.py:43 +msgid "CRD Auth URL" +msgstr "" + +#: neutron/plugins/ml2/drivers/mechanism_fslsdn.py:46 +msgid "URL for connecting to CRD service" +msgstr "" + +#: neutron/plugins/ml2/drivers/mechanism_fslsdn.py:49 +msgid "Timeout value for connecting to CRD service in seconds" +msgstr "" + +#: neutron/plugins/ml2/drivers/mechanism_fslsdn.py:53 +msgid "Region name for connecting to CRD Service in admin context" +msgstr "" + +#: neutron/plugins/ml2/drivers/mechanism_fslsdn.py:57 +msgid "If set, ignore any SSL validation issues" +msgstr "" + +#: neutron/plugins/ml2/drivers/mechanism_fslsdn.py:60 +msgid "Auth strategy for connecting to neutron in admin context" +msgstr "" + +#: neutron/plugins/ml2/drivers/mechanism_fslsdn.py:63 +msgid "Location of ca certificates file to use for CRD client requests." +msgstr "" + +#: neutron/plugins/ml2/drivers/mechanism_fslsdn.py:87 +msgid "Initializing CRD client... " +msgstr "" + +#: neutron/plugins/ml2/drivers/mechanism_ncs.py:29 +msgid "HTTP URL of Tail-f NCS REST interface." +msgstr "" + +#: neutron/plugins/ml2/drivers/mechanism_ncs.py:31 +#: neutron/plugins/ml2/drivers/mechanism_odl.py:50 +msgid "HTTP username for authentication" +msgstr "" + +#: neutron/plugins/ml2/drivers/mechanism_ncs.py:33 +#: neutron/plugins/ml2/drivers/mechanism_odl.py:52 +msgid "HTTP password for authentication" +msgstr "" + +#: neutron/plugins/ml2/drivers/mechanism_ncs.py:35 +#: neutron/plugins/ml2/drivers/mechanism_odl.py:54 +msgid "HTTP timeout in seconds." +msgstr "" + +#: neutron/plugins/ml2/drivers/mechanism_odl.py:48 +msgid "HTTP URL of OpenDaylight REST interface." +msgstr "" + +#: neutron/plugins/ml2/drivers/mechanism_odl.py:56 +msgid "Tomcat session timeout in minutes." 
+msgstr "" + +#: neutron/plugins/ml2/drivers/mechanism_odl.py:106 +#, python-format +msgid "Failed to authenticate with OpenDaylight: %s" +msgstr "" + +#: neutron/plugins/ml2/drivers/mechanism_odl.py:109 +#, python-format +msgid "Authentication Timed Out: %s" +msgstr "" + +#: neutron/plugins/ml2/drivers/mechanism_odl.py:297 +#, python-format +msgid "%(object_type)s not found (%(obj_id)s)" +msgstr "" + +#: neutron/plugins/ml2/drivers/mechanism_odl.py:333 +#, python-format +msgid "ODL-----> sending URL (%s) <-----ODL" +msgstr "" + +#: neutron/plugins/ml2/drivers/mechanism_odl.py:334 +#, python-format +msgid "ODL-----> sending JSON (%s) <-----ODL" +msgstr "" + +#: neutron/plugins/ml2/drivers/mechanism_odl.py:358 +#, python-format +msgid "" +"Refusing to bind port for segment ID %(id)s, segment %(seg)s, phys net " +"%(physnet)s, and network type %(nettype)s" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_flat.py:30 +msgid "" +"List of physical_network names with which flat networks can be created. " +"Use * to allow flat networks with arbitrary physical_network names." 
+msgstr "" + +#: neutron/plugins/ml2/drivers/type_flat.py:68 +msgid "Arbitrary flat physical_network names allowed" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_flat.py:73 +#, python-format +msgid "Allowable flat physical_network names: %s" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_flat.py:80 +msgid "ML2 FlatTypeDriver initialization complete" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_flat.py:85 +msgid "physical_network required for flat provider network" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_flat.py:88 +#, python-format +msgid "physical_network '%s' unknown for flat provider network" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_flat.py:95 +#, python-format +msgid "%s prohibited for flat provider network" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_flat.py:109 +#, python-format +msgid "Reserving flat network on physical network %s" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_flat.py:127 +#, python-format +msgid "Releasing flat network on physical network %s" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_flat.py:130 +#, python-format +msgid "No flat network found on physical network %s" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_gre.py:34 +msgid "" +"Comma-separated list of : tuples enumerating ranges of " +"GRE tunnel IDs that are available for tenant network allocation" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_gre.py:85 +#, python-format +msgid "Reserving specific gre tunnel %s from pool" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_gre.py:89 +#, python-format +msgid "Reserving specific gre tunnel %s outside pool" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_gre.py:102 +#, python-format +msgid "Allocating gre tunnel id %(gre_id)s" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_gre.py:120 +#, python-format +msgid "Releasing gre tunnel %s to pool" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_gre.py:125 +#, python-format +msgid "Releasing gre tunnel %s outside pool" +msgstr "" + 
+#: neutron/plugins/ml2/drivers/type_gre.py:128 +#, python-format +msgid "gre_id %s not found" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_gre.py:138 +#, python-format +msgid "Skipping unreasonable gre ID range %(tun_min)s:%(tun_max)s" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_gre.py:156 +#: neutron/plugins/ml2/drivers/type_vxlan.py:165 +#: neutron/plugins/openvswitch/ovs_db_v2.py:229 +#, python-format +msgid "Removing tunnel %s from pool" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_gre.py:171 +msgid "get_gre_endpoints() called" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_gre.py:180 +#, python-format +msgid "add_gre_endpoint() called for ip %s" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_gre.py:186 +#, python-format +msgid "Gre endpoint with ip %s already exists" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_local.py:35 +msgid "ML2 LocalTypeDriver initialization complete" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_local.py:46 +#, python-format +msgid "%s prohibited for local provider network" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_tunnel.py:61 +#, python-format +msgid "Invalid tunnel ID range: '%(range)s' - %(e)s. Agent terminated!" 
+msgstr "" + +#: neutron/plugins/ml2/drivers/type_tunnel.py:64 +#, python-format +msgid "%(type)s ID ranges: %(range)s" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_tunnel.py:70 +#: neutron/plugins/openvswitch/ovs_neutron_plugin.py:421 +#, python-format +msgid "provider:physical_network specified for %s network" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_tunnel.py:76 +#, python-format +msgid "segmentation_id required for %s provider network" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_tunnel.py:83 +#, python-format +msgid "%(key)s prohibited for %(tunnel)s provider network" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_tunnel.py:103 +msgid "Network_type value needed by the ML2 plugin" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_vlan.py:37 +msgid "" +"List of :: or " +"specifying physical_network names usable for VLAN provider and tenant " +"networks, as well as ranges of VLAN tags on each available for allocation" +" to tenant networks." +msgstr "" + +#: neutron/plugins/ml2/drivers/type_vlan.py:91 +msgid "Failed to parse network_vlan_ranges. Service terminated!" 
+msgstr "" + +#: neutron/plugins/ml2/drivers/type_vlan.py:161 +msgid "VlanTypeDriver initialization complete" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_vlan.py:166 +msgid "physical_network required for VLAN provider network" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_vlan.py:169 +#, python-format +msgid "physical_network '%s' unknown for VLAN provider network" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_vlan.py:175 +msgid "segmentation_id required for VLAN provider network" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_vlan.py:178 +#, python-format +msgid "segmentation_id out of range (%(min)s through %(max)s)" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_vlan.py:188 +#, python-format +msgid "%s prohibited for VLAN provider network" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_vlan.py:226 +#, python-format +msgid "" +"Allocating vlan %(vlan_id)s on physical network %(physical_network)s from" +" pool" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_vlan.py:264 +#, python-format +msgid "No vlan_id %(vlan_id)s found on physical network %(physical_network)s" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_vxlan.py:37 +msgid "" +"Comma-separated list of : tuples enumerating ranges of " +"VXLAN VNI IDs that are available for tenant network allocation" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_vxlan.py:41 +msgid "Multicast group for VXLAN. If unset, disables VXLAN multicast mode." 
+msgstr "" + +#: neutron/plugins/ml2/drivers/type_vxlan.py:93 +#, python-format +msgid "Reserving specific vxlan tunnel %s from pool" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_vxlan.py:97 +#, python-format +msgid "Reserving specific vxlan tunnel %s outside pool" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_vxlan.py:110 +#, python-format +msgid "Allocating vxlan tunnel vni %(vxlan_vni)s" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_vxlan.py:128 +#, python-format +msgid "Releasing vxlan tunnel %s to pool" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_vxlan.py:133 +#, python-format +msgid "Releasing vxlan tunnel %s outside pool" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_vxlan.py:136 +#, python-format +msgid "vxlan_vni %s not found" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_vxlan.py:147 +#, python-format +msgid "Skipping unreasonable VXLAN VNI range %(tun_min)s:%(tun_max)s" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_vxlan.py:182 +msgid "get_vxlan_endpoints() called" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_vxlan.py:192 +#, python-format +msgid "add_vxlan_endpoint() called for ip %s" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:40 +msgid "Allowed physical networks" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:42 +msgid "Unused" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:93 +msgid "" +"Brocade Mechanism: failed to create network, network cannot be created in" +" the configured physical network" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:99 +msgid "" +"Brocade Mechanism: failed to create network, only network type vlan is " +"supported" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:107 +msgid "Brocade Mechanism: failed to create network in db" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:109 +msgid "Brocade Mechanism: create_network_precommit 
failed" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:111 +#, python-format +msgid "" +"create network (precommit): %(network_id)s of network type = " +"%(network_type)s with vlan = %(vlan_id)s for tenant %(tenant_id)s" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:123 +msgid "create_network_postcommit: called" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:143 +msgid "Brocade NOS driver: failed in create network" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:146 +msgid "Brocade Mechanism: create_network_postcommmit failed" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:148 +#, python-format +msgid "" +"created network (postcommit): %(network_id)s of network type = " +"%(network_type)s with vlan = %(vlan_id)s for tenant %(tenant_id)s" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:160 +msgid "delete_network_precommit: called" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:173 +msgid "Brocade Mechanism: failed to delete network in db" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:175 +msgid "Brocade Mechanism: delete_network_precommit failed" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:177 +#, python-format +msgid "" +"delete network (precommit): %(network_id)s with vlan = %(vlan_id)s for " +"tenant %(tenant_id)s" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:189 +msgid "delete_network_postcommit: called" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:201 +msgid "Brocade NOS driver: failed to delete network" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:203 +msgid "Brocade switch exception, delete_network_postcommit failed" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:206 +#, python-format +msgid "" +"delete 
network (postcommit): %(network_id)s with vlan = %(vlan_id)s for " +"tenant %(tenant_id)s" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:224 +msgid "create_port_precommit: called" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:242 +msgid "Brocade Mechanism: failed to create port in db" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:244 +msgid "Brocade Mechanism: create_port_precommit failed" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:249 +msgid "create_port_postcommit: called" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:273 +#, python-format +msgid "Brocade NOS driver: failed to associate mac %s" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:276 +msgid "Brocade switch exception: create_port_postcommit failed" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:279 +#, python-format +msgid "" +"created port (postcommit): port_id=%(port_id)s network_id=%(network_id)s " +"tenant_id=%(tenant_id)s" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:287 +msgid "delete_port_precommit: called" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:296 +msgid "Brocade Mechanism: failed to delete port in db" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:298 +msgid "Brocade Mechanism: delete_port_precommit failed" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:303 +msgid "delete_port_postcommit: called" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:327 +#, python-format +msgid "Brocade NOS driver: failed to dissociate MAC %s" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:330 +msgid "Brocade switch exception, delete_port_postcommit failed" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:333 +#, python-format 
+msgid "" +"delete port (postcommit): port_id=%(port_id)s network_id=%(network_id)s " +"tenant_id=%(tenant_id)s" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:340 +msgid "update_port_precommit(self: called" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:344 +msgid "update_port_postcommit: called" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:348 +msgid "create_subnetwork_precommit: called" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:352 +msgid "create_subnetwork_postcommit: called" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:356 +msgid "delete_subnetwork_precommit: called" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:360 +msgid "delete_subnetwork_postcommit: called" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:364 +msgid "update_subnet_precommit(self: called" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:368 +msgid "update_subnet_postcommit: called" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/nos/nosdriver.py:64 +msgid "" +"Brocade Switch IP address is not set, check config ml2_conf_brocade.ini " +"file" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/nos/nosdriver.py:74 +msgid "Connect failed to switch" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/nos/nosdriver.py:101 +#: neutron/plugins/ml2/drivers/brocade/nos/nosdriver.py:115 +#: neutron/plugins/ml2/drivers/brocade/nos/nosdriver.py:128 +#: neutron/plugins/ml2/drivers/brocade/nos/nosdriver.py:141 +msgid "NETCONF error" +msgstr "" + +#: neutron/plugins/ml2/drivers/cisco/apic/apic_client.py:223 +#, python-format +msgid "data = %s" +msgstr "" + +#: neutron/plugins/ml2/drivers/cisco/apic/apic_client.py:226 +#: neutron/services/loadbalancer/drivers/netscaler/ncc_client.py:168 +#, python-format +msgid "Response: %s" +msgstr "" + +#: 
neutron/plugins/ml2/drivers/cisco/apic/apic_client.py:290 +#, python-format +msgid "APIC session will expire in %d seconds" +msgstr "" + +#: neutron/plugins/ml2/drivers/cisco/apic/apic_client.py:335 +msgid "APIC session timed-out, logging in again." +msgstr "" + +#: neutron/plugins/ml2/drivers/cisco/apic/config.py:23 +msgid "Host name or IP Address of the APIC controller" +msgstr "" + +#: neutron/plugins/ml2/drivers/cisco/apic/config.py:25 +msgid "Username for the APIC controller" +msgstr "" + +#: neutron/plugins/ml2/drivers/cisco/apic/config.py:27 +msgid "Password for the APIC controller" +msgstr "" + +#: neutron/plugins/ml2/drivers/cisco/apic/config.py:29 +msgid "Communication port for the APIC controller" +msgstr "" + +#: neutron/plugins/ml2/drivers/cisco/apic/config.py:31 +msgid "Name for the VMM domain provider" +msgstr "" + +#: neutron/plugins/ml2/drivers/cisco/apic/config.py:33 +msgid "Name for the VMM domain to be created for Openstack" +msgstr "" + +#: neutron/plugins/ml2/drivers/cisco/apic/config.py:35 +msgid "Name for the vlan namespace to be used for openstack" +msgstr "" + +#: neutron/plugins/ml2/drivers/cisco/apic/config.py:37 +msgid "Range of VLAN's to be used for Openstack" +msgstr "" + +#: neutron/plugins/ml2/drivers/cisco/apic/config.py:39 +msgid "Name of the node profile to be created" +msgstr "" + +#: neutron/plugins/ml2/drivers/cisco/apic/config.py:41 +msgid "Name of the entity profile to be created" +msgstr "" + +#: neutron/plugins/ml2/drivers/cisco/apic/config.py:43 +msgid "Name of the function profile to be created" +msgstr "" + +#: neutron/plugins/ml2/drivers/cisco/apic/config.py:45 +msgid "Clear the node profiles on the APIC at startup (mainly used for testing)" +msgstr "" + +#: neutron/plugins/ml2/drivers/cisco/apic/exceptions.py:25 +#, python-format +msgid "No response from APIC at %(url)s" +msgstr "" + +#: neutron/plugins/ml2/drivers/cisco/apic/exceptions.py:30 +#, python-format +msgid "" +"APIC responded with HTTP status %(status)s: 
%(reason)s, Request: " +"'%(request)s', APIC error code %(err_code)s: %(err_text)s" +msgstr "" + +#: neutron/plugins/ml2/drivers/cisco/apic/exceptions.py:37 +#, python-format +msgid "APIC failed to provide cookie for %(request)s request" +msgstr "" + +#: neutron/plugins/ml2/drivers/cisco/apic/exceptions.py:42 +msgid "Authorized APIC session not established" +msgstr "" + +#: neutron/plugins/ml2/drivers/cisco/apic/exceptions.py:47 +#, python-format +msgid "The switch and port for host '%(host)s' are not configured" +msgstr "" + +#: neutron/plugins/ml2/drivers/cisco/apic/exceptions.py:52 +#, python-format +msgid "Managed Object '%(mo_class)s' is not supported" +msgstr "" + +#: neutron/plugins/ml2/drivers/cisco/apic/exceptions.py:57 +#, python-format +msgid "" +"Multiple VLAN ranges are not supported in the APIC plugin. Please specify" +" a single VLAN range. Current config: '%(vlan_ranges)s'" +msgstr "" + +#: neutron/plugins/ml2/drivers/cisco/apic/mechanism_apic.py:84 +#, python-format +msgid "Port %s is not bound to a segment" +msgstr "" + +#: neutron/plugins/ml2/drivers/cisco/nexus/config.py:25 +msgid "The physical network managed by the switches." +msgstr "" + +#: neutron/plugins/ml2/drivers/cisco/nexus/exceptions.py:33 +#, python-format +msgid "Credential %(credential_name)s already exists for tenant %(tenant_id)s." 
+msgstr "" + +#: neutron/plugins/ml2/drivers/cisco/nexus/exceptions.py:54 +#, python-format +msgid "Nexus Port Binding (%(filters)s) is not present" +msgstr "" + +#: neutron/plugins/ml2/drivers/cisco/nexus/exceptions.py:63 +#, python-format +msgid "Missing required field(s) to configure nexus switch: %(fields)s" +msgstr "" + +#: neutron/plugins/ml2/drivers/cisco/nexus/mech_cisco_nexus.py:45 +#, python-format +msgid "nexus_switches found = %s" +msgstr "" + +#: neutron/plugins/ml2/drivers/cisco/nexus/nexus_db_v2.py:87 +msgid "get_nexusvm_bindings() called" +msgstr "" + +#: neutron/plugins/ml2/drivers/l2pop/config.py:25 +msgid "" +"Delay within which agent is expected to update existing ports whent it " +"restarts" +msgstr "" + +#: neutron/plugins/ml2/drivers/l2pop/mech_driver.py:42 +msgid "Experimental L2 population driver" +msgstr "" + +#: neutron/plugins/ml2/drivers/l2pop/mech_driver.py:143 +msgid "Unable to retrieve the agent ip, check the agent configuration." +msgstr "" + +#: neutron/plugins/ml2/drivers/l2pop/mech_driver.py:149 +#, python-format +msgid "Port %(port)s updated by agent %(agent)s isn't bound to any segment" +msgstr "" + +#: neutron/plugins/ml2/drivers/l2pop/mech_driver.py:199 +#, python-format +msgid "" +"Unable to retrieve the agent ip, check the agent %(agent_host)s " +"configuration." +msgstr "" + +#: neutron/plugins/ml2/drivers/l2pop/rpc.py:40 +#, python-format +msgid "" +"Fanout notify l2population agents at %(topic)s the message %(method)s " +"with %(fdb_entries)s" +msgstr "" + +#: neutron/plugins/ml2/drivers/l2pop/rpc.py:51 +#, python-format +msgid "" +"Notify l2population agent %(host)s at %(topic)s the message %(method)s " +"with %(fdb_entries)s" +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_arista/config.py:31 +msgid "" +"Username for Arista EOS. This is required field. If not set, all " +"communications to Arista EOSwill fail." +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_arista/config.py:37 +msgid "" +"Password for Arista EOS. 
This is required field. If not set, all " +"communications to Arista EOS will fail." +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_arista/config.py:42 +msgid "" +"Arista EOS IP address. This is required field. If not set, all " +"communications to Arista EOSwill fail." +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_arista/config.py:47 +msgid "" +"Defines if hostnames are sent to Arista EOS as FQDNs " +"(\"node1.domain.com\") or as short names (\"node1\"). This is optional. " +"If not set, a value of \"True\" is assumed." +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_arista/config.py:53 +msgid "" +"Sync interval in seconds between Neutron plugin and EOS. This interval " +"defines how often the synchronization is performed. This is an optional " +"field. If not set, a value of 180 seconds is assumed." +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_arista/config.py:60 +msgid "" +"Defines Region Name that is assigned to this OpenStack Controller. This " +"is useful when multiple OpenStack/Neutron controllers are managing the " +"same Arista HW clusters. Note that this name must match with the region " +"name registered (or known) to keystone service. Authentication with " +"Keysotne is performed by EOS. This is optional. If not set, a value of " +"\"RegionOne\" is assumed." 
+msgstr "" + +#: neutron/plugins/ml2/drivers/mech_arista/mechanism_arista.py:32 +msgid "Unable to reach EOS" +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_arista/mechanism_arista.py:74 +#, python-format +msgid "'timestamp' command '%s' is not available on EOS" +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_arista/mechanism_arista.py:314 +#, python-format +msgid "VM id %(vmid)s not found for port %(portid)s" +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_arista/mechanism_arista.py:333 +#, python-format +msgid "Unknown device owner: %s" +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_arista/mechanism_arista.py:429 +#, python-format +msgid "Executing command on Arista EOS: %s" +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_arista/mechanism_arista.py:437 +#, python-format +msgid "Error %(err)s while trying to execute commands %(cmd)s on EOS %(host)s" +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_arista/mechanism_arista.py:505 +msgid "Required option eapi_host is not set" +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_arista/mechanism_arista.py:509 +msgid "Required option eapi_username is not set" +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_arista/mechanism_arista.py:529 +msgid "Syncing Neutron <-> EOS" +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_arista/mechanism_arista.py:535 +msgid "OpenStack and EOS are in sync!" +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_arista/mechanism_arista.py:557 +#, python-format +msgid "" +"No Tenants configured in Neutron DB. 
But %d tenants disovered in EOS " +"during synchronization.Enitre EOS region is cleared" +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_arista/mechanism_arista.py:722 +#, python-format +msgid "Network %s is not created as it is not found inArista DB" +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_arista/mechanism_arista.py:736 +#, python-format +msgid "Network name changed to %s" +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_arista/mechanism_arista.py:764 +#, python-format +msgid "Network %s is not updated as it is not found inArista DB" +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_arista/mechanism_arista.py:856 +#, python-format +msgid "VM %s is not created as it is not found in Arista DB" +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_arista/mechanism_arista.py:871 +#, python-format +msgid "Port name changed to %s" +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_arista/mechanism_arista.py:921 +#, python-format +msgid "VM %s is not updated as it is not found in Arista DB" +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_bigswitch/driver.py:49 +msgid "Initializing driver" +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_bigswitch/driver.py:64 +msgid "Initialization done" +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_bigswitch/driver.py:125 +msgid "Ignoring port notification to controller because of missing host ID." 
+msgstr "" + +#: neutron/plugins/ml2/drivers/mlnx/config.py:24 +#: neutron/plugins/mlnx/common/config.py:50 +msgid "Type of VM network interface: mlnx_direct or hostdev" +msgstr "" + +#: neutron/plugins/ml2/drivers/mlnx/config.py:28 +msgid "Enable server compatibility with old nova" +msgstr "" + +#: neutron/plugins/ml2/drivers/mlnx/mech_mlnx.py:56 +#, python-format +msgid "Checking segment: %(segment)s for mappings: %(mappings)s " +msgstr "" + +#: neutron/plugins/mlnx/agent_notify_api.py:50 +msgid "Sending delete network message" +msgstr "" + +#: neutron/plugins/mlnx/agent_notify_api.py:58 +msgid "Sending update port message" +msgstr "" + +#: neutron/plugins/mlnx/mlnx_plugin.py:117 +msgid "Mellanox Embedded Switch Plugin initialisation complete" +msgstr "" + +#: neutron/plugins/mlnx/mlnx_plugin.py:154 +#, python-format +msgid "Invalid physical network type %(type)s.Server terminated!" +msgstr "" + +#: neutron/plugins/mlnx/mlnx_plugin.py:161 +#, python-format +msgid "Parsing physical_network_type failed: %s. Server terminated!" +msgstr "" + +#: neutron/plugins/mlnx/mlnx_plugin.py:166 +#, python-format +msgid "" +"Invalid physical network type %(type)s for network %(net)s. Server " +"terminated!" +msgstr "" + +#: neutron/plugins/mlnx/mlnx_plugin.py:170 +#, python-format +msgid "Physical Network type mappings: %s" +msgstr "" + +#: neutron/plugins/mlnx/mlnx_plugin.py:178 +#: neutron/plugins/openvswitch/ovs_neutron_plugin.py:348 +#, python-format +msgid "%s. Server terminated!" 
+msgstr "" + +#: neutron/plugins/mlnx/mlnx_plugin.py:316 +#, python-format +msgid "Unsupported vnic type %(vnic_type)s for physical network type %(net_type)s" +msgstr "" + +#: neutron/plugins/mlnx/mlnx_plugin.py:320 +msgid "Invalid vnic_type on port_create" +msgstr "" + +#: neutron/plugins/mlnx/mlnx_plugin.py:322 +msgid "vnic_type is not defined in port profile" +msgstr "" + +#: neutron/plugins/mlnx/mlnx_plugin.py:366 +msgid "Update network" +msgstr "" + +#: neutron/plugins/mlnx/mlnx_plugin.py:379 +msgid "Delete network" +msgstr "" + +#: neutron/plugins/mlnx/mlnx_plugin.py:427 +#, python-format +msgid "create_port with %s" +msgstr "" + +#: neutron/plugins/mlnx/rpc_callbacks.py:111 +#, python-format +msgid "Device %(device)s up %(agent_id)s" +msgstr "" + +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:58 +#, python-format +msgid "Agent cache inconsistency - port id is not stored for %s" +msgstr "" + +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:73 +#, python-format +msgid "Network %s not defined on Agent." 
+msgstr "" + +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:86 +#, python-format +msgid "Network %s is not available on this agent" +msgstr "" + +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:97 +#, python-format +msgid "Connecting port %s" +msgstr "" + +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:107 +#, python-format +msgid "Binding Segmentation ID %(seg_id)sto eSwitch for vNIC mac_address %(mac)s" +msgstr "" + +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:126 +#, python-format +msgid "Port_mac %s is not available on this agent" +msgstr "" + +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:133 +msgid "Creating VLAN Network" +msgstr "" + +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:135 +#, python-format +msgid "Unknown network type %(network_type)s for network %(network_id)s" +msgstr "" + +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:167 +msgid "Invalid Network ID, cannot remove Network" +msgstr "" + +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:169 +#, python-format +msgid "Delete network %s" +msgstr "" + +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:209 +#, python-format +msgid "RPC timeout while updating port %s" +msgstr "" + +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:291 +msgid "Ports added!" +msgstr "" + +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:294 +msgid "Ports removed!" 
+msgstr "" + +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:319 +#, python-format +msgid "Adding port with mac %s" +msgstr "" + +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:326 +#, python-format +msgid "" +"Unable to get device dev_details for device with mac_address %(device)s: " +"due to %(exc)s" +msgstr "" + +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:332 +#, python-format +msgid "Port %s updated" +msgstr "" + +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:333 +#, python-format +msgid "Device details %s" +msgstr "" + +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:346 +#, python-format +msgid "Device with mac_address %s not defined on Neutron Plugin" +msgstr "" + +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:353 +#, python-format +msgid "Removing device with mac_address %s" +msgstr "" + +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:361 +#, python-format +msgid "Removing port failed for device %(device)s due to %(exc)s" +msgstr "" + +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:376 +msgid "eSwitch Agent Started!" +msgstr "" + +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:389 +msgid "Agent loop process devices!" +msgstr "" + +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:394 +msgid "" +"Request timeout in agent event loop eSwitchD is not responding - " +"exiting..." +msgstr "" + +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:427 +#, python-format +msgid "Failed on Agent initialisation : %s. Agent terminated!" +msgstr "" + +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:432 +msgid "Agent initialised successfully, now running... " +msgstr "" + +#: neutron/plugins/mlnx/agent/utils.py:32 +msgid "" +"Failed to import eventlet.green.zmq. Won't connect to eSwitchD - " +"exiting..." 
+msgstr "" + +#: neutron/plugins/mlnx/agent/utils.py:76 +#, python-format +msgid "Action %(action)s failed: %(reason)s" +msgstr "" + +#: neutron/plugins/mlnx/agent/utils.py:78 +#, python-format +msgid "Unknown operation status %s" +msgstr "" + +#: neutron/plugins/mlnx/agent/utils.py:83 +msgid "get_attached_vnics" +msgstr "" + +#: neutron/plugins/mlnx/agent/utils.py:90 +#, python-format +msgid "" +"Set Vlan %(segmentation_id)s on Port %(port_mac)s on Fabric " +"%(physical_network)s" +msgstr "" + +#: neutron/plugins/mlnx/agent/utils.py:103 +#, python-format +msgid "Define Fabric %(fabric)s on interface %(ifc)s" +msgstr "" + +#: neutron/plugins/mlnx/agent/utils.py:112 +#, python-format +msgid "Port Up for %(port_mac)s on fabric %(fabric)s" +msgstr "" + +#: neutron/plugins/mlnx/agent/utils.py:121 +#, python-format +msgid "Port Down for %(port_mac)s on fabric %(fabric)s" +msgstr "" + +#: neutron/plugins/mlnx/agent/utils.py:130 +#, python-format +msgid "Port Release for %(port_mac)s on fabric %(fabric)s" +msgstr "" + +#: neutron/plugins/mlnx/common/comm_utils.py:59 +#, python-format +msgid "Request timeout - call again after %s seconds" +msgstr "" + +#: neutron/plugins/mlnx/common/config.py:36 +msgid "" +"List of : with " +"physical_network_type is either eth or ib" +msgstr "" + +#: neutron/plugins/mlnx/common/config.py:39 +msgid "Physical network type for provider network (eth or ib)" +msgstr "" + +#: neutron/plugins/mlnx/common/config.py:54 +msgid "eswitch daemon end point" +msgstr "" + +#: neutron/plugins/mlnx/common/config.py:56 +msgid "" +"The number of milliseconds the agent will wait for response on request to" +" daemon." +msgstr "" + +#: neutron/plugins/mlnx/common/config.py:59 +msgid "" +"The number of retries the agent will send request to daemon before giving" +" up" +msgstr "" + +#: neutron/plugins/mlnx/common/config.py:62 +msgid "" +"backoff rate multiplier for waiting period between retries for request to" +" daemon, i.e. 
value of 2 will double the request timeout each retry" +msgstr "" + +#: neutron/plugins/mlnx/common/exceptions.py:22 +#, python-format +msgid "Mlnx Exception: %(err_msg)s" +msgstr "" + +#: neutron/plugins/mlnx/common/exceptions.py:26 +msgid "Request Timeout: no response from eSwitchD" +msgstr "" + +#: neutron/plugins/mlnx/common/exceptions.py:30 +#, python-format +msgid "Operation Failed: %(err_msg)s" +msgstr "" + +#: neutron/plugins/mlnx/db/mlnx_db_v2.py:44 +#: neutron/plugins/mlnx/db/mlnx_db_v2.py:65 +#, python-format +msgid "Removing vlan %(seg_id)s on physical network %(net)s from pool" +msgstr "" + +#: neutron/plugins/mlnx/db/mlnx_db_v2.py:121 +#, python-format +msgid "Reserving vlan %(seg_id)s on physical network %(net)s from pool" +msgstr "" + +#: neutron/plugins/mlnx/db/mlnx_db_v2.py:140 +#, python-format +msgid "" +"Reserving specific vlan %(seg_id)s on physical network %(phy_net)s from " +"pool" +msgstr "" + +#: neutron/plugins/mlnx/db/mlnx_db_v2.py:145 +#, python-format +msgid "" +"Reserving specific vlan %(seg_id)s on physical network %(phy_net)s " +"outside pool" +msgstr "" + +#: neutron/plugins/mlnx/db/mlnx_db_v2.py:172 +#, python-format +msgid "Releasing vlan %(seg_id)s on physical network %(phy_net)s to pool" +msgstr "" + +#: neutron/plugins/mlnx/db/mlnx_db_v2.py:177 +#, python-format +msgid "Releasing vlan %(seg_id)s on physical network %(phy_net)s outside pool" +msgstr "" + +#: neutron/plugins/mlnx/db/mlnx_db_v2.py:183 +#, python-format +msgid "vlan_id %(seg_id)s on physical network %(phy_net)s not found" +msgstr "" + +#: neutron/plugins/mlnx/db/mlnx_db_v2.py:241 +msgid "Get_port_from_device_mac() called" +msgstr "" + +#: neutron/plugins/mlnx/db/mlnx_db_v2.py:249 +#, python-format +msgid "Set_port_status as %s called" +msgstr "" + +#: neutron/plugins/nec/nec_plugin.py:194 +#, python-format +msgid "_cleanup_ofc_tenant: No OFC tenant for %s" +msgstr "" + +#: neutron/plugins/nec/nec_plugin.py:197 +#, python-format +msgid "delete_ofc_tenant() failed 
due to %s" +msgstr "" + +#: neutron/plugins/nec/nec_plugin.py:213 +msgid "activate_port_if_ready(): skip, port.admin_state_up is False." +msgstr "" + +#: neutron/plugins/nec/nec_plugin.py:217 +msgid "activate_port_if_ready(): skip, network.admin_state_up is False." +msgstr "" + +#: neutron/plugins/nec/nec_plugin.py:221 +msgid "activate_port_if_ready(): skip, no portinfo for this port." +msgstr "" + +#: neutron/plugins/nec/nec_plugin.py:225 +msgid "activate_port_if_ready(): skip, ofc_port already exists." +msgstr "" + +#: neutron/plugins/nec/nec_plugin.py:233 +#, python-format +msgid "create_ofc_port() failed due to %s" +msgstr "" + +#: neutron/plugins/nec/nec_plugin.py:246 +#, python-format +msgid "deactivate_port(): skip, ofc_port for port=%s does not exist." +msgstr "" + +#: neutron/plugins/nec/nec_plugin.py:265 +#, python-format +msgid "deactivate_port(): OFC port for port=%s is already removed." +msgstr "" + +#: neutron/plugins/nec/nec_plugin.py:273 +#, python-format +msgid "Failed to delete port=%(port)s from OFC: %(exc)s" +msgstr "" + +#: neutron/plugins/nec/nec_plugin.py:292 +#, python-format +msgid "NECPluginV2.create_network() called, network=%s ." +msgstr "" + +#: neutron/plugins/nec/nec_plugin.py:309 +#, python-format +msgid "Failed to create network id=%(id)s on OFC: %(exc)s" +msgstr "" + +#: neutron/plugins/nec/nec_plugin.py:325 +#, python-format +msgid "NECPluginV2.update_network() called, id=%(id)s network=%(network)s ." +msgstr "" + +#: neutron/plugins/nec/nec_plugin.py:369 +#, python-format +msgid "NECPluginV2.delete_network() called, id=%s ." +msgstr "" + +#: neutron/plugins/nec/nec_plugin.py:403 +#, python-format +msgid "delete_network() failed due to %s" +msgstr "" + +#: neutron/plugins/nec/nec_plugin.py:544 +#, python-format +msgid "NECPluginV2.create_port() called, port=%s ." +msgstr "" + +#: neutron/plugins/nec/nec_plugin.py:608 +#, python-format +msgid "NECPluginV2.update_port() called, id=%(id)s port=%(port)s ." 
+msgstr "" + +#: neutron/plugins/nec/nec_plugin.py:636 +#, python-format +msgid "NECPluginV2.delete_port() called, id=%s ." +msgstr "" + +#: neutron/plugins/nec/nec_plugin.py:704 +#, python-format +msgid "" +"NECPluginV2RPCCallbacks.get_port_from_device() called, device=%(device)s " +"=> %(ret)s." +msgstr "" + +#: neutron/plugins/nec/nec_plugin.py:728 +#, python-format +msgid "NECPluginV2RPCCallbacks.update_ports() called, kwargs=%s ." +msgstr "" + +#: neutron/plugins/nec/nec_plugin.py:738 +#, python-format +msgid "" +"update_ports(): ignore unchanged portinfo in port_added message " +"(port_id=%s)." +msgstr "" + +#: neutron/plugins/nec/nec_plugin.py:758 +#, python-format +msgid "" +"update_ports(): ignore port_removed message due to portinfo for " +"port_id=%s was not registered" +msgstr "" + +#: neutron/plugins/nec/nec_plugin.py:763 +#, python-format +msgid "" +"update_ports(): ignore port_removed message received from different host " +"(registered_datapath_id=%(registered)s, " +"received_datapath_id=%(received)s)." +msgstr "" + +#: neutron/plugins/nec/nec_router.py:60 +#, python-format +msgid "RouterMixin.create_router() called, router=%s ." +msgstr "" + +#: neutron/plugins/nec/nec_router.py:86 +#, python-format +msgid "RouterMixin.update_router() called, id=%(id)s, router=%(router)s ." +msgstr "" + +#: neutron/plugins/nec/nec_router.py:104 +#, python-format +msgid "RouterMixin.delete_router() called, id=%s." +msgstr "" + +#: neutron/plugins/nec/nec_router.py:123 +#, python-format +msgid "" +"RouterMixin.add_router_interface() called, id=%(id)s, " +"interface=%(interface)s." +msgstr "" + +#: neutron/plugins/nec/nec_router.py:130 +#, python-format +msgid "" +"RouterMixin.remove_router_interface() called, id=%(id)s, " +"interface=%(interface)s." 
+msgstr "" + +#: neutron/plugins/nec/nec_router.py:313 +#, python-format +msgid "" +"OFC does not support router with provider=%(provider)s, so removed it " +"from supported provider (new router driver map=%(driver_map)s)" +msgstr "" + +#: neutron/plugins/nec/nec_router.py:321 +#, python-format +msgid "" +"default_router_provider %(default)s is supported! Please specify one of " +"%(supported)s" +msgstr "" + +#: neutron/plugins/nec/nec_router.py:335 +#, python-format +msgid "Enabled router drivers: %s" +msgstr "" + +#: neutron/plugins/nec/nec_router.py:338 +#, python-format +msgid "" +"No router provider is enabled. neutron-server terminated! " +"(supported=%(supported)s, configured=%(config)s)" +msgstr "" + +#: neutron/plugins/nec/packet_filter.py:42 +msgid "Disabled packet-filter extension." +msgstr "" + +#: neutron/plugins/nec/packet_filter.py:47 +#, python-format +msgid "create_packet_filter() called, packet_filter=%s ." +msgstr "" + +#: neutron/plugins/nec/packet_filter.py:63 +#, python-format +msgid "update_packet_filter() called, id=%(id)s packet_filter=%(packet_filter)s ." +msgstr "" + +#: neutron/plugins/nec/packet_filter.py:136 +#: neutron/plugins/nec/packet_filter.py:189 +#, python-format +msgid "Failed to create packet_filter id=%(id)s on OFC: %(exc)s" +msgstr "" + +#: neutron/plugins/nec/packet_filter.py:146 +#, python-format +msgid "delete_packet_filter() called, id=%s ." +msgstr "" + +#: neutron/plugins/nec/packet_filter.py:164 +#, python-format +msgid "activate_packet_filter_if_ready() called, packet_filter=%s." +msgstr "" + +#: neutron/plugins/nec/packet_filter.py:173 +#, python-format +msgid "" +"activate_packet_filter_if_ready(): skip pf_id=%s, " +"packet_filter.admin_state_up is False." +msgstr "" + +#: neutron/plugins/nec/packet_filter.py:176 +#, python-format +msgid "" +"activate_packet_filter_if_ready(): skip pf_id=%s, no portinfo for the " +"in_port." 
+msgstr "" + +#: neutron/plugins/nec/packet_filter.py:179 +msgid "" +"_activate_packet_filter_if_ready(): skip, ofc_packet_filter already " +"exists." +msgstr "" + +#: neutron/plugins/nec/packet_filter.py:182 +#, python-format +msgid "activate_packet_filter_if_ready(): create packet_filter id=%s on OFC." +msgstr "" + +#: neutron/plugins/nec/packet_filter.py:202 +#, python-format +msgid "deactivate_packet_filter_if_ready() called, packet_filter=%s." +msgstr "" + +#: neutron/plugins/nec/packet_filter.py:207 +#, python-format +msgid "" +"deactivate_packet_filter(): skip, Not found OFC Mapping for packet_filter" +" id=%s." +msgstr "" + +#: neutron/plugins/nec/packet_filter.py:212 +#, python-format +msgid "deactivate_packet_filter(): deleting packet_filter id=%s from OFC." +msgstr "" + +#: neutron/plugins/nec/packet_filter.py:221 +#, python-format +msgid "Failed to delete packet_filter id=%(id)s from OFC: %(exc)s" +msgstr "" + +#: neutron/plugins/nec/packet_filter.py:252 +#, python-format +msgid "Error occurred while disabling packet filter(s) for port %s" +msgstr "" + +#: neutron/plugins/nec/router_drivers.py:127 +#, python-format +msgid "create_router() failed due to %s" +msgstr "" + +#: neutron/plugins/nec/router_drivers.py:156 +#, python-format +msgid "_update_ofc_routes() failed due to %s" +msgstr "" + +#: neutron/plugins/nec/router_drivers.py:171 +#, python-format +msgid "delete_router() failed due to %s" +msgstr "" + +#: neutron/plugins/nec/router_drivers.py:182 +#, python-format +msgid "" +"RouterOpenFlowDriver.add_interface(): the requested port has no subnet. " +"add_interface() is skipped. 
router_id=%(id)s, port=%(port)s)" +msgstr "" + +#: neutron/plugins/nec/router_drivers.py:202 +#, python-format +msgid "add_router_interface() failed due to %s" +msgstr "" + +#: neutron/plugins/nec/router_drivers.py:220 +#, python-format +msgid "delete_router_interface() failed due to %s" +msgstr "" + +#: neutron/plugins/nec/agent/nec_neutron_agent.py:53 +#, python-format +msgid "Update ports: added=%(added)s, removed=%(removed)s" +msgstr "" + +#: neutron/plugins/nec/agent/nec_neutron_agent.py:76 +#: neutron/plugins/oneconvergence/agent/nvsd_neutron_agent.py:51 +#, python-format +msgid "port_update received: %s" +msgstr "" + +#: neutron/plugins/nec/agent/nec_neutron_agent.py:220 +msgid "No port changed." +msgstr "" + +#: neutron/plugins/nec/common/config.py:37 +msgid "Host to connect to" +msgstr "" + +#: neutron/plugins/nec/common/config.py:39 +msgid "Base URL of OFC REST API. It is prepended to each API request." +msgstr "" + +#: neutron/plugins/nec/common/config.py:42 +msgid "Port to connect to" +msgstr "" + +#: neutron/plugins/nec/common/config.py:44 +msgid "Driver to use" +msgstr "" + +#: neutron/plugins/nec/common/config.py:46 +msgid "Enable packet filter" +msgstr "" + +#: neutron/plugins/nec/common/config.py:48 +msgid "Use SSL to connect" +msgstr "" + +#: neutron/plugins/nec/common/config.py:50 +msgid "Key file" +msgstr "" + +#: neutron/plugins/nec/common/config.py:52 +msgid "Certificate file" +msgstr "" + +#: neutron/plugins/nec/common/config.py:54 +msgid "Disable SSL certificate verification" +msgstr "" + +#: neutron/plugins/nec/common/config.py:56 +msgid "" +"Maximum attempts per OFC API request.NEC plugin retries API request to " +"OFC when OFC returns ServiceUnavailable (503).The value must be greater " +"than 0." +msgstr "" + +#: neutron/plugins/nec/common/config.py:65 +msgid "Default router provider to use." +msgstr "" + +#: neutron/plugins/nec/common/config.py:68 +msgid "List of enabled router providers." 
+msgstr "" + +#: neutron/plugins/nec/common/exceptions.py:22 +#, python-format +msgid "An OFC exception has occurred: %(reason)s" +msgstr "" + +#: neutron/plugins/nec/common/exceptions.py:32 +#, python-format +msgid "The specified OFC resource (%(resource)s) is not found." +msgstr "" + +#: neutron/plugins/nec/common/exceptions.py:36 +#, python-format +msgid "An exception occurred in NECPluginV2 DB: %(reason)s" +msgstr "" + +#: neutron/plugins/nec/common/exceptions.py:40 +#, python-format +msgid "" +"Neutron-OFC resource mapping for %(resource)s %(neutron_id)s is not " +"found. It may be deleted during processing." +msgstr "" + +#: neutron/plugins/nec/common/exceptions.py:46 +#, python-format +msgid "OFC returns Server Unavailable (503) (Retry-After=%(retry_after)s)" +msgstr "" + +#: neutron/plugins/nec/common/exceptions.py:55 +#, python-format +msgid "PortInfo %(id)s could not be found" +msgstr "" + +#: neutron/plugins/nec/common/exceptions.py:59 +msgid "" +"Invalid input for operation: datapath_id should be a hex string with at " +"most 8 bytes" +msgstr "" + +#: neutron/plugins/nec/common/exceptions.py:65 +msgid "Invalid input for operation: port_no should be [0:65535]" +msgstr "" + +#: neutron/plugins/nec/common/exceptions.py:70 +#, python-format +msgid "Router (provider=%(provider)s) does not support an external network" +msgstr "" + +#: neutron/plugins/nec/common/exceptions.py:75 +#, python-format +msgid "Provider %(provider)s could not be found" +msgstr "" + +#: neutron/plugins/nec/common/exceptions.py:79 +#, python-format +msgid "Cannot create more routers with provider=%(provider)s" +msgstr "" + +#: neutron/plugins/nec/common/exceptions.py:83 +#, python-format +msgid "" +"Provider of Router %(router_id)s is %(provider)s. This operation is " +"supported only for router provider %(expected_provider)s." 
+msgstr "" + +#: neutron/plugins/nec/common/ofc_client.py:56 +#, python-format +msgid "Operation on OFC failed: %(status)s%(msg)s" +msgstr "" + +#: neutron/plugins/nec/common/ofc_client.py:80 +#, python-format +msgid "Client request: %(host)s:%(port)s %(method)s %(action)s [%(body)s]" +msgstr "" + +#: neutron/plugins/nec/common/ofc_client.py:89 +#, python-format +msgid "OFC returns [%(status)s:%(data)s]" +msgstr "" + +#: neutron/plugins/nec/common/ofc_client.py:106 +#, python-format +msgid "OFC returns ServiceUnavailable (retry-after=%s)" +msgstr "" + +#: neutron/plugins/nec/common/ofc_client.py:110 +#, python-format +msgid "Specified resource %s does not exist on OFC " +msgstr "" + +#: neutron/plugins/nec/common/ofc_client.py:114 +#, python-format +msgid "Operation on OFC failed: status=%(status)s, detail=%(detail)s" +msgstr "" + +#: neutron/plugins/nec/common/ofc_client.py:117 +msgid "Operation on OFC failed" +msgstr "" + +#: neutron/plugins/nec/common/ofc_client.py:126 +#, python-format +msgid "Failed to connect OFC : %s" +msgstr "" + +#: neutron/plugins/nec/common/ofc_client.py:142 +#, python-format +msgid "Waiting for %s seconds due to OFC Service_Unavailable." 
+msgstr "" + +#: neutron/plugins/nec/db/api.py:110 +#, python-format +msgid "del_ofc_item(): NotFound item (resource=%(resource)s, id=%(id)s) " +msgstr "" + +#: neutron/plugins/nec/db/api.py:144 +#, python-format +msgid "del_portinfo(): NotFound portinfo for port_id: %s" +msgstr "" + +#: neutron/plugins/nec/db/api.py:165 +#: neutron/plugins/openvswitch/ovs_db_v2.py:317 +#, python-format +msgid "get_port_with_securitygroups() called:port_id=%s" +msgstr "" + +#: neutron/plugins/nec/db/router.py:87 +#, python-format +msgid "Add provider binding (router=%(router_id)s, provider=%(provider)s)" +msgstr "" + +#: neutron/plugins/nec/drivers/__init__.py:38 +#, python-format +msgid "Loading OFC driver: %s" +msgstr "" + +#: neutron/plugins/nec/drivers/pfc.py:35 +#, python-format +msgid "OFC %(resource)s ID has an invalid format: %(ofc_id)s" +msgstr "" + +#: neutron/plugins/nec/extensions/packetfilter.py:35 +msgid "Number of packet_filters allowed per tenant, -1 for unlimited" +msgstr "" + +#: neutron/plugins/nec/extensions/packetfilter.py:42 +#, python-format +msgid "PacketFilter %(id)s could not be found" +msgstr "" + +#: neutron/plugins/nec/extensions/packetfilter.py:46 +#, python-format +msgid "" +"IP version %(version)s is not supported for %(field)s (%(value)s is " +"specified)" +msgstr "" + +#: neutron/plugins/nec/extensions/packetfilter.py:51 +#, python-format +msgid "Packet Filter priority should be %(min)s-%(max)s (included)" +msgstr "" + +#: neutron/plugins/nec/extensions/packetfilter.py:55 +#, python-format +msgid "%(field)s field cannot be updated" +msgstr "" + +#: neutron/plugins/nec/extensions/packetfilter.py:59 +#, python-format +msgid "" +"The backend does not support duplicated priority. Priority %(priority)s " +"is in use" +msgstr "" + +#: neutron/plugins/nec/extensions/packetfilter.py:64 +#, python-format +msgid "" +"Ether Type '%(eth_type)s' conflicts with protocol '%(protocol)s'. Update " +"or clear protocol before changing ether type." 
+msgstr "" + +#: neutron/plugins/nuage/plugin.py:89 +#, python-format +msgid "%(resource)s with id %(resource_id)s does not exist" +msgstr "" + +#: neutron/plugins/nuage/plugin.py:98 +#, python-format +msgid "" +"Either %(resource)s %(req_resource)s not found or you dont have " +"credential to access it" +msgstr "" + +#: neutron/plugins/nuage/plugin.py:104 +#, python-format +msgid "" +"More than one entry found for %(resource)s %(req_resource)s. Use id " +"instead" +msgstr "" + +#: neutron/plugins/nuage/plugin.py:214 +#, python-format +msgid "Subnet %s not found on VSD" +msgstr "" + +#: neutron/plugins/nuage/plugin.py:219 +#, python-format +msgid "Port-Mapping for port %s not found on VSD" +msgstr "" + +#: neutron/plugins/nuage/plugin.py:314 +msgid "External network with subnets can not be changed to non-external network" +msgstr "" + +#: neutron/plugins/nuage/plugin.py:368 +msgid "" +"Either net_partition is not provided with subnet OR default net_partition" +" is not created at the start" +msgstr "" + +#: neutron/plugins/nuage/plugin.py:397 +#, python-format +msgid "Only one subnet is allowed per external network %s" +msgstr "" + +#: neutron/plugins/nuage/plugin.py:479 +#, python-format +msgid "" +"Unable to complete operation on subnet %s.One or more ports have an IP " +"allocation from this subnet." +msgstr "" + +#: neutron/plugins/nuage/plugin.py:509 +#, python-format +msgid "" +"Router %s does not hold default zone OR net_partition mapping. Router-IF " +"add failed" +msgstr "" + +#: neutron/plugins/nuage/plugin.py:519 +#, python-format +msgid "Subnet %s does not hold Nuage VSD reference. 
Router-IF add failed" +msgstr "" + +#: neutron/plugins/nuage/plugin.py:529 +#, python-format +msgid "" +"Subnet %(subnet)s and Router %(router)s belong to different net_partition" +" Router-IF add not permitted" +msgstr "" + +#: neutron/plugins/nuage/plugin.py:541 +#, python-format +msgid "Subnet %s has one or more active VMs Router-IF add not permitted" +msgstr "" + +#: neutron/plugins/nuage/plugin.py:587 neutron/plugins/nuage/plugin.py:592 +#: neutron/plugins/nuage/plugin.py:598 +#, python-format +msgid "No router interface found for Router %s. Router-IF delete failed" +msgstr "" + +#: neutron/plugins/nuage/plugin.py:614 +#, python-format +msgid "Subnet %s has one or more active VMs Router-IF delete not permitted" +msgstr "" + +#: neutron/plugins/nuage/plugin.py:623 +#, python-format +msgid "" +"Router %s does not hold net_partition assoc on Nuage VSD. Router-IF " +"delete failed" +msgstr "" + +#: neutron/plugins/nuage/plugin.py:662 +msgid "" +"Either net_partition is not provided with router OR default net_partition" +" is not created at the start" +msgstr "" + +#: neutron/plugins/nuage/plugin.py:708 +msgid "for same subnet, multiple static routes not allowed" +msgstr "" + +#: neutron/plugins/nuage/plugin.py:724 +#, python-format +msgid "Router %s does not hold net-partition assoc on VSD. extra-route failed" +msgstr "" + +#: neutron/plugins/nuage/plugin.py:837 +#, python-format +msgid "One or more router still attached to net_partition %s." 
+msgstr "" + +#: neutron/plugins/nuage/plugin.py:842 +#, python-format +msgid "NetPartition with %s does not exist" +msgstr "" + +#: neutron/plugins/nuage/plugin.py:888 +#, python-format +msgid "router %s is not associated with any net-partition" +msgstr "" + +#: neutron/plugins/nuage/plugin.py:903 +msgid "Floating IP can not be associated to VM in different router context" +msgstr "" + +#: neutron/plugins/nuage/common/config.py:22 +msgid "IP Address and Port of Nuage's VSD server" +msgstr "" + +#: neutron/plugins/nuage/common/config.py:25 +msgid "Username and password for authentication" +msgstr "" + +#: neutron/plugins/nuage/common/config.py:27 +msgid "Boolean for SSL connection with VSD server" +msgstr "" + +#: neutron/plugins/nuage/common/config.py:29 +msgid "Nuage provided base uri to reach out to VSD" +msgstr "" + +#: neutron/plugins/nuage/common/config.py:31 +msgid "" +"Organization name in which VSD will orchestrate network resources using " +"openstack" +msgstr "" + +#: neutron/plugins/nuage/common/config.py:34 +msgid "Nuage provided uri for initial authorization to access VSD" +msgstr "" + +#: neutron/plugins/nuage/common/config.py:38 +msgid "" +"Default Network partition in which VSD will orchestrate network resources" +" using openstack" +msgstr "" + +#: neutron/plugins/nuage/common/config.py:42 +msgid "Per Net Partition quota of floating ips" +msgstr "" + +#: neutron/plugins/nuage/common/exceptions.py:24 +#, python-format +msgid "Nuage Plugin does not support this operation: %(msg)s" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:92 +msgid "Agent terminated!: Failed to get a datapath." 
+msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:111 +msgid "Agent terminated" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:145 +msgid "Agent failed to create agent config map" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:272 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1258 +#, python-format +msgid "Unable to create tunnel port. Invalid remote IP: %s" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:277 +#, python-format +msgid "ryu send_msg() result: %s" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:311 +#, python-format +msgid "network_delete received network %s" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:317 +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:544 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:284 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:607 +#, python-format +msgid "Network %s not used on agent." 
+msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:326 +#, python-format +msgid "port_update received port %s" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:329 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:296 +msgid "tunnel_update received" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:335 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:305 +msgid "No tunnel_type specified, cannot create tunnels" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:338 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:308 +#, python-format +msgid "tunnel_type %s not supported by agent" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:459 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:515 +#, python-format +msgid "No local VLAN available for net-id=%s" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:462 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:523 +#, python-format +msgid "Assigning %(vlan_id)s as local vlan for net-id=%(net_uuid)s" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:474 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:545 +#, python-format +msgid "" +"Cannot provision %(network_type)s network for net-id=%(net_uuid)s - " +"tunneling disabled" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:482 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:564 +#, python-format +msgid "" +"Cannot provision flat network for net-id=%(net_uuid)s - no bridge for " +"physical_network %(physical_network)s" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:492 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:584 +#, python-format +msgid "" +"Cannot provision VLAN network for net-id=%(net_uuid)s - no bridge for " +"physical_network %(physical_network)s" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:501 +#: 
neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:593 +#, python-format +msgid "" +"Cannot provision unknown network type %(network_type)s for net-" +"id=%(net_uuid)s" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:547 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:610 +#, python-format +msgid "Reclaiming vlan = %(vlan_id)s from net-id = %(net_uuid)s" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:581 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:650 +#, python-format +msgid "" +"Cannot reclaim unknown network type %(network_type)s for net-" +"id=%(net_uuid)s" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:632 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:696 +#, python-format +msgid "port_unbound() net_uuid %s not in local_vlan_map" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:706 +#, python-format +msgid "ancillary bridge list: %s." +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:796 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:778 +msgid "" +"Failed to create OVS patch port. Cannot have tunneling enabled on this " +"agent, since this version of OVS does not support tunnels or patch ports." +" Agent terminated!" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:880 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:891 +#, python-format +msgid "Mapping physical network %(physical_network)s to bridge %(bridge)s" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:886 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:897 +#, python-format +msgid "" +"Bridge %(bridge)s for physical network %(physical_network)s does not " +"exist. Agent terminated!" 
+msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:954 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:986 +#, python-format +msgid "Port '%(port_name)s' has lost its vlan tag '%(vlan_tag)d'!" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:983 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1015 +#, python-format +msgid "VIF port: %s has no ofport configured, and might not be able to transmit" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:991 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1025 +#, python-format +msgid "No VIF port for port %s defined on agent." +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1004 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1038 +#: neutron/tests/unit/ofagent/test_ofa_neutron_agent.py:683 +#: neutron/tests/unit/openvswitch/test_ovs_neutron_agent.py:763 +msgid "ofport should have a value that can be interpreted as an integer" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1007 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1041 +#: neutron/tests/unit/ofagent/test_ofa_neutron_agent.py:666 +#: neutron/tests/unit/ofagent/test_ofa_neutron_agent.py:686 +#: neutron/tests/unit/openvswitch/test_ovs_neutron_agent.py:746 +#: neutron/tests/unit/openvswitch/test_ovs_neutron_agent.py:766 +#: neutron/tests/unit/openvswitch/test_ovs_neutron_agent.py:783 +#, python-format +msgid "Failed to set-up %(type)s tunnel port to %(ip)s" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1055 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1084 +#, python-format +msgid "Processing port %s" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1061 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1090 +#, python-format +msgid "" +"Port %s was not found on the integration bridge and will therefore not be" +" processed" +msgstr "" + +#: 
neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1086 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1117 +#, python-format +msgid "Setting status for %s to UP" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1090 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1121 +#, python-format +msgid "Setting status for %s to DOWN" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1093 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1124 +#, python-format +msgid "Configuration for device %s completed." +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1103 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1134 +#, python-format +msgid "Ancillary Port %s added" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1178 +#, python-format +msgid "" +"process_network_ports - iteration:%(iter_num)d - " +"treat_devices_added_or_updated completed in %(elapsed).3f" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1186 +#, python-format +msgid "" +"process_network_ports - iteration:%(iter_num)d - treat_devices_removed " +"completed in %(elapsed).3f" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1199 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1236 +#, python-format +msgid "" +"process_ancillary_network_ports - iteration: %(iter_num)d - " +"treat_ancillary_devices_added completed in %(elapsed).3f" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1208 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1245 +#, python-format +msgid "" +"process_ancillary_network_ports - iteration: %(iter_num)d - " +"treat_ancillary_devices_removed completed in %(elapsed).3f" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1235 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1287 +#, python-format +msgid "Unable to sync tunnel IP %(local_ip)s: %(e)s" +msgstr "" + +#: 
neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1263 +#, python-format +msgid "Agent ovsdb_monitor_loop - iteration:%d started" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1274 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1334 +msgid "Agent tunnel out of sync with plugin!" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1278 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1338 +msgid "Error while synchronizing tunnels" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1282 +#, python-format +msgid "" +"Agent ovsdb_monitor_loop - iteration:%(iter_num)d - starting polling. " +"Elapsed:%(elapsed).3f" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1295 +#, python-format +msgid "" +"Agent ovsdb_monitor_loop - iteration:%(iter_num)d - port information " +"retrieved. Elapsed:%(elapsed).3f" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1305 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1371 +#, python-format +msgid "Starting to process devices in:%s" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1309 +#, python-format +msgid "" +"Agent ovsdb_monitor_loop - iteration:%(iter_num)d - ports processed. " +"Elapsed:%(elapsed).3f" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1324 +#, python-format +msgid "" +"Agent ovsdb_monitor_loop - iteration:%(iter_num)d - ancillary port info " +"retrieved. Elapsed:%(elapsed).3f" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1334 +#, python-format +msgid "" +"Agent ovsdb_monitor_loop - iteration:%(iter_num)d - ancillary ports " +"processed. 
Elapsed:%(elapsed).3f" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1349 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1413 +msgid "Error while processing VIF ports" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1356 +#, python-format +msgid "" +"Agent ovsdb_monitor_loop - iteration:%(iter_num)d completed. Processed " +"ports statistics:%(port_stats)s. Elapsed:%(elapsed).3f" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1389 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1457 +#, python-format +msgid "Parsing bridge_mappings failed: %s." +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1412 +#, python-format +msgid "Invalid tunnel type specificed: %s" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1415 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1483 +msgid "Tunneling cannot be enabled without a valid local_ip." +msgstr "" + +#: neutron/plugins/ofagent/common/config.py:24 +msgid "Number of seconds to retry acquiring an Open vSwitch datapath" +msgstr "" + +#: neutron/plugins/oneconvergence/plugin.py:240 +msgid "Failed to create subnet, deleting it from neutron" +msgstr "" + +#: neutron/plugins/oneconvergence/plugin.py:304 +#, python-format +msgid "Deleting newly created neutron port %s" +msgstr "" + +#: neutron/plugins/oneconvergence/plugin.py:374 +msgid "Failed to create floatingip" +msgstr "" + +#: neutron/plugins/oneconvergence/plugin.py:413 +msgid "Failed to create router" +msgstr "" + +#: neutron/plugins/oneconvergence/agent/nvsd_neutron_agent.py:154 +msgid "Port list is updated" +msgstr "" + +#: neutron/plugins/oneconvergence/agent/nvsd_neutron_agent.py:161 +msgid "AGENT looping....." +msgstr "" + +#: neutron/plugins/oneconvergence/agent/nvsd_neutron_agent.py:173 +msgid "NVSD Agent initialized successfully, now running... 
" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/config.py:26 +msgid "NVSD Controller IP address" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/config.py:29 +msgid "NVSD Controller Port number" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/config.py:32 +msgid "NVSD Controller username" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/config.py:35 +msgid "NVSD Controller password" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/config.py:38 +msgid "NVSD controller REST API request timeout in seconds" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/config.py:40 +msgid "Number of login retries to NVSD controller" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/config.py:45 +msgid "integration bridge" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/exception.py:23 +#, python-format +msgid "An unknown nvsd plugin exception occurred: %(reason)s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/exception.py:27 +#: neutron/plugins/vmware/api_client/exception.py:68 +msgid "The request has timed out." +msgstr "" + +#: neutron/plugins/oneconvergence/lib/exception.py:31 +msgid "Invalid access credentials to the Server." +msgstr "" + +#: neutron/plugins/oneconvergence/lib/exception.py:35 +#, python-format +msgid "A resource is not found: %(reason)s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/exception.py:39 +#, python-format +msgid "Request sent to server is invalid: %(reason)s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/exception.py:43 +#, python-format +msgid "Internal Server Error: %(reason)s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/exception.py:47 +msgid "Connection is closed by the server." 
+msgstr "" + +#: neutron/plugins/oneconvergence/lib/exception.py:51 +#, python-format +msgid "The request is forbidden access to the resource: %(reason)s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/exception.py:55 +#, python-format +msgid "Internal Server Error from NVSD controller: %(reason)s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/nvsdlib.py:55 +#, python-format +msgid "Could not create a %(resource)s under tenant %(tenant_id)s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/nvsdlib.py:59 +#, python-format +msgid "Failed to %(method)s %(resource)s id=%(resource_id)s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/nvsdlib.py:65 +#, python-format +msgid "Failed to %(method)s %(resource)s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/nvsdlib.py:109 +#, python-format +msgid "Network %(id)s created under tenant %(tenant_id)s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/nvsdlib.py:126 +#, python-format +msgid "Network %(id)s updated under tenant %(tenant_id)s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/nvsdlib.py:147 +#, python-format +msgid "Network %(id)s deleted under tenant %(tenant_id)s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/nvsdlib.py:160 +#, python-format +msgid "Subnet %(id)s created under tenant %(tenant_id)s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/nvsdlib.py:174 +#, python-format +msgid "Subnet %(id)s deleted under tenant %(tenant_id)s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/nvsdlib.py:190 +#, python-format +msgid "Subnet %(id)s updated under tenant %(tenant_id)s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/nvsdlib.py:222 +#, python-format +msgid "Port %(id)s created under tenant %(tenant_id)s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/nvsdlib.py:246 +#, python-format +msgid "Port %(id)s updated under tenant %(tenant_id)s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/nvsdlib.py:259 +#, python-format +msgid "Port %(id)s deleted under tenant 
%(tenant_id)s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/nvsdlib.py:281 +#, python-format +msgid "Flatingip %(id)s created under tenant %(tenant_id)s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/nvsdlib.py:298 +#, python-format +msgid "Flatingip %(id)s updated under tenant %(tenant_id)s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/nvsdlib.py:312 +#, python-format +msgid "Flatingip %(id)s deleted under tenant %(tenant_id)s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/nvsdlib.py:325 +#, python-format +msgid "Router %(id)s created under tenant %(tenant_id)s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/nvsdlib.py:341 +#, python-format +msgid "Router %(id)s updated under tenant %(tenant_id)s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/nvsdlib.py:351 +#, python-format +msgid "Router %(id)s deleted under tenant %(tenant_id)s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/plugin_helper.py:81 +#, python-format +msgid "Unable to connect to NVSD controller. Exiting after %(retries)s attempts" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/plugin_helper.py:91 +#, python-format +msgid "Login Failed: %s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/plugin_helper.py:92 +#, python-format +msgid "Unable to establish connection with Controller %s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/plugin_helper.py:94 +msgid "Retrying after 1 second..." 
+msgstr "" + +#: neutron/plugins/oneconvergence/lib/plugin_helper.py:98 +#, python-format +msgid "Login Successful %(uri)s %(status)s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/plugin_helper.py:102 +#, python-format +msgid "AuthToken = %s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/plugin_helper.py:104 +msgid "login failed" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/plugin_helper.py:112 +msgid "No Token, Re-login" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/plugin_helper.py:129 +#, python-format +msgid "request: %(method)s %(uri)s successful" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/plugin_helper.py:136 +#, python-format +msgid "request: Request failed from Controller side :%s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/plugin_helper.py:141 +#, python-format +msgid "Response is Null, Request timed out: %(method)s to %(uri)s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/plugin_helper.py:153 +#, python-format +msgid "Request %(method)s %(uri)s body = %(body)s failed with status %(status)s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/plugin_helper.py:157 +#, python-format +msgid "%s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/plugin_helper.py:161 +#, python-format +msgid "%(method)s to %(url)s, unexpected response code: %(status)d" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/plugin_helper.py:167 +#, python-format +msgid "Request failed from Controller side with Status=%s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/plugin_helper.py:171 +#, python-format +msgid "Success: %(method)s %(url)s status=%(status)s" +msgstr "" + +#: neutron/plugins/openvswitch/ovs_db_v2.py:210 +#, python-format +msgid "Skipping unreasonable tunnel ID range %(tun_min)s:%(tun_max)s" +msgstr "" + +#: neutron/plugins/openvswitch/ovs_db_v2.py:258 +#, python-format +msgid "Reserving tunnel %s from pool" +msgstr "" + +#: neutron/plugins/openvswitch/ovs_db_v2.py:273 +#, python-format +msgid "Reserving 
specific tunnel %s from pool" +msgstr "" + +#: neutron/plugins/openvswitch/ovs_db_v2.py:276 +#, python-format +msgid "Reserving specific tunnel %s outside pool" +msgstr "" + +#: neutron/plugins/openvswitch/ovs_db_v2.py:299 +#, python-format +msgid "Releasing tunnel %s outside pool" +msgstr "" + +#: neutron/plugins/openvswitch/ovs_db_v2.py:301 +#, python-format +msgid "Releasing tunnel %s to pool" +msgstr "" + +#: neutron/plugins/openvswitch/ovs_db_v2.py:303 +#, python-format +msgid "tunnel_id %s not found" +msgstr "" + +#: neutron/plugins/openvswitch/ovs_db_v2.py:375 +#, python-format +msgid "Adding a tunnel endpoint for %s" +msgstr "" + +#: neutron/plugins/openvswitch/ovs_db_v2.py:391 +#, python-format +msgid "" +"Adding a tunnel endpoint failed due to a concurrenttransaction had been " +"committed (%s attempts left)" +msgstr "" + +#: neutron/plugins/openvswitch/ovs_db_v2.py:396 +msgid "Unable to generate a new tunnel id" +msgstr "" + +#: neutron/plugins/openvswitch/ovs_neutron_plugin.py:296 +#, python-format +msgid "Invalid tenant_network_type: %s. Server terminated!" +msgstr "" + +#: neutron/plugins/openvswitch/ovs_neutron_plugin.py:313 +#, python-format +msgid "Tunneling disabled but tenant_network_type is '%s'. Server terminated!" +msgstr "" + +#: neutron/plugins/openvswitch/ovs_neutron_plugin.py:359 +#, python-format +msgid "Invalid tunnel ID range: '%(range)s' - %(e)s. Server terminated!" +msgstr "" + +#: neutron/plugins/openvswitch/ovs_neutron_plugin.py:363 +#, python-format +msgid "Tunnel ID ranges: %s" +msgstr "" + +#: neutron/plugins/openvswitch/ovs_neutron_plugin.py:418 +#, python-format +msgid "%s networks are not enabled" +msgstr "" + +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:228 +msgid "OVS version can not support ARP responder." 
+msgstr "" + +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:293 +#, python-format +msgid "port_update message processed for port %s" +msgstr "" + +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:494 +#, python-format +msgid "Action %s not supported" +msgstr "" + +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:757 +#, python-format +msgid "Adding %s to list of bridges." +msgstr "" + +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:871 +#, python-format +msgid "" +"Creating an interface named %(name)s exceeds the %(limit)d character " +"limitation. It was shortened to %(new_name)s to fit." +msgstr "" + +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1215 +#, python-format +msgid "" +"process_network_ports - iteration:%(iter_num)d " +"-treat_devices_added_or_updated completed in %(elapsed).3f" +msgstr "" + +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1223 +#, python-format +msgid "" +"process_network_ports - iteration:%(iter_num)d -treat_devices_removed " +"completed in %(elapsed).3f" +msgstr "" + +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1324 +#, python-format +msgid "Agent rpc_loop - iteration:%d started" +msgstr "" + +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1348 +#, python-format +msgid "" +"Agent rpc_loop - iteration:%(iter_num)d - starting polling. " +"Elapsed:%(elapsed).3f" +msgstr "" + +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1361 +#, python-format +msgid "" +"Agent rpc_loop - iteration:%(iter_num)d - port information retrieved. " +"Elapsed:%(elapsed).3f" +msgstr "" + +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1376 +#, python-format +msgid "" +"Agent rpc_loop - iteration:%(iter_num)d -ports processed. " +"Elapsed:%(elapsed).3f" +msgstr "" + +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1390 +#, python-format +msgid "" +"Agent rpc_loop - iteration:%(iter_num)d -ancillary port info retrieved. 
" +"Elapsed:%(elapsed).3f" +msgstr "" + +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1399 +#, python-format +msgid "" +"Agent rpc_loop - iteration:%(iter_num)d - ancillary ports processed. " +"Elapsed:%(elapsed).3f" +msgstr "" + +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1420 +#, python-format +msgid "" +"Agent rpc_loop - iteration:%(iter_num)d completed. Processed ports " +"statistics: %(port_stats)s. Elapsed:%(elapsed).3f" +msgstr "" + +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1480 +#, python-format +msgid "Invalid tunnel type specified: %s" +msgstr "" + +#: neutron/plugins/openvswitch/common/config.py:32 +msgid "Enable tunneling support" +msgstr "" + +#: neutron/plugins/openvswitch/common/config.py:34 +msgid "Tunnel bridge to use" +msgstr "" + +#: neutron/plugins/openvswitch/common/config.py:36 +msgid "Peer patch port in integration bridge for tunnel bridge" +msgstr "" + +#: neutron/plugins/openvswitch/common/config.py:39 +msgid "Peer patch port in tunnel bridge for integration bridge" +msgstr "" + +#: neutron/plugins/openvswitch/common/config.py:42 +msgid "Local IP address of GRE tunnel endpoints." +msgstr "" + +#: neutron/plugins/openvswitch/common/config.py:45 +msgid "List of :" +msgstr "" + +#: neutron/plugins/openvswitch/common/config.py:47 +msgid "Network type for tenant networks (local, vlan, gre, vxlan, or none)" +msgstr "" + +#: neutron/plugins/openvswitch/common/config.py:55 +msgid "List of :" +msgstr "" + +#: neutron/plugins/openvswitch/common/config.py:57 +msgid "The type of tunnels to use when utilizing tunnels, either 'gre' or 'vxlan'" +msgstr "" + +#: neutron/plugins/openvswitch/common/config.py:67 +msgid "Minimize polling by monitoring ovsdb for interface changes." 
+msgstr "" + +#: neutron/plugins/openvswitch/common/config.py:71 +msgid "" +"The number of seconds to wait before respawning the ovsdb monitor after " +"losing communication with it" +msgstr "" + +#: neutron/plugins/openvswitch/common/config.py:74 +msgid "Network types supported by the agent (gre and/or vxlan)" +msgstr "" + +#: neutron/plugins/openvswitch/common/config.py:79 +msgid "MTU size of veth interfaces" +msgstr "" + +#: neutron/plugins/openvswitch/common/config.py:81 +msgid "" +"Use ml2 l2population mechanism driver to learn remote mac and IPs and " +"improve tunnel scalability" +msgstr "" + +#: neutron/plugins/openvswitch/common/config.py:84 +msgid "Enable local ARP responder if it is supported" +msgstr "" + +#: neutron/plugins/openvswitch/common/config.py:86 +msgid "" +"Set or un-set the don't fragment (DF) bit on outgoing IP packet carrying " +"GRE/VXLAN tunnel" +msgstr "" + +#: neutron/plugins/plumgrid/common/exceptions.py:26 +#, python-format +msgid "PLUMgrid Plugin Error: %(err_msg)s" +msgstr "" + +#: neutron/plugins/plumgrid/common/exceptions.py:30 +#, python-format +msgid "Connection failed with PLUMgrid Director: %(err_msg)s" +msgstr "" + +#: neutron/plugins/plumgrid/drivers/fake_plumlib.py:32 +msgid "Python PLUMgrid Fake Library Started " +msgstr "" + +#: neutron/plugins/plumgrid/drivers/fake_plumlib.py:37 +#, python-format +msgid "Fake Director: %s" +msgstr "" + +#: neutron/plugins/plumgrid/drivers/plumlib.py:38 +msgid "Python PLUMgrid Library Started " +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:45 +msgid "PLUMgrid Director server to connect to" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:47 +msgid "PLUMgrid Director server port to connect to" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:49 +msgid "PLUMgrid Director admin username" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:51 +msgid "PLUMgrid Director admin 
password" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:53 +msgid "PLUMgrid Director server timeout" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:56 +msgid "PLUMgrid Driver" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:73 +msgid "Neutron PLUMgrid Director: Starting Plugin" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:78 +msgid "Neutron PLUMgrid Director: Neutron server with PLUMgrid Plugin has started" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:91 +#, python-format +msgid "Neutron PLUMgrid Director: %s" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:102 +msgid "Neutron PLUMgrid Director: create_network() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:116 +msgid "PLUMgrid Library: create_network() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:131 +msgid "Neutron PLUMgrid Director: update_network() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:143 +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:169 +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:353 +msgid "PLUMgrid Library: update_network() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:158 +msgid "Neutron PLUMgrid Director: delete_network() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:181 +msgid "Neutron PLUMgrid Director: create_port() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:200 +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:227 +msgid "PLUMgrid Library: create_port() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:215 +msgid "Neutron PLUMgrid Director: update_port() called" +msgstr "" + +#: 
neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:243 +msgid "Neutron PLUMgrid Director: delete_port() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:258 +msgid "PLUMgrid Library: delete_port() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:287 +msgid "Neutron PLUMgrid Director: create_subnet() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:311 +msgid "PLUMgrid Library: create_subnet() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:321 +msgid "Neutron PLUMgrid Director: delete_subnet() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:333 +msgid "PLUMgrid Library: delete_subnet() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:341 +msgid "update_subnet() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:365 +msgid "Neutron PLUMgrid Director: create_router() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:377 +msgid "PLUMgrid Library: create_router() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:387 +msgid "Neutron PLUMgrid Director: update_router() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:393 +msgid "PLUMgrid Library: update_router() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:402 +msgid "Neutron PLUMgrid Director: delete_router() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:412 +msgid "PLUMgrid Library: delete_router() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:420 +msgid "Neutron PLUMgrid Director: add_router_interface() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:440 +msgid "PLUMgrid Library: add_router_interface() 
called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:451 +msgid "Neutron PLUMgrid Director: remove_router_interface() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:473 +msgid "PLUMgrid Library: remove_router_interface() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:484 +msgid "Neutron PLUMgrid Director: create_floatingip() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:491 +msgid "PLUMgrid Library: create_floatingip() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:500 +msgid "Neutron PLUMgrid Director: update_floatingip() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:509 +msgid "PLUMgrid Library: update_floatingip() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:519 +msgid "Neutron PLUMgrid Director: delete_floatingip() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:529 +msgid "PLUMgrid Library: delete_floatingip() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:536 +msgid "Neutron PLUMgrid Director: disassociate_floatingips() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:543 +msgid "PLUMgrid Library: disassociate_floatingips() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:573 +msgid "" +"Networks with admin_state_up=False are not supported by PLUMgrid plugin " +"yet." +msgstr "" + +#: neutron/plugins/ryu/ryu_neutron_plugin.py:61 +#, python-format +msgid "get_ofp_rest_api: %s" +msgstr "" + +#: neutron/plugins/ryu/ryu_neutron_plugin.py:125 +msgid "Invalid configuration. 
check ryu.ini" +msgstr "" + +#: neutron/plugins/ryu/agent/ryu_neutron_agent.py:95 +#, python-format +msgid "Could not get IPv4 address from %(nic)s: %(cfg)s" +msgstr "" + +#: neutron/plugins/ryu/agent/ryu_neutron_agent.py:161 +#, python-format +msgid "External port %s" +msgstr "" + +#: neutron/plugins/ryu/agent/ryu_neutron_agent.py:169 +msgid "Get Ryu rest API address" +msgstr "" + +#: neutron/plugins/ryu/agent/ryu_neutron_agent.py:217 +msgid "Ryu rest API port isn't specified" +msgstr "" + +#: neutron/plugins/ryu/agent/ryu_neutron_agent.py:218 +#, python-format +msgid "Going to ofp controller mode %s" +msgstr "" + +#: neutron/plugins/ryu/agent/ryu_neutron_agent.py:294 +#, python-format +msgid "tunnel_ip %s" +msgstr "" + +#: neutron/plugins/ryu/agent/ryu_neutron_agent.py:296 +#, python-format +msgid "ovsdb_port %s" +msgstr "" + +#: neutron/plugins/ryu/agent/ryu_neutron_agent.py:298 +#, python-format +msgid "ovsdb_ip %s" +msgstr "" + +#: neutron/plugins/ryu/agent/ryu_neutron_agent.py:304 +#, python-format +msgid "Initialization failed: %s" +msgstr "" + +#: neutron/plugins/ryu/agent/ryu_neutron_agent.py:307 +msgid "" +"Ryu initialization on the node is done. Agent initialized successfully, " +"now running..." 
+msgstr "" + +#: neutron/plugins/ryu/common/config.py:26 +msgid "OpenFlow REST API location" +msgstr "" + +#: neutron/plugins/ryu/common/config.py:28 +msgid "Minimum tunnel ID to use" +msgstr "" + +#: neutron/plugins/ryu/common/config.py:30 +msgid "Maximum tunnel ID to use" +msgstr "" + +#: neutron/plugins/ryu/common/config.py:32 +msgid "Tunnel IP to use" +msgstr "" + +#: neutron/plugins/ryu/common/config.py:34 +msgid "Tunnel interface to use" +msgstr "" + +#: neutron/plugins/ryu/common/config.py:36 +msgid "OVSDB port to connect to" +msgstr "" + +#: neutron/plugins/ryu/common/config.py:38 +msgid "OVSDB IP to connect to" +msgstr "" + +#: neutron/plugins/ryu/common/config.py:40 +msgid "OVSDB interface to connect to" +msgstr "" + +#: neutron/plugins/ryu/db/api_v2.py:40 +#, python-format +msgid "get_port_from_device() called:port_id=%s" +msgstr "" + +#: neutron/plugins/ryu/db/api_v2.py:76 +#, python-format +msgid "" +"Invalid tunnel key options tunnel_key_min: %(key_min)d tunnel_key_max: " +"%(key_max)d. Using default value" +msgstr "" + +#: neutron/plugins/ryu/db/api_v2.py:156 +#, python-format +msgid "last_key %(last_key)s new_key %(new_key)s" +msgstr "" + +#: neutron/plugins/ryu/db/api_v2.py:159 +msgid "No key found" +msgstr "" + +#: neutron/plugins/ryu/db/api_v2.py:192 +#, python-format +msgid "Transaction retry exhausted (%d). Abandoned tunnel key allocation." +msgstr "" + +#: neutron/plugins/vmware/check_nsx_config.py:47 +#: neutron/plugins/vmware/check_nsx_config.py:82 +#, python-format +msgid "Error '%(err)s' when connecting to controller(s): %(ctl)s." 
+msgstr "" + +#: neutron/plugins/vmware/dhcpmeta_modes.py:55 +#, python-format +msgid "Invalid agent_mode: %s" +msgstr "" + +#: neutron/plugins/vmware/dhcpmeta_modes.py:106 +msgid "network_auto_schedule has been disabled" +msgstr "" + +#: neutron/plugins/vmware/dhcpmeta_modes.py:127 +#, python-format +msgid "Unable to run Neutron with config option '%s', as NSX does not support it" +msgstr "" + +#: neutron/plugins/vmware/dhcpmeta_modes.py:130 +#, python-format +msgid "Unmet dependency for config option '%s'" +msgstr "" + +#: neutron/plugins/vmware/nsx_cluster.py:49 +#, python-format +msgid "" +"Attribute '%s' has been deprecated or moved to a new section. See new " +"configuration file for details." +msgstr "" + +#: neutron/plugins/vmware/nsx_cluster.py:61 +#, python-format +msgid "The following cluster attributes were not specified: %s'" +msgstr "" + +#: neutron/plugins/vmware/api_client/__init__.py:28 +#, python-format +msgid "Invalid connection type: %s" +msgstr "" + +#: neutron/plugins/vmware/api_client/base.py:103 +#, python-format +msgid "[%d] no API providers currently available." +msgstr "" + +#: neutron/plugins/vmware/api_client/base.py:106 +#, python-format +msgid "[%d] Waiting to acquire API client connection." +msgstr "" + +#: neutron/plugins/vmware/api_client/base.py:110 +#, python-format +msgid "[%(rid)d] Connection %(conn)s idle for %(sec)0.2f seconds; reconnecting." +msgstr "" + +#: neutron/plugins/vmware/api_client/base.py:119 +#, python-format +msgid "[%(rid)d] Acquired connection %(conn)s. %(qsize)d connection(s) available." 
+msgstr "" + +#: neutron/plugins/vmware/api_client/base.py:140 +#, python-format +msgid "" +"[%(rid)d] Released connection %(conn)s is not an API provider for the " +"cluster" +msgstr "" + +#: neutron/plugins/vmware/api_client/base.py:150 +#, python-format +msgid "[%(rid)d] Connection returned in bad state, reconnecting to %(conn)s" +msgstr "" + +#: neutron/plugins/vmware/api_client/base.py:176 +#, python-format +msgid "[%(rid)d] Released connection %(conn)s. %(qsize)d connection(s) available." +msgstr "" + +#: neutron/plugins/vmware/api_client/base.py:186 +#, python-format +msgid "Login request for an invalid connection: '%s'" +msgstr "" + +#: neutron/plugins/vmware/api_client/base.py:197 +msgid "Waiting for auth to complete" +msgstr "" + +#: neutron/plugins/vmware/api_client/base.py:239 +#, python-format +msgid "Invalid conn_params value: '%s'" +msgstr "" + +#: neutron/plugins/vmware/api_client/client.py:93 +#, python-format +msgid "Request returns \"%s\"" +msgstr "" + +#: neutron/plugins/vmware/api_client/client.py:106 +#, python-format +msgid "Request timed out: %(method)s to %(url)s" +msgstr "" + +#: neutron/plugins/vmware/api_client/client.py:117 +#, python-format +msgid "Received error code: %s" +msgstr "" + +#: neutron/plugins/vmware/api_client/client.py:118 +#, python-format +msgid "Server Error Message: %s" +msgstr "" + +#: neutron/plugins/vmware/api_client/client.py:124 +#, python-format +msgid "" +"%(method)s to %(url)s, unexpected response code: %(status)d (content = " +"'%(body)s')" +msgstr "" + +#: neutron/plugins/vmware/api_client/client.py:141 +msgid "Unable to determine NSX version. Plugin might not work as expected." 
+msgstr "" + +#: neutron/plugins/vmware/api_client/eventlet_client.py:145 +#, python-format +msgid "Login error \"%s\"" +msgstr "" + +#: neutron/plugins/vmware/api_client/eventlet_client.py:150 +#, python-format +msgid "Saving new authentication cookie '%s'" +msgstr "" + +#: neutron/plugins/vmware/api_client/eventlet_request.py:102 +msgid "Joining an invalid green thread" +msgstr "" + +#: neutron/plugins/vmware/api_client/eventlet_request.py:122 +#, python-format +msgid "[%d] Request timeout." +msgstr "" + +#: neutron/plugins/vmware/api_client/eventlet_request.py:123 +msgid "Request timeout" +msgstr "" + +#: neutron/plugins/vmware/api_client/eventlet_request.py:149 +#, python-format +msgid "[%(rid)d] Completed request '%(method)s %(url)s': %(status)s" +msgstr "" + +#: neutron/plugins/vmware/api_client/eventlet_request.py:156 +#, python-format +msgid "[%(rid)d] Error while handling request: %(req)s" +msgstr "" + +#: neutron/plugins/vmware/api_client/eventlet_request.py:212 +#, python-format +msgid "[%(rid)d] Failed to parse API provider: %(e)s" +msgstr "" + +#: neutron/plugins/vmware/api_client/exception.py:41 +msgid "Server denied session's authentication credentials." +msgstr "" + +#: neutron/plugins/vmware/api_client/exception.py:45 +msgid "An entity referenced in the request was not found." +msgstr "" + +#: neutron/plugins/vmware/api_client/exception.py:49 +msgid "Request conflicts with configuration on a different entity." +msgstr "" + +#: neutron/plugins/vmware/api_client/exception.py:54 +msgid "" +"Request could not completed because the associated resource could not be " +"reached." +msgstr "" + +#: neutron/plugins/vmware/api_client/exception.py:59 +msgid "The request is forbidden from accessing the referenced resource." +msgstr "" + +#: neutron/plugins/vmware/api_client/exception.py:64 +msgid "Create/Update actions are forbidden when in read-only mode." 
+msgstr "" + +#: neutron/plugins/vmware/api_client/exception.py:72 +msgid "The server is unable to fulfill the request due to a bad syntax" +msgstr "" + +#: neutron/plugins/vmware/api_client/exception.py:77 +msgid "The backend received an invalid security certificate." +msgstr "" + +#: neutron/plugins/vmware/api_client/request.py:85 +msgid "No API connections available" +msgstr "" + +#: neutron/plugins/vmware/api_client/request.py:90 +#, python-format +msgid "[%(rid)d] Issuing - request %(conn)s" +msgstr "" + +#: neutron/plugins/vmware/api_client/request.py:116 +#, python-format +msgid "Setting X-Nvp-Wait-For-Config-Generation request header: '%s'" +msgstr "" + +#: neutron/plugins/vmware/api_client/request.py:122 +#, python-format +msgid "[%(rid)d] Exception issuing request: %(e)s" +msgstr "" + +#: neutron/plugins/vmware/api_client/request.py:130 +#, python-format +msgid "[%(rid)d] Completed request '%(conn)s': %(status)s (%(elapsed)s seconds)" +msgstr "" + +#: neutron/plugins/vmware/api_client/request.py:139 +#, python-format +msgid "Reading X-Nvp-config-Generation response header: '%s'" +msgstr "" + +#: neutron/plugins/vmware/api_client/request.py:168 +#, python-format +msgid "[%d] Maximum redirects exceeded, aborting request" +msgstr "" + +#: neutron/plugins/vmware/api_client/request.py:178 +#, python-format +msgid "[%(rid)d] Redirecting request to: %(conn)s" +msgstr "" + +#: neutron/plugins/vmware/api_client/request.py:191 +#, python-format +msgid "[%(rid)d] Request '%(method)s %(url)s' received: %(status)s" +msgstr "" + +#: neutron/plugins/vmware/api_client/request.py:195 +#, python-format +msgid "Server error return: %s" +msgstr "" + +#: neutron/plugins/vmware/api_client/request.py:199 +msgid "Invalid server response" +msgstr "" + +#: neutron/plugins/vmware/api_client/request.py:204 +#, python-format +msgid "[%(rid)d] Failed request '%(conn)s': '%(msg)s' (%(elapsed)s seconds)" +msgstr "" + +#: neutron/plugins/vmware/api_client/request.py:238 +#, python-format 
+msgid "[%d] Received redirect status without location header field" +msgstr "" + +#: neutron/plugins/vmware/api_client/request.py:255 +#, python-format +msgid "[%(rid)d] Received invalid redirect location: '%(url)s'" +msgstr "" + +#: neutron/plugins/vmware/api_client/request.py:259 +#, python-format +msgid "[%(rid)d] Received malformed redirect location: %(url)s" +msgstr "" + +#: neutron/plugins/vmware/api_client/version.py:30 +#, python-format +msgid "Unable to fetch NSX version from response headers :%s" +msgstr "" + +#: neutron/plugins/vmware/common/config.py:41 +msgid "" +"Maximum number of ports of a logical switch on a bridged transport zone " +"(default 5000)" +msgstr "" + +#: neutron/plugins/vmware/common/config.py:45 +msgid "" +"Maximum number of ports of a logical switch on an overlay transport zone " +"(default 256)" +msgstr "" + +#: neutron/plugins/vmware/common/config.py:49 +msgid "Maximum concurrent connections to each NSX controller." +msgstr "" + +#: neutron/plugins/vmware/common/config.py:54 +msgid "" +"Number of seconds a generation id should be valid for (default -1 meaning" +" do not time out)" +msgstr "" + +#: neutron/plugins/vmware/common/config.py:58 +msgid "" +"If set to access_network this enables a dedicated connection to the " +"metadata proxy for metadata server access via Neutron router. If set to " +"dhcp_host_route this enables host route injection via the dhcp agent. " +"This option is only useful if running on a host that does not support " +"namespaces otherwise access_network should be used." +msgstr "" + +#: neutron/plugins/vmware/common/config.py:67 +msgid "" +"The default network tranport type to use (stt, gre, bridge, ipsec_gre, or" +" ipsec_stt)" +msgstr "" + +#: neutron/plugins/vmware/common/config.py:71 +msgid "The mode used to implement DHCP/metadata services." 
+msgstr "" + +#: neutron/plugins/vmware/common/config.py:73 +msgid "" +"The default option leverages service nodes to perform packet replication " +"though one could set to this to 'source' to perform replication locally. " +"This is useful if one does not want to deploy a service node(s)." +msgstr "" + +#: neutron/plugins/vmware/common/config.py:82 +msgid "" +"Interval in seconds between runs of the state synchronization task. Set " +"it to 0 to disable it" +msgstr "" + +#: neutron/plugins/vmware/common/config.py:86 +msgid "" +"Maximum value for the additional random delay in seconds between runs of " +"the state synchronization task" +msgstr "" + +#: neutron/plugins/vmware/common/config.py:91 +msgid "" +"Minimum delay, in seconds, between two state synchronization queries to " +"NSX. It must not exceed state_sync_interval" +msgstr "" + +#: neutron/plugins/vmware/common/config.py:96 +msgid "" +"Minimum number of resources to be retrieved from NSX during state " +"synchronization" +msgstr "" + +#: neutron/plugins/vmware/common/config.py:100 +msgid "" +"Always read operational status from backend on show operations. Enabling " +"this option might slow down the system." 
+msgstr "" + +#: neutron/plugins/vmware/common/config.py:109 +msgid "User name for NSX controllers in this cluster" +msgstr "" + +#: neutron/plugins/vmware/common/config.py:114 +msgid "Password for NSX controllers in this cluster" +msgstr "" + +#: neutron/plugins/vmware/common/config.py:117 +msgid "Total time limit for a cluster request" +msgstr "" + +#: neutron/plugins/vmware/common/config.py:120 +msgid "Time before aborting a request" +msgstr "" + +#: neutron/plugins/vmware/common/config.py:123 +msgid "Number of time a request should be retried" +msgstr "" + +#: neutron/plugins/vmware/common/config.py:126 +msgid "Number of times a redirect should be followed" +msgstr "" + +#: neutron/plugins/vmware/common/config.py:129 +msgid "Lists the NSX controllers in this cluster" +msgstr "" + +#: neutron/plugins/vmware/common/config.py:134 +msgid "" +"This is uuid of the default NSX Transport zone that will be used for " +"creating tunneled isolated \"Neutron\" networks. It needs to be created " +"in NSX before starting Neutron with the nsx plugin." 
+msgstr "" + +#: neutron/plugins/vmware/common/config.py:139 +msgid "" +"Unique identifier of the NSX L3 Gateway service which will be used for " +"implementing routers and floating IPs" +msgstr "" + +#: neutron/plugins/vmware/common/config.py:143 +msgid "" +"Unique identifier of the NSX L2 Gateway service which will be used by " +"default for network gateways" +msgstr "" + +#: neutron/plugins/vmware/common/config.py:146 +msgid "" +"Unique identifier of the Service Cluster which will be used by logical " +"services like dhcp and metadata" +msgstr "" + +#: neutron/plugins/vmware/common/config.py:149 +msgid "" +"Name of the interface on a L2 Gateway transport nodewhich should be used " +"by default when setting up a network connection" +msgstr "" + +#: neutron/plugins/vmware/common/config.py:159 +msgid "User name for vsm" +msgstr "" + +#: neutron/plugins/vmware/common/config.py:163 +msgid "Password for vsm" +msgstr "" + +#: neutron/plugins/vmware/common/config.py:165 +msgid "uri for vsm" +msgstr "" + +#: neutron/plugins/vmware/common/config.py:167 +msgid "Optional parameter identifying the ID of datacenter to deploy NSX Edges" +msgstr "" + +#: neutron/plugins/vmware/common/config.py:170 +#: neutron/plugins/vmware/common/config.py:176 +msgid "Optional parameter identifying the ID of datastore to deploy NSX Edges" +msgstr "" + +#: neutron/plugins/vmware/common/config.py:173 +msgid "Optional parameter identifying the ID of resource to deploy NSX Edges" +msgstr "" + +#: neutron/plugins/vmware/common/config.py:179 +msgid "Network ID for physical network connectivity" +msgstr "" + +#: neutron/plugins/vmware/common/config.py:182 +msgid "Task status check interval" +msgstr "" + +#: neutron/plugins/vmware/common/config.py:196 +#, python-format +msgid "Invalid replication_mode: %s" +msgstr "" + +#: neutron/plugins/vmware/common/exceptions.py:21 +#, python-format +msgid "An unexpected error occurred in the NSX Plugin: %(err_msg)s" +msgstr "" + +#: 
neutron/plugins/vmware/common/exceptions.py:25 +#, python-format +msgid "Unable to fulfill request with version %(version)s." +msgstr "" + +#: neutron/plugins/vmware/common/exceptions.py:29 +#, python-format +msgid "Invalid NSX connection parameters: %(conn_params)s" +msgstr "" + +#: neutron/plugins/vmware/common/exceptions.py:33 +#, python-format +msgid "" +"Invalid cluster values: %(invalid_attrs)s. Please ensure that these " +"values are specified in the [DEFAULT] section of the NSX plugin ini file." +msgstr "" + +#: neutron/plugins/vmware/common/exceptions.py:39 +#, python-format +msgid "Unable to find cluster config entry for nova zone: %(nova_zone)s" +msgstr "" + +#: neutron/plugins/vmware/common/exceptions.py:44 +#, python-format +msgid "" +"Unable to create port on network %(network)s. Maximum number of ports " +"reached" +msgstr "" + +#: neutron/plugins/vmware/common/exceptions.py:49 +#, python-format +msgid "" +"While retrieving NAT rules, %(actual_rules)s were found whereas rules in " +"the (%(min_rules)s,%(max_rules)s) interval were expected" +msgstr "" + +#: neutron/plugins/vmware/common/exceptions.py:55 +#, python-format +msgid "Invalid NSX attachment type '%(attachment_type)s'" +msgstr "" + +#: neutron/plugins/vmware/common/exceptions.py:59 +msgid "" +"The networking backend is currently in maintenance mode and therefore " +"unable to accept requests which modify its state. Please try later." +msgstr "" + +#: neutron/plugins/vmware/common/exceptions.py:65 +#, python-format +msgid "Gateway Service %(gateway)s is already in use" +msgstr "" + +#: neutron/plugins/vmware/common/exceptions.py:69 +msgid "" +"An invalid security certificate was specified for the gateway device. 
" +"Certificates must be enclosed between '-----BEGIN CERTIFICATE-----' and '" +"-----END CERTIFICATE-----'" +msgstr "" + +#: neutron/plugins/vmware/common/exceptions.py:76 +#, python-format +msgid "Quota exceeded for Vcns resource: %(overs)s: %(err_msg)s" +msgstr "" + +#: neutron/plugins/vmware/common/exceptions.py:80 +#, python-format +msgid "Router %(router_id)s is in use by Loadbalancer Service %(vip_id)s" +msgstr "" + +#: neutron/plugins/vmware/common/exceptions.py:85 +#, python-format +msgid "Router %(router_id)s is in use by firewall Service %(firewall_id)s" +msgstr "" + +#: neutron/plugins/vmware/common/exceptions.py:90 +#, python-format +msgid "Error happened in NSX VCNS Driver: %(err_msg)s" +msgstr "" + +#: neutron/plugins/vmware/common/exceptions.py:94 +#, python-format +msgid "" +"Router %(router_id)s is not in 'ACTIVE' status, thus unable to provide " +"advanced service" +msgstr "" + +#: neutron/plugins/vmware/common/exceptions.py:99 +#, python-format +msgid "" +"Service cluster: '%(cluster_id)s' is unavailable. 
Please, check NSX setup" +" and/or configuration" +msgstr "" + +#: neutron/plugins/vmware/common/exceptions.py:104 +#, python-format +msgid "" +"An error occurred while connecting LSN %(lsn_id)s and network %(net_id)s " +"via port %(port_id)s" +msgstr "" + +#: neutron/plugins/vmware/common/exceptions.py:113 +#, python-format +msgid "Unable to find LSN for %(entity)s %(entity_id)s" +msgstr "" + +#: neutron/plugins/vmware/common/exceptions.py:117 +#, python-format +msgid "Unable to find port for LSN %(lsn_id)s and %(entity)s %(entity_id)s" +msgstr "" + +#: neutron/plugins/vmware/common/exceptions.py:122 +#, python-format +msgid "Unable to migrate network '%(net_id)s' to LSN: %(reason)s" +msgstr "" + +#: neutron/plugins/vmware/common/exceptions.py:126 +#, python-format +msgid "Configuration conflict on Logical Service Node %(lsn_id)s" +msgstr "" + +#: neutron/plugins/vmware/common/nsx_utils.py:65 +#, python-format +msgid "Unable to find NSX switches for Neutron network %s" +msgstr "" + +#: neutron/plugins/vmware/common/nsx_utils.py:112 +#, python-format +msgid "Unable to find NSX port for Neutron port %s" +msgstr "" + +#: neutron/plugins/vmware/common/nsx_utils.py:152 +#, python-format +msgid "Unable to find NSX security profile for Neutron security group %s" +msgstr "" + +#: neutron/plugins/vmware/common/nsx_utils.py:156 +#, python-format +msgid "Multiple NSX security profiles found for Neutron security group %s" +msgstr "" + +#: neutron/plugins/vmware/common/nsx_utils.py:187 +#, python-format +msgid "Unable to find NSX router for Neutron router %s" +msgstr "" + +#: neutron/plugins/vmware/common/nsx_utils.py:245 +#, python-format +msgid "" +"Unable to retrieve operational status for gateway devices belonging to " +"tenant: %s" +msgstr "" + +#: neutron/plugins/vmware/common/nsx_utils.py:248 +msgid "Unable to retrieve operational status for gateway devices" +msgstr "" + +#: neutron/plugins/vmware/common/sync.py:226 +#, python-format +msgid "" +"Minimum request 
delay:%(req_delay)s must not exceed synchronization " +"interval:%(sync_interval)s" +msgstr "" + +#: neutron/plugins/vmware/common/sync.py:259 +#, python-format +msgid "Logical switch for neutron network %s not found on NSX." +msgstr "" + +#: neutron/plugins/vmware/common/sync.py:294 +#: neutron/plugins/vmware/common/sync.py:376 +#: neutron/plugins/vmware/common/sync.py:471 +#, python-format +msgid "Updating status for neutron resource %(q_id)s to: %(status)s" +msgstr "" + +#: neutron/plugins/vmware/common/sync.py:346 +#, python-format +msgid "Logical router for neutron router %s not found on NSX." +msgstr "" + +#: neutron/plugins/vmware/common/sync.py:396 +#, python-format +msgid "Unable to find Neutron router id for NSX logical router: %s" +msgstr "" + +#: neutron/plugins/vmware/common/sync.py:438 +#, python-format +msgid "Logical switch port for neutron port %s not found on NSX." +msgstr "" + +#: neutron/plugins/vmware/common/sync.py:531 +#, python-format +msgid "" +"Requested page size is %(cur_chunk_size)d.It might be necessary to do " +"%(num_requests)d round-trips to NSX for fetching data. Please tune sync " +"parameters to ensure chunk size is less than %(max_page_size)d" +msgstr "" + +#: neutron/plugins/vmware/common/sync.py:564 +#, python-format +msgid "Fetching up to %s resources from NSX backend" +msgstr "" + +#: neutron/plugins/vmware/common/sync.py:584 +#, python-format +msgid "Total data size: %d" +msgstr "" + +#: neutron/plugins/vmware/common/sync.py:588 +#, python-format +msgid "" +"Fetched %(num_lswitches)d logical switches, %(num_lswitchports)d logical " +"switch ports,%(num_lrouters)d logical routers" +msgstr "" + +#: neutron/plugins/vmware/common/sync.py:604 +#, python-format +msgid "Running state synchronization task. Chunk: %s" +msgstr "" + +#: neutron/plugins/vmware/common/sync.py:614 +#, python-format +msgid "" +"An error occurred while communicating with NSX backend. 
Will retry " +"synchronization in %d seconds" +msgstr "" + +#: neutron/plugins/vmware/common/sync.py:618 +#, python-format +msgid "Time elapsed querying NSX: %s" +msgstr "" + +#: neutron/plugins/vmware/common/sync.py:625 +#, python-format +msgid "Number of chunks: %d" +msgstr "" + +#: neutron/plugins/vmware/common/sync.py:643 +#, python-format +msgid "Time elapsed hashing data: %s" +msgstr "" + +#: neutron/plugins/vmware/common/sync.py:655 +#, python-format +msgid "Synchronization for chunk %(chunk_num)d of %(total_chunks)d performed" +msgstr "" + +#: neutron/plugins/vmware/common/sync.py:667 +#, python-format +msgid "Time elapsed at end of sync: %s" +msgstr "" + +#: neutron/plugins/vmware/common/utils.py:66 +#, python-format +msgid "Specified name:'%s' exceeds maximum length. It will be truncated on NSX" +msgstr "" + +#: neutron/plugins/vmware/dbexts/db.py:76 +#, python-format +msgid "Port mapping for %s already available" +msgstr "" + +#: neutron/plugins/vmware/dbexts/db.py:123 +#, python-format +msgid "NSX identifiers for neutron port %s not yet stored in Neutron DB" +msgstr "" + +#: neutron/plugins/vmware/dbexts/db.py:134 +#, python-format +msgid "NSX identifiers for neutron router %s not yet stored in Neutron DB" +msgstr "" + +#: neutron/plugins/vmware/dbexts/db.py:149 +#, python-format +msgid "NSX identifiers for neutron security group %s not yet stored in Neutron DB" +msgstr "" + +#: neutron/plugins/vmware/dbexts/lsn_db.py:87 +#: neutron/plugins/vmware/dhcp_meta/lsnmanager.py:71 +#, python-format +msgid "Unable to find Logical Service Node for network %s" +msgstr "" + +#: neutron/plugins/vmware/dbexts/networkgw_db.py:45 +#, python-format +msgid "" +"Network Gateway '%(gateway_id)s' still has active mappings with one or " +"more neutron networks." 
+msgstr "" + +#: neutron/plugins/vmware/dbexts/networkgw_db.py:50 +#, python-format +msgid "Network Gateway %(gateway_id)s could not be found" +msgstr "" + +#: neutron/plugins/vmware/dbexts/networkgw_db.py:54 +#, python-format +msgid "" +"Network Gateway Device '%(device_id)s' is still used by one or more " +"network gateways." +msgstr "" + +#: neutron/plugins/vmware/dbexts/networkgw_db.py:59 +#, python-format +msgid "Network Gateway Device %(device_id)s could not be found." +msgstr "" + +#: neutron/plugins/vmware/dbexts/networkgw_db.py:63 +#, python-format +msgid "" +"Port '%(port_id)s' is owned by '%(device_owner)s' and therefore cannot be" +" deleted directly via the port API." +msgstr "" + +#: neutron/plugins/vmware/dbexts/networkgw_db.py:68 +#, python-format +msgid "" +"The specified mapping '%(mapping)s' is already in use on network gateway " +"'%(gateway_id)s'." +msgstr "" + +#: neutron/plugins/vmware/dbexts/networkgw_db.py:73 +#, python-format +msgid "" +"Multiple network connections found on '%(gateway_id)s' with provided " +"criteria." +msgstr "" + +#: neutron/plugins/vmware/dbexts/networkgw_db.py:78 +#, python-format +msgid "" +"The connection %(network_mapping_info)s was not found on the network " +"gateway '%(network_gateway_id)s'" +msgstr "" + +#: neutron/plugins/vmware/dbexts/networkgw_db.py:83 +#, python-format +msgid "The network gateway %(gateway_id)s cannot be updated or deleted" +msgstr "" + +#: neutron/plugins/vmware/dbexts/networkgw_db.py:191 +msgid "" +"A network identifier must be specified when connecting a network to a " +"network gateway. Unable to complete operation" +msgstr "" + +#: neutron/plugins/vmware/dbexts/networkgw_db.py:197 +#, python-format +msgid "" +"Invalid keys found among the ones provided in request body: " +"%(connection_attrs)s." 
+msgstr "" + +#: neutron/plugins/vmware/dbexts/networkgw_db.py:203 +msgid "" +"In order to specify a segmentation id the segmentation type must be " +"specified as well" +msgstr "" + +#: neutron/plugins/vmware/dbexts/networkgw_db.py:207 +msgid "Cannot specify a segmentation id when the segmentation type is flat" +msgstr "" + +#: neutron/plugins/vmware/dbexts/networkgw_db.py:262 +#, python-format +msgid "Created network gateway with id:%s" +msgstr "" + +#: neutron/plugins/vmware/dbexts/networkgw_db.py:274 +#, python-format +msgid "Updated network gateway with id:%s" +msgstr "" + +#: neutron/plugins/vmware/dbexts/networkgw_db.py:289 +#, python-format +msgid "Network gateway '%s' was destroyed." +msgstr "" + +#: neutron/plugins/vmware/dbexts/networkgw_db.py:306 +#, python-format +msgid "Connecting network '%(network_id)s' to gateway '%(network_gateway_id)s'" +msgstr "" + +#: neutron/plugins/vmware/dbexts/networkgw_db.py:347 +#, python-format +msgid "" +"Requested network '%(network_id)s' not found.Unable to create network " +"connection on gateway '%(network_gateway_id)s" +msgstr "" + +#: neutron/plugins/vmware/dbexts/networkgw_db.py:355 +#, python-format +msgid "" +"Gateway port for '%(network_gateway_id)s' created on network " +"'%(network_id)s':%(port_id)s" +msgstr "" + +#: neutron/plugins/vmware/dbexts/networkgw_db.py:371 +#, python-format +msgid "Ensured no Ip addresses are configured on port %s" +msgstr "" + +#: neutron/plugins/vmware/dbexts/networkgw_db.py:381 +#, python-format +msgid "" +"Disconnecting network '%(network_id)s' from gateway " +"'%(network_gateway_id)s'" +msgstr "" + +#: neutron/plugins/vmware/dbexts/networkgw_db.py:475 +#, python-format +msgid "Created network gateway device: %s" +msgstr "" + +#: neutron/plugins/vmware/dbexts/networkgw_db.py:486 +#, python-format +msgid "Updated network gateway device: %s" +msgstr "" + +#: neutron/plugins/vmware/dbexts/networkgw_db.py:499 +#, python-format +msgid "Deleted network gateway device: %s." 
+msgstr "" + +#: neutron/plugins/vmware/dbexts/nsxrouter.py:61 +#, python-format +msgid "Nsx router extension successfully processed for router:%s" +msgstr "" + +#: neutron/plugins/vmware/dbexts/qos_db.py:291 +#, python-format +msgid "DSCP value (%s) will be ignored with 'trusted' marking" +msgstr "" + +#: neutron/plugins/vmware/dbexts/vcns_db.py:77 +#, python-format +msgid "Rule Resource binding with id:%s not found!" +msgstr "" + +#: neutron/plugins/vmware/dbexts/vcns_db.py:94 +msgid "Rule Resource binding not found!" +msgstr "" + +#: neutron/plugins/vmware/dbexts/vcns_db.py:123 +#: neutron/plugins/vmware/dbexts/vcns_db.py:133 +#, python-format +msgid "VIP Resource binding with id:%s not found!" +msgstr "" + +#: neutron/plugins/vmware/dbexts/vcns_db.py:162 +#, python-format +msgid "" +"Pool Resource binding with edge_id:%(edge_id)s pool_vseid:%(pool_vseid)s " +"not found!" +msgstr "" + +#: neutron/plugins/vmware/dbexts/vcns_db.py:174 +#, python-format +msgid "Pool Resource binding with id:%s not found!" +msgstr "" + +#: neutron/plugins/vmware/dbexts/vcns_db.py:200 +#, python-format +msgid "Monitor Resource binding with id:%s not found!" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/lsnmanager.py:40 +msgid "" +"Pull LSN information from NSX in case it is missing from the local data " +"store. This is useful to rebuild the local store in case of server " +"recovery." 
+msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/lsnmanager.py:82 +#, python-format +msgid "Unable to create LSN for network %s" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/lsnmanager.py:90 +#, python-format +msgid "Unable to delete Logical Service Node %s" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/lsnmanager.py:107 +#, python-format +msgid "" +"Unable to find Logical Service Node Port for LSN %(lsn_id)s and subnet " +"%(subnet_id)s" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/lsnmanager.py:129 +#, python-format +msgid "" +"Unable to find Logical Service Node Port for LSN %(lsn_id)s and mac " +"address %(mac)s" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/lsnmanager.py:149 +#, python-format +msgid "Unable to create port for LSN %s" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/lsnmanager.py:157 +#, python-format +msgid "Unable to delete LSN Port %s" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/lsnmanager.py:174 +#, python-format +msgid "Metadata port not found while attempting to delete it from network %s" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/lsnmanager.py:177 +#, python-format +msgid "Unable to find Logical Services Node Port with MAC %s" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/lsnmanager.py:262 +#, python-format +msgid "" +"Unable to configure dhcp for Logical Service Node %(lsn_id)s and port " +"%(lsn_port_id)s" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/lsnmanager.py:283 +#, python-format +msgid "Unable to configure metadata for subnet %s" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/lsnmanager.py:305 +#, python-format +msgid "Error while configuring LSN port %s" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/lsnmanager.py:377 +#, python-format +msgid "Unable to save LSN for network %s" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/lsnmanager.py:443 +#, python-format +msgid "Unable to save LSN port for subnet %s" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/migration.py:81 +#, 
python-format +msgid "Port %s is already gone" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/migration.py:112 +msgid "LSN already exist" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/migration.py:116 +msgid "Cannot migrate an external network" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/migration.py:125 +msgid "Cannot migrate a 'metadata' network" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/migration.py:128 +msgid "Unable to support multiple subnets per network" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/nsx.py:39 +msgid "Comma separated list of additional domain name servers" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/nsx.py:47 +msgid "Default DHCP lease time" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/nsx.py:55 +msgid "IP address used by Metadata server." +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/nsx.py:59 +msgid "TCP Port used by Metadata server." +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/nsx.py:136 +#, python-format +msgid "" +"Error while creating subnet %(cidr)s for network %(network)s. 
Please, " +"contact administrator" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/nsx.py:206 +#: neutron/plugins/vmware/dhcp_meta/nsx.py:224 +#, python-format +msgid "Performing DHCP %(action)s for resource: %(resource)s" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/nsx.py:211 +#, python-format +msgid "Network %s is external: no LSN to create" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/nsx.py:219 +#, python-format +msgid "Logical Services Node for network %s configured successfully" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/nsx.py:241 +#, python-format +msgid "Error while configuring DHCP for port %s" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/nsx.py:253 +#, python-format +msgid "DHCP is disabled for subnet %s: nothing to do" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/nsx.py:272 +#, python-format +msgid "DHCP for port %s configured successfully" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/nsx.py:280 +#, python-format +msgid "Network %s is external: nothing to do" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/nsx.py:288 +#, python-format +msgid "Configuring metadata entry for port %s" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/nsx.py:300 +#, python-format +msgid "Metadata for port %s configured successfully" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/nsx.py:304 +#, python-format +msgid "Handle metadata access via router: %(r)s and interface %(i)s" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/nsx.py:321 +#, python-format +msgid "Metadata for router %s handled successfully" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/rpc.py:77 +#, python-format +msgid "Subnet %s does not have a gateway, the metadata route will not be created" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/rpc.py:101 +msgid "Metadata access network is disabled" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/rpc.py:104 +msgid "" +"Overlapping IPs must be enabled in order to setup the metadata access " +"network" +msgstr "" + 
+#: neutron/plugins/vmware/dhcp_meta/rpc.py:124 +#, python-format +msgid "" +"No router interface found for router '%s'. No metadata access network " +"should be created or destroyed" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/rpc.py:132 +#, python-format +msgid "" +"An error occurred while operating on the metadata access network for " +"router:'%s'" +msgstr "" + +#: neutron/plugins/vmware/extensions/networkgw.py:86 +msgid "Cannot create a gateway with an empty device list" +msgstr "" + +#: neutron/plugins/vmware/extensions/networkgw.py:102 +#, python-format +msgid "Unexpected keys found in device description:%s" +msgstr "" + +#: neutron/plugins/vmware/extensions/networkgw.py:106 +#, python-format +msgid "%s: provided data are not iterable" +msgstr "" + +#: neutron/plugins/vmware/extensions/networkgw.py:113 +msgid "A connector type is required to create a gateway device" +msgstr "" + +#: neutron/plugins/vmware/extensions/networkgw.py:122 +#, python-format +msgid "Unknown connector type: %s" +msgstr "" + +#: neutron/plugins/vmware/extensions/networkgw.py:129 +msgid "Number of network gateways allowed per tenant, -1 for unlimited" +msgstr "" + +#: neutron/plugins/vmware/extensions/qos.py:36 +msgid "Need to be admin in order to create queue called default" +msgstr "" + +#: neutron/plugins/vmware/extensions/qos.py:40 +msgid "Default queue already exists." +msgstr "" + +#: neutron/plugins/vmware/extensions/qos.py:44 +#, python-format +msgid "Invalid value for dscp %(data)s must be integer value between 0 and 63." +msgstr "" + +#: neutron/plugins/vmware/extensions/qos.py:49 +msgid "Invalid bandwidth rate, min greater than max." +msgstr "" + +#: neutron/plugins/vmware/extensions/qos.py:53 +#, python-format +msgid "Invalid bandwidth rate, %(data)s must be a non negative integer." 
+msgstr "" + +#: neutron/plugins/vmware/extensions/qos.py:58 +#, python-format +msgid "Queue %(id)s does not exist" +msgstr "" + +#: neutron/plugins/vmware/extensions/qos.py:62 +msgid "Unable to delete queue attached to port." +msgstr "" + +#: neutron/plugins/vmware/extensions/qos.py:66 +msgid "Port is not associated with lqueue" +msgstr "" + +#: neutron/plugins/vmware/extensions/qos.py:77 +#, python-format +msgid "'%s' must be a non negative integer." +msgstr "" + +#: neutron/plugins/vmware/nsxlib/__init__.py:77 +#, python-format +msgid "Error. %(type)s exception: %(exc)s." +msgstr "" + +#: neutron/plugins/vmware/nsxlib/__init__.py:81 +#, python-format +msgid "locals=[%s]" +msgstr "" + +#: neutron/plugins/vmware/nsxlib/lsn.py:173 +#, python-format +msgid "" +"Attempt to plug Logical Services Node %(lsn)s into network with port " +"%(port)s failed. PatchAttachment already exists with another port" +msgstr "" + +#: neutron/plugins/vmware/nsxlib/router.py:252 +#, python-format +msgid "Cannot update NSX routes %(routes)s for router %(router_id)s" +msgstr "" + +#: neutron/plugins/vmware/nsxlib/router.py:346 +#, python-format +msgid "Created logical port %(lport_uuid)s on logical router %(lrouter_uuid)s" +msgstr "" + +#: neutron/plugins/vmware/nsxlib/router.py:374 +#, python-format +msgid "Updated logical port %(lport_uuid)s on logical router %(lrouter_uuid)s" +msgstr "" + +#: neutron/plugins/vmware/nsxlib/router.py:385 +#, python-format +msgid "" +"Delete logical router port %(lport_uuid)s on logical router " +"%(lrouter_uuid)s" +msgstr "" + +#: neutron/plugins/vmware/nsxlib/router.py:449 +#, python-format +msgid "Invalid keys for NAT match: %s" +msgstr "" + +#: neutron/plugins/vmware/nsxlib/router.py:455 +#, python-format +msgid "Creating NAT rule: %s" +msgstr "" + +#: neutron/plugins/vmware/nsxlib/router.py:470 +msgid "" +"No SNAT rules cannot be applied as they are not available in this version" +" of the NSX platform" +msgstr "" + +#: 
neutron/plugins/vmware/nsxlib/router.py:475 +msgid "" +"No DNAT rules cannot be applied as they are not available in this version" +" of the NSX platform" +msgstr "" + +#: neutron/plugins/vmware/nsxlib/router.py:609 +#, python-format +msgid "Router Port %(lport_id)s not found on router %(lrouter_id)s" +msgstr "" + +#: neutron/plugins/vmware/nsxlib/router.py:614 +#, python-format +msgid "" +"An exception occurred while updating IP addresses on a router logical " +"port:%s" +msgstr "" + +#: neutron/plugins/vmware/nsxlib/secgroup.py:94 +#, python-format +msgid "Created Security Profile: %s" +msgstr "" + +#: neutron/plugins/vmware/nsxlib/secgroup.py:120 +#, python-format +msgid "Updated Security Profile: %s" +msgstr "" + +#: neutron/plugins/vmware/nsxlib/secgroup.py:140 +#, python-format +msgid "Unable to find security profile %s on NSX backend" +msgstr "" + +#: neutron/plugins/vmware/nsxlib/switch.py:130 +#, python-format +msgid "Created logical switch: %s" +msgstr "" + +#: neutron/plugins/vmware/nsxlib/switch.py:150 +#: neutron/plugins/vmware/nsxlib/switch.py:165 +#, python-format +msgid "Network not found, Error: %s" +msgstr "" + +#: neutron/plugins/vmware/nsxlib/switch.py:188 +msgid "Port or Network not found" +msgstr "" + +#: neutron/plugins/vmware/nsxlib/switch.py:247 +#, python-format +msgid "Lswitch %s not found in NSX" +msgstr "" + +#: neutron/plugins/vmware/nsxlib/switch.py:256 +msgid "Unable to get ports" +msgstr "" + +#: neutron/plugins/vmware/nsxlib/switch.py:273 +#, python-format +msgid "" +"Looking for port with q_port_id tag '%(neutron_port_id)s' on: " +"'%(lswitch_uuid)s'" +msgstr "" + +#: neutron/plugins/vmware/nsxlib/switch.py:281 +#, python-format +msgid "" +"Found '%(num_ports)d' ports with q_port_id tag: '%(neutron_port_id)s'. " +"Only 1 was expected." 
+msgstr "" + +#: neutron/plugins/vmware/nsxlib/switch.py:290 +#, python-format +msgid "get_port() %(network)s %(port)s" +msgstr "" + +#: neutron/plugins/vmware/nsxlib/switch.py:298 +#: neutron/plugins/vmware/nsxlib/switch.py:329 +#, python-format +msgid "Port or Network not found, Error: %s" +msgstr "" + +#: neutron/plugins/vmware/nsxlib/switch.py:324 +#, python-format +msgid "Updated logical port %(result)s on logical switch %(uuid)s" +msgstr "" + +#: neutron/plugins/vmware/nsxlib/switch.py:359 +#, python-format +msgid "Created logical port %(result)s on logical switch %(uuid)s" +msgstr "" + +#: neutron/plugins/vmware/nsxlib/switch.py:371 +#, python-format +msgid "Port not found, Error: %s" +msgstr "" + +#: neutron/plugins/vmware/nsxlib/versioning.py:56 +msgid "Operation may not be supported" +msgstr "" + +#: neutron/plugins/vmware/nsxlib/versioning.py:64 +msgid "" +"NSX version is not set. Unable to complete request correctly. Check log " +"for NSX communication errors." +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:211 +#, python-format +msgid "Unable to process default l2 gw service:%s" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:246 +#, python-format +msgid "Created NSX router port:%s" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:248 +#: neutron/plugins/vmware/plugins/service.py:440 +#, python-format +msgid "Unable to create port on NSX logical router %s" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:251 +#, python-format +msgid "" +"Unable to create logical router port for neutron port id %(port_id)s on " +"router %(nsx_router_id)s" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:329 +#, python-format +msgid "Attached %(att)s to NSX router port %(port)s" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:335 +#, python-format +msgid "" +"Unable to plug attachment in NSX logical router port %(r_port_id)s, " +"associated with Neutron %(q_port_id)s" +msgstr "" + +#: 
neutron/plugins/vmware/plugins/base.py:341 +#, python-format +msgid "" +"Unable to plug attachment in router port %(r_port_id)s for neutron port " +"id %(q_port_id)s on router %(router_id)s" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:387 +msgid "An exception occurred while selecting logical switch for the port" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:427 +#, python-format +msgid "" +"An exception occurred while creating the neutron port %s on the NSX " +"plaform" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:439 +#: neutron/plugins/vmware/plugins/base.py:491 +#: neutron/plugins/vmware/plugins/base.py:689 +#, python-format +msgid "" +"NSX plugin does not support regular VIF ports on external networks. Port " +"%s will be down." +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:460 +#, python-format +msgid "" +"_nsx_create_port completed for port %(name)s on network %(network_id)s. " +"The new port id is %(id)s." +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:471 +#, python-format +msgid "" +"Concurrent network deletion detected; Back-end Port %(nsx_id)s creation " +"to be rolled back for Neutron port: %(neutron_id)s" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:483 +#, python-format +msgid "NSX Port %s already gone" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:498 +#, python-format +msgid "Port '%s' was already deleted on NSX platform" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:505 +#, python-format +msgid "_nsx_delete_port completed for port %(port_id)s on network %(net_id)s" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:510 +#, python-format +msgid "Port %s not found in NSX" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:519 +#, python-format +msgid "" +"Neutron port %(port_id)s not found on NSX backend. Terminating delete " +"operation. 
A dangling router port might have been left on router " +"%(router_id)s" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:534 +#: neutron/plugins/vmware/plugins/base.py:1069 +#, python-format +msgid "" +"Ignoring exception as this means the peer for port '%s' has already been " +"deleted." +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:546 +#, python-format +msgid "" +"It is not allowed to create router interface ports on external networks " +"as '%s'" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:571 +#, python-format +msgid "" +"_nsx_create_router_port completed for port %(name)s on network " +"%(network_id)s. The new port id is %(id)s." +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:584 +#, python-format +msgid "" +"device_id field must be populated in order to create an external gateway " +"port for network %s" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:594 +#, python-format +msgid "The gateway port for the NSX router %s was not found on the backend" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:635 +#, python-format +msgid "" +"_nsx_create_ext_gw_port completed on external network %(ext_net_id)s, " +"attached to router:%(router_id)s. NSX port id is %(nsx_port_id)s" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:670 +#: neutron/plugins/vmware/plugins/base.py:1806 +#, python-format +msgid "Logical router resource %s not found on NSX platform" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:674 +#: neutron/plugins/vmware/plugins/base.py:1810 +msgid "Unable to update logical routeron NSX Platform" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:676 +#, python-format +msgid "" +"_nsx_delete_ext_gw_port completed on external network %(ext_net_id)s, " +"attached to NSX router:%(router_id)s" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:718 +#, python-format +msgid "" +"_nsx_create_l2_gw_port completed for port %(name)s on network " +"%(network_id)s. 
The new port id is %(id)s." +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:760 +#, python-format +msgid "%s required" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:765 +msgid "Segmentation ID cannot be specified with flat network type" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:769 +msgid "Segmentation ID must be specified with vlan network type" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:773 +#: neutron/plugins/vmware/plugins/base.py:789 +#, python-format +msgid "%(segmentation_id)s out of range (%(min_id)s through %(max_id)s)" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:795 +#, python-format +msgid "%(net_type_param)s %(net_type_value)s not supported" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:840 +#, python-format +msgid "No switch has available ports (%d checked)" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:874 +#, python-format +msgid "Maximum number of logical ports reached for logical network %s" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:966 +#, python-format +msgid "" +"Network with admin_state_up=False are not yet supported by this plugin. " +"Ignoring setting for network %s" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:1058 +#, python-format +msgid "" +"A nsx lport identifier was not found for neutron port '%s'. Unable to " +"remove the peer router port for this switch port" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:1077 +#, python-format +msgid "delete_network completed for tenant: %s" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:1080 +#: neutron/plugins/vmware/plugins/service.py:553 +#, python-format +msgid "Did not found lswitch %s in NSX" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:1115 +msgid "admin_state_up=False networks are not supported." 
+msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:1135 +#, python-format +msgid "Unable to find NSX mappings for neutron network:%s" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:1142 +#, python-format +msgid "" +"Logical switch update on NSX backend failed. Neutron network " +"id:%(net_id)s; NSX lswitch id:%(lswitch_id)s;Error:%(error)s" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:1213 +#, python-format +msgid "port created on NSX backend for tenant %(tenant_id)s: (%(id)s)" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:1216 +#, python-format +msgid "Logical switch for network %s was not found in NSX." +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:1227 +msgid "Unable to create port or set port attachment in NSX." +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:1315 +#, python-format +msgid "Updating port: %s" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:1350 +#, python-format +msgid "Unable to update port id: %s." +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:1433 +msgid "" +"Cannot create a distributed router with the NSX platform currently in " +"execution. Please, try without specifying the 'distributed' attribute." +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:1439 +msgid "Unable to create logical router on NSX Platform" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:1450 +#, python-format +msgid "" +"Unable to create L3GW port on logical router %(router_uuid)s. 
Verify " +"Default Layer-3 Gateway service %(def_l3_gw_svc)s id is correct" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:1460 +#, python-format +msgid "Unable to create router %s on NSX backend" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:1489 +#: neutron/plugins/vmware/plugins/base.py:1574 +#: neutron/plugins/vmware/plugins/service.py:202 +#: neutron/plugins/vmware/plugins/service.py:1232 +#, python-format +msgid "Network '%s' is not a valid external network" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:1538 +#, python-format +msgid "Failed to set gateway info for router being created:%s - removing router" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:1541 +#, python-format +msgid "" +"Create router failed while setting external gateway. Router:%s has been " +"removed from DB and backend" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:1583 +msgid "" +"'routes' cannot contain route '0.0.0.0/0', this must be updated through " +"the default gateway attribute" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:1599 +#, python-format +msgid "Logical router %s not found on NSX Platform" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:1603 +msgid "Unable to update logical router on NSX Platform" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:1605 +msgid "" +"Request cannot contain 'routes' with the NSX platform currently in " +"execution. Please, try without specifying the static routes." 
+msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:1662 +#, python-format +msgid "Logical router '%s' not found on NSX Platform" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:1666 +#, python-format +msgid "Unable to delete logical router '%s' on NSX Platform" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:1676 +#, python-format +msgid "" +"Unable to remove NSX mapping for Neutron router %(router_id)s because of " +"the following exception:%(d_exc)s" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:1744 +#, python-format +msgid "" +"Add_router_interface completed for subnet:%(subnet_id)s and " +"router:%(router_id)s" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:1842 +#, python-format +msgid "" +"An error occurred while removing NAT rules on the NSX platform for " +"floating ip:%s" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:1847 +msgid "An incorrect number of matching NAT rules was found on the NSX platform" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:1979 +#, python-format +msgid "" +"An error occurred while creating NAT rules on the NSX platform for " +"floating ip:%(floating_ip)s mapped to internal ip:%(internal_ip)s" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:1985 +msgid "Failed to update NAT rules for floatingip update" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:2022 +#, python-format +msgid "The port '%s' is not associated with floating IPs" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:2025 +#, python-format +msgid "Nat rules not found in nsx for port: %s" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:2061 +#, python-format +msgid "Unable to create l2_gw_service for: %s" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:2084 +msgid "" +"Unable to remove gateway service from NSX plaform - the resource was not " +"found" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:2113 +#, python-format +msgid "Unable to update name on NSX backend 
for network gateway: %s" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:2142 +#, python-format +msgid "" +"Rolling back database changes for gateway device %s because of an error " +"in the NSX backend" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:2187 +#: neutron/plugins/vmware/plugins/base.py:2225 +#, python-format +msgid "" +"Neutron gateway device: %(neutron_id)s; NSX transport node identifier: " +"%(nsx_id)s; Operational status: %(status)s." +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:2322 +#, python-format +msgid "" +"Removal of gateway device: %(neutron_id)s failed on NSX backend (NSX " +"id:%(nsx_id)s) because the NSX resource was not found" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:2329 +#, python-format +msgid "" +"Removal of gateway device: %(neutron_id)s failed on NSX backend (NSX " +"id:%(nsx_id)s). Neutron and NSX states have diverged." +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:2377 +#, python-format +msgid "" +"Error while updating security profile %(uuid)s with name %(name)s: " +"%(error)s." 
+msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:2409 +#, python-format +msgid "" +"The NSX security profile %(sec_profile_id)s, associated with the Neutron " +"security group %(sec_group_id)s was not found on the backend" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:2417 +#, python-format +msgid "" +"An exception occurred while removing the NSX security profile " +"%(sec_profile_id)s, associated with Netron security group " +"%(sec_group_id)s" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:2424 +#, python-format +msgid "Unable to remove security group %s from backend" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:2437 +#, python-format +msgid "Port values not valid for protocol: %s" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:145 +#, python-format +msgid "EDGE: router = %s" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:176 +msgid "EDGE: _vcns_create_ext_gw_port" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:185 +msgid "EDGE: _vcns_delete_ext_gw_port" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:340 +#, python-format +msgid "VCNS: delete default gateway %s" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:425 +#, python-format +msgid "An exception occurred while creating a port on lswitch %s" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:473 +#, python-format +msgid "Unable to create integration logic switch for router %s" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:483 +#, python-format +msgid "Unable to add router interface to integration lswitch for router %s" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:493 +#, python-format +msgid "Unable to create advance service router for %s" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:525 +msgid "router_id is not provided!" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:529 +#, python-format +msgid "router_id:%s is not an advanced router!" 
+msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:895 +#, python-format +msgid "Failed to create firewall on vShield Edge bound on router %s" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:903 +msgid "Bad Firewall request Input" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:920 +msgid "A firewall is already associated with the router" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1117 +#, python-format +msgid "Failed to find the edge with vip_id: %s" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1154 +#, python-format +msgid "" +"Operation can't be performed, Since resource %(model)s : %(id)s is in " +"DELETEing status!" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1163 +#, python-format +msgid "Resource %(model)s : %(id)s not found!" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1188 +#, python-format +msgid "Failed to create healthmonitor associated with pool id: %s!" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1205 +msgid "Failed to create pool on vshield edge" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1224 +msgid "create_vip() called" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1256 +msgid "Failed to create vip!" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1296 +#, python-format +msgid "Failed to update vip with id: %s!" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1313 +#, python-format +msgid "Failed to delete vip with id: %s!" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1369 +#, python-format +msgid "Failed to update pool with id: %s!" 
+msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1391 +#: neutron/plugins/vmware/plugins/service.py:1438 +#: neutron/plugins/vmware/plugins/service.py:1461 +msgid "Failed to update pool with the member" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1417 +msgid "Failed to update old pool with the member" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1481 +#, python-format +msgid "Failed to update monitor with id: %s!" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1491 +msgid "Vcns right now can only support one monitor per pool" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1520 +msgid "Failed to associate monitor with pool!" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1551 +msgid "Failed to update pool with pool_monitor!" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1568 +#, python-format +msgid "" +"Failed to update ipsec vpn configuration on edge, since the router: %s " +"does not have a gateway yet!" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1593 +msgid "Bad or unsupported Input request!" 
+msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1596 +#, python-format +msgid "" +"Failed to update ipsec VPN configuration with vpnservice: " +"%(vpnservice_id)s on vShield Edge: %(edge_id)s" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1603 +msgid "create_vpnservice() called" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1607 +#, python-format +msgid "a vpnservice is already associated with the router: %s" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1720 +#, python-format +msgid "Start deploying %(edge_id)s for router %(name)s" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1726 +#: neutron/plugins/vmware/plugins/service.py:1763 +#, python-format +msgid "Failed to deploy Edge for router %s" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1744 +#, python-format +msgid "Router %s not found" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1747 +#, python-format +msgid "Successfully deployed %(edge_id)s for router %(name)s" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1779 +#, python-format +msgid "interface_update_result %d" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1782 +#, python-format +msgid "snat_create_result %d" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1785 +#, python-format +msgid "snat_delete_result %d" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1788 +#, python-format +msgid "dnat_create_result %d" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1791 +#, python-format +msgid "dnat_delete_result %d" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1794 +#, python-format +msgid "routes_update_result %d" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1797 +#, python-format +msgid "nat_update_result %d" +msgstr "" + +#: neutron/plugins/vmware/shell/commands.py:24 +#, python-format +msgid "" +"\n" +"Service type = %s\n" +msgstr "" + +#: neutron/plugins/vmware/shell/commands.py:27 
+#, python-format +msgid "Service uuids = %s\n" +msgstr "" + +#: neutron/plugins/vmware/shell/commands.py:28 +#, python-format +msgid "" +"Port uuids = %s\n" +"\n" +msgstr "" + +#: neutron/plugins/vmware/shell/commands.py:37 +msgid "ID or name of network to run report on" +msgstr "" + +#: neutron/plugins/vmware/shell/commands.py:46 +msgid "Migration report is:\n" +msgstr "" + +#: neutron/plugins/vmware/shell/commands.py:56 +msgid "ID or name of network to migrate" +msgstr "" + +#: neutron/plugins/vmware/shell/commands.py:66 +msgid "Migration has been successful:\n" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:137 +#, python-format +msgid "" +"VCNS: Failed to get edge status:\n" +"%s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:164 +#, python-format +msgid "VCNS: start updating vnic %s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:169 +#, python-format +msgid "" +"VCNS: Failed to update vnic %(config)s:\n" +"%(response)s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:175 +#, python-format +msgid "VCNS: Failed to update vnic %d" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:183 +#, python-format +msgid "VCNS: update vnic %(index)d: %(addr)s %(netmask)s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:192 +#, python-format +msgid "Vnic %d currently not supported" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:213 +#, python-format +msgid "VCNS: start deploying edge %s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:221 +#, python-format +msgid "VCNS: deploying edge %s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:226 +#, python-format +msgid "VCNS: deploy edge failed for router %s." +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:245 +#, python-format +msgid "VCNS: Edge %s status query failed." 
+msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:250 +#, python-format +msgid "VCNS: Unable to retrieve edge %(edge_id)s status. Retry %(retries)d." +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:257 +#, python-format +msgid "VCNS: Unable to retrieve edge %s status. Abort." +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:261 +#, python-format +msgid "VCNS: Edge %s status" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:268 +#, python-format +msgid "VCNS: Failed to deploy edge %(edge_id)s for %(name)s, status %(status)d" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:275 +#, python-format +msgid "VCNS: Edge %(edge_id)s deployed for router %(name)s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:282 +#, python-format +msgid "VCNS: start destroying edge %s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:290 +#, python-format +msgid "" +"VCNS: Failed to delete %(edge_id)s:\n" +"%(response)s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:296 +#, python-format +msgid "VCNS: Failed to delete %s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:306 +#, python-format +msgid "" +"VCNS: Failed to get edges:\n" +"%s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:384 +#, python-format +msgid "" +"VCNS: Failed to get nat config:\n" +"%s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:391 +#, python-format +msgid "VCNS: start creating nat rules: %s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:407 +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:532 +#, python-format +msgid "" +"VCNS: Failed to create snat rule:\n" +"%s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:415 +#, python-format +msgid "VCNS: create snat rule 
%(src)s/%(translated)s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:438 +#, python-format +msgid "VCNS: start deleting %(type)s rules: %(addr)s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:449 +#, python-format +msgid "" +"VCNS: Failed to delete snat rule:\n" +"%s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:456 +#, python-format +msgid "VCNS: delete snat rule %s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:474 +#, python-format +msgid "VCNS: create dnat rule %(dst)s/%(translated)s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:495 +#, python-format +msgid "VCNS: delete dnat rule %s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:519 +#, python-format +msgid "VCNS: start updating nat rules: %s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:540 +#, python-format +msgid "" +"VCNS: update nat rule\n" +"SNAT:%(snat)s\n" +"DNAT:%(dnat)s\n" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:578 +#, python-format +msgid "VCNS: start updating routes for %s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:602 +#, python-format +msgid "" +"VCNS: Failed to update routes:\n" +"%s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:654 +msgid "Failed to get service config" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:666 +msgid "Failed to enable loadbalancer service config" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_firewall_driver.py:43 +#, python-format +msgid "Invalid action value %s in a firewall rule" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_firewall_driver.py:52 +#, python-format +msgid "Invalid action value %s in a vshield firewall rule" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_firewall_driver.py:192 +#, python-format +msgid "Failed to get firewall 
with edge id: %s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_firewall_driver.py:210 +#, python-format +msgid "No rule id:%s found in the edge_firewall_binding" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_firewall_driver.py:220 +#, python-format +msgid "Failed to get firewall rule: %(rule_id)s with edge_id: %(edge_id)s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_firewall_driver.py:236 +#, python-format +msgid "Failed to update firewall with edge_id: %s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_firewall_driver.py:248 +#, python-format +msgid "Failed to delete firewall with edge_id:%s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_firewall_driver.py:262 +#, python-format +msgid "Failed to update firewall rule: %(rule_id)s with edge_id: %(edge_id)s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_firewall_driver.py:275 +#, python-format +msgid "Failed to delete firewall rule: %(rule_id)s with edge_id: %(edge_id)s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_firewall_driver.py:292 +#: neutron/plugins/vmware/vshield/edge_firewall_driver.py:320 +#, python-format +msgid "Failed to add firewall rule above: %(rule_id)s with edge_id: %(edge_id)s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_firewall_driver.py:331 +#, python-format +msgid "Failed to append a firewall rulewith edge_id: %s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_firewall_driver.py:352 +msgid "Can't execute insert rule operation without reference rule_id" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_ipsecvpn_driver.py:50 +#, python-format +msgid "" +"Unsupported ike_version: %s! Only 'v1' ike version is supported on " +"vshield Edge!" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_ipsecvpn_driver.py:64 +msgid "" +"IKEPolicy and IPsecPolicy should have consistent auth_algorithm, " +"encryption_algorithm and pfs for VSE!" 
+msgstr "" + +#: neutron/plugins/vmware/vshield/edge_ipsecvpn_driver.py:72 +#, python-format +msgid "" +"Unsupported encryption_algorithm: %s! '3des', 'aes-128' and 'aes-256' are" +" supported on VSE right now." +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_ipsecvpn_driver.py:81 +#, python-format +msgid "Unsupported pfs: %s! 'group2' and 'group5' are supported on VSE right now." +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_ipsecvpn_driver.py:89 +#, python-format +msgid "" +"Unsupported transform protocol: %s! 'esp' is supported by default on VSE " +"right now." +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_ipsecvpn_driver.py:98 +#, python-format +msgid "" +"Unsupported encapsulation mode: %s! 'tunnel' is supported by default on " +"VSE right now." +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_ipsecvpn_driver.py:136 +#, python-format +msgid "Failed to update ipsec vpn configuration with edge_id: %s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_ipsecvpn_driver.py:143 +#, python-format +msgid "IPsec config not found on edge: %s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_ipsecvpn_driver.py:146 +#, python-format +msgid "Failed to delete ipsec vpn configuration with edge_id: %s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_loadbalancer_driver.py:156 +#, python-format +msgid "Invalid %(protocol)s persistence method: %(type)s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_loadbalancer_driver.py:182 +#, python-format +msgid "Failed to create app profile on edge: %s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_loadbalancer_driver.py:193 +#, python-format +msgid "Failed to create vip on vshield edge: %s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_loadbalancer_driver.py:211 +#, python-format +msgid "vip_binding not found with id: %(id)s edge_id: %(edge_id)s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_loadbalancer_driver.py:228 +msgid "Failed to get vip on edge" +msgstr "" + +#: 
neutron/plugins/vmware/vshield/edge_loadbalancer_driver.py:245 +#, python-format +msgid "Failed to update app profile on edge: %s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_loadbalancer_driver.py:253 +#, python-format +msgid "Failed to update vip on edge: %s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_loadbalancer_driver.py:264 +#, python-format +msgid "vip not found on edge: %s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_loadbalancer_driver.py:267 +#, python-format +msgid "Failed to delete vip on edge: %s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_loadbalancer_driver.py:272 +#, python-format +msgid "app profile not found on edge: %s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_loadbalancer_driver.py:275 +#, python-format +msgid "Failed to delete app profile on edge: %s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_loadbalancer_driver.py:286 +msgid "Failed to create pool" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_loadbalancer_driver.py:303 +#, python-format +msgid "pool_binding not found with id: %(id)s edge_id: %(edge_id)s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_loadbalancer_driver.py:313 +msgid "Failed to get pool on edge" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_loadbalancer_driver.py:325 +msgid "Failed to update pool" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_loadbalancer_driver.py:335 +msgid "Failed to delete pool" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_loadbalancer_driver.py:345 +#, python-format +msgid "Failed to create monitor on edge: %s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_loadbalancer_driver.py:363 +#, python-format +msgid "monitor_binding not found with id: %(id)s edge_id: %(edge_id)s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_loadbalancer_driver.py:373 +#, python-format +msgid "Failed to get monitor on edge: %s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_loadbalancer_driver.py:390 +#, 
python-format +msgid "Failed to update monitor on edge: %s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_loadbalancer_driver.py:401 +msgid "Failed to delete monitor" +msgstr "" + +#: neutron/plugins/vmware/vshield/vcns.py:56 +#, python-format +msgid "VcnsApiHelper('%(method)s', '%(uri)s', '%(body)s')" +msgstr "" + +#: neutron/plugins/vmware/vshield/vcns.py:64 +#, python-format +msgid "Header: '%s'" +msgstr "" + +#: neutron/plugins/vmware/vshield/vcns.py:65 +#, python-format +msgid "Content: '%s'" +msgstr "" + +#: neutron/plugins/vmware/vshield/common/exceptions.py:35 +#, python-format +msgid "%(resource)s not found: %(msg)s" +msgstr "" + +#: neutron/plugins/vmware/vshield/common/exceptions.py:39 +#, python-format +msgid "An unknown exception %(status)s occurred: %(response)s." +msgstr "" + +#: neutron/plugins/vmware/vshield/common/exceptions.py:50 +#, python-format +msgid "Resource %(uri)s has been redirected" +msgstr "" + +#: neutron/plugins/vmware/vshield/common/exceptions.py:54 +#, python-format +msgid "Request %(uri)s is Bad, response %(response)s" +msgstr "" + +#: neutron/plugins/vmware/vshield/common/exceptions.py:58 +#, python-format +msgid "Forbidden: %(uri)s" +msgstr "" + +#: neutron/plugins/vmware/vshield/common/exceptions.py:62 +#, python-format +msgid "Resource %(uri)s not found" +msgstr "" + +#: neutron/plugins/vmware/vshield/common/exceptions.py:66 +#, python-format +msgid "Media Type %(uri)s is not supported" +msgstr "" + +#: neutron/plugins/vmware/vshield/common/exceptions.py:70 +#, python-format +msgid "Service Unavailable: %(uri)s" +msgstr "" + +#: neutron/plugins/vmware/vshield/tasks/tasks.py:46 +#, python-format +msgid "Invalid state %(state)d" +msgstr "" + +#: neutron/plugins/vmware/vshield/tasks/tasks.py:50 +#, python-format +msgid "State %(state)d skipped. 
Current state %(current)d" +msgstr "" + +#: neutron/plugins/vmware/vshield/tasks/tasks.py:96 +#, python-format +msgid "Task %(task)s encountered exception in %(func)s at state %(state)s" +msgstr "" + +#: neutron/plugins/vmware/vshield/tasks/tasks.py:182 +#, python-format +msgid "Start task %s" +msgstr "" + +#: neutron/plugins/vmware/vshield/tasks/tasks.py:188 +#: neutron/plugins/vmware/vshield/tasks/tasks.py:208 +#: neutron/plugins/vmware/vshield/tasks/tasks.py:231 +#, python-format +msgid "Task %(task)s encountered exception in %(cb)s" +msgstr "" + +#: neutron/plugins/vmware/vshield/tasks/tasks.py:194 +#: neutron/plugins/vmware/vshield/tasks/tasks.py:213 +#, python-format +msgid "Task %(task)s return %(status)s" +msgstr "" + +#: neutron/plugins/vmware/vshield/tasks/tasks.py:296 +msgid "Stopping TaskManager" +msgstr "" + +#: neutron/plugins/vmware/vshield/tasks/tasks.py:321 +msgid "TaskManager terminating because of an exception" +msgstr "" + +#: neutron/plugins/vmware/vshield/tasks/tasks.py:343 +msgid "TaskManager terminated" +msgstr "" + +#: neutron/plugins/vmware/vshield/tasks/tasks.py:375 +msgid "Exception in _check_pending_tasks" +msgstr "" + +#: neutron/scheduler/dhcp_agent_scheduler.py:53 +#, python-format +msgid "Agent %s already present" +msgstr "" + +#: neutron/scheduler/dhcp_agent_scheduler.py:54 +#, python-format +msgid "" +"Network %(network_id)s is scheduled to be hosted by DHCP agent " +"%(agent_id)s" +msgstr "" + +#: neutron/scheduler/dhcp_agent_scheduler.py:72 +#, python-format +msgid "Network %s is hosted already" +msgstr "" + +#: neutron/scheduler/dhcp_agent_scheduler.py:81 +#: neutron/scheduler/dhcp_agent_scheduler.py:90 +msgid "No more DHCP agents" +msgstr "" + +#: neutron/scheduler/dhcp_agent_scheduler.py:112 +#, python-format +msgid "DHCP agent %s is not active" +msgstr "" + +#: neutron/scheduler/dhcp_agent_scheduler.py:119 +msgid "No non-hosted networks" +msgstr "" + +#: neutron/scheduler/l3_agent_scheduler.py:65 +#, python-format +msgid "No 
enabled L3 agent on host %s" +msgstr "" + +#: neutron/scheduler/l3_agent_scheduler.py:70 +#, python-format +msgid "L3 agent %s is not active" +msgstr "" + +#: neutron/scheduler/l3_agent_scheduler.py:78 +#: neutron/scheduler/l3_agent_scheduler.py:129 +#, python-format +msgid "Router %(router_id)s has already been hosted by L3 agent %(agent_id)s" +msgstr "" + +#: neutron/scheduler/l3_agent_scheduler.py:97 +msgid "No non-hosted routers" +msgstr "" + +#: neutron/scheduler/l3_agent_scheduler.py:111 +#, python-format +msgid "No routers compatible with L3 agent configuration on host %s" +msgstr "" + +#: neutron/scheduler/l3_agent_scheduler.py:137 +msgid "No active L3 agents" +msgstr "" + +#: neutron/scheduler/l3_agent_scheduler.py:142 +#, python-format +msgid "No L3 agents can host the router %s" +msgstr "" + +#: neutron/scheduler/l3_agent_scheduler.py:155 +#, python-format +msgid "Router %(router_id)s is scheduled to L3 agent %(agent_id)s" +msgstr "" + +#: neutron/server/__init__.py:42 +msgid "" +"ERROR: Unable to find configuration file via the default search paths " +"(~/.neutron/, ~/, /etc/neutron/, /etc/) and the '--config-file' option!" +msgstr "" + +#: neutron/server/__init__.py:54 +msgid "RPC was already started in parent process by plugin." +msgstr "" + +#: neutron/server/__init__.py:66 +#, python-format +msgid "ERROR: %s" +msgstr "" + +#: neutron/services/provider_configuration.py:28 +msgid "" +"Defines providers for advanced services using the format: " +"::[:default]" +msgstr "" + +#: neutron/services/provider_configuration.py:46 +#, python-format +msgid "Provider name is limited by 255 characters: %s" +msgstr "" + +#: neutron/services/provider_configuration.py:55 +msgid "Invalid service provider format" +msgstr "" + +#: neutron/services/provider_configuration.py:63 +#, python-format +msgid "Invalid provider format. 
Last part should be 'default' or empty: %s" +msgstr "" + +#: neutron/services/provider_configuration.py:69 +#, python-format +msgid "Service type '%(svc_type)s' is not allowed, allowed types: %(allowed)s" +msgstr "" + +#: neutron/services/provider_configuration.py:83 +#, python-format +msgid "" +"Service provider '%(provider)s' could not be found for service type " +"%(service_type)s" +msgstr "" + +#: neutron/services/provider_configuration.py:88 +#, python-format +msgid "Service type %(service_type)s does not have a default service provider" +msgstr "" + +#: neutron/services/provider_configuration.py:93 +#, python-format +msgid "" +"Resource '%(resource_id)s' is already associated with provider " +"'%(provider)s' for service type '%(service_type)s'" +msgstr "" + +#: neutron/services/provider_configuration.py:106 +#, python-format +msgid "Driver %s is not unique across providers" +msgstr "" + +#: neutron/services/provider_configuration.py:116 +#, python-format +msgid "Multiple default providers for service %s" +msgstr "" + +#: neutron/services/provider_configuration.py:127 +#, python-format +msgid "Multiple providers specified for service %s" +msgstr "" + +#: neutron/services/service_base.py:72 +#, python-format +msgid "No providers specified for '%s' service, exiting" +msgstr "" + +#: neutron/services/service_base.py:83 +#, python-format +msgid "Loaded '%(provider)s' provider for service %(service_type)s" +msgstr "" + +#: neutron/services/service_base.py:89 +#, python-format +msgid "Error loading provider '%(provider)s' for service %(service_type)s" +msgstr "" + +#: neutron/services/service_base.py:100 +#, python-format +msgid "Default provider is not specified for service type %s" +msgstr "" + +#: neutron/services/firewall/fwaas_plugin.py:45 +msgid "set_firewall_status() called" +msgstr "" + +#: neutron/services/firewall/fwaas_plugin.py:53 +#, python-format +msgid "Firewall %(fw_id)s in PENDING_DELETE state, not changing to %(status)s" +msgstr "" + +#: 
neutron/services/firewall/fwaas_plugin.py:68 +msgid "firewall_deleted() called" +msgstr "" + +#: neutron/services/firewall/fwaas_plugin.py:76 +#, python-format +msgid "Firewall %(fw)s unexpectedly deleted by agent, status was %(status)s" +msgstr "" + +#: neutron/services/firewall/fwaas_plugin.py:84 +msgid "get_firewalls_for_tenant() called" +msgstr "" + +#: neutron/services/firewall/fwaas_plugin.py:93 +msgid "get_firewalls_for_tenant_without_rules() called" +msgstr "" + +#: neutron/services/firewall/fwaas_plugin.py:99 +msgid "get_tenants_with_firewalls() called" +msgstr "" + +#: neutron/services/firewall/fwaas_plugin.py:147 +#, python-format +msgid "" +"Exceeded allowed count of firewalls for tenant %(tenant_id)s. Only one " +"firewall is supported per tenant." +msgstr "" + +#: neutron/services/firewall/agents/firewall_agent_api.py:33 +msgid "Name of the FWaaS Driver" +msgstr "" + +#: neutron/services/firewall/agents/firewall_agent_api.py:37 +msgid "Enable FWaaS" +msgstr "" + +#: neutron/services/firewall/agents/l3reference/firewall_l3_agent.py:45 +msgid "Retrieve Firewall with rules from Plugin" +msgstr "" + +#: neutron/services/firewall/agents/l3reference/firewall_l3_agent.py:54 +msgid "Retrieve Tenants with Firewalls configured from Plugin" +msgstr "" + +#: neutron/services/firewall/agents/l3reference/firewall_l3_agent.py:66 +msgid "Initializing firewall agent" +msgstr "" + +#: neutron/services/firewall/agents/l3reference/firewall_l3_agent.py:74 +#, python-format +msgid "FWaaS Driver Loaded: '%s'" +msgstr "" + +#: neutron/services/firewall/agents/l3reference/firewall_l3_agent.py:77 +#, python-format +msgid "Error importing FWaaS device driver: %s" +msgstr "" + +#: neutron/services/firewall/agents/l3reference/firewall_l3_agent.py:114 +#, python-format +msgid "%(func_name)s from agent for fw: %(fwid)s" +msgstr "" + +#: neutron/services/firewall/agents/l3reference/firewall_l3_agent.py:122 +#, python-format +msgid "No Routers on tenant: %s" +msgstr "" + +#: 
neutron/services/firewall/agents/l3reference/firewall_l3_agent.py:129 +#: neutron/services/firewall/agents/l3reference/firewall_l3_agent.py:262 +#, python-format +msgid "Apply fw on Router List: '%s'" +msgstr "" + +#: neutron/services/firewall/agents/l3reference/firewall_l3_agent.py:141 +#, python-format +msgid "Firewall Driver Error for %(func_name)s for fw: %(fwid)s" +msgstr "" + +#: neutron/services/firewall/agents/l3reference/firewall_l3_agent.py:156 +#, python-format +msgid "FWaaS RPC failure in %(func_name)s for fw: %(fwid)s" +msgstr "" + +#: neutron/services/firewall/agents/l3reference/firewall_l3_agent.py:173 +#: neutron/services/firewall/agents/l3reference/firewall_l3_agent.py:189 +#, python-format +msgid "Firewall Driver Error on fw state %(fwmsg)s for fw: %(fwid)s" +msgstr "" + +#: neutron/services/firewall/agents/l3reference/firewall_l3_agent.py:201 +#, python-format +msgid "Process router add, router_id: '%s'" +msgstr "" + +#: neutron/services/firewall/agents/l3reference/firewall_l3_agent.py:212 +#, python-format +msgid "Process router add, fw_list: '%s'" +msgstr "" + +#: neutron/services/firewall/agents/l3reference/firewall_l3_agent.py:229 +#, python-format +msgid "FWaaS RPC info call failed for '%s'." 
+msgstr "" + +#: neutron/services/firewall/agents/l3reference/firewall_l3_agent.py:244 +#, python-format +msgid "Tenants with Firewalls: '%s'" +msgstr "" + +#: neutron/services/firewall/agents/l3reference/firewall_l3_agent.py:254 +#, python-format +msgid "Router List: '%s'" +msgstr "" + +#: neutron/services/firewall/agents/l3reference/firewall_l3_agent.py:256 +#, python-format +msgid "fw_list: '%s'" +msgstr "" + +#: neutron/services/firewall/agents/l3reference/firewall_l3_agent.py:273 +msgid "Failed fwaas process services sync" +msgstr "" + +#: neutron/services/firewall/agents/varmour/varmour_api.py:31 +msgid "vArmour director ip" +msgstr "" + +#: neutron/services/firewall/agents/varmour/varmour_api.py:33 +msgid "vArmour director port" +msgstr "" + +#: neutron/services/firewall/agents/varmour/varmour_api.py:35 +msgid "vArmour director username" +msgstr "" + +#: neutron/services/firewall/agents/varmour/varmour_api.py:37 +msgid "vArmour director password" +msgstr "" + +#: neutron/services/firewall/agents/varmour/varmour_api.py:47 +msgid "An unknown exception." +msgstr "" + +#: neutron/services/firewall/agents/varmour/varmour_api.py:61 +msgid "Invalid login credential." 
+msgstr "" + +#: neutron/services/firewall/agents/varmour/varmour_api.py:67 +msgid "vArmourRestAPI: started" +msgstr "" + +#: neutron/services/firewall/agents/varmour/varmour_api.py:100 +#, python-format +msgid "vArmourRestAPI: %(server)s %(port)s" +msgstr "" + +#: neutron/services/firewall/agents/varmour/varmour_api.py:106 +#, python-format +msgid "vArmourRestAPI Sending: %(method)s %(action)s %(headers)s %(body_data)s" +msgstr "" + +#: neutron/services/firewall/agents/varmour/varmour_api.py:117 +#, python-format +msgid "vArmourRestAPI Response: %(status)s %(resp_str)s" +msgstr "" + +#: neutron/services/firewall/agents/varmour/varmour_api.py:125 +msgid "vArmourRestAPI: Could not establish HTTP connection" +msgstr "" + +#: neutron/services/firewall/agents/varmour/varmour_router.py:51 +msgid "vArmourL3NATAgent: __init__" +msgstr "" + +#: neutron/services/firewall/agents/varmour/varmour_router.py:65 +#, python-format +msgid "_router_added: %s" +msgstr "" + +#: neutron/services/firewall/agents/varmour/varmour_router.py:72 +#, python-format +msgid "_router_removed: %s" +msgstr "" + +#: neutron/services/firewall/agents/varmour/varmour_router.py:113 +#, python-format +msgid "_va_unset_zone_interfaces: %s" +msgstr "" + +#: neutron/services/firewall/agents/varmour/varmour_router.py:145 +#, python-format +msgid "_va_set_interface_ip: %(pif)s %(cidr)s" +msgstr "" + +#: neutron/services/firewall/agents/varmour/varmour_router.py:165 +#, python-format +msgid "_va_config_trusted_zone: %s" +msgstr "" + +#: neutron/services/firewall/agents/varmour/varmour_router.py:196 +#, python-format +msgid "_va_config_untrusted_zone: %s" +msgstr "" + +#: neutron/services/firewall/agents/varmour/varmour_router.py:211 +#, python-format +msgid "_va_config_untrusted_zone: gw=%r" +msgstr "" + +#: neutron/services/firewall/agents/varmour/varmour_router.py:224 +#, python-format +msgid "_va_config_router_snat_rules: %s" +msgstr "" + +#: neutron/services/firewall/agents/varmour/varmour_router.py:256 
+#, python-format +msgid "_va_config_floating_ips: %s" +msgstr "" + +#: neutron/services/firewall/agents/varmour/varmour_router.py:278 +#, python-format +msgid "process_router: %s" +msgstr "" + +#: neutron/services/firewall/agents/varmour/varmour_router.py:289 +msgid "Unable to parse interface mapping." +msgstr "" + +#: neutron/services/firewall/agents/varmour/varmour_router.py:292 +msgid "Unable to read interface mapping." +msgstr "" + +#: neutron/services/firewall/agents/varmour/varmour_router.py:311 +#, python-format +msgid "external_gateway_added: %s" +msgstr "" + +#: neutron/services/firewall/drivers/linux/iptables_fwaas.py:49 +msgid "Initializing fwaas iptables driver" +msgstr "" + +#: neutron/services/firewall/drivers/linux/iptables_fwaas.py:52 +#, python-format +msgid "Creating firewall %(fw_id)s for tenant %(tid)s)" +msgstr "" + +#: neutron/services/firewall/drivers/linux/iptables_fwaas.py:61 +#, python-format +msgid "Failed to create firewall: %s" +msgstr "" + +#: neutron/services/firewall/drivers/linux/iptables_fwaas.py:65 +#, python-format +msgid "Deleting firewall %(fw_id)s for tenant %(tid)s)" +msgstr "" + +#: neutron/services/firewall/drivers/linux/iptables_fwaas.py:77 +#, python-format +msgid "Failed to delete firewall: %s" +msgstr "" + +#: neutron/services/firewall/drivers/linux/iptables_fwaas.py:81 +#, python-format +msgid "Updating firewall %(fw_id)s for tenant %(tid)s)" +msgstr "" + +#: neutron/services/firewall/drivers/linux/iptables_fwaas.py:90 +#, python-format +msgid "Failed to update firewall: %s" +msgstr "" + +#: neutron/services/firewall/drivers/linux/iptables_fwaas.py:94 +#, python-format +msgid "Applying firewall %(fw_id)s for tenant %(tid)s)" +msgstr "" + +#: neutron/services/firewall/drivers/linux/iptables_fwaas.py:113 +#, python-format +msgid "Failed to apply default policy on firewall: %s" +msgstr "" + +#: neutron/services/firewall/drivers/varmour/varmour_fwaas.py:30 +msgid "Initializing fwaas vArmour driver" +msgstr "" + +#: 
neutron/services/firewall/drivers/varmour/varmour_fwaas.py:35 +#, python-format +msgid "create_firewall (%s)" +msgstr "" + +#: neutron/services/firewall/drivers/varmour/varmour_fwaas.py:40 +#, python-format +msgid "update_firewall (%s)" +msgstr "" + +#: neutron/services/firewall/drivers/varmour/varmour_fwaas.py:48 +#, python-format +msgid "delete_firewall (%s)" +msgstr "" + +#: neutron/services/firewall/drivers/varmour/varmour_fwaas.py:53 +#, python-format +msgid "apply_default_policy (%s)" +msgstr "" + +#: neutron/services/firewall/drivers/varmour/varmour_fwaas.py:63 +#, python-format +msgid "Updating firewall (%s)" +msgstr "" + +#: neutron/services/firewall/drivers/varmour/varmour_fwaas.py:112 +msgid "Unsupported IP version rule." +msgstr "" + +#: neutron/services/l3_router/l3_apic.py:55 +msgid "L3 Router Service Plugin for basic L3 using the APIC" +msgstr "" + +#: neutron/services/l3_router/l3_apic.py:96 +#, python-format +msgid "Error attaching subnet %(subnet_id)s to router %(router_id)s" +msgstr "" + +#: neutron/services/l3_router/l3_apic.py:131 +#, python-format +msgid "Error detaching subnet %(subnet_id)s from router %(router_id)s" +msgstr "" + +#: neutron/services/loadbalancer/agent_scheduler.py:103 +#, python-format +msgid "Pool %(pool_id)s has already been hosted by lbaas agent %(agent_id)s" +msgstr "" + +#: neutron/services/loadbalancer/agent_scheduler.py:111 +#, python-format +msgid "No active lbaas agents for pool %s" +msgstr "" + +#: neutron/services/loadbalancer/agent_scheduler.py:117 +#, python-format +msgid "No lbaas agent supporting device driver %s" +msgstr "" + +#: neutron/services/loadbalancer/agent_scheduler.py:126 +#, python-format +msgid "Pool %(pool_id)s is scheduled to lbaas agent %(agent_id)s" +msgstr "" + +#: neutron/services/loadbalancer/plugin.py:82 +#, python-format +msgid "Delete associated loadbalancer pools before removing providers %s" +msgstr "" + +#: neutron/services/loadbalancer/plugin.py:91 +#, python-format +msgid "Error 
retrieving driver for provider %s" +msgstr "" + +#: neutron/services/loadbalancer/plugin.py:99 +#, python-format +msgid "Error retrieving provider for pool %s" +msgstr "" + +#: neutron/services/loadbalancer/plugin.py:190 +#, python-format +msgid "Failed to delete pool %s, putting it in ERROR state" +msgstr "" + +#: neutron/services/loadbalancer/agent/agent.py:38 +msgid "Seconds between periodic task runs" +msgstr "" + +#: neutron/services/loadbalancer/agent/agent_manager.py:41 +msgid "Drivers used to manage loadbalancing devices" +msgstr "" + +#: neutron/services/loadbalancer/agent/agent_manager.py:47 +#, python-format +msgid "Unknown device with pool_id %(pool_id)s" +msgstr "" + +#: neutron/services/loadbalancer/agent/agent_manager.py:96 +#, python-format +msgid "Error importing loadbalancer device driver: %s" +msgstr "" + +#: neutron/services/loadbalancer/agent/agent_manager.py:103 +#, python-format +msgid "Multiple device drivers with the same name found: %s" +msgstr "" + +#: neutron/services/loadbalancer/agent/agent_manager.py:143 +#, python-format +msgid "Error updating statistics on pool %s" +msgstr "" + +#: neutron/services/loadbalancer/agent/agent_manager.py:159 +msgid "Unable to retrieve ready devices" +msgstr "" + +#: neutron/services/loadbalancer/agent/agent_manager.py:176 +#: neutron/services/loadbalancer/agent/agent_manager.py:241 +#, python-format +msgid "No device driver on agent: %s." 
+msgstr "" + +#: neutron/services/loadbalancer/agent/agent_manager.py:186 +#, python-format +msgid "Unable to deploy instance for pool: %s" +msgstr "" + +#: neutron/services/loadbalancer/agent/agent_manager.py:196 +#, python-format +msgid "Unable to destroy device for pool: %s" +msgstr "" + +#: neutron/services/loadbalancer/agent/agent_manager.py:209 +#, python-format +msgid "%(operation)s %(obj)s %(id)s failed on device driver %(driver)s" +msgstr "" + +#: neutron/services/loadbalancer/agent/agent_manager.py:335 +#, python-format +msgid "Destroying pool %s due to agent disabling" +msgstr "" + +#: neutron/services/loadbalancer/agent/agent_manager.py:338 +#, python-format +msgid "Agent_updated by server side %s!" +msgstr "" + +#: neutron/services/loadbalancer/drivers/common/agent_driver_base.py:42 +msgid "Driver to use for scheduling pool to a default loadbalancer agent" +msgstr "" + +#: neutron/services/loadbalancer/drivers/common/agent_driver_base.py:50 +msgid "Device driver for agent should be specified in plugin driver." +msgstr "" + +#: neutron/services/loadbalancer/drivers/common/agent_driver_base.py:74 +#, python-format +msgid "Multiple lbaas agents found on host %s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/common/agent_driver_base.py:154 +#, python-format +msgid "Unknown object type: %s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/common/agent_driver_base.py:165 +#, python-format +msgid "" +"Cannot update status: %(obj_type)s %(obj_id)s not found in the DB, it was" +" probably deleted concurrently" +msgstr "" + +#: neutron/services/loadbalancer/drivers/common/agent_driver_base.py:188 +#, python-format +msgid "Unable to find port %s to plug." +msgstr "" + +#: neutron/services/loadbalancer/drivers/common/agent_driver_base.py:212 +#: neutron/services/loadbalancer/drivers/common/agent_driver_base.py:229 +#, python-format +msgid "" +"Unable to find port %s to unplug. This can occur when the Vip has been " +"deleted first." 
+msgstr "" + +#: neutron/services/loadbalancer/drivers/embrane/config.py:32 +msgid "Load Balancer image id (Embrane LB)" +msgstr "" + +#: neutron/services/loadbalancer/drivers/embrane/config.py:34 +msgid "In band Security Zone id for LBs" +msgstr "" + +#: neutron/services/loadbalancer/drivers/embrane/config.py:36 +msgid "Out of band Security Zone id for LBs" +msgstr "" + +#: neutron/services/loadbalancer/drivers/embrane/config.py:38 +msgid "Management Security Zone id for LBs" +msgstr "" + +#: neutron/services/loadbalancer/drivers/embrane/config.py:40 +msgid "Dummy user traffic Security Zone id for LBs" +msgstr "" + +#: neutron/services/loadbalancer/drivers/embrane/config.py:44 +msgid "choose LB image flavor to use, accepted values: small, medium" +msgstr "" + +#: neutron/services/loadbalancer/drivers/embrane/config.py:47 +msgid "resource synchronization interval in seconds" +msgstr "" + +#: neutron/services/loadbalancer/drivers/embrane/constants.py:51 +#, python-format +msgid "%s, probably was cancelled through the heleos UI" +msgstr "" + +#: neutron/services/loadbalancer/drivers/embrane/constants.py:58 +#, python-format +msgid "" +"Failed to delete the backend load balancer for reason %s. 
Please remove " +"it manually through the heleos UI" +msgstr "" + +#: neutron/services/loadbalancer/drivers/embrane/constants.py:61 +#, python-format +msgid "" +"No subnet is associated to member %s (required to identify the proper " +"load balancer port)" +msgstr "" + +#: neutron/services/loadbalancer/drivers/embrane/driver.py:88 +msgid "Connection limit is not supported by Embrane LB" +msgstr "" + +#: neutron/services/loadbalancer/drivers/embrane/driver.py:94 +#, python-format +msgid "Session persistence %s not supported by Embrane LBaaS" +msgstr "" + +#: neutron/services/loadbalancer/drivers/embrane/driver.py:132 +#, python-format +msgid "Subnet assigned to pool %s doesn't exist, backend port can't be created" +msgstr "" + +#: neutron/services/loadbalancer/drivers/embrane/agent/lb_operations.py:111 +#, python-format +msgid "" +"The load balancer %s had no physical representation, likely already " +"deleted" +msgstr "" + +#: neutron/services/loadbalancer/drivers/haproxy/namespace_driver.py:48 +msgid "Location to store config and state files" +msgstr "" + +#: neutron/services/loadbalancer/drivers/haproxy/namespace_driver.py:54 +msgid "The user group" +msgstr "" + +#: neutron/services/loadbalancer/drivers/haproxy/namespace_driver.py:60 +msgid "" +"When delete and re-add the same vip, send this many gratuitous ARPs to " +"flush the ARP cache in the Router. Set it below or equal to 0 to disable " +"this feature." 
+msgstr "" + +#: neutron/services/loadbalancer/drivers/haproxy/namespace_driver.py:77 +#, python-format +msgid "Error importing interface driver: %s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/haproxy/namespace_driver.py:173 +#, python-format +msgid "Stats socket not found for pool %s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/haproxy/namespace_driver.py:215 +#, python-format +msgid "Error while connecting to stats socket: %s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/haproxy/namespace_driver.py:394 +#, python-format +msgid "Unable to kill haproxy process: %s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/ncc_client.py:43 +#, python-format +msgid "NCC Error %d" +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/ncc_client.py:54 +msgid "No NetScaler Control Center URI specified. Cannot connect." +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/ncc_client.py:133 +#, python-format +msgid "Connection error occurred while connecting to %s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/ncc_client.py:138 +#, python-format +msgid "SSL error occurred while connecting to %s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/ncc_client.py:143 +#, python-format +msgid "Request to %s timed out" +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/ncc_client.py:150 +msgid "Request did not specify a valid URL" +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/ncc_client.py:154 +#, python-format +msgid "Too many redirects occurred for request to %s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/ncc_client.py:158 +#, python-format +msgid "A request error while connecting to %s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/ncc_client.py:163 +#, python-format +msgid "A unknown error occurred during request to %s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/ncc_client.py:171 +#, 
python-format +msgid "Unable to login. Invalid credentials passed.for: %s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/ncc_client.py:175 +#, python-format +msgid "Failed %(method)s operation on %(url)s status code: %(response_status)s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/netscaler_driver.py:29 +msgid "The URL to reach the NetScaler Control Center Server." +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/netscaler_driver.py:33 +msgid "Username to login to the NetScaler Control Center Server." +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/netscaler_driver.py:37 +msgid "Password to login to the NetScaler Control Center Server." +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/netscaler_driver.py:75 +#, python-format +msgid "NetScaler driver vip creation: %s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/netscaler_driver.py:90 +#, python-format +msgid "NetScaler driver vip %(vip_id)s update: %(vip_obj)s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/netscaler_driver.py:105 +#, python-format +msgid "NetScaler driver vip removal: %s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/netscaler_driver.py:126 +#, python-format +msgid "NetScaler driver pool creation: %s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/netscaler_driver.py:141 +#, python-format +msgid "NetScaler driver pool %(pool_id)s update: %(pool_obj)s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/netscaler_driver.py:156 +#, python-format +msgid "NetScaler driver pool removal: %s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/netscaler_driver.py:173 +#, python-format +msgid "NetScaler driver poolmember creation: %s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/netscaler_driver.py:191 +#, python-format +msgid "NetScaler driver poolmember %(member_id)s update: %(member_obj)s" 
+msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/netscaler_driver.py:208 +#, python-format +msgid "NetScaler driver poolmember removal: %s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/netscaler_driver.py:226 +#, python-format +msgid "" +"NetScaler driver healthmonitor creation for pool %(pool_id)s: " +"%(monitor_obj)s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/netscaler_driver.py:249 +#, python-format +msgid "NetScaler driver healthmonitor %(monitor_id)s update: %(monitor_obj)s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/netscaler_driver.py:270 +#, python-format +msgid "NetScaler driver healthmonitor %(monitor_id)sremoval for pool %(pool_id)s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/netscaler_driver.py:290 +#, python-format +msgid "NetScaler driver pool stats retrieval: %s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/netscaler_driver.py:415 +#, python-format +msgid "" +"Filtering ports based on network_id=%(network_id)s, " +"tenant_id=%(tenant_id)s, device_id=%(device_id)s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/netscaler_driver.py:430 +#, python-format +msgid "Found an existing SNAT port for subnet %s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/netscaler_driver.py:433 +#, python-format +msgid "Found no SNAT ports for subnet %s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/netscaler_driver.py:454 +#, python-format +msgid "Created SNAT port: %s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/netscaler_driver.py:462 +#, python-format +msgid "Removed SNAT port: %s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/netscaler_driver.py:469 +#, python-format +msgid "No SNAT port found for subnet %s. Creating one..." 
+msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/netscaler_driver.py:477 +#, python-format +msgid "SNAT port: %s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/netscaler_driver.py:487 +#, python-format +msgid "Removing SNAT port for subnet %s as this is the last pool using it..." +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:63 +msgid "IP address of vDirect server." +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:65 +msgid "IP address of secondary vDirect server." +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:68 +msgid "vDirect user name." +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:71 +msgid "vDirect user password." +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:74 +msgid "Service ADC type. Default: VA." +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:77 +msgid "Service ADC version." +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:80 +msgid "Enables or disables the Service HA pair. Default: False." +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:84 +msgid "Service throughput. Default: 1000." +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:87 +msgid "Service SSL throughput. Default: 100." +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:90 +msgid "Service compression throughput. Default: 100." +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:93 +msgid "Size of service cache. Default: 20." +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:96 +msgid "Name of l2_l3 workflow. Default: openstack_l2_l3." +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:100 +msgid "Name of l4 workflow. Default: openstack_l4." 
+msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:108 +msgid "Parameter for l2_l3 workflow constructor." +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:115 +msgid "Parameter for l2_l3 workflow setup." +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:118 +msgid "List of actions that are not pushed to the completion queue." +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:122 +msgid "Name of the l4 workflow action. Default: BaseCreate." +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:126 +msgid "Resource pool IDs." +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:129 +msgid "A required VLAN for the interswitch link to use." +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:132 +msgid "" +"Enable or disable Alteon interswitch link for stateful session failover. " +"Default: False." +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:229 +#, python-format +msgid "" +"vip: %(vip)s, extended_vip: %(extended_vip)s, network_id: " +"%(vip_network_id)s, service_name: %(service_name)s, pip_info: " +"%(pip_info)s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:264 +#, python-format +msgid "Retrieved pip nport: %(port)r for vip: %(vip)s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:272 +#, python-format +msgid "Found no pip nports associated with vip: %s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:281 +#, python-format +msgid "Failed to remove workflow %s. 
Going to set vip to ERROR status" +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:295 +#, python-format +msgid "pip nport id: %s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:299 +#, python-format +msgid "pip nport delete failed: %r" +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:387 +#, python-format +msgid "" +"_handle_pool_health_monitor. health_monitor = %(hm_id)s pool_id = " +"%(pool_id)s delete = %(delete)s vip_id = %(vip_id)s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:418 +msgid "Starting operation completion handling thread" +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:448 +#, python-format +msgid "_update_workflow response: %s " +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:457 +#: neutron/services/loadbalancer/drivers/radware/driver.py:488 +#, python-format +msgid "Pushing operation %s to the queue" +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:465 +#, python-format +msgid "Remove the workflow %s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:473 +#, python-format +msgid "Post-remove workflow function %r completed" +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:477 +#, python-format +msgid "Post-remove workflow function %r failed" +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:594 +#, python-format +msgid "create_workflow response: %s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:663 +#, python-format +msgid "" +"vDirectRESTClient:init server=%(server)s, secondary " +"server=%(sec_server)s, port=%(port)d, ssl=%(ssl)r" +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:669 +#, python-format +msgid "Fliping servers. 
Current is: %(server)s, switching to %(secondary)s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:682 +msgid "" +"REST client is not able to recover since only one vDirect server is " +"configured." +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:690 +#, python-format +msgid "vDirect server is not responding (%s)." +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:694 +#, python-format +msgid "vDirect server is not active (%s)." +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:722 +msgid "vdirectRESTClient: Could not establish HTTPS connection" +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:729 +msgid "vdirectRESTClient: Could not establish HTTP connection" +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:746 +#, python-format +msgid "vdirectRESTClient: %(action)s failure, %(e)r" +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:814 +#, python-format +msgid "" +"Operation %(oper)s is completed after %(sec_to_completion)d sec with " +"success status: %(success)s :" +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:826 +#, python-format +msgid "Operation %(operation)s failed. Reason: %(msg)s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:858 +#, python-format +msgid "Operation %s is not completed yet.." 
+msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:873 +msgid "Exception was thrown inside OperationCompletionHandler" +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:882 +#, python-format +msgid "Post-operation function %(func)r completed after operation %(oper)r" +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:888 +#, python-format +msgid "Post-operation function %(func)r failed after operation %(oper)r" +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:929 +#, python-format +msgid "_update: %s " +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:969 +#, python-format +msgid "_remove_object_from_db %s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/exceptions.py:24 +msgid "An unknown exception occurred in Radware LBaaS provider." +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/exceptions.py:28 +msgid "" +"vDirect user/password missing. Specify in configuration file, under " +"[radware] section" +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/exceptions.py:33 +#, python-format +msgid "" +"Workflow %(workflow)s is missing on vDirect server. Upload missing " +"workflow" +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/exceptions.py:38 +#, python-format +msgid "" +"REST request failed with status %(status)s. Reason: %(reason)s, " +"Description: %(description)s. Success status codes are %(success_codes)s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/exceptions.py:44 +#, python-format +msgid "%(operation)s operation is not supported for %(entity)s." 
+msgstr "" + +#: neutron/services/metering/agents/metering_agent.py:71 +msgid "Metering driver" +msgstr "" + +#: neutron/services/metering/agents/metering_agent.py:73 +msgid "Interval between two metering measures" +msgstr "" + +#: neutron/services/metering/agents/metering_agent.py:75 +msgid "Interval between two metering reports" +msgstr "" + +#: neutron/services/metering/agents/metering_agent.py:99 +#, python-format +msgid "Loading Metering driver %s" +msgstr "" + +#: neutron/services/metering/agents/metering_agent.py:101 +msgid "A metering driver must be specified" +msgstr "" + +#: neutron/services/metering/agents/metering_agent.py:116 +#, python-format +msgid "Send metering report: %s" +msgstr "" + +#: neutron/services/metering/agents/metering_agent.py:180 +#, python-format +msgid "Driver %(driver)s does not implement %(func)s" +msgstr "" + +#: neutron/services/metering/agents/metering_agent.py:184 +#, python-format +msgid "Driver %(driver)s:%(func)s runtime error" +msgstr "" + +#: neutron/services/metering/agents/metering_agent.py:219 +msgid "Get router traffic counters" +msgstr "" + +#: neutron/services/metering/agents/metering_agent.py:223 +msgid "Update metering rules from agent" +msgstr "" + +#: neutron/services/metering/agents/metering_agent.py:228 +msgid "Creating a metering label from agent" +msgstr "" + +#: neutron/services/metering/agents/metering_agent.py:235 +msgid "Delete a metering label from agent" +msgstr "" + +#: neutron/services/metering/drivers/iptables/iptables_driver.py:90 +#, python-format +msgid "Loading interface driver %s" +msgstr "" + +#: neutron/services/vpn/agent.py:28 +msgid "The vpn device drivers Neutron will use" +msgstr "" + +#: neutron/services/vpn/plugin.py:48 +#, python-format +msgid "VPN plugin using service driver: %s" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_csr_rest_client.py:71 +#, python-format +msgid "RESPONSE: %s" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_csr_rest_client.py:84 +#, 
python-format +msgid "%(method)s: Request for %(resource)s payload: %(payload)s" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_csr_rest_client.py:91 +#, python-format +msgid "%(method)s Took %(time).2f seconds to process" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_csr_rest_client.py:97 +#, python-format +msgid "%(method)s: Request timeout%(ssl)s (%(timeout).3f sec) for CSR(%(host)s)" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_csr_rest_client.py:106 +#, python-format +msgid "%(method)s: Unable to connect to CSR(%(host)s)" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_csr_rest_client.py:110 +#, python-format +msgid "%(method)s: Unexpected error for CSR (%(host)s): %(error)s" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_csr_rest_client.py:116 +#, python-format +msgid "%(method)s: Completed [%(status)s]" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_csr_rest_client.py:131 +#, python-format +msgid "%(auth)s with CSR %(host)s" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_csr_rest_client.py:138 +#, python-format +msgid "Successfully authenticated with CSR %s" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_csr_rest_client.py:140 +#, python-format +msgid "Failed authentication with CSR %(host)s [%(status)s]" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_csr_rest_client.py:175 +#, python-format +msgid "%(method)s: Request timeout for CSR(%(host)s)" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:43 +msgid "Status check interval for Cisco CSR IPSec connections" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:54 +#, python-format +msgid "Cisco CSR failed to create %(resource)s (%(which)s)" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:58 +#, python-format +msgid "Cisco CSR failed to change %(tunnel)s admin state to %(state)s" +msgstr "" + +#: 
neutron/services/vpn/device_drivers/cisco_ipsec.py:63 +#, python-format +msgid "" +"Required %(resource)s attribute %(attr)s mapping for Cisco CSR is missing" +" in device driver" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:68 +#, python-format +msgid "" +"Device driver does not have a mapping of '%(value)s for attribute " +"%(attr)s of %(resource)s" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:83 +#, python-format +msgid "Scanning config files %s for Cisco CSR configurations" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:88 +#, python-format +msgid "Config file parse error: %s" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:92 +#, python-format +msgid "Unable to parse config files %s for Cisco CSR info" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:102 +#, python-format +msgid "Ignoring Cisco CSR configuration entry - router IP %s is not valid" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:113 +#, python-format +msgid "Ignoring Cisco CSR for router %(router)s - missing %(field)s setting" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:121 +#, python-format +msgid "Ignoring Cisco CSR for router %s - timeout is not a floating point number" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:130 +#, python-format +msgid "Ignoring Cisco CSR for subnet %s - REST management is not an IP address" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:137 +#, python-format +msgid "Ignoring Cisco CSR for router %s - local tunnel is not an IP address" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:147 +#, python-format +msgid "Found CSR for router %(router)s: %(info)s" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:213 +#, python-format +msgid "Loaded %(num)d Cisco CSR configuration%(plural)s" +msgstr "" + +#: 
neutron/services/vpn/device_drivers/cisco_ipsec.py:217 +#, python-format +msgid "No Cisco CSR configurations found in: %s" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:228 +#, python-format +msgid "Handling VPN service update notification '%s'" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:250 +#, python-format +msgid "Update: Existing connection %s changed" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:257 +#, python-format +msgid "Update: Connection %s no longer admin down" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:263 +#, python-format +msgid "Update: Connection %s forced to admin down" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:271 +#, python-format +msgid "Update: Created new connection %s in admin down state" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:276 +#, python-format +msgid "Update: Created new connection %s" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:288 +#, python-format +msgid "" +"Update: Skipping VPN service %(service)s as it's router (%(csr_id)s is " +"not associated with a Cisco CSR" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:294 +#, python-format +msgid "Update: Existing VPN service %s detected" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:298 +#, python-format +msgid "Update: New VPN service %s detected" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:307 +msgid "Update: Completed update processing" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:337 +#, python-format +msgid "Mark: %(service)d VPN services and %(conn)d IPSec connections marked dirty" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:359 +#, python-format +msgid "" +"Sweep: Removed %(service)d dirty VPN service%(splural)s and %(conn)d " +"dirty IPSec connection%(cplural)s" +msgstr "" + +#: 
neutron/services/vpn/device_drivers/cisco_ipsec.py:374 +#, python-format +msgid "Report: Collecting status for IPSec connections on VPN service %s" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:380 +#, python-format +msgid "Connection %s forced down" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:384 +#, python-format +msgid "Connection %(conn)s reported %(status)s" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:389 +#, python-format +msgid "Report: Adding info for IPSec connection %s" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:409 +#, python-format +msgid "Report: Adding info for VPN service %s" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:431 +msgid "Report: Starting status report processing" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:433 +#, python-format +msgid "Report: Collecting status for VPN service %s" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:439 +msgid "Sending status report update to plugin" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:441 +msgid "Report: Completed status report processing" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:736 +#, python-format +msgid "Unable to create %(resource)s %(which)s: %(status)d" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:749 +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:777 +#, python-format +msgid "Internal error - '%s' is not defined" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:762 +#, python-format +msgid "Unable to delete %(resource)s %(which)s: %(status)d" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:771 +#, python-format +msgid "Performing rollback action %(action)s for resource %(resource)s" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:797 +#, python-format +msgid "Creating IPSec connection %s" +msgstr 
"" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:830 +#, python-format +msgid "FAILED: Create of IPSec site-to-site connection %s" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:833 +#, python-format +msgid "SUCCESS: Created IPSec site-to-site connection %s" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:842 +#, python-format +msgid "Deleting IPSec connection %s" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:844 +#, python-format +msgid "Unable to find connection %s" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:848 +#, python-format +msgid "SUCCESS: Deleted IPSec site-to-site connection %s" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:856 +#, python-format +msgid "Unable to change %(tunnel)s admin state to %(state)s" +msgstr "" + +#: neutron/services/vpn/device_drivers/ipsec.py:48 +msgid "Location to store ipsec server config files" +msgstr "" + +#: neutron/services/vpn/device_drivers/ipsec.py:51 +msgid "Interval for checking ipsec status" +msgstr "" + +#: neutron/services/vpn/device_drivers/ipsec.py:61 +msgid "Template file for ipsec configuration" +msgstr "" + +#: neutron/services/vpn/device_drivers/ipsec.py:67 +msgid "Template file for ipsec secret configuration" +msgstr "" + +#: neutron/services/vpn/device_drivers/ipsec.py:249 +#, python-format +msgid "Failed to enable vpn process on router %s" +msgstr "" + +#: neutron/services/vpn/device_drivers/ipsec.py:260 +#, python-format +msgid "Failed to disable vpn process on router %s" +msgstr "" + +#: neutron/services/vpn/service_drivers/__init__.py:78 +#, python-format +msgid "Notify agent at %(topic)s.%(host)s the message %(method)s %(args)s" +msgstr "" + +#: neutron/services/vpn/service_drivers/cisco_csr_db.py:46 +#, python-format +msgid "Fatal - %(reason)s" +msgstr "" + +#: neutron/services/vpn/service_drivers/cisco_csr_db.py:80 +#, python-format +msgid "No available Cisco CSR %(type)s IDs from 
%(min)d..%(max)d" +msgstr "" + +#: neutron/services/vpn/service_drivers/cisco_csr_db.py:135 +#, python-format +msgid "" +"Database inconsistency between IPSec connection and Cisco CSR mapping " +"table (%s)" +msgstr "" + +#: neutron/services/vpn/service_drivers/cisco_csr_db.py:161 +#, python-format +msgid "Reserved new CSR ID %(csr_id)d for %(policy)s ID %(policy_id)s" +msgstr "" + +#: neutron/services/vpn/service_drivers/cisco_csr_db.py:188 +#, python-format +msgid "" +"Mappings for IPSec connection %(conn)s - tunnel=%(tunnel)s " +"ike_policy=%(csr_ike)d ipsec_policy=%(csr_ipsec)d" +msgstr "" + +#: neutron/services/vpn/service_drivers/cisco_csr_db.py:197 +#, python-format +msgid "" +"Existing entry for IPSec connection %s not found in Cisco CSR mapping " +"table" +msgstr "" + +#: neutron/services/vpn/service_drivers/cisco_csr_db.py:224 +#, python-format +msgid "" +"Attempt to create duplicate entry in Cisco CSR mapping table for " +"connection %s" +msgstr "" + +#: neutron/services/vpn/service_drivers/cisco_csr_db.py:227 +#, python-format +msgid "" +"Mapped connection %(conn_id)s to Tunnel%(tunnel_id)d using IKE policy ID " +"%(ike_id)d and IPSec policy ID %(ipsec_id)d" +msgstr "" + +#: neutron/services/vpn/service_drivers/cisco_csr_db.py:239 +#, python-format +msgid "Removed mapping for connection %s" +msgstr "" + +#: neutron/services/vpn/service_drivers/cisco_ipsec.py:39 +#, python-format +msgid "" +"Cisco CSR does not support %(resource)s attribute %(key)s with value " +"'%(value)s'" +msgstr "" + +#: neutron/services/vpn/service_drivers/cisco_ipsec.py:160 +#, python-format +msgid "IPSec connection %s validated for Cisco CSR" +msgstr "" + +#: neutron/tests/unit/test_api_v2_resource.py:176 +#: neutron/tests/unit/test_api_v2_resource.py:246 +msgid "Unmapped error" +msgstr "" + +#: neutron/tests/unit/bigswitch/fake_server.py:74 +#, python-format +msgid "" +"Request: action=%(action)s, uri=%(uri)r, body=%(body)s, " +"headers=%(headers)s" +msgstr "" + +#: 
neutron/tests/unit/bigswitch/fake_server.py:126 +#, python-format +msgid "No floating IPs in requesturi=%(uri)s, body=%(body)s" +msgstr "" + +#: neutron/tests/unit/bigswitch/fake_server.py:135 +#, python-format +msgid "Expected floating IPs from multiple tenants.uri=%(uri)s, body=%(body)s" +msgstr "" + +#: neutron/tests/unit/bigswitch/fake_server.py:180 +#, python-format +msgid "No host cert for %(server)s in cert %(cert)s" +msgstr "" + +#: neutron/tests/unit/db/loadbalancer/test_db_loadbalancer.py:217 +#: neutron/tests/unit/db/loadbalancer/test_db_loadbalancer.py:239 +#: neutron/tests/unit/db/loadbalancer/test_db_loadbalancer.py:258 +#: neutron/tests/unit/db/loadbalancer/test_db_loadbalancer.py:281 +#, python-format +msgid "Unexpected error code: %s" +msgstr "" + +#: neutron/tests/unit/ml2/drivers/mechanism_logger.py:32 +#, python-format +msgid "" +"%(method)s called with network settings %(current)s (original settings " +"%(original)s) and network segments %(segments)s" +msgstr "" + +#: neutron/tests/unit/ml2/drivers/mechanism_logger.py:59 +#, python-format +msgid "" +"%(method)s called with subnet settings %(current)s (original settings " +"%(original)s)" +msgstr "" + +#: neutron/tests/unit/ml2/drivers/mechanism_logger.py:85 +#, python-format +msgid "" +"%(method)s called with port settings %(current)s (original settings " +"%(original)s) bound to segment %(segment)s (original segment " +"%(original_segment)s) using driver %(driver)s (original driver " +"%(original_driver)s) on network %(network)s" +msgstr "" + +#: neutron/tests/unit/nec/stub_ofc_driver.py:67 +#, python-format +msgid "(create_tenant) OFC tenant %s already exists" +msgstr "" + +#: neutron/tests/unit/nec/stub_ofc_driver.py:79 +#, python-format +msgid "(delete_tenant) OFC tenant %s not found" +msgstr "" + +#: neutron/tests/unit/nec/stub_ofc_driver.py:81 +msgid "delete_tenant: SUCCEED" +msgstr "" + +#: neutron/tests/unit/nec/stub_ofc_driver.py:88 +#, python-format +msgid "(create_network) OFC tenant 
%s not found" +msgstr "" + +#: neutron/tests/unit/nec/stub_ofc_driver.py:91 +#, python-format +msgid "(create_network) OFC network %s already exists" +msgstr "" + +#: neutron/tests/unit/nec/stub_ofc_driver.py:102 +#, python-format +msgid "(update_network) OFC network %s not found" +msgstr "" + +#: neutron/tests/unit/nec/stub_ofc_driver.py:106 +msgid "update_network: SUCCEED" +msgstr "" + +#: neutron/tests/unit/nec/stub_ofc_driver.py:114 +#, python-format +msgid "(delete_network) OFC network %s not found" +msgstr "" + +#: neutron/tests/unit/nec/stub_ofc_driver.py:116 +msgid "delete_network: SUCCEED" +msgstr "" + +#: neutron/tests/unit/nec/stub_ofc_driver.py:123 +#, python-format +msgid "(create_port) OFC network %s not found" +msgstr "" + +#: neutron/tests/unit/nec/stub_ofc_driver.py:126 +#, python-format +msgid "(create_port) OFC port %s already exists" +msgstr "" + +#: neutron/tests/unit/nec/stub_ofc_driver.py:140 +#, python-format +msgid "(delete_port) OFC port %s not found" +msgstr "" + +#: neutron/tests/unit/nec/stub_ofc_driver.py:142 +msgid "delete_port: SUCCEED" +msgstr "" + +#: neutron/tests/unit/nec/stub_ofc_driver.py:175 +#, python-format +msgid "(create_router) OFC tenant %s not found" +msgstr "" + +#: neutron/tests/unit/nec/stub_ofc_driver.py:178 +#, python-format +msgid "(create_router) OFC router %s already exists" +msgstr "" + +#: neutron/tests/unit/nec/stub_ofc_driver.py:181 +msgid "Operation on OFC is failed" +msgstr "" + +#: neutron/tests/unit/nec/stub_ofc_driver.py:195 +#: neutron/tests/unit/nec/stub_ofc_driver.py:285 +#, python-format +msgid "(delete_router) OFC router %s not found" +msgstr "" + +#: neutron/tests/unit/nec/stub_ofc_driver.py:197 +msgid "delete_router: SUCCEED" +msgstr "" + +#: neutron/tests/unit/nec/stub_ofc_driver.py:205 +#, python-format +msgid "(add_router_interface) ip_address %s is not a valid format (a.b.c.d/N)." 
+msgstr "" + +#: neutron/tests/unit/nec/stub_ofc_driver.py:210 +#, python-format +msgid "(add_router_interface) OFC router %s not found" +msgstr "" + +#: neutron/tests/unit/nec/stub_ofc_driver.py:213 +#, python-format +msgid "(add_router_interface) OFC network %s not found" +msgstr "" + +#: neutron/tests/unit/nec/stub_ofc_driver.py:220 +#, python-format +msgid "add_router_interface: SUCCEED (if_id=%s)" +msgstr "" + +#: neutron/tests/unit/nec/stub_ofc_driver.py:228 +#: neutron/tests/unit/nec/stub_ofc_driver.py:245 +#, python-format +msgid "(delete_router_interface) OFC router interface %s not found" +msgstr "" + +#: neutron/tests/unit/nec/stub_ofc_driver.py:237 +msgid "update_router_route: SUCCEED" +msgstr "" + +#: neutron/tests/unit/nec/stub_ofc_driver.py:248 +msgid "delete_router_interface: SUCCEED" +msgstr "" + +#: neutron/tests/unit/nec/stub_ofc_driver.py:258 +#, python-format +msgid "(add_router_route) OFC router %s not found" +msgstr "" + +#: neutron/tests/unit/nec/stub_ofc_driver.py:263 +#, python-format +msgid "(add_router_route) route to \"%s\" already exists" +msgstr "" + +#: neutron/tests/unit/nec/stub_ofc_driver.py:268 +#, python-format +msgid "add_router_route: SUCCEED (route_id=%s)" +msgstr "" + +#: neutron/tests/unit/nec/stub_ofc_driver.py:277 +#, python-format +msgid "(delete_router_route) OFC router route %s not found" +msgstr "" + +#: neutron/tests/unit/nec/stub_ofc_driver.py:279 +msgid "delete_router_route: SUCCEED" +msgstr "" + +#: neutron/tests/unit/nec/stub_ofc_driver.py:292 +#, python-format +msgid "list_router_routes: routes=%s" +msgstr "" + +#: neutron/tests/unit/nec/test_ofc_client.py:88 +msgid "The specified OFC resource (/somewhere) is not found." 
+msgstr "" + +#: neutron/tests/unit/nec/test_ofc_client.py:92 +#: neutron/tests/unit/nec/test_ofc_client.py:98 +#: neutron/tests/unit/nec/test_ofc_client.py:107 +msgid "An OFC exception has occurred: Operation on OFC failed" +msgstr "" + +#: neutron/tests/unit/nec/test_ofc_client.py:114 +msgid "An OFC exception has occurred: Failed to connect OFC : " +msgstr "" + +#: neutron/tests/unit/vmware/apiclient/fake.py:406 +#, python-format +msgid "lswitch:%s not found" +msgstr "" + +#: neutron/tests/unit/vmware/apiclient/fake.py:415 +#, python-format +msgid "lrouter:%s not found" +msgstr "" + +#: neutron/tests/unit/vmware/vshield/fake_vcns.py:104 +#, python-format +msgid "Job %s does not nexist" +msgstr "" + +#: neutron/tests/unit/vmware/vshield/fake_vcns.py:116 +#: neutron/tests/unit/vmware/vshield/fake_vcns.py:127 +#: neutron/tests/unit/vmware/vshield/fake_vcns.py:144 +#: neutron/tests/unit/vmware/vshield/fake_vcns.py:162 +#: neutron/tests/unit/vmware/vshield/fake_vcns.py:184 +#: neutron/tests/unit/vmware/vshield/fake_vcns.py:206 +#: neutron/tests/unit/vmware/vshield/fake_vcns.py:290 +#: neutron/tests/unit/vmware/vshield/fake_vcns.py:304 +#: neutron/tests/unit/vmware/vshield/fake_vcns.py:318 +#: neutron/tests/unit/vmware/vshield/fake_vcns.py:360 +#, python-format +msgid "Edge %s does not exist" +msgstr "" + +#: neutron/tests/unit/vmware/vshield/fake_vcns.py:194 +#, python-format +msgid "Rule id %d doest not exist" +msgstr "" + +#: neutron/tests/unit/vmware/vshield/fake_vcns.py:257 +#, python-format +msgid "Lswitch %s does not exist" +msgstr "" + +#: neutron/tests/unit/vmware/vshield/test_edge_router.py:130 +msgid "Tasks not completed" +msgstr "" + diff --git a/neutron/locale/es/LC_MESSAGES/neutron-log-error.po b/neutron/locale/es/LC_MESSAGES/neutron-log-error.po new file mode 100644 index 000000000..4c722b4fd --- /dev/null +++ b/neutron/locale/es/LC_MESSAGES/neutron-log-error.po @@ -0,0 +1,170 @@ +# Translations template for neutron. 
+# Copyright (C) 2014 ORGANIZATION +# This file is distributed under the same license as the neutron project. +# +# Translators: +msgid "" +msgstr "" +"Project-Id-Version: Neutron\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2014-06-09 06:08+0000\n" +"PO-Revision-Date: 2014-05-29 07:49+0000\n" +"Last-Translator: FULL NAME \n" +"Language-Team: Spanish (http://www.transifex.com/projects/p/neutron/language/" +"es/)\n" +"Language: es\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 1.3\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +#: neutron/openstack/common/excutils.py:76 +#, python-format +msgid "Original exception being dropped: %s" +msgstr "Se está descartando excepción original: %s" + +#: neutron/openstack/common/excutils.py:105 +#, python-format +msgid "Unexpected exception occurred %d time(s)... retrying." +msgstr "La excepción inesperada ha ocurrido %d vez(veces)... reintentando." + +#: neutron/openstack/common/periodic_task.py:179 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "" + +#: neutron/openstack/common/service.py:188 +msgid "Exception during rpc cleanup." +msgstr "Excepción durante limpieza de rpc." + +#: neutron/openstack/common/service.py:276 +msgid "Unhandled exception" +msgstr "Excepción no controlada" + +#: neutron/openstack/common/db/api.py:72 +msgid "DB exceeded retry limit." +msgstr "" + +#: neutron/openstack/common/db/api.py:76 +msgid "DB connection error." +msgstr "" + +#: neutron/openstack/common/db/sqlalchemy/session.py:460 +msgid "DB exception wrapped." +msgstr "Excepción de base de datos recortada." 
+ +#: neutron/openstack/common/middleware/notifier.py:40 +#, python-format +msgid "An exception occurred processing the API call: %s " +msgstr "" + +#: neutron/openstack/common/rpc/amqp.py:480 +#: neutron/openstack/common/rpc/impl_zmq.py:286 +msgid "Exception during message handling" +msgstr "Excepción durante el manejo de mensajes" + +#: neutron/openstack/common/rpc/common.py:88 +msgid "Exception in string format operation" +msgstr "Excepción en la operación de formato de cadena" + +#: neutron/openstack/common/rpc/common.py:292 +#, python-format +msgid "Returning exception %s to caller" +msgstr "Devolviendo excepción %s al interlocutor" + +#: neutron/openstack/common/rpc/impl_kombu.py:156 +msgid "Failed to process message ... skipping it." +msgstr "Fallo al procesar mensaje ... omitiendo." + +#: neutron/openstack/common/rpc/impl_kombu.py:160 +msgid "Failed to process message ... will requeue." +msgstr "Fallo al procesar mensaje ... se encolará nuevamente." + +#: neutron/openstack/common/rpc/impl_kombu.py:571 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" +"El servidor AMQP en %(hostname)s:%(port)d es inalcanzable: %(err_str)s. Se " +"volverá a intentar en %(sleep_time)d segundos." 
+ +#: neutron/openstack/common/rpc/impl_kombu.py:625 +#: neutron/openstack/common/rpc/impl_qpid.py:575 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "" +"No se ha podido declarar consumidor para el tema '%(topic)s': %(err_str)s" + +#: neutron/openstack/common/rpc/impl_kombu.py:647 +#: neutron/openstack/common/rpc/impl_qpid.py:594 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "No se ha podido consumir el mensaje de la cola: %s" + +#: neutron/openstack/common/rpc/impl_kombu.py:686 +#: neutron/openstack/common/rpc/impl_qpid.py:629 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "" +"No se ha podido publicar el mensaje para el tema '%(topic)s': %(err_str)s" + +#: neutron/openstack/common/rpc/impl_qpid.py:191 +msgid "Failed to process message... skipping it." +msgstr "No se ha podido procesar el mensaje... saltándoselo." + +#: neutron/openstack/common/rpc/impl_qpid.py:517 +#, python-format +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" +msgstr "" +"No se puede conectar con el servidor AMQP: %(e)s. En reposo durante " +"%(delay)s segundos" + +#: neutron/openstack/common/rpc/impl_qpid.py:602 +msgid "Error processing message. Skipping it." +msgstr "Error al procesar el mensaje. Saltándolo." + +#: neutron/openstack/common/rpc/impl_zmq.py:96 +msgid "JSON serialization failed." +msgstr "Ha fallado la serialización JSON." + +#: neutron/openstack/common/rpc/impl_zmq.py:195 +msgid "ZeroMQ socket could not be closed." +msgstr "No se puede cerrar el socket ZeroMQ." + +#: neutron/openstack/common/rpc/impl_zmq.py:339 +msgid "RPC message did not include method." +msgstr "El mensaje de RPC no incluía método." + +#: neutron/openstack/common/rpc/impl_zmq.py:476 +msgid "Topic socket file creation failed." +msgstr "Ha fallado la creación de archivo de socket de tema." 
+ +#: neutron/openstack/common/rpc/impl_zmq.py:482 +#, python-format +msgid "" +"Local per-topic backlog buffer full for topic %(topic)s. Dropping message." +msgstr "" +"Almacenamiento intermedio de retraso por tema local para el tema %(topic)s. " +"Descartando mensaje." + +#: neutron/openstack/common/rpc/impl_zmq.py:498 +#, python-format +msgid "Required IPC directory does not exist at %s" +msgstr "El directorio IPC requerido no existe en %s" + +#: neutron/openstack/common/rpc/impl_zmq.py:507 +#, python-format +msgid "Permission denied to IPC directory at %s" +msgstr "Permiso denegado para el directorio IPC en %s" + +#: neutron/openstack/common/rpc/impl_zmq.py:510 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." +msgstr "" +"No se ha podido crear el daemon de destinatario ZeroMQ. Es posible que ya se " +"esté utilizando el socket." + +#: neutron/openstack/common/rpc/impl_zmq.py:563 +msgid "ZMQ Envelope version unsupported or unknown." +msgstr "Versión de sobre de ZMQ no soportada o desconocida." diff --git a/neutron/locale/es/LC_MESSAGES/neutron-log-info.po b/neutron/locale/es/LC_MESSAGES/neutron-log-info.po new file mode 100644 index 000000000..71c8f145d --- /dev/null +++ b/neutron/locale/es/LC_MESSAGES/neutron-log-info.po @@ -0,0 +1,128 @@ +# Translations template for neutron. +# Copyright (C) 2014 ORGANIZATION +# This file is distributed under the same license as the neutron project. 
+# +# Translators: +msgid "" +msgstr "" +"Project-Id-Version: Neutron\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2014-06-16 06:08+0000\n" +"PO-Revision-Date: 2014-05-29 07:49+0000\n" +"Last-Translator: FULL NAME \n" +"Language-Team: Spanish (http://www.transifex.com/projects/p/neutron/language/" +"es/)\n" +"Language: es\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 1.3\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +#: neutron/openstack/common/periodic_task.py:125 +#, python-format +msgid "Skipping periodic task %(task)s because its interval is negative" +msgstr "" + +#: neutron/openstack/common/periodic_task.py:130 +#, python-format +msgid "Skipping periodic task %(task)s because it is disabled" +msgstr "" + +#: neutron/openstack/common/service.py:176 +#, python-format +msgid "Caught %s, exiting" +msgstr "Se ha captado %s, saliendo" + +#: neutron/openstack/common/service.py:240 +msgid "Parent process has died unexpectedly, exiting" +msgstr "El proceso padre se ha detenido inesperadamente, saliendo" + +#: neutron/openstack/common/service.py:271 +#, python-format +msgid "Child caught %s, exiting" +msgstr "" + +#: neutron/openstack/common/service.py:310 +msgid "Forking too fast, sleeping" +msgstr "Bifurcación demasiado rápida, en reposo" + +#: neutron/openstack/common/service.py:329 +#, python-format +msgid "Started child %d" +msgstr "Se ha iniciado el hijo %d" + +#: neutron/openstack/common/service.py:339 +#, python-format +msgid "Starting %d workers" +msgstr "Iniciando %d trabajadores" + +#: neutron/openstack/common/service.py:356 +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" +msgstr "Hijo %(pid)d matado por señal %(sig)d" + +#: neutron/openstack/common/service.py:360 +#, python-format +msgid "Child %(pid)s exited with status %(code)d" +msgstr "El hijo %(pid)s ha salido con el estado %(code)d" + +#: 
neutron/openstack/common/service.py:399 +#, python-format +msgid "Caught %s, stopping children" +msgstr "Se ha captado %s, deteniendo hijos" + +#: neutron/openstack/common/service.py:408 +msgid "Wait called after thread killed. Cleaning up." +msgstr "" + +#: neutron/openstack/common/service.py:424 +#, python-format +msgid "Waiting on %d children to exit" +msgstr "En espera de %d hijos para salir" + +#: neutron/openstack/common/db/sqlalchemy/utils.py:379 +#, python-format +msgid "Deleting duplicated row with id: %(id)s from table: %(table)s" +msgstr "Eliminando registro duplicado con id: %(id)s de la tabla: %(table)s" + +#: neutron/openstack/common/rpc/impl_kombu.py:498 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "Volviendo a conectar con el servidor AMQP en %(hostname)s:%(port)d " + +#: neutron/openstack/common/rpc/impl_kombu.py:520 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "Conectado al servidor AMQP en %(hostname)s:%(port)d" + +#: neutron/openstack/common/rpc/impl_qpid.py:523 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "Conectado con el servidor AMQP en %s" + +#: neutron/openstack/common/rpc/impl_zmq.py:371 +msgid "Registering reactor" +msgstr "Registrando reactor" + +#: neutron/openstack/common/rpc/impl_zmq.py:383 +msgid "In reactor registered" +msgstr "Registrado en reactor" + +#: neutron/openstack/common/rpc/impl_zmq.py:388 +msgid "Consuming socket" +msgstr "Consumiendo socket" + +#: neutron/openstack/common/rpc/impl_zmq.py:438 +#, python-format +msgid "Creating proxy for topic: %s" +msgstr "Creando proxy para el tema: %s" + +#: neutron/openstack/common/rpc/impl_zmq.py:591 +msgid "Skipping topic registration. Already registered." +msgstr "Se va a saltar el registro del tema. Ya está registrado." 
+ +#: neutron/openstack/common/rpc/matchmaker.py:216 +#, python-format +msgid "Matchmaker unregistered: %(key)s, %(host)s" +msgstr "Matchmaker no registrado: %(key)s, %(host)s" diff --git a/neutron/locale/fr/LC_MESSAGES/neutron-log-critical.po b/neutron/locale/fr/LC_MESSAGES/neutron-log-critical.po new file mode 100644 index 000000000..52ae6541a --- /dev/null +++ b/neutron/locale/fr/LC_MESSAGES/neutron-log-critical.po @@ -0,0 +1,23 @@ +# Translations template for neutron. +# Copyright (C) 2014 ORGANIZATION +# This file is distributed under the same license as the neutron project. +# +# Translators: +msgid "" +msgstr "" +"Project-Id-Version: Neutron\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2014-06-20 06:09+0000\n" +"PO-Revision-Date: 2014-05-30 06:24+0000\n" +"Last-Translator: FULL NAME \n" +"Language-Team: French (http://www.transifex.com/projects/p/neutron/language/" +"fr/)\n" +"Language: fr\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 1.3\n" +"Plural-Forms: nplurals=2; plural=(n > 1);\n" + +#~ msgid "Dummy message for transifex setup." +#~ msgstr "message fictif pour la configuration transifex" diff --git a/neutron/locale/fr/LC_MESSAGES/neutron-log-error.po b/neutron/locale/fr/LC_MESSAGES/neutron-log-error.po new file mode 100644 index 000000000..c35387900 --- /dev/null +++ b/neutron/locale/fr/LC_MESSAGES/neutron-log-error.po @@ -0,0 +1,171 @@ +# Translations template for neutron. +# Copyright (C) 2014 ORGANIZATION +# This file is distributed under the same license as the neutron project. 
+# +# Translators: +msgid "" +msgstr "" +"Project-Id-Version: Neutron\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2014-06-20 06:08+0000\n" +"PO-Revision-Date: 2014-05-29 07:49+0000\n" +"Last-Translator: FULL NAME \n" +"Language-Team: French (http://www.transifex.com/projects/p/neutron/language/" +"fr/)\n" +"Language: fr\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 1.3\n" +"Plural-Forms: nplurals=2; plural=(n > 1);\n" + +#: neutron/openstack/common/excutils.py:76 +#, python-format +msgid "Original exception being dropped: %s" +msgstr "Exception d'origine en cours de suppression : %s" + +#: neutron/openstack/common/excutils.py:105 +#, python-format +msgid "Unexpected exception occurred %d time(s)... retrying." +msgstr "Exception inattendue survenue %d fois... Nouvel essai." + +#: neutron/openstack/common/periodic_task.py:179 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "" + +#: neutron/openstack/common/service.py:190 +msgid "Exception during rpc cleanup." +msgstr "Exception pendant le nettoyage rpc." + +#: neutron/openstack/common/service.py:279 +msgid "Unhandled exception" +msgstr "Exception non gérée" + +#: neutron/openstack/common/db/api.py:72 +msgid "DB exceeded retry limit." +msgstr "DB dépassé limite de tentatives" + +#: neutron/openstack/common/db/api.py:76 +msgid "DB connection error." +msgstr "erreurs de connexion DB" + +#: neutron/openstack/common/db/sqlalchemy/session.py:460 +msgid "DB exception wrapped." +msgstr "Exception BD encapsulée." 
+ +#: neutron/openstack/common/middleware/notifier.py:40 +#, python-format +msgid "An exception occurred processing the API call: %s " +msgstr "" + +#: neutron/openstack/common/rpc/amqp.py:480 +#: neutron/openstack/common/rpc/impl_zmq.py:286 +msgid "Exception during message handling" +msgstr "Exception pendant le traitement des messages" + +#: neutron/openstack/common/rpc/common.py:88 +msgid "Exception in string format operation" +msgstr "Exception dans l'opération de format de chaîne" + +#: neutron/openstack/common/rpc/common.py:292 +#, python-format +msgid "Returning exception %s to caller" +msgstr "Renvoi de l'exception %s à l'appelant" + +#: neutron/openstack/common/rpc/impl_kombu.py:156 +msgid "Failed to process message ... skipping it." +msgstr "Echec de traitement du message... Message ignoré." + +#: neutron/openstack/common/rpc/impl_kombu.py:160 +msgid "Failed to process message ... will requeue." +msgstr "Échec de traitement du message... Message remis en file d'attente." + +#: neutron/openstack/common/rpc/impl_kombu.py:571 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" +"Le serveur AMQP sur %(hostname)s:%(port)d est inaccessible : %(err_str)s. " +"Nouvelle tentative dans %(sleep_time)d secondes." 
+ +#: neutron/openstack/common/rpc/impl_kombu.py:625 +#: neutron/openstack/common/rpc/impl_qpid.py:575 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "" +"Echec de la déclaration du consommateur pour la rubrique '%(topic)s' : " +"%(err_str)s" + +#: neutron/openstack/common/rpc/impl_kombu.py:647 +#: neutron/openstack/common/rpc/impl_qpid.py:594 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "Echec de la consommation du message depuis la file d'attente : %s" + +#: neutron/openstack/common/rpc/impl_kombu.py:686 +#: neutron/openstack/common/rpc/impl_qpid.py:629 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "" +"Echec de la publication du message dans la rubrique '%(topic)s' : %(err_str)s" + +#: neutron/openstack/common/rpc/impl_qpid.py:191 +msgid "Failed to process message... skipping it." +msgstr "Echec du traitement du message... Message ignoré." + +#: neutron/openstack/common/rpc/impl_qpid.py:517 +#, python-format +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" +msgstr "" +"Impossible de se connecter au serveur AMQP : %(e)s. En veille pendant " +"%(delay)s secondes" + +#: neutron/openstack/common/rpc/impl_qpid.py:602 +msgid "Error processing message. Skipping it." +msgstr "Erreur lors du traitement du message. Message ignoré." + +#: neutron/openstack/common/rpc/impl_zmq.py:96 +msgid "JSON serialization failed." +msgstr "Echec de la sérialisation JSON." + +#: neutron/openstack/common/rpc/impl_zmq.py:195 +msgid "ZeroMQ socket could not be closed." +msgstr "" + +#: neutron/openstack/common/rpc/impl_zmq.py:339 +msgid "RPC message did not include method." +msgstr "Le message d'appel de procédure distante n'a pas inclus la méthode." + +#: neutron/openstack/common/rpc/impl_zmq.py:476 +msgid "Topic socket file creation failed." +msgstr "La création du fichier socket de la rubrique a échoué." 
+ +#: neutron/openstack/common/rpc/impl_zmq.py:482 +#, python-format +msgid "" +"Local per-topic backlog buffer full for topic %(topic)s. Dropping message." +msgstr "" +"Mémoire tampon de commandes en attente par rubrique locale saturée pour la " +"rubrique %(topic)s. Suppression du message." + +#: neutron/openstack/common/rpc/impl_zmq.py:498 +#, python-format +msgid "Required IPC directory does not exist at %s" +msgstr "Répertoire IPC requis n'existe pas à %s" + +#: neutron/openstack/common/rpc/impl_zmq.py:507 +#, python-format +msgid "Permission denied to IPC directory at %s" +msgstr "Permission refusée au répertoire IPC à %s" + +#: neutron/openstack/common/rpc/impl_zmq.py:510 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." +msgstr "" +"Impossible de créer le démon récepteur ZeroMQ. Le socket est sans doute déjà " +"en cours d'utilisation." + +#: neutron/openstack/common/rpc/impl_zmq.py:563 +msgid "ZMQ Envelope version unsupported or unknown." +msgstr "Version de l'enveloppe ZMQ non prise en charge ou inconnue." diff --git a/neutron/locale/fr/LC_MESSAGES/neutron-log-info.po b/neutron/locale/fr/LC_MESSAGES/neutron-log-info.po new file mode 100644 index 000000000..2324a446d --- /dev/null +++ b/neutron/locale/fr/LC_MESSAGES/neutron-log-info.po @@ -0,0 +1,128 @@ +# Translations template for neutron. +# Copyright (C) 2014 ORGANIZATION +# This file is distributed under the same license as the neutron project. 
+# +# Translators: +msgid "" +msgstr "" +"Project-Id-Version: Neutron\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2014-06-16 06:08+0000\n" +"PO-Revision-Date: 2014-05-29 07:49+0000\n" +"Last-Translator: FULL NAME \n" +"Language-Team: French (http://www.transifex.com/projects/p/neutron/language/" +"fr/)\n" +"Language: fr\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 1.3\n" +"Plural-Forms: nplurals=2; plural=(n > 1);\n" + +#: neutron/openstack/common/periodic_task.py:125 +#, python-format +msgid "Skipping periodic task %(task)s because its interval is negative" +msgstr "" + +#: neutron/openstack/common/periodic_task.py:130 +#, python-format +msgid "Skipping periodic task %(task)s because it is disabled" +msgstr "" + +#: neutron/openstack/common/service.py:176 +#, python-format +msgid "Caught %s, exiting" +msgstr "%s interceptée, sortie" + +#: neutron/openstack/common/service.py:240 +msgid "Parent process has died unexpectedly, exiting" +msgstr "Processus parent arrêté de manière inattendue, sortie" + +#: neutron/openstack/common/service.py:271 +#, python-format +msgid "Child caught %s, exiting" +msgstr "" + +#: neutron/openstack/common/service.py:310 +msgid "Forking too fast, sleeping" +msgstr "Bifurcation trop rapide, pause" + +#: neutron/openstack/common/service.py:329 +#, python-format +msgid "Started child %d" +msgstr "Enfant démarré %d" + +#: neutron/openstack/common/service.py:339 +#, python-format +msgid "Starting %d workers" +msgstr "Démarrage des travailleurs %d" + +#: neutron/openstack/common/service.py:356 +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" +msgstr "Enfant %(pid)d arrêté par le signal %(sig)d" + +#: neutron/openstack/common/service.py:360 +#, python-format +msgid "Child %(pid)s exited with status %(code)d" +msgstr "Processus fils %(pid)s terminé avec le status %(code)d" + +#: neutron/openstack/common/service.py:399 +#, 
python-format +msgid "Caught %s, stopping children" +msgstr "%s interceptée, arrêt de l'enfant" + +#: neutron/openstack/common/service.py:408 +msgid "Wait called after thread killed. Cleaning up." +msgstr "" + +#: neutron/openstack/common/service.py:424 +#, python-format +msgid "Waiting on %d children to exit" +msgstr "En attente %d enfants pour sortie" + +#: neutron/openstack/common/db/sqlalchemy/utils.py:379 +#, python-format +msgid "Deleting duplicated row with id: %(id)s from table: %(table)s" +msgstr "Suppression ligne en double avec l'ID : %(id)s de la table : %(table)s" + +#: neutron/openstack/common/rpc/impl_kombu.py:498 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "Reconnexion au serveur AMQP sur %(hostname)s : %(port)d" + +#: neutron/openstack/common/rpc/impl_kombu.py:520 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "Connecté au serveur AMQP sur %(hostname)s : %(port)d" + +#: neutron/openstack/common/rpc/impl_qpid.py:523 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "Connecté au serveur AMQP sur %s" + +#: neutron/openstack/common/rpc/impl_zmq.py:371 +msgid "Registering reactor" +msgstr "Enregistrement de Reactor" + +#: neutron/openstack/common/rpc/impl_zmq.py:383 +msgid "In reactor registered" +msgstr "Reactor entrant enregistré" + +#: neutron/openstack/common/rpc/impl_zmq.py:388 +msgid "Consuming socket" +msgstr "Consommation de socket" + +#: neutron/openstack/common/rpc/impl_zmq.py:438 +#, python-format +msgid "Creating proxy for topic: %s" +msgstr "Création du proxy pour la rubrique : %s" + +#: neutron/openstack/common/rpc/impl_zmq.py:591 +msgid "Skipping topic registration. Already registered." +msgstr "Passez l'enregistrement de la rubrique. Rubrique déjà enregistrée." 
+ +#: neutron/openstack/common/rpc/matchmaker.py:216 +#, python-format +msgid "Matchmaker unregistered: %(key)s, %(host)s" +msgstr "Matchmaker non enregistré : %(key)s, %(host)s" diff --git a/neutron/locale/it/LC_MESSAGES/neutron-log-info.po b/neutron/locale/it/LC_MESSAGES/neutron-log-info.po new file mode 100644 index 000000000..c8692b2ea --- /dev/null +++ b/neutron/locale/it/LC_MESSAGES/neutron-log-info.po @@ -0,0 +1,128 @@ +# Translations template for neutron. +# Copyright (C) 2014 ORGANIZATION +# This file is distributed under the same license as the neutron project. +# +# Translators: +msgid "" +msgstr "" +"Project-Id-Version: Neutron\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2014-06-16 06:08+0000\n" +"PO-Revision-Date: 2014-05-29 07:49+0000\n" +"Last-Translator: FULL NAME \n" +"Language-Team: Italian (http://www.transifex.com/projects/p/neutron/language/" +"it/)\n" +"Language: it\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 1.3\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +#: neutron/openstack/common/periodic_task.py:125 +#, python-format +msgid "Skipping periodic task %(task)s because its interval is negative" +msgstr "" + +#: neutron/openstack/common/periodic_task.py:130 +#, python-format +msgid "Skipping periodic task %(task)s because it is disabled" +msgstr "" + +#: neutron/openstack/common/service.py:176 +#, python-format +msgid "Caught %s, exiting" +msgstr "Rilevato %s, esistente" + +#: neutron/openstack/common/service.py:240 +msgid "Parent process has died unexpectedly, exiting" +msgstr "Il processo principale è stato interrotto inaspettatamente, uscire" + +#: neutron/openstack/common/service.py:271 +#, python-format +msgid "Child caught %s, exiting" +msgstr "" + +#: neutron/openstack/common/service.py:310 +msgid "Forking too fast, sleeping" +msgstr "Sblocco troppo veloce, attendere" + +#: neutron/openstack/common/service.py:329 +#, 
python-format +msgid "Started child %d" +msgstr "Child avviato %d" + +#: neutron/openstack/common/service.py:339 +#, python-format +msgid "Starting %d workers" +msgstr "Avvio %d operatori" + +#: neutron/openstack/common/service.py:356 +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" +msgstr "Child %(pid)d interrotto dal segnale %(sig)d" + +#: neutron/openstack/common/service.py:360 +#, python-format +msgid "Child %(pid)s exited with status %(code)d" +msgstr "Child %(pid)s terminato con stato %(code)d" + +#: neutron/openstack/common/service.py:399 +#, python-format +msgid "Caught %s, stopping children" +msgstr "Intercettato %s, arresto in corso dei children" + +#: neutron/openstack/common/service.py:408 +msgid "Wait called after thread killed. Cleaning up." +msgstr "" + +#: neutron/openstack/common/service.py:424 +#, python-format +msgid "Waiting on %d children to exit" +msgstr "In attesa %d degli elementi secondari per uscire" + +#: neutron/openstack/common/db/sqlalchemy/utils.py:379 +#, python-format +msgid "Deleting duplicated row with id: %(id)s from table: %(table)s" +msgstr "" + +#: neutron/openstack/common/rpc/impl_kombu.py:498 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "Riconnessione al server AMQP su %(hostname)s:%(port)d" + +#: neutron/openstack/common/rpc/impl_kombu.py:520 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "Connesso al server AMQP su %(hostname)s:%(port)d" + +#: neutron/openstack/common/rpc/impl_qpid.py:523 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "Connesso al serve AMQP su %s" + +#: neutron/openstack/common/rpc/impl_zmq.py:371 +msgid "Registering reactor" +msgstr "Registrazione del reattore" + +#: neutron/openstack/common/rpc/impl_zmq.py:383 +msgid "In reactor registered" +msgstr "Reactor interno registrato" + +#: neutron/openstack/common/rpc/impl_zmq.py:388 +msgid "Consuming socket" +msgstr "Utilizzo socket" + +#: 
neutron/openstack/common/rpc/impl_zmq.py:438 +#, python-format +msgid "Creating proxy for topic: %s" +msgstr "Creazione del proxy per il topic: %s" + +#: neutron/openstack/common/rpc/impl_zmq.py:591 +msgid "Skipping topic registration. Already registered." +msgstr "La registrazione dell'argomento viene ignorata. È già registrata." + +#: neutron/openstack/common/rpc/matchmaker.py:216 +#, python-format +msgid "Matchmaker unregistered: %(key)s, %(host)s" +msgstr "" diff --git a/neutron/locale/ja/LC_MESSAGES/neutron-log-error.po b/neutron/locale/ja/LC_MESSAGES/neutron-log-error.po new file mode 100644 index 000000000..9c2cf0376 --- /dev/null +++ b/neutron/locale/ja/LC_MESSAGES/neutron-log-error.po @@ -0,0 +1,170 @@ +# Translations template for neutron. +# Copyright (C) 2014 ORGANIZATION +# This file is distributed under the same license as the neutron project. +# +# Translators: +msgid "" +msgstr "" +"Project-Id-Version: Neutron\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2014-06-19 06:08+0000\n" +"PO-Revision-Date: 2014-05-29 07:49+0000\n" +"Last-Translator: FULL NAME \n" +"Language-Team: Japanese (http://www.transifex.com/projects/p/neutron/" +"language/ja/)\n" +"Language: ja\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 1.3\n" +"Plural-Forms: nplurals=1; plural=0;\n" + +#: neutron/openstack/common/excutils.py:76 +#, python-format +msgid "Original exception being dropped: %s" +msgstr "除去される元の例外: %s" + +#: neutron/openstack/common/excutils.py:105 +#, python-format +msgid "Unexpected exception occurred %d time(s)... retrying." +msgstr "予期せぬ例外が、%d回()発生しました。再試行中。" + +#: neutron/openstack/common/periodic_task.py:179 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "" + +#: neutron/openstack/common/service.py:188 +msgid "Exception during rpc cleanup." 
+msgstr "RPCクリーンアップ中に例外が発生" + +#: neutron/openstack/common/service.py:277 +msgid "Unhandled exception" +msgstr "未処理例外" + +#: neutron/openstack/common/db/api.py:72 +msgid "DB exceeded retry limit." +msgstr "DBへのリトライが上限に達しました。" + +#: neutron/openstack/common/db/api.py:76 +msgid "DB connection error." +msgstr "DB接続エラーが発生しました。" + +#: neutron/openstack/common/db/sqlalchemy/session.py:460 +msgid "DB exception wrapped." +msgstr "DB 例外がラップされました。" + +#: neutron/openstack/common/middleware/notifier.py:40 +#, python-format +msgid "An exception occurred processing the API call: %s " +msgstr "" + +#: neutron/openstack/common/rpc/amqp.py:480 +#: neutron/openstack/common/rpc/impl_zmq.py:286 +msgid "Exception during message handling" +msgstr "メッセージ処理中の例外" + +#: neutron/openstack/common/rpc/common.py:88 +msgid "Exception in string format operation" +msgstr "ストリング・フォーマットの操作で例外が発生しました" + +#: neutron/openstack/common/rpc/common.py:292 +#, python-format +msgid "Returning exception %s to caller" +msgstr "例外 %s を呼び出し元に返しています" + +#: neutron/openstack/common/rpc/impl_kombu.py:156 +msgid "Failed to process message ... skipping it." +msgstr "メッセージの処理に失敗しました... スキップしています。" + +#: neutron/openstack/common/rpc/impl_kombu.py:160 +msgid "Failed to process message ... will requeue." +msgstr "メッセージ処理が失敗しました…キューに再登録します。" + +#: neutron/openstack/common/rpc/impl_kombu.py:571 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." 
+msgstr "" +"%(hostname)s:%(port)d 上の AMQP サーバーは到達不能です: " +"%(err_str)s。%(sleep_time)d 秒後に再試行します。" + +#: neutron/openstack/common/rpc/impl_kombu.py:625 +#: neutron/openstack/common/rpc/impl_qpid.py:575 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "" +"トピック '%(topic)s' のコンシューマーを宣言できませんでした: %(err_str)s" + +#: neutron/openstack/common/rpc/impl_kombu.py:647 +#: neutron/openstack/common/rpc/impl_qpid.py:594 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "キューからのメッセージのコンシュームに失敗しました: %s" + +#: neutron/openstack/common/rpc/impl_kombu.py:686 +#: neutron/openstack/common/rpc/impl_qpid.py:629 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "" +"トピック '%(topic)s' に対してメッセージをパブリッシュできませんでした: " +"%(err_str)s" + +#: neutron/openstack/common/rpc/impl_qpid.py:191 +msgid "Failed to process message... skipping it." +msgstr "メッセージの処理に失敗しました... スキップしています。" + +#: neutron/openstack/common/rpc/impl_qpid.py:517 +#, python-format +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" +msgstr "" +"AMQP サーバーに接続できません: %(e)s。%(delay)s 秒間スリープ状態になります" + +#: neutron/openstack/common/rpc/impl_qpid.py:602 +msgid "Error processing message. Skipping it." +msgstr "メッセージの処理中にエラーが発生しました。スキップしています。" + +#: neutron/openstack/common/rpc/impl_zmq.py:96 +msgid "JSON serialization failed." +msgstr "JSON の直列化が失敗しました。" + +#: neutron/openstack/common/rpc/impl_zmq.py:195 +msgid "ZeroMQ socket could not be closed." +msgstr "ZeroMQソケットをクローズできませんでした。" + +#: neutron/openstack/common/rpc/impl_zmq.py:339 +msgid "RPC message did not include method." +msgstr "RPC メッセージにメソッドが含まれていませんでした。" + +#: neutron/openstack/common/rpc/impl_zmq.py:476 +msgid "Topic socket file creation failed." +msgstr "トピック・ソケット・ファイルの作成に失敗しました。" + +#: neutron/openstack/common/rpc/impl_zmq.py:482 +#, python-format +msgid "" +"Local per-topic backlog buffer full for topic %(topic)s. Dropping message." 
+msgstr "" +"トピック %(topic)s のトピック単位のローカル・バックログ・バッファーがフルで" +"す。メッセージを除去しています。" + +#: neutron/openstack/common/rpc/impl_zmq.py:498 +#, python-format +msgid "Required IPC directory does not exist at %s" +msgstr "必要な IPC ディレクトリが %s に存在しません" + +#: neutron/openstack/common/rpc/impl_zmq.py:507 +#, python-format +msgid "Permission denied to IPC directory at %s" +msgstr "IPC ディレクトリ %s へのアクセス許可がありません" + +#: neutron/openstack/common/rpc/impl_zmq.py:510 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." +msgstr "" +"ZeroMQ 受信側デーモンを作成できませんでした。ソケットが既に使用中である可能性" +"があります。" + +#: neutron/openstack/common/rpc/impl_zmq.py:563 +msgid "ZMQ Envelope version unsupported or unknown." +msgstr "ZMQ エンベロープのバージョンがサポートされないか、不明です。" diff --git a/neutron/locale/ja/LC_MESSAGES/neutron-log-info.po b/neutron/locale/ja/LC_MESSAGES/neutron-log-info.po new file mode 100644 index 000000000..4340ef308 --- /dev/null +++ b/neutron/locale/ja/LC_MESSAGES/neutron-log-info.po @@ -0,0 +1,128 @@ +# Translations template for neutron. +# Copyright (C) 2014 ORGANIZATION +# This file is distributed under the same license as the neutron project. 
+# +# Translators: +msgid "" +msgstr "" +"Project-Id-Version: Neutron\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2014-06-16 06:08+0000\n" +"PO-Revision-Date: 2014-05-29 07:49+0000\n" +"Last-Translator: FULL NAME \n" +"Language-Team: Japanese (http://www.transifex.com/projects/p/neutron/" +"language/ja/)\n" +"Language: ja\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 1.3\n" +"Plural-Forms: nplurals=1; plural=0;\n" + +#: neutron/openstack/common/periodic_task.py:125 +#, python-format +msgid "Skipping periodic task %(task)s because its interval is negative" +msgstr "" + +#: neutron/openstack/common/periodic_task.py:130 +#, python-format +msgid "Skipping periodic task %(task)s because it is disabled" +msgstr "" + +#: neutron/openstack/common/service.py:176 +#, python-format +msgid "Caught %s, exiting" +msgstr "%s が見つかりました。終了しています" + +#: neutron/openstack/common/service.py:240 +msgid "Parent process has died unexpectedly, exiting" +msgstr "親プロセスが予期せずに停止しました。終了しています" + +#: neutron/openstack/common/service.py:271 +#, python-format +msgid "Child caught %s, exiting" +msgstr "" + +#: neutron/openstack/common/service.py:310 +msgid "Forking too fast, sleeping" +msgstr "fork が早すぎます。スリープ状態にしています" + +#: neutron/openstack/common/service.py:329 +#, python-format +msgid "Started child %d" +msgstr "子 %d を開始しました" + +#: neutron/openstack/common/service.py:339 +#, python-format +msgid "Starting %d workers" +msgstr "%d ワーカーを開始しています" + +#: neutron/openstack/common/service.py:356 +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" +msgstr "子 %(pid)d がシグナル %(sig)d によって強制終了されました" + +#: neutron/openstack/common/service.py:360 +#, python-format +msgid "Child %(pid)s exited with status %(code)d" +msgstr "子 %(pid)s が状況 %(code)d で終了しました" + +#: neutron/openstack/common/service.py:399 +#, python-format +msgid "Caught %s, stopping children" +msgstr "%s が見つかりました。子を停止しています" + +#: 
neutron/openstack/common/service.py:408 +msgid "Wait called after thread killed. Cleaning up." +msgstr "" + +#: neutron/openstack/common/service.py:424 +#, python-format +msgid "Waiting on %d children to exit" +msgstr "%d 個の子で終了を待機しています" + +#: neutron/openstack/common/db/sqlalchemy/utils.py:379 +#, python-format +msgid "Deleting duplicated row with id: %(id)s from table: %(table)s" +msgstr "" + +#: neutron/openstack/common/rpc/impl_kombu.py:498 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "%(hostname)s:%(port)d 上の AMQP サーバーに再接続しています" + +#: neutron/openstack/common/rpc/impl_kombu.py:520 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "%(hostname)s:%(port)d 上の AMQP サーバーに接続しました" + +#: neutron/openstack/common/rpc/impl_qpid.py:523 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "%s 上の AMQP サーバーに接続しました" + +#: neutron/openstack/common/rpc/impl_zmq.py:371 +msgid "Registering reactor" +msgstr "リアクターの登録中" + +#: neutron/openstack/common/rpc/impl_zmq.py:383 +msgid "In reactor registered" +msgstr "入力リアクターが登録されました" + +#: neutron/openstack/common/rpc/impl_zmq.py:388 +msgid "Consuming socket" +msgstr "ソケットの消費中" + +#: neutron/openstack/common/rpc/impl_zmq.py:438 +#, python-format +msgid "Creating proxy for topic: %s" +msgstr "トピック用プロキシーの作成中: %s" + +#: neutron/openstack/common/rpc/impl_zmq.py:591 +msgid "Skipping topic registration. Already registered." +msgstr "トピックの登録をスキップします。既に登録されています。" + +#: neutron/openstack/common/rpc/matchmaker.py:216 +#, python-format +msgid "Matchmaker unregistered: %(key)s, %(host)s" +msgstr "matchmaker が登録されていません: %(key)s, %(host)s" diff --git a/neutron/locale/ko_KR/LC_MESSAGES/neutron-log-error.po b/neutron/locale/ko_KR/LC_MESSAGES/neutron-log-error.po new file mode 100644 index 000000000..a594504b9 --- /dev/null +++ b/neutron/locale/ko_KR/LC_MESSAGES/neutron-log-error.po @@ -0,0 +1,165 @@ +# Translations template for neutron. 
+# Copyright (C) 2014 ORGANIZATION +# This file is distributed under the same license as the neutron project. +# +# Translators: +msgid "" +msgstr "" +"Project-Id-Version: Neutron\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2014-06-16 06:08+0000\n" +"PO-Revision-Date: 2014-05-29 07:49+0000\n" +"Last-Translator: FULL NAME \n" +"Language-Team: Korean (Korea) (http://www.transifex.com/projects/p/neutron/" +"language/ko_KR/)\n" +"Language: ko_KR\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 1.3\n" +"Plural-Forms: nplurals=1; plural=0;\n" + +#: neutron/openstack/common/excutils.py:76 +#, python-format +msgid "Original exception being dropped: %s" +msgstr "기존 예외가 삭제됨: %s" + +#: neutron/openstack/common/excutils.py:105 +#, python-format +msgid "Unexpected exception occurred %d time(s)... retrying." +msgstr "예기치 않은 예외 %d 번 발생하였습니다... 다시 시도중." + +#: neutron/openstack/common/periodic_task.py:179 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "" + +#: neutron/openstack/common/service.py:188 +msgid "Exception during rpc cleanup." +msgstr "" + +#: neutron/openstack/common/service.py:277 +msgid "Unhandled exception" +msgstr "처리되지 않은 예외" + +#: neutron/openstack/common/db/api.py:72 +msgid "DB exceeded retry limit." +msgstr "" + +#: neutron/openstack/common/db/api.py:76 +msgid "DB connection error." +msgstr "데이터 베이스 연결 에러." + +#: neutron/openstack/common/db/sqlalchemy/session.py:460 +msgid "DB exception wrapped." +msgstr "DB 예외가 랩핑되었습니다." 
+ +#: neutron/openstack/common/middleware/notifier.py:40 +#, python-format +msgid "An exception occurred processing the API call: %s " +msgstr "" + +#: neutron/openstack/common/rpc/amqp.py:480 +#: neutron/openstack/common/rpc/impl_zmq.py:286 +msgid "Exception during message handling" +msgstr "메시지 처리 중 예외" + +#: neutron/openstack/common/rpc/common.py:88 +msgid "Exception in string format operation" +msgstr "문자열 형식화 오퍼레이션의 예외" + +#: neutron/openstack/common/rpc/common.py:292 +#, python-format +msgid "Returning exception %s to caller" +msgstr "%s 예외를 호출자에게 리턴 중" + +#: neutron/openstack/common/rpc/impl_kombu.py:156 +msgid "Failed to process message ... skipping it." +msgstr "메시지 처리 실패 ... 건너뜁니다." + +#: neutron/openstack/common/rpc/impl_kombu.py:160 +msgid "Failed to process message ... will requeue." +msgstr "메시지 처리 실패 ... 큐에 다시 넣습니다." + +#: neutron/openstack/common/rpc/impl_kombu.py:571 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" +"%(hostname)s:%(port)d의 AMQP 서버에 접근할 수 없음: %(err_str)s. " +"%(sleep_time)d초 내에 다시 시도하십시오. " + +#: neutron/openstack/common/rpc/impl_kombu.py:625 +#: neutron/openstack/common/rpc/impl_qpid.py:575 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "'%(topic)s' 주제에 대한 이용자를 선언하지 못했음: %(err_str)s" + +#: neutron/openstack/common/rpc/impl_kombu.py:647 +#: neutron/openstack/common/rpc/impl_qpid.py:594 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "큐의 메시지를 이용하지 못했음: %s" + +#: neutron/openstack/common/rpc/impl_kombu.py:686 +#: neutron/openstack/common/rpc/impl_qpid.py:629 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "'%(topic)s' 주제에 메시지를 공개하지 못했음: %(err_str)s" + +#: neutron/openstack/common/rpc/impl_qpid.py:191 +msgid "Failed to process message... skipping it." +msgstr "메시지를 처리하지 못했습니다. 건너뛰는 중입니다. 
" + +#: neutron/openstack/common/rpc/impl_qpid.py:517 +#, python-format +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" +msgstr "AMQP 서버 %(e)s에 연결할 수 없습니다. %(delay)s 초 휴면 상태입니다. " + +#: neutron/openstack/common/rpc/impl_qpid.py:602 +msgid "Error processing message. Skipping it." +msgstr "메시지 처리 오류입니다. 건너뛰는 중입니다. " + +#: neutron/openstack/common/rpc/impl_zmq.py:96 +msgid "JSON serialization failed." +msgstr "JSON 직렬화에 실패했습니다. " + +#: neutron/openstack/common/rpc/impl_zmq.py:195 +msgid "ZeroMQ socket could not be closed." +msgstr "" + +#: neutron/openstack/common/rpc/impl_zmq.py:339 +msgid "RPC message did not include method." +msgstr "RPC 메시지에 메소드가 없습니다. " + +#: neutron/openstack/common/rpc/impl_zmq.py:476 +msgid "Topic socket file creation failed." +msgstr "주제 소켓 파일 작성에 실패했습니다. " + +#: neutron/openstack/common/rpc/impl_zmq.py:482 +#, python-format +msgid "" +"Local per-topic backlog buffer full for topic %(topic)s. Dropping message." +msgstr "" +"%(topic)s 주제에 대한 로컬 주제별 백로그 버퍼가 가득 찼습니다. 메시지 삭제 중" +"입니다. " + +#: neutron/openstack/common/rpc/impl_zmq.py:498 +#, python-format +msgid "Required IPC directory does not exist at %s" +msgstr "%s 에서 필요한 IPC 디렉터리가 없습니다" + +#: neutron/openstack/common/rpc/impl_zmq.py:507 +#, python-format +msgid "Permission denied to IPC directory at %s" +msgstr "%s에서 IPC 디렉터리에 대한 권한을 거부했습니다" + +#: neutron/openstack/common/rpc/impl_zmq.py:510 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." +msgstr "" +"ZeroMQ 수신기 디먼을 작성할 수 없습니다. 소켓이 이미 사용 중일 수 있습니다. " + +#: neutron/openstack/common/rpc/impl_zmq.py:563 +msgid "ZMQ Envelope version unsupported or unknown." +msgstr "ZMQ Envelope 버전을 지원하지 않거나 알 수 없습니다. 
" diff --git a/neutron/locale/ko_KR/LC_MESSAGES/neutron-log-info.po b/neutron/locale/ko_KR/LC_MESSAGES/neutron-log-info.po new file mode 100644 index 000000000..ab4e1ebea --- /dev/null +++ b/neutron/locale/ko_KR/LC_MESSAGES/neutron-log-info.po @@ -0,0 +1,128 @@ +# Translations template for neutron. +# Copyright (C) 2014 ORGANIZATION +# This file is distributed under the same license as the neutron project. +# +# Translators: +msgid "" +msgstr "" +"Project-Id-Version: Neutron\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2014-06-16 06:08+0000\n" +"PO-Revision-Date: 2014-05-29 07:49+0000\n" +"Last-Translator: FULL NAME \n" +"Language-Team: Korean (Korea) (http://www.transifex.com/projects/p/neutron/" +"language/ko_KR/)\n" +"Language: ko_KR\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 1.3\n" +"Plural-Forms: nplurals=1; plural=0;\n" + +#: neutron/openstack/common/periodic_task.py:125 +#, python-format +msgid "Skipping periodic task %(task)s because its interval is negative" +msgstr "" + +#: neutron/openstack/common/periodic_task.py:130 +#, python-format +msgid "Skipping periodic task %(task)s because it is disabled" +msgstr "" + +#: neutron/openstack/common/service.py:176 +#, python-format +msgid "Caught %s, exiting" +msgstr "%s 발견, 종료 중" + +#: neutron/openstack/common/service.py:240 +msgid "Parent process has died unexpectedly, exiting" +msgstr "상위 프로세스가 예기치 않게 정지했습니다. 종료 중" + +#: neutron/openstack/common/service.py:271 +#, python-format +msgid "Child caught %s, exiting" +msgstr "" + +#: neutron/openstack/common/service.py:310 +msgid "Forking too fast, sleeping" +msgstr "포크가 너무 빠름. 
정지 중" + +#: neutron/openstack/common/service.py:329 +#, python-format +msgid "Started child %d" +msgstr "%d 하위를 시작했음" + +#: neutron/openstack/common/service.py:339 +#, python-format +msgid "Starting %d workers" +msgstr "%d 작업자 시작 중" + +#: neutron/openstack/common/service.py:356 +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" +msgstr "%(pid)d 하위가 %(sig)d 신호에 의해 강제 종료됨" + +#: neutron/openstack/common/service.py:360 +#, python-format +msgid "Child %(pid)s exited with status %(code)d" +msgstr "%(pid)s 하위가 %(code)d 상태와 함께 종료했음" + +#: neutron/openstack/common/service.py:399 +#, python-format +msgid "Caught %s, stopping children" +msgstr "%s 발견, 하위 중지 중" + +#: neutron/openstack/common/service.py:408 +msgid "Wait called after thread killed. Cleaning up." +msgstr "" + +#: neutron/openstack/common/service.py:424 +#, python-format +msgid "Waiting on %d children to exit" +msgstr "%d 하위에서 종료하기를 대기 중임" + +#: neutron/openstack/common/db/sqlalchemy/utils.py:379 +#, python-format +msgid "Deleting duplicated row with id: %(id)s from table: %(table)s" +msgstr "" + +#: neutron/openstack/common/rpc/impl_kombu.py:498 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "%(hostname)s:%(port)d에서 AMQP 서버에 다시 연결 중" + +#: neutron/openstack/common/rpc/impl_kombu.py:520 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "%(hostname)s:%(port)d에서 AMQP 서버에 연결되었음" + +#: neutron/openstack/common/rpc/impl_qpid.py:523 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "%s의 AMQP 서버에 연결했음" + +#: neutron/openstack/common/rpc/impl_zmq.py:371 +msgid "Registering reactor" +msgstr "등록 리액터" + +#: neutron/openstack/common/rpc/impl_zmq.py:383 +msgid "In reactor registered" +msgstr "인 리액터 등록" + +#: neutron/openstack/common/rpc/impl_zmq.py:388 +msgid "Consuming socket" +msgstr "소켓 이용 중" + +#: neutron/openstack/common/rpc/impl_zmq.py:438 +#, python-format +msgid "Creating proxy for topic: %s" +msgstr "주제에 대한 프록시 
작성: %s" + +#: neutron/openstack/common/rpc/impl_zmq.py:591 +msgid "Skipping topic registration. Already registered." +msgstr "주제 등록을 건너뜁니다. 이미 등록되었습니다. " + +#: neutron/openstack/common/rpc/matchmaker.py:216 +#, python-format +msgid "Matchmaker unregistered: %(key)s, %(host)s" +msgstr "등록되지 않은 중개자: %(key)s, %(host)s" diff --git a/neutron/locale/neutron-log-critical.pot b/neutron/locale/neutron-log-critical.pot new file mode 100644 index 000000000..f93eeb4f0 --- /dev/null +++ b/neutron/locale/neutron-log-critical.pot @@ -0,0 +1,19 @@ +# Translations template for neutron. +# Copyright (C) 2014 ORGANIZATION +# This file is distributed under the same license as the neutron project. +# FIRST AUTHOR , 2014. +# +#, fuzzy +msgid "" +msgstr "" +"Project-Id-Version: neutron 2014.2.dev608.g787bba2\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2014-06-09 06:08+0000\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME \n" +"Language-Team: LANGUAGE \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 1.3\n" + diff --git a/neutron/locale/neutron-log-error.pot b/neutron/locale/neutron-log-error.pot new file mode 100644 index 000000000..a43e01062 --- /dev/null +++ b/neutron/locale/neutron-log-error.pot @@ -0,0 +1,158 @@ +# Translations template for neutron. +# Copyright (C) 2014 ORGANIZATION +# This file is distributed under the same license as the neutron project. +# FIRST AUTHOR , 2014. 
+# +#, fuzzy +msgid "" +msgstr "" +"Project-Id-Version: neutron 2014.2.dev608.g787bba2\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2014-06-09 06:08+0000\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME \n" +"Language-Team: LANGUAGE \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 1.3\n" + +#: neutron/openstack/common/excutils.py:76 +#, python-format +msgid "Original exception being dropped: %s" +msgstr "" + +#: neutron/openstack/common/excutils.py:105 +#, python-format +msgid "Unexpected exception occurred %d time(s)... retrying." +msgstr "" + +#: neutron/openstack/common/periodic_task.py:179 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "" + +#: neutron/openstack/common/service.py:188 +msgid "Exception during rpc cleanup." +msgstr "" + +#: neutron/openstack/common/service.py:276 +msgid "Unhandled exception" +msgstr "" + +#: neutron/openstack/common/db/api.py:72 +msgid "DB exceeded retry limit." +msgstr "" + +#: neutron/openstack/common/db/api.py:76 +msgid "DB connection error." +msgstr "" + +#: neutron/openstack/common/db/sqlalchemy/session.py:460 +msgid "DB exception wrapped." +msgstr "" + +#: neutron/openstack/common/middleware/notifier.py:40 +#, python-format +msgid "An exception occurred processing the API call: %s " +msgstr "" + +#: neutron/openstack/common/rpc/amqp.py:480 +#: neutron/openstack/common/rpc/impl_zmq.py:286 +msgid "Exception during message handling" +msgstr "" + +#: neutron/openstack/common/rpc/common.py:88 +msgid "Exception in string format operation" +msgstr "" + +#: neutron/openstack/common/rpc/common.py:292 +#, python-format +msgid "Returning exception %s to caller" +msgstr "" + +#: neutron/openstack/common/rpc/impl_kombu.py:156 +msgid "Failed to process message ... skipping it." +msgstr "" + +#: neutron/openstack/common/rpc/impl_kombu.py:160 +msgid "Failed to process message ... 
will requeue." +msgstr "" + +#: neutron/openstack/common/rpc/impl_kombu.py:571 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" + +#: neutron/openstack/common/rpc/impl_kombu.py:625 +#: neutron/openstack/common/rpc/impl_qpid.py:575 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "" + +#: neutron/openstack/common/rpc/impl_kombu.py:647 +#: neutron/openstack/common/rpc/impl_qpid.py:594 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "" + +#: neutron/openstack/common/rpc/impl_kombu.py:686 +#: neutron/openstack/common/rpc/impl_qpid.py:629 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "" + +#: neutron/openstack/common/rpc/impl_qpid.py:191 +msgid "Failed to process message... skipping it." +msgstr "" + +#: neutron/openstack/common/rpc/impl_qpid.py:517 +#, python-format +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" +msgstr "" + +#: neutron/openstack/common/rpc/impl_qpid.py:602 +msgid "Error processing message. Skipping it." +msgstr "" + +#: neutron/openstack/common/rpc/impl_zmq.py:96 +msgid "JSON serialization failed." +msgstr "" + +#: neutron/openstack/common/rpc/impl_zmq.py:195 +msgid "ZeroMQ socket could not be closed." +msgstr "" + +#: neutron/openstack/common/rpc/impl_zmq.py:339 +msgid "RPC message did not include method." +msgstr "" + +#: neutron/openstack/common/rpc/impl_zmq.py:476 +msgid "Topic socket file creation failed." +msgstr "" + +#: neutron/openstack/common/rpc/impl_zmq.py:482 +#, python-format +msgid "Local per-topic backlog buffer full for topic %(topic)s. Dropping message." 
+msgstr "" + +#: neutron/openstack/common/rpc/impl_zmq.py:498 +#, python-format +msgid "Required IPC directory does not exist at %s" +msgstr "" + +#: neutron/openstack/common/rpc/impl_zmq.py:507 +#, python-format +msgid "Permission denied to IPC directory at %s" +msgstr "" + +#: neutron/openstack/common/rpc/impl_zmq.py:510 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." +msgstr "" + +#: neutron/openstack/common/rpc/impl_zmq.py:563 +msgid "ZMQ Envelope version unsupported or unknown." +msgstr "" + diff --git a/neutron/locale/neutron-log-info.pot b/neutron/locale/neutron-log-info.pot new file mode 100644 index 000000000..7ca982b44 --- /dev/null +++ b/neutron/locale/neutron-log-info.pot @@ -0,0 +1,127 @@ +# Translations template for neutron. +# Copyright (C) 2014 ORGANIZATION +# This file is distributed under the same license as the neutron project. +# FIRST AUTHOR , 2014. +# +#, fuzzy +msgid "" +msgstr "" +"Project-Id-Version: neutron 2014.2.dev32.g043f04c\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2014-06-16 06:08+0000\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME \n" +"Language-Team: LANGUAGE \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 1.3\n" + +#: neutron/openstack/common/periodic_task.py:125 +#, python-format +msgid "Skipping periodic task %(task)s because its interval is negative" +msgstr "" + +#: neutron/openstack/common/periodic_task.py:130 +#, python-format +msgid "Skipping periodic task %(task)s because it is disabled" +msgstr "" + +#: neutron/openstack/common/service.py:176 +#, python-format +msgid "Caught %s, exiting" +msgstr "" + +#: neutron/openstack/common/service.py:240 +msgid "Parent process has died unexpectedly, exiting" +msgstr "" + +#: neutron/openstack/common/service.py:271 +#, python-format +msgid "Child caught %s, exiting" +msgstr "" + +#: 
neutron/openstack/common/service.py:310 +msgid "Forking too fast, sleeping" +msgstr "" + +#: neutron/openstack/common/service.py:329 +#, python-format +msgid "Started child %d" +msgstr "" + +#: neutron/openstack/common/service.py:339 +#, python-format +msgid "Starting %d workers" +msgstr "" + +#: neutron/openstack/common/service.py:356 +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" +msgstr "" + +#: neutron/openstack/common/service.py:360 +#, python-format +msgid "Child %(pid)s exited with status %(code)d" +msgstr "" + +#: neutron/openstack/common/service.py:399 +#, python-format +msgid "Caught %s, stopping children" +msgstr "" + +#: neutron/openstack/common/service.py:408 +msgid "Wait called after thread killed. Cleaning up." +msgstr "" + +#: neutron/openstack/common/service.py:424 +#, python-format +msgid "Waiting on %d children to exit" +msgstr "" + +#: neutron/openstack/common/db/sqlalchemy/utils.py:379 +#, python-format +msgid "Deleting duplicated row with id: %(id)s from table: %(table)s" +msgstr "" + +#: neutron/openstack/common/rpc/impl_kombu.py:498 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: neutron/openstack/common/rpc/impl_kombu.py:520 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: neutron/openstack/common/rpc/impl_qpid.py:523 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "" + +#: neutron/openstack/common/rpc/impl_zmq.py:371 +msgid "Registering reactor" +msgstr "" + +#: neutron/openstack/common/rpc/impl_zmq.py:383 +msgid "In reactor registered" +msgstr "" + +#: neutron/openstack/common/rpc/impl_zmq.py:388 +msgid "Consuming socket" +msgstr "" + +#: neutron/openstack/common/rpc/impl_zmq.py:438 +#, python-format +msgid "Creating proxy for topic: %s" +msgstr "" + +#: neutron/openstack/common/rpc/impl_zmq.py:591 +msgid "Skipping topic registration. Already registered." 
+msgstr "" + +#: neutron/openstack/common/rpc/matchmaker.py:216 +#, python-format +msgid "Matchmaker unregistered: %(key)s, %(host)s" +msgstr "" + diff --git a/neutron/locale/neutron-log-warning.pot b/neutron/locale/neutron-log-warning.pot new file mode 100644 index 000000000..bb272314d --- /dev/null +++ b/neutron/locale/neutron-log-warning.pot @@ -0,0 +1,53 @@ +# Translations template for neutron. +# Copyright (C) 2014 ORGANIZATION +# This file is distributed under the same license as the neutron project. +# FIRST AUTHOR , 2014. +# +#, fuzzy +msgid "" +msgstr "" +"Project-Id-Version: neutron 2014.2.dev608.g787bba2\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2014-06-09 06:08+0000\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME \n" +"Language-Team: LANGUAGE \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 1.3\n" + +#: neutron/openstack/common/service.py:363 +#, python-format +msgid "pid %d not in child list" +msgstr "" + +#: neutron/openstack/common/db/sqlalchemy/session.py:506 +#, python-format +msgid "Database server has gone away: %s" +msgstr "" + +#: neutron/openstack/common/db/sqlalchemy/session.py:559 +msgid "Unable to detect effective SQL mode" +msgstr "" + +#: neutron/openstack/common/db/sqlalchemy/session.py:567 +#, python-format +msgid "MySQL SQL mode is '%s', consider enabling TRADITIONAL or STRICT_ALL_TABLES" +msgstr "" + +#: neutron/openstack/common/db/sqlalchemy/session.py:673 +#, python-format +msgid "SQL connection failed. %s attempts left." +msgstr "" + +#: neutron/openstack/common/db/sqlalchemy/utils.py:97 +msgid "Id not in sort_keys; is sort_keys unique?" 
+msgstr "" + +#: neutron/openstack/common/rpc/matchmaker_ring.py:75 +#: neutron/openstack/common/rpc/matchmaker_ring.py:93 +#, python-format +msgid "No key defining hosts for topic '%s', see ringfile" +msgstr "" + diff --git a/neutron/locale/neutron.pot b/neutron/locale/neutron.pot new file mode 100644 index 000000000..6255c19d3 --- /dev/null +++ b/neutron/locale/neutron.pot @@ -0,0 +1,16162 @@ +# Translations template for neutron. +# Copyright (C) 2014 ORGANIZATION +# This file is distributed under the same license as the neutron project. +# FIRST AUTHOR , 2014. +# +#, fuzzy +msgid "" +msgstr "" +"Project-Id-Version: neutron 2014.2.dev124.g431937c\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2014-06-21 06:08+0000\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME \n" +"Language-Team: LANGUAGE \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 1.3\n" + +#: neutron/auth.py:37 +msgid "X_USER_ID is not found in request" +msgstr "" + +#: neutron/context.py:83 +#, python-format +msgid "Arguments dropped when creating context: %s" +msgstr "" + +#: neutron/context.py:111 +#, python-format +msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" +msgstr "" + +#: neutron/manager.py:71 +#, python-format +msgid "dhcp_agents_per_network must be >= 1. '%s' is invalid." +msgstr "" + +#: neutron/manager.py:83 +msgid "Neutron core_plugin not configured!" +msgstr "" + +#: neutron/manager.py:112 +#, python-format +msgid "Loading core plugin: %s" +msgstr "" + +#: neutron/manager.py:137 +#, python-format +msgid "Error loading plugin by name, %s" +msgstr "" + +#: neutron/manager.py:138 +#, python-format +msgid "Error loading plugin by class, %s" +msgstr "" + +#: neutron/manager.py:139 +msgid "Plugin not found." 
+msgstr "" + +#: neutron/manager.py:144 +msgid "Loading services supported by the core plugin" +msgstr "" + +#: neutron/manager.py:152 +#, python-format +msgid "Service %s is supported by the core plugin" +msgstr "" + +#: neutron/manager.py:165 +#, python-format +msgid "Loading service plugins: %s" +msgstr "" + +#: neutron/manager.py:170 +#, python-format +msgid "Loading Plugin: %s" +msgstr "" + +#: neutron/manager.py:178 +#, python-format +msgid "Multiple plugins for service %s were configured" +msgstr "" + +#: neutron/manager.py:190 +#, python-format +msgid "Successfully loaded %(type)s plugin. Description: %(desc)s" +msgstr "" + +#: neutron/policy.py:88 +#, python-format +msgid "Loading policies from file: %s" +msgstr "" + +#: neutron/policy.py:95 +#, python-format +msgid "" +"Found deprecated policy rule:%s. Please consider upgrading your policy " +"configuration file" +msgstr "" + +#: neutron/policy.py:107 +#, python-format +msgid "" +"Inserting policy:%(new_policy)s in place of deprecated " +"policy:%(old_policy)s" +msgstr "" + +#: neutron/policy.py:115 +#, python-format +msgid "" +"Backward compatibility unavailable for deprecated policy %s. The policy " +"will not be enforced" +msgstr "" + +#: neutron/policy.py:137 +#, python-format +msgid "Unable to find data type descriptor for attribute %s" +msgstr "" + +#: neutron/policy.py:142 +#, python-format +msgid "" +"Attribute type descriptor is not a dict. Unable to generate any sub-attr " +"policy rule for %s." +msgstr "" + +#: neutron/policy.py:215 +#, python-format +msgid "" +"Unable to identify a target field from:%s.match should be in the form " +"%%()s" +msgstr "" + +#: neutron/policy.py:241 +#, python-format +msgid "Unable to find ':' as separator in %s." 
+msgstr "" + +#: neutron/policy.py:245 +#, python-format +msgid "Unable to find resource name in %s" +msgstr "" + +#: neutron/policy.py:254 +#, python-format +msgid "" +"Unable to verify match:%(match)s as the parent resource: %(res)s was not " +"found" +msgstr "" + +#: neutron/policy.py:280 +#, python-format +msgid "Policy check error while calling %s!" +msgstr "" + +#: neutron/policy.py:311 +#, python-format +msgid "Unable to find requested field: %(field)s in target: %(target_dict)s" +msgstr "" + +#: neutron/policy.py:369 +#, python-format +msgid "Failed policy check for '%s'" +msgstr "" + +#: neutron/quota.py:36 +msgid "Resource name(s) that are supported in quota features" +msgstr "" + +#: neutron/quota.py:40 +msgid "" +"Default number of resource allowed per tenant. A negative value means " +"unlimited." +msgstr "" + +#: neutron/quota.py:44 +msgid "Number of networks allowed per tenant.A negative value means unlimited." +msgstr "" + +#: neutron/quota.py:48 +msgid "Number of subnets allowed per tenant, A negative value means unlimited." +msgstr "" + +#: neutron/quota.py:52 +msgid "Number of ports allowed per tenant. A negative value means unlimited." +msgstr "" + +#: neutron/quota.py:56 +msgid "Default driver to use for quota checks" +msgstr "" + +#: neutron/quota.py:150 neutron/quota.py:155 +msgid "Access to this resource was denied." +msgstr "" + +#: neutron/quota.py:228 +msgid "" +"ConfDriver is used as quota_driver because the loaded plugin does not " +"support 'quotas' table." +msgstr "" + +#: neutron/quota.py:233 +#, python-format +msgid "Loaded quota_driver: %s." +msgstr "" + +#: neutron/quota.py:242 +#, python-format +msgid "%s is already registered." 
+msgstr "" + +#: neutron/service.py:40 +msgid "Seconds between running periodic tasks" +msgstr "" + +#: neutron/service.py:43 +msgid "Number of separate worker processes for service" +msgstr "" + +#: neutron/service.py:46 +msgid "Number of RPC worker processes for service" +msgstr "" + +#: neutron/service.py:49 +msgid "" +"Range of seconds to randomly delay when starting the periodic task " +"scheduler to reduce stampeding. (Disable by setting to 0)" +msgstr "" + +#: neutron/service.py:105 neutron/service.py:163 +msgid "Unrecoverable error: please check log for details." +msgstr "" + +#: neutron/service.py:144 +msgid "Active plugin doesn't implement start_rpc_listeners" +msgstr "" + +#: neutron/service.py:146 +#, python-format +msgid "'rpc_workers = %d' ignored because start_rpc_listeners is not implemented." +msgstr "" + +#: neutron/service.py:170 +msgid "No known API applications configured." +msgstr "" + +#: neutron/service.py:177 +#, python-format +msgid "Neutron service started, listening on %(host)s:%(port)s" +msgstr "" + +#: neutron/service.py:278 +msgid "Exception occurs when timer stops" +msgstr "" + +#: neutron/service.py:288 +msgid "Exception occurs when waiting for timer" +msgstr "" + +#: neutron/wsgi.py:53 +msgid "Number of backlog requests to configure the socket with" +msgstr "" + +#: neutron/wsgi.py:57 +msgid "" +"Sets the value of TCP_KEEPIDLE in seconds for each server socket. Not " +"supported on OS X." 
+msgstr "" + +#: neutron/wsgi.py:61 +msgid "Number of seconds to keep retrying to listen" +msgstr "" + +#: neutron/wsgi.py:64 +msgid "Max header line to accommodate large tokens" +msgstr "" + +#: neutron/wsgi.py:67 +msgid "Enable SSL on the API server" +msgstr "" + +#: neutron/wsgi.py:69 +msgid "CA certificate file to use to verify connecting clients" +msgstr "" + +#: neutron/wsgi.py:72 +msgid "Certificate file to use when starting the server securely" +msgstr "" + +#: neutron/wsgi.py:75 +msgid "Private key file to use when starting the server securely" +msgstr "" + +#: neutron/wsgi.py:134 +#, python-format +msgid "Unable to listen on %(host)s:%(port)s" +msgstr "" + +#: neutron/wsgi.py:140 +#, python-format +msgid "Unable to find ssl_cert_file : %s" +msgstr "" + +#: neutron/wsgi.py:146 +#, python-format +msgid "Unable to find ssl_key_file : %s" +msgstr "" + +#: neutron/wsgi.py:151 +#, python-format +msgid "Unable to find ssl_ca_file : %s" +msgstr "" + +#: neutron/wsgi.py:184 +#, python-format +msgid "Could not bind to %(host)s:%(port)s after trying for %(time)d seconds" +msgstr "" + +#: neutron/wsgi.py:344 +msgid "Missing Content-Type" +msgstr "" + +#: neutron/wsgi.py:533 +#, python-format +msgid "Data %(data)s type is %(type)s" +msgstr "" + +#: neutron/wsgi.py:616 +msgid "Cannot understand JSON" +msgstr "" + +#: neutron/wsgi.py:629 neutron/wsgi.py:632 +msgid "Inline DTD forbidden" +msgstr "" + +#: neutron/wsgi.py:713 +msgid "Cannot understand XML" +msgstr "" + +#: neutron/wsgi.py:822 +msgid "Unrecognized Content-Type provided in request" +msgstr "" + +#: neutron/wsgi.py:826 +msgid "No Content-Type provided in request" +msgstr "" + +#: neutron/wsgi.py:830 +msgid "Empty body provided in request" +msgstr "" + +#: neutron/wsgi.py:837 +msgid "Unable to deserialize body as provided Content-Type" +msgstr "" + +#: neutron/wsgi.py:933 +msgid "You must implement __call__" +msgstr "" + +#: neutron/wsgi.py:1026 neutron/api/v2/base.py:192 neutron/api/v2/base.py:333 +#: 
neutron/api/v2/base.py:473 neutron/api/v2/base.py:524 +#: neutron/extensions/l3agentscheduler.py:49 +#: neutron/extensions/l3agentscheduler.py:87 +msgid "The resource could not be found." +msgstr "" + +#: neutron/wsgi.py:1073 +#, python-format +msgid "%(method)s %(url)s" +msgstr "" + +#: neutron/wsgi.py:1079 +msgid "Unsupported Content-Type" +msgstr "" + +#: neutron/wsgi.py:1080 +#, python-format +msgid "InvalidContentType: %s" +msgstr "" + +#: neutron/wsgi.py:1084 +msgid "Malformed request body" +msgstr "" + +#: neutron/wsgi.py:1085 +#, python-format +msgid "MalformedRequestBody: %s" +msgstr "" + +#: neutron/wsgi.py:1092 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "" + +#: neutron/wsgi.py:1097 +msgid "Internal error" +msgstr "" + +#: neutron/wsgi.py:1112 neutron/wsgi.py:1214 +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "" + +#: neutron/wsgi.py:1115 +#, python-format +msgid "%(url)s returned a fault: %(exception)s" +msgstr "" + +#: neutron/wsgi.py:1233 +#, python-format +msgid "The requested content type %s is invalid." +msgstr "" + +#: neutron/wsgi.py:1292 +msgid "Could not deserialize data" +msgstr "" + +#: neutron/agent/dhcp_agent.py:53 +msgid "Interval to resync." +msgstr "" + +#: neutron/agent/dhcp_agent.py:56 +msgid "The driver used to manage the DHCP server." +msgstr "" + +#: neutron/agent/dhcp_agent.py:58 +msgid "Support Metadata requests on isolated networks." +msgstr "" + +#: neutron/agent/dhcp_agent.py:60 +msgid "" +"Allows for serving metadata requests from a dedicated network. Requires " +"enable_isolated_metadata = True" +msgstr "" + +#: neutron/agent/dhcp_agent.py:64 +msgid "Number of threads to use during sync process." 
+msgstr "" + +#: neutron/agent/dhcp_agent.py:67 neutron/agent/l3_agent.py:190 +#: neutron/agent/metadata/namespace_proxy.py:167 +msgid "Location of Metadata Proxy UNIX domain socket" +msgstr "" + +#: neutron/agent/dhcp_agent.py:104 +#, python-format +msgid "" +"The '%s' DHCP-driver does not support retrieving of a list of existing " +"networks" +msgstr "" + +#: neutron/agent/dhcp_agent.py:111 neutron/agent/dhcp_agent.py:600 +msgid "DHCP agent started" +msgstr "" + +#: neutron/agent/dhcp_agent.py:120 +#, python-format +msgid "Calling driver for network: %(net)s action: %(action)s" +msgstr "" + +#: neutron/agent/dhcp_agent.py:136 +#, python-format +msgid "" +"Unable to %(action)s dhcp for %(net_id)s: there is a conflict with its " +"current state; please check that the network and/or its subnet(s) still " +"exist." +msgstr "" + +#: neutron/agent/dhcp_agent.py:145 neutron/agent/dhcp_agent.py:203 +#, python-format +msgid "Network %s has been deleted." +msgstr "" + +#: neutron/agent/dhcp_agent.py:147 +#, python-format +msgid "Unable to %(action)s dhcp for %(net_id)s." +msgstr "" + +#: neutron/agent/dhcp_agent.py:157 +msgid "Synchronizing state" +msgstr "" + +#: neutron/agent/dhcp_agent.py:169 +#, python-format +msgid "Unable to sync network state on deleted network %s" +msgstr "" + +#: neutron/agent/dhcp_agent.py:175 +msgid "Synchronizing state complete" +msgstr "" + +#: neutron/agent/dhcp_agent.py:179 +msgid "Unable to sync network state." +msgstr "" + +#: neutron/agent/dhcp_agent.py:191 +#, python-format +msgid "resync: %(reason)s" +msgstr "" + +#: neutron/agent/dhcp_agent.py:207 +#, python-format +msgid "Network %s info call failed." +msgstr "" + +#: neutron/agent/dhcp_agent.py:219 +#, python-format +msgid "" +"Network %s may have been deleted and its resources may have already been " +"disposed." +msgstr "" + +#: neutron/agent/dhcp_agent.py:344 +#, python-format +msgid "" +"%(port_num)d router ports found on the metadata access network. 
Only the " +"port %(port_id)s, for router %(router_id)s will be considered" +msgstr "" + +#: neutron/agent/dhcp_agent.py:582 neutron/agent/l3_agent.py:961 +#: neutron/agent/metadata/agent.py:364 +#: neutron/services/metering/agents/metering_agent.py:273 +msgid "" +"Neutron server does not support state report. State report for this agent" +" will be disabled." +msgstr "" + +#: neutron/agent/dhcp_agent.py:588 neutron/agent/l3_agent.py:966 +#: neutron/agent/metadata/agent.py:369 +#: neutron/plugins/ibm/agent/sdnve_neutron_agent.py:111 +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:798 +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:250 +#: neutron/plugins/nec/agent/nec_neutron_agent.py:182 +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:265 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:240 +#: neutron/services/loadbalancer/agent/agent_manager.py:123 +#: neutron/services/metering/agents/metering_agent.py:278 +msgid "Failed reporting state!" +msgstr "" + +#: neutron/agent/dhcp_agent.py:595 +#, python-format +msgid "Agent updated: %(payload)s" +msgstr "" + +#: neutron/agent/dhcp_agent.py:597 neutron/agent/l3_agent.py:971 +#: neutron/services/metering/agents/metering_agent.py:281 +#, python-format +msgid "agent_updated by server side %s!" +msgstr "" + +#: neutron/agent/l3_agent.py:164 neutron/debug/debug_agent.py:43 +msgid "Name of bridge used for external network traffic." +msgstr "" + +#: neutron/agent/l3_agent.py:168 +msgid "TCP Port used by Neutron metadata namespace proxy." +msgstr "" + +#: neutron/agent/l3_agent.py:172 +msgid "" +"Send this many gratuitous ARPs for HA setup, if less than or equal to 0, " +"the feature is disabled" +msgstr "" + +#: neutron/agent/l3_agent.py:175 +msgid "" +"If namespaces is disabled, the l3 agent can only configure a router that " +"has the matching router ID." 
+msgstr "" + +#: neutron/agent/l3_agent.py:180 +msgid "Agent should implement routers with no gateway" +msgstr "" + +#: neutron/agent/l3_agent.py:182 +msgid "UUID of external network for routers implemented by the agents." +msgstr "" + +#: neutron/agent/l3_agent.py:185 +msgid "Allow running metadata proxy." +msgstr "" + +#: neutron/agent/l3_agent.py:187 +msgid "Delete namespace after removing a router." +msgstr "" + +#: neutron/agent/l3_agent.py:210 +#, python-format +msgid "Error importing interface driver '%s'" +msgstr "" + +#: neutron/agent/l3_agent.py:238 neutron/agent/linux/dhcp.py:716 +#: neutron/services/metering/drivers/iptables/iptables_driver.py:89 +msgid "An interface driver must be specified" +msgstr "" + +#: neutron/agent/l3_agent.py:243 +msgid "Router id is required if not using namespaces." +msgstr "" + +#: neutron/agent/l3_agent.py:264 +msgid "RuntimeError in obtaining router list for namespace cleanup." +msgstr "" + +#: neutron/agent/l3_agent.py:284 +#, python-format +msgid "Failed to destroy stale router namespace %s" +msgstr "" + +#: neutron/agent/l3_agent.py:305 neutron/agent/linux/dhcp.py:227 +#, python-format +msgid "Failed trying to delete namespace: %s" +msgstr "" + +#: neutron/agent/l3_agent.py:335 +msgid "" +"The 'gateway_external_network_id' option must be configured for this " +"agent as Neutron has more than one external network." +msgstr "" + +#: neutron/agent/l3_agent.py:359 +#, python-format +msgid "Info for router %s were not found. 
Skipping router removal" +msgstr "" + +#: neutron/agent/l3_agent.py:408 +#: neutron/services/firewall/agents/varmour/varmour_router.py:104 +#, python-format +msgid "Router port %s has no IP address" +msgstr "" + +#: neutron/agent/l3_agent.py:410 neutron/db/l3_db.py:973 +#: neutron/services/firewall/agents/varmour/varmour_router.py:107 +#, python-format +msgid "Ignoring multiple IPs on router port %s" +msgstr "" + +#: neutron/agent/l3_agent.py:450 +#, python-format +msgid "Deleting stale internal router device: %s" +msgstr "" + +#: neutron/agent/l3_agent.py:479 +#, python-format +msgid "Deleting stale external router device: %s" +msgstr "" + +#: neutron/agent/l3_agent.py:598 +#, python-format +msgid "Unable to configure IP address for floating IP: %s" +msgstr "" + +#: neutron/agent/l3_agent.py:628 +#, python-format +msgid "Failed sending gratuitous ARP: %s" +msgstr "" + +#: neutron/agent/l3_agent.py:737 +#, python-format +msgid "Got router deleted notification for %s" +msgstr "" + +#: neutron/agent/l3_agent.py:742 +#, python-format +msgid "Got routers updated notification :%s" +msgstr "" + +#: neutron/agent/l3_agent.py:750 +#, python-format +msgid "Got router removed from agent :%r" +msgstr "" + +#: neutron/agent/l3_agent.py:754 +#, python-format +msgid "Got router added to agent :%r" +msgstr "" + +#: neutron/agent/l3_agent.py:761 +#, python-format +msgid "The external network bridge '%s' does not exist" +msgstr "" + +#: neutron/agent/l3_agent.py:811 +#, python-format +msgid "Starting RPC loop for %d updated routers" +msgstr "" + +#: neutron/agent/l3_agent.py:829 +msgid "RPC loop successfully completed" +msgstr "" + +#: neutron/agent/l3_agent.py:831 neutron/agent/l3_agent.py:869 +#: neutron/services/metering/agents/metering_agent.py:62 +msgid "Failed synchronizing routers" +msgstr "" + +#: neutron/agent/l3_agent.py:849 +#, python-format +msgid "Starting _sync_routers_task - fullsync:%s" +msgstr "" + +#: neutron/agent/l3_agent.py:860 +#, python-format +msgid 
"Processing :%r" +msgstr "" + +#: neutron/agent/l3_agent.py:863 +msgid "_sync_routers_task successfully completed" +msgstr "" + +#: neutron/agent/l3_agent.py:865 +msgid "Failed synchronizing routers due to RPC error" +msgstr "" + +#: neutron/agent/l3_agent.py:878 +msgid "L3 agent started" +msgstr "" + +#: neutron/agent/l3_agent.py:893 +#, python-format +msgid "Added route entry is '%s'" +msgstr "" + +#: neutron/agent/l3_agent.py:901 +#, python-format +msgid "Removed route entry is '%s'" +msgstr "" + +#: neutron/agent/l3_agent.py:934 +msgid "Report state task started" +msgstr "" + +#: neutron/agent/l3_agent.py:958 +msgid "Report state task successfully completed" +msgstr "" + +#: neutron/agent/netns_cleanup_util.py:61 +msgid "Delete the namespace by removing all devices." +msgstr "" + +#: neutron/agent/netns_cleanup_util.py:118 +#, python-format +msgid "Unable to find bridge for device: %s" +msgstr "" + +#: neutron/agent/netns_cleanup_util.py:142 +#, python-format +msgid "Error unable to destroy namespace: %s" +msgstr "" + +#: neutron/agent/ovs_cleanup_util.py:41 +msgid "" +"True to delete all ports on all the OpenvSwitch bridges. False to delete " +"ports created by Neutron on integration and external network bridges." +msgstr "" + +#: neutron/agent/ovs_cleanup_util.py:75 +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:668 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:278 +#, python-format +msgid "Delete %s" +msgstr "" + +#: neutron/agent/ovs_cleanup_util.py:105 +#, python-format +msgid "Cleaning %s" +msgstr "" + +#: neutron/agent/ovs_cleanup_util.py:112 +msgid "OVS cleanup completed successfully" +msgstr "" + +#: neutron/agent/securitygroups_rpc.py:31 +msgid "Driver for security groups firewall in the L2 agent" +msgstr "" + +#: neutron/agent/securitygroups_rpc.py:35 +msgid "" +"Controls whether the neutron security group API is enabled in the server." 
+" It should be false when using no security groups or using the nova " +"security group API." +msgstr "" + +#: neutron/agent/securitygroups_rpc.py:58 +#: neutron/agent/securitygroups_rpc.py:142 +msgid "Driver configuration doesn't match with enable_security_group" +msgstr "" + +#: neutron/agent/securitygroups_rpc.py:71 +msgid "Disabled security-group extension." +msgstr "" + +#: neutron/agent/securitygroups_rpc.py:73 +msgid "Disabled allowed-address-pairs extension." +msgstr "" + +#: neutron/agent/securitygroups_rpc.py:80 +#, python-format +msgid "Get security group rules for devices via rpc %r" +msgstr "" + +#: neutron/agent/securitygroups_rpc.py:97 +msgid "" +"Security group agent binding currently not set. This should be set by the" +" end of the init process." +msgstr "" + +#: neutron/agent/securitygroups_rpc.py:108 +#, python-format +msgid "Security group rule updated on remote: %s" +msgstr "" + +#: neutron/agent/securitygroups_rpc.py:120 +#, python-format +msgid "Security group member updated on remote: %s" +msgstr "" + +#: neutron/agent/securitygroups_rpc.py:127 +#: neutron/agent/securitygroups_rpc.py:196 +msgid "Provider rule updated" +msgstr "" + +#: neutron/agent/securitygroups_rpc.py:140 +#, python-format +msgid "Init firewall settings (driver=%s)" +msgstr "" + +#: neutron/agent/securitygroups_rpc.py:159 +#, python-format +msgid "Preparing filters for devices %s" +msgstr "" + +#: neutron/agent/securitygroups_rpc.py:167 +#, python-format +msgid "Security group rule updated %r" +msgstr "" + +#: neutron/agent/securitygroups_rpc.py:174 +#, python-format +msgid "Security group member updated %r" +msgstr "" + +#: neutron/agent/securitygroups_rpc.py:188 +#, python-format +msgid "" +"Adding %s devices to the list of devices for which firewall needs to be " +"refreshed" +msgstr "" + +#: neutron/agent/securitygroups_rpc.py:208 +#, python-format +msgid "Remove device filter for %r" +msgstr "" + +#: neutron/agent/securitygroups_rpc.py:217 +msgid "Refresh firewall 
rules" +msgstr "" + +#: neutron/agent/securitygroups_rpc.py:221 +msgid "No ports here to refresh firewall" +msgstr "" + +#: neutron/agent/securitygroups_rpc.py:227 +#, python-format +msgid "Update port filter for %s" +msgstr "" + +#: neutron/agent/securitygroups_rpc.py:245 +#, python-format +msgid "Preparing device filters for %d new devices" +msgstr "" + +#: neutron/agent/securitygroups_rpc.py:258 +msgid "Refreshing firewall for all filtered devices" +msgstr "" + +#: neutron/agent/securitygroups_rpc.py:266 +#, python-format +msgid "Refreshing firewall for %d devices" +msgstr "" + +#: neutron/agent/common/config.py:31 +msgid "Root helper application." +msgstr "" + +#: neutron/agent/common/config.py:36 +msgid "" +"Seconds between nodes reporting state to server; should be less than " +"agent_down_time, best if it is half or less than agent_down_time." +msgstr "" + +#: neutron/agent/common/config.py:43 +msgid "The driver used to manage the virtual interface." +msgstr "" + +#: neutron/agent/common/config.py:48 +msgid "Allow overlapping IP." +msgstr "" + +#: neutron/agent/common/config.py:104 +msgid "" +"DEFAULT.root_helper is deprecated! Please move root_helper configuration " +"to [AGENT] section." +msgstr "" + +#: neutron/agent/common/config.py:115 +msgid "Top-level directory for maintaining dhcp state" +msgstr "" + +#: neutron/agent/linux/async_process.py:68 +msgid "respawn_interval must be >= 0 if provided." +msgstr "" + +#: neutron/agent/linux/async_process.py:82 +msgid "Process is already started" +msgstr "" + +#: neutron/agent/linux/async_process.py:84 +#, python-format +msgid "Launching async process [%s]." +msgstr "" + +#: neutron/agent/linux/async_process.py:90 +#, python-format +msgid "Halting async process [%s]." +msgstr "" + +#: neutron/agent/linux/async_process.py:93 +msgid "Process is not running." +msgstr "" + +#: neutron/agent/linux/async_process.py:165 +#, python-format +msgid "An error occurred while killing [%s]." 
+msgstr "" + +#: neutron/agent/linux/async_process.py:172 +#, python-format +msgid "Halting async process [%s] in response to an error." +msgstr "" + +#: neutron/agent/linux/async_process.py:178 +#, python-format +msgid "Respawning async process [%s]." +msgstr "" + +#: neutron/agent/linux/async_process.py:187 +#, python-format +msgid "An error occurred while communicating with async process [%s]." +msgstr "" + +#: neutron/agent/linux/daemon.py:39 +#, python-format +msgid "Error while handling pidfile: %s" +msgstr "" + +#: neutron/agent/linux/daemon.py:47 +msgid "Unable to unlock pid file" +msgstr "" + +#: neutron/agent/linux/daemon.py:96 +msgid "Fork failed" +msgstr "" + +#: neutron/agent/linux/daemon.py:138 +#, python-format +msgid "Pidfile %s already exist. Daemon already running?" +msgstr "" + +#: neutron/agent/linux/dhcp.py:45 +msgid "Location to store DHCP server config files" +msgstr "" + +#: neutron/agent/linux/dhcp.py:48 neutron/plugins/vmware/dhcp_meta/nsx.py:44 +msgid "Domain to use for building the hostnames" +msgstr "" + +#: neutron/agent/linux/dhcp.py:51 +msgid "Override the default dnsmasq settings with this file" +msgstr "" + +#: neutron/agent/linux/dhcp.py:53 +msgid "Comma-separated list of the DNS servers which will be used as forwarders." +msgstr "" + +#: neutron/agent/linux/dhcp.py:57 +msgid "Delete namespace after removing a dhcp server." +msgstr "" + +#: neutron/agent/linux/dhcp.py:61 +msgid "Limit number of leases to prevent a denial-of-service." 
+msgstr "" + +#: neutron/agent/linux/dhcp.py:209 +#, python-format +msgid "" +"DHCP for %(net_id)s is stale, pid %(pid)d does not exist, performing " +"cleanup" +msgstr "" + +#: neutron/agent/linux/dhcp.py:216 +#, python-format +msgid "No DHCP started for %s" +msgstr "" + +#: neutron/agent/linux/dhcp.py:248 neutron/agent/linux/external_process.py:80 +#, python-format +msgid "Error while reading %s" +msgstr "" + +#: neutron/agent/linux/dhcp.py:255 neutron/agent/linux/external_process.py:88 +#, python-format +msgid "Unable to convert value in %s" +msgstr "" + +#: neutron/agent/linux/dhcp.py:257 neutron/agent/linux/external_process.py:86 +#, python-format +msgid "Unable to access %s" +msgstr "" + +#: neutron/agent/linux/dhcp.py:318 +#, python-format +msgid "" +"FAILED VERSION REQUIREMENT FOR DNSMASQ. DHCP AGENT MAY NOT RUN CORRECTLY!" +" Please ensure that its version is %s or above!" +msgstr "" + +#: neutron/agent/linux/dhcp.py:323 +#, python-format +msgid "" +"Unable to determine dnsmasq version. Please ensure that its version is %s" +" or above!" 
+msgstr "" + +#: neutron/agent/linux/dhcp.py:421 +#, python-format +msgid "Killing dhcpmasq for network since all subnets have turned off DHCP: %s" +msgstr "" + +#: neutron/agent/linux/dhcp.py:433 +#, python-format +msgid "Pid %d is stale, relaunching dnsmasq" +msgstr "" + +#: neutron/agent/linux/dhcp.py:434 +#, python-format +msgid "Reloading allocations for network: %s" +msgstr "" + +#: neutron/agent/linux/dhcp.py:474 +#, python-format +msgid "Building host file: %s" +msgstr "" + +#: neutron/agent/linux/dhcp.py:484 +#, python-format +msgid "Adding %(mac)s : %(name)s : %(ip)s" +msgstr "" + +#: neutron/agent/linux/dhcp.py:500 +#, python-format +msgid "Done building host file %s" +msgstr "" + +#: neutron/agent/linux/dhcp.py:723 +#, python-format +msgid "Error importing interface driver '%(driver)s': %(inner)s" +msgstr "" + +#: neutron/agent/linux/dhcp.py:763 +#, python-format +msgid "Setting gateway for dhcp netns on net %(n)s to %(ip)s" +msgstr "" + +#: neutron/agent/linux/dhcp.py:773 +#, python-format +msgid "Removing gateway for dhcp netns on net %s" +msgstr "" + +#: neutron/agent/linux/dhcp.py:817 +#, python-format +msgid "" +"DHCP port %(device_id)s on network %(network_id)s does not yet exist. " +"Checking for a reserved port." +msgstr "" + +#: neutron/agent/linux/dhcp.py:831 +#, python-format +msgid "DHCP port %(device_id)s on network %(network_id)s does not yet exist." +msgstr "" + +#: neutron/agent/linux/dhcp.py:866 neutron/debug/debug_agent.py:69 +#, python-format +msgid "Reusing existing device: %s." 
+msgstr "" + +#: neutron/agent/linux/external_process.py:32 +msgid "Location to store child pid files" +msgstr "" + +#: neutron/agent/linux/external_process.py:63 +#, python-format +msgid "Process for %(uuid)s pid %(pid)d is stale, ignoring command" +msgstr "" + +#: neutron/agent/linux/external_process.py:66 +#, python-format +msgid "No process started for %s" +msgstr "" + +#: neutron/agent/linux/interface.py:39 +msgid "Name of Open vSwitch bridge to use" +msgstr "" + +#: neutron/agent/linux/interface.py:42 +msgid "Uses veth for an interface or not" +msgstr "" + +#: neutron/agent/linux/interface.py:44 +msgid "MTU setting for device." +msgstr "" + +#: neutron/agent/linux/interface.py:46 +msgid "Mapping between flavor and LinuxInterfaceDriver" +msgstr "" + +#: neutron/agent/linux/interface.py:48 +msgid "Admin username" +msgstr "" + +#: neutron/agent/linux/interface.py:50 neutron/agent/metadata/agent.py:56 +#: neutron/plugins/metaplugin/common/config.py:67 +msgid "Admin password" +msgstr "" + +#: neutron/agent/linux/interface.py:53 neutron/agent/metadata/agent.py:59 +#: neutron/plugins/metaplugin/common/config.py:70 +msgid "Admin tenant name" +msgstr "" + +#: neutron/agent/linux/interface.py:55 neutron/agent/metadata/agent.py:61 +#: neutron/plugins/metaplugin/common/config.py:72 +msgid "Authentication URL" +msgstr "" + +#: neutron/agent/linux/interface.py:57 neutron/agent/metadata/agent.py:63 +#: neutron/common/config.py:47 neutron/plugins/metaplugin/common/config.py:74 +msgid "The type of authentication to use" +msgstr "" + +#: neutron/agent/linux/interface.py:59 neutron/agent/metadata/agent.py:65 +#: neutron/plugins/metaplugin/common/config.py:76 +msgid "Authentication region" +msgstr "" + +#: neutron/agent/linux/interface.py:216 neutron/agent/linux/interface.py:270 +#: neutron/agent/linux/interface.py:332 neutron/agent/linux/interface.py:381 +#, python-format +msgid "Device %s already exists" +msgstr "" + +#: neutron/agent/linux/interface.py:234 
neutron/agent/linux/interface.py:281 +#: neutron/agent/linux/interface.py:344 neutron/agent/linux/interface.py:388 +#, python-format +msgid "Unplugged interface '%s'" +msgstr "" + +#: neutron/agent/linux/interface.py:236 neutron/agent/linux/interface.py:280 +#: neutron/agent/linux/interface.py:346 neutron/agent/linux/interface.py:390 +#, python-format +msgid "Failed unplugging interface '%s'" +msgstr "" + +#: neutron/agent/linux/interface.py:448 +#, python-format +msgid "Driver location: %s" +msgstr "" + +#: neutron/agent/linux/ip_lib.py:27 +msgid "Force ip_lib calls to use the root helper" +msgstr "" + +#: neutron/agent/linux/iptables_firewall.py:58 +#, python-format +msgid "Preparing device (%s) filter" +msgstr "" + +#: neutron/agent/linux/iptables_firewall.py:66 +#, python-format +msgid "Updating device (%s) filter" +msgstr "" + +#: neutron/agent/linux/iptables_firewall.py:68 +#, python-format +msgid "Attempted to update port filter which is not filtered %s" +msgstr "" + +#: neutron/agent/linux/iptables_firewall.py:77 +#, python-format +msgid "Removing device (%s) filter" +msgstr "" + +#: neutron/agent/linux/iptables_firewall.py:79 +#, python-format +msgid "Attempted to remove port filter which is not filtered %r" +msgstr "" + +#: neutron/agent/linux/iptables_manager.py:159 +#, python-format +msgid "Attempted to remove chain %s which does not exist" +msgstr "" + +#: neutron/agent/linux/iptables_manager.py:201 +#, python-format +msgid "Unknown chain: %r" +msgstr "" + +#: neutron/agent/linux/iptables_manager.py:236 +#, python-format +msgid "" +"Tried to remove rule that was not there: %(chain)r %(rule)r %(wrap)r " +"%(top)r" +msgstr "" + +#: neutron/agent/linux/iptables_manager.py:374 +#, python-format +msgid "Got semaphore / lock \"%s\"" +msgstr "" + +#: neutron/agent/linux/iptables_manager.py:377 +#, python-format +msgid "Semaphore / lock released \"%s\"" +msgstr "" + +#: neutron/agent/linux/iptables_manager.py:426 +#: 
neutron/tests/unit/test_iptables_manager.py:560 +#: neutron/tests/unit/test_iptables_manager.py:594 +#, python-format +msgid "" +"IPTablesManager.apply failed to apply the following set of iptables " +"rules:\n" +"%s" +msgstr "" + +#: neutron/agent/linux/iptables_manager.py:429 +msgid "IPTablesManager.apply completed with success" +msgstr "" + +#: neutron/agent/linux/iptables_manager.py:439 +#, python-format +msgid "Unable to find table %s" +msgstr "" + +#: neutron/agent/linux/iptables_manager.py:639 +#, python-format +msgid "Attempted to get traffic counters of chain %s which does not exist" +msgstr "" + +#: neutron/agent/linux/ovs_lib.py:34 +msgid "Timeout in seconds for ovs-vsctl commands" +msgstr "" + +#: neutron/agent/linux/ovs_lib.py:68 neutron/agent/linux/ovs_lib.py:168 +#: neutron/agent/linux/ovs_lib.py:315 +#: neutron/plugins/bigswitch/agent/restproxy_agent.py:55 +#, python-format +msgid "Unable to execute %(cmd)s. Exception: %(exception)s" +msgstr "" + +#: neutron/agent/linux/ovs_lib.py:223 +msgid "defer_apply_on" +msgstr "" + +#: neutron/agent/linux/ovs_lib.py:227 +msgid "defer_apply_off" +msgstr "" + +#: neutron/agent/linux/ovs_lib.py:237 +#, python-format +msgid "Applying following deferred flows to bridge %s" +msgstr "" + +#: neutron/agent/linux/ovs_lib.py:240 +#, python-format +msgid "%(action)s: %(flow)s" +msgstr "" + +#: neutron/agent/linux/ovs_lib.py:266 +msgid "" +"Unable to create VXLAN tunnel port. Please ensure that an openvswitch " +"version that supports VXLAN is installed." 
+msgstr "" + +#: neutron/agent/linux/ovs_lib.py:363 +#, python-format +msgid "Found not yet ready openvswitch port: %s" +msgstr "" + +#: neutron/agent/linux/ovs_lib.py:378 +#, python-format +msgid "Found failed openvswitch port: %s" +msgstr "" + +#: neutron/agent/linux/ovs_lib.py:433 +#, python-format +msgid "Port: %(port_name)s is on %(switch)s, not on %(br_name)s" +msgstr "" + +#: neutron/agent/linux/ovs_lib.py:441 +#, python-format +msgid "ofport: %(ofport)s for VIF: %(vif)s is not a positive integer" +msgstr "" + +#: neutron/agent/linux/ovs_lib.py:451 +#, python-format +msgid "Unable to parse interface details. Exception: %s" +msgstr "" + +#: neutron/agent/linux/ovs_lib.py:469 +#, python-format +msgid "Unable to determine mac address for %s" +msgstr "" + +#: neutron/agent/linux/ovs_lib.py:486 +#, python-format +msgid "Interface %s not found." +msgstr "" + +#: neutron/agent/linux/ovs_lib.py:497 +#, python-format +msgid "Unable to retrieve bridges. Exception: %s" +msgstr "" + +#: neutron/agent/linux/ovs_lib.py:506 +#, python-format +msgid "Bridge %s not found." 
+msgstr "" + +#: neutron/agent/linux/ovs_lib.py:522 +msgid "Cannot match priority on flow deletion or modification" +msgstr "" + +#: neutron/agent/linux/ovs_lib.py:527 +msgid "Must specify one or more actions on flow addition or modification" +msgstr "" + +#: neutron/agent/linux/ovsdb_monitor.py:46 +#, python-format +msgid "Output received from ovsdb monitor: %s" +msgstr "" + +#: neutron/agent/linux/ovsdb_monitor.py:52 +#, python-format +msgid "Error received from ovsdb monitor: %s" +msgstr "" + +#: neutron/agent/linux/utils.py:48 +#, python-format +msgid "Running command: %s" +msgstr "" + +#: neutron/agent/linux/utils.py:71 +#, python-format +msgid "" +"\n" +"Command: %(cmd)s\n" +"Exit code: %(code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" +msgstr "" + +#: neutron/agent/metadata/agent.py:54 +#: neutron/plugins/metaplugin/common/config.py:65 +msgid "Admin user" +msgstr "" + +#: neutron/agent/metadata/agent.py:68 +msgid "Turn off verification of the certificate for ssl" +msgstr "" + +#: neutron/agent/metadata/agent.py:71 +msgid "Certificate Authority public key (CA cert) file for ssl" +msgstr "" + +#: neutron/agent/metadata/agent.py:75 +msgid "Network service endpoint type to pull from the keystone catalog" +msgstr "" + +#: neutron/agent/metadata/agent.py:78 +msgid "IP address used by Nova metadata server." +msgstr "" + +#: neutron/agent/metadata/agent.py:81 +msgid "TCP Port used by Nova metadata server." +msgstr "" + +#: neutron/agent/metadata/agent.py:84 +#: neutron/plugins/vmware/dhcp_meta/nsx.py:63 +msgid "Shared secret to sign instance-id request" +msgstr "" + +#: neutron/agent/metadata/agent.py:89 +msgid "Protocol to access nova metadata, http or https" +msgstr "" + +#: neutron/agent/metadata/agent.py:91 +msgid "Allow to perform insecure SSL (https) requests to nova metadata" +msgstr "" + +#: neutron/agent/metadata/agent.py:95 +msgid "Client certificate for nova metadata api server." 
+msgstr "" + +#: neutron/agent/metadata/agent.py:98 +msgid "Private key of client certificate." +msgstr "" + +#: neutron/agent/metadata/agent.py:128 +#: neutron/agent/metadata/namespace_proxy.py:70 +#, python-format +msgid "Request: %s" +msgstr "" + +#: neutron/agent/metadata/agent.py:137 +#: neutron/agent/metadata/namespace_proxy.py:78 +msgid "Unexpected error." +msgstr "" + +#: neutron/agent/metadata/agent.py:138 +#: neutron/agent/metadata/namespace_proxy.py:79 +msgid "An unknown error has occurred. Please try your request again." +msgstr "" + +#: neutron/agent/metadata/agent.py:180 +msgid "" +"Either one of parameter network_id or router_id must be passed to " +"_get_ports method." +msgstr "" + +#: neutron/agent/metadata/agent.py:232 +msgid "" +"The remote metadata server responded with Forbidden. This response " +"usually occurs when shared secrets do not match." +msgstr "" + +#: neutron/agent/metadata/agent.py:243 +#: neutron/agent/metadata/namespace_proxy.py:122 +msgid "Remote metadata server experienced an internal server error." +msgstr "" + +#: neutron/agent/metadata/agent.py:249 +#: neutron/agent/metadata/namespace_proxy.py:128 +#, python-format +msgid "Unexpected response code: %s" +msgstr "" + +#: neutron/agent/metadata/agent.py:309 +msgid "Location for Metadata Proxy UNIX domain socket" +msgstr "" + +#: neutron/agent/metadata/agent.py:312 +msgid "Number of separate worker processes for metadata server" +msgstr "" + +#: neutron/agent/metadata/agent.py:316 +msgid "Number of backlog requests to configure the metadata server socket with" +msgstr "" + +#: neutron/agent/metadata/namespace_proxy.py:65 +msgid "network_id and router_id are None. One must be provided." +msgstr "" + +#: neutron/agent/metadata/namespace_proxy.py:151 +msgid "Network that will have instance metadata proxied." +msgstr "" + +#: neutron/agent/metadata/namespace_proxy.py:154 +msgid "Router that will have connected instances' metadata proxied." 
+msgstr "" + +#: neutron/agent/metadata/namespace_proxy.py:157 +msgid "Location of pid file of this process." +msgstr "" + +#: neutron/agent/metadata/namespace_proxy.py:160 +msgid "Run as daemon." +msgstr "" + +#: neutron/agent/metadata/namespace_proxy.py:163 +msgid "TCP Port to listen for metadata server requests." +msgstr "" + +#: neutron/api/api_common.py:101 +#, python-format +msgid "" +"Invalid value for pagination_max_limit: %s. It should be an integer " +"greater to 0" +msgstr "" + +#: neutron/api/api_common.py:115 +#, python-format +msgid "Limit must be an integer 0 or greater and not '%d'" +msgstr "" + +#: neutron/api/api_common.py:132 +msgid "The number of sort_keys and sort_dirs must be same" +msgstr "" + +#: neutron/api/api_common.py:137 +#, python-format +msgid "%s is invalid attribute for sort_keys" +msgstr "" + +#: neutron/api/api_common.py:141 +#, python-format +msgid "" +"%(invalid_dirs)s is invalid value for sort_dirs, valid value is '%(asc)s'" +" and '%(desc)s'" +msgstr "" + +#: neutron/api/api_common.py:315 neutron/api/v2/base.py:594 +#, python-format +msgid "Unable to find '%s' in request body" +msgstr "" + +#: neutron/api/api_common.py:322 +#, python-format +msgid "Failed to parse request. Parameter '%s' not specified" +msgstr "" + +#: neutron/api/extensions.py:253 +#, python-format +msgid "Extension with alias %s does not exist" +msgstr "" + +#: neutron/api/extensions.py:257 neutron/api/extensions.py:261 +msgid "Resource not found." +msgstr "" + +#: neutron/api/extensions.py:283 +#, python-format +msgid "Extended resource: %s" +msgstr "" + +#: neutron/api/extensions.py:305 +#, python-format +msgid "Extended action: %s" +msgstr "" + +#: neutron/api/extensions.py:313 +#, python-format +msgid "Extended request: %s" +msgstr "" + +#: neutron/api/extensions.py:403 +msgid "Initializing extension manager." 
+msgstr "" + +#: neutron/api/extensions.py:486 +#, python-format +msgid "Error fetching extended attributes for extension '%s'" +msgstr "" + +#: neutron/api/extensions.py:492 +#, python-format +msgid "" +"Extension '%s' provides no backward compatibility map for extended " +"attributes" +msgstr "" + +#: neutron/api/extensions.py:502 +#, python-format +msgid "" +"It was impossible to process the following extensions: %s because of " +"missing requirements." +msgstr "" + +#: neutron/api/extensions.py:513 +#, python-format +msgid "Ext name: %s" +msgstr "" + +#: neutron/api/extensions.py:514 +#, python-format +msgid "Ext alias: %s" +msgstr "" + +#: neutron/api/extensions.py:515 +#, python-format +msgid "Ext description: %s" +msgstr "" + +#: neutron/api/extensions.py:516 +#, python-format +msgid "Ext namespace: %s" +msgstr "" + +#: neutron/api/extensions.py:517 +#, python-format +msgid "Ext updated: %s" +msgstr "" + +#: neutron/api/extensions.py:519 +#, python-format +msgid "Exception loading extension: %s" +msgstr "" + +#: neutron/api/extensions.py:537 +#, python-format +msgid "Extension path '%s' doesn't exist!" 
+msgstr "" + +#: neutron/api/extensions.py:545 +#, python-format +msgid "Loading extension file: %s" +msgstr "" + +#: neutron/api/extensions.py:553 +#, python-format +msgid "Did not find expected name \"%(ext_name)s\" in %(file)s" +msgstr "" + +#: neutron/api/extensions.py:561 +#, python-format +msgid "Extension file %(f)s wasn't loaded due to %(exception)s" +msgstr "" + +#: neutron/api/extensions.py:570 +#, python-format +msgid "Loaded extension: %s" +msgstr "" + +#: neutron/api/extensions.py:601 +#, python-format +msgid "Extension %s not supported by any of loaded plugins" +msgstr "" + +#: neutron/api/extensions.py:612 +#, python-format +msgid "Loaded plugins do not implement extension %s interface" +msgstr "" + +#: neutron/api/versions.py:45 +msgid "Unknown API version specified" +msgstr "" + +#: neutron/api/rpc/agentnotifiers/dhcp_rpc_agent_api.py:65 +#, python-format +msgid "" +"Unable to schedule network %s: no agents available; will retry on " +"subsequent port creation events." +msgstr "" + +#: neutron/api/rpc/agentnotifiers/dhcp_rpc_agent_api.py:78 +#, python-format +msgid "" +"Only %(active)d of %(total)d DHCP agents associated with network " +"'%(net_id)s' are marked as active, so notifications may be sent to " +"inactive agents." +msgstr "" + +#: neutron/api/rpc/agentnotifiers/dhcp_rpc_agent_api.py:90 +#, python-format +msgid "" +"Will not send event %(method)s for network %(net_id)s: no agent " +"available. Payload: %(payload)s" +msgstr "" + +#: neutron/api/rpc/agentnotifiers/l3_rpc_agent_api.py:38 +#, python-format +msgid "Nofity agent at %(host)s the message %(method)s" +msgstr "" + +#: neutron/api/rpc/agentnotifiers/l3_rpc_agent_api.py:58 +#, python-format +msgid "Notify agent at %(topic)s.%(host)s the message %(method)s" +msgstr "" + +#: neutron/api/rpc/agentnotifiers/l3_rpc_agent_api.py:74 +#, python-format +msgid "" +"No plugin for L3 routing registered. 
Cannot notify agents with the " +"message %s" +msgstr "" + +#: neutron/api/rpc/agentnotifiers/l3_rpc_agent_api.py:92 +#, python-format +msgid "" +"Fanout notify agent at %(topic)s the message %(method)s on router " +"%(router_id)s" +msgstr "" + +#: neutron/api/rpc/agentnotifiers/metering_rpc_agent_api.py:49 +#, python-format +msgid "Notify metering agent at %(topic)s.%(host)s the message %(method)s" +msgstr "" + +#: neutron/api/rpc/agentnotifiers/metering_rpc_agent_api.py:64 +#, python-format +msgid "" +"Fanout notify metering agent at %(topic)s the message %(method)s on " +"router %(router_id)s" +msgstr "" + +#: neutron/api/v2/attributes.py:46 +#, python-format +msgid "" +"Invalid input. '%(target_dict)s' must be a dictionary with keys: " +"%(expected_keys)s" +msgstr "" + +#: neutron/api/v2/attributes.py:57 +#, python-format +msgid "" +"Validation of dictionary's keys failed.Expected keys: %(expected_keys)s " +"Provided keys: %(provided_keys)s" +msgstr "" + +#: neutron/api/v2/attributes.py:71 +#, python-format +msgid "'%(data)s' is not in %(valid_values)s" +msgstr "" + +#: neutron/api/v2/attributes.py:87 +#, python-format +msgid "'%s' Blank strings are not permitted" +msgstr "" + +#: neutron/api/v2/attributes.py:97 +#, python-format +msgid "'%s' is not a valid string" +msgstr "" + +#: neutron/api/v2/attributes.py:102 +#, python-format +msgid "'%(data)s' exceeds maximum length of %(max_len)s" +msgstr "" + +#: neutron/api/v2/attributes.py:112 +#, python-format +msgid "'%s' is not a valid boolean value" +msgstr "" + +#: neutron/api/v2/attributes.py:131 neutron/api/v2/attributes.py:456 +#, python-format +msgid "'%s' is not an integer" +msgstr "" + +#: neutron/api/v2/attributes.py:135 +#, python-format +msgid "'%(data)s' is too small - must be at least '%(limit)d'" +msgstr "" + +#: neutron/api/v2/attributes.py:140 +#, python-format +msgid "'%(data)s' is too large - must be no larger than '%(limit)d'" +msgstr "" + +#: neutron/api/v2/attributes.py:149 +#, python-format 
+msgid "'%s' contains whitespace" +msgstr "" + +#: neutron/api/v2/attributes.py:166 +#, python-format +msgid "'%s' is not a valid MAC address" +msgstr "" + +#: neutron/api/v2/attributes.py:181 +#, python-format +msgid "'%s' is not a valid IP address" +msgstr "" + +#: neutron/api/v2/attributes.py:192 +#, python-format +msgid "Invalid data format for IP pool: '%s'" +msgstr "" + +#: neutron/api/v2/attributes.py:211 neutron/api/v2/attributes.py:218 +#, python-format +msgid "Invalid data format for fixed IP: '%s'" +msgstr "" + +#: neutron/api/v2/attributes.py:226 +#, python-format +msgid "Duplicate IP address '%s'" +msgstr "" + +#: neutron/api/v2/attributes.py:242 +#, python-format +msgid "Invalid data format for nameserver: '%s'" +msgstr "" + +#: neutron/api/v2/attributes.py:253 +#, python-format +msgid "'%s' is not a valid nameserver" +msgstr "" + +#: neutron/api/v2/attributes.py:257 +#, python-format +msgid "Duplicate nameserver '%s'" +msgstr "" + +#: neutron/api/v2/attributes.py:265 +#, python-format +msgid "Invalid data format for hostroute: '%s'" +msgstr "" + +#: neutron/api/v2/attributes.py:285 +#, python-format +msgid "Duplicate hostroute '%s'" +msgstr "" + +#: neutron/api/v2/attributes.py:302 neutron/tests/unit/test_attributes.py:462 +#: neutron/tests/unit/test_attributes.py:476 +#: neutron/tests/unit/test_attributes.py:484 +#, python-format +msgid "'%(data)s' isn't a recognized IP subnet cidr, '%(cidr)s' is recommended" +msgstr "" + +#: neutron/api/v2/attributes.py:308 +#, python-format +msgid "'%s' is not a valid IP subnet" +msgstr "" + +#: neutron/api/v2/attributes.py:316 neutron/api/v2/attributes.py:369 +#, python-format +msgid "'%s' is not a list" +msgstr "" + +#: neutron/api/v2/attributes.py:321 neutron/api/v2/attributes.py:380 +#, python-format +msgid "Duplicate items in the list: '%s'" +msgstr "" + +#: neutron/api/v2/attributes.py:344 +#, python-format +msgid "'%s' is not a valid input" +msgstr "" + +#: neutron/api/v2/attributes.py:357 +#: 
neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:551 +#, python-format +msgid "'%s' is not a valid UUID" +msgstr "" + +#: neutron/api/v2/attributes.py:400 +#, python-format +msgid "Validator '%s' does not exist." +msgstr "" + +#: neutron/api/v2/attributes.py:410 +#, python-format +msgid "'%s' is not a dictionary" +msgstr "" + +#: neutron/api/v2/attributes.py:461 +#, python-format +msgid "'%s' should be non-negative" +msgstr "" + +#: neutron/api/v2/attributes.py:480 +#, python-format +msgid "'%s' cannot be converted to boolean" +msgstr "" + +#: neutron/api/v2/attributes.py:488 +#: neutron/plugins/nec/extensions/packetfilter.py:77 +#, python-format +msgid "'%s' is not a integer" +msgstr "" + +#: neutron/api/v2/attributes.py:501 +#, python-format +msgid "'%s' is not of the form =[value]" +msgstr "" + +#: neutron/api/v2/base.py:88 +msgid "Native pagination depend on native sorting" +msgstr "" + +#: neutron/api/v2/base.py:91 +msgid "Allow sorting is enabled because native pagination requires native sorting" +msgstr "" + +#: neutron/api/v2/base.py:362 +#, python-format +msgid "Unable to undo add for %(resource)s %(id)s" +msgstr "" + +#: neutron/api/v2/base.py:494 +#, python-format +msgid "Invalid format: %s" +msgstr "" + +#: neutron/api/v2/base.py:547 +msgid "" +"Specifying 'tenant_id' other than authenticated tenant in request " +"requires admin privileges" +msgstr "" + +#: neutron/api/v2/base.py:555 +msgid "Running without keystone AuthN requires that tenant_id is specified" +msgstr "" + +#: neutron/api/v2/base.py:573 +msgid "Resource body required" +msgstr "" + +#: neutron/api/v2/base.py:575 +#, python-format +msgid "Request body: %(body)s" +msgstr "" + +#: neutron/api/v2/base.py:585 +msgid "Bulk operation not supported" +msgstr "" + +#: neutron/api/v2/base.py:589 +msgid "Resources required" +msgstr "" + +#: neutron/api/v2/base.py:605 +#, python-format +msgid "Failed to parse request. 
Required attribute '%s' not specified" +msgstr "" + +#: neutron/api/v2/base.py:612 +#, python-format +msgid "Attribute '%s' not allowed in POST" +msgstr "" + +#: neutron/api/v2/base.py:617 +#, python-format +msgid "Cannot update read-only attribute %s" +msgstr "" + +#: neutron/api/v2/base.py:635 +#, python-format +msgid "Invalid input for %(attr)s. Reason: %(reason)s." +msgstr "" + +#: neutron/api/v2/base.py:644 neutron/extensions/allowedaddresspairs.py:57 +#: neutron/extensions/multiprovidernet.py:51 +#, python-format +msgid "Unrecognized attribute(s) '%s'" +msgstr "" + +#: neutron/api/v2/base.py:663 +#, python-format +msgid "Tenant %(tenant_id)s not allowed to create %(resource)s on this network" +msgstr "" + +#: neutron/api/v2/resource.py:97 +#, python-format +msgid "%(action)s failed (client error): %(exc)s" +msgstr "" + +#: neutron/api/v2/resource.py:100 neutron/api/v2/resource.py:110 +#: neutron/api/v2/resource.py:129 +#, python-format +msgid "%s failed" +msgstr "" + +#: neutron/api/v2/resource.py:131 +#: neutron/tests/unit/test_api_v2_resource.py:277 +#: neutron/tests/unit/test_api_v2_resource.py:293 +msgid "Request Failed: internal server error while processing your request." +msgstr "" + +#: neutron/cmd/sanity_check.py:39 +msgid "" +"Check for Open vSwitch VXLAN support failed. Please ensure that the " +"version of openvswitch being used has VXLAN support." +msgstr "" + +#: neutron/cmd/sanity_check.py:48 +msgid "" +"Check for Open vSwitch patch port support failed. Please ensure that the " +"version of openvswitch being used has patch port support or disable " +"features requiring patch ports (gre/vxlan, etc.)." 
+msgstr "" + +#: neutron/cmd/sanity_check.py:58 +msgid "Check for vxlan support" +msgstr "" + +#: neutron/cmd/sanity_check.py:60 +msgid "Check for patch port support" +msgstr "" + +#: neutron/common/config.py:37 +msgid "The host IP to bind to" +msgstr "" + +#: neutron/common/config.py:39 +msgid "The port to bind to" +msgstr "" + +#: neutron/common/config.py:41 +msgid "The API paste config file to use" +msgstr "" + +#: neutron/common/config.py:43 +msgid "The path for API extensions" +msgstr "" + +#: neutron/common/config.py:45 +msgid "The policy file to use" +msgstr "" + +#: neutron/common/config.py:49 +msgid "The core plugin Neutron will use" +msgstr "" + +#: neutron/common/config.py:51 neutron/db/migration/cli.py:35 +msgid "The service plugins Neutron will use" +msgstr "" + +#: neutron/common/config.py:53 +msgid "The base MAC address Neutron will use for VIFs" +msgstr "" + +#: neutron/common/config.py:55 +msgid "How many times Neutron will retry MAC generation" +msgstr "" + +#: neutron/common/config.py:57 +msgid "Allow the usage of the bulk API" +msgstr "" + +#: neutron/common/config.py:59 +msgid "Allow the usage of the pagination" +msgstr "" + +#: neutron/common/config.py:61 +msgid "Allow the usage of the sorting" +msgstr "" + +#: neutron/common/config.py:63 +msgid "" +"The maximum number of items returned in a single response, value was " +"'infinite' or negative integer means no limit" +msgstr "" + +#: neutron/common/config.py:67 +msgid "Maximum number of DNS nameservers" +msgstr "" + +#: neutron/common/config.py:69 +msgid "Maximum number of host routes per subnet" +msgstr "" + +#: neutron/common/config.py:71 +msgid "Maximum number of fixed ips per port" +msgstr "" + +#: neutron/common/config.py:74 +msgid "" +"DHCP lease duration (in seconds). Use -1 to tell dnsmasq to use infinite " +"lease times." 
+msgstr "" + +#: neutron/common/config.py:77 +msgid "Allow sending resource operation notification to DHCP agent" +msgstr "" + +#: neutron/common/config.py:80 +msgid "Allow overlapping IP support in Neutron" +msgstr "" + +#: neutron/common/config.py:82 +msgid "The hostname Neutron is running on" +msgstr "" + +#: neutron/common/config.py:84 +msgid "Ensure that configured gateway is on subnet" +msgstr "" + +#: neutron/common/config.py:86 +msgid "Send notification to nova when port status changes" +msgstr "" + +#: neutron/common/config.py:88 +msgid "" +"Send notification to nova when port data (fixed_ips/floatingip) changes " +"so nova can update its cache." +msgstr "" + +#: neutron/common/config.py:92 +msgid "URL for connection to nova" +msgstr "" + +#: neutron/common/config.py:94 +msgid "Username for connecting to nova in admin context" +msgstr "" + +#: neutron/common/config.py:96 +msgid "Password for connection to nova in admin context" +msgstr "" + +#: neutron/common/config.py:99 +msgid "The uuid of the admin nova tenant" +msgstr "" + +#: neutron/common/config.py:102 +msgid "Authorization URL for connecting to nova in admin context" +msgstr "" + +#: neutron/common/config.py:105 +msgid "CA file for novaclient to verify server certificates" +msgstr "" + +#: neutron/common/config.py:107 +msgid "If True, ignore any SSL validation issues" +msgstr "" + +#: neutron/common/config.py:109 +msgid "" +"Name of nova region to use. Useful if keystone manages more than one " +"region." +msgstr "" + +#: neutron/common/config.py:112 +msgid "" +"Number of seconds between sending events to nova if there are any events " +"to send." +msgstr "" + +#: neutron/common/config.py:119 +msgid "" +"Where to store Neutron state files. This directory must be writable by " +"the agent." +msgstr "" + +#: neutron/common/config.py:151 +#, python-format +msgid "Base MAC: %s" +msgstr "" + +#: neutron/common/config.py:162 +msgid "Logging enabled!" 
+msgstr "" + +#: neutron/common/config.py:178 +#, python-format +msgid "Config paste file: %s" +msgstr "" + +#: neutron/common/config.py:183 +#, python-format +msgid "Unable to load %(app_name)s from configuration file %(config_path)s." +msgstr "" + +#: neutron/common/exceptions.py:30 +#: neutron/plugins/vmware/api_client/exception.py:27 +msgid "An unknown exception occurred." +msgstr "" + +#: neutron/common/exceptions.py:51 +#, python-format +msgid "Bad %(resource)s request: %(msg)s" +msgstr "" + +#: neutron/common/exceptions.py:63 +msgid "Not authorized." +msgstr "" + +#: neutron/common/exceptions.py:67 +msgid "The service is unavailable" +msgstr "" + +#: neutron/common/exceptions.py:71 +#, python-format +msgid "User does not have admin privileges: %(reason)s" +msgstr "" + +#: neutron/common/exceptions.py:75 +#, python-format +msgid "Policy doesn't allow %(action)s to be performed." +msgstr "" + +#: neutron/common/exceptions.py:79 +#, python-format +msgid "Network %(net_id)s could not be found" +msgstr "" + +#: neutron/common/exceptions.py:83 +#, python-format +msgid "Subnet %(subnet_id)s could not be found" +msgstr "" + +#: neutron/common/exceptions.py:87 +#, python-format +msgid "Port %(port_id)s could not be found" +msgstr "" + +#: neutron/common/exceptions.py:91 +#, python-format +msgid "Port %(port_id)s could not be found on network %(net_id)s" +msgstr "" + +#: neutron/common/exceptions.py:96 +msgid "Policy configuration policy.json could not be found" +msgstr "" + +#: neutron/common/exceptions.py:100 +#, python-format +msgid "Failed to init policy %(policy)s because %(reason)s" +msgstr "" + +#: neutron/common/exceptions.py:104 +#, python-format +msgid "Failed to check policy %(policy)s because %(reason)s" +msgstr "" + +#: neutron/common/exceptions.py:108 +#, python-format +msgid "Unsupported port state: %(port_state)s" +msgstr "" + +#: neutron/common/exceptions.py:112 +msgid "The resource is inuse" +msgstr "" + +#: neutron/common/exceptions.py:116 +#, 
python-format +msgid "" +"Unable to complete operation on network %(net_id)s. There are one or more" +" ports still in use on the network." +msgstr "" + +#: neutron/common/exceptions.py:121 +#, python-format +msgid "" +"Unable to complete operation on subnet %(subnet_id)s. One or more ports " +"have an IP allocation from this subnet." +msgstr "" + +#: neutron/common/exceptions.py:126 +#, python-format +msgid "" +"Unable to complete operation on port %(port_id)s for network %(net_id)s. " +"Port already has an attacheddevice %(device_id)s." +msgstr "" + +#: neutron/common/exceptions.py:132 +#, python-format +msgid "" +"Unable to complete operation for network %(net_id)s. The mac address " +"%(mac)s is in use." +msgstr "" + +#: neutron/common/exceptions.py:138 +#, python-format +msgid "" +"Unable to complete operation for %(subnet_id)s. The number of host routes" +" exceeds the limit %(quota)s." +msgstr "" + +#: neutron/common/exceptions.py:144 +#, python-format +msgid "" +"Unable to complete operation for %(subnet_id)s. The number of DNS " +"nameservers exceeds the limit %(quota)s." +msgstr "" + +#: neutron/common/exceptions.py:149 +#, python-format +msgid "" +"Unable to complete operation for network %(net_id)s. The IP address " +"%(ip_address)s is in use." +msgstr "" + +#: neutron/common/exceptions.py:154 +#, python-format +msgid "" +"Unable to create the network. The VLAN %(vlan_id)s on physical network " +"%(physical_network)s is in use." +msgstr "" + +#: neutron/common/exceptions.py:160 +#, python-format +msgid "" +"Unable to create the flat network. Physical network %(physical_network)s " +"is in use." +msgstr "" + +#: neutron/common/exceptions.py:165 +#, python-format +msgid "Unable to create the network. The tunnel ID %(tunnel_id)s is in use." +msgstr "" + +#: neutron/common/exceptions.py:170 +msgid "Tenant network creation is not enabled." +msgstr "" + +#: neutron/common/exceptions.py:178 +msgid "" +"Unable to create the network. 
No tenant network is available for " +"allocation." +msgstr "" + +#: neutron/common/exceptions.py:183 +#, python-format +msgid "" +"Subnet on port %(port_id)s does not match the requested subnet " +"%(subnet_id)s" +msgstr "" + +#: neutron/common/exceptions.py:188 +#, python-format +msgid "Malformed request body: %(reason)s" +msgstr "" + +#: neutron/common/exceptions.py:198 +#, python-format +msgid "Invalid input for operation: %(error_message)s." +msgstr "" + +#: neutron/common/exceptions.py:202 +#, python-format +msgid "The allocation pool %(pool)s is not valid." +msgstr "" + +#: neutron/common/exceptions.py:206 +#, python-format +msgid "" +"Found overlapping allocation pools:%(pool_1)s %(pool_2)s for subnet " +"%(subnet_cidr)s." +msgstr "" + +#: neutron/common/exceptions.py:211 +#, python-format +msgid "The allocation pool %(pool)s spans beyond the subnet cidr %(subnet_cidr)s." +msgstr "" + +#: neutron/common/exceptions.py:216 +#, python-format +msgid "Unable to generate unique mac on network %(net_id)s." +msgstr "" + +#: neutron/common/exceptions.py:220 +#, python-format +msgid "No more IP addresses available on network %(net_id)s." +msgstr "" + +#: neutron/common/exceptions.py:224 +#, python-format +msgid "Bridge %(bridge)s does not exist." +msgstr "" + +#: neutron/common/exceptions.py:228 +#, python-format +msgid "Creation failed. %(dev_name)s already exists." +msgstr "" + +#: neutron/common/exceptions.py:232 +msgid "Sudo privilege is required to run this command." +msgstr "" + +#: neutron/common/exceptions.py:236 +#, python-format +msgid "Unknown quota resources %(unknown)s." 
+msgstr "" + +#: neutron/common/exceptions.py:240 +#, python-format +msgid "Quota exceeded for resources: %(overs)s" +msgstr "" + +#: neutron/common/exceptions.py:244 +msgid "Tenant-id was missing from Quota request" +msgstr "" + +#: neutron/common/exceptions.py:248 +#, python-format +msgid "" +"Change would make usage less than 0 for the following resources: " +"%(unders)s" +msgstr "" + +#: neutron/common/exceptions.py:253 +#, python-format +msgid "" +"Unable to reconfigure sharing settings for network %(network)s. Multiple " +"tenants are using it" +msgstr "" + +#: neutron/common/exceptions.py:258 +#, python-format +msgid "Invalid extension environment: %(reason)s" +msgstr "" + +#: neutron/common/exceptions.py:262 +#, python-format +msgid "Extensions not found: %(extensions)s" +msgstr "" + +#: neutron/common/exceptions.py:266 +#, python-format +msgid "Invalid content type %(content_type)s" +msgstr "" + +#: neutron/common/exceptions.py:270 +#, python-format +msgid "Unable to find any IP address on external network %(net_id)s." +msgstr "" + +#: neutron/common/exceptions.py:275 +msgid "More than one external network exists" +msgstr "" + +#: neutron/common/exceptions.py:279 +#, python-format +msgid "An invalid value was provided for %(opt_name)s: %(opt_value)s" +msgstr "" + +#: neutron/common/exceptions.py:284 +#, python-format +msgid "Gateway ip %(ip_address)s conflicts with allocation pool %(pool)s" +msgstr "" + +#: neutron/common/exceptions.py:289 +#, python-format +msgid "" +"Current gateway ip %(ip_address)s already in use by port %(port_id)s. " +"Unable to update." +msgstr "" + +#: neutron/common/exceptions.py:294 +#, python-format +msgid "Invalid network VLAN range: '%(vlan_range)s' - '%(error)s'" +msgstr "" + +#: neutron/common/exceptions.py:304 +#, python-format +msgid "Invalid network VXLAN port range: '%(vxlan_range)s'" +msgstr "" + +#: neutron/common/exceptions.py:308 +msgid "VXLAN Network unsupported." 
+msgstr "" + +#: neutron/common/exceptions.py:312 +#, python-format +msgid "Found duplicate extension: %(alias)s" +msgstr "" + +#: neutron/common/exceptions.py:316 +#, python-format +msgid "" +"The following device_id %(device_id)s is not owned by your tenant or " +"matches another tenants router." +msgstr "" + +#: neutron/common/exceptions.py:321 +#, python-format +msgid "Invalid CIDR %(input)s given as IP prefix" +msgstr "" + +#: neutron/common/ipv6_utils.py:27 +msgid "Unable to generate IP address by EUI64 for IPv4 prefix" +msgstr "" + +#: neutron/common/ipv6_utils.py:34 +#, python-format +msgid "" +"Bad prefix or mac format for generating IPv6 address by EUI-64: " +"%(prefix)s, %(mac)s:" +msgstr "" + +#: neutron/common/ipv6_utils.py:38 +#, python-format +msgid "Bad prefix type for generate IPv6 address by EUI-64: %s" +msgstr "" + +#: neutron/common/log.py:32 +#, python-format +msgid "" +"%(class_name)s method %(method_name)s called with arguments %(args)s " +"%(kwargs)s" +msgstr "" + +#: neutron/common/utils.py:68 +#, python-format +msgid "" +"Method %(func_name)s cannot be cached due to unhashable parameters: args:" +" %(args)s, kwargs: %(kwargs)s" +msgstr "" + +#: neutron/common/utils.py:91 +#, python-format +msgid "" +"Instance of class %(module)s.%(class)s doesn't contain attribute _cache " +"therefore results cannot be cached for %(func_name)s." 
+msgstr "" + +#: neutron/common/utils.py:117 neutron/openstack/common/fileutils.py:63 +#, python-format +msgid "Reloading cached file %s" +msgstr "" + +#: neutron/common/utils.py:200 +#, python-format +msgid "Invalid mapping: '%s'" +msgstr "" + +#: neutron/common/utils.py:203 +#, python-format +msgid "Missing key in mapping: '%s'" +msgstr "" + +#: neutron/common/utils.py:206 +#, python-format +msgid "Missing value in mapping: '%s'" +msgstr "" + +#: neutron/common/utils.py:208 +#, python-format +msgid "Key %(key)s in mapping: '%(mapping)s' not unique" +msgstr "" + +#: neutron/common/utils.py:211 +#, python-format +msgid "Value %(value)s in mapping: '%(mapping)s' not unique" +msgstr "" + +#: neutron/db/agents_db.py:36 +msgid "" +"Seconds to regard the agent is down; should be at least twice " +"report_interval, to be sure the agent is down for good." +msgstr "" + +#: neutron/db/agents_db.py:93 +#, python-format +msgid "Configuration for agent %(agent_type)s on host %(host)s is invalid." +msgstr "" + +#: neutron/db/agents_db.py:214 +msgid "Message with invalid timestamp received" +msgstr "" + +#: neutron/db/agentschedulers_db.py:37 +msgid "Driver to use for scheduling network to DHCP agent" +msgstr "" + +#: neutron/db/agentschedulers_db.py:39 +msgid "Allow auto scheduling networks to DHCP agent." +msgstr "" + +#: neutron/db/agentschedulers_db.py:41 +msgid "Number of DHCP agents scheduled to host a network." +msgstr "" + +#: neutron/db/api.py:77 +#, python-format +msgid "Database registration exception: %s" +msgstr "" + +#: neutron/db/api.py:89 +msgid "Database exception" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:148 +msgid "Cannot create resource for another tenant" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:317 +#, python-format +msgid "Generated mac for network %(network_id)s is %(mac_address)s" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:323 +#, python-format +msgid "Generated mac %(mac_address)s exists. Remaining attempts %(max_retries)s." 
+msgstr "" + +#: neutron/db/db_base_plugin_v2.py:327 +#, python-format +msgid "Unable to generate mac address after %s attempts" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:345 +#, python-format +msgid "Delete allocated IP %(ip_address)s (%(network_id)s/%(subnet_id)s)" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:383 +#, python-format +msgid "All IPs from subnet %(subnet_id)s (%(cidr)s) allocated" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:388 +#, python-format +msgid "Allocated IP - %(ip_address)s from %(first_ip)s to %(last_ip)s" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:395 +msgid "No more free IP's in slice. Deleting allocation pool." +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:414 +#, python-format +msgid "Rebuilding availability ranges for subnet %s" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:546 +msgid "IP allocation requires subnet_id or ip_address" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:558 +#, python-format +msgid "IP address %s is not a valid IP for the defined networks subnets" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:564 +#, python-format +msgid "" +"Failed to create port on network %(network_id)s, because fixed_ips " +"included invalid subnet %(subnet_id)s" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:584 +#, python-format +msgid "IP address %s is not a valid IP for the defined subnet" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:593 neutron/db/db_base_plugin_v2.py:626 +msgid "Exceeded maximim amount of fixed ips per port" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:641 +#, python-format +msgid "Port update. Hold %s" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:648 +#, python-format +msgid "Port update. 
Adding %s" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:717 +#, python-format +msgid "" +"Requested subnet with cidr: %(cidr)s for network: %(network_id)s overlaps" +" with another subnet" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:722 +#, python-format +msgid "" +"Validation for CIDR: %(new_cidr)s failed - overlaps with subnet " +"%(subnet_id)s (CIDR: %(cidr)s)" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:742 +msgid "Performing IP validity checks on allocation pools" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:749 +#, python-format +msgid "Found invalid IP address in pool: %(start)s - %(end)s:" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:756 +msgid "Specified IP addresses do not match the subnet IP version" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:760 +#, python-format +msgid "Start IP (%(start)s) is greater than end IP (%(end)s)" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:765 +#, python-format +msgid "Found pool larger than subnet CIDR:%(start)s - %(end)s" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:778 +msgid "Checking for overlaps among allocation pools and gateway ip" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:789 +#, python-format +msgid "Found overlapping ranges: %(l_range)s and %(r_range)s" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:802 neutron/db/db_base_plugin_v2.py:806 +#, python-format +msgid "Invalid route: %s" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:879 +#, python-format +msgid "" +"ipv6_ra_mode set to '%(ra_mode)s' with ipv6_address_mode set to " +"'%(addr_mode)s' is not valid. If both attributes are set, they must be " +"the same value" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:887 +msgid "" +"ipv6_ra_mode or ipv6_address_mode cannot be set when enable_dhcp is set " +"to False." 
+msgstr "" + +#: neutron/db/db_base_plugin_v2.py:893 +msgid "Cannot disable enable_dhcp with ipv6 attributes set" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:983 +#, python-format +msgid "An exception occurred while creating the %(resource)s:%(item)s" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:1080 +#, python-format +msgid "%(name)s '%(addr)s' does not match the ip_version '%(ip_version)s'" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:1104 +msgid "Gateway is not valid on subnet" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:1124 neutron/db/db_base_plugin_v2.py:1138 +msgid "new subnet" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:1131 +#, python-format +msgid "Error parsing dns address %s" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:1147 +msgid "ipv6_ra_mode is not valid when ip_version is 4" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:1151 +msgid "ipv6_address_mode is not valid when ip_version is 4" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:1452 +#, python-format +msgid "Allocated IP %(ip_address)s (%(network_id)s/%(subnet_id)s/%(port_id)s)" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:1531 +#, python-format +msgid "" +"Ignoring PortNotFound when deleting port '%s'. The port has already been " +"deleted." 
+msgstr "" + +#: neutron/db/dhcp_rpc_base.py:58 +msgid "Unrecognized action" +msgstr "" + +#: neutron/db/dhcp_rpc_base.py:75 +#, python-format +msgid "" +"Action %(action)s for network %(net_id)s could not complete successfully:" +" %(reason)s" +msgstr "" + +#: neutron/db/dhcp_rpc_base.py:85 +#, python-format +msgid "get_active_networks requested from %s" +msgstr "" + +#: neutron/db/dhcp_rpc_base.py:92 +#, python-format +msgid "get_active_networks_info from %s" +msgstr "" + +#: neutron/db/dhcp_rpc_base.py:112 +#, python-format +msgid "Network %(network_id)s requested from %(host)s" +msgstr "" + +#: neutron/db/dhcp_rpc_base.py:119 neutron/db/dhcp_rpc_base.py:183 +#, python-format +msgid "Network %s could not be found, it might have been deleted concurrently." +msgstr "" + +#: neutron/db/dhcp_rpc_base.py:141 +#, python-format +msgid "Port %(device_id)s for %(network_id)s requested from %(host)s" +msgstr "" + +#: neutron/db/dhcp_rpc_base.py:175 +#, python-format +msgid "" +"DHCP port %(device_id)s on network %(network_id)s does not exist on " +"%(host)s" +msgstr "" + +#: neutron/db/dhcp_rpc_base.py:215 +#, python-format +msgid "DHCP port deletion for %(network_id)s request from %(host)s" +msgstr "" + +#: neutron/db/dhcp_rpc_base.py:228 +#, python-format +msgid "DHCP port remove fixed_ip for %(subnet_id)s request from %(host)s" +msgstr "" + +#: neutron/db/dhcp_rpc_base.py:252 +#, python-format +msgid "Updating lease expiration is now deprecated. Issued from host %s." +msgstr "" + +#: neutron/db/dhcp_rpc_base.py:263 +#, python-format +msgid "Create dhcp port %(port)s from %(host)s." +msgstr "" + +#: neutron/db/dhcp_rpc_base.py:280 +#, python-format +msgid "Update dhcp port %(port)s from %(host)s." 
+msgstr "" + +#: neutron/db/extraroute_db.py:36 +msgid "Maximum number of routes" +msgstr "" + +#: neutron/db/extraroute_db.py:91 +msgid "the nexthop is not connected with router" +msgstr "" + +#: neutron/db/extraroute_db.py:96 +msgid "the nexthop is used by router" +msgstr "" + +#: neutron/db/extraroute_db.py:125 +#, python-format +msgid "Added routes are %s" +msgstr "" + +#: neutron/db/extraroute_db.py:133 +#, python-format +msgid "Removed routes are %s" +msgstr "" + +#: neutron/db/l3_agentschedulers_db.py:34 +msgid "Driver to use for scheduling router to a default L3 agent" +msgstr "" + +#: neutron/db/l3_agentschedulers_db.py:37 +msgid "Allow auto scheduling of routers to L3 agent." +msgstr "" + +#: neutron/db/l3_db.py:239 +#, python-format +msgid "No eligible l3 agent associated with external network %s found" +msgstr "" + +#: neutron/db/l3_db.py:260 +#, python-format +msgid "No IPs available for external network %s" +msgstr "" + +#: neutron/db/l3_db.py:274 +#, python-format +msgid "Network %s is not an external network" +msgstr "" + +#: neutron/db/l3_db.py:388 +#, python-format +msgid "Router already has a port on subnet %s" +msgstr "" + +#: neutron/db/l3_db.py:402 +#, python-format +msgid "" +"Cidr %(subnet_cidr)s of subnet %(subnet_id)s overlaps with cidr %(cidr)s " +"of subnet %(sub_id)s" +msgstr "" + +#: neutron/db/l3_db.py:416 neutron/db/l3_db.py:542 +#: neutron/plugins/bigswitch/plugin.py:989 +#: neutron/plugins/bigswitch/plugin.py:998 +msgid "Either subnet_id or port_id must be specified" +msgstr "" + +#: neutron/db/l3_db.py:421 +msgid "Cannot specify both subnet-id and port-id" +msgstr "" + +#: neutron/db/l3_db.py:434 +msgid "Router port must have exactly one fixed IP" +msgstr "" + +#: neutron/db/l3_db.py:448 +msgid "Subnet for router interface must have a gateway IP" +msgstr "" + +#: neutron/db/l3_db.py:596 neutron/plugins/nec/nec_router.py:199 +#, python-format +msgid "Cannot add floating IP to port on subnet %s which has no gateway_ip" +msgstr "" + 
+#: neutron/db/l3_db.py:632 +#, python-format +msgid "" +"Port %(port_id)s is associated with a different tenant than Floating IP " +"%(floatingip_id)s and therefore cannot be bound." +msgstr "" + +#: neutron/db/l3_db.py:636 +#, python-format +msgid "" +"Cannot create floating IP and bind it to Port %s, since that port is " +"owned by a different tenant." +msgstr "" + +#: neutron/db/l3_db.py:648 +#, python-format +msgid "Port %(id)s does not have fixed ip %(address)s" +msgstr "" + +#: neutron/db/l3_db.py:655 +#, python-format +msgid "Cannot add floating IP to port %s that hasno fixed IP addresses" +msgstr "" + +#: neutron/db/l3_db.py:659 +#, python-format +msgid "" +"Port %s has multiple fixed IPs. Must provide a specific IP when " +"assigning a floating IP" +msgstr "" + +#: neutron/db/l3_db.py:702 neutron/plugins/vmware/plugins/base.py:1871 +msgid "fixed_ip_address cannot be specified without a port_id" +msgstr "" + +#: neutron/db/l3_db.py:737 +#, python-format +msgid "Network %s is not a valid external network" +msgstr "" + +#: neutron/db/l3_db.py:874 +#, python-format +msgid "" +"Port %(port_id)s has owner %(port_owner)s, but no IP address, so it can " +"be deleted" +msgstr "" + +#: neutron/db/l3_db.py:979 +#, python-format +msgid "Skipping port %s as no IP is configure on it" +msgstr "" + +#: neutron/db/l3_rpc_base.py:50 +msgid "" +"No plugin for L3 routing registered! Will reply to l3 agent with empty " +"router dictionary." 
+msgstr "" + +#: neutron/db/l3_rpc_base.py:64 +#, python-format +msgid "" +"Routers returned to l3 agent:\n" +" %s" +msgstr "" + +#: neutron/db/l3_rpc_base.py:70 +#, python-format +msgid "Checking router: %(id)s for host: %(host)s" +msgstr "" + +#: neutron/db/l3_rpc_base.py:95 +#, python-format +msgid "External network ID returned to l3 agent: %s" +msgstr "" + +#: neutron/db/l3_rpc_base.py:105 +#, python-format +msgid "New status for floating IP %(floatingip_id)s: %(status)s" +msgstr "" + +#: neutron/db/l3_rpc_base.py:113 +#, python-format +msgid "Floating IP: %s no longer present." +msgstr "" + +#: neutron/db/routedserviceinsertion_db.py:36 +#, python-format +msgid "Resource type '%(resource_type)s' is longer than %(maxlen)d characters" +msgstr "" + +#: neutron/db/securitygroups_rpc_base.py:277 +#, python-format +msgid "No valid gateway port on subnet %s is found for IPv6 RA" +msgstr "" + +#: neutron/db/sqlalchemyutils.py:73 +#, python-format +msgid "%s is invalid attribute for sort_key" +msgstr "" + +#: neutron/db/sqlalchemyutils.py:76 +#, python-format +msgid "" +"The attribute '%(attr)s' is reference to other resource, can't used by " +"sort '%(resource)s'" +msgstr "" + +#: neutron/db/firewall/firewall_db.py:241 +#: neutron/plugins/vmware/plugins/service.py:915 +#: neutron/services/firewall/fwaas_plugin.py:229 +msgid "create_firewall() called" +msgstr "" + +#: neutron/db/firewall/firewall_db.py:257 +#: neutron/plugins/vmware/plugins/service.py:942 +#: neutron/services/firewall/fwaas_plugin.py:244 +msgid "update_firewall() called" +msgstr "" + +#: neutron/db/firewall/firewall_db.py:267 +#: neutron/plugins/vmware/plugins/service.py:968 +#: neutron/services/firewall/fwaas_plugin.py:259 +msgid "delete_firewall() called" +msgstr "" + +#: neutron/db/firewall/firewall_db.py:277 +msgid "get_firewall() called" +msgstr "" + +#: neutron/db/firewall/firewall_db.py:282 +msgid "get_firewalls() called" +msgstr "" + +#: neutron/db/firewall/firewall_db.py:288 +msgid 
"get_firewalls_count() called" +msgstr "" + +#: neutron/db/firewall/firewall_db.py:293 +msgid "create_firewall_policy() called" +msgstr "" + +#: neutron/db/firewall/firewall_db.py:309 +#: neutron/plugins/vmware/plugins/service.py:1030 +#: neutron/services/firewall/fwaas_plugin.py:268 +msgid "update_firewall_policy() called" +msgstr "" + +#: neutron/db/firewall/firewall_db.py:321 +msgid "delete_firewall_policy() called" +msgstr "" + +#: neutron/db/firewall/firewall_db.py:333 +msgid "get_firewall_policy() called" +msgstr "" + +#: neutron/db/firewall/firewall_db.py:338 +msgid "get_firewall_policies() called" +msgstr "" + +#: neutron/db/firewall/firewall_db.py:344 +msgid "get_firewall_policies_count() called" +msgstr "" + +#: neutron/db/firewall/firewall_db.py:349 +msgid "create_firewall_rule() called" +msgstr "" + +#: neutron/db/firewall/firewall_db.py:378 +#: neutron/plugins/vmware/plugins/service.py:1004 +#: neutron/services/firewall/fwaas_plugin.py:276 +msgid "update_firewall_rule() called" +msgstr "" + +#: neutron/db/firewall/firewall_db.py:402 +msgid "delete_firewall_rule() called" +msgstr "" + +#: neutron/db/firewall/firewall_db.py:410 +msgid "get_firewall_rule() called" +msgstr "" + +#: neutron/db/firewall/firewall_db.py:415 +msgid "get_firewall_rules() called" +msgstr "" + +#: neutron/db/firewall/firewall_db.py:421 +msgid "get_firewall_rules_count() called" +msgstr "" + +#: neutron/db/firewall/firewall_db.py:430 +#: neutron/plugins/vmware/plugins/service.py:1057 +#: neutron/services/firewall/fwaas_plugin.py:286 +msgid "insert_rule() called" +msgstr "" + +#: neutron/db/firewall/firewall_db.py:470 +#: neutron/plugins/vmware/plugins/service.py:1086 +#: neutron/services/firewall/fwaas_plugin.py:294 +msgid "remove_rule() called" +msgstr "" + +#: neutron/db/loadbalancer/loadbalancer_db.py:70 +#, python-format +msgid "The %(key)s field can not have negative value. Current value is %(value)d." 
+msgstr "" + +#: neutron/db/loadbalancer/loadbalancer_db.py:274 +msgid "'cookie_name' should be specified for this type of session persistence." +msgstr "" + +#: neutron/db/loadbalancer/loadbalancer_db.py:278 +msgid "'cookie_name' is not allowed for this type of session persistence" +msgstr "" + +#: neutron/db/metering/metering_rpc.py:46 +#, python-format +msgid "Unable to find agent %s." +msgstr "" + +#: neutron/db/migration/cli.py:32 +msgid "Neutron plugin provider module" +msgstr "" + +#: neutron/db/migration/cli.py:41 +msgid "Neutron quota driver class" +msgstr "" + +#: neutron/db/migration/cli.py:49 +msgid "URL to database" +msgstr "" + +#: neutron/db/migration/cli.py:52 +msgid "Database engine" +msgstr "" + +#: neutron/db/migration/cli.py:75 +msgid "You must provide a revision or relative delta" +msgstr "" + +#: neutron/db/migration/cli.py:105 neutron/db/migration/cli.py:118 +msgid "Timeline branches unable to generate timeline" +msgstr "" + +#: neutron/db/migration/cli.py:112 +msgid "HEAD file does not match migration timeline head" +msgstr "" + +#: neutron/db/migration/cli.py:154 +msgid "Available commands" +msgstr "" + +#: neutron/db/migration/migrate_to_ml2.py:88 +msgid "Missing version in alembic_versions table" +msgstr "" + +#: neutron/db/migration/migrate_to_ml2.py:90 +#, python-format +msgid "Multiple versions in alembic_versions table: %s" +msgstr "" + +#: neutron/db/migration/migrate_to_ml2.py:94 +#, python-format +msgid "" +"Unsupported database schema %(current)s. 
Please migrate your database to " +"one of following versions: %(supported)s" +msgstr "" + +#: neutron/db/migration/migrate_to_ml2.py:414 +#, python-format +msgid "Unknown tunnel type: %s" +msgstr "" + +#: neutron/db/migration/migrate_to_ml2.py:428 +msgid "The plugin type whose database will be migrated" +msgstr "" + +#: neutron/db/migration/migrate_to_ml2.py:431 +msgid "The connection url for the target db" +msgstr "" + +#: neutron/db/migration/migrate_to_ml2.py:434 +#, python-format +msgid "The %s tunnel type to migrate from" +msgstr "" + +#: neutron/db/migration/migrate_to_ml2.py:437 +#: neutron/plugins/openvswitch/common/config.py:77 +msgid "The UDP port to use for VXLAN tunnels." +msgstr "" + +#: neutron/db/migration/migrate_to_ml2.py:440 +msgid "Retain the old plugin's tables" +msgstr "" + +#: neutron/db/migration/migrate_to_ml2.py:446 +#, python-format +msgid "" +"Tunnel args (tunnel-type and vxlan-udp-port) are not valid for the %s " +"plugin" +msgstr "" + +#: neutron/db/migration/migrate_to_ml2.py:453 +#, python-format +msgid "" +"Support for migrating %(plugin)s for release %(release)s is not yet " +"implemented" +msgstr "" + +#: neutron/db/vpn/vpn_db.py:680 +#, python-format +msgid "vpnservice %s in db is already deleted" +msgstr "" + +#: neutron/debug/commands.py:34 +msgid "Unimplemented commands" +msgstr "" + +#: neutron/debug/commands.py:46 +msgid "ID of network to probe" +msgstr "" + +#: neutron/debug/commands.py:50 +msgid "Owner type of the device: network/compute" +msgstr "" + +#: neutron/debug/commands.py:58 +#, python-format +msgid "Probe created : %s " +msgstr "" + +#: neutron/debug/commands.py:70 +msgid "ID of probe port to delete" +msgstr "" + +#: neutron/debug/commands.py:77 +#, python-format +msgid "Probe %s deleted" +msgstr "" + +#: neutron/debug/commands.py:108 +msgid "All Probes deleted " +msgstr "" + +#: neutron/debug/commands.py:120 +msgid "ID of probe port to execute command" +msgstr "" + +#: neutron/debug/commands.py:125 +msgid 
"Command to execute" +msgstr "" + +#: neutron/debug/commands.py:145 +msgid "Ping timeout" +msgstr "" + +#: neutron/debug/commands.py:149 +msgid "ID of network" +msgstr "" + +#: neutron/debug/debug_agent.py:122 +#, python-format +msgid "Failed to delete namespace %s" +msgstr "" + +#: neutron/debug/shell.py:64 +msgid "Config file for interface driver (You may also use l3_agent.ini)" +msgstr "" + +#: neutron/debug/shell.py:72 +msgid "" +"You must provide a config file for bridge - either --config-file or " +"env[NEUTRON_TEST_CONFIG_FILE]" +msgstr "" + +#: neutron/extensions/agent.py:61 +#, python-format +msgid "Agent %(id)s could not be found" +msgstr "" + +#: neutron/extensions/agent.py:65 +#, python-format +msgid "Agent with agent_type=%(agent_type)s and host=%(host)s could not be found" +msgstr "" + +#: neutron/extensions/agent.py:70 +#, python-format +msgid "Multiple agents with agent_type=%(agent_type)s and host=%(host)s found" +msgstr "" + +#: neutron/extensions/allowedaddresspairs.py:22 +msgid "AllowedAddressPair must contain ip_address" +msgstr "" + +#: neutron/extensions/allowedaddresspairs.py:26 +msgid "" +"Port Security must be enabled in order to have allowed address pairs on a" +" port." +msgstr "" + +#: neutron/extensions/allowedaddresspairs.py:31 +#, python-format +msgid "" +"Request contains duplicate address pair: mac_address %(mac_address)s " +"ip_address %(ip_address)s." +msgstr "" + +#: neutron/extensions/dhcpagentscheduler.py:119 +#, python-format +msgid "Agent %(id)s is not a valid DHCP Agent or has been disabled" +msgstr "" + +#: neutron/extensions/dhcpagentscheduler.py:123 +#, python-format +msgid "" +"The network %(network_id)s has been already hosted by the DHCP Agent " +"%(agent_id)s." +msgstr "" + +#: neutron/extensions/dhcpagentscheduler.py:128 +#, python-format +msgid "The network %(network_id)s is not hosted by the DHCP agent %(agent_id)s." 
+msgstr "" + +#: neutron/extensions/external_net.py:23 +#, python-format +msgid "" +"External network %(net_id)s cannot be updated to be made non-external, " +"since it has existing gateway ports" +msgstr "" + +#: neutron/extensions/external_net.py:51 +msgid "Adds external network attribute to network resource." +msgstr "" + +#: neutron/extensions/extra_dhcp_opt.py:25 +#, python-format +msgid "ExtraDhcpOpt %(id)s could not be found" +msgstr "" + +#: neutron/extensions/extra_dhcp_opt.py:29 +#, python-format +msgid "Invalid data format for extra-dhcp-opt: %(data)s" +msgstr "" + +#: neutron/extensions/extraroute.py:23 +#, python-format +msgid "Invalid format for routes: %(routes)s, %(reason)s" +msgstr "" + +#: neutron/extensions/extraroute.py:27 +#, python-format +msgid "" +"Router interface for subnet %(subnet_id)s on router %(router_id)s cannot " +"be deleted, as it is required by one or more routes." +msgstr "" + +#: neutron/extensions/extraroute.py:33 +#, python-format +msgid "" +"Unable to complete operation for %(router_id)s. The number of routes " +"exceeds the maximum %(quota)s." +msgstr "" + +#: neutron/extensions/firewall.py:37 +#, python-format +msgid "Firewall %(firewall_id)s could not be found." +msgstr "" + +#: neutron/extensions/firewall.py:41 +#, python-format +msgid "Firewall %(firewall_id)s is still active." +msgstr "" + +#: neutron/extensions/firewall.py:45 +#, python-format +msgid "" +"Operation cannot be performed since associated Firewall %(firewall_id)s " +"is in %(pending_state)s." +msgstr "" + +#: neutron/extensions/firewall.py:50 +#, python-format +msgid "Firewall Policy %(firewall_policy_id)s could not be found." +msgstr "" + +#: neutron/extensions/firewall.py:54 +#, python-format +msgid "Firewall Policy %(firewall_policy_id)s is being used." +msgstr "" + +#: neutron/extensions/firewall.py:58 +#, python-format +msgid "Firewall Rule %(firewall_rule_id)s could not be found." 
+msgstr "" + +#: neutron/extensions/firewall.py:62 +#, python-format +msgid "Firewall Rule %(firewall_rule_id)s is being used." +msgstr "" + +#: neutron/extensions/firewall.py:66 +#, python-format +msgid "" +"Firewall Rule %(firewall_rule_id)s is not associated with Firewall " +"Policy %(firewall_policy_id)s." +msgstr "" + +#: neutron/extensions/firewall.py:71 +#, python-format +msgid "" +"Firewall Rule protocol %(protocol)s is not supported. Only protocol " +"values %(values)s and their integer representation (0 to 255) are " +"supported." +msgstr "" + +#: neutron/extensions/firewall.py:77 +#, python-format +msgid "" +"Firewall rule action %(action)s is not supported. Only action values " +"%(values)s are supported." +msgstr "" + +#: neutron/extensions/firewall.py:82 +#, python-format +msgid "%(param)s are not allowed when protocol is set to ICMP." +msgstr "" + +#: neutron/extensions/firewall.py:87 +#, python-format +msgid "Invalid value for port %(port)s." +msgstr "" + +#: neutron/extensions/firewall.py:91 +msgid "Missing rule info argument for insert/remove rule operation." +msgstr "" + +#: neutron/extensions/firewall.py:101 +#, python-format +msgid "%(driver)s: Internal driver error." +msgstr "" + +#: neutron/extensions/firewall.py:150 +#, python-format +msgid "Port '%s' is not a valid number" +msgstr "" + +#: neutron/extensions/firewall.py:154 +#, python-format +msgid "Invalid port '%s'" +msgstr "" + +#: neutron/extensions/firewall.py:168 +#, python-format +msgid "%(msg_ip)s and %(msg_subnet)s" +msgstr "" + +#: neutron/extensions/firewall.py:289 +msgid "Number of firewalls allowed per tenant. A negative value means unlimited." +msgstr "" + +#: neutron/extensions/firewall.py:293 +msgid "" +"Number of firewall policies allowed per tenant. A negative value means " +"unlimited." +msgstr "" + +#: neutron/extensions/firewall.py:297 +msgid "" +"Number of firewall rules allowed per tenant. A negative value means " +"unlimited." 
+msgstr "" + +#: neutron/extensions/l3.py:29 +#, python-format +msgid "Router %(router_id)s could not be found" +msgstr "" + +#: neutron/extensions/l3.py:33 +#, python-format +msgid "Router %(router_id)s still has ports" +msgstr "" + +#: neutron/extensions/l3.py:37 +#, python-format +msgid "Router %(router_id)s does not have an interface with id %(port_id)s" +msgstr "" + +#: neutron/extensions/l3.py:42 +#, python-format +msgid "Router %(router_id)s has no interface on subnet %(subnet_id)s" +msgstr "" + +#: neutron/extensions/l3.py:47 +#, python-format +msgid "" +"Router interface for subnet %(subnet_id)s on router %(router_id)s cannot " +"be deleted, as it is required by one or more floating IPs." +msgstr "" + +#: neutron/extensions/l3.py:53 +#, python-format +msgid "Floating IP %(floatingip_id)s could not be found" +msgstr "" + +#: neutron/extensions/l3.py:57 +#, python-format +msgid "" +"External network %(external_network_id)s is not reachable from subnet " +"%(subnet_id)s. Therefore, cannot associate Port %(port_id)s with a " +"Floating IP." +msgstr "" + +#: neutron/extensions/l3.py:63 +#, python-format +msgid "" +"Cannot associate floating IP %(floating_ip_address)s (%(fip_id)s) with " +"port %(port_id)s using fixed IP %(fixed_ip)s, as that fixed IP already " +"has a floating IP on external network %(net_id)s." +msgstr "" + +#: neutron/extensions/l3.py:70 +#, python-format +msgid "" +"Port %(port_id)s has owner %(device_owner)s and therefore cannot be " +"deleted directly via the port API." +msgstr "" + +#: neutron/extensions/l3.py:75 +#, python-format +msgid "" +"Gateway cannot be updated for router %(router_id)s, since a gateway to " +"external network %(net_id)s is required by one or more floating IPs." +msgstr "" + +#: neutron/extensions/l3.py:138 +msgid "Number of routers allowed per tenant. A negative value means unlimited." +msgstr "" + +#: neutron/extensions/l3.py:142 +msgid "" +"Number of floating IPs allowed per tenant. 
A negative value means " +"unlimited." +msgstr "" + +#: neutron/extensions/l3agentscheduler.py:47 +#: neutron/extensions/l3agentscheduler.py:85 +msgid "No plugin for L3 routing registered to handle router scheduling" +msgstr "" + +#: neutron/extensions/l3agentscheduler.py:151 +#, python-format +msgid "Agent %(id)s is not a L3 Agent or has been disabled" +msgstr "" + +#: neutron/extensions/l3agentscheduler.py:155 +#, python-format +msgid "" +"The router %(router_id)s has been already hosted by the L3 Agent " +"%(agent_id)s." +msgstr "" + +#: neutron/extensions/l3agentscheduler.py:160 +#, python-format +msgid "Failed scheduling router %(router_id)s to the L3 Agent %(agent_id)s." +msgstr "" + +#: neutron/extensions/l3agentscheduler.py:165 +#, python-format +msgid "Failed rescheduling router %(router_id)s: no eligible l3 agent found." +msgstr "" + +#: neutron/extensions/l3agentscheduler.py:170 +#, python-format +msgid "The router %(router_id)s is not hosted by L3 agent %(agent_id)s." +msgstr "" + +#: neutron/extensions/lbaas_agentscheduler.py:116 +#, python-format +msgid "No eligible loadbalancer agent found for pool %(pool_id)s." +msgstr "" + +#: neutron/extensions/lbaas_agentscheduler.py:121 +#, python-format +msgid "No active loadbalancer agent found for pool %(pool_id)s." 
+msgstr "" + +#: neutron/extensions/loadbalancer.py:33 +msgid "Delay must be greater than or equal to timeout" +msgstr "" + +#: neutron/extensions/loadbalancer.py:37 +#, python-format +msgid "No eligible backend for pool %(pool_id)s" +msgstr "" + +#: neutron/extensions/loadbalancer.py:41 +#, python-format +msgid "Vip %(vip_id)s could not be found" +msgstr "" + +#: neutron/extensions/loadbalancer.py:45 +#, python-format +msgid "Another Vip already exists for pool %(pool_id)s" +msgstr "" + +#: neutron/extensions/loadbalancer.py:49 +#, python-format +msgid "Pool %(pool_id)s could not be found" +msgstr "" + +#: neutron/extensions/loadbalancer.py:53 +#, python-format +msgid "Member %(member_id)s could not be found" +msgstr "" + +#: neutron/extensions/loadbalancer.py:57 +#, python-format +msgid "Health_monitor %(monitor_id)s could not be found" +msgstr "" + +#: neutron/extensions/loadbalancer.py:61 +#, python-format +msgid "Monitor %(monitor_id)s is not associated with Pool %(pool_id)s" +msgstr "" + +#: neutron/extensions/loadbalancer.py:66 +#, python-format +msgid "health_monitor %(monitor_id)s is already associated with pool %(pool_id)s" +msgstr "" + +#: neutron/extensions/loadbalancer.py:71 +#, python-format +msgid "Invalid state %(state)s of Loadbalancer resource %(id)s" +msgstr "" + +#: neutron/extensions/loadbalancer.py:75 +#, python-format +msgid "Pool %(pool_id)s is still in use" +msgstr "" + +#: neutron/extensions/loadbalancer.py:79 +#, python-format +msgid "Health monitor %(monitor_id)s still has associations with pools" +msgstr "" + +#: neutron/extensions/loadbalancer.py:84 +#, python-format +msgid "Statistics of Pool %(pool_id)s could not be found" +msgstr "" + +#: neutron/extensions/loadbalancer.py:88 +#, python-format +msgid "Protocol %(vip_proto)s does not match pool protocol %(pool_proto)s" +msgstr "" + +#: neutron/extensions/loadbalancer.py:93 +#, python-format +msgid "" +"Member with address %(address)s and port %(port)s already present in pool" +" 
%(pool)s" +msgstr "" + +#: neutron/extensions/loadbalancer.py:309 +msgid "Number of vips allowed per tenant. A negative value means unlimited." +msgstr "" + +#: neutron/extensions/loadbalancer.py:313 +msgid "Number of pools allowed per tenant. A negative value means unlimited." +msgstr "" + +#: neutron/extensions/loadbalancer.py:317 +msgid "" +"Number of pool members allowed per tenant. A negative value means " +"unlimited." +msgstr "" + +#: neutron/extensions/loadbalancer.py:321 +msgid "" +"Number of health monitors allowed per tenant. A negative value means " +"unlimited." +msgstr "" + +#: neutron/extensions/metering.py:33 +#, python-format +msgid "Metering label %(label_id)s does not exist" +msgstr "" + +#: neutron/extensions/metering.py:37 +msgid "Duplicate Metering Rule in POST." +msgstr "" + +#: neutron/extensions/metering.py:41 +#, python-format +msgid "Metering label rule %(rule_id)s does not exist" +msgstr "" + +#: neutron/extensions/metering.py:45 +#, python-format +msgid "" +"Metering label rule with remote_ip_prefix %(remote_ip_prefix)s overlaps " +"another" +msgstr "" + +#: neutron/extensions/multiprovidernet.py:27 +msgid "Segments and provider values cannot both be set." +msgstr "" + +#: neutron/extensions/multiprovidernet.py:31 +msgid "Duplicate segment entry in request." +msgstr "" + +#: neutron/extensions/portsecurity.py:20 +msgid "" +"Port has security group associated. Cannot disable port security or ip " +"address until security group is removed" +msgstr "" + +#: neutron/extensions/portsecurity.py:25 +msgid "" +"Port security must be enabled and port must have an IP address in order " +"to use security groups." +msgstr "" + +#: neutron/extensions/portsecurity.py:30 +msgid "Port does not have port security binding." 
+msgstr "" + +#: neutron/extensions/providernet.py:54 +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:289 +msgid "Plugin does not support updating provider attributes" +msgstr "" + +#: neutron/extensions/quotasv2.py:67 +msgid "POST requests are not supported on this resource." +msgstr "" + +#: neutron/extensions/quotasv2.py:86 +msgid "Only admin is authorized to access quotas for another tenant" +msgstr "" + +#: neutron/extensions/quotasv2.py:91 +msgid "Only admin can view or configure quota" +msgstr "" + +#: neutron/extensions/securitygroup.py:34 +msgid "For TCP/UDP protocols, port_range_min must be <= port_range_max" +msgstr "" + +#: neutron/extensions/securitygroup.py:39 +#, python-format +msgid "Invalid value for port %(port)s" +msgstr "" + +#: neutron/extensions/securitygroup.py:43 +#, python-format +msgid "" +"Invalid value for ICMP %(field)s (%(attr)s) %(value)s. It must be 0 to " +"255." +msgstr "" + +#: neutron/extensions/securitygroup.py:48 +#, python-format +msgid "" +"ICMP code (port-range-max) %(value)s is provided but ICMP type (port-" +"range-min) is missing." +msgstr "" + +#: neutron/extensions/securitygroup.py:53 +#, python-format +msgid "Security Group %(id)s in use." +msgstr "" + +#: neutron/extensions/securitygroup.py:57 +msgid "Removing default security group not allowed." +msgstr "" + +#: neutron/extensions/securitygroup.py:61 +msgid "Updating default security group not allowed." +msgstr "" + +#: neutron/extensions/securitygroup.py:65 +msgid "Default security group already exists." +msgstr "" + +#: neutron/extensions/securitygroup.py:69 +#, python-format +msgid "" +"Security group rule protocol %(protocol)s not supported. Only protocol " +"values %(values)s and their integer representation (0 to 255) are " +"supported." 
+msgstr "" + +#: neutron/extensions/securitygroup.py:75 +msgid "Multiple tenant_ids in bulk security group rule create not allowed" +msgstr "" + +#: neutron/extensions/securitygroup.py:80 +msgid "Only remote_ip_prefix or remote_group_id may be provided." +msgstr "" + +#: neutron/extensions/securitygroup.py:85 +msgid "Must also specifiy protocol if port range is given." +msgstr "" + +#: neutron/extensions/securitygroup.py:89 +msgid "Only allowed to update rules for one security profile at a time" +msgstr "" + +#: neutron/extensions/securitygroup.py:94 +#, python-format +msgid "Security group %(id)s does not exist" +msgstr "" + +#: neutron/extensions/securitygroup.py:98 +#, python-format +msgid "Security group rule %(id)s does not exist" +msgstr "" + +#: neutron/extensions/securitygroup.py:102 +msgid "Duplicate Security Group Rule in POST." +msgstr "" + +#: neutron/extensions/securitygroup.py:106 +#, python-format +msgid "Security group rule already exists. Group id is %(id)s." +msgstr "" + +#: neutron/extensions/securitygroup.py:110 +#, python-format +msgid "Conflicting value ethertype %(ethertype)s for CIDR %(cidr)s" +msgstr "" + +#: neutron/extensions/securitygroup.py:158 +#, python-format +msgid "'%s' is not an integer or uuid" +msgstr "" + +#: neutron/extensions/securitygroup.py:247 +msgid "" +"Number of security groups allowed per tenant. A negative value means " +"unlimited." +msgstr "" + +#: neutron/extensions/securitygroup.py:251 +msgid "" +"Number of security rules allowed per tenant. A negative value means " +"unlimited." 
+msgstr "" + +#: neutron/extensions/servicetype.py:52 +msgid "Neutron Service Type Management" +msgstr "" + +#: neutron/extensions/servicetype.py:60 +msgid "API for retrieving service providers for Neutron advanced services" +msgstr "" + +#: neutron/extensions/vpnaas.py:31 +#, python-format +msgid "VPNService %(vpnservice_id)s could not be found" +msgstr "" + +#: neutron/extensions/vpnaas.py:35 +#, python-format +msgid "ipsec_site_connection %(ipsecsite_conn_id)s not found" +msgstr "" + +#: neutron/extensions/vpnaas.py:39 +#, python-format +msgid "ipsec_site_connection %(attr)s is equal to or less than dpd_interval" +msgstr "" + +#: neutron/extensions/vpnaas.py:44 +#, python-format +msgid "ipsec_site_connection MTU %(mtu)d is too small for ipv%(version)s" +msgstr "" + +#: neutron/extensions/vpnaas.py:49 +#, python-format +msgid "IKEPolicy %(ikepolicy_id)s could not be found" +msgstr "" + +#: neutron/extensions/vpnaas.py:53 +#, python-format +msgid "IPsecPolicy %(ipsecpolicy_id)s could not be found" +msgstr "" + +#: neutron/extensions/vpnaas.py:57 +#, python-format +msgid "" +"IKEPolicy %(ikepolicy_id)s is in use by existing IPsecSiteConnection and " +"can't be updated or deleted" +msgstr "" + +#: neutron/extensions/vpnaas.py:62 +#, python-format +msgid "VPNService %(vpnservice_id)s is still in use" +msgstr "" + +#: neutron/extensions/vpnaas.py:66 +#, python-format +msgid "Router %(router_id)s is used by VPNService %(vpnservice_id)s" +msgstr "" + +#: neutron/extensions/vpnaas.py:70 +#, python-format +msgid "Invalid state %(state)s of vpnaas resource %(id)s for updating" +msgstr "" + +#: neutron/extensions/vpnaas.py:75 +#, python-format +msgid "" +"IPsecPolicy %(ipsecpolicy_id)s is in use by existing IPsecSiteConnection " +"and can't be updated or deleted" +msgstr "" + +#: neutron/extensions/vpnaas.py:80 +#, python-format +msgid "Can not load driver :%(device_driver)s" +msgstr "" + +#: neutron/extensions/vpnaas.py:84 +#, python-format +msgid "Subnet %(subnet_id)s is 
not connected to Router %(router_id)s" +msgstr "" + +#: neutron/extensions/vpnaas.py:89 +#, python-format +msgid "Router %(router_id)s has no external network gateway set" +msgstr "" + +#: neutron/notifiers/nova.py:165 +msgid "device_id is not set on port yet." +msgstr "" + +#: neutron/notifiers/nova.py:169 +msgid "Port ID not set! Nova will not be notified of port status change." +msgstr "" + +#: neutron/notifiers/nova.py:194 +#, python-format +msgid "" +"Ignoring state change previous_port_status: %(pre_status)s " +"current_port_status: %(cur_status)s port_id %(id)s" +msgstr "" + +#: neutron/notifiers/nova.py:220 +#, python-format +msgid "Sending events: %s" +msgstr "" + +#: neutron/notifiers/nova.py:225 +#, python-format +msgid "Nova returned NotFound for event: %s" +msgstr "" + +#: neutron/notifiers/nova.py:228 +#, python-format +msgid "Failed to notify nova on events: %s" +msgstr "" + +#: neutron/notifiers/nova.py:232 neutron/notifiers/nova.py:248 +#, python-format +msgid "Error response returned from nova: %s" +msgstr "" + +#: neutron/notifiers/nova.py:243 +#, python-format +msgid "Nova event: %s returned with failed status" +msgstr "" + +#: neutron/notifiers/nova.py:246 +#, python-format +msgid "Nova event response: %s" +msgstr "" + +#: neutron/openstack/common/eventlet_backdoor.py:140 +#, python-format +msgid "Eventlet backdoor listening on %(port)s for process %(pid)d" +msgstr "" + +#: neutron/openstack/common/gettextutils.py:320 +msgid "Message objects do not support addition." +msgstr "" + +#: neutron/openstack/common/gettextutils.py:330 +msgid "" +"Message objects do not support str() because they may contain non-ascii " +"characters. Please use unicode() or translate() instead." 
+msgstr "" + +#: neutron/openstack/common/lockutils.py:103 +#, python-format +msgid "Could not release the acquired lock `%s`" +msgstr "" + +#: neutron/openstack/common/lockutils.py:168 +#, python-format +msgid "Got semaphore \"%(lock)s\"" +msgstr "" + +#: neutron/openstack/common/lockutils.py:177 +#, python-format +msgid "Attempting to grab file lock \"%(lock)s\"" +msgstr "" + +#: neutron/openstack/common/lockutils.py:187 +#, python-format +msgid "Created lock path: %s" +msgstr "" + +#: neutron/openstack/common/lockutils.py:205 +#, python-format +msgid "Got file lock \"%(lock)s\" at %(path)s" +msgstr "" + +#: neutron/openstack/common/lockutils.py:209 +#, python-format +msgid "Released file lock \"%(lock)s\" at %(path)s" +msgstr "" + +#: neutron/openstack/common/lockutils.py:247 +#, python-format +msgid "Got semaphore / lock \"%(function)s\"" +msgstr "" + +#: neutron/openstack/common/lockutils.py:251 +#, python-format +msgid "Semaphore / lock released \"%(function)s\"" +msgstr "" + +#: neutron/openstack/common/log.py:301 +#, python-format +msgid "Deprecated: %s" +msgstr "" + +#: neutron/openstack/common/log.py:402 +#, python-format +msgid "Error loading logging config %(log_config)s: %(err_msg)s" +msgstr "" + +#: neutron/openstack/common/log.py:453 +#, python-format +msgid "syslog facility must be one of: %s" +msgstr "" + +#: neutron/openstack/common/log.py:623 +#, python-format +msgid "Fatal call to deprecated config: %(msg)s" +msgstr "" + +#: neutron/openstack/common/loopingcall.py:82 +#, python-format +msgid "task run outlasted interval by %s sec" +msgstr "" + +#: neutron/openstack/common/loopingcall.py:89 +msgid "in fixed duration looping call" +msgstr "" + +#: neutron/openstack/common/loopingcall.py:129 +#, python-format +msgid "Dynamic looping call sleeping for %.02f seconds" +msgstr "" + +#: neutron/openstack/common/loopingcall.py:136 +msgid "in dynamic looping call" +msgstr "" + +#: neutron/openstack/common/periodic_task.py:39 +#, python-format +msgid 
"Unexpected argument for periodic task creation: %(arg)s." +msgstr "" + +#: neutron/openstack/common/policy.py:395 +#, python-format +msgid "Failed to understand rule %(rule)s" +msgstr "" + +#: neutron/openstack/common/policy.py:405 +#, python-format +msgid "No handler for matches of kind %s" +msgstr "" + +#: neutron/openstack/common/policy.py:680 +#, python-format +msgid "Failed to understand rule %(rule)r" +msgstr "" + +#: neutron/openstack/common/processutils.py:130 +#, python-format +msgid "Got unknown keyword args to utils.execute: %r" +msgstr "" + +#: neutron/openstack/common/processutils.py:145 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "" + +#: neutron/openstack/common/processutils.py:169 +#: neutron/openstack/common/processutils.py:241 +#, python-format +msgid "Result was %s" +msgstr "" + +#: neutron/openstack/common/processutils.py:181 +#, python-format +msgid "%r failed. Retrying." +msgstr "" + +#: neutron/openstack/common/processutils.py:220 +#, python-format +msgid "Running cmd (SSH): %s" +msgstr "" + +#: neutron/openstack/common/processutils.py:222 +msgid "Environment not supported over SSH" +msgstr "" + +#: neutron/openstack/common/processutils.py:226 +msgid "process_input not supported over SSH" +msgstr "" + +#: neutron/openstack/common/sslutils.py:50 +#, python-format +msgid "Unable to find cert_file : %s" +msgstr "" + +#: neutron/openstack/common/sslutils.py:53 +#, python-format +msgid "Unable to find ca_file : %s" +msgstr "" + +#: neutron/openstack/common/sslutils.py:56 +#, python-format +msgid "Unable to find key_file : %s" +msgstr "" + +#: neutron/openstack/common/sslutils.py:59 +msgid "" +"When running server in SSL mode, you must specify both a cert_file and " +"key_file option value in your configuration file" +msgstr "" + +#: neutron/openstack/common/sslutils.py:98 +#, python-format +msgid "Invalid SSL version : %s" +msgstr "" + +#: neutron/openstack/common/strutils.py:92 +#, python-format +msgid "Unrecognized value 
'%(val)s', acceptable values are: %(acceptable)s" +msgstr "" + +#: neutron/openstack/common/strutils.py:197 +#, python-format +msgid "Invalid unit system: \"%s\"" +msgstr "" + +#: neutron/openstack/common/strutils.py:206 +#, python-format +msgid "Invalid string format: %s" +msgstr "" + +#: neutron/openstack/common/versionutils.py:69 +#, python-format +msgid "" +"%(what)s is deprecated as of %(as_of)s in favor of %(in_favor_of)s and " +"may be removed in %(remove_in)s." +msgstr "" + +#: neutron/openstack/common/versionutils.py:73 +#, python-format +msgid "" +"%(what)s is deprecated as of %(as_of)s and may be removed in " +"%(remove_in)s. It will not be superseded." +msgstr "" + +#: neutron/openstack/common/db/exception.py:44 +msgid "Invalid Parameter: Unicode is not supported by the current database." +msgstr "" + +#: neutron/openstack/common/db/sqlalchemy/utils.py:58 +msgid "Sort key supplied was not valid." +msgstr "" + +#: neutron/openstack/common/db/sqlalchemy/utils.py:119 +msgid "Unknown sort direction, must be 'desc' or 'asc'" +msgstr "" + +#: neutron/openstack/common/db/sqlalchemy/utils.py:162 +#, python-format +msgid "" +"There is no `deleted` column in `%s` table. Project doesn't use soft-" +"deleted feature." +msgstr "" + +#: neutron/openstack/common/db/sqlalchemy/utils.py:174 +#, python-format +msgid "Unrecognized read_deleted value '%s'" +msgstr "" + +#: neutron/openstack/common/db/sqlalchemy/utils.py:181 +#, python-format +msgid "There is no `project_id` column in `%s` table." +msgstr "" + +#: neutron/openstack/common/db/sqlalchemy/utils.py:243 +msgid "model should be a subclass of ModelBase" +msgstr "" + +#: neutron/openstack/common/db/sqlalchemy/utils.py:286 +#, python-format +msgid "" +"Please specify column %s in col_name_col_instance param. It is required " +"because column has unsupported type by sqlite)." 
+msgstr "" + +#: neutron/openstack/common/db/sqlalchemy/utils.py:292 +#, python-format +msgid "" +"col_name_col_instance param has wrong type of column instance for column " +"%s It should be instance of sqlalchemy.Column." +msgstr "" + +#: neutron/openstack/common/db/sqlalchemy/utils.py:400 +msgid "Unsupported id columns type" +msgstr "" + +#: neutron/openstack/common/middleware/catch_errors.py:40 +#, python-format +msgid "An error occurred during processing the request: %s" +msgstr "" + +#: neutron/openstack/common/middleware/sizelimit.py:55 +#: neutron/openstack/common/middleware/sizelimit.py:64 +#: neutron/openstack/common/middleware/sizelimit.py:75 +msgid "Request is too large." +msgstr "" + +#: neutron/plugins/bigswitch/config.py:33 +msgid "" +"A comma separated list of Big Switch or Floodlight servers and port " +"numbers. The plugin proxies the requests to the Big Switch/Floodlight " +"server, which performs the networking configuration. Only oneserver is " +"needed per deployment, but you may wish todeploy multiple servers to " +"support failover." +msgstr "" + +#: neutron/plugins/bigswitch/config.py:40 +msgid "" +"The username and password for authenticating against the Big Switch or " +"Floodlight controller." +msgstr "" + +#: neutron/plugins/bigswitch/config.py:43 +msgid "" +"If True, Use SSL when connecting to the Big Switch or Floodlight " +"controller." +msgstr "" + +#: neutron/plugins/bigswitch/config.py:46 +msgid "" +"Trust and store the first certificate received for each controller " +"address and use it to validate future connections to that address." +msgstr "" + +#: neutron/plugins/bigswitch/config.py:50 +msgid "Disables SSL certificate validation for controllers" +msgstr "" + +#: neutron/plugins/bigswitch/config.py:52 +msgid "Re-use HTTP/HTTPS connections to the controller." +msgstr "" + +#: neutron/plugins/bigswitch/config.py:55 +msgid "Directory containing ca_certs and host_certs certificate directories." 
+msgstr "" + +#: neutron/plugins/bigswitch/config.py:58 +msgid "Sync data on connect" +msgstr "" + +#: neutron/plugins/bigswitch/config.py:60 +msgid "" +"If neutron fails to create a resource because the backend controller " +"doesn't know of a dependency, the plugin automatically triggers a full " +"data synchronization to the controller." +msgstr "" + +#: neutron/plugins/bigswitch/config.py:65 +msgid "" +"Time between verifications that the backend controller database is " +"consistent with Neutron" +msgstr "" + +#: neutron/plugins/bigswitch/config.py:68 +msgid "" +"Maximum number of seconds to wait for proxy request to connect and " +"complete." +msgstr "" + +#: neutron/plugins/bigswitch/config.py:71 +msgid "" +"Maximum number of threads to spawn to handle large volumes of port " +"creations." +msgstr "" + +#: neutron/plugins/bigswitch/config.py:75 +msgid "User defined identifier for this Neutron deployment" +msgstr "" + +#: neutron/plugins/bigswitch/config.py:77 +msgid "" +"Flag to decide if a route to the metadata server should be injected into " +"the VM" +msgstr "" + +#: neutron/plugins/bigswitch/config.py:82 +msgid "" +"The default router rules installed in new tenant routers. Repeat the " +"config option for each rule. Format is " +"::: Use an * to specify default for " +"all tenants." +msgstr "" + +#: neutron/plugins/bigswitch/config.py:87 +msgid "Maximum number of router rules" +msgstr "" + +#: neutron/plugins/bigswitch/config.py:91 +msgid "Virtual interface type to configure on Nova compute nodes" +msgstr "" + +#: neutron/plugins/bigswitch/config.py:98 +#, python-format +msgid "Nova compute nodes to manually set VIF type to %s" +msgstr "" + +#: neutron/plugins/bigswitch/config.py:105 +msgid "List of allowed vif_type values." +msgstr "" + +#: neutron/plugins/bigswitch/config.py:109 +msgid "" +"Name of integration bridge on compute nodes used for security group " +"insertion." 
+msgstr "" + +#: neutron/plugins/bigswitch/config.py:112 +msgid "Seconds between agent checks for port changes" +msgstr "" + +#: neutron/plugins/bigswitch/config.py:114 +msgid "Virtual switch type." +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:94 +msgid "Syntax error in server config file, aborting plugin" +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:132 neutron/plugins/ml2/db.py:100 +#, python-format +msgid "get_port_and_sgs() called for port_id %s" +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:238 +#, python-format +msgid "Unable to update remote topology: %s" +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:322 +#, python-format +msgid "" +"Setting admin_state_up=False is not supported in this plugin version. " +"Ignoring setting for resource: %s" +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:328 +#, python-format +msgid "" +"Operational status is internally set by the plugin. Ignoring setting " +"status=%s." +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:353 +#, python-format +msgid "Unrecognized vif_type in configuration [%s]. Defaulting to ovs." +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:399 +#: neutron/plugins/ml2/drivers/mech_bigswitch/driver.py:98 +msgid "Iconsistency with backend controller triggering full synchronization." +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:416 +#, python-format +msgid "NeutronRestProxyV2: Unable to create port: %s" +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:467 +#, python-format +msgid "NeutronRestProxy: Starting plugin. 
Version=%s" +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:494 +msgid "NeutronRestProxyV2: initialization done" +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:536 +msgid "NeutronRestProxyV2: create_network() called" +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:578 +msgid "NeutronRestProxyV2.update_network() called" +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:605 +msgid "NeutronRestProxyV2: delete_network() called" +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:642 +msgid "NeutronRestProxyV2: create_port() called" +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:733 +msgid "NeutronRestProxyV2: update_port() called" +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:792 +msgid "NeutronRestProxyV2: delete_port() called" +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:808 +msgid "NeutronRestProxyV2: create_subnet() called" +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:824 +msgid "NeutronRestProxyV2: update_subnet() called" +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:842 +msgid "NeutronRestProxyV2: delete_subnet() called" +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:880 +msgid "NeutronRestProxyV2: create_router() called" +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:902 +msgid "NeutronRestProxyV2.update_router() called" +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:924 +msgid "NeutronRestProxyV2: delete_router() called" +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:953 +msgid "NeutronRestProxyV2: add_router_interface() called" +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:981 +msgid "NeutronRestProxyV2: remove_router_interface() called" +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:1014 +msgid "NeutronRestProxyV2: create_floatingip() called" +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:1031 +#, python-format +msgid "NeutronRestProxyV2: Unable to create remote floating IP: %s" +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:1037 +msgid 
"NeutronRestProxyV2: update_floatingip() called" +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:1053 +msgid "NeutronRestProxyV2: delete_floatingip() called" +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:1068 +msgid "NeutronRestProxyV2: disassociate_floatingips() called" +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:1098 +msgid "NeutronRestProxyV2: too many external networks" +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:1113 +msgid "Adding host route: " +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:1114 +#, python-format +msgid "Destination:%(dst)s nexthop:%(next)s" +msgstr "" + +#: neutron/plugins/bigswitch/routerrule_db.py:77 +msgid "No rules in router" +msgstr "" + +#: neutron/plugins/bigswitch/routerrule_db.py:91 +#, python-format +msgid "Updating router rules to %s" +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:77 +#, python-format +msgid "Error in REST call to remote network controller: %(reason)s" +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:116 +msgid "Couldn't retrieve capabilities. Newer API calls won't be supported." 
+msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:118 +#, python-format +msgid "The following capabilities were received for %(server)s: %(cap)s" +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:142 +#, python-format +msgid "ServerProxy: server=%(server)s, port=%(port)d, ssl=%(ssl)r" +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:145 +#, python-format +msgid "" +"ServerProxy: resource=%(resource)s, data=%(data)r, headers=%(headers)r, " +"action=%(action)s" +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:166 +msgid "ServerProxy: Could not establish HTTPS connection" +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:174 +msgid "ServerProxy: Could not establish HTTP connection" +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:209 +#, python-format +msgid "ServerProxy: %(action)s failure, %(e)r" +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:212 +#, python-format +msgid "" +"ServerProxy: status=%(status)d, reason=%(reason)r, ret=%(ret)s, " +"data=%(data)r" +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:228 +msgid "ServerPool: initializing" +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:254 +msgid "Servers not defined. Aborting server manager." +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:261 +#, python-format +msgid "Servers must be defined as :. Configuration was %s" +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:269 +msgid "ServerPool: initialization done" +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:302 +#, python-format +msgid "ssl_cert_directory [%s] does not exist. Create it or disable ssl." +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:319 +#, python-format +msgid "No certificates were found to verify controller %s" +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:361 +#, python-format +msgid "" +"Could not retrieve initial certificate from controller %(server)s. 
Error " +"details: %(error)s" +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:366 +#, python-format +msgid "Storing to certificate for host %(server)s at %(path)s" +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:406 +msgid "Server requires synchronization, but no topology function was defined." +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:421 +#, python-format +msgid "" +"ServerProxy: %(action)s failure for servers: %(server)r Response: " +"%(response)s" +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:427 +#, python-format +msgid "" +"ServerProxy: Error details: status=%(status)d, reason=%(reason)r, " +"ret=%(ret)s, data=%(data)r" +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:434 +#, python-format +msgid "ServerProxy: %(action)s failure for all servers: %(server)r" +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:457 +#, python-format +msgid "" +"NeutronRestProxyV2: Received and ignored error code %(code)s on " +"%(action)s action to resource %(resource)s" +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:467 +#, python-format +msgid "Unable to create remote router: %s" +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:473 +#, python-format +msgid "Unable to update remote router: %s" +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:478 +#, python-format +msgid "Unable to delete remote router: %s" +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:484 +#, python-format +msgid "Unable to add router interface: %s" +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:489 +#, python-format +msgid "Unable to delete remote intf: %s" +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:495 +#, python-format +msgid "Unable to create remote network: %s" +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:501 +#: neutron/plugins/bigswitch/servermanager.py:506 +#, python-format +msgid "Unable to update remote network: %s" +msgstr "" + +#: 
neutron/plugins/bigswitch/servermanager.py:515 +#, python-format +msgid "No device MAC attached to port %s. Skipping notification to controller." +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:520 +#, python-format +msgid "Unable to create remote port: %s" +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:525 +#, python-format +msgid "Unable to delete remote port: %s" +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:535 +#, python-format +msgid "Unable to create floating IP: %s" +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:540 +#, python-format +msgid "Unable to update floating IP: %s" +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:545 +#, python-format +msgid "Unable to delete floating IP: %s" +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:550 +msgid "Backend server(s) do not support automated consistency checks." +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:562 +msgid "Encountered an error checking controller health." +msgstr "" + +#: neutron/plugins/bigswitch/agent/restproxy_agent.py:116 +#: neutron/plugins/ryu/agent/ryu_neutron_agent.py:235 +msgid "Port update received" +msgstr "" + +#: neutron/plugins/bigswitch/agent/restproxy_agent.py:120 +#, python-format +msgid "Port %s is not present on this host." +msgstr "" + +#: neutron/plugins/bigswitch/agent/restproxy_agent.py:123 +#, python-format +msgid "Port %s found. Refreshing firewall." 
+msgstr "" + +#: neutron/plugins/bigswitch/agent/restproxy_agent.py:151 +#: neutron/plugins/ryu/agent/ryu_neutron_agent.py:268 +msgid "Agent loop has new device" +msgstr "" + +#: neutron/plugins/bigswitch/agent/restproxy_agent.py:155 +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:398 +#: neutron/plugins/nec/agent/nec_neutron_agent.py:225 +#: neutron/plugins/oneconvergence/agent/nvsd_neutron_agent.py:159 +#: neutron/plugins/ryu/agent/ryu_neutron_agent.py:272 +msgid "Error in agent event loop" +msgstr "" + +#: neutron/plugins/bigswitch/agent/restproxy_agent.py:161 +#: neutron/plugins/ibm/agent/sdnve_neutron_agent.py:226 +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:996 +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1365 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1429 +#: neutron/plugins/ryu/agent/ryu_neutron_agent.py:278 +#, python-format +msgid "Loop iteration exceeded interval (%(polling_interval)s vs. %(elapsed)s)!" +msgstr "" + +#: neutron/plugins/bigswitch/db/consistency_db.py:55 +#, python-format +msgid "Consistency hash for group %(hash_id)s updated to %(hash)s" +msgstr "" + +#: neutron/plugins/bigswitch/db/porttracker_db.py:41 +msgid "No host_id in port request to track port location." +msgstr "" + +#: neutron/plugins/bigswitch/db/porttracker_db.py:44 +#, python-format +msgid "Received an empty port ID for host_id '%s'" +msgstr "" + +#: neutron/plugins/bigswitch/db/porttracker_db.py:47 +#, python-format +msgid "Received an empty host_id for port '%s'" +msgstr "" + +#: neutron/plugins/bigswitch/db/porttracker_db.py:49 +#, python-format +msgid "Logging port %(port)s on host_id %(host)s" +msgstr "" + +#: neutron/plugins/bigswitch/extensions/routerrule.py:30 +#, python-format +msgid "Invalid format for router rules: %(rule)s, %(reason)s" +msgstr "" + +#: neutron/plugins/bigswitch/extensions/routerrule.py:34 +#, python-format +msgid "" +"Unable to complete rules update for %(router_id)s. 
The number of rules " +"exceeds the maximum %(quota)s." +msgstr "" + +#: neutron/plugins/bigswitch/extensions/routerrule.py:51 +#, python-format +msgid "Invalid data format for router rule: '%s'" +msgstr "" + +#: neutron/plugins/bigswitch/extensions/routerrule.py:83 +#, python-format +msgid "Duplicate nexthop in rule '%s'" +msgstr "" + +#: neutron/plugins/bigswitch/extensions/routerrule.py:91 +#, python-format +msgid "Action must be either permit or deny. '%s' was provided" +msgstr "" + +#: neutron/plugins/bigswitch/extensions/routerrule.py:103 +#, python-format +msgid "Duplicate router rules (src,dst) found '%s'" +msgstr "" + +#: neutron/plugins/brocade/NeutronPlugin.py:64 +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:34 +msgid "The address of the host to SSH to" +msgstr "" + +#: neutron/plugins/brocade/NeutronPlugin.py:66 +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:36 +msgid "The SSH username to use" +msgstr "" + +#: neutron/plugins/brocade/NeutronPlugin.py:68 +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:38 +msgid "The SSH password to use" +msgstr "" + +#: neutron/plugins/brocade/NeutronPlugin.py:70 +msgid "Currently unused" +msgstr "" + +#: neutron/plugins/brocade/NeutronPlugin.py:74 +msgid "The network interface to use when creating a port" +msgstr "" + +#: neutron/plugins/brocade/NeutronPlugin.py:124 +#: neutron/plugins/hyperv/rpc_callbacks.py:47 +#: neutron/plugins/linuxbridge/lb_neutron_plugin.py:79 +#: neutron/plugins/mlnx/rpc_callbacks.py:63 +#: neutron/plugins/openvswitch/ovs_neutron_plugin.py:87 +#, python-format +msgid "Device %(device)s details requested from %(agent_id)s" +msgstr "" + +#: neutron/plugins/brocade/NeutronPlugin.py:138 +#: neutron/plugins/brocade/NeutronPlugin.py:155 +#: neutron/plugins/hyperv/rpc_callbacks.py:63 +#: neutron/plugins/hyperv/rpc_callbacks.py:82 +#: neutron/plugins/linuxbridge/lb_neutron_plugin.py:102 +#: neutron/plugins/linuxbridge/lb_neutron_plugin.py:129 +#: 
neutron/plugins/linuxbridge/lb_neutron_plugin.py:152 +#: neutron/plugins/mlnx/rpc_callbacks.py:85 +#: neutron/plugins/mlnx/rpc_callbacks.py:104 +#: neutron/plugins/mlnx/rpc_callbacks.py:119 +#: neutron/plugins/openvswitch/ovs_neutron_plugin.py:105 +#: neutron/plugins/openvswitch/ovs_neutron_plugin.py:132 +#: neutron/plugins/openvswitch/ovs_neutron_plugin.py:155 +#, python-format +msgid "%s can not be found in database" +msgstr "" + +#: neutron/plugins/brocade/NeutronPlugin.py:290 +#: neutron/plugins/brocade/NeutronPlugin.py:334 +#: neutron/plugins/brocade/NeutronPlugin.py:387 +#: neutron/plugins/brocade/NeutronPlugin.py:417 +msgid "Brocade NOS driver error" +msgstr "" + +#: neutron/plugins/brocade/NeutronPlugin.py:291 +#, python-format +msgid "Returning the allocated vlan (%d) to the pool" +msgstr "" + +#: neutron/plugins/brocade/NeutronPlugin.py:294 +#: neutron/plugins/brocade/NeutronPlugin.py:335 +#: neutron/plugins/brocade/NeutronPlugin.py:388 +#: neutron/plugins/brocade/NeutronPlugin.py:419 +msgid "Brocade plugin raised exception, check logs" +msgstr "" + +#: neutron/plugins/brocade/NeutronPlugin.py:300 +#, python-format +msgid "Allocated vlan (%d) from the pool" +msgstr "" + +#: neutron/plugins/brocade/nos/nosdriver.py:71 +#, python-format +msgid "Connect failed to switch: %s" +msgstr "" + +#: neutron/plugins/brocade/nos/nosdriver.py:73 +#: neutron/plugins/ml2/drivers/brocade/nos/nosdriver.py:76 +#, python-format +msgid "Connect success to host %(host)s:%(ssh_port)d" +msgstr "" + +#: neutron/plugins/brocade/nos/nosdriver.py:98 +#: neutron/plugins/brocade/nos/nosdriver.py:112 +#: neutron/plugins/brocade/nos/nosdriver.py:125 +#: neutron/plugins/brocade/nos/nosdriver.py:138 +#, python-format +msgid "NETCONF error: %s" +msgstr "" + +#: neutron/plugins/cisco/network_plugin.py:91 +msgid "Plugin initialization complete" +msgstr "" + +#: neutron/plugins/cisco/network_plugin.py:119 +#, python-format +msgid "'%(model)s' object has no attribute '%(name)s'" +msgstr "" + 
+#: neutron/plugins/cisco/network_plugin.py:136 +#: neutron/plugins/cisco/db/network_db_v2.py:38 +msgid "get_all_qoss() called" +msgstr "" + +#: neutron/plugins/cisco/network_plugin.py:142 +msgid "get_qos_details() called" +msgstr "" + +#: neutron/plugins/cisco/network_plugin.py:147 +msgid "create_qos() called" +msgstr "" + +#: neutron/plugins/cisco/network_plugin.py:153 +msgid "delete_qos() called" +msgstr "" + +#: neutron/plugins/cisco/network_plugin.py:158 +msgid "rename_qos() called" +msgstr "" + +#: neutron/plugins/cisco/network_plugin.py:163 +msgid "get_all_credentials() called" +msgstr "" + +#: neutron/plugins/cisco/network_plugin.py:169 +msgid "get_credential_details() called" +msgstr "" + +#: neutron/plugins/cisco/network_plugin.py:174 +msgid "rename_credential() called" +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:27 +#, python-format +msgid "Segmentation ID for network %(net_id)s is not found." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:32 +msgid "" +"Unable to complete operation. No more dynamic NICs are available in the " +"system." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:38 +#, python-format +msgid "" +"NetworkVlanBinding for %(vlan_id)s and network %(network_id)s already " +"exists." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:44 +#, python-format +msgid "Vlan ID %(vlan_id)s not found." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:49 +msgid "" +"Unable to complete operation. VLAN ID exists outside of the configured " +"network segment range." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:55 +msgid "No Vlan ID available." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:60 +#, python-format +msgid "QoS level %(qos_id)s could not be found for tenant %(tenant_id)s." 
+msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:66 +#, python-format +msgid "QoS level with name %(qos_name)s already exists for tenant %(tenant_id)s." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:72 +#: neutron/plugins/ml2/drivers/cisco/nexus/exceptions.py:23 +#, python-format +msgid "Credential %(credential_id)s could not be found." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:77 +#: neutron/plugins/ml2/drivers/cisco/nexus/exceptions.py:28 +#, python-format +msgid "Credential %(credential_name)s could not be found." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:82 +#, python-format +msgid "Credential %(credential_name)s already exists." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:87 +#, python-format +msgid "Provider network %s already exists" +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:92 +#: neutron/plugins/ml2/drivers/cisco/nexus/exceptions.py:39 +#, python-format +msgid "Connection to %(host)s is not configured." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:97 +#: neutron/plugins/ml2/drivers/cisco/nexus/exceptions.py:44 +#, python-format +msgid "Unable to connect to Nexus %(nexus_host)s. Reason: %(exc)s." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:102 +#: neutron/plugins/ml2/drivers/cisco/nexus/exceptions.py:49 +#, python-format +msgid "Failed to configure Nexus: %(config)s. Reason: %(exc)s." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:107 +#, python-format +msgid "Nexus Port Binding (%(filters)s) is not present." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:116 +#: neutron/plugins/ml2/drivers/cisco/nexus/exceptions.py:69 +msgid "No usable Nexus switch found to create SVI interface." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:121 +#, python-format +msgid "PortVnic Binding %(port_id)s already exists." 
+msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:126 +#, python-format +msgid "PortVnic Binding %(port_id)s is not present." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:131 +#: neutron/plugins/ml2/drivers/cisco/nexus/exceptions.py:74 +msgid "No subnet_id specified for router gateway." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:136 +#: neutron/plugins/ml2/drivers/cisco/nexus/exceptions.py:79 +#, python-format +msgid "Subnet %(subnet_id)s has an interface on %(router_id)s." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:141 +#: neutron/plugins/ml2/drivers/cisco/nexus/exceptions.py:84 +msgid "Nexus hardware router gateway only uses Subnet Ids." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:145 +#, python-format +msgid "" +"Unable to unplug the attachment %(att_id)s from port %(port_id)s for " +"network %(net_id)s. The attachment %(att_id)s does not exist." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:152 +#, python-format +msgid "Policy Profile %(profile_id)s already exists." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:158 +#, python-format +msgid "Policy Profile %(profile_id)s could not be found." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:163 +#, python-format +msgid "Network Profile %(profile_id)s already exists." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:169 +#, python-format +msgid "Network Profile %(profile)s could not be found." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:174 +#, python-format +msgid "" +"One or more network segments belonging to network profile %(profile)s is " +"in use." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:180 +#, python-format +msgid "" +"No more segments available in network segment pool " +"%(network_profile_name)s." 
+msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:186 +#, python-format +msgid "VM Network %(name)s could not be found." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:191 +#, python-format +msgid "Unable to create the network. The VXLAN ID %(vxlan_id)s is in use." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:197 +#, python-format +msgid "Vxlan ID %(vxlan_id)s not found." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:202 +msgid "" +"Unable to complete operation. VXLAN ID exists outside of the configured " +"network segment range." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:208 +#, python-format +msgid "Connection to VSM failed: %(reason)s." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:213 +#, python-format +msgid "Internal VSM Error: %(reason)s." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:218 +#, python-format +msgid "Network Binding for network %(network_id)s could not be found." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:224 +#, python-format +msgid "Port Binding for port %(port_id)s could not be found." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:230 +#, python-format +msgid "Profile-Tenant binding for profile %(profile_id)s could not be found." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:236 +msgid "No service cluster found to perform multi-segment bridging." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_faults.py:74 +msgid "Port not Found" +msgstr "" + +#: neutron/plugins/cisco/common/cisco_faults.py:75 +msgid "Unable to find a port with the specified identifier." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_faults.py:89 +msgid "Credential Not Found" +msgstr "" + +#: neutron/plugins/cisco/common/cisco_faults.py:90 +msgid "Unable to find a Credential with the specified identifier." 
+msgstr "" + +#: neutron/plugins/cisco/common/cisco_faults.py:105 +msgid "QoS Not Found" +msgstr "" + +#: neutron/plugins/cisco/common/cisco_faults.py:106 +msgid "Unable to find a QoS with the specified identifier." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_faults.py:121 +msgid "Nova tenant Not Found" +msgstr "" + +#: neutron/plugins/cisco/common/cisco_faults.py:122 +msgid "Unable to find a Novatenant with the specified identifier." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_faults.py:137 +msgid "Requested State Invalid" +msgstr "" + +#: neutron/plugins/cisco/common/cisco_faults.py:138 +msgid "Unable to update port state with specified value." +msgstr "" + +#: neutron/plugins/cisco/common/config.py:26 +msgid "Virtual Switch to use" +msgstr "" + +#: neutron/plugins/cisco/common/config.py:30 +msgid "Nexus Switch to use" +msgstr "" + +#: neutron/plugins/cisco/common/config.py:35 +#: neutron/plugins/ml2/drivers/cisco/nexus/config.py:21 +msgid "VLAN Name prefix" +msgstr "" + +#: neutron/plugins/cisco/common/config.py:37 +msgid "VLAN Name prefix for provider vlans" +msgstr "" + +#: neutron/plugins/cisco/common/config.py:39 +msgid "Provider VLANs are automatically created as needed on the Nexus switch" +msgstr "" + +#: neutron/plugins/cisco/common/config.py:42 +msgid "" +"Provider VLANs are automatically trunked as needed on the ports of the " +"Nexus switch" +msgstr "" + +#: neutron/plugins/cisco/common/config.py:45 +msgid "Enable L3 support on the Nexus switches" +msgstr "" + +#: neutron/plugins/cisco/common/config.py:47 +#: neutron/plugins/ml2/drivers/cisco/nexus/config.py:23 +msgid "Distribute SVI interfaces over all switches" +msgstr "" + +#: neutron/plugins/cisco/common/config.py:51 +msgid "Model Class" +msgstr "" + +#: neutron/plugins/cisco/common/config.py:55 +msgid "Nexus Driver Name" +msgstr "" + +#: neutron/plugins/cisco/common/config.py:60 +msgid "N1K Integration Bridge" +msgstr "" + +#: neutron/plugins/cisco/common/config.py:62 +msgid "N1K 
Enable Tunneling" +msgstr "" + +#: neutron/plugins/cisco/common/config.py:64 +msgid "N1K Tunnel Bridge" +msgstr "" + +#: neutron/plugins/cisco/common/config.py:66 +msgid "N1K Local IP" +msgstr "" + +#: neutron/plugins/cisco/common/config.py:68 +msgid "N1K Tenant Network Type" +msgstr "" + +#: neutron/plugins/cisco/common/config.py:70 +msgid "N1K Bridge Mappings" +msgstr "" + +#: neutron/plugins/cisco/common/config.py:72 +msgid "N1K VXLAN ID Ranges" +msgstr "" + +#: neutron/plugins/cisco/common/config.py:74 +msgid "N1K Network VLAN Ranges" +msgstr "" + +#: neutron/plugins/cisco/common/config.py:76 +msgid "N1K default network profile" +msgstr "" + +#: neutron/plugins/cisco/common/config.py:78 +msgid "N1K default policy profile" +msgstr "" + +#: neutron/plugins/cisco/common/config.py:80 +msgid "N1K policy profile for network node" +msgstr "" + +#: neutron/plugins/cisco/common/config.py:82 +msgid "N1K Policy profile polling duration in seconds" +msgstr "" + +#: neutron/plugins/cisco/common/config.py:135 +#: neutron/plugins/ml2/drivers/cisco/apic/config.py:68 +#: neutron/plugins/ml2/drivers/cisco/nexus/config.py:58 +msgid "Some config files were not parsed properly" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:331 +#, python-format +msgid "seg_min %(seg_min)s, seg_max %(seg_max)s" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:562 +#, python-format +msgid "Reserving specific vlan %(vlan)s on physical network %(network)s from pool" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:587 +#, python-format +msgid "vlan_id %(vlan)s on physical network %(network)s not found" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:601 +#, python-format +msgid "Unreasonable vxlan ID range %(vxlan_min)s - %(vxlan_max)s" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:643 +#, python-format +msgid "Reserving specific vxlan %s from pool" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:664 +#, python-format +msgid "vxlan_id %s not found" 
+msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:772 +msgid "create_network_profile()" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:794 +msgid "delete_network_profile()" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:808 +msgid "update_network_profile()" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:817 +msgid "get_network_profile()" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:842 +msgid "create_policy_profile()" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:853 +msgid "delete_policy_profile()" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:862 +msgid "update_policy_profile()" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:871 +msgid "get_policy_profile()" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:890 +msgid "Invalid profile type" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:907 +msgid "_profile_binding_exists()" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:915 +msgid "get_profile_binding()" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:925 +msgid "delete_profile_binding()" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:932 +#, python-format +msgid "" +"Profile-Tenant binding missing for profile ID %(profile_id)s and tenant " +"ID %(tenant_id)s" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:946 +msgid "_get_profile_bindings()" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:1096 +msgid "segment_range not required for TRUNK" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:1102 +msgid "multicast_ip_range not required" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:1229 +msgid "Invalid segment range. example range: 500-550" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:1242 +msgid "Invalid multicast ip address range. 
example range: 224.1.1.1-224.1.1.10" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:1249 +#, python-format +msgid "%s is not a valid multicast ip address" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:1253 +#, python-format +msgid "%s is reserved multicast ip address" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:1257 +#, python-format +msgid "%s is not a valid ip address" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:1261 +#, python-format +msgid "" +"Invalid multicast IP range '%(min_ip)s-%(max_ip)s': Range should be from " +"low address to high address" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:1274 +msgid "Argument segment_type missing for network profile" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:1283 +msgid "segment_type should either be vlan, overlay, multi-segment or trunk" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:1289 +msgid "Argument physical_network missing for network profile" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:1295 +msgid "segment_range not required for trunk" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:1301 +msgid "Argument sub_type missing for network profile" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:1308 +msgid "Argument segment_range missing for network profile" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:1319 +msgid "Argument multicast_ip_range missing for VXLAN multicast network profile" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:1347 +#, python-format +msgid "Segment range is invalid, select from %(min)s-%(nmin)s, %(nmax)s-%(max)s" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:1365 +#, python-format +msgid "segment range is invalid. 
Valid range is : %(min)s-%(max)s" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:1379 +#, python-format +msgid "NetworkProfile name %s already exists" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:1396 +msgid "Segment range overlaps with another profile" +msgstr "" + +#: neutron/plugins/cisco/db/network_db_v2.py:46 +msgid "get_qos() called" +msgstr "" + +#: neutron/plugins/cisco/db/network_db_v2.py:59 +msgid "add_qos() called" +msgstr "" + +#: neutron/plugins/cisco/db/nexus_db_v2.py:34 +#: neutron/plugins/ml2/drivers/cisco/nexus/nexus_db_v2.py:30 +msgid "get_nexusport_binding() called" +msgstr "" + +#: neutron/plugins/cisco/db/nexus_db_v2.py:43 +#: neutron/plugins/ml2/drivers/cisco/nexus/nexus_db_v2.py:39 +msgid "get_nexusvlan_binding() called" +msgstr "" + +#: neutron/plugins/cisco/db/nexus_db_v2.py:49 +#: neutron/plugins/ml2/drivers/cisco/nexus/nexus_db_v2.py:45 +msgid "add_nexusport_binding() called" +msgstr "" + +#: neutron/plugins/cisco/db/nexus_db_v2.py:62 +#: neutron/plugins/ml2/drivers/cisco/nexus/nexus_db_v2.py:58 +msgid "remove_nexusport_binding() called" +msgstr "" + +#: neutron/plugins/cisco/db/nexus_db_v2.py:78 +#: neutron/plugins/ml2/drivers/cisco/nexus/nexus_db_v2.py:74 +msgid "update_nexusport_binding called with no vlan" +msgstr "" + +#: neutron/plugins/cisco/db/nexus_db_v2.py:80 +#: neutron/plugins/ml2/drivers/cisco/nexus/nexus_db_v2.py:76 +msgid "update_nexusport_binding called" +msgstr "" + +#: neutron/plugins/cisco/db/nexus_db_v2.py:91 +msgid "get_nexusvm_binding() called" +msgstr "" + +#: neutron/plugins/cisco/db/nexus_db_v2.py:99 +#: neutron/plugins/ml2/drivers/cisco/nexus/nexus_db_v2.py:94 +msgid "get_port_vlan_switch_binding() called" +msgstr "" + +#: neutron/plugins/cisco/db/nexus_db_v2.py:107 +#: neutron/plugins/ml2/drivers/cisco/nexus/nexus_db_v2.py:102 +#, python-format +msgid "" +"get_port_switch_bindings() called, port:'%(port_id)s', " +"switch:'%(switch_ip)s'" +msgstr "" + +#: 
neutron/plugins/cisco/db/nexus_db_v2.py:119 +msgid "get_nexussvi_bindings() called" +msgstr "" + +#: neutron/plugins/cisco/models/virt_phy_sw_v2.py:74 +#, python-format +msgid "Loaded device plugin %s" +msgstr "" + +#: neutron/plugins/cisco/models/virt_phy_sw_v2.py:91 +#, python-format +msgid "%(module)s.%(name)s init done" +msgstr "" + +#: neutron/plugins/cisco/models/virt_phy_sw_v2.py:138 +#, python-format +msgid "No %s Plugin loaded" +msgstr "" + +#: neutron/plugins/cisco/models/virt_phy_sw_v2.py:139 +#, python-format +msgid "%(plugin_key)s: %(function_name)s with args %(args)s ignored" +msgstr "" + +#: neutron/plugins/cisco/models/virt_phy_sw_v2.py:169 +msgid "create_network() called" +msgstr "" + +#: neutron/plugins/cisco/models/virt_phy_sw_v2.py:182 +#, python-format +msgid "Provider network added to DB: %(network_id)s, %(vlan_id)s" +msgstr "" + +#: neutron/plugins/cisco/models/virt_phy_sw_v2.py:201 +msgid "update_network() called" +msgstr "" + +#: neutron/plugins/cisco/models/virt_phy_sw_v2.py:225 +#, python-format +msgid "Provider network removed from DB: %s" +msgstr "" + +#: neutron/plugins/cisco/models/virt_phy_sw_v2.py:287 +msgid "create_port() called" +msgstr "" + +#: neutron/plugins/cisco/models/virt_phy_sw_v2.py:352 +#, python-format +msgid "" +"tenant_id: %(tid)s, net_id: %(nid)s, old_device_id: %(odi)s, " +"new_device_id: %(ndi)s, old_host_id: %(ohi)s, new_host_id: %(nhi)s, " +"old_device_owner: %(odo)s, new_device_owner: %(ndo)s" +msgstr "" + +#: neutron/plugins/cisco/models/virt_phy_sw_v2.py:381 +msgid "update_port() called" +msgstr "" + +#: neutron/plugins/cisco/models/virt_phy_sw_v2.py:408 +#, python-format +msgid "Unable to update port '%s' on Nexus switch" +msgstr "" + +#: neutron/plugins/cisco/models/virt_phy_sw_v2.py:427 +msgid "delete_port() called" +msgstr "" + +#: neutron/plugins/cisco/models/virt_phy_sw_v2.py:468 +msgid "L3 enabled on Nexus plugin, create SVI on switch" +msgstr "" + +#: neutron/plugins/cisco/models/virt_phy_sw_v2.py:488 
+#: neutron/plugins/cisco/models/virt_phy_sw_v2.py:513 +msgid "L3 disabled or not Nexus plugin, send to vswitch" +msgstr "" + +#: neutron/plugins/cisco/models/virt_phy_sw_v2.py:502 +msgid "L3 enabled on Nexus plugin, delete SVI from switch" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_client.py:224 +msgid "Logical network" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_client.py:249 +msgid "network_segment_pool" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_client.py:294 +msgid "Invalid input for CIDR" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_client.py:434 +#, python-format +msgid "req: %s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_client.py:443 +#, python-format +msgid "status_code %s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_client.py:451 +#, python-format +msgid "VSM: %s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:146 +msgid "_setup_vsm" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:165 +msgid "_populate_policy_profiles" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:192 +msgid "No policy profile populated from VSM" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:229 +#: neutron/plugins/linuxbridge/lb_neutron_plugin.py:336 +#: neutron/plugins/mlnx/mlnx_plugin.py:219 +#: neutron/plugins/openvswitch/ovs_neutron_plugin.py:398 +msgid "provider:network_type required" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:233 +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:247 +#: neutron/plugins/linuxbridge/lb_neutron_plugin.py:346 +#: neutron/plugins/mlnx/mlnx_plugin.py:249 +#: neutron/plugins/openvswitch/ovs_neutron_plugin.py:408 +#: neutron/plugins/openvswitch/ovs_neutron_plugin.py:427 +msgid "provider:segmentation_id required" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:236 +msgid "provider:segmentation_id out of range (1 through 4094)" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:241 
+msgid "provider:physical_network specified for Overlay network" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:250 +msgid "provider:segmentation_id out of range (5000+)" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:254 +#: neutron/plugins/linuxbridge/lb_neutron_plugin.py:368 +#: neutron/plugins/mlnx/mlnx_plugin.py:235 +#: neutron/plugins/openvswitch/ovs_neutron_plugin.py:443 +#, python-format +msgid "provider:network_type %s not supported" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:265 +#: neutron/plugins/linuxbridge/lb_neutron_plugin.py:374 +#: neutron/plugins/mlnx/mlnx_plugin.py:275 +#: neutron/plugins/openvswitch/ovs_neutron_plugin.py:449 +#, python-format +msgid "Unknown provider:physical_network %s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:269 +#: neutron/plugins/linuxbridge/lb_neutron_plugin.py:380 +#: neutron/plugins/mlnx/mlnx_plugin.py:281 +#: neutron/plugins/openvswitch/ovs_neutron_plugin.py:455 +msgid "provider:physical_network required" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:447 +#, python-format +msgid "_populate_member_segments %s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:486 +msgid "Invalid pairing supplied" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:491 +#, python-format +msgid "Invalid UUID supplied in %s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:492 +msgid "Invalid UUID supplied" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:521 +#, python-format +msgid "Cannot add a trunk segment '%s' as a member of another trunk segment" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:526 +#, python-format +msgid "Cannot add vlan segment '%s' as a member of a vxlan trunk segment" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:532 +#, python-format +msgid "Network UUID '%s' belongs to a different physical 
network" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:537 +#, python-format +msgid "Cannot add vxlan segment '%s' as a member of a vlan trunk segment" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:542 +#, python-format +msgid "Vlan tag '%s' is out of range" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:545 +#, python-format +msgid "Vlan tag '%s' is not an integer value" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:550 +#, python-format +msgid "%s is not a valid uuid" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:597 +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:600 +msgid "n1kv:profile_id does not exist" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:612 +msgid "_send_create_logical_network" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:635 +#, python-format +msgid "_send_create_network_profile_request: %s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:645 +#, python-format +msgid "_send_update_network_profile_request: %s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:655 +#, python-format +msgid "_send_delete_network_profile_request: %s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:670 +#, python-format +msgid "_send_create_network_request: %s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:702 +#, python-format +msgid "_send_update_network_request: %s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:724 +#, python-format +msgid "add_segments=%s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:725 +#, python-format +msgid "del_segments=%s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:749 +#, python-format +msgid "_send_delete_network_request: %s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:789 +#, python-format +msgid "_send_create_subnet_request: 
%s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:799 +#, python-format +msgid "_send_update_subnet_request: %s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:810 +#, python-format +msgid "_send_delete_subnet_request: %s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:836 +#, python-format +msgid "_send_create_port_request: %s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:853 +#, python-format +msgid "_send_update_port_request: %s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:869 +#, python-format +msgid "_send_delete_port_request: %s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:900 +#, python-format +msgid "Create network: profile_id=%s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:908 +#, python-format +msgid "" +"Physical_network %(phy_net)s, seg_type %(net_type)s, seg_id %(seg_id)s, " +"multicast_ip %(multicast_ip)s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:920 +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:930 +#, python-format +msgid "Seg list %s " +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:970 +#: neutron/plugins/hyperv/hyperv_neutron_plugin.py:254 +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:198 +#: neutron/plugins/metaplugin/meta_neutron_plugin.py:232 +#: neutron/plugins/mlnx/mlnx_plugin.py:362 +#: neutron/plugins/openvswitch/ovs_neutron_plugin.py:503 +#, python-format +msgid "Created network: %s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:1028 +#, python-format +msgid "Updated network: %s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:1043 +#, python-format +msgid "Cannot delete network '%s' that is member of a trunk segment" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:1047 +#, python-format +msgid "Cannot delete network '%s' that is a member of a multi-segment network" +msgstr "" 
+ +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:1060 +#, python-format +msgid "Deleted network: %s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:1070 +#, python-format +msgid "Get network: %s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:1092 +msgid "Get networks" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:1142 +#, python-format +msgid "Create port: profile_id=%s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:1188 +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:305 +#, python-format +msgid "Created port: %s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:1199 +#, python-format +msgid "Update port: %s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:1246 +#, python-format +msgid "Get port: %s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:1266 +msgid "Get ports" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:1282 +msgid "Create subnet" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:1290 +#, python-format +msgid "Created subnet: %s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:1301 +msgid "Update subnet" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:1316 +#, python-format +msgid "Delete subnet: %s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:1331 +#, python-format +msgid "Get subnet: %s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:1351 +msgid "Get subnets" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:1436 +#, python-format +msgid "Scheduling router %s" +msgstr "" + +#: neutron/plugins/cisco/nexus/cisco_nexus_network_driver_v2.py:159 +#: neutron/plugins/cisco/nexus/cisco_nexus_network_driver_v2.py:167 +#: neutron/plugins/cisco/nexus/cisco_nexus_network_driver_v2.py:189 +#: neutron/plugins/cisco/nexus/cisco_nexus_network_driver_v2.py:195 +#: 
neutron/plugins/ml2/drivers/cisco/nexus/nexus_network_driver.py:113 +#: neutron/plugins/ml2/drivers/cisco/nexus/nexus_network_driver.py:152 +#: neutron/plugins/ml2/drivers/cisco/nexus/nexus_network_driver.py:161 +#, python-format +msgid "NexusDriver: %s" +msgstr "" + +#: neutron/plugins/cisco/nexus/cisco_nexus_network_driver_v2.py:174 +#: neutron/plugins/ml2/drivers/cisco/nexus/nexus_network_driver.py:168 +#, python-format +msgid "NexusDriver created VLAN: %s" +msgstr "" + +#: neutron/plugins/cisco/nexus/cisco_nexus_plugin_v2.py:49 +#, python-format +msgid "Loaded driver %s" +msgstr "" + +#: neutron/plugins/cisco/nexus/cisco_nexus_plugin_v2.py:64 +msgid "NexusPlugin:create_network() called" +msgstr "" + +#: neutron/plugins/cisco/nexus/cisco_nexus_plugin_v2.py:113 +#: neutron/plugins/ml2/drivers/cisco/nexus/mech_cisco_nexus.py:108 +#, python-format +msgid "Nexus: create & trunk vlan %s" +msgstr "" + +#: neutron/plugins/cisco/nexus/cisco_nexus_plugin_v2.py:120 +#, python-format +msgid "Nexus: create vlan %s" +msgstr "" + +#: neutron/plugins/cisco/nexus/cisco_nexus_plugin_v2.py:125 +#: neutron/plugins/ml2/drivers/cisco/nexus/mech_cisco_nexus.py:112 +#, python-format +msgid "Nexus: trunk vlan %s" +msgstr "" + +#: neutron/plugins/cisco/nexus/cisco_nexus_plugin_v2.py:138 +#, python-format +msgid "Nexus: delete & untrunk vlan %s" +msgstr "" + +#: neutron/plugins/cisco/nexus/cisco_nexus_plugin_v2.py:144 +#, python-format +msgid "Nexus: delete vlan %s" +msgstr "" + +#: neutron/plugins/cisco/nexus/cisco_nexus_plugin_v2.py:147 +#, python-format +msgid "Nexus: untrunk vlan %s" +msgstr "" + +#: neutron/plugins/cisco/nexus/cisco_nexus_plugin_v2.py:204 +msgid "Grabbing a switch to create SVI" +msgstr "" + +#: neutron/plugins/cisco/nexus/cisco_nexus_plugin_v2.py:207 +msgid "Using round robin to create SVI" +msgstr "" + +#: neutron/plugins/cisco/nexus/cisco_nexus_plugin_v2.py:226 +msgid "No round robin or zero weights, using first switch" +msgstr "" + +#: 
neutron/plugins/cisco/nexus/cisco_nexus_plugin_v2.py:236 +msgid "NexusPlugin:delete_network() called" +msgstr "" + +#: neutron/plugins/cisco/nexus/cisco_nexus_plugin_v2.py:244 +msgid "NexusPlugin:update_network() called" +msgstr "" + +#: neutron/plugins/cisco/nexus/cisco_nexus_plugin_v2.py:252 +msgid "NexusPlugin:create_port() called" +msgstr "" + +#: neutron/plugins/cisco/nexus/cisco_nexus_plugin_v2.py:260 +msgid "NexusPlugin:delete_port() called" +msgstr "" + +#: neutron/plugins/cisco/nexus/cisco_nexus_plugin_v2.py:272 +#, python-format +msgid "delete_network(): provider vlan %s" +msgstr "" + +#: neutron/plugins/cisco/nexus/cisco_nexus_plugin_v2.py:329 +msgid "NexusPlugin:update_port() called" +msgstr "" + +#: neutron/plugins/cisco/nexus/cisco_nexus_plugin_v2.py:338 +msgid "NexusPlugin:plug_interface() called" +msgstr "" + +#: neutron/plugins/cisco/nexus/cisco_nexus_plugin_v2.py:346 +msgid "NexusPlugin:unplug_interface() called" +msgstr "" + +#: neutron/plugins/common/utils.py:32 +#, python-format +msgid "%s is not a valid VLAN tag" +msgstr "" + +#: neutron/plugins/common/utils.py:36 +msgid "End of VLAN range is less than start of VLAN range" +msgstr "" + +#: neutron/plugins/embrane/base_plugin.py:109 +#: neutron/plugins/embrane/agent/dispatcher.py:134 +#: neutron/services/loadbalancer/drivers/embrane/poller.py:56 +#: neutron/services/loadbalancer/drivers/embrane/agent/dispatcher.py:108 +msgid "Unhandled exception occurred" +msgstr "" + +#: neutron/plugins/embrane/base_plugin.py:174 +#: neutron/plugins/embrane/base_plugin.py:193 +#, python-format +msgid "The following routers have not physical match: %s" +msgstr "" + +#: neutron/plugins/embrane/base_plugin.py:179 +#, python-format +msgid "Requested router: %s" +msgstr "" + +#: neutron/plugins/embrane/base_plugin.py:231 +#, python-format +msgid "Deleting router=%s" +msgstr "" + +#: neutron/plugins/embrane/agent/operations/router_operations.py:99 +#, python-format +msgid "The router %s had no physical 
representation,likely already deleted" +msgstr "" + +#: neutron/plugins/embrane/agent/operations/router_operations.py:128 +#, python-format +msgid "Interface %s not found in the heleos back-end,likely already deleted" +msgstr "" + +#: neutron/plugins/embrane/common/config.py:25 +#: neutron/services/loadbalancer/drivers/embrane/config.py:25 +msgid "ESM management root address" +msgstr "" + +#: neutron/plugins/embrane/common/config.py:27 +#: neutron/services/loadbalancer/drivers/embrane/config.py:27 +msgid "ESM admin username." +msgstr "" + +#: neutron/plugins/embrane/common/config.py:30 +#: neutron/services/loadbalancer/drivers/embrane/config.py:30 +msgid "ESM admin password." +msgstr "" + +#: neutron/plugins/embrane/common/config.py:32 +msgid "Router image id (Embrane FW/VPN)" +msgstr "" + +#: neutron/plugins/embrane/common/config.py:34 +msgid "In band Security Zone id" +msgstr "" + +#: neutron/plugins/embrane/common/config.py:36 +msgid "Out of band Security Zone id" +msgstr "" + +#: neutron/plugins/embrane/common/config.py:38 +msgid "Management Security Zone id" +msgstr "" + +#: neutron/plugins/embrane/common/config.py:40 +msgid "Dummy user traffic Security Zone id" +msgstr "" + +#: neutron/plugins/embrane/common/config.py:42 +#: neutron/services/loadbalancer/drivers/embrane/config.py:42 +msgid "Shared resource pool id" +msgstr "" + +#: neutron/plugins/embrane/common/config.py:44 +#: neutron/services/loadbalancer/drivers/embrane/config.py:49 +msgid "Define if the requests have run asynchronously or not" +msgstr "" + +#: neutron/plugins/embrane/common/constants.py:51 +#: neutron/services/loadbalancer/drivers/embrane/constants.py:50 +#, python-format +msgid "Dva is pending for the following reason: %s" +msgstr "" + +#: neutron/plugins/embrane/common/constants.py:52 +msgid "" +"Dva can't be found to execute the operation, probably was cancelled " +"through the heleos UI" +msgstr "" + +#: neutron/plugins/embrane/common/constants.py:54 +#: 
neutron/services/loadbalancer/drivers/embrane/constants.py:53 +#, python-format +msgid "Dva seems to be broken for reason %s" +msgstr "" + +#: neutron/plugins/embrane/common/constants.py:55 +#, python-format +msgid "Dva interface seems to be broken for reason %s" +msgstr "" + +#: neutron/plugins/embrane/common/constants.py:57 +#: neutron/services/loadbalancer/drivers/embrane/constants.py:54 +#, python-format +msgid "Dva creation failed reason %s" +msgstr "" + +#: neutron/plugins/embrane/common/constants.py:58 +#: neutron/services/loadbalancer/drivers/embrane/constants.py:55 +#, python-format +msgid "Dva creation is in pending state for reason %s" +msgstr "" + +#: neutron/plugins/embrane/common/constants.py:60 +#: neutron/services/loadbalancer/drivers/embrane/constants.py:57 +#, python-format +msgid "Dva configuration failed for reason %s" +msgstr "" + +#: neutron/plugins/embrane/common/constants.py:61 +#, python-format +msgid "" +"Failed to delete the backend router for reason %s. Please remove it " +"manually through the heleos UI" +msgstr "" + +#: neutron/plugins/embrane/common/exceptions.py:24 +#, python-format +msgid "An unexpected error occurred:%(err_msg)s" +msgstr "" + +#: neutron/plugins/embrane/common/exceptions.py:28 +#, python-format +msgid "%(err_msg)s" +msgstr "" + +#: neutron/plugins/embrane/common/utils.py:47 +msgid "No ip allocation set" +msgstr "" + +#: neutron/plugins/embrane/l2base/support_exceptions.py:24 +#, python-format +msgid "Cannot retrieve utif info for the following reason: %(err_msg)s" +msgstr "" + +#: neutron/plugins/embrane/l2base/openvswitch/openvswitch_support.py:46 +msgid "" +"No segmentation_id found for the network, please be sure that " +"tenant_network_type is vlan" +msgstr "" + +#: neutron/plugins/hyperv/db.py:42 +#: neutron/plugins/linuxbridge/db/l2network_db_v2.py:113 +#: neutron/plugins/openvswitch/ovs_db_v2.py:131 +#, python-format +msgid "" +"Reserving vlan %(vlan_id)s on physical network %(physical_network)s from " 
+"pool" +msgstr "" + +#: neutron/plugins/hyperv/db.py:57 +#, python-format +msgid "Reserving flat physical network %(physical_network)s from pool" +msgstr "" + +#: neutron/plugins/hyperv/db.py:80 +#: neutron/plugins/linuxbridge/db/l2network_db_v2.py:136 +#: neutron/plugins/ml2/drivers/type_vlan.py:204 +#: neutron/plugins/openvswitch/ovs_db_v2.py:155 +#, python-format +msgid "" +"Reserving specific vlan %(vlan_id)s on physical network " +"%(physical_network)s from pool" +msgstr "" + +#: neutron/plugins/hyperv/db.py:137 +#, python-format +msgid "Releasing vlan %(vlan_id)s on physical network %(physical_network)s" +msgstr "" + +#: neutron/plugins/hyperv/db.py:142 +#: neutron/plugins/linuxbridge/db/l2network_db_v2.py:177 +#: neutron/plugins/openvswitch/ovs_db_v2.py:196 +#, python-format +msgid "vlan_id %(vlan_id)s on physical network %(physical_network)s not found" +msgstr "" + +#: neutron/plugins/hyperv/db.py:167 neutron/plugins/hyperv/db.py:180 +#: neutron/plugins/linuxbridge/db/l2network_db_v2.py:64 +#: neutron/plugins/linuxbridge/db/l2network_db_v2.py:83 +#: neutron/plugins/ml2/drivers/type_vlan.py:128 +#: neutron/plugins/ml2/drivers/type_vlan.py:149 +#: neutron/plugins/openvswitch/ovs_db_v2.py:87 +#: neutron/plugins/openvswitch/ovs_db_v2.py:105 +#, python-format +msgid "" +"Removing vlan %(vlan_id)s on physical network %(physical_network)s from " +"pool" +msgstr "" + +#: neutron/plugins/hyperv/hyperv_neutron_plugin.py:46 +msgid "Network type for tenant networks (local, flat, vlan or none)" +msgstr "" + +#: neutron/plugins/hyperv/hyperv_neutron_plugin.py:50 +#: neutron/plugins/linuxbridge/common/config.py:35 +#: neutron/plugins/mlnx/common/config.py:32 +#: neutron/plugins/openvswitch/common/config.py:51 +msgid "List of :: or " +msgstr "" + +#: neutron/plugins/hyperv/hyperv_neutron_plugin.py:78 +#: neutron/plugins/hyperv/hyperv_neutron_plugin.py:100 +#, python-format +msgid "segmentation_id specified for %s network" +msgstr "" + +#: 
neutron/plugins/hyperv/hyperv_neutron_plugin.py:85 +#, python-format +msgid "physical_network specified for %s network" +msgstr "" + +#: neutron/plugins/hyperv/hyperv_neutron_plugin.py:127 +msgid "physical_network not provided" +msgstr "" + +#: neutron/plugins/hyperv/hyperv_neutron_plugin.py:180 +#, python-format +msgid "Invalid tenant_network_type: %s. Agent terminated!" +msgstr "" + +#: neutron/plugins/hyperv/hyperv_neutron_plugin.py:203 +#: neutron/plugins/linuxbridge/lb_neutron_plugin.py:297 +#: neutron/plugins/ml2/drivers/type_vlan.py:94 +#: neutron/plugins/mlnx/mlnx_plugin.py:180 +#: neutron/plugins/openvswitch/ovs_neutron_plugin.py:350 +#, python-format +msgid "Network VLAN ranges: %s" +msgstr "" + +#: neutron/plugins/hyperv/hyperv_neutron_plugin.py:228 +#, python-format +msgid "Network type %s not supported" +msgstr "" + +#: neutron/plugins/hyperv/rpc_callbacks.py:71 +#: neutron/plugins/linuxbridge/lb_neutron_plugin.py:112 +#: neutron/plugins/mlnx/rpc_callbacks.py:92 +#: neutron/plugins/openvswitch/ovs_neutron_plugin.py:114 +#, python-format +msgid "Device %(device)s no longer exists on %(agent_id)s" +msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:52 +msgid "" +"List of : where the physical networks can be " +"expressed with wildcards, e.g.: .\"*:external\"" +msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:58 +msgid "Private vswitch name used for local networks" +msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:60 +#: neutron/plugins/linuxbridge/common/config.py:66 +#: neutron/plugins/mlnx/common/config.py:69 +#: neutron/plugins/nec/common/config.py:31 +#: neutron/plugins/oneconvergence/lib/config.py:47 +#: neutron/plugins/openvswitch/common/config.py:63 +#: neutron/plugins/ryu/common/config.py:45 +msgid "" +"The number of seconds the agent will wait between polling for local " +"device changes." 
+msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:64 +msgid "" +"Enables metrics collections for switch ports by using Hyper-V's metric " +"APIs. Collected data can by retrieved by other apps and services, e.g.: " +"Ceilometer. Requires Hyper-V / Windows Server 2012 and above" +msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:71 +msgid "" +"Specifies the maximum number of retries to enable Hyper-V's port metrics " +"collection. The agent will try to enable the feature once every " +"polling_interval period for at most metrics_max_retries or until it " +"succeedes." +msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:153 +#, python-format +msgid "Failed reporting state! %s" +msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:188 +#, python-format +msgid "Invalid physical network mapping: %s" +msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:209 +#, python-format +msgid "network_delete received. Deleting network %s" +msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:215 +#, python-format +msgid "Network %s not defined on agent." 
+msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:218 +msgid "port_delete received" +msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:223 +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:173 +msgid "port_update received" +msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:245 +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:131 +#, python-format +msgid "Provisioning network %s" +msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:258 +#, python-format +msgid "" +"Cannot provision unknown network type %(network_type)s for network " +"%(net_uuid)s" +msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:270 +#, python-format +msgid "Reclaiming local network %s" +msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:278 +#, python-format +msgid "Binding port %s" +msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:291 +#, python-format +msgid "Binding VLAN ID %(segmentation_id)s to switch port %(port_id)s" +msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:304 +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:116 +#, python-format +msgid "Unsupported network type %s" +msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:313 +#, python-format +msgid "Network %s is not avalailable on this agent" +msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:317 +#, python-format +msgid "Unbinding port %s" +msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:330 +#, python-format +msgid "Port metrics enabled for port: %s" +msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:334 +#, python-format +msgid "Port metrics raw enabling for port: %s" +msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:359 +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:211 +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:314 +#, python-format +msgid 
"No port %s defined on agent." +msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:364 +#, python-format +msgid "Adding port %s" +msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:372 +#, python-format +msgid "Unable to get port details for device %(device)s: %(e)s" +msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:379 +#, python-format +msgid "Port %(device)s updated. Details: %(device_details)s" +msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:405 +#, python-format +msgid "Removing port %s" +msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:413 +#, python-format +msgid "Removing port failed for device %(device)s: %(e)s" +msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:438 +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:965 +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:382 +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1267 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1327 +msgid "Agent out of sync with plugin!" +msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:446 +msgid "Agent loop has new devices!" +msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:453 +#, python-format +msgid "Error in agent event loop: %s" +msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:461 +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:405 +#, python-format +msgid "Loop iteration exceeded interval (%(polling_interval)s vs. %(elapsed)s)" +msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:474 +#: neutron/plugins/ibm/agent/sdnve_neutron_agent.py:269 +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:1020 +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:158 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1511 +msgid "Agent initialized successfully, now running... 
" +msgstr "" + +#: neutron/plugins/hyperv/agent/security_groups_driver.py:62 +#, python-format +msgid "Hyper-V Exception: %(hyperv_exeption)s while adding rule: %(rule)s" +msgstr "" + +#: neutron/plugins/hyperv/agent/security_groups_driver.py:72 +#, python-format +msgid "Hyper-V Exception: %(hyperv_exeption)s while removing rule: %(rule)s" +msgstr "" + +#: neutron/plugins/hyperv/agent/security_groups_driver.py:92 +msgid "Aplying port filter." +msgstr "" + +#: neutron/plugins/hyperv/agent/security_groups_driver.py:95 +msgid "Updating port rules." +msgstr "" + +#: neutron/plugins/hyperv/agent/security_groups_driver.py:108 +#, python-format +msgid "Creating %(new)s new rules, removing %(old)s old rules." +msgstr "" + +#: neutron/plugins/hyperv/agent/security_groups_driver.py:119 +msgid "Removing port filter" +msgstr "" + +#: neutron/plugins/hyperv/agent/utils.py:38 +#, python-format +msgid "HyperVException: %(msg)s" +msgstr "" + +#: neutron/plugins/hyperv/agent/utils.py:83 +#, python-format +msgid "Vnic not found: %s" +msgstr "" + +#: neutron/plugins/hyperv/agent/utils.py:118 +#, python-format +msgid "Job failed with error %d" +msgstr "" + +#: neutron/plugins/hyperv/agent/utils.py:137 +#, python-format +msgid "" +"WMI job failed with status %(job_state)d. Error details: %(err_sum_desc)s" +" - %(err_desc)s - Error code: %(err_code)d" +msgstr "" + +#: neutron/plugins/hyperv/agent/utils.py:146 +#, python-format +msgid "WMI job failed with status %(job_state)d. Error details: %(error)s" +msgstr "" + +#: neutron/plugins/hyperv/agent/utils.py:150 +#, python-format +msgid "WMI job failed with status %d. 
No error description available" +msgstr "" + +#: neutron/plugins/hyperv/agent/utils.py:155 +#, python-format +msgid "WMI job succeeded: %(desc)s, Elapsed=%(elap)s" +msgstr "" + +#: neutron/plugins/hyperv/agent/utils.py:169 +#, python-format +msgid "Failed creating port for %s" +msgstr "" + +#: neutron/plugins/hyperv/agent/utils.py:189 +#, python-format +msgid "" +"Failed to disconnect port %(switch_port_name)s from switch " +"%(vswitch_name)s with error %(ret_val)s" +msgstr "" + +#: neutron/plugins/hyperv/agent/utils.py:200 +#, python-format +msgid "" +"Failed to delete port %(switch_port_name)s from switch %(vswitch_name)s " +"with error %(ret_val)s" +msgstr "" + +#: neutron/plugins/hyperv/agent/utils.py:207 +#: neutron/plugins/hyperv/agent/utilsv2.py:137 +#, python-format +msgid "VSwitch not found: %s" +msgstr "" + +#: neutron/plugins/hyperv/agent/utils.py:248 +#: neutron/plugins/hyperv/agent/utils.py:252 +msgid "Metrics collection is not supported on this version of Hyper-V" +msgstr "" + +#: neutron/plugins/hyperv/agent/utilsfactory.py:34 +msgid "Force V1 WMI utility classes" +msgstr "" + +#: neutron/plugins/hyperv/agent/utilsfactory.py:63 +msgid "" +"V1 virtualization namespace no longer supported on Windows Server / " +"Hyper-V Server 2012 R2 or above." 
+msgstr "" + +#: neutron/plugins/hyperv/agent/utilsfactory.py:70 +#, python-format +msgid "Loading class: %(module_name)s.%(class_name)s" +msgstr "" + +#: neutron/plugins/hyperv/agent/utilsv2.py:160 +#: neutron/plugins/hyperv/agent/utilsv2.py:320 +#, python-format +msgid "Port Allocation not found: %s" +msgstr "" + +#: neutron/plugins/hyperv/agent/utilsv2.py:270 +#, python-format +msgid "Cannot get VM summary data for: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_api.py:77 +#, python-format +msgid "The IP addr of available SDN-VE controllers: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_api.py:80 +#, python-format +msgid "The SDN-VE controller IP address: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_api.py:97 +#, python-format +msgid "unable to serialize object type: '%s'" +msgstr "" + +#: neutron/plugins/ibm/sdnve_api.py:164 +#, python-format +msgid "" +"Sending request to SDN-VE. url: %(myurl)s method: %(method)s body: " +"%(body)s header: %(header)s " +msgstr "" + +#: neutron/plugins/ibm/sdnve_api.py:177 +#, python-format +msgid "Error: Could not reach server: %(url)s Exception: %(excp)s." 
+msgstr "" + +#: neutron/plugins/ibm/sdnve_api.py:184 +#, python-format +msgid "Error message: %(reply)s -- Status: %(status)s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_api.py:187 +#, python-format +msgid "Received response status: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_api.py:194 +#, python-format +msgid "Deserialized body: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_api.py:236 +msgid "Bad resource for forming a list request" +msgstr "" + +#: neutron/plugins/ibm/sdnve_api.py:246 +msgid "Bad resource for forming a show request" +msgstr "" + +#: neutron/plugins/ibm/sdnve_api.py:256 +msgid "Bad resource for forming a create request" +msgstr "" + +#: neutron/plugins/ibm/sdnve_api.py:268 +msgid "Bad resource for forming a update request" +msgstr "" + +#: neutron/plugins/ibm/sdnve_api.py:279 +msgid "Bad resource for forming a delete request" +msgstr "" + +#: neutron/plugins/ibm/sdnve_api.py:307 +#, python-format +msgid "Non matching tenant and network types: %(ttype)s %(ntype)s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_api.py:369 +#, python-format +msgid "Did not find tenant: %r" +msgstr "" + +#: neutron/plugins/ibm/sdnve_api_fake.py:32 +msgid "Fake SDNVE controller initialized" +msgstr "" + +#: neutron/plugins/ibm/sdnve_api_fake.py:35 +msgid "Fake SDNVE controller: list" +msgstr "" + +#: neutron/plugins/ibm/sdnve_api_fake.py:39 +msgid "Fake SDNVE controller: show" +msgstr "" + +#: neutron/plugins/ibm/sdnve_api_fake.py:43 +msgid "Fake SDNVE controller: create" +msgstr "" + +#: neutron/plugins/ibm/sdnve_api_fake.py:47 +msgid "Fake SDNVE controller: update" +msgstr "" + +#: neutron/plugins/ibm/sdnve_api_fake.py:51 +msgid "Fake SDNVE controller: delete" +msgstr "" + +#: neutron/plugins/ibm/sdnve_api_fake.py:55 +msgid "Fake SDNVE controller: get tenant by id" +msgstr "" + +#: neutron/plugins/ibm/sdnve_api_fake.py:59 +msgid "Fake SDNVE controller: check and create tenant" +msgstr "" + +#: neutron/plugins/ibm/sdnve_api_fake.py:63 +msgid "Fake SDNVE controller: 
get controller" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:152 +msgid "Set a new controller if needed." +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:158 +#, python-format +msgid "Set the controller to a new controller: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:166 +#, python-format +msgid "Original SDN-VE HTTP request: %(orig)s; New request: %(new)s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:176 +#, python-format +msgid "Create network in progress: %r" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:185 +msgid "Create net failed: no SDN-VE tenant." +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:196 +#, python-format +msgid "Create net failed in SDN-VE: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:203 +#, python-format +msgid "Update network in progress: %r" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:223 +#, python-format +msgid "Update net failed in SDN-VE: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:229 +#, python-format +msgid "Delete network in progress: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:239 +#, python-format +msgid "Delete net failed after deleting the network in DB: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:244 +#, python-format +msgid "Get network in progress: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:250 +msgid "Get networks in progress" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:260 +#, python-format +msgid "Create port in progress: %r" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:276 +msgid "Create port does not have tenant id info" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:282 +#, python-format +msgid "Create port does not have tenant id info; obtained is: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:303 +#, python-format +msgid "Create port failed 
in SDN-VE: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:310 +#, python-format +msgid "Update port in progress: %r" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:337 +#, python-format +msgid "Update port failed in SDN-VE: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:343 +#, python-format +msgid "Delete port in progress: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:356 +#, python-format +msgid "Delete port operation failed in SDN-VE after deleting the port from DB: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:365 +#, python-format +msgid "Create subnet in progress: %r" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:377 +#, python-format +msgid "Create subnet failed in SDN-VE: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:379 +#, python-format +msgid "Subnet created: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:385 +#, python-format +msgid "Update subnet in progress: %r" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:410 +#, python-format +msgid "Update subnet failed in SDN-VE: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:416 +#, python-format +msgid "Delete subnet in progress: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:421 +#, python-format +msgid "" +"Delete subnet operation failed in SDN-VE after deleting the subnet from " +"DB: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:430 +#, python-format +msgid "Create router in progress: %r" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:433 +#, python-format +msgid "Ignoring admin_state_up=False for router=%r. Overriding with True" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:443 +msgid "Create router failed: no SDN-VE tenant." 
+msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:451 +#, python-format +msgid "Create router failed in SDN-VE: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:453 +#, python-format +msgid "Router created: %r" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:458 +#, python-format +msgid "Update router in progress: id=%(id)s router=%(router)r" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:465 +msgid "admin_state_up=False routers are not supported." +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:489 +#, python-format +msgid "Update router failed in SDN-VE: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:495 +#, python-format +msgid "Delete router in progress: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:502 +#, python-format +msgid "" +"Delete router operation failed in SDN-VE after deleting the router in DB:" +" %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:507 +#, python-format +msgid "" +"Add router interface in progress: router_id=%(router_id)s " +"interface_info=%(interface_info)r" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:515 +#, python-format +msgid "SdnvePluginV2.add_router_interface called. Port info: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:529 +#, python-format +msgid "Update router-add-interface failed in SDN-VE: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:532 +#, python-format +msgid "Added router interface: %r" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:536 +#, python-format +msgid "" +"Add router interface only called: router_id=%(router_id)s " +"interface_info=%(interface_info)r" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:546 +msgid "" +"SdnvePluginV2._add_router_interface_only: failed to add the interface in " +"the roll back. 
of a remove_router_interface operation" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:552 +#, python-format +msgid "" +"Remove router interface in progress: router_id=%(router_id)s " +"interface_info=%(interface_info)r" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:561 +msgid "No port ID" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:563 +#, python-format +msgid "SdnvePluginV2.remove_router_interface port: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:567 +msgid "No fixed IP" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:572 +#, python-format +msgid "SdnvePluginV2.remove_router_interface subnet_id: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:595 +#, python-format +msgid "Update router-remove-interface failed SDN-VE: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:616 +#, python-format +msgid "Create floatingip in progress: %r" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:627 +#, python-format +msgid "Creating floating ip operation failed in SDN-VE controller: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:630 +#, python-format +msgid "Created floatingip : %r" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:635 +#, python-format +msgid "Update floatingip in progress: %r" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:655 +#, python-format +msgid "Update floating ip failed in SDN-VE: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:661 +#, python-format +msgid "Delete floatingip in progress: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:666 +#, python-format +msgid "Delete floatingip failed in SDN-VE: %s" +msgstr "" + +#: neutron/plugins/ibm/agent/sdnve_neutron_agent.py:139 +msgid "info_update received" +msgstr "" + +#: neutron/plugins/ibm/agent/sdnve_neutron_agent.py:144 +#, python-format +msgid "info_update received. 
New controlleris to be set to: %s" +msgstr "" + +#: neutron/plugins/ibm/agent/sdnve_neutron_agent.py:150 +msgid "info_update received. New controlleris set to be out of band" +msgstr "" + +#: neutron/plugins/ibm/agent/sdnve_neutron_agent.py:195 +#, python-format +msgid "Mapping physical network %(physical_network)s to interface %(interface)s" +msgstr "" + +#: neutron/plugins/ibm/agent/sdnve_neutron_agent.py:201 +#, python-format +msgid "" +"Interface %(interface)s for physical network %(physical_network)s does " +"not exist. Agent terminated!" +msgstr "" + +#: neutron/plugins/ibm/agent/sdnve_neutron_agent.py:219 +msgid "Agent in the rpc loop." +msgstr "" + +#: neutron/plugins/ibm/agent/sdnve_neutron_agent.py:241 +#, python-format +msgid "Controller IPs: %s" +msgstr "" + +#: neutron/plugins/ibm/agent/sdnve_neutron_agent.py:263 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1498 +#, python-format +msgid "%s Agent terminated!" +msgstr "" + +#: neutron/plugins/ibm/common/config.py:28 +msgid "If set to True uses a fake controller." 
+msgstr "" + +#: neutron/plugins/ibm/common/config.py:30 +msgid "Base URL for SDN-VE controller REST API" +msgstr "" + +#: neutron/plugins/ibm/common/config.py:32 +msgid "List of IP addresses of SDN-VE controller(s)" +msgstr "" + +#: neutron/plugins/ibm/common/config.py:34 +msgid "SDN-VE RPC subject" +msgstr "" + +#: neutron/plugins/ibm/common/config.py:36 +msgid "SDN-VE controller port number" +msgstr "" + +#: neutron/plugins/ibm/common/config.py:38 +msgid "SDN-VE request/response format" +msgstr "" + +#: neutron/plugins/ibm/common/config.py:40 +msgid "SDN-VE administrator user id" +msgstr "" + +#: neutron/plugins/ibm/common/config.py:42 +msgid "SDN-VE administrator password" +msgstr "" + +#: neutron/plugins/ibm/common/config.py:44 +#: neutron/plugins/nec/common/config.py:26 +#: neutron/plugins/openvswitch/common/config.py:30 +#: neutron/plugins/ryu/common/config.py:24 +msgid "Integration bridge to use" +msgstr "" + +#: neutron/plugins/ibm/common/config.py:46 +msgid "Reset the integration bridge before use" +msgstr "" + +#: neutron/plugins/ibm/common/config.py:48 +msgid "Indicating if controller is out of band or not" +msgstr "" + +#: neutron/plugins/ibm/common/config.py:51 +msgid "List of :" +msgstr "" + +#: neutron/plugins/ibm/common/config.py:53 +msgid "Tenant type: OVERLAY (default) or OF" +msgstr "" + +#: neutron/plugins/ibm/common/config.py:55 +msgid "" +"The string in tenant description that indicates the tenant is a OVERLAY " +"tenant" +msgstr "" + +#: neutron/plugins/ibm/common/config.py:58 +msgid "The string in tenant description that indicates the tenant is a OF tenant" +msgstr "" + +#: neutron/plugins/ibm/common/config.py:64 +msgid "Agent polling interval if necessary" +msgstr "" + +#: neutron/plugins/ibm/common/config.py:66 +msgid "Using root helper" +msgstr "" + +#: neutron/plugins/ibm/common/config.py:68 +msgid "Whether using rpc" +msgstr "" + +#: neutron/plugins/ibm/common/exceptions.py:23 +#, python-format +msgid "" +"An unexpected error occurred 
in the SDN-VE Plugin. Here is the error " +"message: %(msg)s" +msgstr "" + +#: neutron/plugins/ibm/common/exceptions.py:28 +#, python-format +msgid "The input does not contain nececessary info: %(msg)s" +msgstr "" + +#: neutron/plugins/linuxbridge/lb_neutron_plugin.py:120 +#: neutron/plugins/linuxbridge/lb_neutron_plugin.py:144 +#: neutron/plugins/ml2/rpc.py:170 neutron/plugins/ml2/rpc.py:192 +#: neutron/plugins/openvswitch/ovs_neutron_plugin.py:122 +#: neutron/plugins/openvswitch/ovs_neutron_plugin.py:147 +#, python-format +msgid "Device %(device)s not bound to the agent host %(host)s" +msgstr "" + +#: neutron/plugins/linuxbridge/lb_neutron_plugin.py:138 +#: neutron/plugins/openvswitch/ovs_neutron_plugin.py:141 +#, python-format +msgid "Device %(device)s up on %(agent_id)s" +msgstr "" + +#: neutron/plugins/linuxbridge/lb_neutron_plugin.py:258 +#: neutron/plugins/mlnx/mlnx_plugin.py:200 +#, python-format +msgid "Invalid tenant_network_type: %s. Service terminated!" +msgstr "" + +#: neutron/plugins/linuxbridge/lb_neutron_plugin.py:269 +msgid "Linux Bridge Plugin initialization complete" +msgstr "" + +#: neutron/plugins/linuxbridge/lb_neutron_plugin.py:295 +#, python-format +msgid "%s. Agent terminated!" 
+msgstr "" + +#: neutron/plugins/linuxbridge/lb_neutron_plugin.py:340 +#: neutron/plugins/mlnx/mlnx_plugin.py:244 +#: neutron/plugins/openvswitch/ovs_neutron_plugin.py:402 +msgid "provider:segmentation_id specified for flat network" +msgstr "" + +#: neutron/plugins/linuxbridge/lb_neutron_plugin.py:349 +#: neutron/plugins/mlnx/mlnx_plugin.py:252 +#: neutron/plugins/openvswitch/ovs_neutron_plugin.py:411 +#, python-format +msgid "provider:segmentation_id out of range (%(min_id)s through %(max_id)s)" +msgstr "" + +#: neutron/plugins/linuxbridge/lb_neutron_plugin.py:356 +#: neutron/plugins/mlnx/mlnx_plugin.py:260 +#: neutron/plugins/openvswitch/ovs_neutron_plugin.py:431 +msgid "provider:physical_network specified for local network" +msgstr "" + +#: neutron/plugins/linuxbridge/lb_neutron_plugin.py:362 +#: neutron/plugins/mlnx/mlnx_plugin.py:264 +#: neutron/plugins/openvswitch/ovs_neutron_plugin.py:437 +msgid "provider:segmentation_id specified for local network" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:85 +msgid "VXLAN is enabled, a valid local_ip must be provided" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:99 +msgid "Invalid Network ID, will lead to incorrect bridgename" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:106 +msgid "Invalid VLAN ID, will lead to incorrect subinterface name" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:113 +msgid "Invalid Interface ID, will lead to incorrect tap device name" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:122 +#, python-format +msgid "Invalid Segmentation ID: %s, will lead to incorrect vxlan device name" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:185 +#, python-format +msgid "Failed creating vxlan interface for %(segmentation_id)s" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:217 
+#, python-format +msgid "" +"Creating subinterface %(interface)s for VLAN %(vlan_id)s on interface " +"%(physical_interface)s" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:230 +#, python-format +msgid "Done creating subinterface %s" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:237 +#, python-format +msgid "Creating vxlan interface %(interface)s for VNI %(segmentation_id)s" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:252 +#, python-format +msgid "Done creating vxlan interface %s" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:305 +#, python-format +msgid "Starting bridge %(bridge_name)s for subinterface %(interface)s" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:320 +#, python-format +msgid "Done starting bridge %(bridge_name)s for subinterface %(interface)s" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:342 +#, python-format +msgid "Unable to add %(interface)s to %(bridge_name)s! Exception: %(e)s" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:355 +#, python-format +msgid "Unable to add vxlan interface for network %s" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:362 +#, python-format +msgid "No mapping for physical network %s" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:371 +#, python-format +msgid "Unknown network_type %(network_type)s for network %(network_id)s." 
+msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:384 +#, python-format +msgid "Tap device: %s does not exist on this host, skipped" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:402 +#, python-format +msgid "Adding device %(tap_device_name)s to bridge %(bridge_name)s" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:411 +#, python-format +msgid "%(tap_device_name)s already exists on bridge %(bridge_name)s" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:449 +#, python-format +msgid "Deleting bridge %s" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:456 +#, python-format +msgid "Done deleting bridge %s" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:459 +#, python-format +msgid "Cannot delete bridge %s, does not exist" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:473 +#, python-format +msgid "Removing device %(interface_name)s from bridge %(bridge_name)s" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:480 +#, python-format +msgid "Done removing device %(interface_name)s from bridge %(bridge_name)s" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:486 +#, python-format +msgid "" +"Cannot remove device %(interface_name)s bridge %(bridge_name)s does not " +"exist" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:494 +#, python-format +msgid "Deleting subinterface %s for vlan" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:501 +#, python-format +msgid "Done deleting subinterface %s" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:505 +#, python-format +msgid "Deleting vxlan interface %s for vlan" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:510 +#, 
python-format +msgid "Done deleting vxlan interface %s" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:524 +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:559 +#, python-format +msgid "" +"Option \"%(option)s\" must be supported by command \"%(command)s\" to " +"enable %(mode)s mode" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:536 +msgid "No valid Segmentation ID to perform UCAST test." +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:553 +msgid "" +"VXLAN muticast group must be provided in vxlan_group option to enable " +"VXLAN MCAST mode" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:578 +msgid "" +"Linux kernel vxlan module and iproute2 3.8 or above are required to " +"enable VXLAN." +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:588 +#, python-format +msgid "Using %s VXLAN mode" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:665 +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:164 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:276 +msgid "network_delete received" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:679 +#, python-format +msgid "port_update RPC received for port: %s" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:682 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:317 +msgid "fdb_add received" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:704 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:345 +msgid "fdb_remove received" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:726 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:424 +msgid "update chg_ip received" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:751 +#: 
neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:447 +msgid "fdb_update received" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:808 +msgid "Unable to obtain MAC address for unique ID. Agent terminated!" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:812 +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:254 +#: neutron/plugins/nec/agent/nec_neutron_agent.py:144 +#: neutron/plugins/oneconvergence/agent/nvsd_neutron_agent.py:109 +#, python-format +msgid "RPC agent_id: %s" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:871 +#, python-format +msgid "Treating added or updated device: %s" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:877 +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1069 +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1108 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1100 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1139 +#, python-format +msgid "Unable to get port details for %(device)s: %(e)s" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:883 +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1075 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1106 +#, python-format +msgid "Port %(device)s updated. 
Details: %(details)s" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:915 +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:936 +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:368 +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1095 +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1157 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1126 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1188 +#, python-format +msgid "Device %s not defined on plugin" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:922 +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1125 +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1142 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1156 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1173 +#, python-format +msgid "Attachment %s removed" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:930 +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1132 +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1149 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1163 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1180 +#, python-format +msgid "port_removed failed for %(device)s: %(e)s" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:934 +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:366 +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1154 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1185 +#, python-format +msgid "Port %s updated." +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:960 +msgid "LinuxBridge Agent RPC Daemon Started!" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:978 +#, python-format +msgid "Agent loop found changes! 
%s" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:984 +#, python-format +msgid "Error in agent loop. Devices info: %s" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:1010 +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:419 +#, python-format +msgid "Parsing physical_interface_mappings failed: %s. Agent terminated!" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:1013 +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:422 +#, python-format +msgid "Interface mappings: %s" +msgstr "" + +#: neutron/plugins/linuxbridge/common/config.py:31 +#: neutron/plugins/mlnx/common/config.py:28 +msgid "Network type for tenant networks (local, vlan, or none)" +msgstr "" + +#: neutron/plugins/linuxbridge/common/config.py:41 +msgid "" +"Enable VXLAN on the agent. Can be enabled when agent is managed by ml2 " +"plugin using linuxbridge mechanism driver" +msgstr "" + +#: neutron/plugins/linuxbridge/common/config.py:45 +msgid "TTL for vxlan interface protocol packets." +msgstr "" + +#: neutron/plugins/linuxbridge/common/config.py:47 +msgid "TOS for vxlan interface protocol packets." +msgstr "" + +#: neutron/plugins/linuxbridge/common/config.py:49 +msgid "Multicast group for vxlan interface." +msgstr "" + +#: neutron/plugins/linuxbridge/common/config.py:51 +msgid "Local IP address of the VXLAN endpoints." +msgstr "" + +#: neutron/plugins/linuxbridge/common/config.py:53 +msgid "" +"Extension to use alongside ml2 plugin's l2population mechanism driver. It" +" enables the plugin to populate VXLAN forwarding table." 
+msgstr "" + +#: neutron/plugins/linuxbridge/common/config.py:61 +#: neutron/plugins/mlnx/common/config.py:47 +msgid "List of :" +msgstr "" + +#: neutron/plugins/linuxbridge/common/config.py:69 +#: neutron/plugins/mlnx/common/config.py:72 +msgid "Enable server RPC compatibility with old agents" +msgstr "" + +#: neutron/plugins/linuxbridge/db/l2network_db_v2.py:142 +#: neutron/plugins/ml2/drivers/type_vlan.py:210 +#: neutron/plugins/openvswitch/ovs_db_v2.py:161 +#, python-format +msgid "" +"Reserving specific vlan %(vlan_id)s on physical network " +"%(physical_network)s outside pool" +msgstr "" + +#: neutron/plugins/linuxbridge/db/l2network_db_v2.py:166 +#: neutron/plugins/ml2/drivers/type_vlan.py:259 +#: neutron/plugins/openvswitch/ovs_db_v2.py:191 +#, python-format +msgid "" +"Releasing vlan %(vlan_id)s on physical network %(physical_network)s to " +"pool" +msgstr "" + +#: neutron/plugins/linuxbridge/db/l2network_db_v2.py:171 +#: neutron/plugins/ml2/drivers/type_vlan.py:254 +#: neutron/plugins/openvswitch/ovs_db_v2.py:186 +#, python-format +msgid "" +"Releasing vlan %(vlan_id)s on physical network %(physical_network)s " +"outside pool" +msgstr "" + +#: neutron/plugins/linuxbridge/db/l2network_db_v2.py:202 +#: neutron/plugins/mlnx/db/mlnx_db_v2.py:214 +msgid "get_port_from_device() called" +msgstr "" + +#: neutron/plugins/linuxbridge/db/l2network_db_v2.py:230 +#, python-format +msgid "set_port_status as %s called" +msgstr "" + +#: neutron/plugins/metaplugin/meta_neutron_plugin.py:62 +#, python-format +msgid "Flavor %(flavor)s could not be found" +msgstr "" + +#: neutron/plugins/metaplugin/meta_neutron_plugin.py:66 +msgid "Failed to add flavor binding" +msgstr "" + +#: neutron/plugins/metaplugin/meta_neutron_plugin.py:75 +msgid "Start initializing metaplugin" +msgstr "" + +#: neutron/plugins/metaplugin/meta_neutron_plugin.py:127 +#, python-format +msgid "default_flavor %s is not plugin list" +msgstr "" + +#: neutron/plugins/metaplugin/meta_neutron_plugin.py:133 +#, 
python-format +msgid "default_l3_flavor %s is not plugin list" +msgstr "" + +#: neutron/plugins/metaplugin/meta_neutron_plugin.py:139 +#, python-format +msgid "rpc_flavor %s is not plugin list" +msgstr "" + +#: neutron/plugins/metaplugin/meta_neutron_plugin.py:171 +#, python-format +msgid "Plugin location: %s" +msgstr "" + +#: neutron/plugins/metaplugin/meta_neutron_plugin.py:222 +#, python-format +msgid "Created network: %(net_id)s with flavor %(flavor)s" +msgstr "" + +#: neutron/plugins/metaplugin/meta_neutron_plugin.py:228 +#: neutron/plugins/metaplugin/meta_neutron_plugin.py:372 +msgid "Failed to add flavor bindings" +msgstr "" + +#: neutron/plugins/metaplugin/meta_neutron_plugin.py:365 +#, python-format +msgid "Created router: %(router_id)s with flavor %(flavor)s" +msgstr "" + +#: neutron/plugins/metaplugin/meta_neutron_plugin.py:376 +#, python-format +msgid "Created router: %s" +msgstr "" + +#: neutron/plugins/metaplugin/proxy_neutron_plugin.py:68 +#, python-format +msgid "Update subnet failed: %s" +msgstr "" + +#: neutron/plugins/metaplugin/proxy_neutron_plugin.py:75 +msgid "Subnet in remote have already deleted" +msgstr "" + +#: neutron/plugins/metaplugin/proxy_neutron_plugin.py:96 +#, python-format +msgid "Update network failed: %s" +msgstr "" + +#: neutron/plugins/metaplugin/proxy_neutron_plugin.py:103 +msgid "Network in remote have already deleted" +msgstr "" + +#: neutron/plugins/metaplugin/proxy_neutron_plugin.py:124 +#, python-format +msgid "Update port failed: %s" +msgstr "" + +#: neutron/plugins/metaplugin/proxy_neutron_plugin.py:135 +msgid "Port in remote have already deleted" +msgstr "" + +#: neutron/plugins/metaplugin/common/config.py:25 +msgid "" +"Comma separated list of flavor:neutron_plugin for plugins to load. " +"Extension method is searched in the list order and the first one is used." 
+msgstr "" + +#: neutron/plugins/metaplugin/common/config.py:31 +msgid "" +"Comma separated list of flavor:neutron_plugin for L3 service plugins to " +"load. This is intended for specifying L2 plugins which support L3 " +"functions. If you use a router service plugin, set this blank." +msgstr "" + +#: neutron/plugins/metaplugin/common/config.py:38 +msgid "" +"Default flavor to use, when flavor:network is not specified at network " +"creation." +msgstr "" + +#: neutron/plugins/metaplugin/common/config.py:43 +msgid "" +"Default L3 flavor to use, when flavor:router is not specified at router " +"creation. Ignored if 'l3_plugin_list' is blank." +msgstr "" + +#: neutron/plugins/metaplugin/common/config.py:49 +msgid "Comma separated list of supported extension aliases." +msgstr "" + +#: neutron/plugins/metaplugin/common/config.py:53 +msgid "" +"Comma separated list of method:flavor to select specific plugin for a " +"method. This has priority over method search order based on " +"'plugin_list'." +msgstr "" + +#: neutron/plugins/metaplugin/common/config.py:59 +msgid "Specifies flavor for plugin to handle 'q-plugin' RPC requests." 
+msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:46 +#, python-format +msgid "MidoNet %(resource_type)s %(id)s could not be found" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:50 +#, python-format +msgid "MidoNet API error: %(msg)s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:84 +#, python-format +msgid "MidoClient.create_bridge called: kwargs=%(kwargs)s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:94 +#, python-format +msgid "MidoClient.delete_bridge called: id=%(id)s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:104 +#, python-format +msgid "MidoClient.get_bridge called: id=%s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:118 +#, python-format +msgid "MidoClient.update_bridge called: id=%(id)s, kwargs=%(kwargs)s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:138 +#, python-format +msgid "" +"MidoClient.create_dhcp called: bridge=%(bridge)s, cidr=%(cidr)s, " +"gateway_ip=%(gateway_ip)s, host_rts=%(host_rts)s, " +"dns_servers=%(dns_servers)s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:156 +#, python-format +msgid "" +"MidoClient.add_dhcp_host called: bridge=%(bridge)s, cidr=%(cidr)s, " +"ip=%(ip)s, mac=%(mac)s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:161 +msgid "Tried to add tonon-existent DHCP" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:175 +#, python-format +msgid "" +"MidoClient.remove_dhcp_host called: bridge=%(bridge)s, cidr=%(cidr)s, " +"ip=%(ip)s, mac=%(mac)s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:180 +msgid "Tried to delete mapping from non-existent subnet" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:185 +#, python-format +msgid "MidoClient.remove_dhcp_host: Deleting %(dh)r" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:198 +#, python-format +msgid "" +"MidoClient.delete_dhcp_host called: bridge_id=%(bridge_id)s, " +"cidr=%(cidr)s, ip=%(ip)s, mac=%(mac)s" +msgstr "" + +#: 
neutron/plugins/midonet/midonet_lib.py:213 +#, python-format +msgid "MidoClient.delete_dhcp called: bridge=%(bridge)s, cidr=%(cidr)s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:220 +msgid "Tried to delete non-existent DHCP" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:232 +#, python-format +msgid "MidoClient.delete_port called: id=%(id)s, delete_chains=%(delete_chains)s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:247 +#, python-format +msgid "MidoClient.get_port called: id=%(id)s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:261 +#, python-format +msgid "MidoClient.add_bridge_port called: bridge=%(bridge)s, kwargs=%(kwargs)s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:273 +#, python-format +msgid "MidoClient.update_port called: id=%(id)s, kwargs=%(kwargs)s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:298 +#, python-format +msgid "MidoClient.create_router called: kwargs=%(kwargs)s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:308 +#, python-format +msgid "MidoClient.delete_router called: id=%(id)s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:318 +#, python-format +msgid "MidoClient.get_router called: id=%(id)s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:332 +#, python-format +msgid "MidoClient.update_router called: id=%(id)s, kwargs=%(kwargs)s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:353 +#, python-format +msgid "" +"MidoClient.add_dhcp_route_option called: bridge=%(bridge)s, " +"cidr=%(cidr)s, gw_ip=%(gw_ip)sdst_ip=%(dst_ip)s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:361 +msgid "Tried to access non-existent DHCP" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:393 +#, python-format +msgid "MidoClient.unlink called: port=%(port)s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:398 +#, python-format +msgid "Attempted to unlink a port that was not linked. 
%s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:404 +#, python-format +msgid "" +"MidoClient.remove_rules_by_property called: tenant_id=%(tenant_id)s, " +"chain_name=%(chain_name)skey=%(key)s, value=%(value)s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:431 +#, python-format +msgid "" +"MidoClient.create_router_chains called: router=%(router)s, " +"inbound_chain_name=%(in_chain)s, outbound_chain_name=%(out_chain)s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:454 +#, python-format +msgid "MidoClient.delete_router_chains called: id=%(id)s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:469 +#, python-format +msgid "MidoClient.delete_port_chains called: id=%(id)s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:481 +#, python-format +msgid "" +"MidoClient.get_link_port called: router=%(router)s, " +"peer_router_id=%(peer_router_id)s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:520 +#, python-format +msgid "" +"MidoClient.add_static_nat called: tenant_id=%(tenant_id)s, " +"chain_name=%(chain_name)s, from_ip=%(from_ip)s, to_ip=%(to_ip)s, " +"port_id=%(port_id)s, nat_type=%(nat_type)s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:528 +#, python-format +msgid "Invalid NAT type passed in %s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:580 +#, python-format +msgid "MidoClient.remote_static_route called: router=%(router)s, ip=%(ip)s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:591 +#, python-format +msgid "" +"MidoClient.update_port_chains called: " +"port=%(port)sinbound_chain_id=%(inbound_chain_id)s, " +"outbound_chain_id=%(outbound_chain_id)s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:602 +#, python-format +msgid "MidoClient.create_chain called: tenant_id=%(tenant_id)s name=%(name)s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:610 +#, python-format +msgid "MidoClient.delete_chain called: id=%(id)s" +msgstr "" + +#: 
neutron/plugins/midonet/midonet_lib.py:616 +#, python-format +msgid "" +"MidoClient.delete_chains_by_names called: tenant_id=%(tenant_id)s " +"names=%(names)s " +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:627 +#, python-format +msgid "" +"MidoClient.get_chain_by_name called: tenant_id=%(tenant_id)s " +"name=%(name)s " +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:638 +#, python-format +msgid "" +"MidoClient.get_port_group_by_name called: tenant_id=%(tenant_id)s " +"name=%(name)s " +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:652 +#, python-format +msgid "MidoClient.create_port_group called: tenant_id=%(tenant_id)s name=%(name)s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:661 +#, python-format +msgid "" +"MidoClient.delete_port_group_by_name called: tenant_id=%(tenant_id)s " +"name=%(name)s " +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:667 +#, python-format +msgid "Deleting pg %(id)s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:673 +#, python-format +msgid "" +"MidoClient.add_port_to_port_group_by_name called: tenant_id=%(tenant_id)s" +" name=%(name)s port_id=%(port_id)s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:687 +#, python-format +msgid "MidoClient.remove_port_from_port_groups called: port_id=%(port_id)s" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:82 +#, python-format +msgid "Invalid nat_type %s" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:139 +#, python-format +msgid "Unrecognized direction %s" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:173 +#, python-format +msgid "There is no %(name)s with ID %(id)s in MidoNet." 
+msgstr "" + +#: neutron/plugins/midonet/plugin.py:185 +#: neutron/plugins/ml2/drivers/mech_arista/exceptions.py:23 +#: neutron/plugins/ml2/drivers/mech_arista/exceptions.py:27 +#, python-format +msgid "%(msg)s" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:217 +msgid "provider_router_id should be configured in the plugin config file" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:387 +#, python-format +msgid "MidonetPluginV2.create_subnet called: subnet=%r" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:419 +#, python-format +msgid "MidonetPluginV2.create_subnet exiting: sn_entry=%r" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:428 +#, python-format +msgid "MidonetPluginV2.delete_subnet called: id=%s" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:447 +msgid "MidonetPluginV2.delete_subnet exiting" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:454 +#, python-format +msgid "MidonetPluginV2.create_network called: network=%r" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:469 +#, python-format +msgid "MidonetPluginV2.create_network exiting: net=%r" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:478 +#, python-format +msgid "MidonetPluginV2.update_network called: id=%(id)r, network=%(network)r" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:487 +#, python-format +msgid "MidonetPluginV2.update_network exiting: net=%r" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:495 +#, python-format +msgid "MidonetPluginV2.get_network called: id=%(id)r, fields=%(fields)r" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:500 +#, python-format +msgid "MidonetPluginV2.get_network exiting: qnet=%r" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:505 +#, python-format +msgid "MidonetPluginV2.delete_network called: id=%r" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:513 +#, python-format +msgid "Failed to delete neutron db, while Midonet bridge=%r had been deleted" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:518 +#, 
python-format +msgid "MidonetPluginV2.create_port called: port=%r" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:579 +#, python-format +msgid "Failed to create a port on network %(net_id)s: %(err)s" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:584 +#, python-format +msgid "MidonetPluginV2.create_port exiting: port=%r" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:589 +#, python-format +msgid "MidonetPluginV2.get_port called: id=%(id)s fields=%(fields)r" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:596 +#, python-format +msgid "There is no port with ID %(id)s in MidoNet." +msgstr "" + +#: neutron/plugins/midonet/plugin.py:600 +#, python-format +msgid "MidonetPluginV2.get_port exiting: port=%r" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:605 +#, python-format +msgid "MidonetPluginV2.get_ports called: filters=%(filters)s fields=%(fields)r" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:614 +#, python-format +msgid "" +"MidonetPluginV2.delete_port called: id=%(id)s " +"l3_port_check=%(l3_port_check)r" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:648 +#, python-format +msgid "Failed to delete DHCP mapping for port %(id)s" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:723 +#, python-format +msgid "MidonetPluginV2.create_router called: router=%(router)s" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:770 +#, python-format +msgid "MidonetPluginV2.create_router exiting: router_data=%(router_data)s." 
+msgstr "" + +#: neutron/plugins/midonet/plugin.py:782 +#, python-format +msgid "" +"MidonetPluginV2.set_router_gateway called: id=%(id)s, " +"gw_router=%(gw_router)s, gw_ip=%(gw_ip)s" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:826 +#, python-format +msgid "MidonetPluginV2.remove_router_gateway called: id=%(id)s" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:846 +#, python-format +msgid "MidonetPluginV2.update_router called: id=%(id)s router=%(router)r" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:886 +#, python-format +msgid "MidonetPluginV2.update_router exiting: router=%r" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:897 +#, python-format +msgid "MidonetPluginV2.delete_router called: id=%s" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:997 +#, python-format +msgid "" +"MidonetPluginV2.add_router_interface called: router_id=%(router_id)s " +"interface_info=%(interface_info)r" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:1021 +msgid "" +"DHCP agent is not working correctly. No port to reach the Metadata server" +" on this network" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:1030 +#, python-format +msgid "" +"Failed to create MidoNet resources to add router interface. 
" +"info=%(info)s, router_id=%(router_id)s" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:1037 +#, python-format +msgid "MidonetPluginV2.add_router_interface exiting: info=%r" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:1077 +#, python-format +msgid "" +"MidonetPluginV2.update_floatingip called: id=%(id)s " +"floatingip=%(floatingip)s " +msgstr "" + +#: neutron/plugins/midonet/plugin.py:1096 +#, python-format +msgid "MidonetPluginV2.update_floating_ip exiting: fip=%s" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:1118 +#, python-format +msgid "" +"MidonetPluginV2.create_security_group called: " +"security_group=%(security_group)s default_sg=%(default_sg)s " +msgstr "" + +#: neutron/plugins/midonet/plugin.py:1147 +#, python-format +msgid "Failed to create MidoNet resources for sg %(sg)r" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:1154 +#, python-format +msgid "MidonetPluginV2.create_security_group exiting: sg=%r" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:1160 +#, python-format +msgid "MidonetPluginV2.delete_security_group called: id=%s" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:1192 +#, python-format +msgid "" +"MidonetPluginV2.create_security_group_rule called: " +"security_group_rule=%(security_group_rule)r" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:1202 +#, python-format +msgid "MidonetPluginV2.create_security_group_rule exiting: rule=%r" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:1212 +#, python-format +msgid "MidonetPluginV2.delete_security_group_rule called: sg_rule_id=%s" +msgstr "" + +#: neutron/plugins/midonet/common/config.py:25 +msgid "MidoNet API server URI." +msgstr "" + +#: neutron/plugins/midonet/common/config.py:27 +msgid "MidoNet admin username." +msgstr "" + +#: neutron/plugins/midonet/common/config.py:30 +msgid "MidoNet admin password." +msgstr "" + +#: neutron/plugins/midonet/common/config.py:33 +msgid "ID of the project that MidoNet admin userbelongs to." 
+msgstr "" + +#: neutron/plugins/midonet/common/config.py:36 +msgid "Virtual provider router ID." +msgstr "" + +#: neutron/plugins/midonet/common/config.py:39 +msgid "Operational mode. Internal dev use only." +msgstr "" + +#: neutron/plugins/midonet/common/config.py:42 +msgid "Path to midonet host uuid file" +msgstr "" + +#: neutron/plugins/ml2/config.py:22 +msgid "" +"List of network type driver entrypoints to be loaded from the " +"neutron.ml2.type_drivers namespace." +msgstr "" + +#: neutron/plugins/ml2/config.py:26 +msgid "Ordered list of network_types to allocate as tenant networks." +msgstr "" + +#: neutron/plugins/ml2/config.py:30 +msgid "" +"An ordered list of networking mechanism driver entrypoints to be loaded " +"from the neutron.ml2.mechanism_drivers namespace." +msgstr "" + +#: neutron/plugins/ml2/db.py:41 +#, python-format +msgid "Added segment %(id)s of type %(network_type)s for network %(network_id)s" +msgstr "" + +#: neutron/plugins/ml2/db.py:85 +#, python-format +msgid "Multiple ports have port_id starting with %s" +msgstr "" + +#: neutron/plugins/ml2/db.py:91 +#, python-format +msgid "get_port_from_device_mac() called for mac %s" +msgstr "" + +#: neutron/plugins/ml2/db.py:133 +#, python-format +msgid "No binding found for port %(port_id)s" +msgstr "" + +#: neutron/plugins/ml2/managers.py:36 +#, python-format +msgid "Configured type driver names: %s" +msgstr "" + +#: neutron/plugins/ml2/managers.py:41 +#, python-format +msgid "Loaded type driver names: %s" +msgstr "" + +#: neutron/plugins/ml2/managers.py:49 +#, python-format +msgid "" +"Type driver '%(new_driver)s' ignored because type driver '%(old_driver)s'" +" is already registered for type '%(type)s'" +msgstr "" + +#: neutron/plugins/ml2/managers.py:57 +#, python-format +msgid "Registered types: %s" +msgstr "" + +#: neutron/plugins/ml2/managers.py:65 +#, python-format +msgid "No type driver for tenant network_type: %s. Service terminated!" 
+msgstr "" + +#: neutron/plugins/ml2/managers.py:69 +#, python-format +msgid "Tenant network_types: %s" +msgstr "" + +#: neutron/plugins/ml2/managers.py:73 +#, python-format +msgid "Initializing driver for type '%s'" +msgstr "" + +#: neutron/plugins/ml2/managers.py:82 +#: neutron/plugins/ml2/drivers/type_tunnel.py:116 +#, python-format +msgid "network_type value '%s' not supported" +msgstr "" + +#: neutron/plugins/ml2/managers.py:108 +#, python-format +msgid "Failed to release segment '%s' because network type is not supported." +msgstr "" + +#: neutron/plugins/ml2/managers.py:124 +#, python-format +msgid "Configured mechanism driver names: %s" +msgstr "" + +#: neutron/plugins/ml2/managers.py:130 +#, python-format +msgid "Loaded mechanism driver names: %s" +msgstr "" + +#: neutron/plugins/ml2/managers.py:142 +#, python-format +msgid "Registered mechanism drivers: %s" +msgstr "" + +#: neutron/plugins/ml2/managers.py:149 +#, python-format +msgid "Initializing mechanism driver '%s'" +msgstr "" + +#: neutron/plugins/ml2/managers.py:171 +#, python-format +msgid "Mechanism driver '%(name)s' failed in %(method)s" +msgstr "" + +#: neutron/plugins/ml2/managers.py:447 +#, python-format +msgid "" +"Attempting to bind port %(port)s on host %(host)s for vnic_type " +"%(vnic_type)s with profile %(profile)s" +msgstr "" + +#: neutron/plugins/ml2/managers.py:458 +#, python-format +msgid "" +"Bound port: %(port)s, host: %(host)s, vnic_type: %(vnic_type)s, profile: " +"%(profile)sdriver: %(driver)s, vif_type: %(vif_type)s, vif_details: " +"%(vif_details)s, segment: %(segment)s" +msgstr "" + +#: neutron/plugins/ml2/managers.py:474 +#, python-format +msgid "Mechanism driver %s failed in bind_port" +msgstr "" + +#: neutron/plugins/ml2/managers.py:478 +#, python-format +msgid "Failed to bind port %(port)s on host %(host)s" +msgstr "" + +#: neutron/plugins/ml2/plugin.py:121 +msgid "Modular L2 Plugin initialization complete" +msgstr "" + +#: neutron/plugins/ml2/plugin.py:152 +msgid 
"network_type required" +msgstr "" + +#: neutron/plugins/ml2/plugin.py:191 +#, python-format +msgid "Network %s has no segments" +msgstr "" + +#: neutron/plugins/ml2/plugin.py:254 +msgid "binding:profile value too large" +msgstr "" + +#: neutron/plugins/ml2/plugin.py:290 +#, python-format +msgid "Serialized vif_details DB value '%(value)s' for port %(port)s is invalid" +msgstr "" + +#: neutron/plugins/ml2/plugin.py:301 +#, python-format +msgid "Serialized profile DB value '%(value)s' for port %(port)s is invalid" +msgstr "" + +#: neutron/plugins/ml2/plugin.py:354 +#, python-format +msgid "" +"In _notify_port_updated(), no bound segment for port %(port_id)s on " +"network %(network_id)s" +msgstr "" + +#: neutron/plugins/ml2/plugin.py:396 +#, python-format +msgid "mechanism_manager.create_network_postcommit failed, deleting network '%s'" +msgstr "" + +#: neutron/plugins/ml2/plugin.py:456 +#, python-format +msgid "Deleting network %s" +msgstr "" + +#: neutron/plugins/ml2/plugin.py:468 +#, python-format +msgid "Ports to auto-delete: %s" +msgstr "" + +#: neutron/plugins/ml2/plugin.py:474 neutron/plugins/ml2/plugin.py:594 +msgid "Tenant-owned ports exist" +msgstr "" + +#: neutron/plugins/ml2/plugin.py:482 +#, python-format +msgid "Subnets to auto-delete: %s" +msgstr "" + +#: neutron/plugins/ml2/plugin.py:493 +#, python-format +msgid "Deleting network record %s" +msgstr "" + +#: neutron/plugins/ml2/plugin.py:501 neutron/plugins/ml2/plugin.py:607 +msgid "Committing transaction" +msgstr "" + +#: neutron/plugins/ml2/plugin.py:507 +msgid "A concurrent port creation has occurred" +msgstr "" + +#: neutron/plugins/ml2/plugin.py:516 +#, python-format +msgid "Exception auto-deleting port %s" +msgstr "" + +#: neutron/plugins/ml2/plugin.py:524 +#, python-format +msgid "Exception auto-deleting subnet %s" +msgstr "" + +#: neutron/plugins/ml2/plugin.py:533 +msgid "mechanism_manager.delete_network_postcommit failed" +msgstr "" + +#: neutron/plugins/ml2/plugin.py:547 +#, python-format 
+msgid "mechanism_manager.create_subnet_postcommit failed, deleting subnet '%s'" +msgstr "" + +#: neutron/plugins/ml2/plugin.py:577 +#, python-format +msgid "Deleting subnet %s" +msgstr "" + +#: neutron/plugins/ml2/plugin.py:588 +#, python-format +msgid "Ports to auto-deallocate: %s" +msgstr "" + +#: neutron/plugins/ml2/plugin.py:603 +msgid "Deleting subnet record" +msgstr "" + +#: neutron/plugins/ml2/plugin.py:623 +#, python-format +msgid "Exception deleting fixed_ip from port %s" +msgstr "" + +#: neutron/plugins/ml2/plugin.py:633 +msgid "mechanism_manager.delete_subnet_postcommit failed" +msgstr "" + +#: neutron/plugins/ml2/plugin.py:662 +#, python-format +msgid "mechanism_manager.create_port_postcommit failed, deleting port '%s'" +msgstr "" + +#: neutron/plugins/ml2/plugin.py:718 +#: neutron/tests/unit/ml2/test_ml2_plugin.py:131 +#, python-format +msgid "Deleting port %s" +msgstr "" + +#: neutron/plugins/ml2/plugin.py:736 +#: neutron/tests/unit/ml2/test_ml2_plugin.py:132 +#, python-format +msgid "The port '%s' was deleted" +msgstr "" + +#: neutron/plugins/ml2/plugin.py:745 +msgid "Calling base delete_port" +msgstr "" + +#: neutron/plugins/ml2/plugin.py:757 +msgid "mechanism_manager.delete_port_postcommit failed" +msgstr "" + +#: neutron/plugins/ml2/plugin.py:769 +#, python-format +msgid "Port %(port)s updated up by agent not found" +msgstr "" + +#: neutron/plugins/ml2/rpc.py:88 +#, python-format +msgid "Device %(device)s details requested by agent %(agent_id)s" +msgstr "" + +#: neutron/plugins/ml2/rpc.py:97 +#, python-format +msgid "Device %(device)s requested by agent %(agent_id)s not found in database" +msgstr "" + +#: neutron/plugins/ml2/rpc.py:104 +#, python-format +msgid "" +"Device %(device)s requested by agent %(agent_id)s has network " +"%(network_id)s with no segments" +msgstr "" + +#: neutron/plugins/ml2/rpc.py:114 +#, python-format +msgid "" +"Device %(device)s requested by agent %(agent_id)s on network " +"%(network_id)s not bound, vif_type: 
%(vif_type)s" +msgstr "" + +#: neutron/plugins/ml2/rpc.py:125 +#, python-format +msgid "" +"Device %(device)s requested by agent %(agent_id)s on network " +"%(network_id)s invalid segment, vif_type: %(vif_type)s" +msgstr "" + +#: neutron/plugins/ml2/rpc.py:149 +#, python-format +msgid "Returning: %s" +msgstr "" + +#: neutron/plugins/ml2/rpc.py:163 +#, python-format +msgid "Device %(device)s no longer exists at agent %(agent_id)s" +msgstr "" + +#: neutron/plugins/ml2/rpc.py:187 +#, python-format +msgid "Device %(device)s up at agent %(agent_id)s" +msgstr "" + +#: neutron/plugins/ml2/common/exceptions.py:23 +#, python-format +msgid "%(method)s failed." +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_agent.py:54 +#: neutron/plugins/ml2/drivers/mechanism_odl.py:345 +#, python-format +msgid "Attempting to bind port %(port)s on network %(network)s" +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_agent.py:61 +#, python-format +msgid "Refusing to bind due to unsupported vnic_type: %s" +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_agent.py:65 +#, python-format +msgid "Checking agent: %s" +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_agent.py:70 +#: neutron/plugins/ml2/drivers/mechanism_odl.py:355 +#, python-format +msgid "Bound using segment: %s" +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_agent.py:73 +#, python-format +msgid "Attempting to bind with dead agent: %s" +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_hyperv.py:44 +#, python-format +msgid "Checking segment: %(segment)s for mappings: %(mappings)s" +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_linuxbridge.py:44 +#: neutron/plugins/ml2/drivers/mech_ofagent.py:50 +#: neutron/plugins/ml2/drivers/mech_openvswitch.py:45 +#, python-format +msgid "" +"Checking segment: %(segment)s for mappings: %(mappings)s with " +"tunnel_types: %(tunnel_types)s" +msgstr "" + +#: neutron/plugins/ml2/drivers/mechanism_fslsdn.py:35 +msgid "CRD service Username" +msgstr "" + +#: 
neutron/plugins/ml2/drivers/mechanism_fslsdn.py:38 +msgid "CRD Service Password" +msgstr "" + +#: neutron/plugins/ml2/drivers/mechanism_fslsdn.py:40 +msgid "CRD Tenant Name" +msgstr "" + +#: neutron/plugins/ml2/drivers/mechanism_fslsdn.py:43 +msgid "CRD Auth URL" +msgstr "" + +#: neutron/plugins/ml2/drivers/mechanism_fslsdn.py:46 +msgid "URL for connecting to CRD service" +msgstr "" + +#: neutron/plugins/ml2/drivers/mechanism_fslsdn.py:49 +msgid "Timeout value for connecting to CRD service in seconds" +msgstr "" + +#: neutron/plugins/ml2/drivers/mechanism_fslsdn.py:53 +msgid "Region name for connecting to CRD Service in admin context" +msgstr "" + +#: neutron/plugins/ml2/drivers/mechanism_fslsdn.py:57 +msgid "If set, ignore any SSL validation issues" +msgstr "" + +#: neutron/plugins/ml2/drivers/mechanism_fslsdn.py:60 +msgid "Auth strategy for connecting to neutron in admin context" +msgstr "" + +#: neutron/plugins/ml2/drivers/mechanism_fslsdn.py:63 +msgid "Location of ca certificates file to use for CRD client requests." +msgstr "" + +#: neutron/plugins/ml2/drivers/mechanism_fslsdn.py:87 +msgid "Initializing CRD client... " +msgstr "" + +#: neutron/plugins/ml2/drivers/mechanism_ncs.py:29 +msgid "HTTP URL of Tail-f NCS REST interface." +msgstr "" + +#: neutron/plugins/ml2/drivers/mechanism_ncs.py:31 +#: neutron/plugins/ml2/drivers/mechanism_odl.py:50 +msgid "HTTP username for authentication" +msgstr "" + +#: neutron/plugins/ml2/drivers/mechanism_ncs.py:33 +#: neutron/plugins/ml2/drivers/mechanism_odl.py:52 +msgid "HTTP password for authentication" +msgstr "" + +#: neutron/plugins/ml2/drivers/mechanism_ncs.py:35 +#: neutron/plugins/ml2/drivers/mechanism_odl.py:54 +msgid "HTTP timeout in seconds." +msgstr "" + +#: neutron/plugins/ml2/drivers/mechanism_odl.py:48 +msgid "HTTP URL of OpenDaylight REST interface." +msgstr "" + +#: neutron/plugins/ml2/drivers/mechanism_odl.py:56 +msgid "Tomcat session timeout in minutes." 
+msgstr "" + +#: neutron/plugins/ml2/drivers/mechanism_odl.py:106 +#, python-format +msgid "Failed to authenticate with OpenDaylight: %s" +msgstr "" + +#: neutron/plugins/ml2/drivers/mechanism_odl.py:109 +#, python-format +msgid "Authentication Timed Out: %s" +msgstr "" + +#: neutron/plugins/ml2/drivers/mechanism_odl.py:297 +#, python-format +msgid "%(object_type)s not found (%(obj_id)s)" +msgstr "" + +#: neutron/plugins/ml2/drivers/mechanism_odl.py:333 +#, python-format +msgid "ODL-----> sending URL (%s) <-----ODL" +msgstr "" + +#: neutron/plugins/ml2/drivers/mechanism_odl.py:334 +#, python-format +msgid "ODL-----> sending JSON (%s) <-----ODL" +msgstr "" + +#: neutron/plugins/ml2/drivers/mechanism_odl.py:358 +#, python-format +msgid "" +"Refusing to bind port for segment ID %(id)s, segment %(seg)s, phys net " +"%(physnet)s, and network type %(nettype)s" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_flat.py:30 +msgid "" +"List of physical_network names with which flat networks can be created. " +"Use * to allow flat networks with arbitrary physical_network names." 
+msgstr "" + +#: neutron/plugins/ml2/drivers/type_flat.py:68 +msgid "Arbitrary flat physical_network names allowed" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_flat.py:73 +#, python-format +msgid "Allowable flat physical_network names: %s" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_flat.py:80 +msgid "ML2 FlatTypeDriver initialization complete" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_flat.py:85 +msgid "physical_network required for flat provider network" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_flat.py:88 +#, python-format +msgid "physical_network '%s' unknown for flat provider network" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_flat.py:95 +#, python-format +msgid "%s prohibited for flat provider network" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_flat.py:109 +#, python-format +msgid "Reserving flat network on physical network %s" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_flat.py:127 +#, python-format +msgid "Releasing flat network on physical network %s" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_flat.py:130 +#, python-format +msgid "No flat network found on physical network %s" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_gre.py:34 +msgid "" +"Comma-separated list of : tuples enumerating ranges of " +"GRE tunnel IDs that are available for tenant network allocation" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_gre.py:85 +#, python-format +msgid "Reserving specific gre tunnel %s from pool" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_gre.py:89 +#, python-format +msgid "Reserving specific gre tunnel %s outside pool" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_gre.py:102 +#, python-format +msgid "Allocating gre tunnel id %(gre_id)s" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_gre.py:120 +#, python-format +msgid "Releasing gre tunnel %s to pool" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_gre.py:125 +#, python-format +msgid "Releasing gre tunnel %s outside pool" +msgstr "" + 
+#: neutron/plugins/ml2/drivers/type_gre.py:128 +#, python-format +msgid "gre_id %s not found" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_gre.py:138 +#, python-format +msgid "Skipping unreasonable gre ID range %(tun_min)s:%(tun_max)s" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_gre.py:156 +#: neutron/plugins/ml2/drivers/type_vxlan.py:165 +#: neutron/plugins/openvswitch/ovs_db_v2.py:229 +#, python-format +msgid "Removing tunnel %s from pool" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_gre.py:171 +msgid "get_gre_endpoints() called" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_gre.py:180 +#, python-format +msgid "add_gre_endpoint() called for ip %s" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_gre.py:186 +#, python-format +msgid "Gre endpoint with ip %s already exists" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_local.py:35 +msgid "ML2 LocalTypeDriver initialization complete" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_local.py:46 +#, python-format +msgid "%s prohibited for local provider network" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_tunnel.py:61 +#, python-format +msgid "Invalid tunnel ID range: '%(range)s' - %(e)s. Agent terminated!" 
+msgstr "" + +#: neutron/plugins/ml2/drivers/type_tunnel.py:64 +#, python-format +msgid "%(type)s ID ranges: %(range)s" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_tunnel.py:70 +#: neutron/plugins/openvswitch/ovs_neutron_plugin.py:421 +#, python-format +msgid "provider:physical_network specified for %s network" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_tunnel.py:76 +#, python-format +msgid "segmentation_id required for %s provider network" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_tunnel.py:83 +#, python-format +msgid "%(key)s prohibited for %(tunnel)s provider network" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_tunnel.py:103 +msgid "Network_type value needed by the ML2 plugin" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_vlan.py:37 +msgid "" +"List of :: or " +"specifying physical_network names usable for VLAN provider and tenant " +"networks, as well as ranges of VLAN tags on each available for allocation" +" to tenant networks." +msgstr "" + +#: neutron/plugins/ml2/drivers/type_vlan.py:91 +msgid "Failed to parse network_vlan_ranges. Service terminated!" 
+msgstr "" + +#: neutron/plugins/ml2/drivers/type_vlan.py:161 +msgid "VlanTypeDriver initialization complete" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_vlan.py:166 +msgid "physical_network required for VLAN provider network" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_vlan.py:169 +#, python-format +msgid "physical_network '%s' unknown for VLAN provider network" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_vlan.py:175 +msgid "segmentation_id required for VLAN provider network" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_vlan.py:178 +#, python-format +msgid "segmentation_id out of range (%(min)s through %(max)s)" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_vlan.py:188 +#, python-format +msgid "%s prohibited for VLAN provider network" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_vlan.py:226 +#, python-format +msgid "" +"Allocating vlan %(vlan_id)s on physical network %(physical_network)s from" +" pool" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_vlan.py:264 +#, python-format +msgid "No vlan_id %(vlan_id)s found on physical network %(physical_network)s" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_vxlan.py:37 +msgid "" +"Comma-separated list of : tuples enumerating ranges of " +"VXLAN VNI IDs that are available for tenant network allocation" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_vxlan.py:41 +msgid "Multicast group for VXLAN. If unset, disables VXLAN multicast mode." 
+msgstr "" + +#: neutron/plugins/ml2/drivers/type_vxlan.py:93 +#, python-format +msgid "Reserving specific vxlan tunnel %s from pool" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_vxlan.py:97 +#, python-format +msgid "Reserving specific vxlan tunnel %s outside pool" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_vxlan.py:110 +#, python-format +msgid "Allocating vxlan tunnel vni %(vxlan_vni)s" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_vxlan.py:128 +#, python-format +msgid "Releasing vxlan tunnel %s to pool" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_vxlan.py:133 +#, python-format +msgid "Releasing vxlan tunnel %s outside pool" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_vxlan.py:136 +#, python-format +msgid "vxlan_vni %s not found" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_vxlan.py:147 +#, python-format +msgid "Skipping unreasonable VXLAN VNI range %(tun_min)s:%(tun_max)s" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_vxlan.py:182 +msgid "get_vxlan_endpoints() called" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_vxlan.py:192 +#, python-format +msgid "add_vxlan_endpoint() called for ip %s" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:40 +msgid "Allowed physical networks" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:42 +msgid "Unused" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:93 +msgid "" +"Brocade Mechanism: failed to create network, network cannot be created in" +" the configured physical network" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:99 +msgid "" +"Brocade Mechanism: failed to create network, only network type vlan is " +"supported" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:107 +msgid "Brocade Mechanism: failed to create network in db" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:109 +msgid "Brocade Mechanism: create_network_precommit 
failed" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:111 +#, python-format +msgid "" +"create network (precommit): %(network_id)s of network type = " +"%(network_type)s with vlan = %(vlan_id)s for tenant %(tenant_id)s" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:123 +msgid "create_network_postcommit: called" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:143 +msgid "Brocade NOS driver: failed in create network" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:146 +msgid "Brocade Mechanism: create_network_postcommmit failed" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:148 +#, python-format +msgid "" +"created network (postcommit): %(network_id)s of network type = " +"%(network_type)s with vlan = %(vlan_id)s for tenant %(tenant_id)s" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:160 +msgid "delete_network_precommit: called" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:173 +msgid "Brocade Mechanism: failed to delete network in db" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:175 +msgid "Brocade Mechanism: delete_network_precommit failed" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:177 +#, python-format +msgid "" +"delete network (precommit): %(network_id)s with vlan = %(vlan_id)s for " +"tenant %(tenant_id)s" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:189 +msgid "delete_network_postcommit: called" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:201 +msgid "Brocade NOS driver: failed to delete network" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:203 +msgid "Brocade switch exception, delete_network_postcommit failed" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:206 +#, python-format +msgid "" +"delete 
network (postcommit): %(network_id)s with vlan = %(vlan_id)s for " +"tenant %(tenant_id)s" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:224 +msgid "create_port_precommit: called" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:242 +msgid "Brocade Mechanism: failed to create port in db" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:244 +msgid "Brocade Mechanism: create_port_precommit failed" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:249 +msgid "create_port_postcommit: called" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:273 +#, python-format +msgid "Brocade NOS driver: failed to associate mac %s" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:276 +msgid "Brocade switch exception: create_port_postcommit failed" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:279 +#, python-format +msgid "" +"created port (postcommit): port_id=%(port_id)s network_id=%(network_id)s " +"tenant_id=%(tenant_id)s" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:287 +msgid "delete_port_precommit: called" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:296 +msgid "Brocade Mechanism: failed to delete port in db" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:298 +msgid "Brocade Mechanism: delete_port_precommit failed" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:303 +msgid "delete_port_postcommit: called" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:327 +#, python-format +msgid "Brocade NOS driver: failed to dissociate MAC %s" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:330 +msgid "Brocade switch exception, delete_port_postcommit failed" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:333 +#, python-format 
+msgid "" +"delete port (postcommit): port_id=%(port_id)s network_id=%(network_id)s " +"tenant_id=%(tenant_id)s" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:340 +msgid "update_port_precommit(self: called" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:344 +msgid "update_port_postcommit: called" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:348 +msgid "create_subnetwork_precommit: called" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:352 +msgid "create_subnetwork_postcommit: called" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:356 +msgid "delete_subnetwork_precommit: called" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:360 +msgid "delete_subnetwork_postcommit: called" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:364 +msgid "update_subnet_precommit(self: called" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:368 +msgid "update_subnet_postcommit: called" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/nos/nosdriver.py:64 +msgid "" +"Brocade Switch IP address is not set, check config ml2_conf_brocade.ini " +"file" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/nos/nosdriver.py:74 +msgid "Connect failed to switch" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/nos/nosdriver.py:101 +#: neutron/plugins/ml2/drivers/brocade/nos/nosdriver.py:115 +#: neutron/plugins/ml2/drivers/brocade/nos/nosdriver.py:128 +#: neutron/plugins/ml2/drivers/brocade/nos/nosdriver.py:141 +msgid "NETCONF error" +msgstr "" + +#: neutron/plugins/ml2/drivers/cisco/apic/apic_client.py:223 +#, python-format +msgid "data = %s" +msgstr "" + +#: neutron/plugins/ml2/drivers/cisco/apic/apic_client.py:226 +#: neutron/services/loadbalancer/drivers/netscaler/ncc_client.py:168 +#, python-format +msgid "Response: %s" +msgstr "" + +#: 
neutron/plugins/ml2/drivers/cisco/apic/apic_client.py:290 +#, python-format +msgid "APIC session will expire in %d seconds" +msgstr "" + +#: neutron/plugins/ml2/drivers/cisco/apic/apic_client.py:335 +msgid "APIC session timed-out, logging in again." +msgstr "" + +#: neutron/plugins/ml2/drivers/cisco/apic/config.py:23 +msgid "Host name or IP Address of the APIC controller" +msgstr "" + +#: neutron/plugins/ml2/drivers/cisco/apic/config.py:25 +msgid "Username for the APIC controller" +msgstr "" + +#: neutron/plugins/ml2/drivers/cisco/apic/config.py:27 +msgid "Password for the APIC controller" +msgstr "" + +#: neutron/plugins/ml2/drivers/cisco/apic/config.py:29 +msgid "Communication port for the APIC controller" +msgstr "" + +#: neutron/plugins/ml2/drivers/cisco/apic/config.py:31 +msgid "Name for the VMM domain provider" +msgstr "" + +#: neutron/plugins/ml2/drivers/cisco/apic/config.py:33 +msgid "Name for the VMM domain to be created for Openstack" +msgstr "" + +#: neutron/plugins/ml2/drivers/cisco/apic/config.py:35 +msgid "Name for the vlan namespace to be used for openstack" +msgstr "" + +#: neutron/plugins/ml2/drivers/cisco/apic/config.py:37 +msgid "Range of VLAN's to be used for Openstack" +msgstr "" + +#: neutron/plugins/ml2/drivers/cisco/apic/config.py:39 +msgid "Name of the node profile to be created" +msgstr "" + +#: neutron/plugins/ml2/drivers/cisco/apic/config.py:41 +msgid "Name of the entity profile to be created" +msgstr "" + +#: neutron/plugins/ml2/drivers/cisco/apic/config.py:43 +msgid "Name of the function profile to be created" +msgstr "" + +#: neutron/plugins/ml2/drivers/cisco/apic/config.py:45 +msgid "Clear the node profiles on the APIC at startup (mainly used for testing)" +msgstr "" + +#: neutron/plugins/ml2/drivers/cisco/apic/exceptions.py:25 +#, python-format +msgid "No response from APIC at %(url)s" +msgstr "" + +#: neutron/plugins/ml2/drivers/cisco/apic/exceptions.py:30 +#, python-format +msgid "" +"APIC responded with HTTP status %(status)s: 
%(reason)s, Request: " +"'%(request)s', APIC error code %(err_code)s: %(err_text)s" +msgstr "" + +#: neutron/plugins/ml2/drivers/cisco/apic/exceptions.py:37 +#, python-format +msgid "APIC failed to provide cookie for %(request)s request" +msgstr "" + +#: neutron/plugins/ml2/drivers/cisco/apic/exceptions.py:42 +msgid "Authorized APIC session not established" +msgstr "" + +#: neutron/plugins/ml2/drivers/cisco/apic/exceptions.py:47 +#, python-format +msgid "The switch and port for host '%(host)s' are not configured" +msgstr "" + +#: neutron/plugins/ml2/drivers/cisco/apic/exceptions.py:52 +#, python-format +msgid "Managed Object '%(mo_class)s' is not supported" +msgstr "" + +#: neutron/plugins/ml2/drivers/cisco/apic/exceptions.py:57 +#, python-format +msgid "" +"Multiple VLAN ranges are not supported in the APIC plugin. Please specify" +" a single VLAN range. Current config: '%(vlan_ranges)s'" +msgstr "" + +#: neutron/plugins/ml2/drivers/cisco/apic/mechanism_apic.py:84 +#, python-format +msgid "Port %s is not bound to a segment" +msgstr "" + +#: neutron/plugins/ml2/drivers/cisco/nexus/config.py:25 +msgid "The physical network managed by the switches." +msgstr "" + +#: neutron/plugins/ml2/drivers/cisco/nexus/exceptions.py:33 +#, python-format +msgid "Credential %(credential_name)s already exists for tenant %(tenant_id)s." 
+msgstr "" + +#: neutron/plugins/ml2/drivers/cisco/nexus/exceptions.py:54 +#, python-format +msgid "Nexus Port Binding (%(filters)s) is not present" +msgstr "" + +#: neutron/plugins/ml2/drivers/cisco/nexus/exceptions.py:63 +#, python-format +msgid "Missing required field(s) to configure nexus switch: %(fields)s" +msgstr "" + +#: neutron/plugins/ml2/drivers/cisco/nexus/mech_cisco_nexus.py:45 +#, python-format +msgid "nexus_switches found = %s" +msgstr "" + +#: neutron/plugins/ml2/drivers/cisco/nexus/nexus_db_v2.py:87 +msgid "get_nexusvm_bindings() called" +msgstr "" + +#: neutron/plugins/ml2/drivers/l2pop/config.py:25 +msgid "" +"Delay within which agent is expected to update existing ports whent it " +"restarts" +msgstr "" + +#: neutron/plugins/ml2/drivers/l2pop/mech_driver.py:42 +msgid "Experimental L2 population driver" +msgstr "" + +#: neutron/plugins/ml2/drivers/l2pop/mech_driver.py:143 +msgid "Unable to retrieve the agent ip, check the agent configuration." +msgstr "" + +#: neutron/plugins/ml2/drivers/l2pop/mech_driver.py:149 +#, python-format +msgid "Port %(port)s updated by agent %(agent)s isn't bound to any segment" +msgstr "" + +#: neutron/plugins/ml2/drivers/l2pop/mech_driver.py:199 +#, python-format +msgid "" +"Unable to retrieve the agent ip, check the agent %(agent_host)s " +"configuration." +msgstr "" + +#: neutron/plugins/ml2/drivers/l2pop/rpc.py:40 +#, python-format +msgid "" +"Fanout notify l2population agents at %(topic)s the message %(method)s " +"with %(fdb_entries)s" +msgstr "" + +#: neutron/plugins/ml2/drivers/l2pop/rpc.py:51 +#, python-format +msgid "" +"Notify l2population agent %(host)s at %(topic)s the message %(method)s " +"with %(fdb_entries)s" +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_arista/config.py:31 +msgid "" +"Username for Arista EOS. This is required field. If not set, all " +"communications to Arista EOSwill fail." +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_arista/config.py:37 +msgid "" +"Password for Arista EOS. 
This is required field. If not set, all " +"communications to Arista EOS will fail." +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_arista/config.py:42 +msgid "" +"Arista EOS IP address. This is required field. If not set, all " +"communications to Arista EOSwill fail." +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_arista/config.py:47 +msgid "" +"Defines if hostnames are sent to Arista EOS as FQDNs " +"(\"node1.domain.com\") or as short names (\"node1\"). This is optional. " +"If not set, a value of \"True\" is assumed." +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_arista/config.py:53 +msgid "" +"Sync interval in seconds between Neutron plugin and EOS. This interval " +"defines how often the synchronization is performed. This is an optional " +"field. If not set, a value of 180 seconds is assumed." +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_arista/config.py:60 +msgid "" +"Defines Region Name that is assigned to this OpenStack Controller. This " +"is useful when multiple OpenStack/Neutron controllers are managing the " +"same Arista HW clusters. Note that this name must match with the region " +"name registered (or known) to keystone service. Authentication with " +"Keysotne is performed by EOS. This is optional. If not set, a value of " +"\"RegionOne\" is assumed." 
+msgstr "" + +#: neutron/plugins/ml2/drivers/mech_arista/mechanism_arista.py:32 +msgid "Unable to reach EOS" +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_arista/mechanism_arista.py:74 +#, python-format +msgid "'timestamp' command '%s' is not available on EOS" +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_arista/mechanism_arista.py:314 +#, python-format +msgid "VM id %(vmid)s not found for port %(portid)s" +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_arista/mechanism_arista.py:333 +#, python-format +msgid "Unknown device owner: %s" +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_arista/mechanism_arista.py:429 +#, python-format +msgid "Executing command on Arista EOS: %s" +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_arista/mechanism_arista.py:437 +#, python-format +msgid "Error %(err)s while trying to execute commands %(cmd)s on EOS %(host)s" +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_arista/mechanism_arista.py:505 +msgid "Required option eapi_host is not set" +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_arista/mechanism_arista.py:509 +msgid "Required option eapi_username is not set" +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_arista/mechanism_arista.py:529 +msgid "Syncing Neutron <-> EOS" +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_arista/mechanism_arista.py:535 +msgid "OpenStack and EOS are in sync!" +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_arista/mechanism_arista.py:557 +#, python-format +msgid "" +"No Tenants configured in Neutron DB. 
But %d tenants disovered in EOS " +"during synchronization.Enitre EOS region is cleared" +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_arista/mechanism_arista.py:722 +#, python-format +msgid "Network %s is not created as it is not found inArista DB" +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_arista/mechanism_arista.py:736 +#, python-format +msgid "Network name changed to %s" +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_arista/mechanism_arista.py:764 +#, python-format +msgid "Network %s is not updated as it is not found inArista DB" +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_arista/mechanism_arista.py:856 +#, python-format +msgid "VM %s is not created as it is not found in Arista DB" +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_arista/mechanism_arista.py:871 +#, python-format +msgid "Port name changed to %s" +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_arista/mechanism_arista.py:921 +#, python-format +msgid "VM %s is not updated as it is not found in Arista DB" +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_bigswitch/driver.py:49 +msgid "Initializing driver" +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_bigswitch/driver.py:64 +msgid "Initialization done" +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_bigswitch/driver.py:125 +msgid "Ignoring port notification to controller because of missing host ID." 
+msgstr "" + +#: neutron/plugins/ml2/drivers/mlnx/config.py:24 +#: neutron/plugins/mlnx/common/config.py:50 +msgid "Type of VM network interface: mlnx_direct or hostdev" +msgstr "" + +#: neutron/plugins/ml2/drivers/mlnx/config.py:28 +msgid "Enable server compatibility with old nova" +msgstr "" + +#: neutron/plugins/ml2/drivers/mlnx/mech_mlnx.py:56 +#, python-format +msgid "Checking segment: %(segment)s for mappings: %(mappings)s " +msgstr "" + +#: neutron/plugins/mlnx/agent_notify_api.py:50 +msgid "Sending delete network message" +msgstr "" + +#: neutron/plugins/mlnx/agent_notify_api.py:58 +msgid "Sending update port message" +msgstr "" + +#: neutron/plugins/mlnx/mlnx_plugin.py:117 +msgid "Mellanox Embedded Switch Plugin initialisation complete" +msgstr "" + +#: neutron/plugins/mlnx/mlnx_plugin.py:154 +#, python-format +msgid "Invalid physical network type %(type)s.Server terminated!" +msgstr "" + +#: neutron/plugins/mlnx/mlnx_plugin.py:161 +#, python-format +msgid "Parsing physical_network_type failed: %s. Server terminated!" +msgstr "" + +#: neutron/plugins/mlnx/mlnx_plugin.py:166 +#, python-format +msgid "" +"Invalid physical network type %(type)s for network %(net)s. Server " +"terminated!" +msgstr "" + +#: neutron/plugins/mlnx/mlnx_plugin.py:170 +#, python-format +msgid "Physical Network type mappings: %s" +msgstr "" + +#: neutron/plugins/mlnx/mlnx_plugin.py:178 +#: neutron/plugins/openvswitch/ovs_neutron_plugin.py:348 +#, python-format +msgid "%s. Server terminated!" 
+msgstr "" + +#: neutron/plugins/mlnx/mlnx_plugin.py:316 +#, python-format +msgid "Unsupported vnic type %(vnic_type)s for physical network type %(net_type)s" +msgstr "" + +#: neutron/plugins/mlnx/mlnx_plugin.py:320 +msgid "Invalid vnic_type on port_create" +msgstr "" + +#: neutron/plugins/mlnx/mlnx_plugin.py:322 +msgid "vnic_type is not defined in port profile" +msgstr "" + +#: neutron/plugins/mlnx/mlnx_plugin.py:366 +msgid "Update network" +msgstr "" + +#: neutron/plugins/mlnx/mlnx_plugin.py:379 +msgid "Delete network" +msgstr "" + +#: neutron/plugins/mlnx/mlnx_plugin.py:427 +#, python-format +msgid "create_port with %s" +msgstr "" + +#: neutron/plugins/mlnx/rpc_callbacks.py:111 +#, python-format +msgid "Device %(device)s up %(agent_id)s" +msgstr "" + +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:58 +#, python-format +msgid "Agent cache inconsistency - port id is not stored for %s" +msgstr "" + +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:73 +#, python-format +msgid "Network %s not defined on Agent." 
+msgstr "" + +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:86 +#, python-format +msgid "Network %s is not available on this agent" +msgstr "" + +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:97 +#, python-format +msgid "Connecting port %s" +msgstr "" + +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:107 +#, python-format +msgid "Binding Segmentation ID %(seg_id)sto eSwitch for vNIC mac_address %(mac)s" +msgstr "" + +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:126 +#, python-format +msgid "Port_mac %s is not available on this agent" +msgstr "" + +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:133 +msgid "Creating VLAN Network" +msgstr "" + +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:135 +#, python-format +msgid "Unknown network type %(network_type)s for network %(network_id)s" +msgstr "" + +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:167 +msgid "Invalid Network ID, cannot remove Network" +msgstr "" + +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:169 +#, python-format +msgid "Delete network %s" +msgstr "" + +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:209 +#, python-format +msgid "RPC timeout while updating port %s" +msgstr "" + +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:291 +msgid "Ports added!" +msgstr "" + +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:294 +msgid "Ports removed!" 
+msgstr "" + +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:319 +#, python-format +msgid "Adding port with mac %s" +msgstr "" + +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:326 +#, python-format +msgid "" +"Unable to get device dev_details for device with mac_address %(device)s: " +"due to %(exc)s" +msgstr "" + +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:332 +#, python-format +msgid "Port %s updated" +msgstr "" + +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:333 +#, python-format +msgid "Device details %s" +msgstr "" + +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:346 +#, python-format +msgid "Device with mac_address %s not defined on Neutron Plugin" +msgstr "" + +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:353 +#, python-format +msgid "Removing device with mac_address %s" +msgstr "" + +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:361 +#, python-format +msgid "Removing port failed for device %(device)s due to %(exc)s" +msgstr "" + +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:376 +msgid "eSwitch Agent Started!" +msgstr "" + +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:389 +msgid "Agent loop process devices!" +msgstr "" + +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:394 +msgid "" +"Request timeout in agent event loop eSwitchD is not responding - " +"exiting..." +msgstr "" + +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:427 +#, python-format +msgid "Failed on Agent initialisation : %s. Agent terminated!" +msgstr "" + +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:432 +msgid "Agent initialised successfully, now running... " +msgstr "" + +#: neutron/plugins/mlnx/agent/utils.py:32 +msgid "" +"Failed to import eventlet.green.zmq. Won't connect to eSwitchD - " +"exiting..." 
+msgstr "" + +#: neutron/plugins/mlnx/agent/utils.py:76 +#, python-format +msgid "Action %(action)s failed: %(reason)s" +msgstr "" + +#: neutron/plugins/mlnx/agent/utils.py:78 +#, python-format +msgid "Unknown operation status %s" +msgstr "" + +#: neutron/plugins/mlnx/agent/utils.py:83 +msgid "get_attached_vnics" +msgstr "" + +#: neutron/plugins/mlnx/agent/utils.py:90 +#, python-format +msgid "" +"Set Vlan %(segmentation_id)s on Port %(port_mac)s on Fabric " +"%(physical_network)s" +msgstr "" + +#: neutron/plugins/mlnx/agent/utils.py:103 +#, python-format +msgid "Define Fabric %(fabric)s on interface %(ifc)s" +msgstr "" + +#: neutron/plugins/mlnx/agent/utils.py:112 +#, python-format +msgid "Port Up for %(port_mac)s on fabric %(fabric)s" +msgstr "" + +#: neutron/plugins/mlnx/agent/utils.py:121 +#, python-format +msgid "Port Down for %(port_mac)s on fabric %(fabric)s" +msgstr "" + +#: neutron/plugins/mlnx/agent/utils.py:130 +#, python-format +msgid "Port Release for %(port_mac)s on fabric %(fabric)s" +msgstr "" + +#: neutron/plugins/mlnx/common/comm_utils.py:59 +#, python-format +msgid "Request timeout - call again after %s seconds" +msgstr "" + +#: neutron/plugins/mlnx/common/config.py:36 +msgid "" +"List of : with " +"physical_network_type is either eth or ib" +msgstr "" + +#: neutron/plugins/mlnx/common/config.py:39 +msgid "Physical network type for provider network (eth or ib)" +msgstr "" + +#: neutron/plugins/mlnx/common/config.py:54 +msgid "eswitch daemon end point" +msgstr "" + +#: neutron/plugins/mlnx/common/config.py:56 +msgid "" +"The number of milliseconds the agent will wait for response on request to" +" daemon." +msgstr "" + +#: neutron/plugins/mlnx/common/config.py:59 +msgid "" +"The number of retries the agent will send request to daemon before giving" +" up" +msgstr "" + +#: neutron/plugins/mlnx/common/config.py:62 +msgid "" +"backoff rate multiplier for waiting period between retries for request to" +" daemon, i.e. 
value of 2 will double the request timeout each retry" +msgstr "" + +#: neutron/plugins/mlnx/common/exceptions.py:22 +#, python-format +msgid "Mlnx Exception: %(err_msg)s" +msgstr "" + +#: neutron/plugins/mlnx/common/exceptions.py:26 +msgid "Request Timeout: no response from eSwitchD" +msgstr "" + +#: neutron/plugins/mlnx/common/exceptions.py:30 +#, python-format +msgid "Operation Failed: %(err_msg)s" +msgstr "" + +#: neutron/plugins/mlnx/db/mlnx_db_v2.py:44 +#: neutron/plugins/mlnx/db/mlnx_db_v2.py:65 +#, python-format +msgid "Removing vlan %(seg_id)s on physical network %(net)s from pool" +msgstr "" + +#: neutron/plugins/mlnx/db/mlnx_db_v2.py:121 +#, python-format +msgid "Reserving vlan %(seg_id)s on physical network %(net)s from pool" +msgstr "" + +#: neutron/plugins/mlnx/db/mlnx_db_v2.py:140 +#, python-format +msgid "" +"Reserving specific vlan %(seg_id)s on physical network %(phy_net)s from " +"pool" +msgstr "" + +#: neutron/plugins/mlnx/db/mlnx_db_v2.py:145 +#, python-format +msgid "" +"Reserving specific vlan %(seg_id)s on physical network %(phy_net)s " +"outside pool" +msgstr "" + +#: neutron/plugins/mlnx/db/mlnx_db_v2.py:172 +#, python-format +msgid "Releasing vlan %(seg_id)s on physical network %(phy_net)s to pool" +msgstr "" + +#: neutron/plugins/mlnx/db/mlnx_db_v2.py:177 +#, python-format +msgid "Releasing vlan %(seg_id)s on physical network %(phy_net)s outside pool" +msgstr "" + +#: neutron/plugins/mlnx/db/mlnx_db_v2.py:183 +#, python-format +msgid "vlan_id %(seg_id)s on physical network %(phy_net)s not found" +msgstr "" + +#: neutron/plugins/mlnx/db/mlnx_db_v2.py:241 +msgid "Get_port_from_device_mac() called" +msgstr "" + +#: neutron/plugins/mlnx/db/mlnx_db_v2.py:249 +#, python-format +msgid "Set_port_status as %s called" +msgstr "" + +#: neutron/plugins/nec/nec_plugin.py:194 +#, python-format +msgid "_cleanup_ofc_tenant: No OFC tenant for %s" +msgstr "" + +#: neutron/plugins/nec/nec_plugin.py:197 +#, python-format +msgid "delete_ofc_tenant() failed 
due to %s" +msgstr "" + +#: neutron/plugins/nec/nec_plugin.py:213 +msgid "activate_port_if_ready(): skip, port.admin_state_up is False." +msgstr "" + +#: neutron/plugins/nec/nec_plugin.py:217 +msgid "activate_port_if_ready(): skip, network.admin_state_up is False." +msgstr "" + +#: neutron/plugins/nec/nec_plugin.py:221 +msgid "activate_port_if_ready(): skip, no portinfo for this port." +msgstr "" + +#: neutron/plugins/nec/nec_plugin.py:225 +msgid "activate_port_if_ready(): skip, ofc_port already exists." +msgstr "" + +#: neutron/plugins/nec/nec_plugin.py:233 +#, python-format +msgid "create_ofc_port() failed due to %s" +msgstr "" + +#: neutron/plugins/nec/nec_plugin.py:246 +#, python-format +msgid "deactivate_port(): skip, ofc_port for port=%s does not exist." +msgstr "" + +#: neutron/plugins/nec/nec_plugin.py:265 +#, python-format +msgid "deactivate_port(): OFC port for port=%s is already removed." +msgstr "" + +#: neutron/plugins/nec/nec_plugin.py:273 +#, python-format +msgid "Failed to delete port=%(port)s from OFC: %(exc)s" +msgstr "" + +#: neutron/plugins/nec/nec_plugin.py:292 +#, python-format +msgid "NECPluginV2.create_network() called, network=%s ." +msgstr "" + +#: neutron/plugins/nec/nec_plugin.py:309 +#, python-format +msgid "Failed to create network id=%(id)s on OFC: %(exc)s" +msgstr "" + +#: neutron/plugins/nec/nec_plugin.py:325 +#, python-format +msgid "NECPluginV2.update_network() called, id=%(id)s network=%(network)s ." +msgstr "" + +#: neutron/plugins/nec/nec_plugin.py:369 +#, python-format +msgid "NECPluginV2.delete_network() called, id=%s ." +msgstr "" + +#: neutron/plugins/nec/nec_plugin.py:403 +#, python-format +msgid "delete_network() failed due to %s" +msgstr "" + +#: neutron/plugins/nec/nec_plugin.py:544 +#, python-format +msgid "NECPluginV2.create_port() called, port=%s ." +msgstr "" + +#: neutron/plugins/nec/nec_plugin.py:608 +#, python-format +msgid "NECPluginV2.update_port() called, id=%(id)s port=%(port)s ." 
+msgstr "" + +#: neutron/plugins/nec/nec_plugin.py:636 +#, python-format +msgid "NECPluginV2.delete_port() called, id=%s ." +msgstr "" + +#: neutron/plugins/nec/nec_plugin.py:704 +#, python-format +msgid "" +"NECPluginV2RPCCallbacks.get_port_from_device() called, device=%(device)s " +"=> %(ret)s." +msgstr "" + +#: neutron/plugins/nec/nec_plugin.py:728 +#, python-format +msgid "NECPluginV2RPCCallbacks.update_ports() called, kwargs=%s ." +msgstr "" + +#: neutron/plugins/nec/nec_plugin.py:738 +#, python-format +msgid "" +"update_ports(): ignore unchanged portinfo in port_added message " +"(port_id=%s)." +msgstr "" + +#: neutron/plugins/nec/nec_plugin.py:758 +#, python-format +msgid "" +"update_ports(): ignore port_removed message due to portinfo for " +"port_id=%s was not registered" +msgstr "" + +#: neutron/plugins/nec/nec_plugin.py:763 +#, python-format +msgid "" +"update_ports(): ignore port_removed message received from different host " +"(registered_datapath_id=%(registered)s, " +"received_datapath_id=%(received)s)." +msgstr "" + +#: neutron/plugins/nec/nec_router.py:60 +#, python-format +msgid "RouterMixin.create_router() called, router=%s ." +msgstr "" + +#: neutron/plugins/nec/nec_router.py:86 +#, python-format +msgid "RouterMixin.update_router() called, id=%(id)s, router=%(router)s ." +msgstr "" + +#: neutron/plugins/nec/nec_router.py:104 +#, python-format +msgid "RouterMixin.delete_router() called, id=%s." +msgstr "" + +#: neutron/plugins/nec/nec_router.py:123 +#, python-format +msgid "" +"RouterMixin.add_router_interface() called, id=%(id)s, " +"interface=%(interface)s." +msgstr "" + +#: neutron/plugins/nec/nec_router.py:130 +#, python-format +msgid "" +"RouterMixin.remove_router_interface() called, id=%(id)s, " +"interface=%(interface)s." 
+msgstr "" + +#: neutron/plugins/nec/nec_router.py:313 +#, python-format +msgid "" +"OFC does not support router with provider=%(provider)s, so removed it " +"from supported provider (new router driver map=%(driver_map)s)" +msgstr "" + +#: neutron/plugins/nec/nec_router.py:321 +#, python-format +msgid "" +"default_router_provider %(default)s is supported! Please specify one of " +"%(supported)s" +msgstr "" + +#: neutron/plugins/nec/nec_router.py:335 +#, python-format +msgid "Enabled router drivers: %s" +msgstr "" + +#: neutron/plugins/nec/nec_router.py:338 +#, python-format +msgid "" +"No router provider is enabled. neutron-server terminated! " +"(supported=%(supported)s, configured=%(config)s)" +msgstr "" + +#: neutron/plugins/nec/packet_filter.py:42 +msgid "Disabled packet-filter extension." +msgstr "" + +#: neutron/plugins/nec/packet_filter.py:47 +#, python-format +msgid "create_packet_filter() called, packet_filter=%s ." +msgstr "" + +#: neutron/plugins/nec/packet_filter.py:63 +#, python-format +msgid "update_packet_filter() called, id=%(id)s packet_filter=%(packet_filter)s ." +msgstr "" + +#: neutron/plugins/nec/packet_filter.py:136 +#: neutron/plugins/nec/packet_filter.py:189 +#, python-format +msgid "Failed to create packet_filter id=%(id)s on OFC: %(exc)s" +msgstr "" + +#: neutron/plugins/nec/packet_filter.py:146 +#, python-format +msgid "delete_packet_filter() called, id=%s ." +msgstr "" + +#: neutron/plugins/nec/packet_filter.py:164 +#, python-format +msgid "activate_packet_filter_if_ready() called, packet_filter=%s." +msgstr "" + +#: neutron/plugins/nec/packet_filter.py:173 +#, python-format +msgid "" +"activate_packet_filter_if_ready(): skip pf_id=%s, " +"packet_filter.admin_state_up is False." +msgstr "" + +#: neutron/plugins/nec/packet_filter.py:176 +#, python-format +msgid "" +"activate_packet_filter_if_ready(): skip pf_id=%s, no portinfo for the " +"in_port." 
+msgstr "" + +#: neutron/plugins/nec/packet_filter.py:179 +msgid "" +"_activate_packet_filter_if_ready(): skip, ofc_packet_filter already " +"exists." +msgstr "" + +#: neutron/plugins/nec/packet_filter.py:182 +#, python-format +msgid "activate_packet_filter_if_ready(): create packet_filter id=%s on OFC." +msgstr "" + +#: neutron/plugins/nec/packet_filter.py:202 +#, python-format +msgid "deactivate_packet_filter_if_ready() called, packet_filter=%s." +msgstr "" + +#: neutron/plugins/nec/packet_filter.py:207 +#, python-format +msgid "" +"deactivate_packet_filter(): skip, Not found OFC Mapping for packet_filter" +" id=%s." +msgstr "" + +#: neutron/plugins/nec/packet_filter.py:212 +#, python-format +msgid "deactivate_packet_filter(): deleting packet_filter id=%s from OFC." +msgstr "" + +#: neutron/plugins/nec/packet_filter.py:221 +#, python-format +msgid "Failed to delete packet_filter id=%(id)s from OFC: %(exc)s" +msgstr "" + +#: neutron/plugins/nec/packet_filter.py:252 +#, python-format +msgid "Error occurred while disabling packet filter(s) for port %s" +msgstr "" + +#: neutron/plugins/nec/router_drivers.py:127 +#, python-format +msgid "create_router() failed due to %s" +msgstr "" + +#: neutron/plugins/nec/router_drivers.py:156 +#, python-format +msgid "_update_ofc_routes() failed due to %s" +msgstr "" + +#: neutron/plugins/nec/router_drivers.py:171 +#, python-format +msgid "delete_router() failed due to %s" +msgstr "" + +#: neutron/plugins/nec/router_drivers.py:182 +#, python-format +msgid "" +"RouterOpenFlowDriver.add_interface(): the requested port has no subnet. " +"add_interface() is skipped. 
router_id=%(id)s, port=%(port)s)" +msgstr "" + +#: neutron/plugins/nec/router_drivers.py:202 +#, python-format +msgid "add_router_interface() failed due to %s" +msgstr "" + +#: neutron/plugins/nec/router_drivers.py:220 +#, python-format +msgid "delete_router_interface() failed due to %s" +msgstr "" + +#: neutron/plugins/nec/agent/nec_neutron_agent.py:53 +#, python-format +msgid "Update ports: added=%(added)s, removed=%(removed)s" +msgstr "" + +#: neutron/plugins/nec/agent/nec_neutron_agent.py:76 +#: neutron/plugins/oneconvergence/agent/nvsd_neutron_agent.py:51 +#, python-format +msgid "port_update received: %s" +msgstr "" + +#: neutron/plugins/nec/agent/nec_neutron_agent.py:220 +msgid "No port changed." +msgstr "" + +#: neutron/plugins/nec/common/config.py:37 +msgid "Host to connect to" +msgstr "" + +#: neutron/plugins/nec/common/config.py:39 +msgid "Base URL of OFC REST API. It is prepended to each API request." +msgstr "" + +#: neutron/plugins/nec/common/config.py:42 +msgid "Port to connect to" +msgstr "" + +#: neutron/plugins/nec/common/config.py:44 +msgid "Driver to use" +msgstr "" + +#: neutron/plugins/nec/common/config.py:46 +msgid "Enable packet filter" +msgstr "" + +#: neutron/plugins/nec/common/config.py:48 +msgid "Use SSL to connect" +msgstr "" + +#: neutron/plugins/nec/common/config.py:50 +msgid "Key file" +msgstr "" + +#: neutron/plugins/nec/common/config.py:52 +msgid "Certificate file" +msgstr "" + +#: neutron/plugins/nec/common/config.py:54 +msgid "Disable SSL certificate verification" +msgstr "" + +#: neutron/plugins/nec/common/config.py:56 +msgid "" +"Maximum attempts per OFC API request.NEC plugin retries API request to " +"OFC when OFC returns ServiceUnavailable (503).The value must be greater " +"than 0." +msgstr "" + +#: neutron/plugins/nec/common/config.py:65 +msgid "Default router provider to use." +msgstr "" + +#: neutron/plugins/nec/common/config.py:68 +msgid "List of enabled router providers." 
+msgstr "" + +#: neutron/plugins/nec/common/exceptions.py:22 +#, python-format +msgid "An OFC exception has occurred: %(reason)s" +msgstr "" + +#: neutron/plugins/nec/common/exceptions.py:32 +#, python-format +msgid "The specified OFC resource (%(resource)s) is not found." +msgstr "" + +#: neutron/plugins/nec/common/exceptions.py:36 +#, python-format +msgid "An exception occurred in NECPluginV2 DB: %(reason)s" +msgstr "" + +#: neutron/plugins/nec/common/exceptions.py:40 +#, python-format +msgid "" +"Neutron-OFC resource mapping for %(resource)s %(neutron_id)s is not " +"found. It may be deleted during processing." +msgstr "" + +#: neutron/plugins/nec/common/exceptions.py:46 +#, python-format +msgid "OFC returns Server Unavailable (503) (Retry-After=%(retry_after)s)" +msgstr "" + +#: neutron/plugins/nec/common/exceptions.py:55 +#, python-format +msgid "PortInfo %(id)s could not be found" +msgstr "" + +#: neutron/plugins/nec/common/exceptions.py:59 +msgid "" +"Invalid input for operation: datapath_id should be a hex string with at " +"most 8 bytes" +msgstr "" + +#: neutron/plugins/nec/common/exceptions.py:65 +msgid "Invalid input for operation: port_no should be [0:65535]" +msgstr "" + +#: neutron/plugins/nec/common/exceptions.py:70 +#, python-format +msgid "Router (provider=%(provider)s) does not support an external network" +msgstr "" + +#: neutron/plugins/nec/common/exceptions.py:75 +#, python-format +msgid "Provider %(provider)s could not be found" +msgstr "" + +#: neutron/plugins/nec/common/exceptions.py:79 +#, python-format +msgid "Cannot create more routers with provider=%(provider)s" +msgstr "" + +#: neutron/plugins/nec/common/exceptions.py:83 +#, python-format +msgid "" +"Provider of Router %(router_id)s is %(provider)s. This operation is " +"supported only for router provider %(expected_provider)s." 
+msgstr "" + +#: neutron/plugins/nec/common/ofc_client.py:56 +#, python-format +msgid "Operation on OFC failed: %(status)s%(msg)s" +msgstr "" + +#: neutron/plugins/nec/common/ofc_client.py:80 +#, python-format +msgid "Client request: %(host)s:%(port)s %(method)s %(action)s [%(body)s]" +msgstr "" + +#: neutron/plugins/nec/common/ofc_client.py:89 +#, python-format +msgid "OFC returns [%(status)s:%(data)s]" +msgstr "" + +#: neutron/plugins/nec/common/ofc_client.py:106 +#, python-format +msgid "OFC returns ServiceUnavailable (retry-after=%s)" +msgstr "" + +#: neutron/plugins/nec/common/ofc_client.py:110 +#, python-format +msgid "Specified resource %s does not exist on OFC " +msgstr "" + +#: neutron/plugins/nec/common/ofc_client.py:114 +#, python-format +msgid "Operation on OFC failed: status=%(status)s, detail=%(detail)s" +msgstr "" + +#: neutron/plugins/nec/common/ofc_client.py:117 +msgid "Operation on OFC failed" +msgstr "" + +#: neutron/plugins/nec/common/ofc_client.py:126 +#, python-format +msgid "Failed to connect OFC : %s" +msgstr "" + +#: neutron/plugins/nec/common/ofc_client.py:142 +#, python-format +msgid "Waiting for %s seconds due to OFC Service_Unavailable." 
+msgstr "" + +#: neutron/plugins/nec/db/api.py:110 +#, python-format +msgid "del_ofc_item(): NotFound item (resource=%(resource)s, id=%(id)s) " +msgstr "" + +#: neutron/plugins/nec/db/api.py:144 +#, python-format +msgid "del_portinfo(): NotFound portinfo for port_id: %s" +msgstr "" + +#: neutron/plugins/nec/db/api.py:165 +#: neutron/plugins/openvswitch/ovs_db_v2.py:317 +#, python-format +msgid "get_port_with_securitygroups() called:port_id=%s" +msgstr "" + +#: neutron/plugins/nec/db/router.py:87 +#, python-format +msgid "Add provider binding (router=%(router_id)s, provider=%(provider)s)" +msgstr "" + +#: neutron/plugins/nec/drivers/__init__.py:38 +#, python-format +msgid "Loading OFC driver: %s" +msgstr "" + +#: neutron/plugins/nec/drivers/pfc.py:35 +#, python-format +msgid "OFC %(resource)s ID has an invalid format: %(ofc_id)s" +msgstr "" + +#: neutron/plugins/nec/extensions/packetfilter.py:35 +msgid "Number of packet_filters allowed per tenant, -1 for unlimited" +msgstr "" + +#: neutron/plugins/nec/extensions/packetfilter.py:42 +#, python-format +msgid "PacketFilter %(id)s could not be found" +msgstr "" + +#: neutron/plugins/nec/extensions/packetfilter.py:46 +#, python-format +msgid "" +"IP version %(version)s is not supported for %(field)s (%(value)s is " +"specified)" +msgstr "" + +#: neutron/plugins/nec/extensions/packetfilter.py:51 +#, python-format +msgid "Packet Filter priority should be %(min)s-%(max)s (included)" +msgstr "" + +#: neutron/plugins/nec/extensions/packetfilter.py:55 +#, python-format +msgid "%(field)s field cannot be updated" +msgstr "" + +#: neutron/plugins/nec/extensions/packetfilter.py:59 +#, python-format +msgid "" +"The backend does not support duplicated priority. Priority %(priority)s " +"is in use" +msgstr "" + +#: neutron/plugins/nec/extensions/packetfilter.py:64 +#, python-format +msgid "" +"Ether Type '%(eth_type)s' conflicts with protocol '%(protocol)s'. Update " +"or clear protocol before changing ether type." 
+msgstr "" + +#: neutron/plugins/nuage/plugin.py:89 +#, python-format +msgid "%(resource)s with id %(resource_id)s does not exist" +msgstr "" + +#: neutron/plugins/nuage/plugin.py:98 +#, python-format +msgid "" +"Either %(resource)s %(req_resource)s not found or you dont have " +"credential to access it" +msgstr "" + +#: neutron/plugins/nuage/plugin.py:104 +#, python-format +msgid "" +"More than one entry found for %(resource)s %(req_resource)s. Use id " +"instead" +msgstr "" + +#: neutron/plugins/nuage/plugin.py:214 +#, python-format +msgid "Subnet %s not found on VSD" +msgstr "" + +#: neutron/plugins/nuage/plugin.py:219 +#, python-format +msgid "Port-Mapping for port %s not found on VSD" +msgstr "" + +#: neutron/plugins/nuage/plugin.py:314 +msgid "External network with subnets can not be changed to non-external network" +msgstr "" + +#: neutron/plugins/nuage/plugin.py:368 +msgid "" +"Either net_partition is not provided with subnet OR default net_partition" +" is not created at the start" +msgstr "" + +#: neutron/plugins/nuage/plugin.py:397 +#, python-format +msgid "Only one subnet is allowed per external network %s" +msgstr "" + +#: neutron/plugins/nuage/plugin.py:479 +#, python-format +msgid "" +"Unable to complete operation on subnet %s.One or more ports have an IP " +"allocation from this subnet." +msgstr "" + +#: neutron/plugins/nuage/plugin.py:509 +#, python-format +msgid "" +"Router %s does not hold default zone OR net_partition mapping. Router-IF " +"add failed" +msgstr "" + +#: neutron/plugins/nuage/plugin.py:519 +#, python-format +msgid "Subnet %s does not hold Nuage VSD reference. 
Router-IF add failed" +msgstr "" + +#: neutron/plugins/nuage/plugin.py:529 +#, python-format +msgid "" +"Subnet %(subnet)s and Router %(router)s belong to different net_partition" +" Router-IF add not permitted" +msgstr "" + +#: neutron/plugins/nuage/plugin.py:541 +#, python-format +msgid "Subnet %s has one or more active VMs Router-IF add not permitted" +msgstr "" + +#: neutron/plugins/nuage/plugin.py:587 neutron/plugins/nuage/plugin.py:592 +#: neutron/plugins/nuage/plugin.py:598 +#, python-format +msgid "No router interface found for Router %s. Router-IF delete failed" +msgstr "" + +#: neutron/plugins/nuage/plugin.py:614 +#, python-format +msgid "Subnet %s has one or more active VMs Router-IF delete not permitted" +msgstr "" + +#: neutron/plugins/nuage/plugin.py:623 +#, python-format +msgid "" +"Router %s does not hold net_partition assoc on Nuage VSD. Router-IF " +"delete failed" +msgstr "" + +#: neutron/plugins/nuage/plugin.py:662 +msgid "" +"Either net_partition is not provided with router OR default net_partition" +" is not created at the start" +msgstr "" + +#: neutron/plugins/nuage/plugin.py:708 +msgid "for same subnet, multiple static routes not allowed" +msgstr "" + +#: neutron/plugins/nuage/plugin.py:724 +#, python-format +msgid "Router %s does not hold net-partition assoc on VSD. extra-route failed" +msgstr "" + +#: neutron/plugins/nuage/plugin.py:837 +#, python-format +msgid "One or more router still attached to net_partition %s." 
+msgstr "" + +#: neutron/plugins/nuage/plugin.py:842 +#, python-format +msgid "NetPartition with %s does not exist" +msgstr "" + +#: neutron/plugins/nuage/plugin.py:888 +#, python-format +msgid "router %s is not associated with any net-partition" +msgstr "" + +#: neutron/plugins/nuage/plugin.py:903 +msgid "Floating IP can not be associated to VM in different router context" +msgstr "" + +#: neutron/plugins/nuage/common/config.py:22 +msgid "IP Address and Port of Nuage's VSD server" +msgstr "" + +#: neutron/plugins/nuage/common/config.py:25 +msgid "Username and password for authentication" +msgstr "" + +#: neutron/plugins/nuage/common/config.py:27 +msgid "Boolean for SSL connection with VSD server" +msgstr "" + +#: neutron/plugins/nuage/common/config.py:29 +msgid "Nuage provided base uri to reach out to VSD" +msgstr "" + +#: neutron/plugins/nuage/common/config.py:31 +msgid "" +"Organization name in which VSD will orchestrate network resources using " +"openstack" +msgstr "" + +#: neutron/plugins/nuage/common/config.py:34 +msgid "Nuage provided uri for initial authorization to access VSD" +msgstr "" + +#: neutron/plugins/nuage/common/config.py:38 +msgid "" +"Default Network partition in which VSD will orchestrate network resources" +" using openstack" +msgstr "" + +#: neutron/plugins/nuage/common/config.py:42 +msgid "Per Net Partition quota of floating ips" +msgstr "" + +#: neutron/plugins/nuage/common/exceptions.py:24 +#, python-format +msgid "Nuage Plugin does not support this operation: %(msg)s" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:92 +msgid "Agent terminated!: Failed to get a datapath." 
+msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:111 +msgid "Agent terminated" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:145 +msgid "Agent failed to create agent config map" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:272 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1258 +#, python-format +msgid "Unable to create tunnel port. Invalid remote IP: %s" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:277 +#, python-format +msgid "ryu send_msg() result: %s" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:311 +#, python-format +msgid "network_delete received network %s" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:317 +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:544 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:284 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:607 +#, python-format +msgid "Network %s not used on agent." 
+msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:326 +#, python-format +msgid "port_update received port %s" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:329 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:296 +msgid "tunnel_update received" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:335 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:305 +msgid "No tunnel_type specified, cannot create tunnels" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:338 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:308 +#, python-format +msgid "tunnel_type %s not supported by agent" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:459 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:515 +#, python-format +msgid "No local VLAN available for net-id=%s" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:462 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:523 +#, python-format +msgid "Assigning %(vlan_id)s as local vlan for net-id=%(net_uuid)s" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:474 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:545 +#, python-format +msgid "" +"Cannot provision %(network_type)s network for net-id=%(net_uuid)s - " +"tunneling disabled" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:482 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:564 +#, python-format +msgid "" +"Cannot provision flat network for net-id=%(net_uuid)s - no bridge for " +"physical_network %(physical_network)s" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:492 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:584 +#, python-format +msgid "" +"Cannot provision VLAN network for net-id=%(net_uuid)s - no bridge for " +"physical_network %(physical_network)s" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:501 +#: 
neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:593 +#, python-format +msgid "" +"Cannot provision unknown network type %(network_type)s for net-" +"id=%(net_uuid)s" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:547 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:610 +#, python-format +msgid "Reclaiming vlan = %(vlan_id)s from net-id = %(net_uuid)s" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:581 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:650 +#, python-format +msgid "" +"Cannot reclaim unknown network type %(network_type)s for net-" +"id=%(net_uuid)s" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:632 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:696 +#, python-format +msgid "port_unbound() net_uuid %s not in local_vlan_map" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:706 +#, python-format +msgid "ancillary bridge list: %s." +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:796 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:778 +msgid "" +"Failed to create OVS patch port. Cannot have tunneling enabled on this " +"agent, since this version of OVS does not support tunnels or patch ports." +" Agent terminated!" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:880 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:891 +#, python-format +msgid "Mapping physical network %(physical_network)s to bridge %(bridge)s" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:886 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:897 +#, python-format +msgid "" +"Bridge %(bridge)s for physical network %(physical_network)s does not " +"exist. Agent terminated!" 
+msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:954 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:986 +#, python-format +msgid "Port '%(port_name)s' has lost its vlan tag '%(vlan_tag)d'!" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:983 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1015 +#, python-format +msgid "VIF port: %s has no ofport configured, and might not be able to transmit" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:991 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1025 +#, python-format +msgid "No VIF port for port %s defined on agent." +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1004 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1038 +#: neutron/tests/unit/ofagent/test_ofa_neutron_agent.py:683 +#: neutron/tests/unit/openvswitch/test_ovs_neutron_agent.py:763 +msgid "ofport should have a value that can be interpreted as an integer" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1007 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1041 +#: neutron/tests/unit/ofagent/test_ofa_neutron_agent.py:666 +#: neutron/tests/unit/ofagent/test_ofa_neutron_agent.py:686 +#: neutron/tests/unit/openvswitch/test_ovs_neutron_agent.py:746 +#: neutron/tests/unit/openvswitch/test_ovs_neutron_agent.py:766 +#: neutron/tests/unit/openvswitch/test_ovs_neutron_agent.py:783 +#, python-format +msgid "Failed to set-up %(type)s tunnel port to %(ip)s" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1055 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1084 +#, python-format +msgid "Processing port %s" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1061 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1090 +#, python-format +msgid "" +"Port %s was not found on the integration bridge and will therefore not be" +" processed" +msgstr "" + +#: 
neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1086 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1117 +#, python-format +msgid "Setting status for %s to UP" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1090 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1121 +#, python-format +msgid "Setting status for %s to DOWN" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1093 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1124 +#, python-format +msgid "Configuration for device %s completed." +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1103 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1134 +#, python-format +msgid "Ancillary Port %s added" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1178 +#, python-format +msgid "" +"process_network_ports - iteration:%(iter_num)d - " +"treat_devices_added_or_updated completed in %(elapsed).3f" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1186 +#, python-format +msgid "" +"process_network_ports - iteration:%(iter_num)d - treat_devices_removed " +"completed in %(elapsed).3f" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1199 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1236 +#, python-format +msgid "" +"process_ancillary_network_ports - iteration: %(iter_num)d - " +"treat_ancillary_devices_added completed in %(elapsed).3f" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1208 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1245 +#, python-format +msgid "" +"process_ancillary_network_ports - iteration: %(iter_num)d - " +"treat_ancillary_devices_removed completed in %(elapsed).3f" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1235 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1287 +#, python-format +msgid "Unable to sync tunnel IP %(local_ip)s: %(e)s" +msgstr "" + +#: 
neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1263 +#, python-format +msgid "Agent ovsdb_monitor_loop - iteration:%d started" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1274 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1334 +msgid "Agent tunnel out of sync with plugin!" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1278 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1338 +msgid "Error while synchronizing tunnels" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1282 +#, python-format +msgid "" +"Agent ovsdb_monitor_loop - iteration:%(iter_num)d - starting polling. " +"Elapsed:%(elapsed).3f" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1295 +#, python-format +msgid "" +"Agent ovsdb_monitor_loop - iteration:%(iter_num)d - port information " +"retrieved. Elapsed:%(elapsed).3f" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1305 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1371 +#, python-format +msgid "Starting to process devices in:%s" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1309 +#, python-format +msgid "" +"Agent ovsdb_monitor_loop - iteration:%(iter_num)d - ports processed. " +"Elapsed:%(elapsed).3f" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1324 +#, python-format +msgid "" +"Agent ovsdb_monitor_loop - iteration:%(iter_num)d - ancillary port info " +"retrieved. Elapsed:%(elapsed).3f" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1334 +#, python-format +msgid "" +"Agent ovsdb_monitor_loop - iteration:%(iter_num)d - ancillary ports " +"processed. 
Elapsed:%(elapsed).3f" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1349 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1413 +msgid "Error while processing VIF ports" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1356 +#, python-format +msgid "" +"Agent ovsdb_monitor_loop - iteration:%(iter_num)d completed. Processed " +"ports statistics:%(port_stats)s. Elapsed:%(elapsed).3f" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1389 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1457 +#, python-format +msgid "Parsing bridge_mappings failed: %s." +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1412 +#, python-format +msgid "Invalid tunnel type specificed: %s" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1415 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1483 +msgid "Tunneling cannot be enabled without a valid local_ip." +msgstr "" + +#: neutron/plugins/ofagent/common/config.py:24 +msgid "Number of seconds to retry acquiring an Open vSwitch datapath" +msgstr "" + +#: neutron/plugins/oneconvergence/plugin.py:240 +msgid "Failed to create subnet, deleting it from neutron" +msgstr "" + +#: neutron/plugins/oneconvergence/plugin.py:304 +#, python-format +msgid "Deleting newly created neutron port %s" +msgstr "" + +#: neutron/plugins/oneconvergence/plugin.py:374 +msgid "Failed to create floatingip" +msgstr "" + +#: neutron/plugins/oneconvergence/plugin.py:413 +msgid "Failed to create router" +msgstr "" + +#: neutron/plugins/oneconvergence/agent/nvsd_neutron_agent.py:154 +msgid "Port list is updated" +msgstr "" + +#: neutron/plugins/oneconvergence/agent/nvsd_neutron_agent.py:161 +msgid "AGENT looping....." +msgstr "" + +#: neutron/plugins/oneconvergence/agent/nvsd_neutron_agent.py:173 +msgid "NVSD Agent initialized successfully, now running... 
" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/config.py:26 +msgid "NVSD Controller IP address" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/config.py:29 +msgid "NVSD Controller Port number" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/config.py:32 +msgid "NVSD Controller username" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/config.py:35 +msgid "NVSD Controller password" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/config.py:38 +msgid "NVSD controller REST API request timeout in seconds" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/config.py:40 +msgid "Number of login retries to NVSD controller" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/config.py:45 +msgid "integration bridge" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/exception.py:23 +#, python-format +msgid "An unknown nvsd plugin exception occurred: %(reason)s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/exception.py:27 +#: neutron/plugins/vmware/api_client/exception.py:68 +msgid "The request has timed out." +msgstr "" + +#: neutron/plugins/oneconvergence/lib/exception.py:31 +msgid "Invalid access credentials to the Server." +msgstr "" + +#: neutron/plugins/oneconvergence/lib/exception.py:35 +#, python-format +msgid "A resource is not found: %(reason)s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/exception.py:39 +#, python-format +msgid "Request sent to server is invalid: %(reason)s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/exception.py:43 +#, python-format +msgid "Internal Server Error: %(reason)s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/exception.py:47 +msgid "Connection is closed by the server." 
+msgstr "" + +#: neutron/plugins/oneconvergence/lib/exception.py:51 +#, python-format +msgid "The request is forbidden access to the resource: %(reason)s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/exception.py:55 +#, python-format +msgid "Internal Server Error from NVSD controller: %(reason)s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/nvsdlib.py:55 +#, python-format +msgid "Could not create a %(resource)s under tenant %(tenant_id)s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/nvsdlib.py:59 +#, python-format +msgid "Failed to %(method)s %(resource)s id=%(resource_id)s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/nvsdlib.py:65 +#, python-format +msgid "Failed to %(method)s %(resource)s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/nvsdlib.py:109 +#, python-format +msgid "Network %(id)s created under tenant %(tenant_id)s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/nvsdlib.py:126 +#, python-format +msgid "Network %(id)s updated under tenant %(tenant_id)s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/nvsdlib.py:147 +#, python-format +msgid "Network %(id)s deleted under tenant %(tenant_id)s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/nvsdlib.py:160 +#, python-format +msgid "Subnet %(id)s created under tenant %(tenant_id)s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/nvsdlib.py:174 +#, python-format +msgid "Subnet %(id)s deleted under tenant %(tenant_id)s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/nvsdlib.py:190 +#, python-format +msgid "Subnet %(id)s updated under tenant %(tenant_id)s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/nvsdlib.py:222 +#, python-format +msgid "Port %(id)s created under tenant %(tenant_id)s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/nvsdlib.py:246 +#, python-format +msgid "Port %(id)s updated under tenant %(tenant_id)s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/nvsdlib.py:259 +#, python-format +msgid "Port %(id)s deleted under tenant 
%(tenant_id)s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/nvsdlib.py:281 +#, python-format +msgid "Flatingip %(id)s created under tenant %(tenant_id)s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/nvsdlib.py:298 +#, python-format +msgid "Flatingip %(id)s updated under tenant %(tenant_id)s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/nvsdlib.py:312 +#, python-format +msgid "Flatingip %(id)s deleted under tenant %(tenant_id)s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/nvsdlib.py:325 +#, python-format +msgid "Router %(id)s created under tenant %(tenant_id)s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/nvsdlib.py:341 +#, python-format +msgid "Router %(id)s updated under tenant %(tenant_id)s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/nvsdlib.py:351 +#, python-format +msgid "Router %(id)s deleted under tenant %(tenant_id)s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/plugin_helper.py:81 +#, python-format +msgid "Unable to connect to NVSD controller. Exiting after %(retries)s attempts" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/plugin_helper.py:91 +#, python-format +msgid "Login Failed: %s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/plugin_helper.py:92 +#, python-format +msgid "Unable to establish connection with Controller %s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/plugin_helper.py:94 +msgid "Retrying after 1 second..." 
+msgstr "" + +#: neutron/plugins/oneconvergence/lib/plugin_helper.py:98 +#, python-format +msgid "Login Successful %(uri)s %(status)s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/plugin_helper.py:102 +#, python-format +msgid "AuthToken = %s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/plugin_helper.py:104 +msgid "login failed" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/plugin_helper.py:112 +msgid "No Token, Re-login" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/plugin_helper.py:129 +#, python-format +msgid "request: %(method)s %(uri)s successful" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/plugin_helper.py:136 +#, python-format +msgid "request: Request failed from Controller side :%s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/plugin_helper.py:141 +#, python-format +msgid "Response is Null, Request timed out: %(method)s to %(uri)s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/plugin_helper.py:153 +#, python-format +msgid "Request %(method)s %(uri)s body = %(body)s failed with status %(status)s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/plugin_helper.py:157 +#, python-format +msgid "%s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/plugin_helper.py:161 +#, python-format +msgid "%(method)s to %(url)s, unexpected response code: %(status)d" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/plugin_helper.py:167 +#, python-format +msgid "Request failed from Controller side with Status=%s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/plugin_helper.py:171 +#, python-format +msgid "Success: %(method)s %(url)s status=%(status)s" +msgstr "" + +#: neutron/plugins/openvswitch/ovs_db_v2.py:210 +#, python-format +msgid "Skipping unreasonable tunnel ID range %(tun_min)s:%(tun_max)s" +msgstr "" + +#: neutron/plugins/openvswitch/ovs_db_v2.py:258 +#, python-format +msgid "Reserving tunnel %s from pool" +msgstr "" + +#: neutron/plugins/openvswitch/ovs_db_v2.py:273 +#, python-format +msgid "Reserving 
specific tunnel %s from pool" +msgstr "" + +#: neutron/plugins/openvswitch/ovs_db_v2.py:276 +#, python-format +msgid "Reserving specific tunnel %s outside pool" +msgstr "" + +#: neutron/plugins/openvswitch/ovs_db_v2.py:299 +#, python-format +msgid "Releasing tunnel %s outside pool" +msgstr "" + +#: neutron/plugins/openvswitch/ovs_db_v2.py:301 +#, python-format +msgid "Releasing tunnel %s to pool" +msgstr "" + +#: neutron/plugins/openvswitch/ovs_db_v2.py:303 +#, python-format +msgid "tunnel_id %s not found" +msgstr "" + +#: neutron/plugins/openvswitch/ovs_db_v2.py:375 +#, python-format +msgid "Adding a tunnel endpoint for %s" +msgstr "" + +#: neutron/plugins/openvswitch/ovs_db_v2.py:391 +#, python-format +msgid "" +"Adding a tunnel endpoint failed due to a concurrenttransaction had been " +"committed (%s attempts left)" +msgstr "" + +#: neutron/plugins/openvswitch/ovs_db_v2.py:396 +msgid "Unable to generate a new tunnel id" +msgstr "" + +#: neutron/plugins/openvswitch/ovs_neutron_plugin.py:296 +#, python-format +msgid "Invalid tenant_network_type: %s. Server terminated!" +msgstr "" + +#: neutron/plugins/openvswitch/ovs_neutron_plugin.py:313 +#, python-format +msgid "Tunneling disabled but tenant_network_type is '%s'. Server terminated!" +msgstr "" + +#: neutron/plugins/openvswitch/ovs_neutron_plugin.py:359 +#, python-format +msgid "Invalid tunnel ID range: '%(range)s' - %(e)s. Server terminated!" +msgstr "" + +#: neutron/plugins/openvswitch/ovs_neutron_plugin.py:363 +#, python-format +msgid "Tunnel ID ranges: %s" +msgstr "" + +#: neutron/plugins/openvswitch/ovs_neutron_plugin.py:418 +#, python-format +msgid "%s networks are not enabled" +msgstr "" + +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:228 +msgid "OVS version can not support ARP responder." 
+msgstr "" + +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:293 +#, python-format +msgid "port_update message processed for port %s" +msgstr "" + +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:494 +#, python-format +msgid "Action %s not supported" +msgstr "" + +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:757 +#, python-format +msgid "Adding %s to list of bridges." +msgstr "" + +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:871 +#, python-format +msgid "" +"Creating an interface named %(name)s exceeds the %(limit)d character " +"limitation. It was shortened to %(new_name)s to fit." +msgstr "" + +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1215 +#, python-format +msgid "" +"process_network_ports - iteration:%(iter_num)d " +"-treat_devices_added_or_updated completed in %(elapsed).3f" +msgstr "" + +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1223 +#, python-format +msgid "" +"process_network_ports - iteration:%(iter_num)d -treat_devices_removed " +"completed in %(elapsed).3f" +msgstr "" + +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1324 +#, python-format +msgid "Agent rpc_loop - iteration:%d started" +msgstr "" + +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1348 +#, python-format +msgid "" +"Agent rpc_loop - iteration:%(iter_num)d - starting polling. " +"Elapsed:%(elapsed).3f" +msgstr "" + +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1361 +#, python-format +msgid "" +"Agent rpc_loop - iteration:%(iter_num)d - port information retrieved. " +"Elapsed:%(elapsed).3f" +msgstr "" + +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1376 +#, python-format +msgid "" +"Agent rpc_loop - iteration:%(iter_num)d -ports processed. " +"Elapsed:%(elapsed).3f" +msgstr "" + +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1390 +#, python-format +msgid "" +"Agent rpc_loop - iteration:%(iter_num)d -ancillary port info retrieved. 
" +"Elapsed:%(elapsed).3f" +msgstr "" + +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1399 +#, python-format +msgid "" +"Agent rpc_loop - iteration:%(iter_num)d - ancillary ports processed. " +"Elapsed:%(elapsed).3f" +msgstr "" + +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1420 +#, python-format +msgid "" +"Agent rpc_loop - iteration:%(iter_num)d completed. Processed ports " +"statistics: %(port_stats)s. Elapsed:%(elapsed).3f" +msgstr "" + +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1480 +#, python-format +msgid "Invalid tunnel type specified: %s" +msgstr "" + +#: neutron/plugins/openvswitch/common/config.py:32 +msgid "Enable tunneling support" +msgstr "" + +#: neutron/plugins/openvswitch/common/config.py:34 +msgid "Tunnel bridge to use" +msgstr "" + +#: neutron/plugins/openvswitch/common/config.py:36 +msgid "Peer patch port in integration bridge for tunnel bridge" +msgstr "" + +#: neutron/plugins/openvswitch/common/config.py:39 +msgid "Peer patch port in tunnel bridge for integration bridge" +msgstr "" + +#: neutron/plugins/openvswitch/common/config.py:42 +msgid "Local IP address of GRE tunnel endpoints." +msgstr "" + +#: neutron/plugins/openvswitch/common/config.py:45 +msgid "List of :" +msgstr "" + +#: neutron/plugins/openvswitch/common/config.py:47 +msgid "Network type for tenant networks (local, vlan, gre, vxlan, or none)" +msgstr "" + +#: neutron/plugins/openvswitch/common/config.py:55 +msgid "List of :" +msgstr "" + +#: neutron/plugins/openvswitch/common/config.py:57 +msgid "The type of tunnels to use when utilizing tunnels, either 'gre' or 'vxlan'" +msgstr "" + +#: neutron/plugins/openvswitch/common/config.py:67 +msgid "Minimize polling by monitoring ovsdb for interface changes." 
+msgstr "" + +#: neutron/plugins/openvswitch/common/config.py:71 +msgid "" +"The number of seconds to wait before respawning the ovsdb monitor after " +"losing communication with it" +msgstr "" + +#: neutron/plugins/openvswitch/common/config.py:74 +msgid "Network types supported by the agent (gre and/or vxlan)" +msgstr "" + +#: neutron/plugins/openvswitch/common/config.py:79 +msgid "MTU size of veth interfaces" +msgstr "" + +#: neutron/plugins/openvswitch/common/config.py:81 +msgid "" +"Use ml2 l2population mechanism driver to learn remote mac and IPs and " +"improve tunnel scalability" +msgstr "" + +#: neutron/plugins/openvswitch/common/config.py:84 +msgid "Enable local ARP responder if it is supported" +msgstr "" + +#: neutron/plugins/openvswitch/common/config.py:86 +msgid "" +"Set or un-set the don't fragment (DF) bit on outgoing IP packet carrying " +"GRE/VXLAN tunnel" +msgstr "" + +#: neutron/plugins/plumgrid/common/exceptions.py:26 +#, python-format +msgid "PLUMgrid Plugin Error: %(err_msg)s" +msgstr "" + +#: neutron/plugins/plumgrid/common/exceptions.py:30 +#, python-format +msgid "Connection failed with PLUMgrid Director: %(err_msg)s" +msgstr "" + +#: neutron/plugins/plumgrid/drivers/fake_plumlib.py:32 +msgid "Python PLUMgrid Fake Library Started " +msgstr "" + +#: neutron/plugins/plumgrid/drivers/fake_plumlib.py:37 +#, python-format +msgid "Fake Director: %s" +msgstr "" + +#: neutron/plugins/plumgrid/drivers/plumlib.py:38 +msgid "Python PLUMgrid Library Started " +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:45 +msgid "PLUMgrid Director server to connect to" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:47 +msgid "PLUMgrid Director server port to connect to" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:49 +msgid "PLUMgrid Director admin username" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:51 +msgid "PLUMgrid Director admin 
password" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:53 +msgid "PLUMgrid Director server timeout" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:56 +msgid "PLUMgrid Driver" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:73 +msgid "Neutron PLUMgrid Director: Starting Plugin" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:78 +msgid "Neutron PLUMgrid Director: Neutron server with PLUMgrid Plugin has started" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:91 +#, python-format +msgid "Neutron PLUMgrid Director: %s" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:102 +msgid "Neutron PLUMgrid Director: create_network() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:116 +msgid "PLUMgrid Library: create_network() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:131 +msgid "Neutron PLUMgrid Director: update_network() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:143 +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:169 +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:353 +msgid "PLUMgrid Library: update_network() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:158 +msgid "Neutron PLUMgrid Director: delete_network() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:181 +msgid "Neutron PLUMgrid Director: create_port() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:200 +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:227 +msgid "PLUMgrid Library: create_port() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:215 +msgid "Neutron PLUMgrid Director: update_port() called" +msgstr "" + +#: 
neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:243 +msgid "Neutron PLUMgrid Director: delete_port() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:258 +msgid "PLUMgrid Library: delete_port() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:287 +msgid "Neutron PLUMgrid Director: create_subnet() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:311 +msgid "PLUMgrid Library: create_subnet() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:321 +msgid "Neutron PLUMgrid Director: delete_subnet() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:333 +msgid "PLUMgrid Library: delete_subnet() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:341 +msgid "update_subnet() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:365 +msgid "Neutron PLUMgrid Director: create_router() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:377 +msgid "PLUMgrid Library: create_router() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:387 +msgid "Neutron PLUMgrid Director: update_router() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:393 +msgid "PLUMgrid Library: update_router() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:402 +msgid "Neutron PLUMgrid Director: delete_router() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:412 +msgid "PLUMgrid Library: delete_router() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:420 +msgid "Neutron PLUMgrid Director: add_router_interface() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:440 +msgid "PLUMgrid Library: add_router_interface() 
called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:451 +msgid "Neutron PLUMgrid Director: remove_router_interface() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:473 +msgid "PLUMgrid Library: remove_router_interface() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:484 +msgid "Neutron PLUMgrid Director: create_floatingip() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:491 +msgid "PLUMgrid Library: create_floatingip() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:500 +msgid "Neutron PLUMgrid Director: update_floatingip() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:509 +msgid "PLUMgrid Library: update_floatingip() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:519 +msgid "Neutron PLUMgrid Director: delete_floatingip() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:529 +msgid "PLUMgrid Library: delete_floatingip() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:536 +msgid "Neutron PLUMgrid Director: disassociate_floatingips() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:543 +msgid "PLUMgrid Library: disassociate_floatingips() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:573 +msgid "" +"Networks with admin_state_up=False are not supported by PLUMgrid plugin " +"yet." +msgstr "" + +#: neutron/plugins/ryu/ryu_neutron_plugin.py:61 +#, python-format +msgid "get_ofp_rest_api: %s" +msgstr "" + +#: neutron/plugins/ryu/ryu_neutron_plugin.py:125 +msgid "Invalid configuration. 
check ryu.ini" +msgstr "" + +#: neutron/plugins/ryu/agent/ryu_neutron_agent.py:95 +#, python-format +msgid "Could not get IPv4 address from %(nic)s: %(cfg)s" +msgstr "" + +#: neutron/plugins/ryu/agent/ryu_neutron_agent.py:161 +#, python-format +msgid "External port %s" +msgstr "" + +#: neutron/plugins/ryu/agent/ryu_neutron_agent.py:169 +msgid "Get Ryu rest API address" +msgstr "" + +#: neutron/plugins/ryu/agent/ryu_neutron_agent.py:217 +msgid "Ryu rest API port isn't specified" +msgstr "" + +#: neutron/plugins/ryu/agent/ryu_neutron_agent.py:218 +#, python-format +msgid "Going to ofp controller mode %s" +msgstr "" + +#: neutron/plugins/ryu/agent/ryu_neutron_agent.py:294 +#, python-format +msgid "tunnel_ip %s" +msgstr "" + +#: neutron/plugins/ryu/agent/ryu_neutron_agent.py:296 +#, python-format +msgid "ovsdb_port %s" +msgstr "" + +#: neutron/plugins/ryu/agent/ryu_neutron_agent.py:298 +#, python-format +msgid "ovsdb_ip %s" +msgstr "" + +#: neutron/plugins/ryu/agent/ryu_neutron_agent.py:304 +#, python-format +msgid "Initialization failed: %s" +msgstr "" + +#: neutron/plugins/ryu/agent/ryu_neutron_agent.py:307 +msgid "" +"Ryu initialization on the node is done. Agent initialized successfully, " +"now running..." 
+msgstr "" + +#: neutron/plugins/ryu/common/config.py:26 +msgid "OpenFlow REST API location" +msgstr "" + +#: neutron/plugins/ryu/common/config.py:28 +msgid "Minimum tunnel ID to use" +msgstr "" + +#: neutron/plugins/ryu/common/config.py:30 +msgid "Maximum tunnel ID to use" +msgstr "" + +#: neutron/plugins/ryu/common/config.py:32 +msgid "Tunnel IP to use" +msgstr "" + +#: neutron/plugins/ryu/common/config.py:34 +msgid "Tunnel interface to use" +msgstr "" + +#: neutron/plugins/ryu/common/config.py:36 +msgid "OVSDB port to connect to" +msgstr "" + +#: neutron/plugins/ryu/common/config.py:38 +msgid "OVSDB IP to connect to" +msgstr "" + +#: neutron/plugins/ryu/common/config.py:40 +msgid "OVSDB interface to connect to" +msgstr "" + +#: neutron/plugins/ryu/db/api_v2.py:40 +#, python-format +msgid "get_port_from_device() called:port_id=%s" +msgstr "" + +#: neutron/plugins/ryu/db/api_v2.py:76 +#, python-format +msgid "" +"Invalid tunnel key options tunnel_key_min: %(key_min)d tunnel_key_max: " +"%(key_max)d. Using default value" +msgstr "" + +#: neutron/plugins/ryu/db/api_v2.py:156 +#, python-format +msgid "last_key %(last_key)s new_key %(new_key)s" +msgstr "" + +#: neutron/plugins/ryu/db/api_v2.py:159 +msgid "No key found" +msgstr "" + +#: neutron/plugins/ryu/db/api_v2.py:192 +#, python-format +msgid "Transaction retry exhausted (%d). Abandoned tunnel key allocation." +msgstr "" + +#: neutron/plugins/vmware/check_nsx_config.py:47 +#: neutron/plugins/vmware/check_nsx_config.py:82 +#, python-format +msgid "Error '%(err)s' when connecting to controller(s): %(ctl)s." 
+msgstr "" + +#: neutron/plugins/vmware/dhcpmeta_modes.py:55 +#, python-format +msgid "Invalid agent_mode: %s" +msgstr "" + +#: neutron/plugins/vmware/dhcpmeta_modes.py:106 +msgid "network_auto_schedule has been disabled" +msgstr "" + +#: neutron/plugins/vmware/dhcpmeta_modes.py:127 +#, python-format +msgid "Unable to run Neutron with config option '%s', as NSX does not support it" +msgstr "" + +#: neutron/plugins/vmware/dhcpmeta_modes.py:130 +#, python-format +msgid "Unmet dependency for config option '%s'" +msgstr "" + +#: neutron/plugins/vmware/nsx_cluster.py:49 +#, python-format +msgid "" +"Attribute '%s' has been deprecated or moved to a new section. See new " +"configuration file for details." +msgstr "" + +#: neutron/plugins/vmware/nsx_cluster.py:61 +#, python-format +msgid "The following cluster attributes were not specified: %s'" +msgstr "" + +#: neutron/plugins/vmware/api_client/__init__.py:28 +#, python-format +msgid "Invalid connection type: %s" +msgstr "" + +#: neutron/plugins/vmware/api_client/base.py:103 +#, python-format +msgid "[%d] no API providers currently available." +msgstr "" + +#: neutron/plugins/vmware/api_client/base.py:106 +#, python-format +msgid "[%d] Waiting to acquire API client connection." +msgstr "" + +#: neutron/plugins/vmware/api_client/base.py:110 +#, python-format +msgid "[%(rid)d] Connection %(conn)s idle for %(sec)0.2f seconds; reconnecting." +msgstr "" + +#: neutron/plugins/vmware/api_client/base.py:119 +#, python-format +msgid "[%(rid)d] Acquired connection %(conn)s. %(qsize)d connection(s) available." 
+msgstr "" + +#: neutron/plugins/vmware/api_client/base.py:140 +#, python-format +msgid "" +"[%(rid)d] Released connection %(conn)s is not an API provider for the " +"cluster" +msgstr "" + +#: neutron/plugins/vmware/api_client/base.py:150 +#, python-format +msgid "[%(rid)d] Connection returned in bad state, reconnecting to %(conn)s" +msgstr "" + +#: neutron/plugins/vmware/api_client/base.py:176 +#, python-format +msgid "[%(rid)d] Released connection %(conn)s. %(qsize)d connection(s) available." +msgstr "" + +#: neutron/plugins/vmware/api_client/base.py:186 +#, python-format +msgid "Login request for an invalid connection: '%s'" +msgstr "" + +#: neutron/plugins/vmware/api_client/base.py:197 +msgid "Waiting for auth to complete" +msgstr "" + +#: neutron/plugins/vmware/api_client/base.py:239 +#, python-format +msgid "Invalid conn_params value: '%s'" +msgstr "" + +#: neutron/plugins/vmware/api_client/client.py:93 +#, python-format +msgid "Request returns \"%s\"" +msgstr "" + +#: neutron/plugins/vmware/api_client/client.py:106 +#, python-format +msgid "Request timed out: %(method)s to %(url)s" +msgstr "" + +#: neutron/plugins/vmware/api_client/client.py:117 +#, python-format +msgid "Received error code: %s" +msgstr "" + +#: neutron/plugins/vmware/api_client/client.py:118 +#, python-format +msgid "Server Error Message: %s" +msgstr "" + +#: neutron/plugins/vmware/api_client/client.py:124 +#, python-format +msgid "" +"%(method)s to %(url)s, unexpected response code: %(status)d (content = " +"'%(body)s')" +msgstr "" + +#: neutron/plugins/vmware/api_client/client.py:141 +msgid "Unable to determine NSX version. Plugin might not work as expected." 
+msgstr "" + +#: neutron/plugins/vmware/api_client/eventlet_client.py:145 +#, python-format +msgid "Login error \"%s\"" +msgstr "" + +#: neutron/plugins/vmware/api_client/eventlet_client.py:150 +#, python-format +msgid "Saving new authentication cookie '%s'" +msgstr "" + +#: neutron/plugins/vmware/api_client/eventlet_request.py:102 +msgid "Joining an invalid green thread" +msgstr "" + +#: neutron/plugins/vmware/api_client/eventlet_request.py:122 +#, python-format +msgid "[%d] Request timeout." +msgstr "" + +#: neutron/plugins/vmware/api_client/eventlet_request.py:123 +msgid "Request timeout" +msgstr "" + +#: neutron/plugins/vmware/api_client/eventlet_request.py:149 +#, python-format +msgid "[%(rid)d] Completed request '%(method)s %(url)s': %(status)s" +msgstr "" + +#: neutron/plugins/vmware/api_client/eventlet_request.py:156 +#, python-format +msgid "[%(rid)d] Error while handling request: %(req)s" +msgstr "" + +#: neutron/plugins/vmware/api_client/eventlet_request.py:212 +#, python-format +msgid "[%(rid)d] Failed to parse API provider: %(e)s" +msgstr "" + +#: neutron/plugins/vmware/api_client/exception.py:41 +msgid "Server denied session's authentication credentials." +msgstr "" + +#: neutron/plugins/vmware/api_client/exception.py:45 +msgid "An entity referenced in the request was not found." +msgstr "" + +#: neutron/plugins/vmware/api_client/exception.py:49 +msgid "Request conflicts with configuration on a different entity." +msgstr "" + +#: neutron/plugins/vmware/api_client/exception.py:54 +msgid "" +"Request could not completed because the associated resource could not be " +"reached." +msgstr "" + +#: neutron/plugins/vmware/api_client/exception.py:59 +msgid "The request is forbidden from accessing the referenced resource." +msgstr "" + +#: neutron/plugins/vmware/api_client/exception.py:64 +msgid "Create/Update actions are forbidden when in read-only mode." 
+msgstr "" + +#: neutron/plugins/vmware/api_client/exception.py:72 +msgid "The server is unable to fulfill the request due to a bad syntax" +msgstr "" + +#: neutron/plugins/vmware/api_client/exception.py:77 +msgid "The backend received an invalid security certificate." +msgstr "" + +#: neutron/plugins/vmware/api_client/request.py:85 +msgid "No API connections available" +msgstr "" + +#: neutron/plugins/vmware/api_client/request.py:90 +#, python-format +msgid "[%(rid)d] Issuing - request %(conn)s" +msgstr "" + +#: neutron/plugins/vmware/api_client/request.py:116 +#, python-format +msgid "Setting X-Nvp-Wait-For-Config-Generation request header: '%s'" +msgstr "" + +#: neutron/plugins/vmware/api_client/request.py:122 +#, python-format +msgid "[%(rid)d] Exception issuing request: %(e)s" +msgstr "" + +#: neutron/plugins/vmware/api_client/request.py:130 +#, python-format +msgid "[%(rid)d] Completed request '%(conn)s': %(status)s (%(elapsed)s seconds)" +msgstr "" + +#: neutron/plugins/vmware/api_client/request.py:139 +#, python-format +msgid "Reading X-Nvp-config-Generation response header: '%s'" +msgstr "" + +#: neutron/plugins/vmware/api_client/request.py:168 +#, python-format +msgid "[%d] Maximum redirects exceeded, aborting request" +msgstr "" + +#: neutron/plugins/vmware/api_client/request.py:178 +#, python-format +msgid "[%(rid)d] Redirecting request to: %(conn)s" +msgstr "" + +#: neutron/plugins/vmware/api_client/request.py:191 +#, python-format +msgid "[%(rid)d] Request '%(method)s %(url)s' received: %(status)s" +msgstr "" + +#: neutron/plugins/vmware/api_client/request.py:195 +#, python-format +msgid "Server error return: %s" +msgstr "" + +#: neutron/plugins/vmware/api_client/request.py:199 +msgid "Invalid server response" +msgstr "" + +#: neutron/plugins/vmware/api_client/request.py:204 +#, python-format +msgid "[%(rid)d] Failed request '%(conn)s': '%(msg)s' (%(elapsed)s seconds)" +msgstr "" + +#: neutron/plugins/vmware/api_client/request.py:238 +#, python-format 
+msgid "[%d] Received redirect status without location header field" +msgstr "" + +#: neutron/plugins/vmware/api_client/request.py:255 +#, python-format +msgid "[%(rid)d] Received invalid redirect location: '%(url)s'" +msgstr "" + +#: neutron/plugins/vmware/api_client/request.py:259 +#, python-format +msgid "[%(rid)d] Received malformed redirect location: %(url)s" +msgstr "" + +#: neutron/plugins/vmware/api_client/version.py:30 +#, python-format +msgid "Unable to fetch NSX version from response headers :%s" +msgstr "" + +#: neutron/plugins/vmware/common/config.py:41 +msgid "" +"Maximum number of ports of a logical switch on a bridged transport zone " +"(default 5000)" +msgstr "" + +#: neutron/plugins/vmware/common/config.py:45 +msgid "" +"Maximum number of ports of a logical switch on an overlay transport zone " +"(default 256)" +msgstr "" + +#: neutron/plugins/vmware/common/config.py:49 +msgid "Maximum concurrent connections to each NSX controller." +msgstr "" + +#: neutron/plugins/vmware/common/config.py:54 +msgid "" +"Number of seconds a generation id should be valid for (default -1 meaning" +" do not time out)" +msgstr "" + +#: neutron/plugins/vmware/common/config.py:58 +msgid "" +"If set to access_network this enables a dedicated connection to the " +"metadata proxy for metadata server access via Neutron router. If set to " +"dhcp_host_route this enables host route injection via the dhcp agent. " +"This option is only useful if running on a host that does not support " +"namespaces otherwise access_network should be used." +msgstr "" + +#: neutron/plugins/vmware/common/config.py:67 +msgid "" +"The default network tranport type to use (stt, gre, bridge, ipsec_gre, or" +" ipsec_stt)" +msgstr "" + +#: neutron/plugins/vmware/common/config.py:71 +msgid "The mode used to implement DHCP/metadata services." 
+msgstr "" + +#: neutron/plugins/vmware/common/config.py:73 +msgid "" +"The default option leverages service nodes to perform packet replication " +"though one could set to this to 'source' to perform replication locally. " +"This is useful if one does not want to deploy a service node(s)." +msgstr "" + +#: neutron/plugins/vmware/common/config.py:82 +msgid "" +"Interval in seconds between runs of the state synchronization task. Set " +"it to 0 to disable it" +msgstr "" + +#: neutron/plugins/vmware/common/config.py:86 +msgid "" +"Maximum value for the additional random delay in seconds between runs of " +"the state synchronization task" +msgstr "" + +#: neutron/plugins/vmware/common/config.py:91 +msgid "" +"Minimum delay, in seconds, between two state synchronization queries to " +"NSX. It must not exceed state_sync_interval" +msgstr "" + +#: neutron/plugins/vmware/common/config.py:96 +msgid "" +"Minimum number of resources to be retrieved from NSX during state " +"synchronization" +msgstr "" + +#: neutron/plugins/vmware/common/config.py:100 +msgid "" +"Always read operational status from backend on show operations. Enabling " +"this option might slow down the system." 
+msgstr "" + +#: neutron/plugins/vmware/common/config.py:109 +msgid "User name for NSX controllers in this cluster" +msgstr "" + +#: neutron/plugins/vmware/common/config.py:114 +msgid "Password for NSX controllers in this cluster" +msgstr "" + +#: neutron/plugins/vmware/common/config.py:117 +msgid "Total time limit for a cluster request" +msgstr "" + +#: neutron/plugins/vmware/common/config.py:120 +msgid "Time before aborting a request" +msgstr "" + +#: neutron/plugins/vmware/common/config.py:123 +msgid "Number of time a request should be retried" +msgstr "" + +#: neutron/plugins/vmware/common/config.py:126 +msgid "Number of times a redirect should be followed" +msgstr "" + +#: neutron/plugins/vmware/common/config.py:129 +msgid "Lists the NSX controllers in this cluster" +msgstr "" + +#: neutron/plugins/vmware/common/config.py:134 +msgid "" +"This is uuid of the default NSX Transport zone that will be used for " +"creating tunneled isolated \"Neutron\" networks. It needs to be created " +"in NSX before starting Neutron with the nsx plugin." 
+msgstr "" + +#: neutron/plugins/vmware/common/config.py:139 +msgid "" +"Unique identifier of the NSX L3 Gateway service which will be used for " +"implementing routers and floating IPs" +msgstr "" + +#: neutron/plugins/vmware/common/config.py:143 +msgid "" +"Unique identifier of the NSX L2 Gateway service which will be used by " +"default for network gateways" +msgstr "" + +#: neutron/plugins/vmware/common/config.py:146 +msgid "" +"Unique identifier of the Service Cluster which will be used by logical " +"services like dhcp and metadata" +msgstr "" + +#: neutron/plugins/vmware/common/config.py:149 +msgid "" +"Name of the interface on a L2 Gateway transport nodewhich should be used " +"by default when setting up a network connection" +msgstr "" + +#: neutron/plugins/vmware/common/config.py:159 +msgid "User name for vsm" +msgstr "" + +#: neutron/plugins/vmware/common/config.py:163 +msgid "Password for vsm" +msgstr "" + +#: neutron/plugins/vmware/common/config.py:165 +msgid "uri for vsm" +msgstr "" + +#: neutron/plugins/vmware/common/config.py:167 +msgid "Optional parameter identifying the ID of datacenter to deploy NSX Edges" +msgstr "" + +#: neutron/plugins/vmware/common/config.py:170 +#: neutron/plugins/vmware/common/config.py:176 +msgid "Optional parameter identifying the ID of datastore to deploy NSX Edges" +msgstr "" + +#: neutron/plugins/vmware/common/config.py:173 +msgid "Optional parameter identifying the ID of resource to deploy NSX Edges" +msgstr "" + +#: neutron/plugins/vmware/common/config.py:179 +msgid "Network ID for physical network connectivity" +msgstr "" + +#: neutron/plugins/vmware/common/config.py:182 +msgid "Task status check interval" +msgstr "" + +#: neutron/plugins/vmware/common/config.py:196 +#, python-format +msgid "Invalid replication_mode: %s" +msgstr "" + +#: neutron/plugins/vmware/common/exceptions.py:21 +#, python-format +msgid "An unexpected error occurred in the NSX Plugin: %(err_msg)s" +msgstr "" + +#: 
neutron/plugins/vmware/common/exceptions.py:25 +#, python-format +msgid "Unable to fulfill request with version %(version)s." +msgstr "" + +#: neutron/plugins/vmware/common/exceptions.py:29 +#, python-format +msgid "Invalid NSX connection parameters: %(conn_params)s" +msgstr "" + +#: neutron/plugins/vmware/common/exceptions.py:33 +#, python-format +msgid "" +"Invalid cluster values: %(invalid_attrs)s. Please ensure that these " +"values are specified in the [DEFAULT] section of the NSX plugin ini file." +msgstr "" + +#: neutron/plugins/vmware/common/exceptions.py:39 +#, python-format +msgid "Unable to find cluster config entry for nova zone: %(nova_zone)s" +msgstr "" + +#: neutron/plugins/vmware/common/exceptions.py:44 +#, python-format +msgid "" +"Unable to create port on network %(network)s. Maximum number of ports " +"reached" +msgstr "" + +#: neutron/plugins/vmware/common/exceptions.py:49 +#, python-format +msgid "" +"While retrieving NAT rules, %(actual_rules)s were found whereas rules in " +"the (%(min_rules)s,%(max_rules)s) interval were expected" +msgstr "" + +#: neutron/plugins/vmware/common/exceptions.py:55 +#, python-format +msgid "Invalid NSX attachment type '%(attachment_type)s'" +msgstr "" + +#: neutron/plugins/vmware/common/exceptions.py:59 +msgid "" +"The networking backend is currently in maintenance mode and therefore " +"unable to accept requests which modify its state. Please try later." +msgstr "" + +#: neutron/plugins/vmware/common/exceptions.py:65 +#, python-format +msgid "Gateway Service %(gateway)s is already in use" +msgstr "" + +#: neutron/plugins/vmware/common/exceptions.py:69 +msgid "" +"An invalid security certificate was specified for the gateway device. 
" +"Certificates must be enclosed between '-----BEGIN CERTIFICATE-----' and '" +"-----END CERTIFICATE-----'" +msgstr "" + +#: neutron/plugins/vmware/common/exceptions.py:76 +#, python-format +msgid "Quota exceeded for Vcns resource: %(overs)s: %(err_msg)s" +msgstr "" + +#: neutron/plugins/vmware/common/exceptions.py:80 +#, python-format +msgid "Router %(router_id)s is in use by Loadbalancer Service %(vip_id)s" +msgstr "" + +#: neutron/plugins/vmware/common/exceptions.py:85 +#, python-format +msgid "Router %(router_id)s is in use by firewall Service %(firewall_id)s" +msgstr "" + +#: neutron/plugins/vmware/common/exceptions.py:90 +#, python-format +msgid "Error happened in NSX VCNS Driver: %(err_msg)s" +msgstr "" + +#: neutron/plugins/vmware/common/exceptions.py:94 +#, python-format +msgid "" +"Router %(router_id)s is not in 'ACTIVE' status, thus unable to provide " +"advanced service" +msgstr "" + +#: neutron/plugins/vmware/common/exceptions.py:99 +#, python-format +msgid "" +"Service cluster: '%(cluster_id)s' is unavailable. 
Please, check NSX setup" +" and/or configuration" +msgstr "" + +#: neutron/plugins/vmware/common/exceptions.py:104 +#, python-format +msgid "" +"An error occurred while connecting LSN %(lsn_id)s and network %(net_id)s " +"via port %(port_id)s" +msgstr "" + +#: neutron/plugins/vmware/common/exceptions.py:113 +#, python-format +msgid "Unable to find LSN for %(entity)s %(entity_id)s" +msgstr "" + +#: neutron/plugins/vmware/common/exceptions.py:117 +#, python-format +msgid "Unable to find port for LSN %(lsn_id)s and %(entity)s %(entity_id)s" +msgstr "" + +#: neutron/plugins/vmware/common/exceptions.py:122 +#, python-format +msgid "Unable to migrate network '%(net_id)s' to LSN: %(reason)s" +msgstr "" + +#: neutron/plugins/vmware/common/exceptions.py:126 +#, python-format +msgid "Configuration conflict on Logical Service Node %(lsn_id)s" +msgstr "" + +#: neutron/plugins/vmware/common/nsx_utils.py:65 +#, python-format +msgid "Unable to find NSX switches for Neutron network %s" +msgstr "" + +#: neutron/plugins/vmware/common/nsx_utils.py:112 +#, python-format +msgid "Unable to find NSX port for Neutron port %s" +msgstr "" + +#: neutron/plugins/vmware/common/nsx_utils.py:152 +#, python-format +msgid "Unable to find NSX security profile for Neutron security group %s" +msgstr "" + +#: neutron/plugins/vmware/common/nsx_utils.py:156 +#, python-format +msgid "Multiple NSX security profiles found for Neutron security group %s" +msgstr "" + +#: neutron/plugins/vmware/common/nsx_utils.py:187 +#, python-format +msgid "Unable to find NSX router for Neutron router %s" +msgstr "" + +#: neutron/plugins/vmware/common/nsx_utils.py:245 +#, python-format +msgid "" +"Unable to retrieve operational status for gateway devices belonging to " +"tenant: %s" +msgstr "" + +#: neutron/plugins/vmware/common/nsx_utils.py:248 +msgid "Unable to retrieve operational status for gateway devices" +msgstr "" + +#: neutron/plugins/vmware/common/sync.py:226 +#, python-format +msgid "" +"Minimum request 
delay:%(req_delay)s must not exceed synchronization " +"interval:%(sync_interval)s" +msgstr "" + +#: neutron/plugins/vmware/common/sync.py:259 +#, python-format +msgid "Logical switch for neutron network %s not found on NSX." +msgstr "" + +#: neutron/plugins/vmware/common/sync.py:294 +#: neutron/plugins/vmware/common/sync.py:376 +#: neutron/plugins/vmware/common/sync.py:471 +#, python-format +msgid "Updating status for neutron resource %(q_id)s to: %(status)s" +msgstr "" + +#: neutron/plugins/vmware/common/sync.py:346 +#, python-format +msgid "Logical router for neutron router %s not found on NSX." +msgstr "" + +#: neutron/plugins/vmware/common/sync.py:396 +#, python-format +msgid "Unable to find Neutron router id for NSX logical router: %s" +msgstr "" + +#: neutron/plugins/vmware/common/sync.py:438 +#, python-format +msgid "Logical switch port for neutron port %s not found on NSX." +msgstr "" + +#: neutron/plugins/vmware/common/sync.py:531 +#, python-format +msgid "" +"Requested page size is %(cur_chunk_size)d.It might be necessary to do " +"%(num_requests)d round-trips to NSX for fetching data. Please tune sync " +"parameters to ensure chunk size is less than %(max_page_size)d" +msgstr "" + +#: neutron/plugins/vmware/common/sync.py:564 +#, python-format +msgid "Fetching up to %s resources from NSX backend" +msgstr "" + +#: neutron/plugins/vmware/common/sync.py:584 +#, python-format +msgid "Total data size: %d" +msgstr "" + +#: neutron/plugins/vmware/common/sync.py:588 +#, python-format +msgid "" +"Fetched %(num_lswitches)d logical switches, %(num_lswitchports)d logical " +"switch ports,%(num_lrouters)d logical routers" +msgstr "" + +#: neutron/plugins/vmware/common/sync.py:604 +#, python-format +msgid "Running state synchronization task. Chunk: %s" +msgstr "" + +#: neutron/plugins/vmware/common/sync.py:614 +#, python-format +msgid "" +"An error occurred while communicating with NSX backend. 
Will retry " +"synchronization in %d seconds" +msgstr "" + +#: neutron/plugins/vmware/common/sync.py:618 +#, python-format +msgid "Time elapsed querying NSX: %s" +msgstr "" + +#: neutron/plugins/vmware/common/sync.py:625 +#, python-format +msgid "Number of chunks: %d" +msgstr "" + +#: neutron/plugins/vmware/common/sync.py:643 +#, python-format +msgid "Time elapsed hashing data: %s" +msgstr "" + +#: neutron/plugins/vmware/common/sync.py:655 +#, python-format +msgid "Synchronization for chunk %(chunk_num)d of %(total_chunks)d performed" +msgstr "" + +#: neutron/plugins/vmware/common/sync.py:667 +#, python-format +msgid "Time elapsed at end of sync: %s" +msgstr "" + +#: neutron/plugins/vmware/common/utils.py:66 +#, python-format +msgid "Specified name:'%s' exceeds maximum length. It will be truncated on NSX" +msgstr "" + +#: neutron/plugins/vmware/dbexts/db.py:76 +#, python-format +msgid "Port mapping for %s already available" +msgstr "" + +#: neutron/plugins/vmware/dbexts/db.py:123 +#, python-format +msgid "NSX identifiers for neutron port %s not yet stored in Neutron DB" +msgstr "" + +#: neutron/plugins/vmware/dbexts/db.py:134 +#, python-format +msgid "NSX identifiers for neutron router %s not yet stored in Neutron DB" +msgstr "" + +#: neutron/plugins/vmware/dbexts/db.py:149 +#, python-format +msgid "NSX identifiers for neutron security group %s not yet stored in Neutron DB" +msgstr "" + +#: neutron/plugins/vmware/dbexts/lsn_db.py:87 +#: neutron/plugins/vmware/dhcp_meta/lsnmanager.py:71 +#, python-format +msgid "Unable to find Logical Service Node for network %s" +msgstr "" + +#: neutron/plugins/vmware/dbexts/networkgw_db.py:45 +#, python-format +msgid "" +"Network Gateway '%(gateway_id)s' still has active mappings with one or " +"more neutron networks." 
+msgstr "" + +#: neutron/plugins/vmware/dbexts/networkgw_db.py:50 +#, python-format +msgid "Network Gateway %(gateway_id)s could not be found" +msgstr "" + +#: neutron/plugins/vmware/dbexts/networkgw_db.py:54 +#, python-format +msgid "" +"Network Gateway Device '%(device_id)s' is still used by one or more " +"network gateways." +msgstr "" + +#: neutron/plugins/vmware/dbexts/networkgw_db.py:59 +#, python-format +msgid "Network Gateway Device %(device_id)s could not be found." +msgstr "" + +#: neutron/plugins/vmware/dbexts/networkgw_db.py:63 +#, python-format +msgid "" +"Port '%(port_id)s' is owned by '%(device_owner)s' and therefore cannot be" +" deleted directly via the port API." +msgstr "" + +#: neutron/plugins/vmware/dbexts/networkgw_db.py:68 +#, python-format +msgid "" +"The specified mapping '%(mapping)s' is already in use on network gateway " +"'%(gateway_id)s'." +msgstr "" + +#: neutron/plugins/vmware/dbexts/networkgw_db.py:73 +#, python-format +msgid "" +"Multiple network connections found on '%(gateway_id)s' with provided " +"criteria." +msgstr "" + +#: neutron/plugins/vmware/dbexts/networkgw_db.py:78 +#, python-format +msgid "" +"The connection %(network_mapping_info)s was not found on the network " +"gateway '%(network_gateway_id)s'" +msgstr "" + +#: neutron/plugins/vmware/dbexts/networkgw_db.py:83 +#, python-format +msgid "The network gateway %(gateway_id)s cannot be updated or deleted" +msgstr "" + +#: neutron/plugins/vmware/dbexts/networkgw_db.py:191 +msgid "" +"A network identifier must be specified when connecting a network to a " +"network gateway. Unable to complete operation" +msgstr "" + +#: neutron/plugins/vmware/dbexts/networkgw_db.py:197 +#, python-format +msgid "" +"Invalid keys found among the ones provided in request body: " +"%(connection_attrs)s." 
+msgstr "" + +#: neutron/plugins/vmware/dbexts/networkgw_db.py:203 +msgid "" +"In order to specify a segmentation id the segmentation type must be " +"specified as well" +msgstr "" + +#: neutron/plugins/vmware/dbexts/networkgw_db.py:207 +msgid "Cannot specify a segmentation id when the segmentation type is flat" +msgstr "" + +#: neutron/plugins/vmware/dbexts/networkgw_db.py:262 +#, python-format +msgid "Created network gateway with id:%s" +msgstr "" + +#: neutron/plugins/vmware/dbexts/networkgw_db.py:274 +#, python-format +msgid "Updated network gateway with id:%s" +msgstr "" + +#: neutron/plugins/vmware/dbexts/networkgw_db.py:289 +#, python-format +msgid "Network gateway '%s' was destroyed." +msgstr "" + +#: neutron/plugins/vmware/dbexts/networkgw_db.py:306 +#, python-format +msgid "Connecting network '%(network_id)s' to gateway '%(network_gateway_id)s'" +msgstr "" + +#: neutron/plugins/vmware/dbexts/networkgw_db.py:347 +#, python-format +msgid "" +"Requested network '%(network_id)s' not found.Unable to create network " +"connection on gateway '%(network_gateway_id)s" +msgstr "" + +#: neutron/plugins/vmware/dbexts/networkgw_db.py:355 +#, python-format +msgid "" +"Gateway port for '%(network_gateway_id)s' created on network " +"'%(network_id)s':%(port_id)s" +msgstr "" + +#: neutron/plugins/vmware/dbexts/networkgw_db.py:371 +#, python-format +msgid "Ensured no Ip addresses are configured on port %s" +msgstr "" + +#: neutron/plugins/vmware/dbexts/networkgw_db.py:381 +#, python-format +msgid "" +"Disconnecting network '%(network_id)s' from gateway " +"'%(network_gateway_id)s'" +msgstr "" + +#: neutron/plugins/vmware/dbexts/networkgw_db.py:475 +#, python-format +msgid "Created network gateway device: %s" +msgstr "" + +#: neutron/plugins/vmware/dbexts/networkgw_db.py:486 +#, python-format +msgid "Updated network gateway device: %s" +msgstr "" + +#: neutron/plugins/vmware/dbexts/networkgw_db.py:499 +#, python-format +msgid "Deleted network gateway device: %s." 
+msgstr "" + +#: neutron/plugins/vmware/dbexts/nsxrouter.py:61 +#, python-format +msgid "Nsx router extension successfully processed for router:%s" +msgstr "" + +#: neutron/plugins/vmware/dbexts/qos_db.py:291 +#, python-format +msgid "DSCP value (%s) will be ignored with 'trusted' marking" +msgstr "" + +#: neutron/plugins/vmware/dbexts/vcns_db.py:77 +#, python-format +msgid "Rule Resource binding with id:%s not found!" +msgstr "" + +#: neutron/plugins/vmware/dbexts/vcns_db.py:94 +msgid "Rule Resource binding not found!" +msgstr "" + +#: neutron/plugins/vmware/dbexts/vcns_db.py:123 +#: neutron/plugins/vmware/dbexts/vcns_db.py:133 +#, python-format +msgid "VIP Resource binding with id:%s not found!" +msgstr "" + +#: neutron/plugins/vmware/dbexts/vcns_db.py:162 +#, python-format +msgid "" +"Pool Resource binding with edge_id:%(edge_id)s pool_vseid:%(pool_vseid)s " +"not found!" +msgstr "" + +#: neutron/plugins/vmware/dbexts/vcns_db.py:174 +#, python-format +msgid "Pool Resource binding with id:%s not found!" +msgstr "" + +#: neutron/plugins/vmware/dbexts/vcns_db.py:200 +#, python-format +msgid "Monitor Resource binding with id:%s not found!" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/lsnmanager.py:40 +msgid "" +"Pull LSN information from NSX in case it is missing from the local data " +"store. This is useful to rebuild the local store in case of server " +"recovery." 
+msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/lsnmanager.py:82 +#, python-format +msgid "Unable to create LSN for network %s" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/lsnmanager.py:90 +#, python-format +msgid "Unable to delete Logical Service Node %s" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/lsnmanager.py:107 +#, python-format +msgid "" +"Unable to find Logical Service Node Port for LSN %(lsn_id)s and subnet " +"%(subnet_id)s" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/lsnmanager.py:129 +#, python-format +msgid "" +"Unable to find Logical Service Node Port for LSN %(lsn_id)s and mac " +"address %(mac)s" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/lsnmanager.py:149 +#, python-format +msgid "Unable to create port for LSN %s" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/lsnmanager.py:157 +#, python-format +msgid "Unable to delete LSN Port %s" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/lsnmanager.py:174 +#, python-format +msgid "Metadata port not found while attempting to delete it from network %s" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/lsnmanager.py:177 +#, python-format +msgid "Unable to find Logical Services Node Port with MAC %s" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/lsnmanager.py:262 +#, python-format +msgid "" +"Unable to configure dhcp for Logical Service Node %(lsn_id)s and port " +"%(lsn_port_id)s" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/lsnmanager.py:283 +#, python-format +msgid "Unable to configure metadata for subnet %s" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/lsnmanager.py:305 +#, python-format +msgid "Error while configuring LSN port %s" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/lsnmanager.py:377 +#, python-format +msgid "Unable to save LSN for network %s" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/lsnmanager.py:443 +#, python-format +msgid "Unable to save LSN port for subnet %s" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/migration.py:81 +#, 
python-format +msgid "Port %s is already gone" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/migration.py:112 +msgid "LSN already exist" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/migration.py:116 +msgid "Cannot migrate an external network" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/migration.py:125 +msgid "Cannot migrate a 'metadata' network" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/migration.py:128 +msgid "Unable to support multiple subnets per network" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/nsx.py:39 +msgid "Comma separated list of additional domain name servers" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/nsx.py:47 +msgid "Default DHCP lease time" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/nsx.py:55 +msgid "IP address used by Metadata server." +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/nsx.py:59 +msgid "TCP Port used by Metadata server." +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/nsx.py:136 +#, python-format +msgid "" +"Error while creating subnet %(cidr)s for network %(network)s. 
Please, " +"contact administrator" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/nsx.py:206 +#: neutron/plugins/vmware/dhcp_meta/nsx.py:224 +#, python-format +msgid "Performing DHCP %(action)s for resource: %(resource)s" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/nsx.py:211 +#, python-format +msgid "Network %s is external: no LSN to create" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/nsx.py:219 +#, python-format +msgid "Logical Services Node for network %s configured successfully" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/nsx.py:241 +#, python-format +msgid "Error while configuring DHCP for port %s" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/nsx.py:253 +#, python-format +msgid "DHCP is disabled for subnet %s: nothing to do" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/nsx.py:272 +#, python-format +msgid "DHCP for port %s configured successfully" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/nsx.py:280 +#, python-format +msgid "Network %s is external: nothing to do" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/nsx.py:288 +#, python-format +msgid "Configuring metadata entry for port %s" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/nsx.py:300 +#, python-format +msgid "Metadata for port %s configured successfully" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/nsx.py:304 +#, python-format +msgid "Handle metadata access via router: %(r)s and interface %(i)s" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/nsx.py:321 +#, python-format +msgid "Metadata for router %s handled successfully" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/rpc.py:77 +#, python-format +msgid "Subnet %s does not have a gateway, the metadata route will not be created" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/rpc.py:101 +msgid "Metadata access network is disabled" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/rpc.py:104 +msgid "" +"Overlapping IPs must be enabled in order to setup the metadata access " +"network" +msgstr "" + 
+#: neutron/plugins/vmware/dhcp_meta/rpc.py:124 +#, python-format +msgid "" +"No router interface found for router '%s'. No metadata access network " +"should be created or destroyed" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/rpc.py:132 +#, python-format +msgid "" +"An error occurred while operating on the metadata access network for " +"router:'%s'" +msgstr "" + +#: neutron/plugins/vmware/extensions/networkgw.py:86 +msgid "Cannot create a gateway with an empty device list" +msgstr "" + +#: neutron/plugins/vmware/extensions/networkgw.py:102 +#, python-format +msgid "Unexpected keys found in device description:%s" +msgstr "" + +#: neutron/plugins/vmware/extensions/networkgw.py:106 +#, python-format +msgid "%s: provided data are not iterable" +msgstr "" + +#: neutron/plugins/vmware/extensions/networkgw.py:113 +msgid "A connector type is required to create a gateway device" +msgstr "" + +#: neutron/plugins/vmware/extensions/networkgw.py:122 +#, python-format +msgid "Unknown connector type: %s" +msgstr "" + +#: neutron/plugins/vmware/extensions/networkgw.py:129 +msgid "Number of network gateways allowed per tenant, -1 for unlimited" +msgstr "" + +#: neutron/plugins/vmware/extensions/qos.py:36 +msgid "Need to be admin in order to create queue called default" +msgstr "" + +#: neutron/plugins/vmware/extensions/qos.py:40 +msgid "Default queue already exists." +msgstr "" + +#: neutron/plugins/vmware/extensions/qos.py:44 +#, python-format +msgid "Invalid value for dscp %(data)s must be integer value between 0 and 63." +msgstr "" + +#: neutron/plugins/vmware/extensions/qos.py:49 +msgid "Invalid bandwidth rate, min greater than max." +msgstr "" + +#: neutron/plugins/vmware/extensions/qos.py:53 +#, python-format +msgid "Invalid bandwidth rate, %(data)s must be a non negative integer." 
+msgstr "" + +#: neutron/plugins/vmware/extensions/qos.py:58 +#, python-format +msgid "Queue %(id)s does not exist" +msgstr "" + +#: neutron/plugins/vmware/extensions/qos.py:62 +msgid "Unable to delete queue attached to port." +msgstr "" + +#: neutron/plugins/vmware/extensions/qos.py:66 +msgid "Port is not associated with lqueue" +msgstr "" + +#: neutron/plugins/vmware/extensions/qos.py:77 +#, python-format +msgid "'%s' must be a non negative integer." +msgstr "" + +#: neutron/plugins/vmware/nsxlib/__init__.py:77 +#, python-format +msgid "Error. %(type)s exception: %(exc)s." +msgstr "" + +#: neutron/plugins/vmware/nsxlib/__init__.py:81 +#, python-format +msgid "locals=[%s]" +msgstr "" + +#: neutron/plugins/vmware/nsxlib/lsn.py:173 +#, python-format +msgid "" +"Attempt to plug Logical Services Node %(lsn)s into network with port " +"%(port)s failed. PatchAttachment already exists with another port" +msgstr "" + +#: neutron/plugins/vmware/nsxlib/router.py:252 +#, python-format +msgid "Cannot update NSX routes %(routes)s for router %(router_id)s" +msgstr "" + +#: neutron/plugins/vmware/nsxlib/router.py:346 +#, python-format +msgid "Created logical port %(lport_uuid)s on logical router %(lrouter_uuid)s" +msgstr "" + +#: neutron/plugins/vmware/nsxlib/router.py:374 +#, python-format +msgid "Updated logical port %(lport_uuid)s on logical router %(lrouter_uuid)s" +msgstr "" + +#: neutron/plugins/vmware/nsxlib/router.py:385 +#, python-format +msgid "" +"Delete logical router port %(lport_uuid)s on logical router " +"%(lrouter_uuid)s" +msgstr "" + +#: neutron/plugins/vmware/nsxlib/router.py:449 +#, python-format +msgid "Invalid keys for NAT match: %s" +msgstr "" + +#: neutron/plugins/vmware/nsxlib/router.py:455 +#, python-format +msgid "Creating NAT rule: %s" +msgstr "" + +#: neutron/plugins/vmware/nsxlib/router.py:470 +msgid "" +"No SNAT rules cannot be applied as they are not available in this version" +" of the NSX platform" +msgstr "" + +#: 
neutron/plugins/vmware/nsxlib/router.py:475 +msgid "" +"No DNAT rules cannot be applied as they are not available in this version" +" of the NSX platform" +msgstr "" + +#: neutron/plugins/vmware/nsxlib/router.py:609 +#, python-format +msgid "Router Port %(lport_id)s not found on router %(lrouter_id)s" +msgstr "" + +#: neutron/plugins/vmware/nsxlib/router.py:614 +#, python-format +msgid "" +"An exception occurred while updating IP addresses on a router logical " +"port:%s" +msgstr "" + +#: neutron/plugins/vmware/nsxlib/secgroup.py:94 +#, python-format +msgid "Created Security Profile: %s" +msgstr "" + +#: neutron/plugins/vmware/nsxlib/secgroup.py:120 +#, python-format +msgid "Updated Security Profile: %s" +msgstr "" + +#: neutron/plugins/vmware/nsxlib/secgroup.py:140 +#, python-format +msgid "Unable to find security profile %s on NSX backend" +msgstr "" + +#: neutron/plugins/vmware/nsxlib/switch.py:130 +#, python-format +msgid "Created logical switch: %s" +msgstr "" + +#: neutron/plugins/vmware/nsxlib/switch.py:150 +#: neutron/plugins/vmware/nsxlib/switch.py:165 +#, python-format +msgid "Network not found, Error: %s" +msgstr "" + +#: neutron/plugins/vmware/nsxlib/switch.py:188 +msgid "Port or Network not found" +msgstr "" + +#: neutron/plugins/vmware/nsxlib/switch.py:247 +#, python-format +msgid "Lswitch %s not found in NSX" +msgstr "" + +#: neutron/plugins/vmware/nsxlib/switch.py:256 +msgid "Unable to get ports" +msgstr "" + +#: neutron/plugins/vmware/nsxlib/switch.py:273 +#, python-format +msgid "" +"Looking for port with q_port_id tag '%(neutron_port_id)s' on: " +"'%(lswitch_uuid)s'" +msgstr "" + +#: neutron/plugins/vmware/nsxlib/switch.py:281 +#, python-format +msgid "" +"Found '%(num_ports)d' ports with q_port_id tag: '%(neutron_port_id)s'. " +"Only 1 was expected." 
+msgstr "" + +#: neutron/plugins/vmware/nsxlib/switch.py:290 +#, python-format +msgid "get_port() %(network)s %(port)s" +msgstr "" + +#: neutron/plugins/vmware/nsxlib/switch.py:298 +#: neutron/plugins/vmware/nsxlib/switch.py:329 +#, python-format +msgid "Port or Network not found, Error: %s" +msgstr "" + +#: neutron/plugins/vmware/nsxlib/switch.py:324 +#, python-format +msgid "Updated logical port %(result)s on logical switch %(uuid)s" +msgstr "" + +#: neutron/plugins/vmware/nsxlib/switch.py:359 +#, python-format +msgid "Created logical port %(result)s on logical switch %(uuid)s" +msgstr "" + +#: neutron/plugins/vmware/nsxlib/switch.py:371 +#, python-format +msgid "Port not found, Error: %s" +msgstr "" + +#: neutron/plugins/vmware/nsxlib/versioning.py:56 +msgid "Operation may not be supported" +msgstr "" + +#: neutron/plugins/vmware/nsxlib/versioning.py:64 +msgid "" +"NSX version is not set. Unable to complete request correctly. Check log " +"for NSX communication errors." +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:211 +#, python-format +msgid "Unable to process default l2 gw service:%s" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:246 +#, python-format +msgid "Created NSX router port:%s" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:248 +#: neutron/plugins/vmware/plugins/service.py:440 +#, python-format +msgid "Unable to create port on NSX logical router %s" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:251 +#, python-format +msgid "" +"Unable to create logical router port for neutron port id %(port_id)s on " +"router %(nsx_router_id)s" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:329 +#, python-format +msgid "Attached %(att)s to NSX router port %(port)s" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:335 +#, python-format +msgid "" +"Unable to plug attachment in NSX logical router port %(r_port_id)s, " +"associated with Neutron %(q_port_id)s" +msgstr "" + +#: 
neutron/plugins/vmware/plugins/base.py:341 +#, python-format +msgid "" +"Unable to plug attachment in router port %(r_port_id)s for neutron port " +"id %(q_port_id)s on router %(router_id)s" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:387 +msgid "An exception occurred while selecting logical switch for the port" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:427 +#, python-format +msgid "" +"An exception occurred while creating the neutron port %s on the NSX " +"plaform" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:439 +#: neutron/plugins/vmware/plugins/base.py:491 +#: neutron/plugins/vmware/plugins/base.py:689 +#, python-format +msgid "" +"NSX plugin does not support regular VIF ports on external networks. Port " +"%s will be down." +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:460 +#, python-format +msgid "" +"_nsx_create_port completed for port %(name)s on network %(network_id)s. " +"The new port id is %(id)s." +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:471 +#, python-format +msgid "" +"Concurrent network deletion detected; Back-end Port %(nsx_id)s creation " +"to be rolled back for Neutron port: %(neutron_id)s" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:483 +#, python-format +msgid "NSX Port %s already gone" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:498 +#, python-format +msgid "Port '%s' was already deleted on NSX platform" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:505 +#, python-format +msgid "_nsx_delete_port completed for port %(port_id)s on network %(net_id)s" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:510 +#, python-format +msgid "Port %s not found in NSX" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:519 +#, python-format +msgid "" +"Neutron port %(port_id)s not found on NSX backend. Terminating delete " +"operation. 
A dangling router port might have been left on router " +"%(router_id)s" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:534 +#: neutron/plugins/vmware/plugins/base.py:1069 +#, python-format +msgid "" +"Ignoring exception as this means the peer for port '%s' has already been " +"deleted." +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:546 +#, python-format +msgid "" +"It is not allowed to create router interface ports on external networks " +"as '%s'" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:571 +#, python-format +msgid "" +"_nsx_create_router_port completed for port %(name)s on network " +"%(network_id)s. The new port id is %(id)s." +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:584 +#, python-format +msgid "" +"device_id field must be populated in order to create an external gateway " +"port for network %s" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:594 +#, python-format +msgid "The gateway port for the NSX router %s was not found on the backend" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:635 +#, python-format +msgid "" +"_nsx_create_ext_gw_port completed on external network %(ext_net_id)s, " +"attached to router:%(router_id)s. NSX port id is %(nsx_port_id)s" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:670 +#: neutron/plugins/vmware/plugins/base.py:1806 +#, python-format +msgid "Logical router resource %s not found on NSX platform" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:674 +#: neutron/plugins/vmware/plugins/base.py:1810 +msgid "Unable to update logical routeron NSX Platform" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:676 +#, python-format +msgid "" +"_nsx_delete_ext_gw_port completed on external network %(ext_net_id)s, " +"attached to NSX router:%(router_id)s" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:718 +#, python-format +msgid "" +"_nsx_create_l2_gw_port completed for port %(name)s on network " +"%(network_id)s. 
The new port id is %(id)s." +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:760 +#, python-format +msgid "%s required" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:765 +msgid "Segmentation ID cannot be specified with flat network type" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:769 +msgid "Segmentation ID must be specified with vlan network type" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:773 +#: neutron/plugins/vmware/plugins/base.py:789 +#, python-format +msgid "%(segmentation_id)s out of range (%(min_id)s through %(max_id)s)" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:795 +#, python-format +msgid "%(net_type_param)s %(net_type_value)s not supported" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:840 +#, python-format +msgid "No switch has available ports (%d checked)" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:874 +#, python-format +msgid "Maximum number of logical ports reached for logical network %s" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:966 +#, python-format +msgid "" +"Network with admin_state_up=False are not yet supported by this plugin. " +"Ignoring setting for network %s" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:1058 +#, python-format +msgid "" +"A nsx lport identifier was not found for neutron port '%s'. Unable to " +"remove the peer router port for this switch port" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:1077 +#, python-format +msgid "delete_network completed for tenant: %s" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:1080 +#: neutron/plugins/vmware/plugins/service.py:553 +#, python-format +msgid "Did not found lswitch %s in NSX" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:1115 +msgid "admin_state_up=False networks are not supported." 
+msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:1135 +#, python-format +msgid "Unable to find NSX mappings for neutron network:%s" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:1142 +#, python-format +msgid "" +"Logical switch update on NSX backend failed. Neutron network " +"id:%(net_id)s; NSX lswitch id:%(lswitch_id)s;Error:%(error)s" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:1213 +#, python-format +msgid "port created on NSX backend for tenant %(tenant_id)s: (%(id)s)" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:1216 +#, python-format +msgid "Logical switch for network %s was not found in NSX." +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:1227 +msgid "Unable to create port or set port attachment in NSX." +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:1315 +#, python-format +msgid "Updating port: %s" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:1350 +#, python-format +msgid "Unable to update port id: %s." +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:1433 +msgid "" +"Cannot create a distributed router with the NSX platform currently in " +"execution. Please, try without specifying the 'distributed' attribute." +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:1439 +msgid "Unable to create logical router on NSX Platform" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:1450 +#, python-format +msgid "" +"Unable to create L3GW port on logical router %(router_uuid)s. 
Verify " +"Default Layer-3 Gateway service %(def_l3_gw_svc)s id is correct" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:1460 +#, python-format +msgid "Unable to create router %s on NSX backend" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:1489 +#: neutron/plugins/vmware/plugins/base.py:1574 +#: neutron/plugins/vmware/plugins/service.py:202 +#: neutron/plugins/vmware/plugins/service.py:1232 +#, python-format +msgid "Network '%s' is not a valid external network" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:1538 +#, python-format +msgid "Failed to set gateway info for router being created:%s - removing router" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:1541 +#, python-format +msgid "" +"Create router failed while setting external gateway. Router:%s has been " +"removed from DB and backend" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:1583 +msgid "" +"'routes' cannot contain route '0.0.0.0/0', this must be updated through " +"the default gateway attribute" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:1599 +#, python-format +msgid "Logical router %s not found on NSX Platform" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:1603 +msgid "Unable to update logical router on NSX Platform" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:1605 +msgid "" +"Request cannot contain 'routes' with the NSX platform currently in " +"execution. Please, try without specifying the static routes." 
+msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:1662 +#, python-format +msgid "Logical router '%s' not found on NSX Platform" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:1666 +#, python-format +msgid "Unable to delete logical router '%s' on NSX Platform" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:1676 +#, python-format +msgid "" +"Unable to remove NSX mapping for Neutron router %(router_id)s because of " +"the following exception:%(d_exc)s" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:1744 +#, python-format +msgid "" +"Add_router_interface completed for subnet:%(subnet_id)s and " +"router:%(router_id)s" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:1842 +#, python-format +msgid "" +"An error occurred while removing NAT rules on the NSX platform for " +"floating ip:%s" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:1847 +msgid "An incorrect number of matching NAT rules was found on the NSX platform" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:1979 +#, python-format +msgid "" +"An error occurred while creating NAT rules on the NSX platform for " +"floating ip:%(floating_ip)s mapped to internal ip:%(internal_ip)s" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:1985 +msgid "Failed to update NAT rules for floatingip update" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:2022 +#, python-format +msgid "The port '%s' is not associated with floating IPs" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:2025 +#, python-format +msgid "Nat rules not found in nsx for port: %s" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:2061 +#, python-format +msgid "Unable to create l2_gw_service for: %s" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:2084 +msgid "" +"Unable to remove gateway service from NSX plaform - the resource was not " +"found" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:2113 +#, python-format +msgid "Unable to update name on NSX backend 
for network gateway: %s" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:2142 +#, python-format +msgid "" +"Rolling back database changes for gateway device %s because of an error " +"in the NSX backend" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:2187 +#: neutron/plugins/vmware/plugins/base.py:2225 +#, python-format +msgid "" +"Neutron gateway device: %(neutron_id)s; NSX transport node identifier: " +"%(nsx_id)s; Operational status: %(status)s." +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:2322 +#, python-format +msgid "" +"Removal of gateway device: %(neutron_id)s failed on NSX backend (NSX " +"id:%(nsx_id)s) because the NSX resource was not found" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:2329 +#, python-format +msgid "" +"Removal of gateway device: %(neutron_id)s failed on NSX backend (NSX " +"id:%(nsx_id)s). Neutron and NSX states have diverged." +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:2377 +#, python-format +msgid "" +"Error while updating security profile %(uuid)s with name %(name)s: " +"%(error)s." 
+msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:2409 +#, python-format +msgid "" +"The NSX security profile %(sec_profile_id)s, associated with the Neutron " +"security group %(sec_group_id)s was not found on the backend" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:2417 +#, python-format +msgid "" +"An exception occurred while removing the NSX security profile " +"%(sec_profile_id)s, associated with Netron security group " +"%(sec_group_id)s" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:2424 +#, python-format +msgid "Unable to remove security group %s from backend" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:2437 +#, python-format +msgid "Port values not valid for protocol: %s" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:145 +#, python-format +msgid "EDGE: router = %s" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:176 +msgid "EDGE: _vcns_create_ext_gw_port" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:185 +msgid "EDGE: _vcns_delete_ext_gw_port" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:340 +#, python-format +msgid "VCNS: delete default gateway %s" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:425 +#, python-format +msgid "An exception occurred while creating a port on lswitch %s" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:473 +#, python-format +msgid "Unable to create integration logic switch for router %s" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:483 +#, python-format +msgid "Unable to add router interface to integration lswitch for router %s" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:493 +#, python-format +msgid "Unable to create advance service router for %s" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:525 +msgid "router_id is not provided!" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:529 +#, python-format +msgid "router_id:%s is not an advanced router!" 
+msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:895 +#, python-format +msgid "Failed to create firewall on vShield Edge bound on router %s" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:903 +msgid "Bad Firewall request Input" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:920 +msgid "A firewall is already associated with the router" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1117 +#, python-format +msgid "Failed to find the edge with vip_id: %s" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1154 +#, python-format +msgid "" +"Operation can't be performed, Since resource %(model)s : %(id)s is in " +"DELETEing status!" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1163 +#, python-format +msgid "Resource %(model)s : %(id)s not found!" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1188 +#, python-format +msgid "Failed to create healthmonitor associated with pool id: %s!" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1205 +msgid "Failed to create pool on vshield edge" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1224 +msgid "create_vip() called" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1256 +msgid "Failed to create vip!" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1296 +#, python-format +msgid "Failed to update vip with id: %s!" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1313 +#, python-format +msgid "Failed to delete vip with id: %s!" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1369 +#, python-format +msgid "Failed to update pool with id: %s!" 
+msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1391 +#: neutron/plugins/vmware/plugins/service.py:1438 +#: neutron/plugins/vmware/plugins/service.py:1461 +msgid "Failed to update pool with the member" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1417 +msgid "Failed to update old pool with the member" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1481 +#, python-format +msgid "Failed to update monitor with id: %s!" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1491 +msgid "Vcns right now can only support one monitor per pool" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1520 +msgid "Failed to associate monitor with pool!" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1551 +msgid "Failed to update pool with pool_monitor!" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1568 +#, python-format +msgid "" +"Failed to update ipsec vpn configuration on edge, since the router: %s " +"does not have a gateway yet!" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1593 +msgid "Bad or unsupported Input request!" 
+msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1596 +#, python-format +msgid "" +"Failed to update ipsec VPN configuration with vpnservice: " +"%(vpnservice_id)s on vShield Edge: %(edge_id)s" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1603 +msgid "create_vpnservice() called" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1607 +#, python-format +msgid "a vpnservice is already associated with the router: %s" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1720 +#, python-format +msgid "Start deploying %(edge_id)s for router %(name)s" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1726 +#: neutron/plugins/vmware/plugins/service.py:1763 +#, python-format +msgid "Failed to deploy Edge for router %s" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1744 +#, python-format +msgid "Router %s not found" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1747 +#, python-format +msgid "Successfully deployed %(edge_id)s for router %(name)s" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1779 +#, python-format +msgid "interface_update_result %d" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1782 +#, python-format +msgid "snat_create_result %d" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1785 +#, python-format +msgid "snat_delete_result %d" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1788 +#, python-format +msgid "dnat_create_result %d" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1791 +#, python-format +msgid "dnat_delete_result %d" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1794 +#, python-format +msgid "routes_update_result %d" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1797 +#, python-format +msgid "nat_update_result %d" +msgstr "" + +#: neutron/plugins/vmware/shell/commands.py:24 +#, python-format +msgid "" +"\n" +"Service type = %s\n" +msgstr "" + +#: neutron/plugins/vmware/shell/commands.py:27 
+#, python-format +msgid "Service uuids = %s\n" +msgstr "" + +#: neutron/plugins/vmware/shell/commands.py:28 +#, python-format +msgid "" +"Port uuids = %s\n" +"\n" +msgstr "" + +#: neutron/plugins/vmware/shell/commands.py:37 +msgid "ID or name of network to run report on" +msgstr "" + +#: neutron/plugins/vmware/shell/commands.py:46 +msgid "Migration report is:\n" +msgstr "" + +#: neutron/plugins/vmware/shell/commands.py:56 +msgid "ID or name of network to migrate" +msgstr "" + +#: neutron/plugins/vmware/shell/commands.py:66 +msgid "Migration has been successful:\n" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:137 +#, python-format +msgid "" +"VCNS: Failed to get edge status:\n" +"%s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:164 +#, python-format +msgid "VCNS: start updating vnic %s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:169 +#, python-format +msgid "" +"VCNS: Failed to update vnic %(config)s:\n" +"%(response)s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:175 +#, python-format +msgid "VCNS: Failed to update vnic %d" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:183 +#, python-format +msgid "VCNS: update vnic %(index)d: %(addr)s %(netmask)s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:192 +#, python-format +msgid "Vnic %d currently not supported" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:213 +#, python-format +msgid "VCNS: start deploying edge %s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:221 +#, python-format +msgid "VCNS: deploying edge %s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:226 +#, python-format +msgid "VCNS: deploy edge failed for router %s." +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:245 +#, python-format +msgid "VCNS: Edge %s status query failed." 
+msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:250 +#, python-format +msgid "VCNS: Unable to retrieve edge %(edge_id)s status. Retry %(retries)d." +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:257 +#, python-format +msgid "VCNS: Unable to retrieve edge %s status. Abort." +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:261 +#, python-format +msgid "VCNS: Edge %s status" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:268 +#, python-format +msgid "VCNS: Failed to deploy edge %(edge_id)s for %(name)s, status %(status)d" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:275 +#, python-format +msgid "VCNS: Edge %(edge_id)s deployed for router %(name)s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:282 +#, python-format +msgid "VCNS: start destroying edge %s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:290 +#, python-format +msgid "" +"VCNS: Failed to delete %(edge_id)s:\n" +"%(response)s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:296 +#, python-format +msgid "VCNS: Failed to delete %s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:306 +#, python-format +msgid "" +"VCNS: Failed to get edges:\n" +"%s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:384 +#, python-format +msgid "" +"VCNS: Failed to get nat config:\n" +"%s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:391 +#, python-format +msgid "VCNS: start creating nat rules: %s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:407 +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:532 +#, python-format +msgid "" +"VCNS: Failed to create snat rule:\n" +"%s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:415 +#, python-format +msgid "VCNS: create snat rule 
%(src)s/%(translated)s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:438 +#, python-format +msgid "VCNS: start deleting %(type)s rules: %(addr)s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:449 +#, python-format +msgid "" +"VCNS: Failed to delete snat rule:\n" +"%s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:456 +#, python-format +msgid "VCNS: delete snat rule %s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:474 +#, python-format +msgid "VCNS: create dnat rule %(dst)s/%(translated)s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:495 +#, python-format +msgid "VCNS: delete dnat rule %s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:519 +#, python-format +msgid "VCNS: start updating nat rules: %s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:540 +#, python-format +msgid "" +"VCNS: update nat rule\n" +"SNAT:%(snat)s\n" +"DNAT:%(dnat)s\n" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:578 +#, python-format +msgid "VCNS: start updating routes for %s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:602 +#, python-format +msgid "" +"VCNS: Failed to update routes:\n" +"%s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:654 +msgid "Failed to get service config" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:666 +msgid "Failed to enable loadbalancer service config" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_firewall_driver.py:43 +#, python-format +msgid "Invalid action value %s in a firewall rule" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_firewall_driver.py:52 +#, python-format +msgid "Invalid action value %s in a vshield firewall rule" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_firewall_driver.py:192 +#, python-format +msgid "Failed to get firewall 
 with edge id: %s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_firewall_driver.py:210 +#, python-format +msgid "No rule id:%s found in the edge_firewall_binding" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_firewall_driver.py:220 +#, python-format +msgid "Failed to get firewall rule: %(rule_id)s with edge_id: %(edge_id)s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_firewall_driver.py:236 +#, python-format +msgid "Failed to update firewall with edge_id: %s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_firewall_driver.py:248 +#, python-format +msgid "Failed to delete firewall with edge_id:%s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_firewall_driver.py:262 +#, python-format +msgid "Failed to update firewall rule: %(rule_id)s with edge_id: %(edge_id)s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_firewall_driver.py:275 +#, python-format +msgid "Failed to delete firewall rule: %(rule_id)s with edge_id: %(edge_id)s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_firewall_driver.py:292 +#: neutron/plugins/vmware/vshield/edge_firewall_driver.py:320 +#, python-format +msgid "Failed to add firewall rule above: %(rule_id)s with edge_id: %(edge_id)s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_firewall_driver.py:331 +#, python-format +msgid "Failed to append a firewall rule with edge_id: %s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_firewall_driver.py:352 +msgid "Can't execute insert rule operation without reference rule_id" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_ipsecvpn_driver.py:50 +#, python-format +msgid "" +"Unsupported ike_version: %s! Only 'v1' ike version is supported on " +"vshield Edge!" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_ipsecvpn_driver.py:64 +msgid "" +"IKEPolicy and IPsecPolicy should have consistent auth_algorithm, " +"encryption_algorithm and pfs for VSE!"
+msgstr "" + +#: neutron/plugins/vmware/vshield/edge_ipsecvpn_driver.py:72 +#, python-format +msgid "" +"Unsupported encryption_algorithm: %s! '3des', 'aes-128' and 'aes-256' are" +" supported on VSE right now." +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_ipsecvpn_driver.py:81 +#, python-format +msgid "Unsupported pfs: %s! 'group2' and 'group5' are supported on VSE right now." +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_ipsecvpn_driver.py:89 +#, python-format +msgid "" +"Unsupported transform protocol: %s! 'esp' is supported by default on VSE " +"right now." +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_ipsecvpn_driver.py:98 +#, python-format +msgid "" +"Unsupported encapsulation mode: %s! 'tunnel' is supported by default on " +"VSE right now." +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_ipsecvpn_driver.py:136 +#, python-format +msgid "Failed to update ipsec vpn configuration with edge_id: %s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_ipsecvpn_driver.py:143 +#, python-format +msgid "IPsec config not found on edge: %s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_ipsecvpn_driver.py:146 +#, python-format +msgid "Failed to delete ipsec vpn configuration with edge_id: %s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_loadbalancer_driver.py:156 +#, python-format +msgid "Invalid %(protocol)s persistence method: %(type)s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_loadbalancer_driver.py:182 +#, python-format +msgid "Failed to create app profile on edge: %s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_loadbalancer_driver.py:193 +#, python-format +msgid "Failed to create vip on vshield edge: %s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_loadbalancer_driver.py:211 +#, python-format +msgid "vip_binding not found with id: %(id)s edge_id: %(edge_id)s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_loadbalancer_driver.py:228 +msgid "Failed to get vip on edge" +msgstr "" + +#: 
neutron/plugins/vmware/vshield/edge_loadbalancer_driver.py:245 +#, python-format +msgid "Failed to update app profile on edge: %s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_loadbalancer_driver.py:253 +#, python-format +msgid "Failed to update vip on edge: %s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_loadbalancer_driver.py:264 +#, python-format +msgid "vip not found on edge: %s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_loadbalancer_driver.py:267 +#, python-format +msgid "Failed to delete vip on edge: %s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_loadbalancer_driver.py:272 +#, python-format +msgid "app profile not found on edge: %s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_loadbalancer_driver.py:275 +#, python-format +msgid "Failed to delete app profile on edge: %s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_loadbalancer_driver.py:286 +msgid "Failed to create pool" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_loadbalancer_driver.py:303 +#, python-format +msgid "pool_binding not found with id: %(id)s edge_id: %(edge_id)s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_loadbalancer_driver.py:313 +msgid "Failed to get pool on edge" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_loadbalancer_driver.py:325 +msgid "Failed to update pool" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_loadbalancer_driver.py:335 +msgid "Failed to delete pool" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_loadbalancer_driver.py:345 +#, python-format +msgid "Failed to create monitor on edge: %s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_loadbalancer_driver.py:363 +#, python-format +msgid "monitor_binding not found with id: %(id)s edge_id: %(edge_id)s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_loadbalancer_driver.py:373 +#, python-format +msgid "Failed to get monitor on edge: %s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_loadbalancer_driver.py:390 +#, 
python-format +msgid "Failed to update monitor on edge: %s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_loadbalancer_driver.py:401 +msgid "Failed to delete monitor" +msgstr "" + +#: neutron/plugins/vmware/vshield/vcns.py:56 +#, python-format +msgid "VcnsApiHelper('%(method)s', '%(uri)s', '%(body)s')" +msgstr "" + +#: neutron/plugins/vmware/vshield/vcns.py:64 +#, python-format +msgid "Header: '%s'" +msgstr "" + +#: neutron/plugins/vmware/vshield/vcns.py:65 +#, python-format +msgid "Content: '%s'" +msgstr "" + +#: neutron/plugins/vmware/vshield/common/exceptions.py:35 +#, python-format +msgid "%(resource)s not found: %(msg)s" +msgstr "" + +#: neutron/plugins/vmware/vshield/common/exceptions.py:39 +#, python-format +msgid "An unknown exception %(status)s occurred: %(response)s." +msgstr "" + +#: neutron/plugins/vmware/vshield/common/exceptions.py:50 +#, python-format +msgid "Resource %(uri)s has been redirected" +msgstr "" + +#: neutron/plugins/vmware/vshield/common/exceptions.py:54 +#, python-format +msgid "Request %(uri)s is Bad, response %(response)s" +msgstr "" + +#: neutron/plugins/vmware/vshield/common/exceptions.py:58 +#, python-format +msgid "Forbidden: %(uri)s" +msgstr "" + +#: neutron/plugins/vmware/vshield/common/exceptions.py:62 +#, python-format +msgid "Resource %(uri)s not found" +msgstr "" + +#: neutron/plugins/vmware/vshield/common/exceptions.py:66 +#, python-format +msgid "Media Type %(uri)s is not supported" +msgstr "" + +#: neutron/plugins/vmware/vshield/common/exceptions.py:70 +#, python-format +msgid "Service Unavailable: %(uri)s" +msgstr "" + +#: neutron/plugins/vmware/vshield/tasks/tasks.py:46 +#, python-format +msgid "Invalid state %(state)d" +msgstr "" + +#: neutron/plugins/vmware/vshield/tasks/tasks.py:50 +#, python-format +msgid "State %(state)d skipped. 
Current state %(current)d" +msgstr "" + +#: neutron/plugins/vmware/vshield/tasks/tasks.py:96 +#, python-format +msgid "Task %(task)s encountered exception in %(func)s at state %(state)s" +msgstr "" + +#: neutron/plugins/vmware/vshield/tasks/tasks.py:182 +#, python-format +msgid "Start task %s" +msgstr "" + +#: neutron/plugins/vmware/vshield/tasks/tasks.py:188 +#: neutron/plugins/vmware/vshield/tasks/tasks.py:208 +#: neutron/plugins/vmware/vshield/tasks/tasks.py:231 +#, python-format +msgid "Task %(task)s encountered exception in %(cb)s" +msgstr "" + +#: neutron/plugins/vmware/vshield/tasks/tasks.py:194 +#: neutron/plugins/vmware/vshield/tasks/tasks.py:213 +#, python-format +msgid "Task %(task)s return %(status)s" +msgstr "" + +#: neutron/plugins/vmware/vshield/tasks/tasks.py:296 +msgid "Stopping TaskManager" +msgstr "" + +#: neutron/plugins/vmware/vshield/tasks/tasks.py:321 +msgid "TaskManager terminating because of an exception" +msgstr "" + +#: neutron/plugins/vmware/vshield/tasks/tasks.py:343 +msgid "TaskManager terminated" +msgstr "" + +#: neutron/plugins/vmware/vshield/tasks/tasks.py:375 +msgid "Exception in _check_pending_tasks" +msgstr "" + +#: neutron/scheduler/dhcp_agent_scheduler.py:53 +#, python-format +msgid "Agent %s already present" +msgstr "" + +#: neutron/scheduler/dhcp_agent_scheduler.py:54 +#, python-format +msgid "" +"Network %(network_id)s is scheduled to be hosted by DHCP agent " +"%(agent_id)s" +msgstr "" + +#: neutron/scheduler/dhcp_agent_scheduler.py:72 +#, python-format +msgid "Network %s is hosted already" +msgstr "" + +#: neutron/scheduler/dhcp_agent_scheduler.py:81 +#: neutron/scheduler/dhcp_agent_scheduler.py:90 +msgid "No more DHCP agents" +msgstr "" + +#: neutron/scheduler/dhcp_agent_scheduler.py:112 +#, python-format +msgid "DHCP agent %s is not active" +msgstr "" + +#: neutron/scheduler/dhcp_agent_scheduler.py:119 +msgid "No non-hosted networks" +msgstr "" + +#: neutron/scheduler/l3_agent_scheduler.py:65 +#, python-format +msgid "No 
enabled L3 agent on host %s" +msgstr "" + +#: neutron/scheduler/l3_agent_scheduler.py:70 +#, python-format +msgid "L3 agent %s is not active" +msgstr "" + +#: neutron/scheduler/l3_agent_scheduler.py:78 +#: neutron/scheduler/l3_agent_scheduler.py:129 +#, python-format +msgid "Router %(router_id)s has already been hosted by L3 agent %(agent_id)s" +msgstr "" + +#: neutron/scheduler/l3_agent_scheduler.py:97 +msgid "No non-hosted routers" +msgstr "" + +#: neutron/scheduler/l3_agent_scheduler.py:111 +#, python-format +msgid "No routers compatible with L3 agent configuration on host %s" +msgstr "" + +#: neutron/scheduler/l3_agent_scheduler.py:137 +msgid "No active L3 agents" +msgstr "" + +#: neutron/scheduler/l3_agent_scheduler.py:142 +#, python-format +msgid "No L3 agents can host the router %s" +msgstr "" + +#: neutron/scheduler/l3_agent_scheduler.py:155 +#, python-format +msgid "Router %(router_id)s is scheduled to L3 agent %(agent_id)s" +msgstr "" + +#: neutron/server/__init__.py:42 +msgid "" +"ERROR: Unable to find configuration file via the default search paths " +"(~/.neutron/, ~/, /etc/neutron/, /etc/) and the '--config-file' option!" +msgstr "" + +#: neutron/server/__init__.py:54 +msgid "RPC was already started in parent process by plugin." +msgstr "" + +#: neutron/server/__init__.py:66 +#, python-format +msgid "ERROR: %s" +msgstr "" + +#: neutron/services/provider_configuration.py:28 +msgid "" +"Defines providers for advanced services using the format: " +"::[:default]" +msgstr "" + +#: neutron/services/provider_configuration.py:46 +#, python-format +msgid "Provider name is limited by 255 characters: %s" +msgstr "" + +#: neutron/services/provider_configuration.py:55 +msgid "Invalid service provider format" +msgstr "" + +#: neutron/services/provider_configuration.py:63 +#, python-format +msgid "Invalid provider format. 
Last part should be 'default' or empty: %s" +msgstr "" + +#: neutron/services/provider_configuration.py:69 +#, python-format +msgid "Service type '%(svc_type)s' is not allowed, allowed types: %(allowed)s" +msgstr "" + +#: neutron/services/provider_configuration.py:83 +#, python-format +msgid "" +"Service provider '%(provider)s' could not be found for service type " +"%(service_type)s" +msgstr "" + +#: neutron/services/provider_configuration.py:88 +#, python-format +msgid "Service type %(service_type)s does not have a default service provider" +msgstr "" + +#: neutron/services/provider_configuration.py:93 +#, python-format +msgid "" +"Resource '%(resource_id)s' is already associated with provider " +"'%(provider)s' for service type '%(service_type)s'" +msgstr "" + +#: neutron/services/provider_configuration.py:106 +#, python-format +msgid "Driver %s is not unique across providers" +msgstr "" + +#: neutron/services/provider_configuration.py:116 +#, python-format +msgid "Multiple default providers for service %s" +msgstr "" + +#: neutron/services/provider_configuration.py:127 +#, python-format +msgid "Multiple providers specified for service %s" +msgstr "" + +#: neutron/services/service_base.py:72 +#, python-format +msgid "No providers specified for '%s' service, exiting" +msgstr "" + +#: neutron/services/service_base.py:83 +#, python-format +msgid "Loaded '%(provider)s' provider for service %(service_type)s" +msgstr "" + +#: neutron/services/service_base.py:89 +#, python-format +msgid "Error loading provider '%(provider)s' for service %(service_type)s" +msgstr "" + +#: neutron/services/service_base.py:100 +#, python-format +msgid "Default provider is not specified for service type %s" +msgstr "" + +#: neutron/services/firewall/fwaas_plugin.py:45 +msgid "set_firewall_status() called" +msgstr "" + +#: neutron/services/firewall/fwaas_plugin.py:53 +#, python-format +msgid "Firewall %(fw_id)s in PENDING_DELETE state, not changing to %(status)s" +msgstr "" + +#: 
neutron/services/firewall/fwaas_plugin.py:68 +msgid "firewall_deleted() called" +msgstr "" + +#: neutron/services/firewall/fwaas_plugin.py:76 +#, python-format +msgid "Firewall %(fw)s unexpectedly deleted by agent, status was %(status)s" +msgstr "" + +#: neutron/services/firewall/fwaas_plugin.py:84 +msgid "get_firewalls_for_tenant() called" +msgstr "" + +#: neutron/services/firewall/fwaas_plugin.py:93 +msgid "get_firewalls_for_tenant_without_rules() called" +msgstr "" + +#: neutron/services/firewall/fwaas_plugin.py:99 +msgid "get_tenants_with_firewalls() called" +msgstr "" + +#: neutron/services/firewall/fwaas_plugin.py:147 +#, python-format +msgid "" +"Exceeded allowed count of firewalls for tenant %(tenant_id)s. Only one " +"firewall is supported per tenant." +msgstr "" + +#: neutron/services/firewall/agents/firewall_agent_api.py:33 +msgid "Name of the FWaaS Driver" +msgstr "" + +#: neutron/services/firewall/agents/firewall_agent_api.py:37 +msgid "Enable FWaaS" +msgstr "" + +#: neutron/services/firewall/agents/l3reference/firewall_l3_agent.py:45 +msgid "Retrieve Firewall with rules from Plugin" +msgstr "" + +#: neutron/services/firewall/agents/l3reference/firewall_l3_agent.py:54 +msgid "Retrieve Tenants with Firewalls configured from Plugin" +msgstr "" + +#: neutron/services/firewall/agents/l3reference/firewall_l3_agent.py:66 +msgid "Initializing firewall agent" +msgstr "" + +#: neutron/services/firewall/agents/l3reference/firewall_l3_agent.py:74 +#, python-format +msgid "FWaaS Driver Loaded: '%s'" +msgstr "" + +#: neutron/services/firewall/agents/l3reference/firewall_l3_agent.py:77 +#, python-format +msgid "Error importing FWaaS device driver: %s" +msgstr "" + +#: neutron/services/firewall/agents/l3reference/firewall_l3_agent.py:114 +#, python-format +msgid "%(func_name)s from agent for fw: %(fwid)s" +msgstr "" + +#: neutron/services/firewall/agents/l3reference/firewall_l3_agent.py:122 +#, python-format +msgid "No Routers on tenant: %s" +msgstr "" + +#: 
neutron/services/firewall/agents/l3reference/firewall_l3_agent.py:129 +#: neutron/services/firewall/agents/l3reference/firewall_l3_agent.py:262 +#, python-format +msgid "Apply fw on Router List: '%s'" +msgstr "" + +#: neutron/services/firewall/agents/l3reference/firewall_l3_agent.py:141 +#, python-format +msgid "Firewall Driver Error for %(func_name)s for fw: %(fwid)s" +msgstr "" + +#: neutron/services/firewall/agents/l3reference/firewall_l3_agent.py:156 +#, python-format +msgid "FWaaS RPC failure in %(func_name)s for fw: %(fwid)s" +msgstr "" + +#: neutron/services/firewall/agents/l3reference/firewall_l3_agent.py:173 +#: neutron/services/firewall/agents/l3reference/firewall_l3_agent.py:189 +#, python-format +msgid "Firewall Driver Error on fw state %(fwmsg)s for fw: %(fwid)s" +msgstr "" + +#: neutron/services/firewall/agents/l3reference/firewall_l3_agent.py:201 +#, python-format +msgid "Process router add, router_id: '%s'" +msgstr "" + +#: neutron/services/firewall/agents/l3reference/firewall_l3_agent.py:212 +#, python-format +msgid "Process router add, fw_list: '%s'" +msgstr "" + +#: neutron/services/firewall/agents/l3reference/firewall_l3_agent.py:229 +#, python-format +msgid "FWaaS RPC info call failed for '%s'." 
+msgstr "" + +#: neutron/services/firewall/agents/l3reference/firewall_l3_agent.py:244 +#, python-format +msgid "Tenants with Firewalls: '%s'" +msgstr "" + +#: neutron/services/firewall/agents/l3reference/firewall_l3_agent.py:254 +#, python-format +msgid "Router List: '%s'" +msgstr "" + +#: neutron/services/firewall/agents/l3reference/firewall_l3_agent.py:256 +#, python-format +msgid "fw_list: '%s'" +msgstr "" + +#: neutron/services/firewall/agents/l3reference/firewall_l3_agent.py:273 +msgid "Failed fwaas process services sync" +msgstr "" + +#: neutron/services/firewall/agents/varmour/varmour_api.py:31 +msgid "vArmour director ip" +msgstr "" + +#: neutron/services/firewall/agents/varmour/varmour_api.py:33 +msgid "vArmour director port" +msgstr "" + +#: neutron/services/firewall/agents/varmour/varmour_api.py:35 +msgid "vArmour director username" +msgstr "" + +#: neutron/services/firewall/agents/varmour/varmour_api.py:37 +msgid "vArmour director password" +msgstr "" + +#: neutron/services/firewall/agents/varmour/varmour_api.py:47 +msgid "An unknown exception." +msgstr "" + +#: neutron/services/firewall/agents/varmour/varmour_api.py:61 +msgid "Invalid login credential." 
+msgstr "" + +#: neutron/services/firewall/agents/varmour/varmour_api.py:67 +msgid "vArmourRestAPI: started" +msgstr "" + +#: neutron/services/firewall/agents/varmour/varmour_api.py:100 +#, python-format +msgid "vArmourRestAPI: %(server)s %(port)s" +msgstr "" + +#: neutron/services/firewall/agents/varmour/varmour_api.py:106 +#, python-format +msgid "vArmourRestAPI Sending: %(method)s %(action)s %(headers)s %(body_data)s" +msgstr "" + +#: neutron/services/firewall/agents/varmour/varmour_api.py:117 +#, python-format +msgid "vArmourRestAPI Response: %(status)s %(resp_str)s" +msgstr "" + +#: neutron/services/firewall/agents/varmour/varmour_api.py:125 +msgid "vArmourRestAPI: Could not establish HTTP connection" +msgstr "" + +#: neutron/services/firewall/agents/varmour/varmour_router.py:51 +msgid "vArmourL3NATAgent: __init__" +msgstr "" + +#: neutron/services/firewall/agents/varmour/varmour_router.py:65 +#, python-format +msgid "_router_added: %s" +msgstr "" + +#: neutron/services/firewall/agents/varmour/varmour_router.py:72 +#, python-format +msgid "_router_removed: %s" +msgstr "" + +#: neutron/services/firewall/agents/varmour/varmour_router.py:113 +#, python-format +msgid "_va_unset_zone_interfaces: %s" +msgstr "" + +#: neutron/services/firewall/agents/varmour/varmour_router.py:145 +#, python-format +msgid "_va_set_interface_ip: %(pif)s %(cidr)s" +msgstr "" + +#: neutron/services/firewall/agents/varmour/varmour_router.py:165 +#, python-format +msgid "_va_config_trusted_zone: %s" +msgstr "" + +#: neutron/services/firewall/agents/varmour/varmour_router.py:196 +#, python-format +msgid "_va_config_untrusted_zone: %s" +msgstr "" + +#: neutron/services/firewall/agents/varmour/varmour_router.py:211 +#, python-format +msgid "_va_config_untrusted_zone: gw=%r" +msgstr "" + +#: neutron/services/firewall/agents/varmour/varmour_router.py:224 +#, python-format +msgid "_va_config_router_snat_rules: %s" +msgstr "" + +#: neutron/services/firewall/agents/varmour/varmour_router.py:256 
+#, python-format +msgid "_va_config_floating_ips: %s" +msgstr "" + +#: neutron/services/firewall/agents/varmour/varmour_router.py:278 +#, python-format +msgid "process_router: %s" +msgstr "" + +#: neutron/services/firewall/agents/varmour/varmour_router.py:289 +msgid "Unable to parse interface mapping." +msgstr "" + +#: neutron/services/firewall/agents/varmour/varmour_router.py:292 +msgid "Unable to read interface mapping." +msgstr "" + +#: neutron/services/firewall/agents/varmour/varmour_router.py:311 +#, python-format +msgid "external_gateway_added: %s" +msgstr "" + +#: neutron/services/firewall/drivers/linux/iptables_fwaas.py:49 +msgid "Initializing fwaas iptables driver" +msgstr "" + +#: neutron/services/firewall/drivers/linux/iptables_fwaas.py:52 +#, python-format +msgid "Creating firewall %(fw_id)s for tenant %(tid)s)" +msgstr "" + +#: neutron/services/firewall/drivers/linux/iptables_fwaas.py:61 +#, python-format +msgid "Failed to create firewall: %s" +msgstr "" + +#: neutron/services/firewall/drivers/linux/iptables_fwaas.py:65 +#, python-format +msgid "Deleting firewall %(fw_id)s for tenant %(tid)s)" +msgstr "" + +#: neutron/services/firewall/drivers/linux/iptables_fwaas.py:77 +#, python-format +msgid "Failed to delete firewall: %s" +msgstr "" + +#: neutron/services/firewall/drivers/linux/iptables_fwaas.py:81 +#, python-format +msgid "Updating firewall %(fw_id)s for tenant %(tid)s)" +msgstr "" + +#: neutron/services/firewall/drivers/linux/iptables_fwaas.py:90 +#, python-format +msgid "Failed to update firewall: %s" +msgstr "" + +#: neutron/services/firewall/drivers/linux/iptables_fwaas.py:94 +#, python-format +msgid "Applying firewall %(fw_id)s for tenant %(tid)s)" +msgstr "" + +#: neutron/services/firewall/drivers/linux/iptables_fwaas.py:113 +#, python-format +msgid "Failed to apply default policy on firewall: %s" +msgstr "" + +#: neutron/services/firewall/drivers/varmour/varmour_fwaas.py:30 +msgid "Initializing fwaas vArmour driver" +msgstr "" + +#: 
neutron/services/firewall/drivers/varmour/varmour_fwaas.py:35 +#, python-format +msgid "create_firewall (%s)" +msgstr "" + +#: neutron/services/firewall/drivers/varmour/varmour_fwaas.py:40 +#, python-format +msgid "update_firewall (%s)" +msgstr "" + +#: neutron/services/firewall/drivers/varmour/varmour_fwaas.py:48 +#, python-format +msgid "delete_firewall (%s)" +msgstr "" + +#: neutron/services/firewall/drivers/varmour/varmour_fwaas.py:53 +#, python-format +msgid "apply_default_policy (%s)" +msgstr "" + +#: neutron/services/firewall/drivers/varmour/varmour_fwaas.py:63 +#, python-format +msgid "Updating firewall (%s)" +msgstr "" + +#: neutron/services/firewall/drivers/varmour/varmour_fwaas.py:112 +msgid "Unsupported IP version rule." +msgstr "" + +#: neutron/services/l3_router/l3_apic.py:55 +msgid "L3 Router Service Plugin for basic L3 using the APIC" +msgstr "" + +#: neutron/services/l3_router/l3_apic.py:96 +#, python-format +msgid "Error attaching subnet %(subnet_id)s to router %(router_id)s" +msgstr "" + +#: neutron/services/l3_router/l3_apic.py:131 +#, python-format +msgid "Error detaching subnet %(subnet_id)s from router %(router_id)s" +msgstr "" + +#: neutron/services/loadbalancer/agent_scheduler.py:103 +#, python-format +msgid "Pool %(pool_id)s has already been hosted by lbaas agent %(agent_id)s" +msgstr "" + +#: neutron/services/loadbalancer/agent_scheduler.py:111 +#, python-format +msgid "No active lbaas agents for pool %s" +msgstr "" + +#: neutron/services/loadbalancer/agent_scheduler.py:117 +#, python-format +msgid "No lbaas agent supporting device driver %s" +msgstr "" + +#: neutron/services/loadbalancer/agent_scheduler.py:126 +#, python-format +msgid "Pool %(pool_id)s is scheduled to lbaas agent %(agent_id)s" +msgstr "" + +#: neutron/services/loadbalancer/plugin.py:82 +#, python-format +msgid "Delete associated loadbalancer pools before removing providers %s" +msgstr "" + +#: neutron/services/loadbalancer/plugin.py:91 +#, python-format +msgid "Error 
retrieving driver for provider %s" +msgstr "" + +#: neutron/services/loadbalancer/plugin.py:99 +#, python-format +msgid "Error retrieving provider for pool %s" +msgstr "" + +#: neutron/services/loadbalancer/plugin.py:190 +#, python-format +msgid "Failed to delete pool %s, putting it in ERROR state" +msgstr "" + +#: neutron/services/loadbalancer/agent/agent.py:38 +msgid "Seconds between periodic task runs" +msgstr "" + +#: neutron/services/loadbalancer/agent/agent_manager.py:41 +msgid "Drivers used to manage loadbalancing devices" +msgstr "" + +#: neutron/services/loadbalancer/agent/agent_manager.py:47 +#, python-format +msgid "Unknown device with pool_id %(pool_id)s" +msgstr "" + +#: neutron/services/loadbalancer/agent/agent_manager.py:96 +#, python-format +msgid "Error importing loadbalancer device driver: %s" +msgstr "" + +#: neutron/services/loadbalancer/agent/agent_manager.py:103 +#, python-format +msgid "Multiple device drivers with the same name found: %s" +msgstr "" + +#: neutron/services/loadbalancer/agent/agent_manager.py:143 +#, python-format +msgid "Error updating statistics on pool %s" +msgstr "" + +#: neutron/services/loadbalancer/agent/agent_manager.py:159 +msgid "Unable to retrieve ready devices" +msgstr "" + +#: neutron/services/loadbalancer/agent/agent_manager.py:176 +#: neutron/services/loadbalancer/agent/agent_manager.py:241 +#, python-format +msgid "No device driver on agent: %s." 
+msgstr "" + +#: neutron/services/loadbalancer/agent/agent_manager.py:186 +#, python-format +msgid "Unable to deploy instance for pool: %s" +msgstr "" + +#: neutron/services/loadbalancer/agent/agent_manager.py:196 +#, python-format +msgid "Unable to destroy device for pool: %s" +msgstr "" + +#: neutron/services/loadbalancer/agent/agent_manager.py:209 +#, python-format +msgid "%(operation)s %(obj)s %(id)s failed on device driver %(driver)s" +msgstr "" + +#: neutron/services/loadbalancer/agent/agent_manager.py:335 +#, python-format +msgid "Destroying pool %s due to agent disabling" +msgstr "" + +#: neutron/services/loadbalancer/agent/agent_manager.py:338 +#, python-format +msgid "Agent_updated by server side %s!" +msgstr "" + +#: neutron/services/loadbalancer/drivers/common/agent_driver_base.py:42 +msgid "Driver to use for scheduling pool to a default loadbalancer agent" +msgstr "" + +#: neutron/services/loadbalancer/drivers/common/agent_driver_base.py:50 +msgid "Device driver for agent should be specified in plugin driver." +msgstr "" + +#: neutron/services/loadbalancer/drivers/common/agent_driver_base.py:74 +#, python-format +msgid "Multiple lbaas agents found on host %s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/common/agent_driver_base.py:154 +#, python-format +msgid "Unknown object type: %s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/common/agent_driver_base.py:165 +#, python-format +msgid "" +"Cannot update status: %(obj_type)s %(obj_id)s not found in the DB, it was" +" probably deleted concurrently" +msgstr "" + +#: neutron/services/loadbalancer/drivers/common/agent_driver_base.py:188 +#, python-format +msgid "Unable to find port %s to plug." +msgstr "" + +#: neutron/services/loadbalancer/drivers/common/agent_driver_base.py:212 +#: neutron/services/loadbalancer/drivers/common/agent_driver_base.py:229 +#, python-format +msgid "" +"Unable to find port %s to unplug. This can occur when the Vip has been " +"deleted first." 
+msgstr "" + +#: neutron/services/loadbalancer/drivers/embrane/config.py:32 +msgid "Load Balancer image id (Embrane LB)" +msgstr "" + +#: neutron/services/loadbalancer/drivers/embrane/config.py:34 +msgid "In band Security Zone id for LBs" +msgstr "" + +#: neutron/services/loadbalancer/drivers/embrane/config.py:36 +msgid "Out of band Security Zone id for LBs" +msgstr "" + +#: neutron/services/loadbalancer/drivers/embrane/config.py:38 +msgid "Management Security Zone id for LBs" +msgstr "" + +#: neutron/services/loadbalancer/drivers/embrane/config.py:40 +msgid "Dummy user traffic Security Zone id for LBs" +msgstr "" + +#: neutron/services/loadbalancer/drivers/embrane/config.py:44 +msgid "choose LB image flavor to use, accepted values: small, medium" +msgstr "" + +#: neutron/services/loadbalancer/drivers/embrane/config.py:47 +msgid "resource synchronization interval in seconds" +msgstr "" + +#: neutron/services/loadbalancer/drivers/embrane/constants.py:51 +#, python-format +msgid "%s, probably was cancelled through the heleos UI" +msgstr "" + +#: neutron/services/loadbalancer/drivers/embrane/constants.py:58 +#, python-format +msgid "" +"Failed to delete the backend load balancer for reason %s. 
Please remove " +"it manually through the heleos UI" +msgstr "" + +#: neutron/services/loadbalancer/drivers/embrane/constants.py:61 +#, python-format +msgid "" +"No subnet is associated to member %s (required to identify the proper " +"load balancer port)" +msgstr "" + +#: neutron/services/loadbalancer/drivers/embrane/driver.py:88 +msgid "Connection limit is not supported by Embrane LB" +msgstr "" + +#: neutron/services/loadbalancer/drivers/embrane/driver.py:94 +#, python-format +msgid "Session persistence %s not supported by Embrane LBaaS" +msgstr "" + +#: neutron/services/loadbalancer/drivers/embrane/driver.py:132 +#, python-format +msgid "Subnet assigned to pool %s doesn't exist, backend port can't be created" +msgstr "" + +#: neutron/services/loadbalancer/drivers/embrane/agent/lb_operations.py:111 +#, python-format +msgid "" +"The load balancer %s had no physical representation, likely already " +"deleted" +msgstr "" + +#: neutron/services/loadbalancer/drivers/haproxy/namespace_driver.py:48 +msgid "Location to store config and state files" +msgstr "" + +#: neutron/services/loadbalancer/drivers/haproxy/namespace_driver.py:54 +msgid "The user group" +msgstr "" + +#: neutron/services/loadbalancer/drivers/haproxy/namespace_driver.py:60 +msgid "" +"When delete and re-add the same vip, send this many gratuitous ARPs to " +"flush the ARP cache in the Router. Set it below or equal to 0 to disable " +"this feature." 
+msgstr "" + +#: neutron/services/loadbalancer/drivers/haproxy/namespace_driver.py:77 +#, python-format +msgid "Error importing interface driver: %s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/haproxy/namespace_driver.py:173 +#, python-format +msgid "Stats socket not found for pool %s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/haproxy/namespace_driver.py:215 +#, python-format +msgid "Error while connecting to stats socket: %s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/haproxy/namespace_driver.py:394 +#, python-format +msgid "Unable to kill haproxy process: %s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/ncc_client.py:43 +#, python-format +msgid "NCC Error %d" +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/ncc_client.py:54 +msgid "No NetScaler Control Center URI specified. Cannot connect." +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/ncc_client.py:133 +#, python-format +msgid "Connection error occurred while connecting to %s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/ncc_client.py:138 +#, python-format +msgid "SSL error occurred while connecting to %s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/ncc_client.py:143 +#, python-format +msgid "Request to %s timed out" +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/ncc_client.py:150 +msgid "Request did not specify a valid URL" +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/ncc_client.py:154 +#, python-format +msgid "Too many redirects occurred for request to %s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/ncc_client.py:158 +#, python-format +msgid "A request error while connecting to %s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/ncc_client.py:163 +#, python-format +msgid "A unknown error occurred during request to %s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/ncc_client.py:171 +#, 
python-format +msgid "Unable to login. Invalid credentials passed.for: %s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/ncc_client.py:175 +#, python-format +msgid "Failed %(method)s operation on %(url)s status code: %(response_status)s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/netscaler_driver.py:29 +msgid "The URL to reach the NetScaler Control Center Server." +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/netscaler_driver.py:33 +msgid "Username to login to the NetScaler Control Center Server." +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/netscaler_driver.py:37 +msgid "Password to login to the NetScaler Control Center Server." +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/netscaler_driver.py:75 +#, python-format +msgid "NetScaler driver vip creation: %s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/netscaler_driver.py:90 +#, python-format +msgid "NetScaler driver vip %(vip_id)s update: %(vip_obj)s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/netscaler_driver.py:105 +#, python-format +msgid "NetScaler driver vip removal: %s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/netscaler_driver.py:126 +#, python-format +msgid "NetScaler driver pool creation: %s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/netscaler_driver.py:141 +#, python-format +msgid "NetScaler driver pool %(pool_id)s update: %(pool_obj)s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/netscaler_driver.py:156 +#, python-format +msgid "NetScaler driver pool removal: %s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/netscaler_driver.py:173 +#, python-format +msgid "NetScaler driver poolmember creation: %s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/netscaler_driver.py:191 +#, python-format +msgid "NetScaler driver poolmember %(member_id)s update: %(member_obj)s" 
+msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/netscaler_driver.py:208 +#, python-format +msgid "NetScaler driver poolmember removal: %s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/netscaler_driver.py:226 +#, python-format +msgid "" +"NetScaler driver healthmonitor creation for pool %(pool_id)s: " +"%(monitor_obj)s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/netscaler_driver.py:249 +#, python-format +msgid "NetScaler driver healthmonitor %(monitor_id)s update: %(monitor_obj)s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/netscaler_driver.py:270 +#, python-format +msgid "NetScaler driver healthmonitor %(monitor_id)sremoval for pool %(pool_id)s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/netscaler_driver.py:290 +#, python-format +msgid "NetScaler driver pool stats retrieval: %s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/netscaler_driver.py:415 +#, python-format +msgid "" +"Filtering ports based on network_id=%(network_id)s, " +"tenant_id=%(tenant_id)s, device_id=%(device_id)s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/netscaler_driver.py:430 +#, python-format +msgid "Found an existing SNAT port for subnet %s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/netscaler_driver.py:433 +#, python-format +msgid "Found no SNAT ports for subnet %s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/netscaler_driver.py:454 +#, python-format +msgid "Created SNAT port: %s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/netscaler_driver.py:462 +#, python-format +msgid "Removed SNAT port: %s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/netscaler_driver.py:469 +#, python-format +msgid "No SNAT port found for subnet %s. Creating one..." 
+msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/netscaler_driver.py:477 +#, python-format +msgid "SNAT port: %s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/netscaler_driver.py:487 +#, python-format +msgid "Removing SNAT port for subnet %s as this is the last pool using it..." +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:63 +msgid "IP address of vDirect server." +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:65 +msgid "IP address of secondary vDirect server." +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:68 +msgid "vDirect user name." +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:71 +msgid "vDirect user password." +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:74 +msgid "Service ADC type. Default: VA." +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:77 +msgid "Service ADC version." +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:80 +msgid "Enables or disables the Service HA pair. Default: False." +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:84 +msgid "Service throughput. Default: 1000." +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:87 +msgid "Service SSL throughput. Default: 100." +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:90 +msgid "Service compression throughput. Default: 100." +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:93 +msgid "Size of service cache. Default: 20." +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:96 +msgid "Name of l2_l3 workflow. Default: openstack_l2_l3." +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:100 +msgid "Name of l4 workflow. Default: openstack_l4." 
+msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:108 +msgid "Parameter for l2_l3 workflow constructor." +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:115 +msgid "Parameter for l2_l3 workflow setup." +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:118 +msgid "List of actions that are not pushed to the completion queue." +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:122 +msgid "Name of the l4 workflow action. Default: BaseCreate." +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:126 +msgid "Resource pool IDs." +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:129 +msgid "A required VLAN for the interswitch link to use." +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:132 +msgid "" +"Enable or disable Alteon interswitch link for stateful session failover. " +"Default: False." +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:229 +#, python-format +msgid "" +"vip: %(vip)s, extended_vip: %(extended_vip)s, network_id: " +"%(vip_network_id)s, service_name: %(service_name)s, pip_info: " +"%(pip_info)s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:264 +#, python-format +msgid "Retrieved pip nport: %(port)r for vip: %(vip)s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:272 +#, python-format +msgid "Found no pip nports associated with vip: %s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:281 +#, python-format +msgid "Failed to remove workflow %s. 
Going to set vip to ERROR status" +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:295 +#, python-format +msgid "pip nport id: %s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:299 +#, python-format +msgid "pip nport delete failed: %r" +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:387 +#, python-format +msgid "" +"_handle_pool_health_monitor. health_monitor = %(hm_id)s pool_id = " +"%(pool_id)s delete = %(delete)s vip_id = %(vip_id)s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:418 +msgid "Starting operation completion handling thread" +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:448 +#, python-format +msgid "_update_workflow response: %s " +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:457 +#: neutron/services/loadbalancer/drivers/radware/driver.py:488 +#, python-format +msgid "Pushing operation %s to the queue" +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:465 +#, python-format +msgid "Remove the workflow %s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:473 +#, python-format +msgid "Post-remove workflow function %r completed" +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:477 +#, python-format +msgid "Post-remove workflow function %r failed" +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:594 +#, python-format +msgid "create_workflow response: %s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:663 +#, python-format +msgid "" +"vDirectRESTClient:init server=%(server)s, secondary " +"server=%(sec_server)s, port=%(port)d, ssl=%(ssl)r" +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:669 +#, python-format +msgid "Fliping servers. 
Current is: %(server)s, switching to %(secondary)s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:682 +msgid "" +"REST client is not able to recover since only one vDirect server is " +"configured." +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:690 +#, python-format +msgid "vDirect server is not responding (%s)." +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:694 +#, python-format +msgid "vDirect server is not active (%s)." +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:722 +msgid "vdirectRESTClient: Could not establish HTTPS connection" +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:729 +msgid "vdirectRESTClient: Could not establish HTTP connection" +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:746 +#, python-format +msgid "vdirectRESTClient: %(action)s failure, %(e)r" +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:814 +#, python-format +msgid "" +"Operation %(oper)s is completed after %(sec_to_completion)d sec with " +"success status: %(success)s :" +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:826 +#, python-format +msgid "Operation %(operation)s failed. Reason: %(msg)s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:858 +#, python-format +msgid "Operation %s is not completed yet.." 
+msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:873 +msgid "Exception was thrown inside OperationCompletionHandler" +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:882 +#, python-format +msgid "Post-operation function %(func)r completed after operation %(oper)r" +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:888 +#, python-format +msgid "Post-operation function %(func)r failed after operation %(oper)r" +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:929 +#, python-format +msgid "_update: %s " +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:969 +#, python-format +msgid "_remove_object_from_db %s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/exceptions.py:24 +msgid "An unknown exception occurred in Radware LBaaS provider." +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/exceptions.py:28 +msgid "" +"vDirect user/password missing. Specify in configuration file, under " +"[radware] section" +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/exceptions.py:33 +#, python-format +msgid "" +"Workflow %(workflow)s is missing on vDirect server. Upload missing " +"workflow" +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/exceptions.py:38 +#, python-format +msgid "" +"REST request failed with status %(status)s. Reason: %(reason)s, " +"Description: %(description)s. Success status codes are %(success_codes)s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/exceptions.py:44 +#, python-format +msgid "%(operation)s operation is not supported for %(entity)s." 
+msgstr "" + +#: neutron/services/metering/agents/metering_agent.py:71 +msgid "Metering driver" +msgstr "" + +#: neutron/services/metering/agents/metering_agent.py:73 +msgid "Interval between two metering measures" +msgstr "" + +#: neutron/services/metering/agents/metering_agent.py:75 +msgid "Interval between two metering reports" +msgstr "" + +#: neutron/services/metering/agents/metering_agent.py:99 +#, python-format +msgid "Loading Metering driver %s" +msgstr "" + +#: neutron/services/metering/agents/metering_agent.py:101 +msgid "A metering driver must be specified" +msgstr "" + +#: neutron/services/metering/agents/metering_agent.py:116 +#, python-format +msgid "Send metering report: %s" +msgstr "" + +#: neutron/services/metering/agents/metering_agent.py:180 +#, python-format +msgid "Driver %(driver)s does not implement %(func)s" +msgstr "" + +#: neutron/services/metering/agents/metering_agent.py:184 +#, python-format +msgid "Driver %(driver)s:%(func)s runtime error" +msgstr "" + +#: neutron/services/metering/agents/metering_agent.py:219 +msgid "Get router traffic counters" +msgstr "" + +#: neutron/services/metering/agents/metering_agent.py:223 +msgid "Update metering rules from agent" +msgstr "" + +#: neutron/services/metering/agents/metering_agent.py:228 +msgid "Creating a metering label from agent" +msgstr "" + +#: neutron/services/metering/agents/metering_agent.py:235 +msgid "Delete a metering label from agent" +msgstr "" + +#: neutron/services/metering/drivers/iptables/iptables_driver.py:90 +#, python-format +msgid "Loading interface driver %s" +msgstr "" + +#: neutron/services/vpn/agent.py:28 +msgid "The vpn device drivers Neutron will use" +msgstr "" + +#: neutron/services/vpn/plugin.py:48 +#, python-format +msgid "VPN plugin using service driver: %s" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_csr_rest_client.py:71 +#, python-format +msgid "RESPONSE: %s" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_csr_rest_client.py:84 +#, 
python-format +msgid "%(method)s: Request for %(resource)s payload: %(payload)s" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_csr_rest_client.py:91 +#, python-format +msgid "%(method)s Took %(time).2f seconds to process" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_csr_rest_client.py:97 +#, python-format +msgid "%(method)s: Request timeout%(ssl)s (%(timeout).3f sec) for CSR(%(host)s)" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_csr_rest_client.py:106 +#, python-format +msgid "%(method)s: Unable to connect to CSR(%(host)s)" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_csr_rest_client.py:110 +#, python-format +msgid "%(method)s: Unexpected error for CSR (%(host)s): %(error)s" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_csr_rest_client.py:116 +#, python-format +msgid "%(method)s: Completed [%(status)s]" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_csr_rest_client.py:131 +#, python-format +msgid "%(auth)s with CSR %(host)s" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_csr_rest_client.py:138 +#, python-format +msgid "Successfully authenticated with CSR %s" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_csr_rest_client.py:140 +#, python-format +msgid "Failed authentication with CSR %(host)s [%(status)s]" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_csr_rest_client.py:175 +#, python-format +msgid "%(method)s: Request timeout for CSR(%(host)s)" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:43 +msgid "Status check interval for Cisco CSR IPSec connections" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:54 +#, python-format +msgid "Cisco CSR failed to create %(resource)s (%(which)s)" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:58 +#, python-format +msgid "Cisco CSR failed to change %(tunnel)s admin state to %(state)s" +msgstr "" + +#: 
neutron/services/vpn/device_drivers/cisco_ipsec.py:63 +#, python-format +msgid "" +"Required %(resource)s attribute %(attr)s mapping for Cisco CSR is missing" +" in device driver" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:68 +#, python-format +msgid "" +"Device driver does not have a mapping of '%(value)s for attribute " +"%(attr)s of %(resource)s" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:83 +#, python-format +msgid "Scanning config files %s for Cisco CSR configurations" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:88 +#, python-format +msgid "Config file parse error: %s" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:92 +#, python-format +msgid "Unable to parse config files %s for Cisco CSR info" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:102 +#, python-format +msgid "Ignoring Cisco CSR configuration entry - router IP %s is not valid" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:113 +#, python-format +msgid "Ignoring Cisco CSR for router %(router)s - missing %(field)s setting" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:121 +#, python-format +msgid "Ignoring Cisco CSR for router %s - timeout is not a floating point number" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:130 +#, python-format +msgid "Ignoring Cisco CSR for subnet %s - REST management is not an IP address" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:137 +#, python-format +msgid "Ignoring Cisco CSR for router %s - local tunnel is not an IP address" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:147 +#, python-format +msgid "Found CSR for router %(router)s: %(info)s" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:213 +#, python-format +msgid "Loaded %(num)d Cisco CSR configuration%(plural)s" +msgstr "" + +#: 
neutron/services/vpn/device_drivers/cisco_ipsec.py:217 +#, python-format +msgid "No Cisco CSR configurations found in: %s" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:228 +#, python-format +msgid "Handling VPN service update notification '%s'" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:250 +#, python-format +msgid "Update: Existing connection %s changed" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:257 +#, python-format +msgid "Update: Connection %s no longer admin down" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:263 +#, python-format +msgid "Update: Connection %s forced to admin down" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:271 +#, python-format +msgid "Update: Created new connection %s in admin down state" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:276 +#, python-format +msgid "Update: Created new connection %s" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:288 +#, python-format +msgid "" +"Update: Skipping VPN service %(service)s as it's router (%(csr_id)s is " +"not associated with a Cisco CSR" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:294 +#, python-format +msgid "Update: Existing VPN service %s detected" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:298 +#, python-format +msgid "Update: New VPN service %s detected" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:307 +msgid "Update: Completed update processing" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:337 +#, python-format +msgid "Mark: %(service)d VPN services and %(conn)d IPSec connections marked dirty" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:359 +#, python-format +msgid "" +"Sweep: Removed %(service)d dirty VPN service%(splural)s and %(conn)d " +"dirty IPSec connection%(cplural)s" +msgstr "" + +#: 
neutron/services/vpn/device_drivers/cisco_ipsec.py:374 +#, python-format +msgid "Report: Collecting status for IPSec connections on VPN service %s" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:380 +#, python-format +msgid "Connection %s forced down" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:384 +#, python-format +msgid "Connection %(conn)s reported %(status)s" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:389 +#, python-format +msgid "Report: Adding info for IPSec connection %s" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:409 +#, python-format +msgid "Report: Adding info for VPN service %s" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:431 +msgid "Report: Starting status report processing" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:433 +#, python-format +msgid "Report: Collecting status for VPN service %s" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:439 +msgid "Sending status report update to plugin" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:441 +msgid "Report: Completed status report processing" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:736 +#, python-format +msgid "Unable to create %(resource)s %(which)s: %(status)d" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:749 +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:777 +#, python-format +msgid "Internal error - '%s' is not defined" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:762 +#, python-format +msgid "Unable to delete %(resource)s %(which)s: %(status)d" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:771 +#, python-format +msgid "Performing rollback action %(action)s for resource %(resource)s" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:797 +#, python-format +msgid "Creating IPSec connection %s" +msgstr 
"" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:830 +#, python-format +msgid "FAILED: Create of IPSec site-to-site connection %s" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:833 +#, python-format +msgid "SUCCESS: Created IPSec site-to-site connection %s" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:842 +#, python-format +msgid "Deleting IPSec connection %s" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:844 +#, python-format +msgid "Unable to find connection %s" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:848 +#, python-format +msgid "SUCCESS: Deleted IPSec site-to-site connection %s" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:856 +#, python-format +msgid "Unable to change %(tunnel)s admin state to %(state)s" +msgstr "" + +#: neutron/services/vpn/device_drivers/ipsec.py:48 +msgid "Location to store ipsec server config files" +msgstr "" + +#: neutron/services/vpn/device_drivers/ipsec.py:51 +msgid "Interval for checking ipsec status" +msgstr "" + +#: neutron/services/vpn/device_drivers/ipsec.py:61 +msgid "Template file for ipsec configuration" +msgstr "" + +#: neutron/services/vpn/device_drivers/ipsec.py:67 +msgid "Template file for ipsec secret configuration" +msgstr "" + +#: neutron/services/vpn/device_drivers/ipsec.py:249 +#, python-format +msgid "Failed to enable vpn process on router %s" +msgstr "" + +#: neutron/services/vpn/device_drivers/ipsec.py:260 +#, python-format +msgid "Failed to disable vpn process on router %s" +msgstr "" + +#: neutron/services/vpn/service_drivers/__init__.py:78 +#, python-format +msgid "Notify agent at %(topic)s.%(host)s the message %(method)s %(args)s" +msgstr "" + +#: neutron/services/vpn/service_drivers/cisco_csr_db.py:46 +#, python-format +msgid "Fatal - %(reason)s" +msgstr "" + +#: neutron/services/vpn/service_drivers/cisco_csr_db.py:80 +#, python-format +msgid "No available Cisco CSR %(type)s IDs from 
%(min)d..%(max)d" +msgstr "" + +#: neutron/services/vpn/service_drivers/cisco_csr_db.py:135 +#, python-format +msgid "" +"Database inconsistency between IPSec connection and Cisco CSR mapping " +"table (%s)" +msgstr "" + +#: neutron/services/vpn/service_drivers/cisco_csr_db.py:161 +#, python-format +msgid "Reserved new CSR ID %(csr_id)d for %(policy)s ID %(policy_id)s" +msgstr "" + +#: neutron/services/vpn/service_drivers/cisco_csr_db.py:188 +#, python-format +msgid "" +"Mappings for IPSec connection %(conn)s - tunnel=%(tunnel)s " +"ike_policy=%(csr_ike)d ipsec_policy=%(csr_ipsec)d" +msgstr "" + +#: neutron/services/vpn/service_drivers/cisco_csr_db.py:197 +#, python-format +msgid "" +"Existing entry for IPSec connection %s not found in Cisco CSR mapping " +"table" +msgstr "" + +#: neutron/services/vpn/service_drivers/cisco_csr_db.py:224 +#, python-format +msgid "" +"Attempt to create duplicate entry in Cisco CSR mapping table for " +"connection %s" +msgstr "" + +#: neutron/services/vpn/service_drivers/cisco_csr_db.py:227 +#, python-format +msgid "" +"Mapped connection %(conn_id)s to Tunnel%(tunnel_id)d using IKE policy ID " +"%(ike_id)d and IPSec policy ID %(ipsec_id)d" +msgstr "" + +#: neutron/services/vpn/service_drivers/cisco_csr_db.py:239 +#, python-format +msgid "Removed mapping for connection %s" +msgstr "" + +#: neutron/services/vpn/service_drivers/cisco_ipsec.py:39 +#, python-format +msgid "" +"Cisco CSR does not support %(resource)s attribute %(key)s with value " +"'%(value)s'" +msgstr "" + +#: neutron/services/vpn/service_drivers/cisco_ipsec.py:160 +#, python-format +msgid "IPSec connection %s validated for Cisco CSR" +msgstr "" + +#: neutron/tests/unit/test_api_v2_resource.py:176 +#: neutron/tests/unit/test_api_v2_resource.py:246 +msgid "Unmapped error" +msgstr "" + +#: neutron/tests/unit/bigswitch/fake_server.py:74 +#, python-format +msgid "" +"Request: action=%(action)s, uri=%(uri)r, body=%(body)s, " +"headers=%(headers)s" +msgstr "" + +#: 
neutron/tests/unit/bigswitch/fake_server.py:126 +#, python-format +msgid "No floating IPs in requesturi=%(uri)s, body=%(body)s" +msgstr "" + +#: neutron/tests/unit/bigswitch/fake_server.py:135 +#, python-format +msgid "Expected floating IPs from multiple tenants.uri=%(uri)s, body=%(body)s" +msgstr "" + +#: neutron/tests/unit/bigswitch/fake_server.py:180 +#, python-format +msgid "No host cert for %(server)s in cert %(cert)s" +msgstr "" + +#: neutron/tests/unit/db/loadbalancer/test_db_loadbalancer.py:217 +#: neutron/tests/unit/db/loadbalancer/test_db_loadbalancer.py:239 +#: neutron/tests/unit/db/loadbalancer/test_db_loadbalancer.py:258 +#: neutron/tests/unit/db/loadbalancer/test_db_loadbalancer.py:281 +#, python-format +msgid "Unexpected error code: %s" +msgstr "" + +#: neutron/tests/unit/ml2/drivers/mechanism_logger.py:32 +#, python-format +msgid "" +"%(method)s called with network settings %(current)s (original settings " +"%(original)s) and network segments %(segments)s" +msgstr "" + +#: neutron/tests/unit/ml2/drivers/mechanism_logger.py:59 +#, python-format +msgid "" +"%(method)s called with subnet settings %(current)s (original settings " +"%(original)s)" +msgstr "" + +#: neutron/tests/unit/ml2/drivers/mechanism_logger.py:85 +#, python-format +msgid "" +"%(method)s called with port settings %(current)s (original settings " +"%(original)s) bound to segment %(segment)s (original segment " +"%(original_segment)s) using driver %(driver)s (original driver " +"%(original_driver)s) on network %(network)s" +msgstr "" + +#: neutron/tests/unit/nec/stub_ofc_driver.py:67 +#, python-format +msgid "(create_tenant) OFC tenant %s already exists" +msgstr "" + +#: neutron/tests/unit/nec/stub_ofc_driver.py:79 +#, python-format +msgid "(delete_tenant) OFC tenant %s not found" +msgstr "" + +#: neutron/tests/unit/nec/stub_ofc_driver.py:81 +msgid "delete_tenant: SUCCEED" +msgstr "" + +#: neutron/tests/unit/nec/stub_ofc_driver.py:88 +#, python-format +msgid "(create_network) OFC tenant 
%s not found" +msgstr "" + +#: neutron/tests/unit/nec/stub_ofc_driver.py:91 +#, python-format +msgid "(create_network) OFC network %s already exists" +msgstr "" + +#: neutron/tests/unit/nec/stub_ofc_driver.py:102 +#, python-format +msgid "(update_network) OFC network %s not found" +msgstr "" + +#: neutron/tests/unit/nec/stub_ofc_driver.py:106 +msgid "update_network: SUCCEED" +msgstr "" + +#: neutron/tests/unit/nec/stub_ofc_driver.py:114 +#, python-format +msgid "(delete_network) OFC network %s not found" +msgstr "" + +#: neutron/tests/unit/nec/stub_ofc_driver.py:116 +msgid "delete_network: SUCCEED" +msgstr "" + +#: neutron/tests/unit/nec/stub_ofc_driver.py:123 +#, python-format +msgid "(create_port) OFC network %s not found" +msgstr "" + +#: neutron/tests/unit/nec/stub_ofc_driver.py:126 +#, python-format +msgid "(create_port) OFC port %s already exists" +msgstr "" + +#: neutron/tests/unit/nec/stub_ofc_driver.py:140 +#, python-format +msgid "(delete_port) OFC port %s not found" +msgstr "" + +#: neutron/tests/unit/nec/stub_ofc_driver.py:142 +msgid "delete_port: SUCCEED" +msgstr "" + +#: neutron/tests/unit/nec/stub_ofc_driver.py:175 +#, python-format +msgid "(create_router) OFC tenant %s not found" +msgstr "" + +#: neutron/tests/unit/nec/stub_ofc_driver.py:178 +#, python-format +msgid "(create_router) OFC router %s already exists" +msgstr "" + +#: neutron/tests/unit/nec/stub_ofc_driver.py:181 +msgid "Operation on OFC is failed" +msgstr "" + +#: neutron/tests/unit/nec/stub_ofc_driver.py:195 +#: neutron/tests/unit/nec/stub_ofc_driver.py:285 +#, python-format +msgid "(delete_router) OFC router %s not found" +msgstr "" + +#: neutron/tests/unit/nec/stub_ofc_driver.py:197 +msgid "delete_router: SUCCEED" +msgstr "" + +#: neutron/tests/unit/nec/stub_ofc_driver.py:205 +#, python-format +msgid "(add_router_interface) ip_address %s is not a valid format (a.b.c.d/N)." 
+msgstr "" + +#: neutron/tests/unit/nec/stub_ofc_driver.py:210 +#, python-format +msgid "(add_router_interface) OFC router %s not found" +msgstr "" + +#: neutron/tests/unit/nec/stub_ofc_driver.py:213 +#, python-format +msgid "(add_router_interface) OFC network %s not found" +msgstr "" + +#: neutron/tests/unit/nec/stub_ofc_driver.py:220 +#, python-format +msgid "add_router_interface: SUCCEED (if_id=%s)" +msgstr "" + +#: neutron/tests/unit/nec/stub_ofc_driver.py:228 +#: neutron/tests/unit/nec/stub_ofc_driver.py:245 +#, python-format +msgid "(delete_router_interface) OFC router interface %s not found" +msgstr "" + +#: neutron/tests/unit/nec/stub_ofc_driver.py:237 +msgid "update_router_route: SUCCEED" +msgstr "" + +#: neutron/tests/unit/nec/stub_ofc_driver.py:248 +msgid "delete_router_interface: SUCCEED" +msgstr "" + +#: neutron/tests/unit/nec/stub_ofc_driver.py:258 +#, python-format +msgid "(add_router_route) OFC router %s not found" +msgstr "" + +#: neutron/tests/unit/nec/stub_ofc_driver.py:263 +#, python-format +msgid "(add_router_route) route to \"%s\" already exists" +msgstr "" + +#: neutron/tests/unit/nec/stub_ofc_driver.py:268 +#, python-format +msgid "add_router_route: SUCCEED (route_id=%s)" +msgstr "" + +#: neutron/tests/unit/nec/stub_ofc_driver.py:277 +#, python-format +msgid "(delete_router_route) OFC router route %s not found" +msgstr "" + +#: neutron/tests/unit/nec/stub_ofc_driver.py:279 +msgid "delete_router_route: SUCCEED" +msgstr "" + +#: neutron/tests/unit/nec/stub_ofc_driver.py:292 +#, python-format +msgid "list_router_routes: routes=%s" +msgstr "" + +#: neutron/tests/unit/nec/test_ofc_client.py:88 +msgid "The specified OFC resource (/somewhere) is not found." 
+msgstr "" + +#: neutron/tests/unit/nec/test_ofc_client.py:92 +#: neutron/tests/unit/nec/test_ofc_client.py:98 +#: neutron/tests/unit/nec/test_ofc_client.py:107 +msgid "An OFC exception has occurred: Operation on OFC failed" +msgstr "" + +#: neutron/tests/unit/nec/test_ofc_client.py:114 +msgid "An OFC exception has occurred: Failed to connect OFC : " +msgstr "" + +#: neutron/tests/unit/vmware/apiclient/fake.py:406 +#, python-format +msgid "lswitch:%s not found" +msgstr "" + +#: neutron/tests/unit/vmware/apiclient/fake.py:415 +#, python-format +msgid "lrouter:%s not found" +msgstr "" + +#: neutron/tests/unit/vmware/vshield/fake_vcns.py:104 +#, python-format +msgid "Job %s does not nexist" +msgstr "" + +#: neutron/tests/unit/vmware/vshield/fake_vcns.py:116 +#: neutron/tests/unit/vmware/vshield/fake_vcns.py:127 +#: neutron/tests/unit/vmware/vshield/fake_vcns.py:144 +#: neutron/tests/unit/vmware/vshield/fake_vcns.py:162 +#: neutron/tests/unit/vmware/vshield/fake_vcns.py:184 +#: neutron/tests/unit/vmware/vshield/fake_vcns.py:206 +#: neutron/tests/unit/vmware/vshield/fake_vcns.py:290 +#: neutron/tests/unit/vmware/vshield/fake_vcns.py:304 +#: neutron/tests/unit/vmware/vshield/fake_vcns.py:318 +#: neutron/tests/unit/vmware/vshield/fake_vcns.py:360 +#, python-format +msgid "Edge %s does not exist" +msgstr "" + +#: neutron/tests/unit/vmware/vshield/fake_vcns.py:194 +#, python-format +msgid "Rule id %d doest not exist" +msgstr "" + +#: neutron/tests/unit/vmware/vshield/fake_vcns.py:257 +#, python-format +msgid "Lswitch %s does not exist" +msgstr "" + +#: neutron/tests/unit/vmware/vshield/test_edge_router.py:130 +msgid "Tasks not completed" +msgstr "" + diff --git a/neutron/locale/pt_BR/LC_MESSAGES/neutron-log-error.po b/neutron/locale/pt_BR/LC_MESSAGES/neutron-log-error.po new file mode 100644 index 000000000..5f04744ad --- /dev/null +++ b/neutron/locale/pt_BR/LC_MESSAGES/neutron-log-error.po @@ -0,0 +1,168 @@ +# Translations template for neutron. 
+# Copyright (C) 2014 ORGANIZATION +# This file is distributed under the same license as the neutron project. +# +# Translators: +msgid "" +msgstr "" +"Project-Id-Version: Neutron\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2014-06-09 06:08+0000\n" +"PO-Revision-Date: 2014-05-29 07:49+0000\n" +"Last-Translator: FULL NAME \n" +"Language-Team: Portuguese (Brazil) (http://www.transifex.com/projects/p/" +"neutron/language/pt_BR/)\n" +"Language: pt_BR\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 1.3\n" +"Plural-Forms: nplurals=2; plural=(n > 1);\n" + +#: neutron/openstack/common/excutils.py:76 +#, python-format +msgid "Original exception being dropped: %s" +msgstr "Exceção original sendo descartada: %s" + +#: neutron/openstack/common/excutils.py:105 +#, python-format +msgid "Unexpected exception occurred %d time(s)... retrying." +msgstr "Exceção não esperada ocorreu %d vez(es)... tentando novamente." + +#: neutron/openstack/common/periodic_task.py:179 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "" + +#: neutron/openstack/common/service.py:188 +msgid "Exception during rpc cleanup." +msgstr "Exceção durante limpeza de RPC." + +#: neutron/openstack/common/service.py:276 +msgid "Unhandled exception" +msgstr "Exceção não tratada" + +#: neutron/openstack/common/db/api.py:72 +msgid "DB exceeded retry limit." +msgstr "" + +#: neutron/openstack/common/db/api.py:76 +msgid "DB connection error." +msgstr "" + +#: neutron/openstack/common/db/sqlalchemy/session.py:460 +msgid "DB exception wrapped." +msgstr "Exceção de BD incluída." 
+ +#: neutron/openstack/common/middleware/notifier.py:40 +#, python-format +msgid "An exception occurred processing the API call: %s " +msgstr "" + +#: neutron/openstack/common/rpc/amqp.py:480 +#: neutron/openstack/common/rpc/impl_zmq.py:286 +msgid "Exception during message handling" +msgstr "Exceção durante a manipulação de mensagem" + +#: neutron/openstack/common/rpc/common.py:88 +msgid "Exception in string format operation" +msgstr "Exceção na operação de formato de sequência" + +#: neutron/openstack/common/rpc/common.py:292 +#, python-format +msgid "Returning exception %s to caller" +msgstr "Retornando exceção %s ao método de origem" + +#: neutron/openstack/common/rpc/impl_kombu.py:156 +msgid "Failed to process message ... skipping it." +msgstr "Falha ao processar mensagem...pulando ela." + +#: neutron/openstack/common/rpc/impl_kombu.py:160 +msgid "Failed to process message ... will requeue." +msgstr "Falha ao processar mensagem... Irá voltar para a fila." + +#: neutron/openstack/common/rpc/impl_kombu.py:571 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" +"O servidor AMQP em %(hostname)s:%(port)d está inatingível: %(err_str)s. " +"Tentando novamente em %(sleep_time)d segundos." 
+ +#: neutron/openstack/common/rpc/impl_kombu.py:625 +#: neutron/openstack/common/rpc/impl_qpid.py:575 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "Falha ao declarar consumidor para o tópico '%(topic)s': %(err_str)s" + +#: neutron/openstack/common/rpc/impl_kombu.py:647 +#: neutron/openstack/common/rpc/impl_qpid.py:594 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "Falha ao consumir mensagem da fila: %s" + +#: neutron/openstack/common/rpc/impl_kombu.py:686 +#: neutron/openstack/common/rpc/impl_qpid.py:629 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "Falha ao publicar mensagem no tópico '%(topic)s': %(err_str)s" + +#: neutron/openstack/common/rpc/impl_qpid.py:191 +msgid "Failed to process message... skipping it." +msgstr "Falha ao processar mensagem... ignorando-a." + +#: neutron/openstack/common/rpc/impl_qpid.py:517 +#, python-format +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" +msgstr "" +"Não é possível conectar ao servidor AMQP: %(e)s. Suspendendo em %(delay)s " +"segundos" + +#: neutron/openstack/common/rpc/impl_qpid.py:602 +msgid "Error processing message. Skipping it." +msgstr "Erro ao processar mensagem. Ignorando-o." + +#: neutron/openstack/common/rpc/impl_zmq.py:96 +msgid "JSON serialization failed." +msgstr "Falha na serialização de JSON." + +#: neutron/openstack/common/rpc/impl_zmq.py:195 +msgid "ZeroMQ socket could not be closed." +msgstr "" + +#: neutron/openstack/common/rpc/impl_zmq.py:339 +msgid "RPC message did not include method." +msgstr "A mensagem de RPC não incluiu o método." + +#: neutron/openstack/common/rpc/impl_zmq.py:476 +msgid "Topic socket file creation failed." +msgstr "Falha na criação do arquivo de soquete do tópico." + +#: neutron/openstack/common/rpc/impl_zmq.py:482 +#, python-format +msgid "" +"Local per-topic backlog buffer full for topic %(topic)s. Dropping message." 
+msgstr "" +"Buffer de lista não processada por tópico local integral para o tópico " +"%(topic)s. Descartando mensagem." + +#: neutron/openstack/common/rpc/impl_zmq.py:498 +#, python-format +msgid "Required IPC directory does not exist at %s" +msgstr "Diretório IPC requerido não existe em %s" + +#: neutron/openstack/common/rpc/impl_zmq.py:507 +#, python-format +msgid "Permission denied to IPC directory at %s" +msgstr "Permissão negada para o doretório IPC em %s" + +#: neutron/openstack/common/rpc/impl_zmq.py:510 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." +msgstr "" +"Não foi possível criar o daemon receptor ZeroMQ. O soquete já pode estar em " +"uso." + +#: neutron/openstack/common/rpc/impl_zmq.py:563 +msgid "ZMQ Envelope version unsupported or unknown." +msgstr "Versão de Envelope ZMQ não suportada ou desconhecida." diff --git a/neutron/locale/pt_BR/LC_MESSAGES/neutron-log-info.po b/neutron/locale/pt_BR/LC_MESSAGES/neutron-log-info.po new file mode 100644 index 000000000..11be020e6 --- /dev/null +++ b/neutron/locale/pt_BR/LC_MESSAGES/neutron-log-info.po @@ -0,0 +1,128 @@ +# Translations template for neutron. +# Copyright (C) 2014 ORGANIZATION +# This file is distributed under the same license as the neutron project. 
+# +# Translators: +msgid "" +msgstr "" +"Project-Id-Version: Neutron\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2014-06-16 06:08+0000\n" +"PO-Revision-Date: 2014-05-29 07:49+0000\n" +"Last-Translator: FULL NAME \n" +"Language-Team: Portuguese (Brazil) (http://www.transifex.com/projects/p/" +"neutron/language/pt_BR/)\n" +"Language: pt_BR\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 1.3\n" +"Plural-Forms: nplurals=2; plural=(n > 1);\n" + +#: neutron/openstack/common/periodic_task.py:125 +#, python-format +msgid "Skipping periodic task %(task)s because its interval is negative" +msgstr "" + +#: neutron/openstack/common/periodic_task.py:130 +#, python-format +msgid "Skipping periodic task %(task)s because it is disabled" +msgstr "" + +#: neutron/openstack/common/service.py:176 +#, python-format +msgid "Caught %s, exiting" +msgstr "%s capturadas, saindo" + +#: neutron/openstack/common/service.py:240 +msgid "Parent process has died unexpectedly, exiting" +msgstr "Processo pai saiu inesperadamente, saindo" + +#: neutron/openstack/common/service.py:271 +#, python-format +msgid "Child caught %s, exiting" +msgstr "" + +#: neutron/openstack/common/service.py:310 +msgid "Forking too fast, sleeping" +msgstr "Bifurcação muito rápida, suspendendo" + +#: neutron/openstack/common/service.py:329 +#, python-format +msgid "Started child %d" +msgstr "Filho %d iniciado" + +#: neutron/openstack/common/service.py:339 +#, python-format +msgid "Starting %d workers" +msgstr "Iniciando %d trabalhadores" + +#: neutron/openstack/common/service.py:356 +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" +msgstr "%(pid)d filho eliminado pelo sinal %(sig)d" + +#: neutron/openstack/common/service.py:360 +#, python-format +msgid "Child %(pid)s exited with status %(code)d" +msgstr "Filho %(pid)s encerrando com status %(code)d" + +#: neutron/openstack/common/service.py:399 +#, 
python-format +msgid "Caught %s, stopping children" +msgstr "%s capturado, parando filhos" + +#: neutron/openstack/common/service.py:408 +msgid "Wait called after thread killed. Cleaning up." +msgstr "" + +#: neutron/openstack/common/service.py:424 +#, python-format +msgid "Waiting on %d children to exit" +msgstr "Aguardando em %d filhos para sair" + +#: neutron/openstack/common/db/sqlalchemy/utils.py:379 +#, python-format +msgid "Deleting duplicated row with id: %(id)s from table: %(table)s" +msgstr "Excluindo linha duplicada com ID: %(id)s da tabela: %(table)s" + +#: neutron/openstack/common/rpc/impl_kombu.py:498 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "Reconectando ao servidor AMQP em %(hostname)s:%(port)d" + +#: neutron/openstack/common/rpc/impl_kombu.py:520 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "Conectado ao servidor AMQP em %(hostname)s:%(port)d" + +#: neutron/openstack/common/rpc/impl_qpid.py:523 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "Conectado ao servidor AMQP em %s" + +#: neutron/openstack/common/rpc/impl_zmq.py:371 +msgid "Registering reactor" +msgstr "Registrando reator" + +#: neutron/openstack/common/rpc/impl_zmq.py:383 +msgid "In reactor registered" +msgstr "No reator registrado" + +#: neutron/openstack/common/rpc/impl_zmq.py:388 +msgid "Consuming socket" +msgstr "Consumindo soquete" + +#: neutron/openstack/common/rpc/impl_zmq.py:438 +#, python-format +msgid "Creating proxy for topic: %s" +msgstr "Criando proxy para o tópico: %s" + +#: neutron/openstack/common/rpc/impl_zmq.py:591 +msgid "Skipping topic registration. Already registered." +msgstr "Ignorando registro de tópico. Já registrado." 
+ +#: neutron/openstack/common/rpc/matchmaker.py:216 +#, python-format +msgid "Matchmaker unregistered: %(key)s, %(host)s" +msgstr "Matchmaker não registrado: %(key)s, %(host)s" diff --git a/neutron/locale/zh_CN/LC_MESSAGES/neutron-log-error.po b/neutron/locale/zh_CN/LC_MESSAGES/neutron-log-error.po new file mode 100644 index 000000000..b581129f0 --- /dev/null +++ b/neutron/locale/zh_CN/LC_MESSAGES/neutron-log-error.po @@ -0,0 +1,162 @@ +# Translations template for neutron. +# Copyright (C) 2014 ORGANIZATION +# This file is distributed under the same license as the neutron project. +# +# Translators: +msgid "" +msgstr "" +"Project-Id-Version: Neutron\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2014-06-09 06:08+0000\n" +"PO-Revision-Date: 2014-05-29 07:49+0000\n" +"Last-Translator: FULL NAME \n" +"Language-Team: Chinese (China) (http://www.transifex.com/projects/p/neutron/" +"language/zh_CN/)\n" +"Language: zh_CN\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 1.3\n" +"Plural-Forms: nplurals=1; plural=0;\n" + +#: neutron/openstack/common/excutils.py:76 +#, python-format +msgid "Original exception being dropped: %s" +msgstr "正在删除原始异常:%s" + +#: neutron/openstack/common/excutils.py:105 +#, python-format +msgid "Unexpected exception occurred %d time(s)... retrying." +msgstr "意外的异常已发生 %d 次...正在重试。" + +#: neutron/openstack/common/periodic_task.py:179 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "" + +#: neutron/openstack/common/service.py:188 +msgid "Exception during rpc cleanup." +msgstr "在RPC清除期间发生异常。" + +#: neutron/openstack/common/service.py:276 +msgid "Unhandled exception" +msgstr "存在未处理的异常" + +#: neutron/openstack/common/db/api.py:72 +msgid "DB exceeded retry limit." +msgstr "" + +#: neutron/openstack/common/db/api.py:76 +msgid "DB connection error." 
+msgstr "" + +#: neutron/openstack/common/db/sqlalchemy/session.py:460 +msgid "DB exception wrapped." +msgstr "数据库异常被包裹。" + +#: neutron/openstack/common/middleware/notifier.py:40 +#, python-format +msgid "An exception occurred processing the API call: %s " +msgstr "" + +#: neutron/openstack/common/rpc/amqp.py:480 +#: neutron/openstack/common/rpc/impl_zmq.py:286 +msgid "Exception during message handling" +msgstr "消息处理期间发生异常" + +#: neutron/openstack/common/rpc/common.py:88 +msgid "Exception in string format operation" +msgstr "字符串格式操作中发生异常" + +#: neutron/openstack/common/rpc/common.py:292 +#, python-format +msgid "Returning exception %s to caller" +msgstr "正在将异常 %s 返回至调用者" + +#: neutron/openstack/common/rpc/impl_kombu.py:156 +msgid "Failed to process message ... skipping it." +msgstr "未能处理消息...正在跳过该消息。" + +#: neutron/openstack/common/rpc/impl_kombu.py:160 +msgid "Failed to process message ... will requeue." +msgstr "未能处理消息...将重新排队。" + +#: neutron/openstack/common/rpc/impl_kombu.py:571 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" +"%(hostname)s:%(port)d 上的 AMQP 服务器不可访问:%(err_str)s。将在 " +"%(sleep_time)d 秒后再次进行尝试。" + +#: neutron/openstack/common/rpc/impl_kombu.py:625 +#: neutron/openstack/common/rpc/impl_qpid.py:575 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "未能针对主题“%(topic)s”声明使用者:%(err_str)s" + +#: neutron/openstack/common/rpc/impl_kombu.py:647 +#: neutron/openstack/common/rpc/impl_qpid.py:594 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "未能使用队列中的消息:%s" + +#: neutron/openstack/common/rpc/impl_kombu.py:686 +#: neutron/openstack/common/rpc/impl_qpid.py:629 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "未能将消息发布到主题“%(topic)s”:%(err_str)s" + +#: neutron/openstack/common/rpc/impl_qpid.py:191 +msgid "Failed to process message... 
skipping it." +msgstr "未能处理消息... 正在跳过该消息。" + +#: neutron/openstack/common/rpc/impl_qpid.py:517 +#, python-format +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" +msgstr "无法连接至 AMQP 服务器:%(e)s。正在休眠,持续时间为 %(delay)s 秒" + +#: neutron/openstack/common/rpc/impl_qpid.py:602 +msgid "Error processing message. Skipping it." +msgstr "处理消息时出错。正在跳过该消息。" + +#: neutron/openstack/common/rpc/impl_zmq.py:96 +msgid "JSON serialization failed." +msgstr "JSON 序列化失败。" + +#: neutron/openstack/common/rpc/impl_zmq.py:195 +msgid "ZeroMQ socket could not be closed." +msgstr "" + +#: neutron/openstack/common/rpc/impl_zmq.py:339 +msgid "RPC message did not include method." +msgstr "RPC 消息未包括方法。" + +#: neutron/openstack/common/rpc/impl_zmq.py:476 +msgid "Topic socket file creation failed." +msgstr "主题套接字文件创建失败。" + +#: neutron/openstack/common/rpc/impl_zmq.py:482 +#, python-format +msgid "" +"Local per-topic backlog buffer full for topic %(topic)s. Dropping message." +msgstr "对于主题 %(topic)s,本地“每主题”储备缓冲区已满。正在删除消息。" + +#: neutron/openstack/common/rpc/impl_zmq.py:498 +#, python-format +msgid "Required IPC directory does not exist at %s" +msgstr "在%s不存在需要的IPC目录" + +#: neutron/openstack/common/rpc/impl_zmq.py:507 +#, python-format +msgid "Permission denied to IPC directory at %s" +msgstr "在 %s 的IPC目录的权限被拒绝" + +#: neutron/openstack/common/rpc/impl_zmq.py:510 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." +msgstr "未能创建 ZeroMQ 接收器守护程序。套接字可能已在使用中。" + +#: neutron/openstack/common/rpc/impl_zmq.py:563 +msgid "ZMQ Envelope version unsupported or unknown." +msgstr "ZMQ 包络版本不受支持或未知。" diff --git a/neutron/locale/zh_CN/LC_MESSAGES/neutron-log-info.po b/neutron/locale/zh_CN/LC_MESSAGES/neutron-log-info.po new file mode 100644 index 000000000..ccd2e998e --- /dev/null +++ b/neutron/locale/zh_CN/LC_MESSAGES/neutron-log-info.po @@ -0,0 +1,128 @@ +# Translations template for neutron. 
+# Copyright (C) 2014 ORGANIZATION +# This file is distributed under the same license as the neutron project. +# +# Translators: +msgid "" +msgstr "" +"Project-Id-Version: Neutron\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2014-06-16 06:08+0000\n" +"PO-Revision-Date: 2014-05-29 07:49+0000\n" +"Last-Translator: FULL NAME \n" +"Language-Team: Chinese (China) (http://www.transifex.com/projects/p/neutron/" +"language/zh_CN/)\n" +"Language: zh_CN\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 1.3\n" +"Plural-Forms: nplurals=1; plural=0;\n" + +#: neutron/openstack/common/periodic_task.py:125 +#, python-format +msgid "Skipping periodic task %(task)s because its interval is negative" +msgstr "" + +#: neutron/openstack/common/periodic_task.py:130 +#, python-format +msgid "Skipping periodic task %(task)s because it is disabled" +msgstr "" + +#: neutron/openstack/common/service.py:176 +#, python-format +msgid "Caught %s, exiting" +msgstr "捕获到 %s,正在退出" + +#: neutron/openstack/common/service.py:240 +msgid "Parent process has died unexpectedly, exiting" +msgstr "父进程已意外终止,正在退出" + +#: neutron/openstack/common/service.py:271 +#, python-format +msgid "Child caught %s, exiting" +msgstr "" + +#: neutron/openstack/common/service.py:310 +msgid "Forking too fast, sleeping" +msgstr "派生速度太快,正在休眠" + +#: neutron/openstack/common/service.py:329 +#, python-format +msgid "Started child %d" +msgstr "已启动子代 %d" + +#: neutron/openstack/common/service.py:339 +#, python-format +msgid "Starting %d workers" +msgstr "正在启动 %d 工作程序" + +#: neutron/openstack/common/service.py:356 +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" +msgstr "信号 %(sig)d 已终止子代 %(pid)d" + +#: neutron/openstack/common/service.py:360 +#, python-format +msgid "Child %(pid)s exited with status %(code)d" +msgstr "子代 %(pid)s 已退出,状态为 %(code)d" + +#: neutron/openstack/common/service.py:399 +#, python-format +msgid 
"Caught %s, stopping children" +msgstr "捕获到 %s,正在停止子代" + +#: neutron/openstack/common/service.py:408 +msgid "Wait called after thread killed. Cleaning up." +msgstr "" + +#: neutron/openstack/common/service.py:424 +#, python-format +msgid "Waiting on %d children to exit" +msgstr "正在等待 %d 个子代退出" + +#: neutron/openstack/common/db/sqlalchemy/utils.py:379 +#, python-format +msgid "Deleting duplicated row with id: %(id)s from table: %(table)s" +msgstr "正在从表 %(table)s 中删除具有id %(id)s 的重复行" + +#: neutron/openstack/common/rpc/impl_kombu.py:498 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "正在重新连接至 %(hostname)s:%(port)d 上的 AMQP 服务器" + +#: neutron/openstack/common/rpc/impl_kombu.py:520 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "已连接至 %(hostname)s:%(port)d 上的 AMQP 服务器" + +#: neutron/openstack/common/rpc/impl_qpid.py:523 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "已连接至 %s 上的 AMQP 服务器" + +#: neutron/openstack/common/rpc/impl_zmq.py:371 +msgid "Registering reactor" +msgstr "正在注册反应器" + +#: neutron/openstack/common/rpc/impl_zmq.py:383 +msgid "In reactor registered" +msgstr "已注册内部反应器" + +#: neutron/openstack/common/rpc/impl_zmq.py:388 +msgid "Consuming socket" +msgstr "正在使用套接字" + +#: neutron/openstack/common/rpc/impl_zmq.py:438 +#, python-format +msgid "Creating proxy for topic: %s" +msgstr "正在为主题创建代理:%s" + +#: neutron/openstack/common/rpc/impl_zmq.py:591 +msgid "Skipping topic registration. Already registered." 
+msgstr "正在跳过主题注册。已注册。" + +#: neutron/openstack/common/rpc/matchmaker.py:216 +#, python-format +msgid "Matchmaker unregistered: %(key)s, %(host)s" +msgstr "Matchmaker已注销: %(key)s, %(host)s" diff --git a/neutron/locale/zh_TW/LC_MESSAGES/neutron-log-info.po b/neutron/locale/zh_TW/LC_MESSAGES/neutron-log-info.po new file mode 100644 index 000000000..c6e025cc0 --- /dev/null +++ b/neutron/locale/zh_TW/LC_MESSAGES/neutron-log-info.po @@ -0,0 +1,128 @@ +# Translations template for neutron. +# Copyright (C) 2014 ORGANIZATION +# This file is distributed under the same license as the neutron project. +# +# Translators: +msgid "" +msgstr "" +"Project-Id-Version: Neutron\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2014-06-16 06:08+0000\n" +"PO-Revision-Date: 2014-05-29 07:49+0000\n" +"Last-Translator: FULL NAME \n" +"Language-Team: Chinese (Taiwan) (http://www.transifex.com/projects/p/neutron/" +"language/zh_TW/)\n" +"Language: zh_TW\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 1.3\n" +"Plural-Forms: nplurals=1; plural=0;\n" + +#: neutron/openstack/common/periodic_task.py:125 +#, python-format +msgid "Skipping periodic task %(task)s because its interval is negative" +msgstr "" + +#: neutron/openstack/common/periodic_task.py:130 +#, python-format +msgid "Skipping periodic task %(task)s because it is disabled" +msgstr "" + +#: neutron/openstack/common/service.py:176 +#, python-format +msgid "Caught %s, exiting" +msgstr "已捕捉到 %s,正在結束" + +#: neutron/openstack/common/service.py:240 +msgid "Parent process has died unexpectedly, exiting" +msgstr "母程序已非預期地當掉,正在結束" + +#: neutron/openstack/common/service.py:271 +#, python-format +msgid "Child caught %s, exiting" +msgstr "" + +#: neutron/openstack/common/service.py:310 +msgid "Forking too fast, sleeping" +msgstr "分岔太快,正在休眠" + +#: neutron/openstack/common/service.py:329 +#, python-format +msgid "Started child %d" +msgstr "已開始子行程 %d" + 
+#: neutron/openstack/common/service.py:339 +#, python-format +msgid "Starting %d workers" +msgstr "正在啟動 %d 個工作程式" + +#: neutron/openstack/common/service.py:356 +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" +msgstr "信號 %(sig)d 結束了子項 %(pid)d" + +#: neutron/openstack/common/service.py:360 +#, python-format +msgid "Child %(pid)s exited with status %(code)d" +msgstr "子項 %(pid)s 已結束,狀態為 %(code)d" + +#: neutron/openstack/common/service.py:399 +#, python-format +msgid "Caught %s, stopping children" +msgstr "已捕捉到 %s,正在停止子項" + +#: neutron/openstack/common/service.py:408 +msgid "Wait called after thread killed. Cleaning up." +msgstr "" + +#: neutron/openstack/common/service.py:424 +#, python-format +msgid "Waiting on %d children to exit" +msgstr "正在等待 %d 個子項結束" + +#: neutron/openstack/common/db/sqlalchemy/utils.py:379 +#, python-format +msgid "Deleting duplicated row with id: %(id)s from table: %(table)s" +msgstr "" + +#: neutron/openstack/common/rpc/impl_kombu.py:498 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "正在重新連接至 %(hostname)s:%(port)d 上的 AMQP 伺服器" + +#: neutron/openstack/common/rpc/impl_kombu.py:520 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "已連接至 %(hostname)s:%(port)d 上的 AMQP 伺服器" + +#: neutron/openstack/common/rpc/impl_qpid.py:523 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "已連接至 %s 上的 AMQP 伺服器" + +#: neutron/openstack/common/rpc/impl_zmq.py:371 +msgid "Registering reactor" +msgstr "正在登錄反應程式" + +#: neutron/openstack/common/rpc/impl_zmq.py:383 +msgid "In reactor registered" +msgstr "已登錄輸入反應程式" + +#: neutron/openstack/common/rpc/impl_zmq.py:388 +msgid "Consuming socket" +msgstr "正在耗用 Socket" + +#: neutron/openstack/common/rpc/impl_zmq.py:438 +#, python-format +msgid "Creating proxy for topic: %s" +msgstr "正在給主題 %s 建立 Proxy" + +#: neutron/openstack/common/rpc/impl_zmq.py:591 +msgid "Skipping topic registration. Already registered." 
+msgstr "正在跳過主題登錄。已登錄。" + +#: neutron/openstack/common/rpc/matchmaker.py:216 +#, python-format +msgid "Matchmaker unregistered: %(key)s, %(host)s" +msgstr "已取消登錄符合程式:%(key)s, %(host)s" diff --git a/neutron/manager.py b/neutron/manager.py new file mode 100644 index 000000000..2c4e7f994 --- /dev/null +++ b/neutron/manager.py @@ -0,0 +1,225 @@ +# Copyright 2011 VMware, Inc +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import weakref + +from oslo.config import cfg + +from neutron.common import rpc_compat +from neutron.common import utils +from neutron.openstack.common import importutils +from neutron.openstack.common import log as logging +from neutron.openstack.common import periodic_task +from neutron.plugins.common import constants + +from stevedore import driver + + +LOG = logging.getLogger(__name__) + + +class Manager(rpc_compat.RpcCallback, periodic_task.PeriodicTasks): + + # Set RPC API version to 1.0 by default. + RPC_API_VERSION = '1.0' + + def __init__(self, host=None): + if not host: + host = cfg.CONF.host + self.host = host + super(Manager, self).__init__() + + def periodic_tasks(self, context, raise_on_error=False): + self.run_periodic_tasks(context, raise_on_error=raise_on_error) + + def init_host(self): + """Handle initialization if this is a standalone service. + + Child classes should override this method. + + """ + pass + + def after_start(self): + """Handler post initialization stuff. 
+ + Child classes can override this method. + """ + pass + + +def validate_post_plugin_load(): + """Checks if the configuration variables are valid. + + If the configuration is invalid then the method will return an error + message. If all is OK then it will return None. + """ + if ('dhcp_agents_per_network' in cfg.CONF and + cfg.CONF.dhcp_agents_per_network <= 0): + msg = _("dhcp_agents_per_network must be >= 1. '%s' " + "is invalid.") % cfg.CONF.dhcp_agents_per_network + return msg + + +def validate_pre_plugin_load(): + """Checks if the configuration variables are valid. + + If the configuration is invalid then the method will return an error + message. If all is OK then it will return None. + """ + if cfg.CONF.core_plugin is None: + msg = _('Neutron core_plugin not configured!') + return msg + + +class NeutronManager(object): + """Neutron's Manager class. + + Neutron's Manager class is responsible for parsing a config file and + instantiating the correct plugin that concretely implements + neutron_plugin_base class. + The caller should make sure that NeutronManager is a singleton. + """ + _instance = None + + def __init__(self, options=None, config_file=None): + # If no options have been provided, create an empty dict + if not options: + options = {} + + msg = validate_pre_plugin_load() + if msg: + LOG.critical(msg) + raise Exception(msg) + + # NOTE(jkoelker) Testing for the subclass with the __subclasshook__ + # breaks tach monitoring. It has been removed + # intentionally to allow v2 plugins to be monitored + # for performance metrics. 
+ plugin_provider = cfg.CONF.core_plugin + LOG.info(_("Loading core plugin: %s"), plugin_provider) + self.plugin = self._get_plugin_instance('neutron.core_plugins', + plugin_provider) + msg = validate_post_plugin_load() + if msg: + LOG.critical(msg) + raise Exception(msg) + + # core plugin as a part of plugin collection simplifies + # checking extensions + # TODO(enikanorov): make core plugin the same as + # the rest of service plugins + self.service_plugins = {constants.CORE: self.plugin} + self._load_service_plugins() + + def _get_plugin_instance(self, namespace, plugin_provider): + try: + # Try to resolve plugin by name + mgr = driver.DriverManager(namespace, plugin_provider) + plugin_class = mgr.driver + except RuntimeError as e1: + # fallback to class name + try: + plugin_class = importutils.import_class(plugin_provider) + except ImportError as e2: + LOG.exception(_("Error loading plugin by name, %s"), e1) + LOG.exception(_("Error loading plugin by class, %s"), e2) + raise ImportError(_("Plugin not found.")) + return plugin_class() + + def _load_services_from_core_plugin(self): + """Puts core plugin in service_plugins for supported services.""" + LOG.debug(_("Loading services supported by the core plugin")) + + # supported service types are derived from supported extensions + for ext_alias in getattr(self.plugin, + "supported_extension_aliases", []): + if ext_alias in constants.EXT_TO_SERVICE_MAPPING: + service_type = constants.EXT_TO_SERVICE_MAPPING[ext_alias] + self.service_plugins[service_type] = self.plugin + LOG.info(_("Service %s is supported by the core plugin"), + service_type) + + def _load_service_plugins(self): + """Loads service plugins. + + Starts from the core plugin and checks if it supports + advanced services then loads classes provided in configuration. 
+ """ + # load services from the core plugin first + self._load_services_from_core_plugin() + + plugin_providers = cfg.CONF.service_plugins + LOG.debug(_("Loading service plugins: %s"), plugin_providers) + for provider in plugin_providers: + if provider == '': + continue + + LOG.info(_("Loading Plugin: %s"), provider) + plugin_inst = self._get_plugin_instance('neutron.service_plugins', + provider) + + # only one implementation of svc_type allowed + # specifying more than one plugin + # for the same type is a fatal exception + if plugin_inst.get_plugin_type() in self.service_plugins: + raise ValueError(_("Multiple plugins for service " + "%s were configured"), + plugin_inst.get_plugin_type()) + + self.service_plugins[plugin_inst.get_plugin_type()] = plugin_inst + + # search for possible agent notifiers declared in service plugin + # (needed by agent management extension) + if (hasattr(self.plugin, 'agent_notifiers') and + hasattr(plugin_inst, 'agent_notifiers')): + self.plugin.agent_notifiers.update(plugin_inst.agent_notifiers) + + LOG.debug(_("Successfully loaded %(type)s plugin. " + "Description: %(desc)s"), + {"type": plugin_inst.get_plugin_type(), + "desc": plugin_inst.get_plugin_description()}) + + @classmethod + @utils.synchronized("manager") + def _create_instance(cls): + if not cls.has_instance(): + cls._instance = cls() + + @classmethod + def has_instance(cls): + return cls._instance is not None + + @classmethod + def clear_instance(cls): + cls._instance = None + + @classmethod + def get_instance(cls): + # double checked locking + if not cls.has_instance(): + cls._create_instance() + return cls._instance + + @classmethod + def get_plugin(cls): + # Return a weakref to minimize gc-preventing references. + return weakref.proxy(cls.get_instance().plugin) + + @classmethod + def get_service_plugins(cls): + # Return weakrefs to minimize gc-preventing references. 
+ return dict((x, weakref.proxy(y)) + for x, y in cls.get_instance().service_plugins.iteritems()) diff --git a/neutron/neutron_plugin_base_v2.py b/neutron/neutron_plugin_base_v2.py new file mode 100644 index 000000000..8c0c7804d --- /dev/null +++ b/neutron/neutron_plugin_base_v2.py @@ -0,0 +1,352 @@ +# Copyright 2011 VMware, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +v2 Neutron Plug-in API specification. + +:class:`NeutronPluginBaseV2` provides the definition of minimum set of +methods that needs to be implemented by a v2 Neutron Plug-in. +""" + +import abc +import six + + +@six.add_metaclass(abc.ABCMeta) +class NeutronPluginBaseV2(object): + + @abc.abstractmethod + def create_subnet(self, context, subnet): + """Create a subnet. + + Create a subnet, which represents a range of IP addresses + that can be allocated to devices + + :param context: neutron api request context + :param subnet: dictionary describing the subnet, with keys + as listed in the :obj:`RESOURCE_ATTRIBUTE_MAP` object + in :file:`neutron/api/v2/attributes.py`. All keys will + be populated. + """ + pass + + @abc.abstractmethod + def update_subnet(self, context, id, subnet): + """Update values of a subnet. + + :param context: neutron api request context + :param id: UUID representing the subnet to update. + :param subnet: dictionary with keys indicating fields to update. 
+ valid keys are those that have a value of True for + 'allow_put' as listed in the + :obj:`RESOURCE_ATTRIBUTE_MAP` object in + :file:`neutron/api/v2/attributes.py`. + """ + pass + + @abc.abstractmethod + def get_subnet(self, context, id, fields=None): + """Retrieve a subnet. + + :param context: neutron api request context + :param id: UUID representing the subnet to fetch. + :param fields: a list of strings that are valid keys in a + subnet dictionary as listed in the + :obj:`RESOURCE_ATTRIBUTE_MAP` object in + :file:`neutron/api/v2/attributes.py`. Only these fields + will be returned. + """ + pass + + @abc.abstractmethod + def get_subnets(self, context, filters=None, fields=None, + sorts=None, limit=None, marker=None, page_reverse=False): + """Retrieve a list of subnets. + + The contents of the list depends on + the identity of the user making the request (as indicated by the + context) as well as any filters. + + :param context: neutron api request context + :param filters: a dictionary with keys that are valid keys for + a subnet as listed in the :obj:`RESOURCE_ATTRIBUTE_MAP` + object in :file:`neutron/api/v2/attributes.py`. + Values in this dictiontary are an iterable containing + values that will be used for an exact match comparison + for that value. Each result returned by this + function will have matched one of the values for each + key in filters. + :param fields: a list of strings that are valid keys in a + subnet dictionary as listed in the + :obj:`RESOURCE_ATTRIBUTE_MAP` object in + :file:`neutron/api/v2/attributes.py`. Only these fields + will be returned. + """ + pass + + def get_subnets_count(self, context, filters=None): + """Return the number of subnets. + + The result depends on the identity of + the user making the request (as indicated by the context) as well as + any filters. 
+ + :param context: neutron api request context + :param filters: a dictionary with keys that are valid keys for + a network as listed in the + :obj:`RESOURCE_ATTRIBUTE_MAP` object in + :file:`neutron/api/v2/attributes.py`. Values in this + dictiontary are an iterable containing values that + will be used for an exact match comparison for that + value. Each result returned by this function will + have matched one of the values for each key in filters. + + .. note:: this method is optional, as it was not part of the originally + defined plugin API. + """ + raise NotImplementedError + + @abc.abstractmethod + def delete_subnet(self, context, id): + """Delete a subnet. + + :param context: neutron api request context + :param id: UUID representing the subnet to delete. + """ + pass + + @abc.abstractmethod + def create_network(self, context, network): + """Create a network. + + Create a network, which represents an L2 network segment which + can have a set of subnets and ports associated with it. + + :param context: neutron api request context + :param network: dictionary describing the network, with keys + as listed in the :obj:`RESOURCE_ATTRIBUTE_MAP` object + in :file:`neutron/api/v2/attributes.py`. All keys will + be populated. + + """ + pass + + @abc.abstractmethod + def update_network(self, context, id, network): + """Update values of a network. + + :param context: neutron api request context + :param id: UUID representing the network to update. + :param network: dictionary with keys indicating fields to update. + valid keys are those that have a value of True for + 'allow_put' as listed in the + :obj:`RESOURCE_ATTRIBUTE_MAP` object in + :file:`neutron/api/v2/attributes.py`. + """ + pass + + @abc.abstractmethod + def get_network(self, context, id, fields=None): + """Retrieve a network. + + :param context: neutron api request context + :param id: UUID representing the network to fetch. 
+ :param fields: a list of strings that are valid keys in a + network dictionary as listed in the + :obj:`RESOURCE_ATTRIBUTE_MAP` object in + :file:`neutron/api/v2/attributes.py`. Only these fields + will be returned. + """ + pass + + @abc.abstractmethod + def get_networks(self, context, filters=None, fields=None, + sorts=None, limit=None, marker=None, page_reverse=False): + """Retrieve a list of networks. + + The contents of the list depends on + the identity of the user making the request (as indicated by the + context) as well as any filters. + + :param context: neutron api request context + :param filters: a dictionary with keys that are valid keys for + a network as listed in the + :obj:`RESOURCE_ATTRIBUTE_MAP` object in + :file:`neutron/api/v2/attributes.py`. Values in this + dictiontary are an iterable containing values that will + be used for an exact match comparison for that value. + Each result returned by this function will have matched + one of the values for each key in filters. + :param fields: a list of strings that are valid keys in a + network dictionary as listed in the + :obj:`RESOURCE_ATTRIBUTE_MAP` object in + :file:`neutron/api/v2/attributes.py`. Only these fields + will be returned. + """ + pass + + def get_networks_count(self, context, filters=None): + """Return the number of networks. + + The result depends on the identity + of the user making the request (as indicated by the context) as well + as any filters. + + :param context: neutron api request context + :param filters: a dictionary with keys that are valid keys for + a network as listed in the + :obj:`RESOURCE_ATTRIBUTE_MAP` object + in :file:`neutron/api/v2/attributes.py`. Values in + this dictiontary are an iterable containing values that + will be used for an exact match comparison for that + value. Each result returned by this function will have + matched one of the values for each key in filters. 
+ + NOTE: this method is optional, as it was not part of the originally + defined plugin API. + """ + raise NotImplementedError + + @abc.abstractmethod + def delete_network(self, context, id): + """Delete a network. + + :param context: neutron api request context + :param id: UUID representing the network to delete. + """ + pass + + @abc.abstractmethod + def create_port(self, context, port): + """Create a port. + + Create a port, which is a connection point of a device (e.g., a VM + NIC) to attach to a L2 neutron network. + + :param context: neutron api request context + :param port: dictionary describing the port, with keys as listed in the + :obj:`RESOURCE_ATTRIBUTE_MAP` object in + :file:`neutron/api/v2/attributes.py`. All keys will be + populated. + """ + pass + + @abc.abstractmethod + def update_port(self, context, id, port): + """Update values of a port. + + :param context: neutron api request context + :param id: UUID representing the port to update. + :param port: dictionary with keys indicating fields to update. + valid keys are those that have a value of True for + 'allow_put' as listed in the :obj:`RESOURCE_ATTRIBUTE_MAP` + object in :file:`neutron/api/v2/attributes.py`. + """ + pass + + @abc.abstractmethod + def get_port(self, context, id, fields=None): + """Retrieve a port. + + :param context: neutron api request context + :param id: UUID representing the port to fetch. + :param fields: a list of strings that are valid keys in a port + dictionary as listed in the + :obj:`RESOURCE_ATTRIBUTE_MAP` object in + :file:`neutron/api/v2/attributes.py`. Only these fields + will be returned. + """ + pass + + @abc.abstractmethod + def get_ports(self, context, filters=None, fields=None, + sorts=None, limit=None, marker=None, page_reverse=False): + """Retrieve a list of ports. + + The contents of the list depends on the identity of the user making + the request (as indicated by the context) as well as any filters. 
+ + :param context: neutron api request context + :param filters: a dictionary with keys that are valid keys for + a port as listed in the :obj:`RESOURCE_ATTRIBUTE_MAP` + object in :file:`neutron/api/v2/attributes.py`. Values + in this dictiontary are an iterable containing values + that will be used for an exact match comparison for + that value. Each result returned by this function will + have matched one of the values for each key in filters. + :param fields: a list of strings that are valid keys in a + port dictionary as listed in the + :obj:`RESOURCE_ATTRIBUTE_MAP` object in + :file:`neutron/api/v2/attributes.py`. Only these fields + will be returned. + """ + pass + + def get_ports_count(self, context, filters=None): + """Return the number of ports. + + The result depends on the identity of the user making the request + (as indicated by the context) as well as any filters. + + :param context: neutron api request context + :param filters: a dictionary with keys that are valid keys for + a network as listed in the + :obj:`RESOURCE_ATTRIBUTE_MAP` object in + :file:`neutron/api/v2/attributes.py`. Values in this + dictiontary are an iterable containing values that will + be used for an exact match comparison for that value. + Each result returned by this function will have matched + one of the values for each key in filters. + + .. note:: this method is optional, as it was not part of the originally + defined plugin API. + """ + raise NotImplementedError + + @abc.abstractmethod + def delete_port(self, context, id): + """Delete a port. + + :param context: neutron api request context + :param id: UUID representing the port to delete. + """ + pass + + def start_rpc_listeners(self): + """Start the RPC listeners. + + Most plugins start RPC listeners implicitly on initialization. In + order to support multiple process RPC, the plugin needs to expose + control over when this is started. + + .. 
note:: this method is optional, as it was not part of the originally + defined plugin API. + """ + raise NotImplementedError + + def rpc_workers_supported(self): + """Return whether the plugin supports multiple RPC workers. + + A plugin that supports multiple RPC workers should override the + start_rpc_listeners method to ensure that this method returns True and + that start_rpc_listeners is called at the appropriate time. + Alternately, a plugin can override this method to customize detection + of support for multiple rpc workers + + .. note:: this method is optional, as it was not part of the originally + defined plugin API. + """ + return (self.__class__.start_rpc_listeners != + NeutronPluginBaseV2.start_rpc_listeners) diff --git a/neutron/notifiers/__init__.py b/neutron/notifiers/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/neutron/notifiers/nova.py b/neutron/notifiers/nova.py new file mode 100644 index 000000000..4c233ff1e --- /dev/null +++ b/neutron/notifiers/nova.py @@ -0,0 +1,249 @@ +# Copyright (c) 2014 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import eventlet +from novaclient import exceptions as nova_exceptions +import novaclient.v1_1.client as nclient +from novaclient.v1_1.contrib import server_external_events +from oslo.config import cfg +from sqlalchemy.orm import attributes as sql_attr + +from neutron.common import constants +from neutron import context +from neutron import manager +from neutron.openstack.common import log as logging +from neutron.openstack.common import uuidutils + + +LOG = logging.getLogger(__name__) + +VIF_UNPLUGGED = 'network-vif-unplugged' +VIF_PLUGGED = 'network-vif-plugged' +NEUTRON_NOVA_EVENT_STATUS_MAP = {constants.PORT_STATUS_ACTIVE: 'completed', + constants.PORT_STATUS_ERROR: 'failed', + constants.PORT_STATUS_DOWN: 'completed'} + + +class Notifier(object): + + def __init__(self): + # TODO(arosen): we need to cache the endpoints and figure out + # how to deal with different regions here.... + bypass_url = "%s/%s" % (cfg.CONF.nova_url, + cfg.CONF.nova_admin_tenant_id) + self.nclient = nclient.Client( + username=cfg.CONF.nova_admin_username, + api_key=cfg.CONF.nova_admin_password, + project_id=None, + tenant_id=cfg.CONF.nova_admin_tenant_id, + auth_url=cfg.CONF.nova_admin_auth_url, + cacert=cfg.CONF.nova_ca_certificates_file, + insecure=cfg.CONF.nova_api_insecure, + bypass_url=bypass_url, + region_name=cfg.CONF.nova_region_name, + extensions=[server_external_events]) + self.pending_events = [] + self._waiting_to_send = False + + def queue_event(self, event): + """Called to queue sending an event with the next batch of events. + + Sending events individually, as they occur, has been problematic as it + can result in a flood of sends. Previously, there was a loopingcall + thread that would send batched events on a periodic interval. However, + maintaining a persistent thread in the loopingcall was also + problematic. + + This replaces the loopingcall with a mechanism that creates a + short-lived thread on demand when the first event is queued. 
That + thread will sleep once for the same send_events_interval to allow other + events to queue up in pending_events and then will send them when it + wakes. + + If a thread is already alive and waiting, this call will simply queue + the event and return leaving it up to the thread to send it. + + :param event: the event that occurred. + """ + if not event: + return + + self.pending_events.append(event) + + if self._waiting_to_send: + return + + self._waiting_to_send = True + + def last_out_sends(): + eventlet.sleep(cfg.CONF.send_events_interval) + self._waiting_to_send = False + self.send_events() + + eventlet.spawn_n(last_out_sends) + + def _is_compute_port(self, port): + try: + if (port['device_id'] and uuidutils.is_uuid_like(port['device_id']) + and port['device_owner'].startswith('compute:')): + return True + except (KeyError, AttributeError): + pass + return False + + def _get_network_changed_event(self, device_id): + return {'name': 'network-changed', + 'server_uuid': device_id} + + @property + def _plugin(self): + # NOTE(arosen): this cannot be set in __init__ currently since + # this class is initalized at the same time as NeutronManager() + # which is decorated with synchronized() + if not hasattr(self, '_plugin_ref'): + self._plugin_ref = manager.NeutronManager.get_plugin() + return self._plugin_ref + + def send_network_change(self, action, original_obj, + returned_obj): + """Called when a network change is made that nova cares about. + + :param action: the event that occurred. + :param original_obj: the previous value of resource before action. + :param returned_obj: the body returned to client as result of action. 
+ """ + + if not cfg.CONF.notify_nova_on_port_data_changes: + return + + event = self.create_port_changed_event(action, original_obj, + returned_obj) + self.queue_event(event) + + def create_port_changed_event(self, action, original_obj, returned_obj): + port = None + if action == 'update_port': + port = returned_obj['port'] + + elif action in ['update_floatingip', 'create_floatingip', + 'delete_floatingip']: + # NOTE(arosen) if we are associating a floatingip the + # port_id is in the returned_obj. Otherwise on disassociate + # it's in the original_object + port_id = (returned_obj['floatingip'].get('port_id') or + original_obj.get('port_id')) + + if port_id is None: + return + + ctx = context.get_admin_context() + port = self._plugin.get_port(ctx, port_id) + + if port and self._is_compute_port(port): + return self._get_network_changed_event(port['device_id']) + + def record_port_status_changed(self, port, current_port_status, + previous_port_status, initiator): + """Determine if nova needs to be notified due to port status change. + """ + # clear out previous _notify_event + port._notify_event = None + # If there is no device_id set there is nothing we can do here. + if not port.device_id: + LOG.debug(_("device_id is not set on port yet.")) + return + + if not port.id: + LOG.warning(_("Port ID not set! Nova will not be notified of " + "port status change.")) + return + + # We only want to notify about nova ports. + if not self._is_compute_port(port): + return + + # We notify nova when a vif is unplugged which only occurs when + # the status goes from ACTIVE to DOWN. + if (previous_port_status == constants.PORT_STATUS_ACTIVE and + current_port_status == constants.PORT_STATUS_DOWN): + event_name = VIF_UNPLUGGED + + # We only notify nova when a vif is plugged which only occurs + # when the status goes from: + # NO_VALUE/DOWN/BUILD -> ACTIVE/ERROR. 
+ elif (previous_port_status in [sql_attr.NO_VALUE, + constants.PORT_STATUS_DOWN, + constants.PORT_STATUS_BUILD] + and current_port_status in [constants.PORT_STATUS_ACTIVE, + constants.PORT_STATUS_ERROR]): + event_name = VIF_PLUGGED + # All the remaining state transitions are of no interest to nova + else: + LOG.debug(_("Ignoring state change previous_port_status: " + "%(pre_status)s current_port_status: %(cur_status)s" + " port_id %(id)s") % + {'pre_status': previous_port_status, + 'cur_status': current_port_status, + 'id': port.id}) + return + + port._notify_event = ( + {'server_uuid': port.device_id, + 'name': event_name, + 'status': NEUTRON_NOVA_EVENT_STATUS_MAP.get(current_port_status), + 'tag': port.id}) + + def send_port_status(self, mapper, connection, port): + event = getattr(port, "_notify_event", None) + self.queue_event(event) + port._notify_event = None + + def send_events(self): + if not self.pending_events: + return + + batched_events = self.pending_events + self.pending_events = [] + + LOG.debug(_("Sending events: %s"), batched_events) + try: + response = self.nclient.server_external_events.create( + batched_events) + except nova_exceptions.NotFound: + LOG.warning(_("Nova returned NotFound for event: %s"), + batched_events) + except Exception: + LOG.exception(_("Failed to notify nova on events: %s"), + batched_events) + else: + if not isinstance(response, list): + LOG.error(_("Error response returned from nova: %s"), + response) + return + response_error = False + for event in response: + try: + code = event['code'] + except KeyError: + response_error = True + continue + if code != 200: + LOG.warning(_("Nova event: %s returned with failed " + "status"), event) + else: + LOG.info(_("Nova event response: %s"), event) + if response_error: + LOG.error(_("Error response returned from nova: %s"), + response) diff --git a/neutron/openstack/__init__.py b/neutron/openstack/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git 
a/neutron/openstack/common/__init__.py b/neutron/openstack/common/__init__.py new file mode 100644 index 000000000..d1223eaf7 --- /dev/null +++ b/neutron/openstack/common/__init__.py @@ -0,0 +1,17 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import six + + +six.add_move(six.MovedModule('mox', 'mox', 'mox3.mox')) diff --git a/neutron/openstack/common/cache/__init__.py b/neutron/openstack/common/cache/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/neutron/openstack/common/cache/_backends/__init__.py b/neutron/openstack/common/cache/_backends/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/neutron/openstack/common/cache/_backends/memory.py b/neutron/openstack/common/cache/_backends/memory.py new file mode 100644 index 000000000..d6f5249fe --- /dev/null +++ b/neutron/openstack/common/cache/_backends/memory.py @@ -0,0 +1,165 @@ +# Copyright 2013 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import collections + +from neutron.openstack.common.cache import backends +from neutron.openstack.common import lockutils +from neutron.openstack.common import timeutils + + +class MemoryBackend(backends.BaseCache): + + def __init__(self, parsed_url, options=None): + super(MemoryBackend, self).__init__(parsed_url, options) + self._clear() + + def _set_unlocked(self, key, value, ttl=0): + expires_at = 0 + if ttl != 0: + expires_at = timeutils.utcnow_ts() + ttl + + self._cache[key] = (expires_at, value) + + if expires_at: + self._keys_expires[expires_at].add(key) + + def _set(self, key, value, ttl=0, not_exists=False): + with lockutils.lock(key): + + # NOTE(flaper87): This is needed just in `set` + # calls, hence it's not in `_set_unlocked` + if not_exists and self._exists_unlocked(key): + return False + + self._set_unlocked(key, value, ttl) + return True + + def _get_unlocked(self, key, default=None): + now = timeutils.utcnow_ts() + + try: + timeout, value = self._cache[key] + except KeyError: + return (0, default) + + if timeout and now >= timeout: + + # NOTE(flaper87): Record expired, + # remove it from the cache but catch + # KeyError and ValueError in case + # _purge_expired removed this key already. 
+ try: + del self._cache[key] + except KeyError: + pass + + try: + # NOTE(flaper87): Keys with ttl == 0 + # don't exist in the _keys_expires dict + self._keys_expires[timeout].remove(key) + except (KeyError, ValueError): + pass + + return (0, default) + + return (timeout, value) + + def _get(self, key, default=None): + with lockutils.lock(key): + return self._get_unlocked(key, default)[1] + + def _exists_unlocked(self, key): + now = timeutils.utcnow_ts() + try: + timeout = self._cache[key][0] + return not timeout or now <= timeout + except KeyError: + return False + + def __contains__(self, key): + with lockutils.lock(key): + return self._exists_unlocked(key) + + def _incr_append(self, key, other): + with lockutils.lock(key): + timeout, value = self._get_unlocked(key) + + if value is None: + return None + + ttl = timeutils.utcnow_ts() - timeout + new_value = value + other + self._set_unlocked(key, new_value, ttl) + return new_value + + def _incr(self, key, delta): + if not isinstance(delta, int): + raise TypeError('delta must be an int instance') + + return self._incr_append(key, delta) + + def _append_tail(self, key, tail): + return self._incr_append(key, tail) + + def _purge_expired(self): + """Removes expired keys from the cache.""" + + now = timeutils.utcnow_ts() + for timeout in sorted(self._keys_expires.keys()): + + # NOTE(flaper87): If timeout is greater + # than `now`, stop the iteration, remaining + # keys have not expired. + if now < timeout: + break + + # NOTE(flaper87): Unset every key in + # this set from the cache if its timeout + # is equal to `timeout`. (The key might + # have been updated) + for subkey in self._keys_expires.pop(timeout): + try: + if self._cache[subkey][0] == timeout: + del self._cache[subkey] + except KeyError: + continue + + def __delitem__(self, key): + self._purge_expired() + + # NOTE(flaper87): Delete the key. 
Using pop + # since it could have been deleted already + value = self._cache.pop(key, None) + + if value: + try: + # NOTE(flaper87): Keys with ttl == 0 + # don't exist in the _keys_expires dict + self._keys_expires[value[0]].remove(value[1]) + except (KeyError, ValueError): + pass + + def _clear(self): + self._cache = {} + self._keys_expires = collections.defaultdict(set) + + def _get_many(self, keys, default): + return super(MemoryBackend, self)._get_many(keys, default) + + def _set_many(self, data, ttl=0): + return super(MemoryBackend, self)._set_many(data, ttl) + + def _unset_many(self, keys): + return super(MemoryBackend, self)._unset_many(keys) diff --git a/neutron/openstack/common/cache/backends.py b/neutron/openstack/common/cache/backends.py new file mode 100644 index 000000000..2fa4aaeb2 --- /dev/null +++ b/neutron/openstack/common/cache/backends.py @@ -0,0 +1,263 @@ +# Copyright 2013 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import abc + +import six + + +NOTSET = object() + + +@six.add_metaclass(abc.ABCMeta) +class BaseCache(object): + """Base Cache Abstraction + + :params parsed_url: Parsed url object. + :params options: A dictionary with configuration parameters + for the cache. For example: + - default_ttl: An integer defining the default ttl + for keys. 
+ """ + + def __init__(self, parsed_url, options=None): + self._parsed_url = parsed_url + self._options = options or {} + self._default_ttl = int(self._options.get('default_ttl', 0)) + + @abc.abstractmethod + def _set(self, key, value, ttl, not_exists=False): + """Implementations of this class have to override this method.""" + + def set(self, key, value, ttl, not_exists=False): + """Sets or updates a cache entry + + NOTE: Thread-safety is required and has to be + guaranteed by the backend implementation. + + :params key: Item key as string. + :type key: `unicode string` + :params value: Value to assign to the key. This + can be anything that is handled + by the current backend. + :params ttl: Key's timeout in seconds. 0 means + no timeout. + :type ttl: int + :params not_exists: If True, the key will be set + if it doesn't exist. Otherwise, + it'll always be set. + :type not_exists: bool + + :returns: True if the operation succeeds, False otherwise. + """ + if ttl is None: + ttl = self._default_ttl + + return self._set(key, value, ttl, not_exists) + + def __setitem__(self, key, value): + self.set(key, value, self._default_ttl) + + def setdefault(self, key, value): + """Sets the key value to `value` if it doesn't exist + + :params key: Item key as string. + :type key: `unicode string` + :params value: Value to assign to the key. This + can be anything that is handled + by the current backend. + """ + try: + return self[key] + except KeyError: + self[key] = value + return value + + @abc.abstractmethod + def _get(self, key, default): + """Implementations of this class have to override this method.""" + + def get(self, key, default=None): + """Gets one item from the cache + + NOTE: Thread-safety is required and it has to be + guaranteed by the backend implementation. + + :params key: Key for the item to retrieve + from the cache. + :params default: The default value to return. 
+ + :returns: `key`'s value in the cache if it exists, + otherwise `default` should be returned. + """ + return self._get(key, default) + + def __getitem__(self, key): + value = self.get(key, NOTSET) + + if value is NOTSET: + raise KeyError + + return value + + @abc.abstractmethod + def __delitem__(self, key): + """Removes an item from cache. + + NOTE: Thread-safety is required and it has to be + guaranteed by the backend implementation. + + :params key: The key to remove. + + :returns: The key value if there's one + """ + + @abc.abstractmethod + def _clear(self): + """Implementations of this class have to override this method.""" + + def clear(self): + """Removes all items from the cache. + + NOTE: Thread-safety is required and it has to be + guaranteed by the backend implementation. + """ + return self._clear() + + @abc.abstractmethod + def _incr(self, key, delta): + """Implementations of this class have to override this method.""" + + def incr(self, key, delta=1): + """Increments the value for a key + + :params key: The key for the value to be incremented + :params delta: Number of units by which to increment + the value. Pass a negative number to + decrement the value. + + :returns: The new value + """ + return self._incr(key, delta) + + @abc.abstractmethod + def _append_tail(self, key, tail): + """Implementations of this class have to override this method.""" + + def append_tail(self, key, tail): + """Appends `tail` to `key`'s value. + + :params key: The key of the value to which + `tail` should be appended. + :params tail: The list of values to append to the + original. + + :returns: The new value + """ + + if not hasattr(tail, "__iter__"): + raise TypeError('Tail must be an iterable') + + if not isinstance(tail, list): + # NOTE(flaper87): Make sure we pass a list + # down to the implementation. Not all drivers + # have support for generators, sets or other + # iterables. 
+ tail = list(tail) + + return self._append_tail(key, tail) + + def append(self, key, value): + """Appends `value` to `key`'s value. + + :params key: The key of the value to which + `tail` should be appended. + :params value: The value to append to the + original. + + :returns: The new value + """ + return self.append_tail(key, [value]) + + @abc.abstractmethod + def __contains__(self, key): + """Verifies that a key exists. + + :params key: The key to verify. + + :returns: True if the key exists, + otherwise False. + """ + + @abc.abstractmethod + def _get_many(self, keys, default): + """Implementations of this class have to override this method.""" + return ((k, self.get(k, default=default)) for k in keys) + + def get_many(self, keys, default=NOTSET): + """Gets keys' value from cache + + :params keys: List of keys to retrieve. + :params default: The default value to return + for each key that is not in + the cache. + + :returns: A generator of (key, value) + """ + return self._get_many(keys, default) + + @abc.abstractmethod + def _set_many(self, data, ttl): + """Implementations of this class have to override this method.""" + + for key, value in data.items(): + self.set(key, value, ttl=ttl) + + def set_many(self, data, ttl=None): + """Puts several items into the cache at once + + Depending on the backend, this operation may or may + not be efficient. The default implementation calls + set for each (key, value) pair passed, other backends + support set_many operations as part of their protocols. + + :params data: A dictionary like {key: val} to store + in the cache. + :params ttl: Key's timeout in seconds. + """ + + if ttl is None: + ttl = self._default_ttl + + self._set_many(data, ttl) + + def update(self, **kwargs): + """Sets several (key, value) pairs. + + Refer to the `set_many` docstring. 
+ """ + self.set_many(kwargs, ttl=self._default_ttl) + + @abc.abstractmethod + def _unset_many(self, keys): + """Implementations of this class have to override this method.""" + for key in keys: + del self[key] + + def unset_many(self, keys): + """Removes several keys from the cache at once + + :params keys: List of keys to unset. + """ + self._unset_many(keys) diff --git a/neutron/openstack/common/cache/cache.py b/neutron/openstack/common/cache/cache.py new file mode 100644 index 000000000..1247787a2 --- /dev/null +++ b/neutron/openstack/common/cache/cache.py @@ -0,0 +1,78 @@ +# Copyright 2013 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Cache library. + +Supported configuration options: + +`default_backend`: Name of the cache backend to use. +`key_namespace`: Namespace under which keys will be created. +""" + +from six.moves.urllib import parse +from stevedore import driver + + +def _get_olso_configs(): + """Returns the oslo.config options to register.""" + # NOTE(flaper87): Oslo config should be + # optional. Instead of doing try / except + # at the top of this file, lets import cfg + # here and assume that the caller of this + # function already took care of this dependency. + from oslo.config import cfg + + return [ + cfg.StrOpt('cache_url', default='memory://', + help='URL to connect to the cache back end.') + ] + + +def register_oslo_configs(conf): + """Registers a cache configuration options + + :params conf: Config object. 
+ :type conf: `cfg.ConfigOptions` + """ + conf.register_opts(_get_olso_configs()) + + +def get_cache(url='memory://'): + """Loads the cache backend + + This function loads the cache backend + specified in the given configuration. + + :param conf: Configuration instance to use + """ + + parsed = parse.urlparse(url) + backend = parsed.scheme + + query = parsed.query + # NOTE(flaper87): We need the following hack + # for python versions < 2.7.5. Previous versions + # of python parsed query params just for 'known' + # schemes. This was changed in this patch: + # http://hg.python.org/cpython/rev/79e6ff3d9afd + if not query and '?' in parsed.path: + query = parsed.path.split('?', 1)[-1] + parameters = parse.parse_qsl(query) + kwargs = {'options': dict(parameters)} + + mgr = driver.DriverManager('neutron.openstack.common.cache.backends', backend, + invoke_on_load=True, + invoke_args=[parsed], + invoke_kwds=kwargs) + return mgr.driver diff --git a/neutron/openstack/common/context.py b/neutron/openstack/common/context.py new file mode 100644 index 000000000..fa62fc1f1 --- /dev/null +++ b/neutron/openstack/common/context.py @@ -0,0 +1,83 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Simple class that stores security context information in the web request. 
+ +Projects should subclass this class if they wish to enhance the request +context or provide additional information in their specific WSGI pipeline. +""" + +import itertools + +from neutron.openstack.common import uuidutils + + +def generate_request_id(): + return 'req-%s' % uuidutils.generate_uuid() + + +class RequestContext(object): + + """Helper class to represent useful information about a request context. + + Stores information about the security context under which the user + accesses the system, as well as additional request information. + """ + + def __init__(self, auth_token=None, user=None, tenant=None, is_admin=False, + read_only=False, show_deleted=False, request_id=None): + self.auth_token = auth_token + self.user = user + self.tenant = tenant + self.is_admin = is_admin + self.read_only = read_only + self.show_deleted = show_deleted + if not request_id: + request_id = generate_request_id() + self.request_id = request_id + + def to_dict(self): + return {'user': self.user, + 'tenant': self.tenant, + 'is_admin': self.is_admin, + 'read_only': self.read_only, + 'show_deleted': self.show_deleted, + 'auth_token': self.auth_token, + 'request_id': self.request_id} + + +def get_admin_context(show_deleted="no"): + context = RequestContext(None, + tenant=None, + is_admin=True, + show_deleted=show_deleted) + return context + + +def get_context_from_function_and_args(function, args, kwargs): + """Find an arg of type RequestContext and return it. + + This is useful in a couple of decorators where we don't + know much about the function we're wrapping. 
+ """ + + for arg in itertools.chain(kwargs.values(), args): + if isinstance(arg, RequestContext): + return arg + + return None diff --git a/neutron/openstack/common/db/__init__.py b/neutron/openstack/common/db/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/neutron/openstack/common/db/api.py b/neutron/openstack/common/db/api.py new file mode 100644 index 000000000..7f71d6a2b --- /dev/null +++ b/neutron/openstack/common/db/api.py @@ -0,0 +1,162 @@ +# Copyright (c) 2013 Rackspace Hosting +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Multiple DB API backend support. + +A DB backend module should implement a method named 'get_backend' which +takes no arguments. The method can return any object that implements DB +API methods. +""" + +import functools +import logging +import threading +import time + +from neutron.openstack.common.db import exception +from neutron.openstack.common.gettextutils import _LE +from neutron.openstack.common import importutils + + +LOG = logging.getLogger(__name__) + + +def safe_for_db_retry(f): + """Enable db-retry for decorated function, if config option enabled.""" + f.__dict__['enable_retry'] = True + return f + + +class wrap_db_retry(object): + """Retry db.api methods, if DBConnectionError() raised + + Retry decorated db.api methods. If we enabled `use_db_reconnect` + in config, this decorator will be applied to all db.api functions, + marked with @safe_for_db_retry decorator. 
+ Decorator catches DBConnectionError() and retries function in a + loop until it succeeds, or until maximum retries count will be reached. + """ + + def __init__(self, retry_interval, max_retries, inc_retry_interval, + max_retry_interval): + super(wrap_db_retry, self).__init__() + + self.retry_interval = retry_interval + self.max_retries = max_retries + self.inc_retry_interval = inc_retry_interval + self.max_retry_interval = max_retry_interval + + def __call__(self, f): + @functools.wraps(f) + def wrapper(*args, **kwargs): + next_interval = self.retry_interval + remaining = self.max_retries + + while True: + try: + return f(*args, **kwargs) + except exception.DBConnectionError as e: + if remaining == 0: + LOG.exception(_LE('DB exceeded retry limit.')) + raise exception.DBError(e) + if remaining != -1: + remaining -= 1 + LOG.exception(_LE('DB connection error.')) + # NOTE(vsergeyev): We are using patched time module, so + # this effectively yields the execution + # context to another green thread. + time.sleep(next_interval) + if self.inc_retry_interval: + next_interval = min( + next_interval * 2, + self.max_retry_interval + ) + return wrapper + + +class DBAPI(object): + def __init__(self, backend_name, backend_mapping=None, lazy=False, + **kwargs): + """Initialize the chosen DB API backend. 
+ + :param backend_name: name of the backend to load + :type backend_name: str + + :param backend_mapping: backend name -> module/class to load mapping + :type backend_mapping: dict + + :param lazy: load the DB backend lazily on the first DB API method call + :type lazy: bool + + Keyword arguments: + + :keyword use_db_reconnect: retry DB transactions on disconnect or not + :type use_db_reconnect: bool + + :keyword retry_interval: seconds between transaction retries + :type retry_interval: int + + :keyword inc_retry_interval: increase retry interval or not + :type inc_retry_interval: bool + + :keyword max_retry_interval: max interval value between retries + :type max_retry_interval: int + + :keyword max_retries: max number of retries before an error is raised + :type max_retries: int + + """ + + self._backend = None + self._backend_name = backend_name + self._backend_mapping = backend_mapping or {} + self._lock = threading.Lock() + + if not lazy: + self._load_backend() + + self.use_db_reconnect = kwargs.get('use_db_reconnect', False) + self.retry_interval = kwargs.get('retry_interval', 1) + self.inc_retry_interval = kwargs.get('inc_retry_interval', True) + self.max_retry_interval = kwargs.get('max_retry_interval', 10) + self.max_retries = kwargs.get('max_retries', 20) + + def _load_backend(self): + with self._lock: + if not self._backend: + # Import the untranslated name if we don't have a mapping + backend_path = self._backend_mapping.get(self._backend_name, + self._backend_name) + backend_mod = importutils.import_module(backend_path) + self._backend = backend_mod.get_backend() + + def __getattr__(self, key): + if not self._backend: + self._load_backend() + + attr = getattr(self._backend, key) + if not hasattr(attr, '__call__'): + return attr + # NOTE(vsergeyev): If `use_db_reconnect` option is set to True, retry + # DB API methods, decorated with @safe_for_db_retry + # on disconnect. 
+ if self.use_db_reconnect and hasattr(attr, 'enable_retry'): + attr = wrap_db_retry( + retry_interval=self.retry_interval, + max_retries=self.max_retries, + inc_retry_interval=self.inc_retry_interval, + max_retry_interval=self.max_retry_interval)(attr) + + return attr diff --git a/neutron/openstack/common/db/exception.py b/neutron/openstack/common/db/exception.py new file mode 100644 index 000000000..ee92ccfa6 --- /dev/null +++ b/neutron/openstack/common/db/exception.py @@ -0,0 +1,56 @@ +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +"""DB related custom exceptions.""" + +import six + +from neutron.openstack.common.gettextutils import _ + + +class DBError(Exception): + """Wraps an implementation specific exception.""" + def __init__(self, inner_exception=None): + self.inner_exception = inner_exception + super(DBError, self).__init__(six.text_type(inner_exception)) + + +class DBDuplicateEntry(DBError): + """Wraps an implementation specific exception.""" + def __init__(self, columns=[], inner_exception=None): + self.columns = columns + super(DBDuplicateEntry, self).__init__(inner_exception) + + +class DBDeadlock(DBError): + def __init__(self, inner_exception=None): + super(DBDeadlock, self).__init__(inner_exception) + + +class DBInvalidUnicodeParameter(Exception): + message = _("Invalid Parameter: " + "Unicode is not supported by the current database.") + + +class DbMigrationError(DBError): + """Wraps migration specific exception.""" + def __init__(self, message=None): + super(DbMigrationError, self).__init__(message) + + +class DBConnectionError(DBError): + """Wraps connection specific exception.""" + pass diff --git a/neutron/openstack/common/db/options.py b/neutron/openstack/common/db/options.py new file mode 100644 index 000000000..66443162c --- /dev/null +++ b/neutron/openstack/common/db/options.py @@ -0,0 +1,171 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import copy + +from oslo.config import cfg + + +database_opts = [ + cfg.StrOpt('sqlite_db', + deprecated_group='DEFAULT', + default='neutron.sqlite', + help='The file name to use with SQLite'), + cfg.BoolOpt('sqlite_synchronous', + deprecated_group='DEFAULT', + default=True, + help='If True, SQLite uses synchronous mode'), + cfg.StrOpt('backend', + default='sqlalchemy', + deprecated_name='db_backend', + deprecated_group='DEFAULT', + help='The backend to use for db'), + cfg.StrOpt('connection', + help='The SQLAlchemy connection string used to connect to the ' + 'database', + secret=True, + deprecated_opts=[cfg.DeprecatedOpt('sql_connection', + group='DEFAULT'), + cfg.DeprecatedOpt('sql_connection', + group='DATABASE'), + cfg.DeprecatedOpt('connection', + group='sql'), ]), + cfg.StrOpt('mysql_sql_mode', + default='TRADITIONAL', + help='The SQL mode to be used for MySQL sessions. ' + 'This option, including the default, overrides any ' + 'server-set SQL mode. To use whatever SQL mode ' + 'is set by the server configuration, ' + 'set this to no value. 
Example: mysql_sql_mode='), + cfg.IntOpt('idle_timeout', + default=3600, + deprecated_opts=[cfg.DeprecatedOpt('sql_idle_timeout', + group='DEFAULT'), + cfg.DeprecatedOpt('sql_idle_timeout', + group='DATABASE'), + cfg.DeprecatedOpt('idle_timeout', + group='sql')], + help='Timeout before idle sql connections are reaped'), + cfg.IntOpt('min_pool_size', + default=1, + deprecated_opts=[cfg.DeprecatedOpt('sql_min_pool_size', + group='DEFAULT'), + cfg.DeprecatedOpt('sql_min_pool_size', + group='DATABASE')], + help='Minimum number of SQL connections to keep open in a ' + 'pool'), + cfg.IntOpt('max_pool_size', + default=None, + deprecated_opts=[cfg.DeprecatedOpt('sql_max_pool_size', + group='DEFAULT'), + cfg.DeprecatedOpt('sql_max_pool_size', + group='DATABASE')], + help='Maximum number of SQL connections to keep open in a ' + 'pool'), + cfg.IntOpt('max_retries', + default=10, + deprecated_opts=[cfg.DeprecatedOpt('sql_max_retries', + group='DEFAULT'), + cfg.DeprecatedOpt('sql_max_retries', + group='DATABASE')], + help='Maximum db connection retries during startup. ' + '(setting -1 implies an infinite retry count)'), + cfg.IntOpt('retry_interval', + default=10, + deprecated_opts=[cfg.DeprecatedOpt('sql_retry_interval', + group='DEFAULT'), + cfg.DeprecatedOpt('reconnect_interval', + group='DATABASE')], + help='Interval between retries of opening a sql connection'), + cfg.IntOpt('max_overflow', + default=None, + deprecated_opts=[cfg.DeprecatedOpt('sql_max_overflow', + group='DEFAULT'), + cfg.DeprecatedOpt('sqlalchemy_max_overflow', + group='DATABASE')], + help='If set, use this value for max_overflow with sqlalchemy'), + cfg.IntOpt('connection_debug', + default=0, + deprecated_opts=[cfg.DeprecatedOpt('sql_connection_debug', + group='DEFAULT')], + help='Verbosity of SQL debugging information. 
0=None, ' + '100=Everything'), + cfg.BoolOpt('connection_trace', + default=False, + deprecated_opts=[cfg.DeprecatedOpt('sql_connection_trace', + group='DEFAULT')], + help='Add python stack traces to SQL as comment strings'), + cfg.IntOpt('pool_timeout', + default=None, + deprecated_opts=[cfg.DeprecatedOpt('sqlalchemy_pool_timeout', + group='DATABASE')], + help='If set, use this value for pool_timeout with sqlalchemy'), + cfg.BoolOpt('use_db_reconnect', + default=False, + help='Enable the experimental use of database reconnect ' + 'on connection lost'), + cfg.IntOpt('db_retry_interval', + default=1, + help='seconds between db connection retries'), + cfg.BoolOpt('db_inc_retry_interval', + default=True, + help='Whether to increase interval between db connection ' + 'retries, up to db_max_retry_interval'), + cfg.IntOpt('db_max_retry_interval', + default=10, + help='max seconds between db connection retries, if ' + 'db_inc_retry_interval is enabled'), + cfg.IntOpt('db_max_retries', + default=20, + help='maximum db connection retries before error is raised. ' + '(setting -1 implies an infinite retry count)'), +] + +CONF = cfg.CONF +CONF.register_opts(database_opts, 'database') + + +def set_defaults(sql_connection, sqlite_db, max_pool_size=None, + max_overflow=None, pool_timeout=None): + """Set defaults for configuration variables.""" + cfg.set_defaults(database_opts, + connection=sql_connection, + sqlite_db=sqlite_db) + # Update the QueuePool defaults + if max_pool_size is not None: + cfg.set_defaults(database_opts, + max_pool_size=max_pool_size) + if max_overflow is not None: + cfg.set_defaults(database_opts, + max_overflow=max_overflow) + if pool_timeout is not None: + cfg.set_defaults(database_opts, + pool_timeout=pool_timeout) + + +def list_opts(): + """Returns a list of oslo.config options available in the library. + + The returned list includes all oslo.config options which may be registered + at runtime by the library. + + Each element of the list is a tuple. 
The first element is the name of the + group under which the list of elements in the second element will be + registered. A group name of None corresponds to the [DEFAULT] group in + config files. + + The purpose of this is to allow tools like the Oslo sample config file + generator to discover the options exposed to users by this library. + + :returns: a list of (group_name, opts) tuples + """ + return [('database', copy.deepcopy(database_opts))] diff --git a/neutron/openstack/common/db/sqlalchemy/__init__.py b/neutron/openstack/common/db/sqlalchemy/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/neutron/openstack/common/db/sqlalchemy/models.py b/neutron/openstack/common/db/sqlalchemy/models.py new file mode 100644 index 000000000..12cfc7645 --- /dev/null +++ b/neutron/openstack/common/db/sqlalchemy/models.py @@ -0,0 +1,119 @@ +# Copyright (c) 2011 X.commerce, a business unit of eBay Inc. +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# Copyright 2011 Piston Cloud Computing, Inc. +# Copyright 2012 Cloudscaling Group, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +SQLAlchemy models. 
+""" + +import six + +from sqlalchemy import Column, Integer +from sqlalchemy import DateTime +from sqlalchemy.orm import object_mapper + +from neutron.openstack.common import timeutils + + +class ModelBase(six.Iterator): + """Base class for models.""" + __table_initialized__ = False + + def save(self, session): + """Save this object.""" + + # NOTE(boris-42): This part of code should be look like: + # session.add(self) + # session.flush() + # But there is a bug in sqlalchemy and eventlet that + # raises NoneType exception if there is no running + # transaction and rollback is called. As long as + # sqlalchemy has this bug we have to create transaction + # explicitly. + with session.begin(subtransactions=True): + session.add(self) + session.flush() + + def __setitem__(self, key, value): + setattr(self, key, value) + + def __getitem__(self, key): + return getattr(self, key) + + def get(self, key, default=None): + return getattr(self, key, default) + + @property + def _extra_keys(self): + """Specifies custom fields + + Subclasses can override this property to return a list + of custom fields that should be included in their dict + representation. + + For reference check tests/db/sqlalchemy/test_models.py + """ + return [] + + def __iter__(self): + columns = dict(object_mapper(self).columns).keys() + # NOTE(russellb): Allow models to specify other keys that can be looked + # up, beyond the actual db columns. An example would be the 'name' + # property for an Instance. + columns.extend(self._extra_keys) + self._i = iter(columns) + return self + + # In Python 3, __next__() has replaced next(). + def __next__(self): + n = six.advance_iterator(self._i) + return n, getattr(self, n) + + def next(self): + return self.__next__() + + def update(self, values): + """Make the model object behave like a dict.""" + for k, v in six.iteritems(values): + setattr(self, k, v) + + def iteritems(self): + """Make the model object behave like a dict. + + Includes attributes from joins. 
+ """ + local = dict(self) + joined = dict([(k, v) for k, v in six.iteritems(self.__dict__) + if not k[0] == '_']) + local.update(joined) + return six.iteritems(local) + + +class TimestampMixin(object): + created_at = Column(DateTime, default=lambda: timeutils.utcnow()) + updated_at = Column(DateTime, onupdate=lambda: timeutils.utcnow()) + + +class SoftDeleteMixin(object): + deleted_at = Column(DateTime) + deleted = Column(Integer, default=0) + + def soft_delete(self, session): + """Mark this object as deleted.""" + self.deleted = self.id + self.deleted_at = timeutils.utcnow() + self.save(session=session) diff --git a/neutron/openstack/common/db/sqlalchemy/provision.py b/neutron/openstack/common/db/sqlalchemy/provision.py new file mode 100644 index 000000000..70c89a921 --- /dev/null +++ b/neutron/openstack/common/db/sqlalchemy/provision.py @@ -0,0 +1,157 @@ +# Copyright 2013 Mirantis.inc +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Provision test environment for specific DB backends""" + +import argparse +import logging +import os +import random +import string + +from six import moves +import sqlalchemy + +from neutron.openstack.common.db import exception as exc + + +LOG = logging.getLogger(__name__) + + +def get_engine(uri): + """Engine creation + + Call the function without arguments to get admin connection. Admin + connection required to create temporary user and database for each + particular test. 
def get_engine(uri):
    """Engine creation

    Call the function without arguments to get admin connection. Admin
    connection required to create temporary user and database for each
    particular test. Otherwise use existing connection to recreate connection
    to the temporary database.
    """
    return sqlalchemy.create_engine(uri, poolclass=sqlalchemy.pool.NullPool)


def _execute_sql(engine, sql, driver):
    """Initialize connection, execute sql query and close it."""
    try:
        with engine.connect() as conn:
            # Postgres cannot CREATE/DROP DATABASE inside a transaction,
            # so drop to autocommit (isolation level 0) first.
            if driver == 'postgresql':
                conn.connection.set_isolation_level(0)
            for statement in sql:
                conn.execute(statement)
    except sqlalchemy.exc.OperationalError:
        msg = ('%s does not match database admin '
               'credentials or database does not exist.')
        LOG.exception(msg % engine.url)
        raise exc.DBConnectionError(msg % engine.url)


def create_database(engine):
    """Provide temporary user and database for each particular test."""
    driver = engine.name

    # A fresh random 10-letter database name per test run.
    db_name = ''.join(random.choice(string.ascii_lowercase) for _ in range(10))
    auth = {
        'database': db_name,
        'user': engine.url.username,
        'passwd': engine.url.password,
    }

    if driver == 'sqlite':
        # sqlite needs no server-side setup; just hand back a file URI.
        return 'sqlite:////tmp/%s' % auth['database']

    if driver in ('mysql', 'postgresql'):
        statements = [template % auth
                      for template in ("drop database if exists %(database)s;",
                                       "create database %(database)s;")]
        _execute_sql(engine, statements, driver)
    else:
        raise ValueError('Unsupported RDBMS %s' % driver)

    params = dict(auth, backend=driver)
    return "%(backend)s://%(user)s:%(passwd)s@localhost/%(database)s" % params


def drop_database(admin_engine, current_uri):
    """Drop temporary database and user after each particular test."""
    engine = get_engine(current_uri)
    driver = engine.name
    auth = {'database': engine.url.database, 'user': engine.url.username}

    if driver == 'sqlite':
        # Best effort: the file may never have been created.
        try:
            os.remove(auth['database'])
        except OSError:
            pass
    elif driver in ('mysql', 'postgresql'):
        _execute_sql(admin_engine,
                     ["drop database if exists %(database)s;" % auth],
                     driver)
    else:
        raise ValueError('Unsupported RDBMS %s' % driver)


def main():
    """Controller to handle commands

    ::create: Create test user and database with random names.
    ::drop: Drop user and database created by previous command.
    """
    parser = argparse.ArgumentParser(
        description='Controller to handle database creation and dropping'
                    ' commands.',
        epilog='Under normal circumstances is not used directly.'
               ' Used in .testr.conf to automate test database creation'
               ' and dropping processes.')
    subparsers = parser.add_subparsers(
        help='Subcommands to manipulate temporary test databases.')

    create = subparsers.add_parser(
        'create',
        help='Create temporary test databases and users.')
    create.set_defaults(which='create')
    create.add_argument(
        'instances_count',
        type=int,
        help='Number of databases to create.')

    drop = subparsers.add_parser(
        'drop',
        help='Drop temporary test databases and users.')
    drop.set_defaults(which='drop')
    drop.add_argument(
        'instances',
        nargs='+',
        help='List of databases uri to be dropped.')

    args = parser.parse_args()

    # Admin credentials come from the environment; fall back to in-memory
    # sqlite so the tool always has something to connect to.
    admin_uri = os.getenv('OS_TEST_DBAPI_ADMIN_CONNECTION', 'sqlite://')
    engine = get_engine(admin_uri)

    if args.which == "create":
        for _ in range(int(args.instances_count)):
            print(create_database(engine))
    elif args.which == "drop":
        for db_uri in args.instances:
            drop_database(engine, db_uri)


if __name__ == "__main__":
    main()
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Session Handling for SQLAlchemy backend. + +Recommended ways to use sessions within this framework: + +* Don't use them explicitly; this is like running with ``AUTOCOMMIT=1``. + `model_query()` will implicitly use a session when called without one + supplied. This is the ideal situation because it will allow queries + to be automatically retried if the database connection is interrupted. + + .. note:: Automatic retry will be enabled in a future patch. + + It is generally fine to issue several queries in a row like this. Even though + they may be run in separate transactions and/or separate sessions, each one + will see the data from the prior calls. If needed, undo- or rollback-like + functionality should be handled at a logical level. For an example, look at + the code around quotas and `reservation_rollback()`. + + Examples: + + .. code:: python + + def get_foo(context, foo): + return (model_query(context, models.Foo). + filter_by(foo=foo). + first()) + + def update_foo(context, id, newfoo): + (model_query(context, models.Foo). + filter_by(id=id). + update({'foo': newfoo})) + + def create_foo(context, values): + foo_ref = models.Foo() + foo_ref.update(values) + foo_ref.save() + return foo_ref + + +* Within the scope of a single method, keep all the reads and writes within + the context managed by a single session. In this way, the session's + `__exit__` handler will take care of calling `flush()` and `commit()` for + you. If using this approach, you should not explicitly call `flush()` or + `commit()`. 
Any error within the context of the session will cause the + session to emit a `ROLLBACK`. Database errors like `IntegrityError` will be + raised in `session`'s `__exit__` handler, and any try/except within the + context managed by `session` will not be triggered. And catching other + non-database errors in the session will not trigger the ROLLBACK, so + exception handlers should always be outside the session, unless the + developer wants to do a partial commit on purpose. If the connection is + dropped before this is possible, the database will implicitly roll back the + transaction. + + .. note:: Statements in the session scope will not be automatically retried. + + If you create models within the session, they need to be added, but you + do not need to call `model.save()`: + + .. code:: python + + def create_many_foo(context, foos): + session = sessionmaker() + with session.begin(): + for foo in foos: + foo_ref = models.Foo() + foo_ref.update(foo) + session.add(foo_ref) + + def update_bar(context, foo_id, newbar): + session = sessionmaker() + with session.begin(): + foo_ref = (model_query(context, models.Foo, session). + filter_by(id=foo_id). + first()) + (model_query(context, models.Bar, session). + filter_by(id=foo_ref['bar_id']). + update({'bar': newbar})) + + .. note:: `update_bar` is a trivially simple example of using + ``with session.begin``. Whereas `create_many_foo` is a good example of + when a transaction is needed, it is always best to use as few queries as + possible. + + The two queries in `update_bar` can be better expressed using a single query + which avoids the need for an explicit transaction. It can be expressed like + so: + + .. code:: python + + def update_bar(context, foo_id, newbar): + subq = (model_query(context, models.Foo.id). + filter_by(id=foo_id). + limit(1). + subquery()) + (model_query(context, models.Bar). + filter_by(id=subq.as_scalar()). 
+ update({'bar': newbar})) + + For reference, this emits approximately the following SQL statement: + + .. code:: sql + + UPDATE bar SET bar = ${newbar} + WHERE id=(SELECT bar_id FROM foo WHERE id = ${foo_id} LIMIT 1); + + .. note:: `create_duplicate_foo` is a trivially simple example of catching an + exception while using ``with session.begin``. Here create two duplicate + instances with same primary key, must catch the exception out of context + managed by a single session: + + .. code:: python + + def create_duplicate_foo(context): + foo1 = models.Foo() + foo2 = models.Foo() + foo1.id = foo2.id = 1 + session = sessionmaker() + try: + with session.begin(): + session.add(foo1) + session.add(foo2) + except exception.DBDuplicateEntry as e: + handle_error(e) + +* Passing an active session between methods. Sessions should only be passed + to private methods. The private method must use a subtransaction; otherwise + SQLAlchemy will throw an error when you call `session.begin()` on an existing + transaction. Public methods should not accept a session parameter and should + not be involved in sessions within the caller's scope. + + Note that this incurs more overhead in SQLAlchemy than the above means + due to nesting transactions, and it is not possible to implicitly retry + failed database operations when using this approach. + + This also makes code somewhat more difficult to read and debug, because a + single database transaction spans more than one method. Error handling + becomes less clear in this situation. When this is needed for code clarity, + it should be clearly documented. + + .. 
code:: python + + def myfunc(foo): + session = sessionmaker() + with session.begin(): + # do some database things + bar = _private_func(foo, session) + return bar + + def _private_func(foo, session=None): + if not session: + session = sessionmaker() + with session.begin(subtransaction=True): + # do some other database things + return bar + + +There are some things which it is best to avoid: + +* Don't keep a transaction open any longer than necessary. + + This means that your ``with session.begin()`` block should be as short + as possible, while still containing all the related calls for that + transaction. + +* Avoid ``with_lockmode('UPDATE')`` when possible. + + In MySQL/InnoDB, when a ``SELECT ... FOR UPDATE`` query does not match + any rows, it will take a gap-lock. This is a form of write-lock on the + "gap" where no rows exist, and prevents any other writes to that space. + This can effectively prevent any INSERT into a table by locking the gap + at the end of the index. Similar problems will occur if the SELECT FOR UPDATE + has an overly broad WHERE clause, or doesn't properly use an index. + + One idea proposed at ODS Fall '12 was to use a normal SELECT to test the + number of rows matching a query, and if only one row is returned, + then issue the SELECT FOR UPDATE. + + The better long-term solution is to use + ``INSERT .. ON DUPLICATE KEY UPDATE``. + However, this can not be done until the "deleted" columns are removed and + proper UNIQUE constraints are added to the tables. + + +Enabling soft deletes: + +* To use/enable soft-deletes, the `SoftDeleteMixin` must be added + to your model class. For example: + + .. code:: python + + class NovaBase(models.SoftDeleteMixin, models.ModelBase): + pass + + +Efficient use of soft deletes: + +* There are two possible ways to mark a record as deleted: + `model.soft_delete()` and `query.soft_delete()`. + + The `model.soft_delete()` method works with a single already-fetched entry. 
+ `query.soft_delete()` makes only one db request for all entries that + correspond to the query. + +* In almost all cases you should use `query.soft_delete()`. Some examples: + + .. code:: python + + def soft_delete_bar(): + count = model_query(BarModel).find(some_condition).soft_delete() + if count == 0: + raise Exception("0 entries were soft deleted") + + def complex_soft_delete_with_synchronization_bar(session=None): + if session is None: + session = sessionmaker() + with session.begin(subtransactions=True): + count = (model_query(BarModel). + find(some_condition). + soft_delete(synchronize_session=True)) + # Here synchronize_session is required, because we + # don't know what is going on in outer session. + if count == 0: + raise Exception("0 entries were soft deleted") + +* There is only one situation where `model.soft_delete()` is appropriate: when + you fetch a single record, work with it, and mark it as deleted in the same + transaction. + + .. code:: python + + def soft_delete_bar_model(): + session = sessionmaker() + with session.begin(): + bar_ref = model_query(BarModel).find(some_condition).first() + # Work with bar_ref + bar_ref.soft_delete(session=session) + + However, if you need to work with all entries that correspond to query and + then soft delete them you should use the `query.soft_delete()` method: + + .. code:: python + + def soft_delete_multi_models(): + session = sessionmaker() + with session.begin(): + query = (model_query(BarModel, session=session). + find(some_condition)) + model_refs = query.all() + # Work with model_refs + query.soft_delete(synchronize_session=False) + # synchronize_session=False should be set if there is no outer + # session and these entries are not used after this. + + When working with many rows, it is very important to use query.soft_delete, + which issues a single query. Using `model.soft_delete()`, as in the following + example, is very inefficient. + + .. 
class SqliteForeignKeysListener(PoolListener):
    """Ensures that the foreign key constraints are enforced in SQLite.

    SQLite ships with foreign key enforcement switched off by default, so
    this listener turns it on explicitly for every new database connection.
    """

    def connect(self, dbapi_con, con_record):
        # Runs once per fresh DBAPI connection handed to the pool.
        dbapi_con.execute('pragma foreign_keys=ON')
__all__ = ['_raise_if_duplicate_entry_error', '_raise_if_deadlock_error']

# Per-backend regexes extracting column info from a unique-constraint
# violation message. sqlite has two forms (pre- and post-3.7.16); the
# ibm_db_sa (DB2) message carries no column names at all.
_DUP_KEY_RE_DB = {
    "sqlite": (re.compile(r"^.*columns?([^)]+)(is|are)\s+not\s+unique$"),
               re.compile(r"^.*UNIQUE\s+constraint\s+failed:\s+(.+)$")),
    "postgresql": (re.compile(r"^.*duplicate\s+key.*\"([^\"]+)\"\s*\n.*$"),),
    "mysql": (re.compile(r"^.*\(1062,.*'([^\']+)'\"\)$"),),
    "ibm_db_sa": (re.compile(r"^.*SQL0803N.*$"),),
}


def _raise_if_duplicate_entry_error(integrity_error, engine_name):
    """Raise exception if two entries are duplicated.

    Raises DBDuplicateEntry when *integrity_error* wraps a unique
    constraint violation for a recognized backend; otherwise returns None
    so the caller can re-wrap the error generically.

    :param integrity_error: the IntegrityError raised by SQLAlchemy
    :param engine_name: dialect name, e.g. 'mysql', 'sqlite'
    :raises exception.DBDuplicateEntry: on a recognized duplicate-entry
        message, carrying the offending column names where available
    """

    def get_columns_from_uniq_cons_or_name(columns):
        # note(vsergeyev): UniqueConstraint name convention: "uniq_t0c10c2"
        #                  where `t` it is table name and columns `c1`, `c2`
        #                  are in UniqueConstraint.
        uniqbase = "uniq_"
        if not columns.startswith(uniqbase):
            if engine_name == "postgresql":
                return [columns[columns.index("_") + 1:columns.rindex("_")]]
            return [columns]
        return columns[len(uniqbase):].split("0")[1:]

    if engine_name not in ("ibm_db_sa", "mysql", "sqlite", "postgresql"):
        return

    # FIXME(johannes): The usage of the .message attribute has been
    # deprecated since Python 2.6. However, the exceptions raised by
    # SQLAlchemy can differ when using unicode() and accessing .message.
    # An audit across all three supported engines will be necessary to
    # ensure there are no regressions.
    for pattern in _DUP_KEY_RE_DB[engine_name]:
        match = pattern.match(integrity_error.message)
        if match:
            break
    else:
        return

    # NOTE(mriedem): The ibm_db_sa integrity error message doesn't provide
    # the columns so we have to omit that from the DBDuplicateEntry error.
    columns = ''

    if engine_name != 'ibm_db_sa':
        columns = match.group(1)

    if engine_name == "sqlite":
        columns = [c.split('.')[-1] for c in columns.strip().split(", ")]
    else:
        columns = get_columns_from_uniq_cons_or_name(columns)
    raise exception.DBDuplicateEntry(columns, integrity_error)


# NOTE(comstud): In current versions of DB backends, Deadlock violation
# messages follow the structure:
#
# mysql:
# (OperationalError) (1213, 'Deadlock found when trying to get lock; try '
#                     'restarting transaction')
_DEADLOCK_RE_DB = {
    "mysql": re.compile(r"^.*\(1213, 'Deadlock.*")
}


def _raise_if_deadlock_error(operational_error, engine_name):
    """Raise exception on deadlock condition.

    Raise DBDeadlock exception if OperationalError contains a Deadlock
    condition; returns None for backends without a known deadlock pattern
    or when the message does not match.

    :raises exception.DBDeadlock: when the message matches the backend's
        deadlock pattern
    """
    # Fixed: this local was previously named ``re``, shadowing the ``re``
    # module inside the function.
    pattern = _DEADLOCK_RE_DB.get(engine_name)
    if pattern is None:
        return
    # FIXME(johannes): .message has been deprecated since Python 2.6 (see
    # the matching note in _raise_if_duplicate_entry_error).
    if not pattern.match(operational_error.message):
        return
    raise exception.DBDeadlock(operational_error)
__all__ = ['_synchronous_switch_listener', '_add_regexp_listener',
           '_thread_yield']


def _synchronous_switch_listener(dbapi_conn, connection_rec):
    """Switch sqlite connections to non-synchronous mode."""
    dbapi_conn.execute("PRAGMA synchronous = OFF")


def _add_regexp_listener(dbapi_con, con_record):
    """Add REGEXP function to sqlite connections."""

    def regexp(expr, item):
        # SQL REGEXP semantics: true iff the pattern matches anywhere.
        return re.compile(expr).search(str(item)) is not None

    dbapi_con.create_function('regexp', 2, regexp)


def _thread_yield(dbapi_con, con_record):
    """Ensure other greenthreads get a chance to be executed.

    If we use eventlet.monkey_patch(), eventlet.greenthread.sleep(0) will
    execute instead of time.sleep(0).
    Force a context switch. With common database backends (eg MySQLdb and
    sqlite), there is no implicit yield caused by network I/O since they are
    implemented by C libraries that eventlet cannot monkey patch.
    """
    time.sleep(0)
__all__ = ['_set_session_sql_mode']


def _set_session_sql_mode(dbapi_con, connection_rec, sql_mode=None):
    """Set the sql_mode session variable.

    MySQL supports several server modes. The default is None, but sessions
    may choose to enable server modes like TRADITIONAL, ANSI,
    several STRICT_* modes and others.

    Note: passing in '' (empty string) for sql_mode clears
    the SQL mode for the session, overriding a potentially set
    server default.
    """
    # Parameterized statement: the DBAPI driver quotes sql_mode for us.
    dbapi_con.cursor().execute("SET SESSION sql_mode = %s", [sql_mode])
__all__ = ['_is_db_connection_error', '_raise_if_db_connection_lost']


def _mysql_check_effective_sql_mode(engine):
    """Logs a message based on the effective SQL mode for MySQL connections."""
    realmode = _mysql_get_effective_sql_mode(engine)

    if realmode is None:
        LOG.warning(_LW('Unable to detect effective SQL mode'))
        return

    LOG.debug('MySQL server mode set to %s', realmode)
    # 'TRADITIONAL' mode enables several other modes, so
    # we need a substring match here
    mode = realmode.upper()
    if 'TRADITIONAL' not in mode and 'STRICT_ALL_TABLES' not in mode:
        LOG.warning(_LW("MySQL SQL mode is '%s', "
                        "consider enabling TRADITIONAL or STRICT_ALL_TABLES"),
                    realmode)


def _mysql_set_mode_callback(engine, sql_mode):
    """Apply *sql_mode* on every new MySQL connection and log the result."""
    if sql_mode is not None:
        callback = functools.partial(_set_session_sql_mode,
                                     sql_mode=sql_mode)
        sqlalchemy.event.listen(engine, 'connect', callback)
    _mysql_check_effective_sql_mode(engine)


def _is_db_connection_error(args):
    """Return True if error in connecting to db."""
    # NOTE(adam_g): This is currently MySQL specific and needs to be extended
    # to support Postgres and others.
    # For the db2, the error code is -30081 since the db2 is still not ready
    return any(code in args
               for code in ('2002', '2003', '2006', '2013', '-30081'))


def _raise_if_db_connection_lost(error, engine):
    # NOTE(vsergeyev): Function is_disconnect(e, connection, cursor)
    #                  requires connection and cursor in incoming parameters,
    #                  but we have no possibility to create connection if DB
    #                  is not available, so in such case reconnect fails.
    #                  But is_disconnect() ignores these parameters, so it
    #                  makes sense to pass to function None as placeholder
    #                  instead of connection and cursor.
    if engine.dialect.is_disconnect(error, None, None):
        raise exception.DBConnectionError(error)
def create_engine(sql_connection, sqlite_fk=False, mysql_sql_mode=None,
                  idle_timeout=3600,
                  connection_debug=0, max_pool_size=None, max_overflow=None,
                  pool_timeout=None, sqlite_synchronous=True,
                  connection_trace=False, max_retries=10, retry_interval=10):
    """Return a new SQLAlchemy engine."""
    url = sqlalchemy.engine.url.make_url(sql_connection)

    engine_args = {
        "pool_recycle": idle_timeout,
        'convert_unicode': True,
    }

    # Map SQL debug level to Python log level.
    sqla_logger = logging.getLogger('sqlalchemy.engine')
    if connection_debug >= 100:
        sqla_logger.setLevel(logging.DEBUG)
    elif connection_debug >= 50:
        sqla_logger.setLevel(logging.INFO)
    else:
        sqla_logger.setLevel(logging.WARNING)

    if "sqlite" in url.drivername:
        if sqlite_fk:
            engine_args["listeners"] = [SqliteForeignKeysListener()]
        engine_args["poolclass"] = NullPool

        if sql_connection == "sqlite://":
            # In-memory sqlite: a single shared connection for all sessions.
            engine_args["poolclass"] = StaticPool
            engine_args["connect_args"] = {'check_same_thread': False}
    else:
        # Pool tuning only makes sense for real client/server backends.
        for key, value in (('pool_size', max_pool_size),
                           ('max_overflow', max_overflow),
                           ('pool_timeout', pool_timeout)):
            if value is not None:
                engine_args[key] = value

    engine = sqlalchemy.create_engine(sql_connection, **engine_args)

    sqlalchemy.event.listen(engine, 'checkin', _thread_yield)

    if engine.name in ('ibm_db_sa', 'mysql', 'postgresql'):
        # Liveness-check every connection on checkout.
        sqlalchemy.event.listen(engine, 'checkout',
                                functools.partial(_ping_listener, engine))
        if engine.name == 'mysql' and mysql_sql_mode:
            _mysql_set_mode_callback(engine, mysql_sql_mode)
    elif 'sqlite' in url.drivername:
        if not sqlite_synchronous:
            sqlalchemy.event.listen(engine, 'connect',
                                    _synchronous_switch_listener)
        sqlalchemy.event.listen(engine, 'connect', _add_regexp_listener)

    if connection_trace and engine.dialect.dbapi.__name__ == 'MySQLdb':
        _patch_mysqldb_with_stacktrace_comments()

    try:
        engine.connect()
    except sqla_exc.OperationalError as e:
        if not _is_db_connection_error(e.args[0]):
            raise

        # Retry the initial connection; max_retries == -1 means forever.
        remaining = max_retries if max_retries != -1 else 'infinite'
        while True:
            LOG.warning(
                _LW('SQL connection failed. %s attempts left.') % remaining)
            if remaining != 'infinite':
                remaining -= 1
            time.sleep(retry_interval)
            try:
                engine.connect()
                break
            except sqla_exc.OperationalError as e:
                if ((remaining != 'infinite' and remaining == 0)
                        or not _is_db_connection_error(e.args[0])):
                    raise
    return engine


class Query(sqlalchemy.orm.query.Query):
    """Subclass of sqlalchemy.query with soft_delete() method."""

    def soft_delete(self, synchronize_session='evaluate'):
        # One UPDATE marks every matched row deleted; updated_at is written
        # back to itself so soft-deleting does not bump it.
        return self.update({'deleted': literal_column('id'),
                            'updated_at': literal_column('updated_at'),
                            'deleted_at': timeutils.utcnow()},
                           synchronize_session=synchronize_session)


class Session(sqlalchemy.orm.session.Session):
    """Custom Session class to avoid SqlAlchemy Session monkey patching."""

    @_wrap_db_error
    def query(self, *args, **kwargs):
        return super(Session, self).query(*args, **kwargs)

    @_wrap_db_error
    def flush(self, *args, **kwargs):
        return super(Session, self).flush(*args, **kwargs)

    @_wrap_db_error
    def execute(self, *args, **kwargs):
        return super(Session, self).execute(*args, **kwargs)


def get_maker(engine, autocommit=True, expire_on_commit=False):
    """Return a SQLAlchemy sessionmaker using the given engine."""
    return sqlalchemy.orm.sessionmaker(bind=engine,
                                       class_=Session,
                                       autocommit=autocommit,
                                       expire_on_commit=expire_on_commit,
                                       query_cls=Query)


def _patch_mysqldb_with_stacktrace_comments():
    """Adds current stack trace as a comment in queries.

    Patches MySQLdb.cursors.BaseCursor._do_query.
    """
    import MySQLdb.cursors
    import traceback

    old_mysql_do_query = MySQLdb.cursors.BaseCursor._do_query

    # (file-suffix, method) pairs that are infrastructure noise in a trace.
    skip_frames = (('session.py', '_do_query'),
                   ('api.py', 'wrapper'),
                   ('utils.py', '_inner'),
                   ('exception.py', '_wrap'))

    def _do_query(self, q):
        frames = []
        for filename, line, method, function in traceback.extract_stack():
            if any(filename.endswith(suffix) and method == name
                   for suffix, name in skip_frames):
                continue
            # db/api is just a wrapper around db/sqlalchemy/api
            if filename.endswith('db/api.py'):
                continue
            # only trace inside neutron
            index = filename.rfind('neutron')
            if index == -1:
                continue
            frames.append("File:%s:%s Method:%s() Line:%s"
                          % (filename[index:], line, method, function))

        if frames:
            qq = "%s /* %s */" % (q, " | ".join(frames))
        else:
            qq = q
        old_mysql_do_query(self, qq)

    setattr(MySQLdb.cursors.BaseCursor, '_do_query', _do_query)
+ """ + import MySQLdb.cursors + import traceback + + old_mysql_do_query = MySQLdb.cursors.BaseCursor._do_query + + def _do_query(self, q): + stack = '' + for filename, line, method, function in traceback.extract_stack(): + # exclude various common things from trace + if filename.endswith('session.py') and method == '_do_query': + continue + if filename.endswith('api.py') and method == 'wrapper': + continue + if filename.endswith('utils.py') and method == '_inner': + continue + if filename.endswith('exception.py') and method == '_wrap': + continue + # db/api is just a wrapper around db/sqlalchemy/api + if filename.endswith('db/api.py'): + continue + # only trace inside neutron + index = filename.rfind('neutron') + if index == -1: + continue + stack += "File:%s:%s Method:%s() Line:%s | " \ + % (filename[index:], line, method, function) + + # strip trailing " | " from stack + if stack: + stack = stack[:-3] + qq = "%s /* %s */" % (q, stack) + else: + qq = q + old_mysql_do_query(self, qq) + + setattr(MySQLdb.cursors.BaseCursor, '_do_query', _do_query) + + +class EngineFacade(object): + """A helper class for removing of global engine instances from neutron.db. + + As a library, neutron.db can't decide where to store/when to create engine + and sessionmaker instances, so this must be left for a target application. + + On the other hand, in order to simplify the adoption of neutron.db changes, + we'll provide a helper class, which creates engine and sessionmaker + on its instantiation and provides get_engine()/get_session() methods + that are compatible with corresponding utility functions that currently + exist in target projects, e.g. in Nova. + + engine/sessionmaker instances will still be global (and they are meant to + be global), but they will be stored in the app context, rather that in the + neutron.db context. 
+ + Note: using of this helper is completely optional and you are encouraged to + integrate engine/sessionmaker instances into your apps any way you like + (e.g. one might want to bind a session to a request context). Two important + things to remember: + + 1. An Engine instance is effectively a pool of DB connections, so it's + meant to be shared (and it's thread-safe). + 2. A Session instance is not meant to be shared and represents a DB + transactional context (i.e. it's not thread-safe). sessionmaker is + a factory of sessions. + + """ + + def __init__(self, sql_connection, + sqlite_fk=False, autocommit=True, + expire_on_commit=False, **kwargs): + """Initialize engine and sessionmaker instances. + + :param sqlite_fk: enable foreign keys in SQLite + :type sqlite_fk: bool + + :param autocommit: use autocommit mode for created Session instances + :type autocommit: bool + + :param expire_on_commit: expire session objects on commit + :type expire_on_commit: bool + + Keyword arguments: + + :keyword mysql_sql_mode: the SQL mode to be used for MySQL sessions. + (defaults to TRADITIONAL) + :keyword idle_timeout: timeout before idle sql connections are reaped + (defaults to 3600) + :keyword connection_debug: verbosity of SQL debugging information. + 0=None, 100=Everything (defaults to 0) + :keyword max_pool_size: maximum number of SQL connections to keep open + in a pool (defaults to SQLAlchemy settings) + :keyword max_overflow: if set, use this value for max_overflow with + sqlalchemy (defaults to SQLAlchemy settings) + :keyword pool_timeout: if set, use this value for pool_timeout with + sqlalchemy (defaults to SQLAlchemy settings) + :keyword sqlite_synchronous: if True, SQLite uses synchronous mode + (defaults to True) + :keyword connection_trace: add python stack traces to SQL as comment + strings (defaults to False) + :keyword max_retries: maximum db connection retries during startup. 
+ (setting -1 implies an infinite retry count) + (defaults to 10) + :keyword retry_interval: interval between retries of opening a sql + connection (defaults to 10) + + """ + + super(EngineFacade, self).__init__() + + self._engine = create_engine( + sql_connection=sql_connection, + sqlite_fk=sqlite_fk, + mysql_sql_mode=kwargs.get('mysql_sql_mode', 'TRADITIONAL'), + idle_timeout=kwargs.get('idle_timeout', 3600), + connection_debug=kwargs.get('connection_debug', 0), + max_pool_size=kwargs.get('max_pool_size'), + max_overflow=kwargs.get('max_overflow'), + pool_timeout=kwargs.get('pool_timeout'), + sqlite_synchronous=kwargs.get('sqlite_synchronous', True), + connection_trace=kwargs.get('connection_trace', False), + max_retries=kwargs.get('max_retries', 10), + retry_interval=kwargs.get('retry_interval', 10)) + self._session_maker = get_maker( + engine=self._engine, + autocommit=autocommit, + expire_on_commit=expire_on_commit) + + def get_engine(self): + """Get the engine instance (note, that it's shared).""" + + return self._engine + + def get_session(self, **kwargs): + """Get a Session instance. + + If passed, keyword arguments values override the ones used when the + sessionmaker instance was created. + + :keyword autocommit: use autocommit mode for created Session instances + :type autocommit: bool + + :keyword expire_on_commit: expire session objects on commit + :type expire_on_commit: bool + + """ + + for arg in kwargs: + if arg not in ('autocommit', 'expire_on_commit'): + del kwargs[arg] + + return self._session_maker(**kwargs) + + @classmethod + def from_config(cls, connection_string, conf, + sqlite_fk=False, autocommit=True, expire_on_commit=False): + """Initialize EngineFacade using oslo.config config instance options. 
+ + :param connection_string: SQLAlchemy connection string + :type connection_string: string + + :param conf: oslo.config config instance + :type conf: oslo.config.cfg.ConfigOpts + + :param sqlite_fk: enable foreign keys in SQLite + :type sqlite_fk: bool + + :param autocommit: use autocommit mode for created Session instances + :type autocommit: bool + + :param expire_on_commit: expire session objects on commit + :type expire_on_commit: bool + + """ + + return cls(sql_connection=connection_string, + sqlite_fk=sqlite_fk, + autocommit=autocommit, + expire_on_commit=expire_on_commit, + **dict(conf.database.items())) diff --git a/neutron/openstack/common/db/sqlalchemy/test_base.py b/neutron/openstack/common/db/sqlalchemy/test_base.py new file mode 100644 index 000000000..e6e047fbf --- /dev/null +++ b/neutron/openstack/common/db/sqlalchemy/test_base.py @@ -0,0 +1,153 @@ +# Copyright (c) 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import abc +import functools +import os + +import fixtures +import six + +from neutron.openstack.common.db.sqlalchemy import session +from neutron.openstack.common.db.sqlalchemy import utils +from neutron.openstack.common.fixture import lockutils +from neutron.openstack.common import test + + +class DbFixture(fixtures.Fixture): + """Basic database fixture. + + Allows to run tests on various db backends, such as SQLite, MySQL and + PostgreSQL. By default use sqlite backend. 
To override default backend + uri set env variable OS_TEST_DBAPI_CONNECTION with database admin + credentials for specific backend. + """ + + def _get_uri(self): + return os.getenv('OS_TEST_DBAPI_CONNECTION', 'sqlite://') + + def __init__(self, test): + super(DbFixture, self).__init__() + + self.test = test + + def setUp(self): + super(DbFixture, self).setUp() + + self.test.engine = session.create_engine(self._get_uri()) + self.test.sessionmaker = session.get_maker(self.test.engine) + self.addCleanup(self.test.engine.dispose) + + +class DbTestCase(test.BaseTestCase): + """Base class for testing of DB code. + + Using `DbFixture`. Intended to be the main database test case to use all + the tests on a given backend with user defined uri. Backend specific + tests should be decorated with `backend_specific` decorator. + """ + + FIXTURE = DbFixture + + def setUp(self): + super(DbTestCase, self).setUp() + self.useFixture(self.FIXTURE(self)) + + +ALLOWED_DIALECTS = ['sqlite', 'mysql', 'postgresql'] + + +def backend_specific(*dialects): + """Decorator to skip backend specific tests on inappropriate engines. + + ::dialects: list of dialects names under which the test will be launched. + """ + def wrap(f): + @functools.wraps(f) + def ins_wrap(self): + if not set(dialects).issubset(ALLOWED_DIALECTS): + raise ValueError( + "Please use allowed dialects: %s" % ALLOWED_DIALECTS) + if self.engine.name not in dialects: + msg = ('The test "%s" can be run ' + 'only on %s. Current engine is %s.') + args = (f.__name__, ' '.join(dialects), self.engine.name) + self.skip(msg % args) + else: + return f(self) + return ins_wrap + return wrap + + +@six.add_metaclass(abc.ABCMeta) +class OpportunisticFixture(DbFixture): + """Base fixture to use default CI databases. + + The databases exist in OpenStack CI infrastructure. But for the + correct functioning in local environment the databases must be + created manually. 
+ """ + + DRIVER = abc.abstractproperty(lambda: None) + DBNAME = PASSWORD = USERNAME = 'openstack_citest' + + def _get_uri(self): + return utils.get_connect_string(backend=self.DRIVER, + user=self.USERNAME, + passwd=self.PASSWORD, + database=self.DBNAME) + + +@six.add_metaclass(abc.ABCMeta) +class OpportunisticTestCase(DbTestCase): + """Base test case to use default CI databases. + + The subclasses of the test case are running only when openstack_citest + database is available otherwise a tests will be skipped. + """ + + FIXTURE = abc.abstractproperty(lambda: None) + + def setUp(self): + # TODO(bnemec): Remove this once infra is ready for + # https://review.openstack.org/#/c/74963/ to merge. + self.useFixture(lockutils.LockFixture('opportunistic-db')) + credentials = { + 'backend': self.FIXTURE.DRIVER, + 'user': self.FIXTURE.USERNAME, + 'passwd': self.FIXTURE.PASSWORD, + 'database': self.FIXTURE.DBNAME} + + if self.FIXTURE.DRIVER and not utils.is_backend_avail(**credentials): + msg = '%s backend is not available.' % self.FIXTURE.DRIVER + return self.skip(msg) + + super(OpportunisticTestCase, self).setUp() + + +class MySQLOpportunisticFixture(OpportunisticFixture): + DRIVER = 'mysql' + + +class PostgreSQLOpportunisticFixture(OpportunisticFixture): + DRIVER = 'postgresql' + + +class MySQLOpportunisticTestCase(OpportunisticTestCase): + FIXTURE = MySQLOpportunisticFixture + + +class PostgreSQLOpportunisticTestCase(OpportunisticTestCase): + FIXTURE = PostgreSQLOpportunisticFixture diff --git a/neutron/openstack/common/db/sqlalchemy/utils.py b/neutron/openstack/common/db/sqlalchemy/utils.py new file mode 100644 index 000000000..878431773 --- /dev/null +++ b/neutron/openstack/common/db/sqlalchemy/utils.py @@ -0,0 +1,647 @@ +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# Copyright 2010-2011 OpenStack Foundation. +# Copyright 2012 Justin Santa Barbara +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import logging +import re + +import sqlalchemy +from sqlalchemy import Boolean +from sqlalchemy import CheckConstraint +from sqlalchemy import Column +from sqlalchemy.engine import reflection +from sqlalchemy.ext.compiler import compiles +from sqlalchemy import func +from sqlalchemy import Index +from sqlalchemy import Integer +from sqlalchemy import MetaData +from sqlalchemy import or_ +from sqlalchemy.sql.expression import literal_column +from sqlalchemy.sql.expression import UpdateBase +from sqlalchemy import String +from sqlalchemy import Table +from sqlalchemy.types import NullType + +from neutron.openstack.common import context as request_context +from neutron.openstack.common.db.sqlalchemy import models +from neutron.openstack.common.gettextutils import _, _LI, _LW +from neutron.openstack.common import timeutils + + +LOG = logging.getLogger(__name__) + +_DBURL_REGEX = re.compile(r"[^:]+://([^:]+):([^@]+)@.+") + + +def sanitize_db_url(url): + match = _DBURL_REGEX.match(url) + if match: + return '%s****:****%s' % (url[:match.start(1)], url[match.end(2):]) + return url + + +class InvalidSortKey(Exception): + message = _("Sort key supplied was not valid.") + + +# copy from glance/db/sqlalchemy/api.py +def paginate_query(query, model, limit, sort_keys, marker=None, + sort_dir=None, sort_dirs=None): + """Returns a query with sorting / pagination criteria added. 
+ + Pagination works by requiring a unique sort_key, specified by sort_keys. + (If sort_keys is not unique, then we risk looping through values.) + We use the last row in the previous page as the 'marker' for pagination. + So we must return values that follow the passed marker in the order. + With a single-valued sort_key, this would be easy: sort_key > X. + With a compound-values sort_key, (k1, k2, k3) we must do this to repeat + the lexicographical ordering: + (k1 > X1) or (k1 == X1 && k2 > X2) or (k1 == X1 && k2 == X2 && k3 > X3) + + We also have to cope with different sort_directions. + + Typically, the id of the last row is used as the client-facing pagination + marker, then the actual marker object must be fetched from the db and + passed in to us as marker. + + :param query: the query object to which we should add paging/sorting + :param model: the ORM model class + :param limit: maximum number of items to return + :param sort_keys: array of attributes by which results should be sorted + :param marker: the last item of the previous page; we returns the next + results after this value. + :param sort_dir: direction in which results should be sorted (asc, desc) + :param sort_dirs: per-column array of sort_dirs, corresponding to sort_keys + + :rtype: sqlalchemy.orm.query.Query + :return: The query with sorting/pagination added. 
+ """ + + if 'id' not in sort_keys: + # TODO(justinsb): If this ever gives a false-positive, check + # the actual primary key, rather than assuming its id + LOG.warning(_LW('Id not in sort_keys; is sort_keys unique?')) + + assert(not (sort_dir and sort_dirs)) + + # Default the sort direction to ascending + if sort_dirs is None and sort_dir is None: + sort_dir = 'asc' + + # Ensure a per-column sort direction + if sort_dirs is None: + sort_dirs = [sort_dir for _sort_key in sort_keys] + + assert(len(sort_dirs) == len(sort_keys)) + + # Add sorting + for current_sort_key, current_sort_dir in zip(sort_keys, sort_dirs): + try: + sort_dir_func = { + 'asc': sqlalchemy.asc, + 'desc': sqlalchemy.desc, + }[current_sort_dir] + except KeyError: + raise ValueError(_("Unknown sort direction, " + "must be 'desc' or 'asc'")) + try: + sort_key_attr = getattr(model, current_sort_key) + except AttributeError: + raise InvalidSortKey() + query = query.order_by(sort_dir_func(sort_key_attr)) + + # Add pagination + if marker is not None: + marker_values = [] + for sort_key in sort_keys: + v = getattr(marker, sort_key) + marker_values.append(v) + + # Build up an array of sort criteria as in the docstring + criteria_list = [] + for i in range(len(sort_keys)): + crit_attrs = [] + for j in range(i): + model_attr = getattr(model, sort_keys[j]) + crit_attrs.append((model_attr == marker_values[j])) + + model_attr = getattr(model, sort_keys[i]) + if sort_dirs[i] == 'desc': + crit_attrs.append((model_attr < marker_values[i])) + else: + crit_attrs.append((model_attr > marker_values[i])) + + criteria = sqlalchemy.sql.and_(*crit_attrs) + criteria_list.append(criteria) + + f = sqlalchemy.sql.or_(*criteria_list) + query = query.filter(f) + + if limit is not None: + query = query.limit(limit) + + return query + + +def _read_deleted_filter(query, db_model, read_deleted): + if 'deleted' not in db_model.__table__.columns: + raise ValueError(_("There is no `deleted` column in `%s` table. 
" + "Project doesn't use soft-deleted feature.") + % db_model.__name__) + + default_deleted_value = db_model.__table__.c.deleted.default.arg + if read_deleted == 'no': + query = query.filter(db_model.deleted == default_deleted_value) + elif read_deleted == 'yes': + pass # omit the filter to include deleted and active + elif read_deleted == 'only': + query = query.filter(db_model.deleted != default_deleted_value) + else: + raise ValueError(_("Unrecognized read_deleted value '%s'") + % read_deleted) + return query + + +def _project_filter(query, db_model, context, project_only): + if project_only and 'project_id' not in db_model.__table__.columns: + raise ValueError(_("There is no `project_id` column in `%s` table.") + % db_model.__name__) + + if request_context.is_user_context(context) and project_only: + if project_only == 'allow_none': + is_none = None + query = query.filter(or_(db_model.project_id == context.project_id, + db_model.project_id == is_none)) + else: + query = query.filter(db_model.project_id == context.project_id) + + return query + + +def model_query(context, model, session, args=None, project_only=False, + read_deleted=None): + """Query helper that accounts for context's `read_deleted` field. + + :param context: context to query under + + :param model: Model to query. Must be a subclass of ModelBase. + :type model: models.ModelBase + + :param session: The session to use. + :type session: sqlalchemy.orm.session.Session + + :param args: Arguments to query. If None - model is used. + :type args: tuple + + :param project_only: If present and context is user-type, then restrict + query to match the context's project_id. If set to + 'allow_none', restriction includes project_id = None. + :type project_only: bool + + :param read_deleted: If present, overrides context's read_deleted field. 
+ :type read_deleted: bool + + Usage: + + ..code:: python + + result = (utils.model_query(context, models.Instance, session=session) + .filter_by(uuid=instance_uuid) + .all()) + + query = utils.model_query( + context, Node, + session=session, + args=(func.count(Node.id), func.sum(Node.ram)) + ).filter_by(project_id=project_id) + + """ + + if not read_deleted: + if hasattr(context, 'read_deleted'): + # NOTE(viktors): some projects use `read_deleted` attribute in + # their contexts instead of `show_deleted`. + read_deleted = context.read_deleted + else: + read_deleted = context.show_deleted + + if not issubclass(model, models.ModelBase): + raise TypeError(_("model should be a subclass of ModelBase")) + + query = session.query(model) if not args else session.query(*args) + query = _read_deleted_filter(query, model, read_deleted) + query = _project_filter(query, model, context, project_only) + + return query + + +def get_table(engine, name): + """Returns an sqlalchemy table dynamically from db. + + Needed because the models don't work for us in migrations + as models will be far out of sync with the current data. + """ + metadata = MetaData() + metadata.bind = engine + return Table(name, metadata, autoload=True) + + +class InsertFromSelect(UpdateBase): + """Form the base for `INSERT INTO table (SELECT ... )` statement.""" + def __init__(self, table, select): + self.table = table + self.select = select + + +@compiles(InsertFromSelect) +def visit_insert_from_select(element, compiler, **kw): + """Form the `INSERT INTO table (SELECT ... 
)` statement.""" + return "INSERT INTO %s %s" % ( + compiler.process(element.table, asfrom=True), + compiler.process(element.select)) + + +class ColumnError(Exception): + """Error raised when no column or an invalid column is found.""" + + +def _get_not_supported_column(col_name_col_instance, column_name): + try: + column = col_name_col_instance[column_name] + except KeyError: + msg = _("Please specify column %s in col_name_col_instance " + "param. It is required because column has unsupported " + "type by sqlite).") + raise ColumnError(msg % column_name) + + if not isinstance(column, Column): + msg = _("col_name_col_instance param has wrong type of " + "column instance for column %s It should be instance " + "of sqlalchemy.Column.") + raise ColumnError(msg % column_name) + return column + + +def drop_unique_constraint(migrate_engine, table_name, uc_name, *columns, + **col_name_col_instance): + """Drop unique constraint from table. + + DEPRECATED: this function is deprecated and will be removed from neutron.db + in a few releases. Please use UniqueConstraint.drop() method directly for + sqlalchemy-migrate migration scripts. + + This method drops UC from table and works for mysql, postgresql and sqlite. + In mysql and postgresql we are able to use "alter table" construction. + Sqlalchemy doesn't support some sqlite column types and replaces their + type with NullType in metadata. We process these columns and replace + NullType with the correct column type. + + :param migrate_engine: sqlalchemy engine + :param table_name: name of table that contains uniq constraint. + :param uc_name: name of uniq constraint that will be dropped. + :param columns: columns that are in uniq constraint. + :param col_name_col_instance: contains pair column_name=column_instance. + column_instance is instance of Column. These params + are required only for columns that have unsupported + types by sqlite. For example BigInteger. 
+    """
+
+    from migrate.changeset import UniqueConstraint
+
+    meta = MetaData()
+    meta.bind = migrate_engine
+    t = Table(table_name, meta, autoload=True)
+
+    if migrate_engine.name == "sqlite":
+        override_cols = [
+            _get_not_supported_column(col_name_col_instance, col.name)
+            for col in t.columns
+            if isinstance(col.type, NullType)
+        ]
+        for col in override_cols:
+            t.columns.replace(col)
+
+    uc = UniqueConstraint(*columns, table=t, name=uc_name)
+    uc.drop()
+
+
+def drop_old_duplicate_entries_from_table(migrate_engine, table_name,
+                                          use_soft_delete, *uc_column_names):
+    """Drop all old rows having the same values for columns in uc_columns.
+
+    This method drops (or marks as `deleted` if use_soft_delete is True) old
+    duplicate rows from the table with name `table_name`.
+
+    :param migrate_engine: Sqlalchemy engine
+    :param table_name: Table with duplicates
+    :param use_soft_delete: If True - values will be marked as `deleted`,
+                            if False - values will be removed from table
+    :param uc_column_names: Unique constraint columns
+    """
+    meta = MetaData()
+    meta.bind = migrate_engine
+
+    table = Table(table_name, meta, autoload=True)
+    columns_for_group_by = [table.c[name] for name in uc_column_names]
+
+    columns_for_select = [func.max(table.c.id)]
+    columns_for_select.extend(columns_for_group_by)
+
+    duplicated_rows_select = sqlalchemy.sql.select(
+        columns_for_select, group_by=columns_for_group_by,
+        having=func.count(table.c.id) > 1)
+
+    for row in migrate_engine.execute(duplicated_rows_select):
+        # NOTE(boris-42): Do not remove row that has the biggest ID.
+ delete_condition = table.c.id != row[0] + is_none = None # workaround for pyflakes + delete_condition &= table.c.deleted_at == is_none + for name in uc_column_names: + delete_condition &= table.c[name] == row[name] + + rows_to_delete_select = sqlalchemy.sql.select( + [table.c.id]).where(delete_condition) + for row in migrate_engine.execute(rows_to_delete_select).fetchall(): + LOG.info(_LI("Deleting duplicated row with id: %(id)s from table: " + "%(table)s") % dict(id=row[0], table=table_name)) + + if use_soft_delete: + delete_statement = table.update().\ + where(delete_condition).\ + values({ + 'deleted': literal_column('id'), + 'updated_at': literal_column('updated_at'), + 'deleted_at': timeutils.utcnow() + }) + else: + delete_statement = table.delete().where(delete_condition) + migrate_engine.execute(delete_statement) + + +def _get_default_deleted_value(table): + if isinstance(table.c.id.type, Integer): + return 0 + if isinstance(table.c.id.type, String): + return "" + raise ColumnError(_("Unsupported id columns type")) + + +def _restore_indexes_on_deleted_columns(migrate_engine, table_name, indexes): + table = get_table(migrate_engine, table_name) + + insp = reflection.Inspector.from_engine(migrate_engine) + real_indexes = insp.get_indexes(table_name) + existing_index_names = dict( + [(index['name'], index['column_names']) for index in real_indexes]) + + # NOTE(boris-42): Restore indexes on `deleted` column + for index in indexes: + if 'deleted' not in index['column_names']: + continue + name = index['name'] + if name in existing_index_names: + column_names = [table.c[c] for c in existing_index_names[name]] + old_index = Index(name, *column_names, unique=index["unique"]) + old_index.drop(migrate_engine) + + column_names = [table.c[c] for c in index['column_names']] + new_index = Index(index["name"], *column_names, unique=index["unique"]) + new_index.create(migrate_engine) + + +def change_deleted_column_type_to_boolean(migrate_engine, table_name, + 
**col_name_col_instance): + if migrate_engine.name == "sqlite": + return _change_deleted_column_type_to_boolean_sqlite( + migrate_engine, table_name, **col_name_col_instance) + insp = reflection.Inspector.from_engine(migrate_engine) + indexes = insp.get_indexes(table_name) + + table = get_table(migrate_engine, table_name) + + old_deleted = Column('old_deleted', Boolean, default=False) + old_deleted.create(table, populate_default=False) + + table.update().\ + where(table.c.deleted == table.c.id).\ + values(old_deleted=True).\ + execute() + + table.c.deleted.drop() + table.c.old_deleted.alter(name="deleted") + + _restore_indexes_on_deleted_columns(migrate_engine, table_name, indexes) + + +def _change_deleted_column_type_to_boolean_sqlite(migrate_engine, table_name, + **col_name_col_instance): + insp = reflection.Inspector.from_engine(migrate_engine) + table = get_table(migrate_engine, table_name) + + columns = [] + for column in table.columns: + column_copy = None + if column.name != "deleted": + if isinstance(column.type, NullType): + column_copy = _get_not_supported_column(col_name_col_instance, + column.name) + else: + column_copy = column.copy() + else: + column_copy = Column('deleted', Boolean, default=0) + columns.append(column_copy) + + constraints = [constraint.copy() for constraint in table.constraints] + + meta = table.metadata + new_table = Table(table_name + "__tmp__", meta, + *(columns + constraints)) + new_table.create() + + indexes = [] + for index in insp.get_indexes(table_name): + column_names = [new_table.c[c] for c in index['column_names']] + indexes.append(Index(index["name"], *column_names, + unique=index["unique"])) + + c_select = [] + for c in table.c: + if c.name != "deleted": + c_select.append(c) + else: + c_select.append(table.c.deleted == table.c.id) + + ins = InsertFromSelect(new_table, sqlalchemy.sql.select(c_select)) + migrate_engine.execute(ins) + + table.drop() + [index.create(migrate_engine) for index in indexes] + + 
new_table.rename(table_name)
+    new_table.update().\
+        where(new_table.c.deleted == new_table.c.id).\
+        values(deleted=True).\
+        execute()
+
+
+def change_deleted_column_type_to_id_type(migrate_engine, table_name,
+                                          **col_name_col_instance):
+    if migrate_engine.name == "sqlite":
+        return _change_deleted_column_type_to_id_type_sqlite(
+            migrate_engine, table_name, **col_name_col_instance)
+    insp = reflection.Inspector.from_engine(migrate_engine)
+    indexes = insp.get_indexes(table_name)
+
+    table = get_table(migrate_engine, table_name)
+
+    new_deleted = Column('new_deleted', table.c.id.type,
+                         default=_get_default_deleted_value(table))
+    new_deleted.create(table, populate_default=True)
+
+    deleted = True  # workaround for pyflakes
+    table.update().\
+        where(table.c.deleted == deleted).\
+        values(new_deleted=table.c.id).\
+        execute()
+    table.c.deleted.drop()
+    table.c.new_deleted.alter(name="deleted")
+
+    _restore_indexes_on_deleted_columns(migrate_engine, table_name, indexes)
+
+
+def _change_deleted_column_type_to_id_type_sqlite(migrate_engine, table_name,
+                                                  **col_name_col_instance):
+    # NOTE(boris-42): sqlalchemy-migrate can't drop column with check
+    #                 constraints in sqlite DB and our `deleted` column has
+    #                 2 check constraints. So there is only one way to remove
+    #                 these constraints:
+    #                 1) Create new table with the same columns, constraints
+    #                 and indexes. (except deleted column).
+    #                 2) Copy all data from old to new table.
+    #                 3) Drop old table.
+    #                 4) Rename new table to old table name.
+ insp = reflection.Inspector.from_engine(migrate_engine) + meta = MetaData(bind=migrate_engine) + table = Table(table_name, meta, autoload=True) + default_deleted_value = _get_default_deleted_value(table) + + columns = [] + for column in table.columns: + column_copy = None + if column.name != "deleted": + if isinstance(column.type, NullType): + column_copy = _get_not_supported_column(col_name_col_instance, + column.name) + else: + column_copy = column.copy() + else: + column_copy = Column('deleted', table.c.id.type, + default=default_deleted_value) + columns.append(column_copy) + + def is_deleted_column_constraint(constraint): + # NOTE(boris-42): There is no other way to check is CheckConstraint + # associated with deleted column. + if not isinstance(constraint, CheckConstraint): + return False + sqltext = str(constraint.sqltext) + return (sqltext.endswith("deleted in (0, 1)") or + sqltext.endswith("deleted IN (:deleted_1, :deleted_2)")) + + constraints = [] + for constraint in table.constraints: + if not is_deleted_column_constraint(constraint): + constraints.append(constraint.copy()) + + new_table = Table(table_name + "__tmp__", meta, + *(columns + constraints)) + new_table.create() + + indexes = [] + for index in insp.get_indexes(table_name): + column_names = [new_table.c[c] for c in index['column_names']] + indexes.append(Index(index["name"], *column_names, + unique=index["unique"])) + + ins = InsertFromSelect(new_table, table.select()) + migrate_engine.execute(ins) + + table.drop() + [index.create(migrate_engine) for index in indexes] + + new_table.rename(table_name) + deleted = True # workaround for pyflakes + new_table.update().\ + where(new_table.c.deleted == deleted).\ + values(deleted=new_table.c.id).\ + execute() + + # NOTE(boris-42): Fix value of deleted column: False -> "" or 0. 
+ deleted = False # workaround for pyflakes + new_table.update().\ + where(new_table.c.deleted == deleted).\ + values(deleted=default_deleted_value).\ + execute() + + +def get_connect_string(backend, database, user=None, passwd=None): + """Get database connection + + Try to get a connection with a very specific set of values, if we get + these then we'll run the tests, otherwise they are skipped + """ + args = {'backend': backend, + 'user': user, + 'passwd': passwd, + 'database': database} + if backend == 'sqlite': + template = '%(backend)s:///%(database)s' + else: + template = "%(backend)s://%(user)s:%(passwd)s@localhost/%(database)s" + return template % args + + +def is_backend_avail(backend, database, user=None, passwd=None): + try: + connect_uri = get_connect_string(backend=backend, + database=database, + user=user, + passwd=passwd) + engine = sqlalchemy.create_engine(connect_uri) + connection = engine.connect() + except Exception: + # intentionally catch all to handle exceptions even if we don't + # have any backend code loaded. + return False + else: + connection.close() + engine.dispose() + return True + + +def get_db_connection_info(conn_pieces): + database = conn_pieces.path.strip('/') + loc_pieces = conn_pieces.netloc.split('@') + host = loc_pieces[1] + + auth_pieces = loc_pieces[0].split(':') + user = auth_pieces[0] + password = "" + if len(auth_pieces) > 1: + password = auth_pieces[1].strip() + + return (user, password, database, host) diff --git a/neutron/openstack/common/eventlet_backdoor.py b/neutron/openstack/common/eventlet_backdoor.py new file mode 100644 index 000000000..b55b0ceb3 --- /dev/null +++ b/neutron/openstack/common/eventlet_backdoor.py @@ -0,0 +1,144 @@ +# Copyright (c) 2012 OpenStack Foundation. +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from __future__ import print_function + +import errno +import gc +import os +import pprint +import socket +import sys +import traceback + +import eventlet +import eventlet.backdoor +import greenlet +from oslo.config import cfg + +from neutron.openstack.common.gettextutils import _ +from neutron.openstack.common import log as logging + +help_for_backdoor_port = ( + "Acceptable values are 0, , and :, where 0 results " + "in listening on a random tcp port number; results in listening " + "on the specified port number (and not enabling backdoor if that port " + "is in use); and : results in listening on the smallest " + "unused port number within the specified range of port numbers. The " + "chosen port is displayed in the service's log file.") +eventlet_backdoor_opts = [ + cfg.StrOpt('backdoor_port', + default=None, + help="Enable eventlet backdoor. %s" % help_for_backdoor_port) +] + +CONF = cfg.CONF +CONF.register_opts(eventlet_backdoor_opts) +LOG = logging.getLogger(__name__) + + +class EventletBackdoorConfigValueError(Exception): + def __init__(self, port_range, help_msg, ex): + msg = ('Invalid backdoor_port configuration %(range)s: %(ex)s. 
' + '%(help)s' % + {'range': port_range, 'ex': ex, 'help': help_msg}) + super(EventletBackdoorConfigValueError, self).__init__(msg) + self.port_range = port_range + + +def _dont_use_this(): + print("Don't use this, just disconnect instead") + + +def _find_objects(t): + return [o for o in gc.get_objects() if isinstance(o, t)] + + +def _print_greenthreads(): + for i, gt in enumerate(_find_objects(greenlet.greenlet)): + print(i, gt) + traceback.print_stack(gt.gr_frame) + print() + + +def _print_nativethreads(): + for threadId, stack in sys._current_frames().items(): + print(threadId) + traceback.print_stack(stack) + print() + + +def _parse_port_range(port_range): + if ':' not in port_range: + start, end = port_range, port_range + else: + start, end = port_range.split(':', 1) + try: + start, end = int(start), int(end) + if end < start: + raise ValueError + return start, end + except ValueError as ex: + raise EventletBackdoorConfigValueError(port_range, ex, + help_for_backdoor_port) + + +def _listen(host, start_port, end_port, listen_func): + try_port = start_port + while True: + try: + return listen_func((host, try_port)) + except socket.error as exc: + if (exc.errno != errno.EADDRINUSE or + try_port >= end_port): + raise + try_port += 1 + + +def initialize_if_enabled(): + backdoor_locals = { + 'exit': _dont_use_this, # So we don't exit the entire process + 'quit': _dont_use_this, # So we don't exit the entire process + 'fo': _find_objects, + 'pgt': _print_greenthreads, + 'pnt': _print_nativethreads, + } + + if CONF.backdoor_port is None: + return None + + start_port, end_port = _parse_port_range(str(CONF.backdoor_port)) + + # NOTE(johannes): The standard sys.displayhook will print the value of + # the last expression and set it to __builtin__._, which overwrites + # the __builtin__._ that gettext sets. Let's switch to using pprint + # since it won't interact poorly with gettext, and it's easier to + # read the output too. 
+ def displayhook(val): + if val is not None: + pprint.pprint(val) + sys.displayhook = displayhook + + sock = _listen('localhost', start_port, end_port, eventlet.listen) + + # In the case of backdoor port being zero, a port number is assigned by + # listen(). In any case, pull the port number out here. + port = sock.getsockname()[1] + LOG.info(_('Eventlet backdoor listening on %(port)s for process %(pid)d') % + {'port': port, 'pid': os.getpid()}) + eventlet.spawn_n(eventlet.backdoor.backdoor_server, sock, + locals=backdoor_locals) + return port diff --git a/neutron/openstack/common/excutils.py b/neutron/openstack/common/excutils.py new file mode 100644 index 000000000..5b3c5c86b --- /dev/null +++ b/neutron/openstack/common/excutils.py @@ -0,0 +1,113 @@ +# Copyright 2011 OpenStack Foundation. +# Copyright 2012, Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Exception related utilities. +""" + +import logging +import sys +import time +import traceback + +import six + +from neutron.openstack.common.gettextutils import _LE + + +class save_and_reraise_exception(object): + """Save current exception, run some code and then re-raise. + + In some cases the exception context can be cleared, resulting in None + being attempted to be re-raised after an exception handler is run. This + can happen when eventlet switches greenthreads or when running an + exception handler, code raises and catches an exception. 
In both + cases the exception context will be cleared. + + To work around this, we save the exception state, run handler code, and + then re-raise the original exception. If another exception occurs, the + saved exception is logged and the new exception is re-raised. + + In some cases the caller may not want to re-raise the exception, and + for those circumstances this context provides a reraise flag that + can be used to suppress the exception. For example:: + + except Exception: + with save_and_reraise_exception() as ctxt: + decide_if_need_reraise() + if not should_be_reraised: + ctxt.reraise = False + + If another exception occurs and reraise flag is False, + the saved exception will not be logged. + + If the caller wants to raise new exception during exception handling + he/she sets reraise to False initially with an ability to set it back to + True if needed:: + + except Exception: + with save_and_reraise_exception(reraise=False) as ctxt: + [if statements to determine whether to raise a new exception] + # Not raising a new exception, so reraise + ctxt.reraise = True + """ + def __init__(self, reraise=True): + self.reraise = reraise + + def __enter__(self): + self.type_, self.value, self.tb, = sys.exc_info() + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + if exc_type is not None: + if self.reraise: + logging.error(_LE('Original exception being dropped: %s'), + traceback.format_exception(self.type_, + self.value, + self.tb)) + return False + if self.reraise: + six.reraise(self.type_, self.value, self.tb) + + +def forever_retry_uncaught_exceptions(infunc): + def inner_func(*args, **kwargs): + last_log_time = 0 + last_exc_message = None + exc_count = 0 + while True: + try: + return infunc(*args, **kwargs) + except Exception as exc: + this_exc_message = six.u(str(exc)) + if this_exc_message == last_exc_message: + exc_count += 1 + else: + exc_count = 1 + # Do not log any more frequently than once a minute unless + # the exception message changes + 
cur_time = int(time.time()) + if (cur_time - last_log_time > 60 or + this_exc_message != last_exc_message): + logging.exception( + _LE('Unexpected exception occurred %d time(s)... ' + 'retrying.') % exc_count) + last_log_time = cur_time + last_exc_message = this_exc_message + exc_count = 0 + # This should be a very rare event. In case it isn't, do + # a sleep. + time.sleep(1) + return inner_func diff --git a/neutron/openstack/common/fileutils.py b/neutron/openstack/common/fileutils.py new file mode 100644 index 000000000..704af0962 --- /dev/null +++ b/neutron/openstack/common/fileutils.py @@ -0,0 +1,137 @@ +# Copyright 2011 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +import contextlib +import errno +import os +import tempfile + +from neutron.openstack.common import excutils +from neutron.openstack.common.gettextutils import _ +from neutron.openstack.common import log as logging + +LOG = logging.getLogger(__name__) + +_FILE_CACHE = {} + + +def ensure_tree(path): + """Create a directory (and any ancestor directories required) + + :param path: Directory to create + """ + try: + os.makedirs(path) + except OSError as exc: + if exc.errno == errno.EEXIST: + if not os.path.isdir(path): + raise + else: + raise + + +def read_cached_file(filename, force_reload=False): + """Read from a file if it has been modified. + + :param force_reload: Whether to reload the file. 
+ :returns: A tuple with a boolean specifying if the data is fresh + or not. + """ + global _FILE_CACHE + + if force_reload and filename in _FILE_CACHE: + del _FILE_CACHE[filename] + + reloaded = False + mtime = os.path.getmtime(filename) + cache_info = _FILE_CACHE.setdefault(filename, {}) + + if not cache_info or mtime > cache_info.get('mtime', 0): + LOG.debug(_("Reloading cached file %s") % filename) + with open(filename) as fap: + cache_info['data'] = fap.read() + cache_info['mtime'] = mtime + reloaded = True + return (reloaded, cache_info['data']) + + +def delete_if_exists(path, remove=os.unlink): + """Delete a file, but ignore file not found error. + + :param path: File to delete + :param remove: Optional function to remove passed path + """ + + try: + remove(path) + except OSError as e: + if e.errno != errno.ENOENT: + raise + + +@contextlib.contextmanager +def remove_path_on_error(path, remove=delete_if_exists): + """Protect code that wants to operate on PATH atomically. + Any exception will cause PATH to be removed. + + :param path: File to work with + :param remove: Optional function to remove passed path + """ + + try: + yield + except Exception: + with excutils.save_and_reraise_exception(): + remove(path) + + +def file_open(*args, **kwargs): + """Open file + + see built-in file() documentation for more details + + Note: The reason this is kept in a separate module is to easily + be able to provide a stub module that doesn't alter system + state at all (for unit tests) + """ + return file(*args, **kwargs) + + +def write_to_tempfile(content, path=None, suffix='', prefix='tmp'): + """Create temporary file or use existing file. + + This util is needed for creating temporary file with + specified content, suffix and prefix. If path is not None, + it will be used for writing content. If the path doesn't + exist it'll be created. + + :param content: content for temporary file. 
+ :param path: same as parameter 'dir' for mkstemp + :param suffix: same as parameter 'suffix' for mkstemp + :param prefix: same as parameter 'prefix' for mkstemp + + For example: it can be used in database tests for creating + configuration files. + """ + if path: + ensure_tree(path) + + (fd, path) = tempfile.mkstemp(suffix=suffix, dir=path, prefix=prefix) + try: + os.write(fd, content) + finally: + os.close(fd) + return path diff --git a/neutron/openstack/common/fixture/__init__.py b/neutron/openstack/common/fixture/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/neutron/openstack/common/fixture/config.py b/neutron/openstack/common/fixture/config.py new file mode 100644 index 000000000..0bf90ff7a --- /dev/null +++ b/neutron/openstack/common/fixture/config.py @@ -0,0 +1,45 @@ +# +# Copyright 2013 Mirantis, Inc. +# Copyright 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +import fixtures +from oslo.config import cfg +import six + + +class Config(fixtures.Fixture): + """Override some configuration values. + + The keyword arguments are the names of configuration options to + override and their values. + + If a group argument is supplied, the overrides are applied to + the specified configuration option group. + + All overrides are automatically cleared at the end of the current + test by the reset() method, which is registered by addCleanup(). 
+ """ + + def __init__(self, conf=cfg.CONF): + self.conf = conf + + def setUp(self): + super(Config, self).setUp() + self.addCleanup(self.conf.reset) + + def config(self, **kw): + group = kw.pop('group', None) + for k, v in six.iteritems(kw): + self.conf.set_override(k, v, group) diff --git a/neutron/openstack/common/fixture/lockutils.py b/neutron/openstack/common/fixture/lockutils.py new file mode 100644 index 000000000..90932c56e --- /dev/null +++ b/neutron/openstack/common/fixture/lockutils.py @@ -0,0 +1,51 @@ +# Copyright 2011 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import fixtures + +from neutron.openstack.common import lockutils + + +class LockFixture(fixtures.Fixture): + """External locking fixture. + + This fixture is basically an alternative to the synchronized decorator with + the external flag so that tearDowns and addCleanups will be included in + the lock context for locking between tests. The fixture is recommended to + be the first line in a test method, like so:: + + def test_method(self): + self.useFixture(LockFixture) + ... + + or the first line in setUp if all the test methods in the class are + required to be serialized. Something like:: + + class TestCase(testtools.testcase): + def setUp(self): + self.useFixture(LockFixture) + super(TestCase, self).setUp() + ... + + This is because addCleanups are put on a LIFO queue that gets run after the + test method exits. 
(either by completing or raising an exception) + """ + def __init__(self, name, lock_file_prefix=None): + self.mgr = lockutils.lock(name, lock_file_prefix, True) + + def setUp(self): + super(LockFixture, self).setUp() + self.addCleanup(self.mgr.__exit__, None, None, None) + self.mgr.__enter__() diff --git a/neutron/openstack/common/fixture/mockpatch.py b/neutron/openstack/common/fixture/mockpatch.py new file mode 100644 index 000000000..858e77cd0 --- /dev/null +++ b/neutron/openstack/common/fixture/mockpatch.py @@ -0,0 +1,49 @@ +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# Copyright 2013 Hewlett-Packard Development Company, L.P. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import fixtures +import mock + + +class PatchObject(fixtures.Fixture): + """Deal with code around mock.""" + + def __init__(self, obj, attr, **kwargs): + self.obj = obj + self.attr = attr + self.kwargs = kwargs + + def setUp(self): + super(PatchObject, self).setUp() + _p = mock.patch.object(self.obj, self.attr, **self.kwargs) + self.mock = _p.start() + self.addCleanup(_p.stop) + + +class Patch(fixtures.Fixture): + + """Deal with code around mock.patch.""" + + def __init__(self, obj, **kwargs): + self.obj = obj + self.kwargs = kwargs + + def setUp(self): + super(Patch, self).setUp() + _p = mock.patch(self.obj, **self.kwargs) + self.mock = _p.start() + self.addCleanup(_p.stop) diff --git a/neutron/openstack/common/fixture/moxstubout.py b/neutron/openstack/common/fixture/moxstubout.py new file mode 100644 index 000000000..e8c031f08 --- /dev/null +++ b/neutron/openstack/common/fixture/moxstubout.py @@ -0,0 +1,32 @@ +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# Copyright 2013 Hewlett-Packard Development Company, L.P. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import fixtures +import mox + + +class MoxStubout(fixtures.Fixture): + """Deal with code around mox and stubout as a fixture.""" + + def setUp(self): + super(MoxStubout, self).setUp() + # emulate some of the mox stuff, we can't use the metaclass + # because it screws with our generators + self.mox = mox.Mox() + self.stubs = self.mox.stubs + self.addCleanup(self.mox.UnsetStubs) + self.addCleanup(self.mox.VerifyAll) diff --git a/neutron/openstack/common/gettextutils.py b/neutron/openstack/common/gettextutils.py new file mode 100644 index 000000000..71abc7151 --- /dev/null +++ b/neutron/openstack/common/gettextutils.py @@ -0,0 +1,498 @@ +# Copyright 2012 Red Hat, Inc. +# Copyright 2013 IBM Corp. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +gettext for openstack-common modules. + +Usual usage in an openstack.common module: + + from neutron.openstack.common.gettextutils import _ +""" + +import copy +import functools +import gettext +import locale +from logging import handlers +import os + +from babel import localedata +import six + +_AVAILABLE_LANGUAGES = {} + +# FIXME(dhellmann): Remove this when moving to oslo.i18n. +USE_LAZY = False + + +class TranslatorFactory(object): + """Create translator functions + """ + + def __init__(self, domain, lazy=False, localedir=None): + """Establish a set of translation functions for the domain. + + :param domain: Name of translation domain, + specifying a message catalog. 
+ :type domain: str + :param lazy: Delays translation until a message is emitted. + Defaults to False. + :type lazy: Boolean + :param localedir: Directory with translation catalogs. + :type localedir: str + """ + self.domain = domain + self.lazy = lazy + if localedir is None: + localedir = os.environ.get(domain.upper() + '_LOCALEDIR') + self.localedir = localedir + + def _make_translation_func(self, domain=None): + """Return a new translation function ready for use. + + Takes into account whether or not lazy translation is being + done. + + The domain can be specified to override the default from the + factory, but the localedir from the factory is always used + because we assume the log-level translation catalogs are + installed in the same directory as the main application + catalog. + + """ + if domain is None: + domain = self.domain + if self.lazy: + return functools.partial(Message, domain=domain) + t = gettext.translation( + domain, + localedir=self.localedir, + fallback=True, + ) + if six.PY3: + return t.gettext + return t.ugettext + + @property + def primary(self): + "The default translation function." + return self._make_translation_func() + + def _make_log_translation_func(self, level): + return self._make_translation_func(self.domain + '-log-' + level) + + @property + def log_info(self): + "Translate info-level log messages." + return self._make_log_translation_func('info') + + @property + def log_warning(self): + "Translate warning-level log messages." + return self._make_log_translation_func('warning') + + @property + def log_error(self): + "Translate error-level log messages." + return self._make_log_translation_func('error') + + @property + def log_critical(self): + "Translate critical-level log messages." + return self._make_log_translation_func('critical') + + +# NOTE(dhellmann): When this module moves out of the incubator into +# oslo.i18n, these global variables can be moved to an integration +# module within each application. 
+ +# Create the global translation functions. +_translators = TranslatorFactory('neutron') + +# The primary translation function using the well-known name "_" +_ = _translators.primary + +# Translators for log levels. +# +# The abbreviated names are meant to reflect the usual use of a short +# name like '_'. The "L" is for "log" and the other letter comes from +# the level. +_LI = _translators.log_info +_LW = _translators.log_warning +_LE = _translators.log_error +_LC = _translators.log_critical + +# NOTE(dhellmann): End of globals that will move to the application's +# integration module. + + +def enable_lazy(): + """Convenience function for configuring _() to use lazy gettext + + Call this at the start of execution to enable the gettextutils._ + function to use lazy gettext functionality. This is useful if + your project is importing _ directly instead of using the + gettextutils.install() way of importing the _ function. + """ + # FIXME(dhellmann): This function will be removed in oslo.i18n, + # because the TranslatorFactory makes it superfluous. + global _, _LI, _LW, _LE, _LC, USE_LAZY + tf = TranslatorFactory('neutron', lazy=True) + _ = tf.primary + _LI = tf.log_info + _LW = tf.log_warning + _LE = tf.log_error + _LC = tf.log_critical + USE_LAZY = True + + +def install(domain, lazy=False): + """Install a _() function using the given translation domain. + + Given a translation domain, install a _() function using gettext's + install() function. + + The main difference from gettext.install() is that we allow + overriding the default localedir (e.g. /usr/share/locale) using + a translation-domain-specific environment variable (e.g. + NOVA_LOCALEDIR). + + :param domain: the translation domain + :param lazy: indicates whether or not to install the lazy _() function. 
+ The lazy _() introduces a way to do deferred translation + of messages by installing a _ that builds Message objects, + instead of strings, which can then be lazily translated into + any available locale. + """ + if lazy: + from six import moves + tf = TranslatorFactory(domain, lazy=True) + moves.builtins.__dict__['_'] = tf.primary + else: + localedir = '%s_LOCALEDIR' % domain.upper() + if six.PY3: + gettext.install(domain, + localedir=os.environ.get(localedir)) + else: + gettext.install(domain, + localedir=os.environ.get(localedir), + unicode=True) + + +class Message(six.text_type): + """A Message object is a unicode object that can be translated. + + Translation of Message is done explicitly using the translate() method. + For all non-translation intents and purposes, a Message is simply unicode, + and can be treated as such. + """ + + def __new__(cls, msgid, msgtext=None, params=None, + domain='neutron', *args): + """Create a new Message object. + + In order for translation to work gettext requires a message ID, this + msgid will be used as the base unicode text. It is also possible + for the msgid and the base unicode text to be different by passing + the msgtext parameter. + """ + # If the base msgtext is not given, we use the default translation + # of the msgid (which is in English) just in case the system locale is + # not English, so that the base text will be in that locale by default. + if not msgtext: + msgtext = Message._translate_msgid(msgid, domain) + # We want to initialize the parent unicode with the actual object that + # would have been plain unicode if 'Message' was not enabled. + msg = super(Message, cls).__new__(cls, msgtext) + msg.msgid = msgid + msg.domain = domain + msg.params = params + return msg + + def translate(self, desired_locale=None): + """Translate this message to the desired locale. 
+ + :param desired_locale: The desired locale to translate the message to, + if no locale is provided the message will be + translated to the system's default locale. + + :returns: the translated message in unicode + """ + + translated_message = Message._translate_msgid(self.msgid, + self.domain, + desired_locale) + if self.params is None: + # No need for more translation + return translated_message + + # This Message object may have been formatted with one or more + # Message objects as substitution arguments, given either as a single + # argument, part of a tuple, or as one or more values in a dictionary. + # When translating this Message we need to translate those Messages too + translated_params = _translate_args(self.params, desired_locale) + + translated_message = translated_message % translated_params + + return translated_message + + @staticmethod + def _translate_msgid(msgid, domain, desired_locale=None): + if not desired_locale: + system_locale = locale.getdefaultlocale() + # If the system locale is not available to the runtime use English + if not system_locale[0]: + desired_locale = 'en_US' + else: + desired_locale = system_locale[0] + + locale_dir = os.environ.get(domain.upper() + '_LOCALEDIR') + lang = gettext.translation(domain, + localedir=locale_dir, + languages=[desired_locale], + fallback=True) + if six.PY3: + translator = lang.gettext + else: + translator = lang.ugettext + + translated_message = translator(msgid) + return translated_message + + def __mod__(self, other): + # When we mod a Message we want the actual operation to be performed + # by the parent class (i.e. 
unicode()), the only thing we do here is + # save the original msgid and the parameters in case of a translation + params = self._sanitize_mod_params(other) + unicode_mod = super(Message, self).__mod__(params) + modded = Message(self.msgid, + msgtext=unicode_mod, + params=params, + domain=self.domain) + return modded + + def _sanitize_mod_params(self, other): + """Sanitize the object being modded with this Message. + + - Add support for modding 'None' so translation supports it + - Trim the modded object, which can be a large dictionary, to only + those keys that would actually be used in a translation + - Snapshot the object being modded, in case the message is + translated, it will be used as it was when the Message was created + """ + if other is None: + params = (other,) + elif isinstance(other, dict): + # Merge the dictionaries + # Copy each item in case one does not support deep copy. + params = {} + if isinstance(self.params, dict): + for key, val in self.params.items(): + params[key] = self._copy_param(val) + for key, val in other.items(): + params[key] = self._copy_param(val) + else: + params = self._copy_param(other) + return params + + def _copy_param(self, param): + try: + return copy.deepcopy(param) + except Exception: + # Fallback to casting to unicode this will handle the + # python code-like objects that can't be deep-copied + return six.text_type(param) + + def __add__(self, other): + msg = _('Message objects do not support addition.') + raise TypeError(msg) + + def __radd__(self, other): + return self.__add__(other) + + if six.PY2: + def __str__(self): + # NOTE(luisg): Logging in python 2.6 tries to str() log records, + # and it expects specifically a UnicodeError in order to proceed. + msg = _('Message objects do not support str() because they may ' + 'contain non-ascii characters. 
' + 'Please use unicode() or translate() instead.') + raise UnicodeError(msg) + + +def get_available_languages(domain): + """Lists the available languages for the given translation domain. + + :param domain: the domain to get languages for + """ + if domain in _AVAILABLE_LANGUAGES: + return copy.copy(_AVAILABLE_LANGUAGES[domain]) + + localedir = '%s_LOCALEDIR' % domain.upper() + find = lambda x: gettext.find(domain, + localedir=os.environ.get(localedir), + languages=[x]) + + # NOTE(mrodden): en_US should always be available (and first in case + # order matters) since our in-line message strings are en_US + language_list = ['en_US'] + # NOTE(luisg): Babel <1.0 used a function called list(), which was + # renamed to locale_identifiers() in >=1.0, the requirements master list + # requires >=0.9.6, uncapped, so defensively work with both. We can remove + # this check when the master list updates to >=1.0, and update all projects + list_identifiers = (getattr(localedata, 'list', None) or + getattr(localedata, 'locale_identifiers')) + locale_identifiers = list_identifiers() + + for i in locale_identifiers: + if find(i) is not None: + language_list.append(i) + + # NOTE(luisg): Babel>=1.0,<1.3 has a bug where some OpenStack supported + # locales (e.g. 'zh_CN', and 'zh_TW') aren't supported even though they + # are perfectly legitimate locales: + # https://github.com/mitsuhiko/babel/issues/37 + # In Babel 1.3 they fixed the bug and they support these locales, but + # they are still not explicitly "listed" by locale_identifiers(). + # That is why we add the locales here explicitly if necessary so that + # they are listed as supported. 
+ aliases = {'zh': 'zh_CN', + 'zh_Hant_HK': 'zh_HK', + 'zh_Hant': 'zh_TW', + 'fil': 'tl_PH'} + for (locale, alias) in six.iteritems(aliases): + if locale in language_list and alias not in language_list: + language_list.append(alias) + + _AVAILABLE_LANGUAGES[domain] = language_list + return copy.copy(language_list) + + +def translate(obj, desired_locale=None): + """Gets the translated unicode representation of the given object. + + If the object is not translatable it is returned as-is. + If the locale is None the object is translated to the system locale. + + :param obj: the object to translate + :param desired_locale: the locale to translate the message to, if None the + default system locale will be used + :returns: the translated object in unicode, or the original object if + it could not be translated + """ + message = obj + if not isinstance(message, Message): + # If the object to translate is not already translatable, + # let's first get its unicode representation + message = six.text_type(obj) + if isinstance(message, Message): + # Even after unicoding() we still need to check if we are + # running with translatable unicode before translating + return message.translate(desired_locale) + return obj + + +def _translate_args(args, desired_locale=None): + """Translates all the translatable elements of the given arguments object. + + This method is used for translating the translatable values in method + arguments which include values of tuples or dictionaries. + If the object is not a tuple or a dictionary the object itself is + translated if it is translatable. + + If the locale is None the object is translated to the system locale. 
+ + :param args: the args to translate + :param desired_locale: the locale to translate the args to, if None the + default system locale will be used + :returns: a new args object with the translated contents of the original + """ + if isinstance(args, tuple): + return tuple(translate(v, desired_locale) for v in args) + if isinstance(args, dict): + translated_dict = {} + for (k, v) in six.iteritems(args): + translated_v = translate(v, desired_locale) + translated_dict[k] = translated_v + return translated_dict + return translate(args, desired_locale) + + +class TranslationHandler(handlers.MemoryHandler): + """Handler that translates records before logging them. + + The TranslationHandler takes a locale and a target logging.Handler object + to forward LogRecord objects to after translating them. This handler + depends on Message objects being logged, instead of regular strings. + + The handler can be configured declaratively in the logging.conf as follows: + + [handlers] + keys = translatedlog, translator + + [handler_translatedlog] + class = handlers.WatchedFileHandler + args = ('/var/log/api-localized.log',) + formatter = context + + [handler_translator] + class = openstack.common.log.TranslationHandler + target = translatedlog + args = ('zh_CN',) + + If the specified locale is not available in the system, the handler will + log in the default locale. + """ + + def __init__(self, locale=None, target=None): + """Initialize a TranslationHandler + + :param locale: locale to use for translating messages + :param target: logging.Handler object to forward + LogRecord objects to after translation + """ + # NOTE(luisg): In order to allow this handler to be a wrapper for + # other handlers, such as a FileHandler, and still be able to + # configure it using logging.conf, this handler has to extend + # MemoryHandler because only the MemoryHandlers' logging.conf + # parsing is implemented such that it accepts a target handler. 
+ handlers.MemoryHandler.__init__(self, capacity=0, target=target) + self.locale = locale + + def setFormatter(self, fmt): + self.target.setFormatter(fmt) + + def emit(self, record): + # We save the message from the original record to restore it + # after translation, so other handlers are not affected by this + original_msg = record.msg + original_args = record.args + + try: + self._translate_and_log_record(record) + finally: + record.msg = original_msg + record.args = original_args + + def _translate_and_log_record(self, record): + record.msg = translate(record.msg, self.locale) + + # In addition to translating the message, we also need to translate + # arguments that were passed to the log method that were not part + # of the main message e.g., log.info(_('Some message %s'), this_one)) + record.args = _translate_args(record.args, self.locale) + + self.target.emit(record) diff --git a/neutron/openstack/common/importutils.py b/neutron/openstack/common/importutils.py new file mode 100644 index 000000000..4fd9ae2bc --- /dev/null +++ b/neutron/openstack/common/importutils.py @@ -0,0 +1,66 @@ +# Copyright 2011 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Import related utilities and helper functions. 
+""" + +import sys +import traceback + + +def import_class(import_str): + """Returns a class from a string including module and class.""" + mod_str, _sep, class_str = import_str.rpartition('.') + try: + __import__(mod_str) + return getattr(sys.modules[mod_str], class_str) + except (ValueError, AttributeError): + raise ImportError('Class %s cannot be found (%s)' % + (class_str, + traceback.format_exception(*sys.exc_info()))) + + +def import_object(import_str, *args, **kwargs): + """Import a class and return an instance of it.""" + return import_class(import_str)(*args, **kwargs) + + +def import_object_ns(name_space, import_str, *args, **kwargs): + """Tries to import object from default namespace. + + Imports a class and return an instance of it, first by trying + to find the class in a default namespace, then failing back to + a full path if not found in the default namespace. + """ + import_value = "%s.%s" % (name_space, import_str) + try: + return import_class(import_value)(*args, **kwargs) + except ImportError: + return import_class(import_str)(*args, **kwargs) + + +def import_module(import_str): + """Import a module.""" + __import__(import_str) + return sys.modules[import_str] + + +def try_import(import_str, default=None): + """Try to import a module and if it fails return default.""" + try: + return import_module(import_str) + except ImportError: + return default diff --git a/neutron/openstack/common/jsonutils.py b/neutron/openstack/common/jsonutils.py new file mode 100644 index 000000000..581b8c051 --- /dev/null +++ b/neutron/openstack/common/jsonutils.py @@ -0,0 +1,186 @@ +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# Copyright 2011 Justin Santa Barbara +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
'''
JSON related utilities.

This module provides a few things:

    1) A handy function for getting an object down to something that can be
    JSON serialized.  See to_primitive().

    2) Wrappers around loads() and dumps().  The dumps() wrapper will
    automatically use to_primitive() for you if needed.

    3) This sets up anyjson to use the loads() and dumps() wrappers if anyjson
    is available.
'''


import codecs
import datetime
import functools
import inspect
import itertools
import sys

if sys.version_info < (2, 7):
    # On Python <= 2.6, json module is not C boosted, so try to use
    # simplejson module if available
    try:
        import simplejson as json
    except ImportError:
        import json
else:
    import json

import six
import six.moves.xmlrpc_client as xmlrpclib

from neutron.openstack.common import gettextutils
from neutron.openstack.common import importutils
from neutron.openstack.common import strutils
from neutron.openstack.common import timeutils

# netaddr is optional: it is None when the package is not installed, and
# to_primitive() guards its isinstance() check on that.
netaddr = importutils.try_import("netaddr")

# Predicates identifying objects (modules, classes, frames, ...) that must
# not be walked by to_primitive() and are stringified instead.
_nasty_type_tests = [inspect.ismodule, inspect.isclass, inspect.ismethod,
                     inspect.isfunction, inspect.isgeneratorfunction,
                     inspect.isgenerator, inspect.istraceback, inspect.isframe,
                     inspect.iscode, inspect.isbuiltin, inspect.isroutine,
                     inspect.isabstract]

# Types that are already JSON-serializable and returned unchanged.
_simple_types = (six.string_types + six.integer_types
                 + (type(None), bool, float))


def to_primitive(value, convert_instances=False, convert_datetime=True,
                 level=0, max_depth=3):
    """Convert a complex object into primitives.

    Handy for JSON serialization. We can optionally handle instances,
    but since this is a recursive function, we could have cyclical
    data structures.

    To handle cyclical data structures we could track the actual objects
    visited in a set, but not all objects are hashable. Instead we just
    track the depth of the object inspections and don't go too deep.

    Therefore, convert_instances=True is lossy ... be aware.

    """
    # handle obvious types first - order of basic types determined by running
    # full tests on nova project, resulting in the following counts:
    # 572754
    # 460353
    # 379632
    # 274610
    # 199918
    # 114200
    # 51817
    # 26164
    # 6491
    # 283
    # 19
    if isinstance(value, _simple_types):
        return value

    if isinstance(value, datetime.datetime):
        if convert_datetime:
            return timeutils.strtime(value)
        else:
            return value

    # value of itertools.count doesn't get caught by nasty_type_tests
    # and results in infinite loop when list(value) is called.
    if type(value) == itertools.count:
        return six.text_type(value)

    # FIXME(vish): Workaround for LP bug 852095. Without this workaround,
    #              tests that raise an exception in a mocked method that
    #              has a @wrap_exception with a notifier will fail. If
    #              we up the dependency to 0.5.4 (when it is released) we
    #              can remove this workaround.
    if getattr(value, '__module__', None) == 'mox':
        return 'mock'

    if level > max_depth:
        return '?'

    # The try block may not be necessary after the class check above,
    # but just in case ...
    try:
        recursive = functools.partial(to_primitive,
                                      convert_instances=convert_instances,
                                      convert_datetime=convert_datetime,
                                      level=level,
                                      max_depth=max_depth)
        # NOTE(review): the plain dict/list branches below recurse without
        # bumping `level`, so the max_depth guard never fires for nested
        # builtin containers; a self-referencing list/dict would hit Python's
        # recursion limit rather than returning '?'. Only the iteritems and
        # __dict__ branches increment the depth. Verify against callers
        # before relying on the depth guard for builtin containers.
        if isinstance(value, dict):
            return dict((k, recursive(v)) for k, v in six.iteritems(value))
        elif isinstance(value, (list, tuple)):
            return [recursive(lv) for lv in value]

        # It's not clear why xmlrpclib created their own DateTime type, but
        # for our purposes, make it a datetime type which is explicitly
        # handled
        if isinstance(value, xmlrpclib.DateTime):
            value = datetime.datetime(*tuple(value.timetuple())[:6])

        if convert_datetime and isinstance(value, datetime.datetime):
            return timeutils.strtime(value)
        elif isinstance(value, gettextutils.Message):
            return value.data
        elif hasattr(value, 'iteritems'):
            return recursive(dict(value.iteritems()), level=level + 1)
        elif hasattr(value, '__iter__'):
            return recursive(list(value))
        elif convert_instances and hasattr(value, '__dict__'):
            # Likely an instance of something. Watch for cycles.
            # Ignore class member vars.
            return recursive(value.__dict__, level=level + 1)
        elif netaddr and isinstance(value, netaddr.IPAddress):
            return six.text_type(value)
        else:
            if any(test(value) for test in _nasty_type_tests):
                return six.text_type(value)
            return value
    except TypeError:
        # Class objects are tricky since they may define something like
        # __iter__ defined but it isn't callable as list().
        return six.text_type(value)


def dumps(value, default=to_primitive, **kwargs):
    """Serialize ``value`` to JSON, reducing complex objects via default()."""
    return json.dumps(value, default=default, **kwargs)


def loads(s, encoding='utf-8'):
    """Deserialize JSON from a str/bytes value, decoding bytes first."""
    return json.loads(strutils.safe_decode(s, encoding))


def load(fp, encoding='utf-8'):
    """Deserialize JSON from a binary file object using ``encoding``."""
    return json.load(codecs.getreader(encoding)(fp))


# If anyjson is installed, register these wrappers as an implementation so
# other code using anyjson picks up the to_primitive()-aware dumps().
try:
    import anyjson
except ImportError:
    pass
else:
    anyjson._modules.append((__name__, 'dumps', TypeError,
                             'loads', ValueError, 'load'))
    anyjson.force_implementation(__name__)


"""Local storage of variables using weak references"""

import threading
import weakref


class WeakLocal(threading.local):
    """Thread-local storage whose values are held by weak reference."""

    def __getattribute__(self, attr):
        rval = super(WeakLocal, self).__getattribute__(attr)
        if rval:
            # NOTE(mikal): this bit is confusing. What is stored is a weak
            # reference, not the value itself. We therefore need to lookup
            # the weak reference and return the inner value here.
            rval = rval()
        return rval

    def __setattr__(self, attr, value):
        # Store only a weak reference so the attribute does not keep the
        # value alive; weakref.ref() raises TypeError for non-weakrefable
        # values (e.g. plain ints/strs).
        value = weakref.ref(value)
        return super(WeakLocal, self).__setattr__(attr, value)


# NOTE(mikal): the name "store" should be deprecated in the future
store = WeakLocal()

# A "weak" store uses weak references and allows an object to fall out of
# scope when it falls out of scope in the code that uses the thread local
# storage. A "strong" store will hold a reference to the object so that it
# never falls out of scope.
weak_store = WeakLocal()
strong_store = threading.local()
import contextlib
import errno
import functools
import os
import shutil
import subprocess
import sys
import tempfile
import threading
import time
import weakref

from oslo.config import cfg

from neutron.openstack.common import fileutils
from neutron.openstack.common.gettextutils import _
from neutron.openstack.common import local
from neutron.openstack.common import log as logging


LOG = logging.getLogger(__name__)


util_opts = [
    cfg.BoolOpt('disable_process_locking', default=False,
                help='Whether to disable inter-process locks'),
    cfg.StrOpt('lock_path',
               default=os.environ.get("NEUTRON_LOCK_PATH"),
               help=('Directory to use for lock files.'))
]


CONF = cfg.CONF
CONF.register_opts(util_opts)


def set_defaults(lock_path):
    """Override the default value of the lock_path config option."""
    cfg.set_defaults(util_opts, lock_path=lock_path)


class _InterProcessLock(object):
    """Lock implementation which allows multiple locks, working around
    issues like bugs.debian.org/cgi-bin/bugreport.cgi?bug=632857 and does
    not require any cleanup. Since the lock is always held on a file
    descriptor rather than outside of the process, the lock gets dropped
    automatically if the process crashes, even if __exit__ is not executed.

    There are no guarantees regarding usage by multiple green threads in a
    single process here. This lock works only between processes. Exclusive
    access between local threads should be achieved using the semaphores
    in the @synchronized decorator.

    Note these locks are released when the descriptor is closed, so it's not
    safe to close the file descriptor while another green thread holds the
    lock. Just opening and closing the lock file can break synchronisation,
    so lock files must be accessed only using this abstraction.
    """

    def __init__(self, name):
        # `name` is the path of the lock file; the file object is created
        # lazily on __enter__.
        self.lockfile = None
        self.fname = name

    def __enter__(self):
        # Opening in 'w' mode truncates/creates the lock file; the lock is
        # taken on the descriptor, not on the file contents.
        self.lockfile = open(self.fname, 'w')

        while True:
            try:
                # Using non-blocking locks since green threads are not
                # patched to deal with blocking locking calls.
                # Also upon reading the MSDN docs for locking(), it seems
                # to have a laughable 10 attempts "blocking" mechanism.
                self.trylock()
                return self
            except IOError as e:
                if e.errno in (errno.EACCES, errno.EAGAIN):
                    # external locks synchronise things like iptables
                    # updates - give it some time to prevent busy spinning
                    time.sleep(0.01)
                else:
                    raise

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Best-effort release: an IOError on unlock/close is logged rather
        # than propagated, so callers' exception flow is not disturbed.
        try:
            self.unlock()
            self.lockfile.close()
        except IOError:
            LOG.exception(_("Could not release the acquired lock `%s`"),
                          self.fname)

    def trylock(self):
        # Platform-specific; implemented by _WindowsLock/_PosixLock.
        raise NotImplementedError()

    def unlock(self):
        # Platform-specific; implemented by _WindowsLock/_PosixLock.
        raise NotImplementedError()


class _WindowsLock(_InterProcessLock):
    """Inter-process lock backed by msvcrt.locking() (one byte locked)."""

    def trylock(self):
        msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_NBLCK, 1)

    def unlock(self):
        msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_UNLCK, 1)


class _PosixLock(_InterProcessLock):
    """Inter-process lock backed by POSIX advisory locking (fcntl.lockf)."""

    def trylock(self):
        # LOCK_NB keeps the call non-blocking; contention surfaces as
        # IOError with EACCES/EAGAIN, handled in __enter__.
        fcntl.lockf(self.lockfile, fcntl.LOCK_EX | fcntl.LOCK_NB)

    def unlock(self):
        fcntl.lockf(self.lockfile, fcntl.LOCK_UN)


# Select the platform implementation once at import time; the platform
# module (msvcrt/fcntl) is only imported on the OS where it exists.
if os.name == 'nt':
    import msvcrt
    InterProcessLock = _WindowsLock
else:
    import fcntl
    InterProcessLock = _PosixLock

# Weak values let a semaphore disappear once no lock() call references it.
_semaphores = weakref.WeakValueDictionary()
_semaphores_lock = threading.Lock()


@contextlib.contextmanager
def lock(name, lock_file_prefix=None, external=False, lock_path=None):
    """Context based lock

    This function yields a `threading.Semaphore` instance (if we don't use
    eventlet.monkey_patch(), else `semaphore.Semaphore`) unless external is
    True, in which case, it'll yield an InterProcessLock instance.

    :param lock_file_prefix: The lock_file_prefix argument is used to provide
    lock files on disk with a meaningful prefix.

    :param external: The external keyword argument denotes whether this lock
    should work across multiple processes. This means that if two different
    workers both run a a method decorated with @synchronized('mylock',
    external=True), only one of them will execute at a time.

    :param lock_path: The lock_path keyword argument is used to specify a
    special location for external lock files to live. If nothing is set, then
    CONF.lock_path is used as a default.
    """
    with _semaphores_lock:
        try:
            sem = _semaphores[name]
        except KeyError:
            sem = threading.Semaphore()
            _semaphores[name] = sem

    with sem:
        LOG.debug(_('Got semaphore "%(lock)s"'), {'lock': name})

        # NOTE(mikal): I know this looks odd
        if not hasattr(local.strong_store, 'locks_held'):
            local.strong_store.locks_held = []
        local.strong_store.locks_held.append(name)

        try:
            if external and not CONF.disable_process_locking:
                LOG.debug(_('Attempting to grab file lock "%(lock)s"'),
                          {'lock': name})

                # We need a copy of lock_path because it is non-local
                local_lock_path = lock_path or CONF.lock_path
                if not local_lock_path:
                    raise cfg.RequiredOptError('lock_path')

                if not os.path.exists(local_lock_path):
                    fileutils.ensure_tree(local_lock_path)
                    LOG.info(_('Created lock path: %s'), local_lock_path)

                def add_prefix(name, prefix):
                    # Join prefix and name with a single '-' separator.
                    if not prefix:
                        return name
                    sep = '' if prefix.endswith('-') else '-'
                    return '%s%s%s' % (prefix, sep, name)

                # NOTE(mikal): the lock name cannot contain directory
                # separators
                lock_file_name = add_prefix(name.replace(os.sep, '_'),
                                            lock_file_prefix)

                lock_file_path = os.path.join(local_lock_path, lock_file_name)

                try:
                    # NOTE: the local name `lock` deliberately shadows this
                    # function inside the with-block; nothing below needs
                    # the function itself.
                    lock = InterProcessLock(lock_file_path)
                    with lock as lock:
                        LOG.debug(_('Got file lock "%(lock)s" at %(path)s'),
                                  {'lock': name, 'path': lock_file_path})
                        yield lock
                finally:
                    LOG.debug(_('Released file lock "%(lock)s" at %(path)s'),
                              {'lock': name, 'path': lock_file_path})
            else:
                yield sem

        finally:
            local.strong_store.locks_held.remove(name)


def synchronized(name, lock_file_prefix=None, external=False, lock_path=None):
    """Synchronization decorator.

    Decorating a method like so::

        @synchronized('mylock')
        def foo(self, *args):
           ...

    ensures that only one thread will execute the foo method at a time.

    Different methods can share the same lock::

        @synchronized('mylock')
        def foo(self, *args):
           ...

        @synchronized('mylock')
        def bar(self, *args):
           ...

    This way only one of either foo or bar can be executing at a time.
    """

    def wrap(f):
        @functools.wraps(f)
        def inner(*args, **kwargs):
            try:
                with lock(name, lock_file_prefix, external, lock_path):
                    LOG.debug(_('Got semaphore / lock "%(function)s"'),
                              {'function': f.__name__})
                    return f(*args, **kwargs)
            finally:
                LOG.debug(_('Semaphore / lock released "%(function)s"'),
                          {'function': f.__name__})
        return inner
    return wrap


def synchronized_with_prefix(lock_file_prefix):
    """Partial object generator for the synchronization decorator.

    Redefine @synchronized in each project like so::

        (in nova/utils.py)
        from nova.openstack.common import lockutils

        synchronized = lockutils.synchronized_with_prefix('nova-')


        (in nova/foo.py)
        from nova import utils

        @utils.synchronized('mylock')
        def bar(self, *args):
           ...

    The lock_file_prefix argument is used to provide lock files on disk with a
    meaningful prefix.
    """

    return functools.partial(synchronized, lock_file_prefix=lock_file_prefix)


def main(argv):
    """Create a dir for locks and pass it to command from arguments

    If you run this:
        python -m openstack.common.lockutils python setup.py testr

    a temporary directory will be created for all your locks and passed to all
    your tests in an environment variable. The temporary dir will be deleted
    afterwards and the return value will be preserved.
    """

    lock_dir = tempfile.mkdtemp()
    os.environ["NEUTRON_LOCK_PATH"] = lock_dir
    try:
        ret_val = subprocess.call(argv[1:])
    finally:
        shutil.rmtree(lock_dir, ignore_errors=True)
    return ret_val


if __name__ == '__main__':
    sys.exit(main(sys.argv))
import inspect
import itertools
import logging
import logging.config
import logging.handlers
import os
import re
import sys
import traceback

from oslo.config import cfg
import six
from six import moves

from neutron.openstack.common.gettextutils import _
from neutron.openstack.common import importutils
from neutron.openstack.common import jsonutils
from neutron.openstack.common import local


_DEFAULT_LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S"

# Keys whose values are masked out of log messages by mask_password().
_SANITIZE_KEYS = ['adminPass', 'admin_pass', 'password', 'admin_password']

# NOTE(ldbragst): Let's build a list of regex objects using the list of
# _SANITIZE_KEYS we already have. This way, we only have to add the new key
# to the list of _SANITIZE_KEYS and we can generate regular expressions
# for XML and JSON automatically.
_SANITIZE_PATTERNS = []
# NOTE(review): the second pattern's closing group is empty '()'; upstream
# oslo uses '(</%(key)s>)' to match the XML closing tag — this looks like a
# transcription artifact, verify against oslo-incubator before relying on
# XML masking.
_FORMAT_PATTERNS = [r'(%(key)s\s*[=]\s*[\"\']).*?([\"\'])',
                    r'(<%(key)s>).*?()',
                    r'([\"\']%(key)s[\"\']\s*:\s*[\"\']).*?([\"\'])',
                    r'([\'"].*?%(key)s[\'"]\s*:\s*u?[\'"]).*?([\'"])']

for key in _SANITIZE_KEYS:
    for pattern in _FORMAT_PATTERNS:
        reg_ex = re.compile(pattern % {'key': key}, re.DOTALL)
        _SANITIZE_PATTERNS.append(reg_ex)


common_cli_opts = [
    cfg.BoolOpt('debug',
                short='d',
                default=False,
                help='Print debugging output (set logging level to '
                     'DEBUG instead of default WARNING level).'),
    cfg.BoolOpt('verbose',
                short='v',
                default=False,
                help='Print more verbose output (set logging level to '
                     'INFO instead of default WARNING level).'),
]

logging_cli_opts = [
    cfg.StrOpt('log-config-append',
               metavar='PATH',
               deprecated_name='log-config',
               help='The name of logging configuration file. It does not '
                    'disable existing loggers, but just appends specified '
                    'logging configuration to any other existing logging '
                    'options. Please see the Python logging module '
                    'documentation for details on logging configuration '
                    'files.'),
    cfg.StrOpt('log-format',
               default=None,
               metavar='FORMAT',
               help='DEPRECATED. '
                    'A logging.Formatter log message format string which may '
                    'use any of the available logging.LogRecord attributes. '
                    'This option is deprecated.  Please use '
                    'logging_context_format_string and '
                    'logging_default_format_string instead.'),
    cfg.StrOpt('log-date-format',
               default=_DEFAULT_LOG_DATE_FORMAT,
               metavar='DATE_FORMAT',
               help='Format string for %%(asctime)s in log records. '
                    'Default: %(default)s'),
    cfg.StrOpt('log-file',
               metavar='PATH',
               deprecated_name='logfile',
               help='(Optional) Name of log file to output to. '
                    'If no default is set, logging will go to stdout.'),
    cfg.StrOpt('log-dir',
               deprecated_name='logdir',
               help='(Optional) The base directory used for relative '
                    '--log-file paths'),
    cfg.BoolOpt('use-syslog',
                default=False,
                help='Use syslog for logging.'),
    cfg.StrOpt('syslog-log-facility',
               default='LOG_USER',
               help='syslog facility to receive log lines')
]

generic_log_opts = [
    cfg.BoolOpt('use_stderr',
                default=True,
                help='Log output to standard error')
]

log_opts = [
    cfg.StrOpt('logging_context_format_string',
               default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
                       '%(name)s [%(request_id)s %(user_identity)s] '
                       '%(instance)s%(message)s',
               help='format string to use for log messages with context'),
    cfg.StrOpt('logging_default_format_string',
               default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
                       '%(name)s [-] %(instance)s%(message)s',
               help='format string to use for log messages without context'),
    cfg.StrOpt('logging_debug_format_suffix',
               default='%(funcName)s %(pathname)s:%(lineno)d',
               help='data to append to log format when level is DEBUG'),
    cfg.StrOpt('logging_exception_prefix',
               default='%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s '
                       '%(instance)s',
               help='prefix each line of exception output with this format'),
    cfg.ListOpt('default_log_levels',
                default=[
                    'amqp=WARN',
                    'amqplib=WARN',
                    'boto=WARN',
                    'qpid=WARN',
                    'sqlalchemy=WARN',
                    'suds=INFO',
                    'iso8601=WARN',
                ],
                help='list of logger=LEVEL pairs'),
    cfg.BoolOpt('publish_errors',
                default=False,
                help='publish error events'),
    cfg.BoolOpt('fatal_deprecations',
                default=False,
                help='make deprecations fatal'),

    # NOTE(mikal): there are two options here because sometimes we are handed
    # a full instance (and could include more information), and other times we
    # are just handed a UUID for the instance.
    cfg.StrOpt('instance_format',
               default='[instance: %(uuid)s] ',
               help='If an instance is passed with the log message, format '
                    'it like this'),
    cfg.StrOpt('instance_uuid_format',
               default='[instance: %(uuid)s] ',
               help='If an instance UUID is passed with the log message, '
                    'format it like this'),
]

CONF = cfg.CONF
CONF.register_cli_opts(common_cli_opts)
CONF.register_cli_opts(logging_cli_opts)
CONF.register_opts(generic_log_opts)
CONF.register_opts(log_opts)

# our new audit level
# NOTE(jkoelker) Since we synthesized an audit level, make the logging
#                module aware of it so it acts like other levels.
logging.AUDIT = logging.INFO + 1
logging.addLevelName(logging.AUDIT, 'AUDIT')


try:
    NullHandler = logging.NullHandler
except AttributeError:  # NOTE(jkoelker) NullHandler added in Python 2.7
    class NullHandler(logging.Handler):
        """No-op handler backport for Python < 2.7."""

        def handle(self, record):
            pass

        def emit(self, record):
            pass

        def createLock(self):
            self.lock = None


def _dictify_context(context):
    """Return the context as a dict, via its to_dict() when available."""
    if context is None:
        return None
    if not isinstance(context, dict) and getattr(context, 'to_dict', None):
        context = context.to_dict()
    return context


def _get_binary_name():
    # Name of the outermost script on the interpreter stack.
    return os.path.basename(inspect.stack()[-1][1])


def _get_log_file_path(binary=None):
    """Resolve the log file path from CONF.log_file/CONF.log_dir, or None."""
    logfile = CONF.log_file
    logdir = CONF.log_dir

    if logfile and not logdir:
        return logfile

    if logfile and logdir:
        return os.path.join(logdir, logfile)

    if logdir:
        binary = binary or _get_binary_name()
        return '%s.log' % (os.path.join(logdir, binary),)

    return None


def mask_password(message, secret="***"):
    """Replace password with 'secret' in message.

    :param message: The string which includes security information.
    :param secret: value with which to replace passwords.
    :returns: The unicode value of message with the password fields masked.

    For example:

    >>> mask_password("'adminPass' : 'aaaaa'")
    "'adminPass' : '***'"
    >>> mask_password("'admin_pass' : 'aaaaa'")
    "'admin_pass' : '***'"
    >>> mask_password('"password" : "aaaaa"')
    '"password" : "***"'
    >>> mask_password("'original_password' : 'aaaaa'")
    "'original_password' : '***'"
    >>> mask_password("u'original_password' : u'aaaaa'")
    "u'original_password' : u'***'"
    """
    message = six.text_type(message)

    # NOTE(ldbragst): Check to see if anything in message contains any key
    # specified in _SANITIZE_KEYS, if not then just return the message since
    # we don't have to mask any passwords.
    if not any(key in message for key in _SANITIZE_KEYS):
        return message

    secret = r'\g<1>' + secret + r'\g<2>'
    for pattern in _SANITIZE_PATTERNS:
        message = re.sub(pattern, secret, message)
    return message


class BaseLoggerAdapter(logging.LoggerAdapter):
    """LoggerAdapter that adds an audit() method for the AUDIT level."""

    def audit(self, msg, *args, **kwargs):
        self.log(logging.AUDIT, msg, *args, **kwargs)


class LazyAdapter(BaseLoggerAdapter):
    """Adapter that defers creating the real logger until first use."""

    def __init__(self, name='unknown', version='unknown'):
        self._logger = None
        self.extra = {}
        self.name = name
        self.version = version

    @property
    def logger(self):
        if not self._logger:
            self._logger = getLogger(self.name, self.version)
        return self._logger


class ContextAdapter(BaseLoggerAdapter):
    """Adapter injecting request context and instance info into records."""

    warn = logging.LoggerAdapter.warning

    def __init__(self, logger, project_name, version_string):
        self.logger = logger
        self.project = project_name
        self.version = version_string

    @property
    def handlers(self):
        return self.logger.handlers

    def deprecated(self, msg, *args, **kwargs):
        # Either warn or, if CONF.fatal_deprecations is set, log critical
        # and raise DeprecatedConfig.
        stdmsg = _("Deprecated: %s") % msg
        if CONF.fatal_deprecations:
            self.critical(stdmsg, *args, **kwargs)
            raise DeprecatedConfig(msg=stdmsg)
        else:
            self.warn(stdmsg, *args, **kwargs)

    def process(self, msg, kwargs):
        # NOTE(mrodden): catch any Message/other object and
        #                coerce to unicode before they can get
        #                to the python logging and possibly
        #                cause string encoding trouble
        if not isinstance(msg, six.string_types):
            msg = six.text_type(msg)

        if 'extra' not in kwargs:
            kwargs['extra'] = {}
        extra = kwargs['extra']

        # Context comes either from the keyword argument or from the
        # thread-local store populated elsewhere.
        context = kwargs.pop('context', None)
        if not context:
            context = getattr(local.store, 'context', None)
        if context:
            extra.update(_dictify_context(context))

        instance = kwargs.pop('instance', None)
        instance_uuid = (extra.get('instance_uuid', None) or
                         kwargs.pop('instance_uuid', None))
        instance_extra = ''
        if instance:
            instance_extra = CONF.instance_format % instance
        elif instance_uuid:
            instance_extra = (CONF.instance_uuid_format
                              % {'uuid': instance_uuid})
        extra['instance'] = instance_extra

        extra.setdefault('user_identity', kwargs.pop('user_identity', None))

        extra['project'] = self.project
        extra['version'] = self.version
        extra['extra'] = extra.copy()
        return msg, kwargs


class JSONFormatter(logging.Formatter):
    """Formatter emitting each record as a JSON document."""

    def __init__(self, fmt=None, datefmt=None):
        # NOTE(jkoelker) we ignore the fmt argument, but its still there
        #                since logging.config.fileConfig passes it.
        self.datefmt = datefmt

    def formatException(self, ei, strip_newlines=True):
        lines = traceback.format_exception(*ei)
        if strip_newlines:
            lines = [moves.filter(
                lambda x: x,
                line.rstrip().splitlines()) for line in lines]
            lines = list(itertools.chain(*lines))
        return lines

    def format(self, record):
        message = {'message': record.getMessage(),
                   'asctime': self.formatTime(record, self.datefmt),
                   'name': record.name,
                   'msg': record.msg,
                   'args': record.args,
                   'levelname': record.levelname,
                   'levelno': record.levelno,
                   'pathname': record.pathname,
                   'filename': record.filename,
                   'module': record.module,
                   'lineno': record.lineno,
                   'funcname': record.funcName,
                   'created': record.created,
                   'msecs': record.msecs,
                   'relative_created': record.relativeCreated,
                   'thread': record.thread,
                   'thread_name': record.threadName,
                   'process_name': record.processName,
                   'process': record.process,
                   'traceback': None}

        if hasattr(record, 'extra'):
            message['extra'] = record.extra

        if record.exc_info:
            message['traceback'] = self.formatException(record.exc_info)

        return jsonutils.dumps(message)


def _create_logging_excepthook(product_name):
    # sys.excepthook replacement: log uncaught exceptions as critical;
    # the full exc_info is attached only when CONF.verbose is set.
    def logging_excepthook(exc_type, value, tb):
        extra = {}
        if CONF.verbose:
            extra['exc_info'] = (exc_type, value, tb)
        getLogger(product_name).critical(str(value), **extra)
    return logging_excepthook


class LogConfigError(Exception):
    """Raised when the log-config-append file cannot be loaded."""

    message = _('Error loading logging config %(log_config)s: %(err_msg)s')

    def __init__(self, log_config, err_msg):
        self.log_config = log_config
        self.err_msg = err_msg

    def __str__(self):
        return self.message % dict(log_config=self.log_config,
                                   err_msg=self.err_msg)


def _load_log_config(log_config_append):
    try:
        logging.config.fileConfig(log_config_append,
                                  disable_existing_loggers=False)
    except moves.configparser.Error as exc:
        raise LogConfigError(log_config_append, str(exc))


def setup(product_name):
    """Setup logging."""
    if CONF.log_config_append:
        _load_log_config(CONF.log_config_append)
    else:
        _setup_logging_from_conf()
    sys.excepthook = _create_logging_excepthook(product_name)


def set_defaults(logging_context_format_string):
    """Override the default context format string config option."""
    cfg.set_defaults(log_opts,
                     logging_context_format_string=
                     logging_context_format_string)


def _find_facility_from_conf():
    """Map CONF.syslog_log_facility to a SysLogHandler facility value."""
    facility_names = logging.handlers.SysLogHandler.facility_names
    facility = getattr(logging.handlers.SysLogHandler,
                       CONF.syslog_log_facility,
                       None)

    if facility is None and CONF.syslog_log_facility in facility_names:
        facility = facility_names.get(CONF.syslog_log_facility)

    if facility is None:
        valid_facilities = facility_names.keys()
        consts = ['LOG_AUTH', 'LOG_AUTHPRIV', 'LOG_CRON', 'LOG_DAEMON',
                  'LOG_FTP', 'LOG_KERN', 'LOG_LPR', 'LOG_MAIL', 'LOG_NEWS',
                  'LOG_AUTH', 'LOG_SYSLOG', 'LOG_USER', 'LOG_UUCP',
                  'LOG_LOCAL0', 'LOG_LOCAL1', 'LOG_LOCAL2', 'LOG_LOCAL3',
                  'LOG_LOCAL4', 'LOG_LOCAL5', 'LOG_LOCAL6', 'LOG_LOCAL7']
        # NOTE(review): keys() only supports extend() on Python 2 where it
        # returns a list; on Python 3 this would raise — confirm target
        # interpreter before reuse.
        valid_facilities.extend(consts)
        raise TypeError(_('syslog facility must be one of: %s') %
                        ', '.join("'%s'" % fac
                                  for fac in valid_facilities))

    return facility


def _setup_logging_from_conf():
    # Rebuild the root handlers from scratch according to CONF.
    log_root = getLogger(None).logger
    for handler in log_root.handlers:
        log_root.removeHandler(handler)

    if CONF.use_syslog:
        facility = _find_facility_from_conf()
        syslog = logging.handlers.SysLogHandler(address='/dev/log',
                                                facility=facility)
        log_root.addHandler(syslog)

    logpath = _get_log_file_path()
    if logpath:
        filelog = logging.handlers.WatchedFileHandler(logpath)
        log_root.addHandler(filelog)

    if CONF.use_stderr:
        streamlog = ColorHandler()
        log_root.addHandler(streamlog)

    elif not logpath:
        # pass sys.stdout as a positional argument
        # python2.6 calls the argument strm, in 2.7 it's stream
        streamlog = logging.StreamHandler(sys.stdout)
        log_root.addHandler(streamlog)

    if CONF.publish_errors:
        handler = importutils.import_object(
            "neutron.openstack.common.log_handler.PublishErrorsHandler",
            logging.ERROR)
        log_root.addHandler(handler)

    datefmt = CONF.log_date_format
    for handler in log_root.handlers:
        # NOTE(alaski): CONF.log_format overrides everything currently.  This
        # should be deprecated in favor of context aware formatting.
        if CONF.log_format:
            handler.setFormatter(logging.Formatter(fmt=CONF.log_format,
                                                   datefmt=datefmt))
            log_root.info('Deprecated: log_format is now deprecated and will '
                          'be removed in the next release')
        else:
            handler.setFormatter(ContextFormatter(datefmt=datefmt))

    if CONF.debug:
        log_root.setLevel(logging.DEBUG)
    elif CONF.verbose:
        log_root.setLevel(logging.INFO)
    else:
        log_root.setLevel(logging.WARNING)

    for pair in CONF.default_log_levels:
        mod, _sep, level_name = pair.partition('=')
        level = logging.getLevelName(level_name)
        logger = logging.getLogger(mod)
        logger.setLevel(level)

# Cache of ContextAdapter instances, one per logger name.
_loggers = {}


def getLogger(name='unknown', version='unknown'):
    """Return (and cache) a ContextAdapter for the given logger name."""
    if name not in _loggers:
        _loggers[name] = ContextAdapter(logging.getLogger(name),
                                        name,
                                        version)
    return _loggers[name]


def getLazyLogger(name='unknown', version='unknown'):
    """Returns lazy logger.

    Creates a pass-through logger that does not create the real logger
    until it is really needed and delegates all calls to the real logger
    once it is created.
    """
    return LazyAdapter(name, version)


class WritableLogger(object):
    """A thin wrapper that responds to `write` and logs."""

    def __init__(self, logger, level=logging.INFO):
        self.logger = logger
        self.level = level

    def write(self, msg):
        self.logger.log(self.level, msg)


class ContextFormatter(logging.Formatter):
    """A context.RequestContext aware formatter configured through flags.

    The flags used to set format strings are: logging_context_format_string
    and logging_default_format_string.  You can also specify
    logging_debug_format_suffix to append extra formatting if the log level is
    debug.

    For information about what variables are available for the formatter see:
    http://docs.python.org/library/logging.html#formatter

    """

    def format(self, record):
        """Uses contextstring if request_id is set, otherwise default."""
        # NOTE(sdague): default the fancier formating params
        # to an empty string so we don't throw an exception if
        # they get used
        for key in ('instance', 'color'):
            if key not in record.__dict__:
                record.__dict__[key] = ''

        if record.__dict__.get('request_id', None):
            self._fmt = CONF.logging_context_format_string
        else:
            self._fmt = CONF.logging_default_format_string

        if (record.levelno == logging.DEBUG and
                CONF.logging_debug_format_suffix):
            self._fmt += " " + CONF.logging_debug_format_suffix

        # Cache this on the record, Logger will respect our formated copy
        if record.exc_info:
            record.exc_text = self.formatException(record.exc_info, record)
        return logging.Formatter.format(self, record)

    def formatException(self, exc_info, record=None):
        """Format exception output with CONF.logging_exception_prefix."""
        if not record:
            return logging.Formatter.formatException(self, exc_info)

        stringbuffer = moves.StringIO()
        traceback.print_exception(exc_info[0], exc_info[1], exc_info[2],
                                  None, stringbuffer)
        lines = stringbuffer.getvalue().split('\n')
        stringbuffer.close()

        if CONF.logging_exception_prefix.find('%(asctime)') != -1:
            record.asctime = self.formatTime(record, self.datefmt)

        formatted_lines = []
        for line in lines:
            pl = CONF.logging_exception_prefix % record.__dict__
            fl = '%s%s' % (pl, line)
            formatted_lines.append(fl)
        return '\n'.join(formatted_lines)


class ColorHandler(logging.StreamHandler):
    """Stream handler that prefixes records with an ANSI color per level."""

    LEVEL_COLORS = {
        logging.DEBUG: '\033[00;32m',  # GREEN
        logging.INFO: '\033[00;36m',  # CYAN
        logging.AUDIT: '\033[01;36m',  # BOLD CYAN
        logging.WARN: '\033[01;33m',  # BOLD YELLOW
        logging.ERROR: '\033[01;31m',  # BOLD RED
        logging.CRITICAL: '\033[01;31m',  # BOLD RED
    }

    def format(self, record):
        # NOTE(review): a record with a custom level not in LEVEL_COLORS
        # would raise KeyError here — confirm only standard levels reach
        # this handler.
        record.color = self.LEVEL_COLORS[record.levelno]
        return logging.StreamHandler.format(self, record)


class DeprecatedConfig(Exception):
    """Raised by ContextAdapter.deprecated when fatal_deprecations is set."""

    message = _("Fatal call to deprecated config: %(msg)s")

    def __init__(self, msg):
        super(Exception, self).__init__(self.message % dict(msg=msg))
+ +import logging + +from oslo.config import cfg + +from neutron.openstack.common import notifier + + +class PublishErrorsHandler(logging.Handler): + def emit(self, record): + if ('neutron.openstack.common.notifier.log_notifier' in + cfg.CONF.notification_driver): + return + notifier.api.notify(None, 'error.publisher', + 'error_notification', + notifier.api.ERROR, + dict(error=record.getMessage())) diff --git a/neutron/openstack/common/loopingcall.py b/neutron/openstack/common/loopingcall.py new file mode 100644 index 000000000..e588c8309 --- /dev/null +++ b/neutron/openstack/common/loopingcall.py @@ -0,0 +1,145 @@ +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# Copyright 2011 Justin Santa Barbara +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import sys + +from eventlet import event +from eventlet import greenthread + +from neutron.openstack.common.gettextutils import _ +from neutron.openstack.common import log as logging +from neutron.openstack.common import timeutils + +LOG = logging.getLogger(__name__) + + +class LoopingCallDone(Exception): + """Exception to break out and stop a LoopingCall. + + The poll-function passed to LoopingCall can raise this exception to + break out of the loop normally. This is somewhat analogous to + StopIteration. 
+ + An optional return-value can be included as the argument to the exception; + this return-value will be returned by LoopingCall.wait() + + """ + + def __init__(self, retvalue=True): + """:param retvalue: Value that LoopingCall.wait() should return.""" + self.retvalue = retvalue + + +class LoopingCallBase(object): + def __init__(self, f=None, *args, **kw): + self.args = args + self.kw = kw + self.f = f + self._running = False + self.done = None + + def stop(self): + self._running = False + + def wait(self): + return self.done.wait() + + +class FixedIntervalLoopingCall(LoopingCallBase): + """A fixed interval looping call.""" + + def start(self, interval, initial_delay=None): + self._running = True + done = event.Event() + + def _inner(): + if initial_delay: + greenthread.sleep(initial_delay) + + try: + while self._running: + start = timeutils.utcnow() + self.f(*self.args, **self.kw) + end = timeutils.utcnow() + if not self._running: + break + delay = interval - timeutils.delta_seconds(start, end) + if delay <= 0: + LOG.warn(_('task run outlasted interval by %s sec') % + -delay) + greenthread.sleep(delay if delay > 0 else 0) + except LoopingCallDone as e: + self.stop() + done.send(e.retvalue) + except Exception: + LOG.exception(_('in fixed duration looping call')) + done.send_exception(*sys.exc_info()) + return + else: + done.send(True) + + self.done = done + + greenthread.spawn_n(_inner) + return self.done + + +# TODO(mikal): this class name is deprecated in Havana and should be removed +# in the I release +LoopingCall = FixedIntervalLoopingCall + + +class DynamicLoopingCall(LoopingCallBase): + """A looping call which sleeps until the next known event. + + The function called should return how long to sleep for before being + called again. 
+ """ + + def start(self, initial_delay=None, periodic_interval_max=None): + self._running = True + done = event.Event() + + def _inner(): + if initial_delay: + greenthread.sleep(initial_delay) + + try: + while self._running: + idle = self.f(*self.args, **self.kw) + if not self._running: + break + + if periodic_interval_max is not None: + idle = min(idle, periodic_interval_max) + LOG.debug(_('Dynamic looping call sleeping for %.02f ' + 'seconds'), idle) + greenthread.sleep(idle) + except LoopingCallDone as e: + self.stop() + done.send(e.retvalue) + except Exception: + LOG.exception(_('in dynamic looping call')) + done.send_exception(*sys.exc_info()) + return + else: + done.send(True) + + self.done = done + + greenthread.spawn(_inner) + return self.done diff --git a/neutron/openstack/common/middleware/__init__.py b/neutron/openstack/common/middleware/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/neutron/openstack/common/middleware/audit.py b/neutron/openstack/common/middleware/audit.py new file mode 100644 index 000000000..5d8da5244 --- /dev/null +++ b/neutron/openstack/common/middleware/audit.py @@ -0,0 +1,44 @@ +# Copyright (c) 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Attach open standard audit information to request.environ + +AuditMiddleware filter should be place after Keystone's auth_token middleware +in the pipeline so that it can utilise the information Keystone provides. 
+ +""" +from pycadf.audit import api as cadf_api + +from neutron.openstack.common.middleware import notifier + + +class AuditMiddleware(notifier.RequestNotifier): + + def __init__(self, app, **conf): + super(AuditMiddleware, self).__init__(app, **conf) + self.cadf_audit = cadf_api.OpenStackAuditApi() + + @notifier.log_and_ignore_error + def process_request(self, request): + self.cadf_audit.append_audit_event(request) + super(AuditMiddleware, self).process_request(request) + + @notifier.log_and_ignore_error + def process_response(self, request, response, + exception=None, traceback=None): + self.cadf_audit.mod_audit_event(request, response) + super(AuditMiddleware, self).process_response(request, response, + exception, traceback) diff --git a/neutron/openstack/common/middleware/base.py b/neutron/openstack/common/middleware/base.py new file mode 100644 index 000000000..464a1ccd7 --- /dev/null +++ b/neutron/openstack/common/middleware/base.py @@ -0,0 +1,56 @@ +# Copyright 2011 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Base class(es) for WSGI Middleware.""" + +import webob.dec + + +class Middleware(object): + """Base WSGI middleware wrapper. + + These classes require an application to be initialized that will be called + next. By default the middleware will simply call its wrapped app, or you + can override __call__ to customize its behavior. 
+ """ + + @classmethod + def factory(cls, global_conf, **local_conf): + """Factory method for paste.deploy.""" + return cls + + def __init__(self, application): + self.application = application + + def process_request(self, req): + """Called on each request. + + If this returns None, the next application down the stack will be + executed. If it returns a response then that response will be returned + and execution will stop here. + """ + return None + + def process_response(self, response): + """Do whatever you'd like to the response.""" + return response + + @webob.dec.wsgify + def __call__(self, req): + response = self.process_request(req) + if response: + return response + response = req.get_response(self.application) + return self.process_response(response) diff --git a/neutron/openstack/common/middleware/catch_errors.py b/neutron/openstack/common/middleware/catch_errors.py new file mode 100644 index 000000000..b692aeec8 --- /dev/null +++ b/neutron/openstack/common/middleware/catch_errors.py @@ -0,0 +1,43 @@ +# Copyright (c) 2013 NEC Corporation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Middleware that provides high-level error handling. + +It catches all exceptions from subsequent applications in WSGI pipeline +to hide internal errors from API response. 
+""" + +import webob.dec +import webob.exc + +from neutron.openstack.common.gettextutils import _ # noqa +from neutron.openstack.common import log as logging +from neutron.openstack.common.middleware import base + + +LOG = logging.getLogger(__name__) + + +class CatchErrorsMiddleware(base.Middleware): + + @webob.dec.wsgify + def __call__(self, req): + try: + response = req.get_response(self.application) + except Exception: + LOG.exception(_('An error occurred during ' + 'processing the request: %s')) + response = webob.exc.HTTPInternalServerError() + return response diff --git a/neutron/openstack/common/middleware/correlation_id.py b/neutron/openstack/common/middleware/correlation_id.py new file mode 100644 index 000000000..80ee63f76 --- /dev/null +++ b/neutron/openstack/common/middleware/correlation_id.py @@ -0,0 +1,28 @@ +# Copyright (c) 2013 Rackspace Hosting +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +"""Middleware that attaches a correlation id to WSGI request""" + +import uuid + +from neutron.openstack.common.middleware import base + + +class CorrelationIdMiddleware(base.Middleware): + + def process_request(self, req): + correlation_id = (req.headers.get("X_CORRELATION_ID") or + str(uuid.uuid4())) + req.headers['X_CORRELATION_ID'] = correlation_id diff --git a/neutron/openstack/common/middleware/debug.py b/neutron/openstack/common/middleware/debug.py new file mode 100644 index 000000000..5ab960554 --- /dev/null +++ b/neutron/openstack/common/middleware/debug.py @@ -0,0 +1,60 @@ +# Copyright 2011 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Debug middleware""" + +from __future__ import print_function + +import sys + +import six +import webob.dec + +from neutron.openstack.common.middleware import base + + +class Debug(base.Middleware): + """Helper class that returns debug information. + + Can be inserted into any WSGI application chain to get information about + the request and response. 
+ """ + + @webob.dec.wsgify + def __call__(self, req): + print(("*" * 40) + " REQUEST ENVIRON") + for key, value in req.environ.items(): + print(key, "=", value) + print() + resp = req.get_response(self.application) + + print(("*" * 40) + " RESPONSE HEADERS") + for (key, value) in six.iteritems(resp.headers): + print(key, "=", value) + print() + + resp.app_iter = self.print_generator(resp.app_iter) + + return resp + + @staticmethod + def print_generator(app_iter): + """Prints the contents of a wrapper string iterator when iterated.""" + print(("*" * 40) + " BODY") + for part in app_iter: + sys.stdout.write(part) + sys.stdout.flush() + yield part + print() diff --git a/neutron/openstack/common/middleware/notifier.py b/neutron/openstack/common/middleware/notifier.py new file mode 100644 index 000000000..e34699c5a --- /dev/null +++ b/neutron/openstack/common/middleware/notifier.py @@ -0,0 +1,126 @@ +# Copyright (c) 2013 eNovance +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +""" +Send notifications on request + +""" +import os.path +import sys +import traceback as tb + +import six +import webob.dec + +from neutron.openstack.common import context +from neutron.openstack.common.gettextutils import _LE +from neutron.openstack.common import log as logging +from neutron.openstack.common.middleware import base +from neutron.openstack.common.notifier import api + +LOG = logging.getLogger(__name__) + + +def log_and_ignore_error(fn): + def wrapped(*args, **kwargs): + try: + return fn(*args, **kwargs) + except Exception as e: + LOG.exception(_LE('An exception occurred processing ' + 'the API call: %s ') % e) + return wrapped + + +class RequestNotifier(base.Middleware): + """Send notification on request.""" + + @classmethod + def factory(cls, global_conf, **local_conf): + """Factory method for paste.deploy.""" + conf = global_conf.copy() + conf.update(local_conf) + + def _factory(app): + return cls(app, **conf) + return _factory + + def __init__(self, app, **conf): + self.service_name = conf.get('service_name') + self.ignore_req_list = [x.upper().strip() for x in + conf.get('ignore_req_list', '').split(',')] + super(RequestNotifier, self).__init__(app) + + @staticmethod + def environ_to_dict(environ): + """Following PEP 333, server variables are lower case, so don't + include them. 
+ + """ + return dict((k, v) for k, v in six.iteritems(environ) + if k.isupper() and k != 'HTTP_X_AUTH_TOKEN') + + @log_and_ignore_error + def process_request(self, request): + request.environ['HTTP_X_SERVICE_NAME'] = \ + self.service_name or request.host + payload = { + 'request': self.environ_to_dict(request.environ), + } + + api.notify(context.get_admin_context(), + api.publisher_id(os.path.basename(sys.argv[0])), + 'http.request', + api.INFO, + payload) + + @log_and_ignore_error + def process_response(self, request, response, + exception=None, traceback=None): + payload = { + 'request': self.environ_to_dict(request.environ), + } + + if response: + payload['response'] = { + 'status': response.status, + 'headers': response.headers, + } + + if exception: + payload['exception'] = { + 'value': repr(exception), + 'traceback': tb.format_tb(traceback) + } + + api.notify(context.get_admin_context(), + api.publisher_id(os.path.basename(sys.argv[0])), + 'http.response', + api.INFO, + payload) + + @webob.dec.wsgify + def __call__(self, req): + if req.method in self.ignore_req_list: + return req.get_response(self.application) + else: + self.process_request(req) + try: + response = req.get_response(self.application) + except Exception: + exc_type, value, traceback = sys.exc_info() + self.process_response(req, None, value, traceback) + raise + else: + self.process_response(req, response) + return response diff --git a/neutron/openstack/common/middleware/request_id.py b/neutron/openstack/common/middleware/request_id.py new file mode 100644 index 000000000..5c2620cce --- /dev/null +++ b/neutron/openstack/common/middleware/request_id.py @@ -0,0 +1,41 @@ +# Copyright (c) 2013 NEC Corporation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
# You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Middleware that ensures request ID.

It ensures to assign request ID for each API request and set it to
request environment. The request ID is also added to API response.
"""

import webob.dec

from neutron.openstack.common import context
from neutron.openstack.common.middleware import base


ENV_REQUEST_ID = 'openstack.request_id'
HTTP_RESP_HEADER_REQUEST_ID = 'x-openstack-request-id'


class RequestIdMiddleware(base.Middleware):
    """Tag each request with a generated id and echo it on the response."""

    @webob.dec.wsgify
    def __call__(self, req):
        rid = context.generate_request_id()
        req.environ[ENV_REQUEST_ID] = rid
        resp = req.get_response(self.application)
        # Preserve an id already set by a downstream application.
        if HTTP_RESP_HEADER_REQUEST_ID not in resp.headers:
            resp.headers.add(HTTP_RESP_HEADER_REQUEST_ID, rid)
        return resp
# (next hunk: neutron/openstack/common/middleware/sizelimit.py)
# Copyright (c) 2012 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Request Body limiting middleware.

"""

from oslo.config import cfg
import webob.dec
import webob.exc

from neutron.openstack.common.gettextutils import _
from neutron.openstack.common.middleware import base


#default request size is 112k
max_req_body_size = cfg.IntOpt('max_request_body_size',
                               deprecated_name='osapi_max_request_body_size',
                               default=114688,
                               help='the maximum body size '
                                    'per each request(bytes)')

CONF = cfg.CONF
CONF.register_opt(max_req_body_size)


class LimitingReader(object):
    """Reader to limit the size of an incoming request."""

    def __init__(self, data, limit):
        """Initiates LimitingReader object.

        :param data: Underlying data object
        :param limit: maximum number of bytes the reader should allow
        """
        self.data = data
        self.limit = limit
        # Running total across both iteration and read() calls.
        self.bytes_read = 0

    def __iter__(self):
        for chunk in self.data:
            self.bytes_read += len(chunk)
            if self.bytes_read > self.limit:
                msg = _("Request is too large.")
                raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg)
            else:
                yield chunk

    def read(self, i=None):
        result = self.data.read(i)
        self.bytes_read += len(result)
        if self.bytes_read > self.limit:
            msg = _("Request is too large.")
            raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg)
        return result


class RequestBodySizeLimiter(base.Middleware):
    """Limit the size of incoming requests."""

    @webob.dec.wsgify
    def __call__(self, req):
        # Explicit None guard: on Python 2 ``None > int`` was quietly
        # False; on Python 3 it raises TypeError, so make the intent
        # (a missing Content-Length passes this check) explicit.
        if (req.content_length is not None and
                req.content_length > CONF.max_request_body_size):
            msg = _("Request is too large.")
            raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg)
        if req.content_length is None and req.is_body_readable:
            limiter = LimitingReader(req.body_file,
                                     CONF.max_request_body_size)
            req.body_file = limiter
        return self.application
# (next hunk: neutron/openstack/common/network_utils.py)
# Copyright 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Network-related utilities and helper functions.
"""

# Bind the split/urlsplit implementation directly from the stdlib instead
# of via six.moves (resolving the TODO that tracked the six pull request):
# Python 3 provides urllib.parse, Python 2 provides the equivalent urlparse.
try:
    from urllib import parse
except ImportError:
    import urlparse as parse

SplitResult = parse.SplitResult


def parse_host_port(address, default_port=None):
    """Interpret a string as a host:port pair.

    An IPv6 address MUST be escaped if accompanied by a port,
    because otherwise ambiguity ensues: 2001:db8:85a3::8a2e:370:7334
    means both [2001:db8:85a3::8a2e:370:7334] and
    [2001:db8:85a3::8a2e:370]:7334.

    >>> parse_host_port('server01:80')
    ('server01', 80)
    >>> parse_host_port('server01')
    ('server01', None)
    >>> parse_host_port('server01', default_port=1234)
    ('server01', 1234)
    >>> parse_host_port('[::1]:80')
    ('::1', 80)
    >>> parse_host_port('[::1]')
    ('::1', None)
    >>> parse_host_port('[::1]', default_port=1234)
    ('::1', 1234)
    >>> parse_host_port('2001:db8:85a3::8a2e:370:7334', default_port=1234)
    ('2001:db8:85a3::8a2e:370:7334', 1234)

    """
    if address[0] == '[':
        # Escaped ipv6
        _host, _port = address[1:].split(']')
        host = _host
        if ':' in _port:
            port = _port.split(':')[1]
        else:
            port = default_port
    else:
        if address.count(':') == 1:
            host, port = address.split(':')
        else:
            # 0 means ipv4, >1 means ipv6.
            # We prohibit unescaped ipv6 addresses with port.
            host = address
            port = default_port

    return (host, None if port is None else int(port))


def urlsplit(url, scheme='', allow_fragments=True):
    """Parse a URL using urlparse.urlsplit(), splitting query and fragments.
    This function papers over Python issue9374 when needed.

    The parameters are the same as urlparse.urlsplit.
    """
    scheme, netloc, path, query, fragment = parse.urlsplit(
        url, scheme, allow_fragments)
    if allow_fragments and '#' in path:
        path, fragment = path.split('#', 1)
    if '?' in path:
        path, query = path.split('?', 1)
    return SplitResult(scheme, netloc, path, query, fragment)
# (next hunk: neutron/openstack/common/periodic_task.py -- Apache 2.0 header)
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import time + +from oslo.config import cfg +import six + +from neutron.openstack.common.gettextutils import _, _LE, _LI +from neutron.openstack.common import log as logging + + +periodic_opts = [ + cfg.BoolOpt('run_external_periodic_tasks', + default=True, + help='Some periodic tasks can be run in a separate process. ' + 'Should we run them here?'), +] + +CONF = cfg.CONF +CONF.register_opts(periodic_opts) + +LOG = logging.getLogger(__name__) + +DEFAULT_INTERVAL = 60.0 + + +class InvalidPeriodicTaskArg(Exception): + message = _("Unexpected argument for periodic task creation: %(arg)s.") + + +def periodic_task(*args, **kwargs): + """Decorator to indicate that a method is a periodic task. + + This decorator can be used in two ways: + + 1. Without arguments '@periodic_task', this will be run on the default + interval of 60 seconds. + + 2. With arguments: + @periodic_task(spacing=N [, run_immediately=[True|False]]) + this will be run on approximately every N seconds. If this number is + negative the periodic task will be disabled. If the run_immediately + argument is provided and has a value of 'True', the first run of the + task will be shortly after task scheduler starts. If + run_immediately is omitted or set to 'False', the first time the + task runs will be approximately N seconds after the task scheduler + starts. 
+ """ + def decorator(f): + # Test for old style invocation + if 'ticks_between_runs' in kwargs: + raise InvalidPeriodicTaskArg(arg='ticks_between_runs') + + # Control if run at all + f._periodic_task = True + f._periodic_external_ok = kwargs.pop('external_process_ok', False) + if f._periodic_external_ok and not CONF.run_external_periodic_tasks: + f._periodic_enabled = False + else: + f._periodic_enabled = kwargs.pop('enabled', True) + + # Control frequency + f._periodic_spacing = kwargs.pop('spacing', 0) + f._periodic_immediate = kwargs.pop('run_immediately', False) + if f._periodic_immediate: + f._periodic_last_run = None + else: + f._periodic_last_run = time.time() + return f + + # NOTE(sirp): The `if` is necessary to allow the decorator to be used with + # and without parents. + # + # In the 'with-parents' case (with kwargs present), this function needs to + # return a decorator function since the interpreter will invoke it like: + # + # periodic_task(*args, **kwargs)(f) + # + # In the 'without-parents' case, the original function will be passed + # in as the first argument, like: + # + # periodic_task(f) + if kwargs: + return decorator + else: + return decorator(args[0]) + + +class _PeriodicTasksMeta(type): + def __init__(cls, names, bases, dict_): + """Metaclass that allows us to collect decorated periodic tasks.""" + super(_PeriodicTasksMeta, cls).__init__(names, bases, dict_) + + # NOTE(sirp): if the attribute is not present then we must be the base + # class, so, go ahead an initialize it. If the attribute is present, + # then we're a subclass so make a copy of it so we don't step on our + # parent's toes. 
+ try: + cls._periodic_tasks = cls._periodic_tasks[:] + except AttributeError: + cls._periodic_tasks = [] + + try: + cls._periodic_spacing = cls._periodic_spacing.copy() + except AttributeError: + cls._periodic_spacing = {} + + for value in cls.__dict__.values(): + if getattr(value, '_periodic_task', False): + task = value + name = task.__name__ + + if task._periodic_spacing < 0: + LOG.info(_LI('Skipping periodic task %(task)s because ' + 'its interval is negative'), + {'task': name}) + continue + if not task._periodic_enabled: + LOG.info(_LI('Skipping periodic task %(task)s because ' + 'it is disabled'), + {'task': name}) + continue + + # A periodic spacing of zero indicates that this task should + # be run on the default interval to avoid running too + # frequently. + if task._periodic_spacing == 0: + task._periodic_spacing = DEFAULT_INTERVAL + + cls._periodic_tasks.append((name, task)) + cls._periodic_spacing[name] = task._periodic_spacing + + +@six.add_metaclass(_PeriodicTasksMeta) +class PeriodicTasks(object): + def __init__(self): + super(PeriodicTasks, self).__init__() + self._periodic_last_run = {} + for name, task in self._periodic_tasks: + self._periodic_last_run[name] = task._periodic_last_run + + def run_periodic_tasks(self, context, raise_on_error=False): + """Tasks to be run at a periodic interval.""" + idle_for = DEFAULT_INTERVAL + for task_name, task in self._periodic_tasks: + full_task_name = '.'.join([self.__class__.__name__, task_name]) + + spacing = self._periodic_spacing[task_name] + last_run = self._periodic_last_run[task_name] + + # If a periodic task is _nearly_ due, then we'll run it early + idle_for = min(idle_for, spacing) + if last_run is not None: + delta = last_run + spacing - time.time() + if delta > 0.2: + idle_for = min(idle_for, delta) + continue + + LOG.debug("Running periodic task %(full_task_name)s", + {"full_task_name": full_task_name}) + self._periodic_last_run[task_name] = time.time() + + try: + task(self, context) + except 
Exception as e: + if raise_on_error: + raise + LOG.exception(_LE("Error during %(full_task_name)s: %(e)s"), + {"full_task_name": full_task_name, "e": e}) + time.sleep(0) + + return idle_for diff --git a/neutron/openstack/common/policy.py b/neutron/openstack/common/policy.py new file mode 100644 index 000000000..21ce9b80a --- /dev/null +++ b/neutron/openstack/common/policy.py @@ -0,0 +1,780 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2012 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Common Policy Engine Implementation + +Policies can be expressed in one of two forms: A list of lists, or a +string written in the new policy language. + +In the list-of-lists representation, each check inside the innermost +list is combined as with an "and" conjunction--for that check to pass, +all the specified checks must pass. These innermost lists are then +combined as with an "or" conjunction. This is the original way of +expressing policies, but there now exists a new way: the policy +language. + +In the policy language, each check is specified the same way as in the +list-of-lists representation: a simple "a:b" pair that is matched to +the correct code to perform that check. However, conjunction +operators are available, allowing for more expressiveness in crafting +policies. 
+ +As an example, take the following rule, expressed in the list-of-lists +representation:: + + [["role:admin"], ["project_id:%(project_id)s", "role:projectadmin"]] + +In the policy language, this becomes:: + + role:admin or (project_id:%(project_id)s and role:projectadmin) + +The policy language also has the "not" operator, allowing a richer +policy rule:: + + project_id:%(project_id)s and not role:dunce + +Finally, two special policy checks should be mentioned; the policy +check "@" will always accept an access, and the policy check "!" will +always reject an access. (Note that if a rule is either the empty +list ("[]") or the empty string, this is equivalent to the "@" policy +check.) Of these, the "!" policy check is probably the most useful, +as it allows particular rules to be explicitly disabled. +""" + +import abc +import re +import urllib + +import six +import urllib2 + +from neutron.openstack.common.gettextutils import _ +from neutron.openstack.common import jsonutils +from neutron.openstack.common import log as logging + + +LOG = logging.getLogger(__name__) + + +_rules = None +_checks = {} + + +class Rules(dict): + """ + A store for rules. Handles the default_rule setting directly. + """ + + @classmethod + def load_json(cls, data, default_rule=None): + """ + Allow loading of JSON rule data. 
+ """ + + # Suck in the JSON data and parse the rules + rules = dict((k, parse_rule(v)) for k, v in + jsonutils.loads(data).items()) + + return cls(rules, default_rule) + + def __init__(self, rules=None, default_rule=None): + """Initialize the Rules store.""" + + super(Rules, self).__init__(rules or {}) + self.default_rule = default_rule + + def __missing__(self, key): + """Implements the default rule handling.""" + + # If the default rule isn't actually defined, do something + # reasonably intelligent + if not self.default_rule or self.default_rule not in self: + raise KeyError(key) + + return self[self.default_rule] + + def __str__(self): + """Dumps a string representation of the rules.""" + + # Start by building the canonical strings for the rules + out_rules = {} + for key, value in self.items(): + # Use empty string for singleton TrueCheck instances + if isinstance(value, TrueCheck): + out_rules[key] = '' + else: + out_rules[key] = str(value) + + # Dump a pretty-printed JSON representation + return jsonutils.dumps(out_rules, indent=4) + + +# Really have to figure out a way to deprecate this +def set_rules(rules): + """Set the rules in use for policy checks.""" + + global _rules + + _rules = rules + + +# Ditto +def reset(): + """Clear the rules used for policy checks.""" + + global _rules + + _rules = None + + +def check(rule, target, creds, exc=None, *args, **kwargs): + """ + Checks authorization of a rule against the target and credentials. + + :param rule: The rule to evaluate. + :param target: As much information about the object being operated + on as possible, as a dictionary. + :param creds: As much information about the user performing the + action as possible, as a dictionary. + :param exc: Class of the exception to raise if the check fails. + Any remaining arguments passed to check() (both + positional and keyword arguments) will be passed to + the exception class. If exc is not provided, returns + False. 
+ + :return: Returns False if the policy does not allow the action and + exc is not provided; otherwise, returns a value that + evaluates to True. Note: for rules using the "case" + expression, this True value will be the specified string + from the expression. + """ + + # Allow the rule to be a Check tree + if isinstance(rule, BaseCheck): + result = rule(target, creds) + elif not _rules: + # No rules to reference means we're going to fail closed + result = False + else: + try: + # Evaluate the rule + result = _rules[rule](target, creds) + except KeyError: + # If the rule doesn't exist, fail closed + result = False + + # If it is False, raise the exception if requested + if exc and result is False: + raise exc(*args, **kwargs) + + return result + + +class BaseCheck(object): + """ + Abstract base class for Check classes. + """ + + __metaclass__ = abc.ABCMeta + + @abc.abstractmethod + def __str__(self): + """ + Retrieve a string representation of the Check tree rooted at + this node. + """ + + pass + + @abc.abstractmethod + def __call__(self, target, cred): + """ + Perform the check. Returns False to reject the access or a + true value (not necessary True) to accept the access. + """ + + pass + + +class FalseCheck(BaseCheck): + """ + A policy check that always returns False (disallow). + """ + + def __str__(self): + """Return a string representation of this check.""" + + return "!" + + def __call__(self, target, cred): + """Check the policy.""" + + return False + + +class TrueCheck(BaseCheck): + """ + A policy check that always returns True (allow). + """ + + def __str__(self): + """Return a string representation of this check.""" + + return "@" + + def __call__(self, target, cred): + """Check the policy.""" + + return True + + +class Check(BaseCheck): + """ + A base class to allow for user-defined policy checks. + """ + + def __init__(self, kind, match): + """ + :param kind: The kind of the check, i.e., the field before the + ':'. 
+ :param match: The match of the check, i.e., the field after + the ':'. + """ + + self.kind = kind + self.match = match + + def __str__(self): + """Return a string representation of this check.""" + + return "%s:%s" % (self.kind, self.match) + + +class NotCheck(BaseCheck): + """ + A policy check that inverts the result of another policy check. + Implements the "not" operator. + """ + + def __init__(self, rule): + """ + Initialize the 'not' check. + + :param rule: The rule to negate. Must be a Check. + """ + + self.rule = rule + + def __str__(self): + """Return a string representation of this check.""" + + return "not %s" % self.rule + + def __call__(self, target, cred): + """ + Check the policy. Returns the logical inverse of the wrapped + check. + """ + + return not self.rule(target, cred) + + +class AndCheck(BaseCheck): + """ + A policy check that requires that a list of other checks all + return True. Implements the "and" operator. + """ + + def __init__(self, rules): + """ + Initialize the 'and' check. + + :param rules: A list of rules that will be tested. + """ + + self.rules = rules + + def __str__(self): + """Return a string representation of this check.""" + + return "(%s)" % ' and '.join(str(r) for r in self.rules) + + def __call__(self, target, cred): + """ + Check the policy. Requires that all rules accept in order to + return True. + """ + + for rule in self.rules: + if not rule(target, cred): + return False + + return True + + def add_check(self, rule): + """ + Allows addition of another rule to the list of rules that will + be tested. Returns the AndCheck object for convenience. + """ + + self.rules.append(rule) + return self + + +class OrCheck(BaseCheck): + """ + A policy check that requires that at least one of a list of other + checks returns True. Implements the "or" operator. + """ + + def __init__(self, rules): + """ + Initialize the 'or' check. + + :param rules: A list of rules that will be tested. 
+ """ + + self.rules = rules + + def __str__(self): + """Return a string representation of this check.""" + + return "(%s)" % ' or '.join(str(r) for r in self.rules) + + def __call__(self, target, cred): + """ + Check the policy. Requires that at least one rule accept in + order to return True. + """ + + for rule in self.rules: + if rule(target, cred): + return True + + return False + + def add_check(self, rule): + """ + Allows addition of another rule to the list of rules that will + be tested. Returns the OrCheck object for convenience. + """ + + self.rules.append(rule) + return self + + +def _parse_check(rule): + """ + Parse a single base check rule into an appropriate Check object. + """ + + # Handle the special checks + if rule == '!': + return FalseCheck() + elif rule == '@': + return TrueCheck() + + try: + kind, match = rule.split(':', 1) + except Exception: + LOG.exception(_("Failed to understand rule %(rule)s") % locals()) + # If the rule is invalid, we'll fail closed + return FalseCheck() + + # Find what implements the check + if kind in _checks: + return _checks[kind](kind, match) + elif None in _checks: + return _checks[None](kind, match) + else: + LOG.error(_("No handler for matches of kind %s") % kind) + return FalseCheck() + + +def _parse_list_rule(rule): + """ + Provided for backwards compatibility. Translates the old + list-of-lists syntax into a tree of Check objects. 
+ """ + + # Empty rule defaults to True + if not rule: + return TrueCheck() + + # Outer list is joined by "or"; inner list by "and" + or_list = [] + for inner_rule in rule: + # Elide empty inner lists + if not inner_rule: + continue + + # Handle bare strings + if isinstance(inner_rule, basestring): + inner_rule = [inner_rule] + + # Parse the inner rules into Check objects + and_list = [_parse_check(r) for r in inner_rule] + + # Append the appropriate check to the or_list + if len(and_list) == 1: + or_list.append(and_list[0]) + else: + or_list.append(AndCheck(and_list)) + + # If we have only one check, omit the "or" + if not or_list: + return FalseCheck() + elif len(or_list) == 1: + return or_list[0] + + return OrCheck(or_list) + + +# Used for tokenizing the policy language +_tokenize_re = re.compile(r'\s+') + + +def _parse_tokenize(rule): + """ + Tokenizer for the policy language. + + Most of the single-character tokens are specified in the + _tokenize_re; however, parentheses need to be handled specially, + because they can appear inside a check string. Thankfully, those + parentheses that appear inside a check string can never occur at + the very beginning or end ("%(variable)s" is the correct syntax). 
+ """ + + for tok in _tokenize_re.split(rule): + # Skip empty tokens + if not tok or tok.isspace(): + continue + + # Handle leading parens on the token + clean = tok.lstrip('(') + for i in range(len(tok) - len(clean)): + yield '(', '(' + + # If it was only parentheses, continue + if not clean: + continue + else: + tok = clean + + # Handle trailing parens on the token + clean = tok.rstrip(')') + trail = len(tok) - len(clean) + + # Yield the cleaned token + lowered = clean.lower() + if lowered in ('and', 'or', 'not'): + # Special tokens + yield lowered, clean + elif clean: + # Not a special token, but not composed solely of ')' + if len(tok) >= 2 and ((tok[0], tok[-1]) in + [('"', '"'), ("'", "'")]): + # It's a quoted string + yield 'string', tok[1:-1] + else: + yield 'check', _parse_check(clean) + + # Yield the trailing parens + for i in range(trail): + yield ')', ')' + + +class ParseStateMeta(type): + """ + Metaclass for the ParseState class. Facilitates identifying + reduction methods. + """ + + def __new__(mcs, name, bases, cls_dict): + """ + Create the class. Injects the 'reducers' list, a list of + tuples matching token sequences to the names of the + corresponding reduction methods. + """ + + reducers = [] + + for key, value in cls_dict.items(): + if not hasattr(value, 'reducers'): + continue + for reduction in value.reducers: + reducers.append((reduction, key)) + + cls_dict['reducers'] = reducers + + return super(ParseStateMeta, mcs).__new__(mcs, name, bases, cls_dict) + + +def reducer(*tokens): + """ + Decorator for reduction methods. Arguments are a sequence of + tokens, in order, which should trigger running this reduction + method. 
+ """ + + def decorator(func): + # Make sure we have a list of reducer sequences + if not hasattr(func, 'reducers'): + func.reducers = [] + + # Add the tokens to the list of reducer sequences + func.reducers.append(list(tokens)) + + return func + + return decorator + + +class ParseState(object): + """ + Implement the core of parsing the policy language. Uses a greedy + reduction algorithm to reduce a sequence of tokens into a single + terminal, the value of which will be the root of the Check tree. + + Note: error reporting is rather lacking. The best we can get with + this parser formulation is an overall "parse failed" error. + Fortunately, the policy language is simple enough that this + shouldn't be that big a problem. + """ + + __metaclass__ = ParseStateMeta + + def __init__(self): + """Initialize the ParseState.""" + + self.tokens = [] + self.values = [] + + def reduce(self): + """ + Perform a greedy reduction of the token stream. If a reducer + method matches, it will be executed, then the reduce() method + will be called recursively to search for any more possible + reductions. + """ + + for reduction, methname in self.reducers: + if (len(self.tokens) >= len(reduction) and + self.tokens[-len(reduction):] == reduction): + # Get the reduction method + meth = getattr(self, methname) + + # Reduce the token stream + results = meth(*self.values[-len(reduction):]) + + # Update the tokens and values + self.tokens[-len(reduction):] = [r[0] for r in results] + self.values[-len(reduction):] = [r[1] for r in results] + + # Check for any more reductions + return self.reduce() + + def shift(self, tok, value): + """Adds one more token to the state. Calls reduce().""" + + self.tokens.append(tok) + self.values.append(value) + + # Do a greedy reduce... + self.reduce() + + @property + def result(self): + """ + Obtain the final result of the parse. Raises ValueError if + the parse failed to reduce to a single result. 
+ """ + + if len(self.values) != 1: + raise ValueError("Could not parse rule") + return self.values[0] + + @reducer('(', 'check', ')') + @reducer('(', 'and_expr', ')') + @reducer('(', 'or_expr', ')') + def _wrap_check(self, _p1, check, _p2): + """Turn parenthesized expressions into a 'check' token.""" + + return [('check', check)] + + @reducer('check', 'and', 'check') + def _make_and_expr(self, check1, _and, check2): + """ + Create an 'and_expr' from two checks joined by the 'and' + operator. + """ + + return [('and_expr', AndCheck([check1, check2]))] + + @reducer('and_expr', 'and', 'check') + def _extend_and_expr(self, and_expr, _and, check): + """ + Extend an 'and_expr' by adding one more check. + """ + + return [('and_expr', and_expr.add_check(check))] + + @reducer('check', 'or', 'check') + def _make_or_expr(self, check1, _or, check2): + """ + Create an 'or_expr' from two checks joined by the 'or' + operator. + """ + + return [('or_expr', OrCheck([check1, check2]))] + + @reducer('or_expr', 'or', 'check') + def _extend_or_expr(self, or_expr, _or, check): + """ + Extend an 'or_expr' by adding one more check. + """ + + return [('or_expr', or_expr.add_check(check))] + + @reducer('not', 'check') + def _make_not_expr(self, _not, check): + """Invert the result of another check.""" + + return [('check', NotCheck(check))] + + +def _parse_text_rule(rule): + """ + Translates a policy written in the policy language into a tree of + Check objects. + """ + + # Empty rule means always accept + if not rule: + return TrueCheck() + + # Parse the token stream + state = ParseState() + for tok, value in _parse_tokenize(rule): + state.shift(tok, value) + + try: + return state.result + except ValueError: + # Couldn't parse the rule + LOG.exception(_("Failed to understand rule %(rule)r") % locals()) + + # Fail closed + return FalseCheck() + + +def parse_rule(rule): + """ + Parses a policy rule into a tree of Check objects. 
+ """ + + # If the rule is a string, it's in the policy language + if isinstance(rule, basestring): + return _parse_text_rule(rule) + return _parse_list_rule(rule) + + +def register(name, func=None): + """ + Register a function or Check class as a policy check. + + :param name: Gives the name of the check type, e.g., 'rule', + 'role', etc. If name is None, a default check type + will be registered. + :param func: If given, provides the function or class to register. + If not given, returns a function taking one argument + to specify the function or class to register, + allowing use as a decorator. + """ + + # Perform the actual decoration by registering the function or + # class. Returns the function or class for compliance with the + # decorator interface. + def decorator(func): + _checks[name] = func + return func + + # If the function or class is given, do the registration + if func: + return decorator(func) + + return decorator + + +@register("rule") +class RuleCheck(Check): + def __call__(self, target, creds): + """ + Recursively checks credentials based on the defined rules. + """ + + try: + return _rules[self.match](target, creds) + except KeyError: + # We don't have any matching rule; fail closed + return False + + +@register("role") +class RoleCheck(Check): + def __call__(self, target, creds): + """Check that there is a matching role in the cred dict.""" + + return self.match.lower() in [x.lower() for x in creds['roles']] + + +@register('http') +class HttpCheck(Check): + def __call__(self, target, creds): + """ + Check http: rules by calling to a remote server. + + This example implementation simply verifies that the response + is exactly 'True'. 
+ """ + + url = ('http:' + self.match) % target + data = {'target': jsonutils.dumps(target), + 'credentials': jsonutils.dumps(creds)} + post_data = urllib.urlencode(data) + f = urllib2.urlopen(url, post_data) + return f.read() == "True" + + +@register(None) +class GenericCheck(Check): + def __call__(self, target, creds): + """ + Check an individual match. + + Matches look like: + + tenant:%(tenant_id)s + role:compute:admin + """ + + # TODO(termie): do dict inspection via dot syntax + match = self.match % target + if self.kind in creds: + return match == six.text_type(creds[self.kind]) + return False diff --git a/neutron/openstack/common/processutils.py b/neutron/openstack/common/processutils.py new file mode 100644 index 000000000..039b9ad46 --- /dev/null +++ b/neutron/openstack/common/processutils.py @@ -0,0 +1,248 @@ +# Copyright 2011 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +System-level utilities and helper functions. 
+""" + +import logging as stdlib_logging +import os +import random +import shlex +import signal + +from eventlet.green import subprocess +from eventlet import greenthread + +from neutron.openstack.common.gettextutils import _ +from neutron.openstack.common import log as logging + + +LOG = logging.getLogger(__name__) + + +class InvalidArgumentError(Exception): + def __init__(self, message=None): + super(InvalidArgumentError, self).__init__(message) + + +class UnknownArgumentError(Exception): + def __init__(self, message=None): + super(UnknownArgumentError, self).__init__(message) + + +class ProcessExecutionError(Exception): + def __init__(self, stdout=None, stderr=None, exit_code=None, cmd=None, + description=None): + self.exit_code = exit_code + self.stderr = stderr + self.stdout = stdout + self.cmd = cmd + self.description = description + + if description is None: + description = "Unexpected error while running command." + if exit_code is None: + exit_code = '-' + message = ("%s\nCommand: %s\nExit code: %s\nStdout: %r\nStderr: %r" + % (description, cmd, exit_code, stdout, stderr)) + super(ProcessExecutionError, self).__init__(message) + + +class NoRootWrapSpecified(Exception): + def __init__(self, message=None): + super(NoRootWrapSpecified, self).__init__(message) + + +def _subprocess_setup(): + # Python installs a SIGPIPE handler by default. This is usually not what + # non-Python subprocesses expect. + signal.signal(signal.SIGPIPE, signal.SIG_DFL) + + +def execute(*cmd, **kwargs): + """Helper method to shell out and execute a command through subprocess. + + Allows optional retry. + + :param cmd: Passed to subprocess.Popen. + :type cmd: string + :param process_input: Send to opened process. + :type process_input: string + :param check_exit_code: Single bool, int, or list of allowed exit + codes. Defaults to [0]. Raise + :class:`ProcessExecutionError` unless + program exits with one of these code. 
+ :type check_exit_code: boolean, int, or [int] + :param delay_on_retry: True | False. Defaults to True. If set to True, + wait a short amount of time before retrying. + :type delay_on_retry: boolean + :param attempts: How many times to retry cmd. + :type attempts: int + :param run_as_root: True | False. Defaults to False. If set to True, + the command is prefixed by the command specified + in the root_helper kwarg. + :type run_as_root: boolean + :param root_helper: command to prefix to commands called with + run_as_root=True + :type root_helper: string + :param shell: whether or not there should be a shell used to + execute this command. Defaults to false. + :type shell: boolean + :param loglevel: log level for execute commands. + :type loglevel: int. (Should be stdlib_logging.DEBUG or + stdlib_logging.INFO) + :returns: (stdout, stderr) from process execution + :raises: :class:`UnknownArgumentError` on + receiving unknown arguments + :raises: :class:`ProcessExecutionError` + """ + + process_input = kwargs.pop('process_input', None) + check_exit_code = kwargs.pop('check_exit_code', [0]) + ignore_exit_code = False + delay_on_retry = kwargs.pop('delay_on_retry', True) + attempts = kwargs.pop('attempts', 1) + run_as_root = kwargs.pop('run_as_root', False) + root_helper = kwargs.pop('root_helper', '') + shell = kwargs.pop('shell', False) + loglevel = kwargs.pop('loglevel', stdlib_logging.DEBUG) + + if isinstance(check_exit_code, bool): + ignore_exit_code = not check_exit_code + check_exit_code = [0] + elif isinstance(check_exit_code, int): + check_exit_code = [check_exit_code] + + if kwargs: + raise UnknownArgumentError(_('Got unknown keyword args ' + 'to utils.execute: %r') % kwargs) + + if run_as_root and hasattr(os, 'geteuid') and os.geteuid() != 0: + if not root_helper: + raise NoRootWrapSpecified( + message=('Command requested root, but did not specify a root ' + 'helper.')) + cmd = shlex.split(root_helper) + list(cmd) + + cmd = map(str, cmd) + + while attempts > 
0: + attempts -= 1 + try: + LOG.log(loglevel, _('Running cmd (subprocess): %s'), ' '.join(cmd)) + _PIPE = subprocess.PIPE # pylint: disable=E1101 + + if os.name == 'nt': + preexec_fn = None + close_fds = False + else: + preexec_fn = _subprocess_setup + close_fds = True + + obj = subprocess.Popen(cmd, + stdin=_PIPE, + stdout=_PIPE, + stderr=_PIPE, + close_fds=close_fds, + preexec_fn=preexec_fn, + shell=shell) + result = None + if process_input is not None: + result = obj.communicate(process_input) + else: + result = obj.communicate() + obj.stdin.close() # pylint: disable=E1101 + _returncode = obj.returncode # pylint: disable=E1101 + LOG.log(loglevel, _('Result was %s') % _returncode) + if not ignore_exit_code and _returncode not in check_exit_code: + (stdout, stderr) = result + raise ProcessExecutionError(exit_code=_returncode, + stdout=stdout, + stderr=stderr, + cmd=' '.join(cmd)) + return result + except ProcessExecutionError: + if not attempts: + raise + else: + LOG.log(loglevel, _('%r failed. Retrying.'), cmd) + if delay_on_retry: + greenthread.sleep(random.randint(20, 200) / 100.0) + finally: + # NOTE(termie): this appears to be necessary to let the subprocess + # call clean something up in between calls, without + # it two execute calls in a row hangs the second one + greenthread.sleep(0) + + +def trycmd(*args, **kwargs): + """A wrapper around execute() to more easily handle warnings and errors. + + Returns an (out, err) tuple of strings containing the output of + the command's stdout and stderr. If 'err' is not empty then the + command can be considered to have failed. + + :discard_warnings True | False. Defaults to False. 
If set to True, + then for succeeding commands, stderr is cleared + + """ + discard_warnings = kwargs.pop('discard_warnings', False) + + try: + out, err = execute(*args, **kwargs) + failed = False + except ProcessExecutionError as exn: + out, err = '', str(exn) + failed = True + + if not failed and discard_warnings and err: + # Handle commands that output to stderr but otherwise succeed + err = '' + + return out, err + + +def ssh_execute(ssh, cmd, process_input=None, + addl_env=None, check_exit_code=True): + LOG.debug(_('Running cmd (SSH): %s'), cmd) + if addl_env: + raise InvalidArgumentError(_('Environment not supported over SSH')) + + if process_input: + # This is (probably) fixable if we need it... + raise InvalidArgumentError(_('process_input not supported over SSH')) + + stdin_stream, stdout_stream, stderr_stream = ssh.exec_command(cmd) + channel = stdout_stream.channel + + # NOTE(justinsb): This seems suspicious... + # ...other SSH clients have buffering issues with this approach + stdout = stdout_stream.read() + stderr = stderr_stream.read() + stdin_stream.close() + + exit_status = channel.recv_exit_status() + + # exit_status == -1 if no exit code was returned + if exit_status != -1: + LOG.debug(_('Result was %s') % exit_status) + if check_exit_code and exit_status != 0: + raise ProcessExecutionError(exit_code=exit_status, + stdout=stdout, + stderr=stderr, + cmd=cmd) + + return (stdout, stderr) diff --git a/neutron/openstack/common/service.py b/neutron/openstack/common/service.py new file mode 100644 index 000000000..79ae9bc5d --- /dev/null +++ b/neutron/openstack/common/service.py @@ -0,0 +1,512 @@ +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# Copyright 2011 Justin Santa Barbara +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Generic Node base class for all workers that run on hosts.""" + +import errno +import logging as std_logging +import os +import random +import signal +import sys +import time + +try: + # Importing just the symbol here because the io module does not + # exist in Python 2.6. + from io import UnsupportedOperation # noqa +except ImportError: + # Python 2.6 + UnsupportedOperation = None + +import eventlet +from eventlet import event +from oslo.config import cfg + +from neutron.openstack.common import eventlet_backdoor +from neutron.openstack.common.gettextutils import _LE, _LI, _LW +from neutron.openstack.common import importutils +from neutron.openstack.common import log as logging +from neutron.openstack.common import systemd +from neutron.openstack.common import threadgroup + + +rpc = importutils.try_import('neutron.openstack.common.rpc') +CONF = cfg.CONF +LOG = logging.getLogger(__name__) + + +def _sighup_supported(): + return hasattr(signal, 'SIGHUP') + + +def _is_daemon(): + # The process group for a foreground process will match the + # process group of the controlling terminal. If those values do + # not match, or ioctl() fails on the stdout file handle, we assume + # the process is running in the background as a daemon. + # http://www.gnu.org/software/bash/manual/bashref.html#Job-Control-Basics + try: + is_daemon = os.getpgrp() != os.tcgetpgrp(sys.stdout.fileno()) + except OSError as err: + if err.errno == errno.ENOTTY: + # Assume we are a daemon because there is no terminal. 
            is_daemon = True
        else:
            raise
    except UnsupportedOperation:
        # Could not get the fileno for stdout, so we must be a daemon.
        is_daemon = True
    return is_daemon


def _is_sighup_and_daemon(signo):
    # Restart-on-SIGHUP only makes sense for a daemonized process; an
    # interactive process receiving SIGHUP (closed terminal) should exit.
    if not (_sighup_supported() and signo == signal.SIGHUP):
        # Avoid checking if we are a daemon, because the signal isn't
        # SIGHUP.
        return False
    return _is_daemon()


def _signo_to_signame(signo):
    # Map the signals this module installs handlers for to their names,
    # for use in log messages.
    signals = {signal.SIGTERM: 'SIGTERM',
               signal.SIGINT: 'SIGINT'}
    if _sighup_supported():
        signals[signal.SIGHUP] = 'SIGHUP'
    return signals[signo]


def _set_signals_handler(handler):
    # Install one handler for all shutdown-related signals (SIGHUP only
    # where the platform defines it, e.g. not on Windows).
    signal.signal(signal.SIGTERM, handler)
    signal.signal(signal.SIGINT, handler)
    if _sighup_supported():
        signal.signal(signal.SIGHUP, handler)


class Launcher(object):
    """Launch one or more services and wait for them to complete."""

    def __init__(self):
        """Initialize the service launcher.

        :returns: None

        """
        self.services = Services()
        self.backdoor_port = eventlet_backdoor.initialize_if_enabled()

    def launch_service(self, service):
        """Load and start the given service.

        :param service: The service you would like to start.
        :returns: None

        """
        service.backdoor_port = self.backdoor_port
        self.services.add(service)

    def stop(self):
        """Stop all services which are currently running.

        :returns: None

        """
        self.services.stop()

    def wait(self):
        """Waits until all services have been stopped, and then returns.

        :returns: None

        """
        self.services.wait()

    def restart(self):
        """Reload config files and restart service.

        :returns: None

        """
        cfg.CONF.reload_config_files()
        self.services.restart()


class SignalExit(SystemExit):
    # SystemExit that also records which signal caused the exit, so the
    # launcher can decide between shutdown and SIGHUP-triggered restart.
    def __init__(self, signo, exccode=1):
        super(SignalExit, self).__init__(exccode)
        self.signo = signo


class ServiceLauncher(Launcher):
    """Launcher that runs services in the current process, translating
    signals into clean shutdown or (SIGHUP, daemon only) restart.
    """

    def _handle_signal(self, signo, frame):
        # Allow the process to be killed again and die from natural causes
        _set_signals_handler(signal.SIG_DFL)
        raise SignalExit(signo)

    def handle_signal(self):
        _set_signals_handler(self._handle_signal)

    def _wait_for_exit_or_signal(self, ready_callback=None):
        """Block until the services finish or a signal arrives.

        :returns: (exit status, signal number; 0 if no signal)
        """
        status = None
        signo = 0

        LOG.debug('Full set of CONF:')
        CONF.log_opt_values(LOG, std_logging.DEBUG)

        try:
            if ready_callback:
                ready_callback()
            super(ServiceLauncher, self).wait()
        except SignalExit as exc:
            signame = _signo_to_signame(exc.signo)
            LOG.info(_LI('Caught %s, exiting'), signame)
            status = exc.code
            signo = exc.signo
        except SystemExit as exc:
            status = exc.code
        finally:
            self.stop()
            if rpc:
                try:
                    rpc.cleanup()
                except Exception:
                    # We're shutting down, so it doesn't matter at this point.
                    LOG.exception(_LE('Exception during rpc cleanup.'))

        return status, signo

    def wait(self, ready_callback=None):
        systemd.notify_once()
        while True:
            self.handle_signal()
            status, signo = self._wait_for_exit_or_signal(ready_callback)
            if not _is_sighup_and_daemon(signo):
                return status
            # SIGHUP while daemonized: reload config and keep serving.
            self.restart()


class ServiceWrapper(object):
    # Book-keeping record for one service managed by ProcessLauncher.
    def __init__(self, service, workers):
        self.service = service
        self.workers = workers   # desired number of child processes
        self.children = set()    # pids of live children for this service
        self.forktimes = []      # recent fork timestamps (fork rate limit)


class ProcessLauncher(object):
    def __init__(self, wait_interval=0.01):
        """Constructor.

        :param wait_interval: The interval to sleep for between checks
                              of child process exit.
        """
        self.children = {}
        self.sigcaught = None
        self.running = True
        self.wait_interval = wait_interval
        # Children inherit the read end; when the parent dies its write end
        # closes, waking _pipe_watcher in each child.
        rfd, self.writepipe = os.pipe()
        self.readpipe = eventlet.greenio.GreenPipe(rfd, 'r')
        self.handle_signal()

    def handle_signal(self):
        _set_signals_handler(self._handle_signal)

    def _handle_signal(self, signo, frame):
        self.sigcaught = signo
        self.running = False

        # Allow the process to be killed again and die from natural causes
        _set_signals_handler(signal.SIG_DFL)

    def _pipe_watcher(self):
        # This will block until the write end is closed when the parent
        # dies unexpectedly
        self.readpipe.read()

        LOG.info(_LI('Parent process has died unexpectedly, exiting'))

        sys.exit(1)

    def _child_process_handle_signal(self):
        # Setup child signal handlers differently
        def _sigterm(*args):
            signal.signal(signal.SIGTERM, signal.SIG_DFL)
            raise SignalExit(signal.SIGTERM)

        def _sighup(*args):
            signal.signal(signal.SIGHUP, signal.SIG_DFL)
            raise SignalExit(signal.SIGHUP)

        signal.signal(signal.SIGTERM, _sigterm)
        if _sighup_supported():
            signal.signal(signal.SIGHUP, _sighup)
        # Block SIGINT and let the parent send us a SIGTERM
        signal.signal(signal.SIGINT, signal.SIG_IGN)

    def _child_wait_for_exit_or_signal(self, launcher):
        status = 0
        signo = 0

        # NOTE(johannes): All exceptions are caught to ensure this
        # doesn't fallback into the loop spawning children. It would
        # be bad for a child to spawn more children.
        try:
            launcher.wait()
        except SignalExit as exc:
            signame = _signo_to_signame(exc.signo)
            LOG.info(_LI('Child caught %s, exiting'), signame)
            status = exc.code
            signo = exc.signo
        except SystemExit as exc:
            status = exc.code
        except BaseException:
            LOG.exception(_LE('Unhandled exception'))
            status = 2
        finally:
            launcher.stop()

        return status, signo

    def _child_process(self, service):
        """Post-fork setup; returns a fresh in-process Launcher."""
        self._child_process_handle_signal()

        # Reopen the eventlet hub to make sure we don't share an epoll
        # fd with parent and/or siblings, which would be bad
        eventlet.hubs.use_hub()

        # Close write to ensure only parent has it open
        os.close(self.writepipe)
        # Create greenthread to watch for parent to close pipe
        eventlet.spawn_n(self._pipe_watcher)

        # Reseed random number generator
        random.seed()

        launcher = Launcher()
        launcher.launch_service(service)
        return launcher

    def _start_child(self, wrap):
        if len(wrap.forktimes) > wrap.workers:
            # Limit ourselves to one process a second (over the period of
            # number of workers * 1 second). This will allow workers to
            # start up quickly but ensure we don't fork off children that
            # die instantly too quickly.
            if time.time() - wrap.forktimes[0] < wrap.workers:
                LOG.info(_LI('Forking too fast, sleeping'))
                time.sleep(1)

            wrap.forktimes.pop(0)

        wrap.forktimes.append(time.time())

        pid = os.fork()
        if pid == 0:
            # Child: run the service until exit or a non-restart signal.
            launcher = self._child_process(wrap.service)
            while True:
                self._child_process_handle_signal()
                status, signo = self._child_wait_for_exit_or_signal(launcher)
                if not _is_sighup_and_daemon(signo):
                    break
                launcher.restart()

            os._exit(status)

        LOG.info(_LI('Started child %d'), pid)

        wrap.children.add(pid)
        self.children[pid] = wrap

        return pid

    def launch_service(self, service, workers=1):
        """Fork `workers` child processes all running `service`."""
        wrap = ServiceWrapper(service, workers)

        LOG.info(_LI('Starting %d workers'), wrap.workers)
        while self.running and len(wrap.children) < wrap.workers:
            self._start_child(wrap)

    def _wait_child(self):
        """Reap one exited child, if any.

        :returns: the child's ServiceWrapper, or None if no known child
                  has exited.
        """
        try:
            # Don't block if no child processes have exited
            pid, status = os.waitpid(0, os.WNOHANG)
            if not pid:
                return None
        except OSError as exc:
            if exc.errno not in (errno.EINTR, errno.ECHILD):
                raise
            return None

        if os.WIFSIGNALED(status):
            sig = os.WTERMSIG(status)
            LOG.info(_LI('Child %(pid)d killed by signal %(sig)d'),
                     dict(pid=pid, sig=sig))
        else:
            code = os.WEXITSTATUS(status)
            LOG.info(_LI('Child %(pid)s exited with status %(code)d'),
                     dict(pid=pid, code=code))

        if pid not in self.children:
            LOG.warning(_LW('pid %d not in child list'), pid)
            return None

        wrap = self.children.pop(pid)
        wrap.children.remove(pid)
        return wrap

    def _respawn_children(self):
        while self.running:
            wrap = self._wait_child()
            if not wrap:
                # Yield to other threads if no children have exited
                # Sleep for a short time to avoid excessive CPU usage
                # (see bug #1095346)
                eventlet.greenthread.sleep(self.wait_interval)
                continue
            while self.running and len(wrap.children) < wrap.workers:
                self._start_child(wrap)

    def wait(self):
        """Loop waiting on children to die and respawning as necessary."""

        systemd.notify_once()
        LOG.debug('Full set of CONF:')
        CONF.log_opt_values(LOG, std_logging.DEBUG)

        try:
            while True:
                self.handle_signal()
                self._respawn_children()
                # No signal means that stop was called. Don't clean up here.
                if not self.sigcaught:
                    return

                signame = _signo_to_signame(self.sigcaught)
                LOG.info(_LI('Caught %s, stopping children'), signame)
                if not _is_sighup_and_daemon(self.sigcaught):
                    break

                # SIGHUP: forward to children so they reload, then resume.
                for pid in self.children:
                    os.kill(pid, signal.SIGHUP)
                self.running = True
                self.sigcaught = None
        except eventlet.greenlet.GreenletExit:
            LOG.info(_LI("Wait called after thread killed. Cleaning up."))

        self.stop()

    def stop(self):
        """Terminate child processes and wait on each."""
        self.running = False
        for pid in self.children:
            try:
                os.kill(pid, signal.SIGTERM)
            except OSError as exc:
                # ESRCH: child already gone; anything else is unexpected.
                if exc.errno != errno.ESRCH:
                    raise

        # Wait for children to die
        if self.children:
            LOG.info(_LI('Waiting on %d children to exit'), len(self.children))
            while self.children:
                self._wait_child()


class Service(object):
    """Service object for binaries running on hosts."""

    def __init__(self, threads=1000):
        self.tg = threadgroup.ThreadGroup(threads)

        # signal that the service is done shutting itself down:
        self._done = event.Event()

    def reset(self):
        # NOTE(Fengqian): docs for Event.reset() recommend against using it
        self._done = event.Event()

    def start(self):
        pass

    def stop(self):
        self.tg.stop()
        self.tg.wait()
        # Signal that service cleanup is done:
        if not self._done.ready():
            self._done.send()

    def wait(self):
        self._done.wait()


class Services(object):

    def __init__(self):
        self.services = []
        self.tg = threadgroup.ThreadGroup()
        self.done = event.Event()

    def add(self, service):
        self.services.append(service)
        self.tg.add_thread(self.run_service, service, self.done)

    def stop(self):
        # wait for graceful shutdown of services:
        for service in self.services:
            service.stop()
            service.wait()

        # Each service has performed cleanup, now signal that the run_service
        # wrapper threads can now die:
        if not self.done.ready():
            self.done.send()

        # reap threads:
        self.tg.stop()

    def wait(self):
        self.tg.wait()

    def restart(self):
        self.stop()
        # A fresh Event is needed because the old one was already sent.
        self.done = event.Event()
        for restart_service in self.services:
            restart_service.reset()
            self.tg.add_thread(self.run_service, restart_service, self.done)

    @staticmethod
    def run_service(service, done):
        """Service start wrapper.

        :param service: service to run
        :param done: event to wait on until a shutdown is triggered
        :returns: None

        """
        service.start()
        done.wait()


def launch(service, workers=1):
    # One worker runs in-process; more than one forks child processes.
    if workers is None or workers == 1:
        launcher = ServiceLauncher()
        launcher.launch_service(service)
    else:
        launcher = ProcessLauncher()
        launcher.launch_service(service, workers=workers)

    return launcher

import os
import ssl

from oslo.config import cfg

from neutron.openstack.common.gettextutils import _


# Options registered under the [ssl] config group.
ssl_opts = [
    cfg.StrOpt('ca_file',
               default=None,
               help="CA certificate file to use to verify "
                    "connecting clients"),
    cfg.StrOpt('cert_file',
               default=None,
               help="Certificate file to use when starting "
                    "the server securely"),
    cfg.StrOpt('key_file',
               default=None,
               help="Private key file to use when starting "
                    "the server securely"),
]


CONF = cfg.CONF
CONF.register_opts(ssl_opts, "ssl")


def is_enabled():
    """Return True when SSL is configured, validating the configuration.

    :raises RuntimeError: if a configured file does not exist, or if one
        of cert_file/key_file is set without the other.
    """
    cert_file = CONF.ssl.cert_file
    key_file = CONF.ssl.key_file
    ca_file = CONF.ssl.ca_file
    use_ssl = cert_file or key_file

    if cert_file and not os.path.exists(cert_file):
        raise RuntimeError(_("Unable to find cert_file : %s") % cert_file)

    if ca_file and not os.path.exists(ca_file):
        raise RuntimeError(_("Unable to find ca_file : %s") % ca_file)

    if key_file and not os.path.exists(key_file):
        raise RuntimeError(_("Unable to find key_file : %s") % key_file)

    if use_ssl and (not cert_file or not key_file):
        raise RuntimeError(_("When running server in SSL mode, you must "
                             "specify both a cert_file and key_file "
                             "option value in your configuration file"))

    return use_ssl


def wrap(sock):
    """Wrap a server socket with SSL using the configured cert/key.

    A client certificate is required only when ca_file is configured.
    """
    ssl_kwargs = {
        'server_side': True,
        'certfile': CONF.ssl.cert_file,
        'keyfile': CONF.ssl.key_file,
        'cert_reqs': ssl.CERT_NONE,
    }

    if CONF.ssl.ca_file:
        ssl_kwargs['ca_certs'] = CONF.ssl.ca_file
        ssl_kwargs['cert_reqs'] = ssl.CERT_REQUIRED

    return ssl.wrap_socket(sock, **ssl_kwargs)


# NOTE(review): SSLv2/SSLv3 are insecure and modern ssl builds may not
# define PROTOCOL_SSLv3 at all (only SSLv2 is guarded below) -- consider
# restricting this table upstream; confirm against the target Python.
_SSL_PROTOCOLS = {
    "tlsv1": ssl.PROTOCOL_TLSv1,
    "sslv23": ssl.PROTOCOL_SSLv23,
    "sslv3": ssl.PROTOCOL_SSLv3
}

try:
    _SSL_PROTOCOLS["sslv2"] = ssl.PROTOCOL_SSLv2
except AttributeError:
    # PROTOCOL_SSLv2 is absent when OpenSSL was built without SSLv2.
    pass


def validate_ssl_version(version):
    """Map a case-insensitive version name to its ssl.PROTOCOL_* constant.

    :raises RuntimeError: if the version name is unknown.
    """
    key = version.lower()
    try:
        return _SSL_PROTOCOLS[key]
    except KeyError:
        raise RuntimeError(_("Invalid SSL version : %s") % version)
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
System-level utilities and helper functions.
"""

import math
import re
import sys
import unicodedata

import six

from neutron.openstack.common.gettextutils import _


# Exponent applied to the unit base (1024 for IEC, 1000 for SI) per prefix.
UNIT_PREFIX_EXPONENT = {
    'k': 1,
    'K': 1,
    'Ki': 1,
    'M': 2,
    'Mi': 2,
    'G': 3,
    'Gi': 3,
    'T': 4,
    'Ti': 4,
}
# Per unit system: (base, regex matching "<number><prefix?><b|bit|B>").
UNIT_SYSTEM_INFO = {
    'IEC': (1024, re.compile(r'(^[-+]?\d*\.?\d+)([KMGT]i?)?(b|bit|B)$')),
    'SI': (1000, re.compile(r'(^[-+]?\d*\.?\d+)([kMGT])?(b|bit|B)$')),
}

TRUE_STRINGS = ('1', 't', 'true', 'on', 'y', 'yes')
FALSE_STRINGS = ('0', 'f', 'false', 'off', 'n', 'no')

SLUGIFY_STRIP_RE = re.compile(r"[^\w\s-]")
SLUGIFY_HYPHENATE_RE = re.compile(r"[-\s]+")


def int_from_bool_as_string(subject):
    """Interpret a string as a boolean and return either 1 or 0.

    Any string value in:

        ('True', 'true', 'On', 'on', '1')

    is interpreted as a boolean True.

    Useful for JSON-decoded stuff and config file parsing
    """
    return bool_from_string(subject) and 1 or 0


def bool_from_string(subject, strict=False, default=False):
    """Interpret a string as a boolean.

    A case-insensitive match is performed such that strings matching 't',
    'true', 'on', 'y', 'yes', or '1' are considered True and, when
    `strict=False`, anything else returns the value specified by 'default'.

    Useful for JSON-decoded stuff and config file parsing.

    If `strict=True`, unrecognized values, including None, will raise a
    ValueError which is useful when parsing values passed in from an API call.
    Strings yielding False are 'f', 'false', 'off', 'n', 'no', or '0'.
    """
    if not isinstance(subject, six.string_types):
        # Non-strings (e.g. None, ints) are stringified first so that
        # strict mode can report the offending value.
        subject = six.text_type(subject)

    lowered = subject.strip().lower()

    if lowered in TRUE_STRINGS:
        return True
    elif lowered in FALSE_STRINGS:
        return False
    elif strict:
        acceptable = ', '.join(
            "'%s'" % s for s in sorted(TRUE_STRINGS + FALSE_STRINGS))
        msg = _("Unrecognized value '%(val)s', acceptable values are:"
                " %(acceptable)s") % {'val': subject,
                                      'acceptable': acceptable}
        raise ValueError(msg)
    else:
        return default


def safe_decode(text, incoming=None, errors='strict'):
    """Decodes incoming text/bytes string using `incoming` if they're not
    already unicode.

    :param incoming: Text's current encoding
    :param errors: Errors handling policy. See here for valid
        values http://docs.python.org/2/library/codecs.html
    :returns: text or a unicode `incoming` encoded
        representation of it.
    :raises TypeError: If text is not an instance of str
    """
    if not isinstance(text, (six.string_types, six.binary_type)):
        raise TypeError("%s can't be decoded" % type(text))

    if isinstance(text, six.text_type):
        return text

    if not incoming:
        incoming = (sys.stdin.encoding or
                    sys.getdefaultencoding())

    try:
        return text.decode(incoming, errors)
    except UnicodeDecodeError:
        # Note(flaper87) If we get here, it means that
        # sys.stdin.encoding / sys.getdefaultencoding
        # didn't return a suitable encoding to decode
        # text. This happens mostly when global LANG
        # var is not set correctly and there's no
        # default encoding. In this case, most likely
        # python will use ASCII or ANSI encoders as
        # default encodings but they won't be capable
        # of decoding non-ASCII characters.
        #
        # Also, UTF-8 is being used since it's an ASCII
        # extension.
        return text.decode('utf-8', errors)


def safe_encode(text, incoming=None,
                encoding='utf-8', errors='strict'):
    """Encodes incoming text/bytes string using `encoding`.

    If incoming is not specified, text is expected to be encoded with
    current python's default encoding. (`sys.getdefaultencoding`)

    :param incoming: Text's current encoding
    :param encoding: Expected encoding for text (Default UTF-8)
    :param errors: Errors handling policy. See here for valid
        values http://docs.python.org/2/library/codecs.html
    :returns: text or a bytestring `encoding` encoded
        representation of it.
    :raises TypeError: If text is not an instance of str
    """
    if not isinstance(text, (six.string_types, six.binary_type)):
        raise TypeError("%s can't be encoded" % type(text))

    if not incoming:
        incoming = (sys.stdin.encoding or
                    sys.getdefaultencoding())

    if isinstance(text, six.text_type):
        return text.encode(encoding, errors)
    elif text and encoding != incoming:
        # Decode text before encoding it with `encoding`
        text = safe_decode(text, incoming, errors)
        return text.encode(encoding, errors)
    else:
        # Empty bytestrings and same-encoding bytestrings pass through.
        return text


def string_to_bytes(text, unit_system='IEC', return_int=False):
    """Converts a string into an float representation of bytes.

    The units supported for IEC ::

        Kb(it), Kib(it), Mb(it), Mib(it), Gb(it), Gib(it), Tb(it), Tib(it)
        KB, KiB, MB, MiB, GB, GiB, TB, TiB

    The units supported for SI ::

        kb(it), Mb(it), Gb(it), Tb(it)
        kB, MB, GB, TB

    Note that the SI unit system does not support capital letter 'K'

    :param text: String input for bytes size conversion.
    :param unit_system: Unit system for byte size conversion.
    :param return_int: If True, returns integer representation of text
                       in bytes. (default: decimal)
    :returns: Numerical representation of text in bytes.
    :raises ValueError: If text has an invalid value.

    """
    try:
        base, reg_ex = UNIT_SYSTEM_INFO[unit_system]
    except KeyError:
        msg = _('Invalid unit system: "%s"') % unit_system
        raise ValueError(msg)
    match = reg_ex.match(text)
    if match:
        magnitude = float(match.group(1))
        unit_prefix = match.group(2)
        if match.group(3) in ['b', 'bit']:
            # bit units: convert to bytes.
            magnitude /= 8
    else:
        msg = _('Invalid string format: %s') % text
        raise ValueError(msg)
    if not unit_prefix:
        res = magnitude
    else:
        res = magnitude * pow(base, UNIT_PREFIX_EXPONENT[unit_prefix])
    if return_int:
        # Round up so a partial byte still counts as a whole one.
        return int(math.ceil(res))
    return res


def to_slug(value, incoming=None, errors="strict"):
    """Normalize string.

    Convert to lowercase, remove non-word characters, and convert spaces
    to hyphens.

    Inspired by Django's `slugify` filter.

    :param value: Text to slugify
    :param incoming: Text's current encoding
    :param errors: Errors handling policy. See here for valid
        values http://docs.python.org/2/library/codecs.html
    :returns: slugified unicode representation of `value`
    :raises TypeError: If text is not an instance of str
    """
    value = safe_decode(value, incoming, errors)
    # NOTE(aababilov): no need to use safe_(encode|decode) here:
    # encodings are always "ascii", error handling is always "ignore"
    # and types are always known (first: unicode; second: str)
    value = unicodedata.normalize("NFKD", value).encode(
        "ascii", "ignore").decode("ascii")
    value = SLUGIFY_STRIP_RE.sub("", value).strip().lower()
    return SLUGIFY_HYPHENATE_RE.sub("-", value)
+""" + +import os +import socket +import sys + +from neutron.openstack.common import log as logging + + +LOG = logging.getLogger(__name__) + + +def _abstractify(socket_name): + if socket_name.startswith('@'): + # abstract namespace socket + socket_name = '\0%s' % socket_name[1:] + return socket_name + + +def _sd_notify(unset_env, msg): + notify_socket = os.getenv('NOTIFY_SOCKET') + if notify_socket: + sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM) + try: + sock.connect(_abstractify(notify_socket)) + sock.sendall(msg) + if unset_env: + del os.environ['NOTIFY_SOCKET'] + except EnvironmentError: + LOG.debug("Systemd notification failed", exc_info=True) + finally: + sock.close() + + +def notify(): + """Send notification to Systemd that service is ready. + For details see + http://www.freedesktop.org/software/systemd/man/sd_notify.html + """ + _sd_notify(False, 'READY=1') + + +def notify_once(): + """Send notification once to Systemd that service is ready. + Systemd sets NOTIFY_SOCKET environment variable with the name of the + socket listening for notifications from services. + This method removes the NOTIFY_SOCKET environment variable to ensure + notification is sent only once. + """ + _sd_notify(True, 'READY=1') + + +def onready(notify_socket, timeout): + """Wait for systemd style notification on the socket. 
+ + :param notify_socket: local socket address + :type notify_socket: string + :param timeout: socket timeout + :type timeout: float + :returns: 0 service ready + 1 service not ready + 2 timeout occurred + """ + sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM) + sock.settimeout(timeout) + sock.bind(_abstractify(notify_socket)) + try: + msg = sock.recv(512) + except socket.timeout: + return 2 + finally: + sock.close() + if 'READY=1' in msg: + return 0 + else: + return 1 + + +if __name__ == '__main__': + # simple CLI for testing + if len(sys.argv) == 1: + notify() + elif len(sys.argv) >= 2: + timeout = float(sys.argv[1]) + notify_socket = os.getenv('NOTIFY_SOCKET') + if notify_socket: + retval = onready(notify_socket, timeout) + sys.exit(retval) diff --git a/neutron/openstack/common/threadgroup.py b/neutron/openstack/common/threadgroup.py new file mode 100644 index 000000000..5cfd59c94 --- /dev/null +++ b/neutron/openstack/common/threadgroup.py @@ -0,0 +1,129 @@ +# Copyright 2012 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import eventlet +from eventlet import greenpool +from eventlet import greenthread + +from neutron.openstack.common import log as logging +from neutron.openstack.common import loopingcall + + +LOG = logging.getLogger(__name__) + + +def _thread_done(gt, *args, **kwargs): + """Callback function to be passed to GreenThread.link() when we spawn() + Calls the :class:`ThreadGroup` to notify if. 
+ + """ + kwargs['group'].thread_done(kwargs['thread']) + + +class Thread(object): + """Wrapper around a greenthread, that holds a reference to the + :class:`ThreadGroup`. The Thread will notify the :class:`ThreadGroup` when + it has done so it can be removed from the threads list. + """ + def __init__(self, thread, group): + self.thread = thread + self.thread.link(_thread_done, group=group, thread=self) + + def stop(self): + self.thread.kill() + + def wait(self): + return self.thread.wait() + + def link(self, func, *args, **kwargs): + self.thread.link(func, *args, **kwargs) + + +class ThreadGroup(object): + """The point of the ThreadGroup classis to: + + * keep track of timers and greenthreads (making it easier to stop them + when need be). + * provide an easy API to add timers. + """ + def __init__(self, thread_pool_size=10): + self.pool = greenpool.GreenPool(thread_pool_size) + self.threads = [] + self.timers = [] + + def add_dynamic_timer(self, callback, initial_delay=None, + periodic_interval_max=None, *args, **kwargs): + timer = loopingcall.DynamicLoopingCall(callback, *args, **kwargs) + timer.start(initial_delay=initial_delay, + periodic_interval_max=periodic_interval_max) + self.timers.append(timer) + + def add_timer(self, interval, callback, initial_delay=None, + *args, **kwargs): + pulse = loopingcall.FixedIntervalLoopingCall(callback, *args, **kwargs) + pulse.start(interval=interval, + initial_delay=initial_delay) + self.timers.append(pulse) + + def add_thread(self, callback, *args, **kwargs): + gt = self.pool.spawn(callback, *args, **kwargs) + th = Thread(gt, self) + self.threads.append(th) + return th + + def thread_done(self, thread): + self.threads.remove(thread) + + def stop(self): + current = greenthread.getcurrent() + + # Iterate over a copy of self.threads so thread_done doesn't + # modify the list while we're iterating + for x in self.threads[:]: + if x is current: + # don't kill the current thread. 
+ continue + try: + x.stop() + except Exception as ex: + LOG.exception(ex) + + for x in self.timers: + try: + x.stop() + except Exception as ex: + LOG.exception(ex) + self.timers = [] + + def wait(self): + for x in self.timers: + try: + x.wait() + except eventlet.greenlet.GreenletExit: + pass + except Exception as ex: + LOG.exception(ex) + current = greenthread.getcurrent() + + # Iterate over a copy of self.threads so thread_done doesn't + # modify the list while we're iterating + for x in self.threads[:]: + if x is current: + continue + try: + x.wait() + except eventlet.greenlet.GreenletExit: + pass + except Exception as ex: + LOG.exception(ex) diff --git a/neutron/openstack/common/timeutils.py b/neutron/openstack/common/timeutils.py new file mode 100644 index 000000000..d5ed81d3e --- /dev/null +++ b/neutron/openstack/common/timeutils.py @@ -0,0 +1,210 @@ +# Copyright 2011 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Time related utilities and helper functions. 
+""" + +import calendar +import datetime +import time + +import iso8601 +import six + + +# ISO 8601 extended time format with microseconds +_ISO8601_TIME_FORMAT_SUBSECOND = '%Y-%m-%dT%H:%M:%S.%f' +_ISO8601_TIME_FORMAT = '%Y-%m-%dT%H:%M:%S' +PERFECT_TIME_FORMAT = _ISO8601_TIME_FORMAT_SUBSECOND + + +def isotime(at=None, subsecond=False): + """Stringify time in ISO 8601 format.""" + if not at: + at = utcnow() + st = at.strftime(_ISO8601_TIME_FORMAT + if not subsecond + else _ISO8601_TIME_FORMAT_SUBSECOND) + tz = at.tzinfo.tzname(None) if at.tzinfo else 'UTC' + st += ('Z' if tz == 'UTC' else tz) + return st + + +def parse_isotime(timestr): + """Parse time from ISO 8601 format.""" + try: + return iso8601.parse_date(timestr) + except iso8601.ParseError as e: + raise ValueError(six.text_type(e)) + except TypeError as e: + raise ValueError(six.text_type(e)) + + +def strtime(at=None, fmt=PERFECT_TIME_FORMAT): + """Returns formatted utcnow.""" + if not at: + at = utcnow() + return at.strftime(fmt) + + +def parse_strtime(timestr, fmt=PERFECT_TIME_FORMAT): + """Turn a formatted time back into a datetime.""" + return datetime.datetime.strptime(timestr, fmt) + + +def normalize_time(timestamp): + """Normalize time in arbitrary timezone to UTC naive object.""" + offset = timestamp.utcoffset() + if offset is None: + return timestamp + return timestamp.replace(tzinfo=None) - offset + + +def is_older_than(before, seconds): + """Return True if before is older than seconds.""" + if isinstance(before, six.string_types): + before = parse_strtime(before).replace(tzinfo=None) + else: + before = before.replace(tzinfo=None) + + return utcnow() - before > datetime.timedelta(seconds=seconds) + + +def is_newer_than(after, seconds): + """Return True if after is newer than seconds.""" + if isinstance(after, six.string_types): + after = parse_strtime(after).replace(tzinfo=None) + else: + after = after.replace(tzinfo=None) + + return after - utcnow() > datetime.timedelta(seconds=seconds) + + +def 
utcnow_ts(): + """Timestamp version of our utcnow function.""" + if utcnow.override_time is None: + # NOTE(kgriffs): This is several times faster + # than going through calendar.timegm(...) + return int(time.time()) + + return calendar.timegm(utcnow().timetuple()) + + +def utcnow(): + """Overridable version of utils.utcnow.""" + if utcnow.override_time: + try: + return utcnow.override_time.pop(0) + except AttributeError: + return utcnow.override_time + return datetime.datetime.utcnow() + + +def iso8601_from_timestamp(timestamp): + """Returns a iso8601 formated date from timestamp.""" + return isotime(datetime.datetime.utcfromtimestamp(timestamp)) + + +utcnow.override_time = None + + +def set_time_override(override_time=None): + """Overrides utils.utcnow. + + Make it return a constant time or a list thereof, one at a time. + + :param override_time: datetime instance or list thereof. If not + given, defaults to the current UTC time. + """ + utcnow.override_time = override_time or datetime.datetime.utcnow() + + +def advance_time_delta(timedelta): + """Advance overridden time using a datetime.timedelta.""" + assert(not utcnow.override_time is None) + try: + for dt in utcnow.override_time: + dt += timedelta + except TypeError: + utcnow.override_time += timedelta + + +def advance_time_seconds(seconds): + """Advance overridden time by seconds.""" + advance_time_delta(datetime.timedelta(0, seconds)) + + +def clear_time_override(): + """Remove the overridden time.""" + utcnow.override_time = None + + +def marshall_now(now=None): + """Make an rpc-safe datetime with microseconds. + + Note: tzinfo is stripped, but not required for relative times. 
+ """ + if not now: + now = utcnow() + return dict(day=now.day, month=now.month, year=now.year, hour=now.hour, + minute=now.minute, second=now.second, + microsecond=now.microsecond) + + +def unmarshall_time(tyme): + """Unmarshall a datetime dict.""" + return datetime.datetime(day=tyme['day'], + month=tyme['month'], + year=tyme['year'], + hour=tyme['hour'], + minute=tyme['minute'], + second=tyme['second'], + microsecond=tyme['microsecond']) + + +def delta_seconds(before, after): + """Return the difference between two timing objects. + + Compute the difference in seconds between two date, time, or + datetime objects (as a float, to microsecond resolution). + """ + delta = after - before + return total_seconds(delta) + + +def total_seconds(delta): + """Return the total seconds of datetime.timedelta object. + + Compute total seconds of datetime.timedelta, datetime.timedelta + doesn't have method total_seconds in Python2.6, calculate it manually. + """ + try: + return delta.total_seconds() + except AttributeError: + return ((delta.days * 24 * 3600) + delta.seconds + + float(delta.microseconds) / (10 ** 6)) + + +def is_soon(dt, window): + """Determines if time is going to happen in the next window seconds. + + :param dt: the time + :param window: minimum seconds to remain to consider the time not soon + + :return: True if expiration is within the given duration + """ + soon = (utcnow() + datetime.timedelta(seconds=window)) + return normalize_time(dt) <= soon diff --git a/neutron/openstack/common/uuidutils.py b/neutron/openstack/common/uuidutils.py new file mode 100644 index 000000000..234b880c9 --- /dev/null +++ b/neutron/openstack/common/uuidutils.py @@ -0,0 +1,37 @@ +# Copyright (c) 2012 Intel Corporation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
"""
UUID related utilities and helper functions.
"""

import uuid


def generate_uuid():
    """Return a freshly generated random UUID as its canonical string."""
    return str(uuid.uuid4())


def is_uuid_like(val):
    """Check whether ``val`` is the canonical string form of a UUID.

    Canonical form means the exact lowercase, hyphenated rendering
    (aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa); anything else -- including
    non-string values -- yields False rather than raising.
    """
    try:
        parsed = uuid.UUID(val)
    except (TypeError, ValueError, AttributeError):
        return False
    return str(parsed) == val
+ + This decorator logs a deprecation message when the callable it decorates is + used. The message will include the release where the callable was + deprecated, the release where it may be removed and possibly an optional + replacement. + + Examples: + + 1. Specifying the required deprecated release + + >>> @deprecated(as_of=deprecated.ICEHOUSE) + ... def a(): pass + + 2. Specifying a replacement: + + >>> @deprecated(as_of=deprecated.ICEHOUSE, in_favor_of='f()') + ... def b(): pass + + 3. Specifying the release where the functionality may be removed: + + >>> @deprecated(as_of=deprecated.ICEHOUSE, remove_in=+1) + ... def c(): pass + + """ + + FOLSOM = 'F' + GRIZZLY = 'G' + HAVANA = 'H' + ICEHOUSE = 'I' + + _RELEASES = { + 'F': 'Folsom', + 'G': 'Grizzly', + 'H': 'Havana', + 'I': 'Icehouse', + } + + _deprecated_msg_with_alternative = _( + '%(what)s is deprecated as of %(as_of)s in favor of ' + '%(in_favor_of)s and may be removed in %(remove_in)s.') + + _deprecated_msg_no_alternative = _( + '%(what)s is deprecated as of %(as_of)s and may be ' + 'removed in %(remove_in)s. It will not be superseded.') + + def __init__(self, as_of, in_favor_of=None, remove_in=2, what=None): + """Initialize decorator + + :param as_of: the release deprecating the callable. Constants + are define in this class for convenience. 
+ :param in_favor_of: the replacement for the callable (optional) + :param remove_in: an integer specifying how many releases to wait + before removing (default: 2) + :param what: name of the thing being deprecated (default: the + callable's name) + + """ + self.as_of = as_of + self.in_favor_of = in_favor_of + self.remove_in = remove_in + self.what = what + + def __call__(self, func): + if not self.what: + self.what = func.__name__ + '()' + + @functools.wraps(func) + def wrapped(*args, **kwargs): + msg, details = self._build_message() + LOG.deprecated(msg, details) + return func(*args, **kwargs) + return wrapped + + def _get_safe_to_remove_release(self, release): + # TODO(dstanek): this method will have to be reimplemented once + # when we get to the X release because once we get to the Y + # release, what is Y+2? + new_release = chr(ord(release) + self.remove_in) + if new_release in self._RELEASES: + return self._RELEASES[new_release] + else: + return new_release + + def _build_message(self): + details = dict(what=self.what, + as_of=self._RELEASES[self.as_of], + remove_in=self._get_safe_to_remove_release(self.as_of)) + + if self.in_favor_of: + details['in_favor_of'] = self.in_favor_of + msg = self._deprecated_msg_with_alternative + else: + msg = self._deprecated_msg_no_alternative + return msg, details + + +def is_compatible(requested_version, current_version, same_major=True): + """Determine whether `requested_version` is satisfied by + `current_version`; in other words, `current_version` is >= + `requested_version`. + + :param requested_version: version to check for compatibility + :param current_version: version to check against + :param same_major: if True, the major version must be identical between + `requested_version` and `current_version`. This is used when a + major-version difference indicates incompatibility between the two + versions. Since this is the common-case in practice, the default is + True. 
+ :returns: True if compatible, False if not + """ + requested_parts = pkg_resources.parse_version(requested_version) + current_parts = pkg_resources.parse_version(current_version) + + if same_major and (requested_parts[0] != current_parts[0]): + return False + + return current_parts >= requested_parts diff --git a/neutron/plugins/__init__.py b/neutron/plugins/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/neutron/plugins/bigswitch/README b/neutron/plugins/bigswitch/README new file mode 100644 index 000000000..43f157d12 --- /dev/null +++ b/neutron/plugins/bigswitch/README @@ -0,0 +1,14 @@ +# Neutron REST Proxy Plug-in for Big Switch and FloodLight Controllers + +This module provides a generic neutron plugin 'NeutronRestProxy' that +translates neutron function calls to authenticated REST requests (JSON supported) +to a set of redundant external network controllers. + +It also keeps a local persistent store of neutron state that has been +setup using that API. + +Currently the FloodLight OpenFlow Controller or the Big Switch Networks Controller +can be configured as external network controllers for this plugin. + +For more details on this plugin, please refer to the following link: +http://www.openflowhub.org/display/floodlightcontroller/Neutron+REST+Proxy+Plugin diff --git a/neutron/plugins/bigswitch/__init__.py b/neutron/plugins/bigswitch/__init__.py new file mode 100644 index 000000000..2a2421616 --- /dev/null +++ b/neutron/plugins/bigswitch/__init__.py @@ -0,0 +1,16 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2012 Big Switch Networks, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# diff --git a/neutron/plugins/bigswitch/agent/__init__.py b/neutron/plugins/bigswitch/agent/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/neutron/plugins/bigswitch/agent/restproxy_agent.py b/neutron/plugins/bigswitch/agent/restproxy_agent.py new file mode 100644 index 000000000..97aa7d0e3 --- /dev/null +++ b/neutron/plugins/bigswitch/agent/restproxy_agent.py @@ -0,0 +1,181 @@ +# Copyright 2014 Big Switch Networks, Inc. +# All Rights Reserved. +# +# Copyright 2011 VMware, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# @author: Kevin Benton, kevin.benton@bigswitch.com + +import sys +import time + +import eventlet +eventlet.monkey_patch() + +from oslo.config import cfg + +from neutron.agent.linux import ovs_lib +from neutron.agent.linux import utils +from neutron.agent import rpc as agent_rpc +from neutron.agent import securitygroups_rpc as sg_rpc +from neutron.common import config +from neutron.common import rpc_compat +from neutron.common import topics +from neutron import context as q_context +from neutron.extensions import securitygroup as ext_sg +from neutron.openstack.common import excutils +from neutron.openstack.common import log +from neutron.plugins.bigswitch import config as pl_config + +LOG = log.getLogger(__name__) + + +class IVSBridge(ovs_lib.OVSBridge): + ''' + This class does not provide parity with OVS using IVS. + It's only the bare minimum necessary to use IVS with this agent. + ''' + def run_vsctl(self, args, check_error=False): + full_args = ["ivs-ctl"] + args + try: + return utils.execute(full_args, root_helper=self.root_helper) + except Exception as e: + with excutils.save_and_reraise_exception() as ctxt: + LOG.error(_("Unable to execute %(cmd)s. 
" + "Exception: %(exception)s"), + {'cmd': full_args, 'exception': e}) + if not check_error: + ctxt.reraise = False + + def get_vif_port_set(self): + port_names = self.get_port_name_list() + edge_ports = set(port_names) + return edge_ports + + def get_vif_port_by_id(self, port_id): + # IVS in nova uses hybrid method with last 14 chars of UUID + name = 'qvo%s' % port_id[:14] + if name in self.get_vif_port_set(): + return name + return False + + +class PluginApi(agent_rpc.PluginApi, + sg_rpc.SecurityGroupServerRpcApiMixin): + pass + + +class SecurityGroupAgent(sg_rpc.SecurityGroupAgentRpcMixin): + def __init__(self, context, plugin_rpc, root_helper): + self.context = context + self.plugin_rpc = plugin_rpc + self.root_helper = root_helper + self.init_firewall() + + +class RestProxyAgent(rpc_compat.RpcCallback, + sg_rpc.SecurityGroupAgentRpcCallbackMixin): + + RPC_API_VERSION = '1.1' + + def __init__(self, integ_br, polling_interval, root_helper, vs='ovs'): + super(RestProxyAgent, self).__init__() + self.polling_interval = polling_interval + self._setup_rpc() + self.sg_agent = SecurityGroupAgent(self.context, + self.plugin_rpc, + root_helper) + if vs == 'ivs': + self.int_br = IVSBridge(integ_br, root_helper) + else: + self.int_br = ovs_lib.OVSBridge(integ_br, root_helper) + + def _setup_rpc(self): + self.topic = topics.AGENT + self.plugin_rpc = PluginApi(topics.PLUGIN) + self.context = q_context.get_admin_context_without_session() + self.endpoints = [self] + consumers = [[topics.PORT, topics.UPDATE], + [topics.SECURITY_GROUP, topics.UPDATE]] + self.connection = agent_rpc.create_consumers(self.endpoints, + self.topic, + consumers) + + def port_update(self, context, **kwargs): + LOG.debug(_("Port update received")) + port = kwargs.get('port') + vif_port = self.int_br.get_vif_port_by_id(port['id']) + if not vif_port: + LOG.debug(_("Port %s is not present on this host."), port['id']) + return + + LOG.debug(_("Port %s found. 
Refreshing firewall."), port['id']) + if ext_sg.SECURITYGROUPS in port: + self.sg_agent.refresh_firewall() + + def _update_ports(self, registered_ports): + ports = self.int_br.get_vif_port_set() + if ports == registered_ports: + return + added = ports - registered_ports + removed = registered_ports - ports + return {'current': ports, + 'added': added, + 'removed': removed} + + def _process_devices_filter(self, port_info): + if 'added' in port_info: + self.sg_agent.prepare_devices_filter(port_info['added']) + if 'removed' in port_info: + self.sg_agent.remove_devices_filter(port_info['removed']) + + def daemon_loop(self): + ports = set() + + while True: + start = time.time() + try: + port_info = self._update_ports(ports) + if port_info: + LOG.debug(_("Agent loop has new device")) + self._process_devices_filter(port_info) + ports = port_info['current'] + except Exception: + LOG.exception(_("Error in agent event loop")) + + elapsed = max(time.time() - start, 0) + if (elapsed < self.polling_interval): + time.sleep(self.polling_interval - elapsed) + else: + LOG.debug(_("Loop iteration exceeded interval " + "(%(polling_interval)s vs. %(elapsed)s)!"), + {'polling_interval': self.polling_interval, + 'elapsed': elapsed}) + + +def main(): + config.init(sys.argv[1:]) + config.setup_logging(cfg.CONF) + pl_config.register_config() + + integ_br = cfg.CONF.RESTPROXYAGENT.integration_bridge + polling_interval = cfg.CONF.RESTPROXYAGENT.polling_interval + root_helper = cfg.CONF.AGENT.root_helper + bsnagent = RestProxyAgent(integ_br, polling_interval, root_helper, + cfg.CONF.RESTPROXYAGENT.virtual_switch_type) + bsnagent.daemon_loop() + sys.exit(0) + +if __name__ == "__main__": + main() diff --git a/neutron/plugins/bigswitch/config.py b/neutron/plugins/bigswitch/config.py new file mode 100644 index 000000000..4646319c9 --- /dev/null +++ b/neutron/plugins/bigswitch/config.py @@ -0,0 +1,123 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# Copyright 2014 Big Switch Networks, Inc. 
+# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Mandeep Dhami, Big Switch Networks, Inc. +# @author: Sumit Naiksatam, sumitnaiksatam@gmail.com, Big Switch Networks, Inc. +# @author: Kevin Benton, Big Switch Networks, Inc. + +""" +This module manages configuration options +""" + +from oslo.config import cfg + +from neutron.agent.common import config as agconfig +from neutron.common import utils +from neutron.extensions import portbindings + +restproxy_opts = [ + cfg.ListOpt('servers', default=['localhost:8800'], + help=_("A comma separated list of Big Switch or Floodlight " + "servers and port numbers. The plugin proxies the " + "requests to the Big Switch/Floodlight server, " + "which performs the networking configuration. 
Only one" + "server is needed per deployment, but you may wish to" + "deploy multiple servers to support failover.")), + cfg.StrOpt('server_auth', secret=True, + help=_("The username and password for authenticating against " + " the Big Switch or Floodlight controller.")), + cfg.BoolOpt('server_ssl', default=True, + help=_("If True, Use SSL when connecting to the Big Switch or " + "Floodlight controller.")), + cfg.BoolOpt('ssl_sticky', default=True, + help=_("Trust and store the first certificate received for " + "each controller address and use it to validate future " + "connections to that address.")), + cfg.BoolOpt('no_ssl_validation', default=False, + help=_("Disables SSL certificate validation for controllers")), + cfg.BoolOpt('cache_connections', default=True, + help=_("Re-use HTTP/HTTPS connections to the controller.")), + cfg.StrOpt('ssl_cert_directory', + default='/etc/neutron/plugins/bigswitch/ssl', + help=_("Directory containing ca_certs and host_certs " + "certificate directories.")), + cfg.BoolOpt('sync_data', default=False, + help=_("Sync data on connect")), + cfg.BoolOpt('auto_sync_on_failure', default=True, + help=_("If neutron fails to create a resource because " + "the backend controller doesn't know of a dependency, " + "the plugin automatically triggers a full data " + "synchronization to the controller.")), + cfg.IntOpt('consistency_interval', default=60, + help=_("Time between verifications that the backend controller " + "database is consistent with Neutron. 
(0 to disable)")), + cfg.IntOpt('server_timeout', default=10, + help=_("Maximum number of seconds to wait for proxy request " + "to connect and complete.")), + cfg.IntOpt('thread_pool_size', default=4, + help=_("Maximum number of threads to spawn to handle large " + "volumes of port creations.")), + cfg.StrOpt('neutron_id', default='neutron-' + utils.get_hostname(), + deprecated_name='quantum_id', + help=_("User defined identifier for this Neutron deployment")), + cfg.BoolOpt('add_meta_server_route', default=True, + help=_("Flag to decide if a route to the metadata server " + "should be injected into the VM")), +] +router_opts = [ + cfg.MultiStrOpt('tenant_default_router_rule', default=['*:any:any:permit'], + help=_("The default router rules installed in new tenant " + "routers. Repeat the config option for each rule. " + "Format is :::" + " Use an * to specify default for all tenants.")), + cfg.IntOpt('max_router_rules', default=200, + help=_("Maximum number of router rules")), +] +nova_opts = [ + cfg.StrOpt('vif_type', default='ovs', + help=_("Virtual interface type to configure on " + "Nova compute nodes")), +] + +# Each VIF Type can have a list of nova host IDs that are fixed to that type +for i in portbindings.VIF_TYPES: + opt = cfg.ListOpt('node_override_vif_' + i, default=[], + help=_("Nova compute nodes to manually set VIF " + "type to %s") % i) + nova_opts.append(opt) + +# Add the vif types for reference later +nova_opts.append(cfg.ListOpt('vif_types', + default=portbindings.VIF_TYPES, + help=_('List of allowed vif_type values.'))) + +agent_opts = [ + cfg.StrOpt('integration_bridge', default='br-int', + help=_('Name of integration bridge on compute ' + 'nodes used for security group insertion.')), + cfg.IntOpt('polling_interval', default=5, + help=_('Seconds between agent checks for port changes')), + cfg.StrOpt('virtual_switch_type', default='ovs', + help=_('Virtual switch type.')) +] + + +def register_config(): + cfg.CONF.register_opts(restproxy_opts, 
"RESTPROXY") + cfg.CONF.register_opts(router_opts, "ROUTER") + cfg.CONF.register_opts(nova_opts, "NOVA") + cfg.CONF.register_opts(agent_opts, "RESTPROXYAGENT") + agconfig.register_root_helper(cfg.CONF) diff --git a/neutron/plugins/bigswitch/db/__init__.py b/neutron/plugins/bigswitch/db/__init__.py new file mode 100644 index 000000000..c05daecf8 --- /dev/null +++ b/neutron/plugins/bigswitch/db/__init__.py @@ -0,0 +1,18 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 Big Switch Networks, Inc. +# All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Kevin Benton, Big Switch Networks, Inc. diff --git a/neutron/plugins/bigswitch/db/consistency_db.py b/neutron/plugins/bigswitch/db/consistency_db.py new file mode 100644 index 000000000..cd89a2690 --- /dev/null +++ b/neutron/plugins/bigswitch/db/consistency_db.py @@ -0,0 +1,56 @@ +# Copyright 2014, Big Switch Networks +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. +import sqlalchemy as sa + +from neutron.db import api as db +from neutron.db import model_base +from neutron.openstack.common import log as logging + +LOG = logging.getLogger(__name__) + +''' +A simple table to store the latest consistency hash +received from a server in case neutron gets restarted. +''' + + +class ConsistencyHash(model_base.BASEV2): + ''' + For now we only support one global state so the + hash_id will always be '1' + ''' + __tablename__ = 'consistencyhashes' + hash_id = sa.Column(sa.String(255), + primary_key=True) + hash = sa.Column(sa.String(255), nullable=False) + + +def get_consistency_hash(hash_id='1'): + session = db.get_session() + with session.begin(subtransactions=True): + query = session.query(ConsistencyHash) + res = query.filter_by(hash_id=hash_id).first() + if not res: + return False + return res.hash + + +def put_consistency_hash(hash, hash_id='1'): + session = db.get_session() + with session.begin(subtransactions=True): + conhash = ConsistencyHash(hash_id=hash_id, hash=hash) + session.merge(conhash) + LOG.debug(_("Consistency hash for group %(hash_id)s updated " + "to %(hash)s"), {'hash_id': hash_id, 'hash': hash}) diff --git a/neutron/plugins/bigswitch/db/porttracker_db.py b/neutron/plugins/bigswitch/db/porttracker_db.py new file mode 100644 index 000000000..7966c7c7d --- /dev/null +++ b/neutron/plugins/bigswitch/db/porttracker_db.py @@ -0,0 +1,53 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013, Big Switch Networks +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from neutron.api.v2 import attributes +from neutron.openstack.common import log as logging + +LOG = logging.getLogger(__name__) + + +def get_port_hostid(context, port_id): + # REVISIT(kevinbenton): this is a workaround to avoid portbindings_db + # relational table generation until one of the functions is called. + from neutron.db import portbindings_db + with context.session.begin(subtransactions=True): + query = context.session.query(portbindings_db.PortBindingPort) + res = query.filter_by(port_id=port_id).first() + if not res: + return False + return res.host + + +def put_port_hostid(context, port_id, host): + # REVISIT(kevinbenton): this is a workaround to avoid portbindings_db + # relational table generation until one of the functions is called. 
+ from neutron.db import portbindings_db + if not attributes.is_attr_set(host): + LOG.warning(_("No host_id in port request to track port location.")) + return + if port_id == '': + LOG.warning(_("Received an empty port ID for host_id '%s'"), host) + return + if host == '': + LOG.debug(_("Received an empty host_id for port '%s'"), port_id) + return + LOG.debug(_("Logging port %(port)s on host_id %(host)s"), + {'port': port_id, 'host': host}) + with context.session.begin(subtransactions=True): + location = portbindings_db.PortBindingPort(port_id=port_id, host=host) + context.session.merge(location) diff --git a/neutron/plugins/bigswitch/extensions/__init__.py b/neutron/plugins/bigswitch/extensions/__init__.py new file mode 100644 index 000000000..c05daecf8 --- /dev/null +++ b/neutron/plugins/bigswitch/extensions/__init__.py @@ -0,0 +1,18 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 Big Switch Networks, Inc. +# All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Kevin Benton, Big Switch Networks, Inc. diff --git a/neutron/plugins/bigswitch/extensions/routerrule.py b/neutron/plugins/bigswitch/extensions/routerrule.py new file mode 100644 index 000000000..2563d113d --- /dev/null +++ b/neutron/plugins/bigswitch/extensions/routerrule.py @@ -0,0 +1,144 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 Big Switch Networks, Inc. 
+# All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Kevin Benton, Big Switch Networks, Inc. + +from neutron.api.v2 import attributes as attr +from neutron.common import exceptions as qexception +from neutron.openstack.common import log as logging + + +LOG = logging.getLogger(__name__) + + +# Router Rules Exceptions +class InvalidRouterRules(qexception.InvalidInput): + message = _("Invalid format for router rules: %(rule)s, %(reason)s") + + +class RulesExhausted(qexception.BadRequest): + message = _("Unable to complete rules update for %(router_id)s. 
" + "The number of rules exceeds the maximum %(quota)s.") + + +def convert_to_valid_router_rules(data): + """ + Validates and converts router rules to the appropriate data structure + Example argument = [{'source': 'any', 'destination': 'any', + 'action':'deny'}, + {'source': '1.1.1.1/32', 'destination': 'external', + 'action':'permit', + 'nexthops': ['1.1.1.254', '1.1.1.253']} + ] + """ + V4ANY = '0.0.0.0/0' + CIDRALL = ['any', 'external'] + if not isinstance(data, list): + emsg = _("Invalid data format for router rule: '%s'") % data + LOG.debug(emsg) + raise qexception.InvalidInput(error_message=emsg) + _validate_uniquerules(data) + rules = [] + expected_keys = ['source', 'destination', 'action'] + for rule in data: + rule['nexthops'] = rule.get('nexthops', []) + if not isinstance(rule['nexthops'], list): + rule['nexthops'] = rule['nexthops'].split('+') + + src = V4ANY if rule['source'] in CIDRALL else rule['source'] + dst = V4ANY if rule['destination'] in CIDRALL else rule['destination'] + + errors = [attr._verify_dict_keys(expected_keys, rule, False), + attr._validate_subnet(dst), + attr._validate_subnet(src), + _validate_nexthops(rule['nexthops']), + _validate_action(rule['action'])] + errors = [m for m in errors if m] + if errors: + LOG.debug(errors) + raise qexception.InvalidInput(error_message=errors) + rules.append(rule) + return rules + + +def _validate_nexthops(nexthops): + seen = [] + for ip in nexthops: + msg = attr._validate_ip_address(ip) + if ip in seen: + msg = _("Duplicate nexthop in rule '%s'") % ip + seen.append(ip) + if msg: + return msg + + +def _validate_action(action): + if action not in ['permit', 'deny']: + return _("Action must be either permit or deny." 
+ " '%s' was provided") % action + + +def _validate_uniquerules(rules): + pairs = [] + for r in rules: + if 'source' not in r or 'destination' not in r: + continue + pairs.append((r['source'], r['destination'])) + + if len(set(pairs)) != len(pairs): + error = _("Duplicate router rules (src,dst) found '%s'") % pairs + LOG.debug(error) + raise qexception.InvalidInput(error_message=error) + + +class Routerrule(object): + + @classmethod + def get_name(cls): + return "Neutron Router Rule" + + @classmethod + def get_alias(cls): + return "router_rules" + + @classmethod + def get_description(cls): + return "Router rule configuration for L3 router" + + @classmethod + def get_namespace(cls): + return "http://docs.openstack.org/ext/neutron/routerrules/api/v1.0" + + @classmethod + def get_updated(cls): + return "2013-05-23T10:00:00-00:00" + + def get_extended_resources(self, version): + if version == "2.0": + return EXTENDED_ATTRIBUTES_2_0 + else: + return {} + +# Attribute Map +EXTENDED_ATTRIBUTES_2_0 = { + 'routers': { + 'router_rules': {'allow_post': False, 'allow_put': True, + 'convert_to': convert_to_valid_router_rules, + 'is_visible': True, + 'default': attr.ATTR_NOT_SPECIFIED}, + } +} diff --git a/neutron/plugins/bigswitch/plugin.py b/neutron/plugins/bigswitch/plugin.py new file mode 100644 index 000000000..c13c45b65 --- /dev/null +++ b/neutron/plugins/bigswitch/plugin.py @@ -0,0 +1,1115 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# Copyright 2012 Big Switch Networks, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Mandeep Dhami, Big Switch Networks, Inc. +# @author: Sumit Naiksatam, sumitnaiksatam@gmail.com, Big Switch Networks, Inc. + +""" +Neutron REST Proxy Plug-in for Big Switch and FloodLight Controllers. + +NeutronRestProxy provides a generic neutron plugin that translates all plugin +function calls to equivalent authenticated REST calls to a set of redundant +external network controllers. It also keeps persistent store for all neutron +state to allow for re-sync of the external controller(s), if required. + +The local state on the plugin also allows for local response and fast-fail +semantics where it can be determined based on the local persistent store. + +Network controller specific code is decoupled from this plugin and expected +to reside on the controller itself (via the REST interface). + +This allows for: + - independent authentication and redundancy schemes between neutron and the + network controller + - independent upgrade/development cycles between neutron and the controller + as it limits the proxy code upgrade requirement to neutron release cycle + and the controller specific code upgrade requirement to controller code + - ability to sync the controller with neutron for independent recovery/reset + +External REST API used by proxy is the same API as defined for neutron (JSON +subset) with some additional parameters (gateway on network-create and macaddr +on port-attach) on an additional PUT to do a bulk dump of all persistent data. 
+""" + +import copy +import httplib +import re + +import eventlet +from oslo.config import cfg +from sqlalchemy.orm import exc as sqlexc + +from neutron.agent import securitygroups_rpc as sg_rpc +from neutron.api import extensions as neutron_extensions +from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api +from neutron.common import constants as const +from neutron.common import exceptions +from neutron.common import rpc_compat +from neutron.common import topics +from neutron.common import utils +from neutron import context as qcontext +from neutron.db import agents_db +from neutron.db import agentschedulers_db +from neutron.db import allowedaddresspairs_db as addr_pair_db +from neutron.db import api as db +from neutron.db import db_base_plugin_v2 +from neutron.db import dhcp_rpc_base +from neutron.db import external_net_db +from neutron.db import extradhcpopt_db +from neutron.db import l3_db +from neutron.db import models_v2 +from neutron.db import securitygroups_db as sg_db +from neutron.db import securitygroups_rpc_base as sg_rpc_base +from neutron.extensions import allowedaddresspairs as addr_pair +from neutron.extensions import external_net +from neutron.extensions import extra_dhcp_opt as edo_ext +from neutron.extensions import l3 +from neutron.extensions import portbindings +from neutron import manager +from neutron.openstack.common import excutils +from neutron.openstack.common import importutils +from neutron.openstack.common import log as logging +from neutron.plugins.bigswitch import config as pl_config +from neutron.plugins.bigswitch.db import porttracker_db +from neutron.plugins.bigswitch import extensions +from neutron.plugins.bigswitch import routerrule_db +from neutron.plugins.bigswitch import servermanager +from neutron.plugins.bigswitch import version + +LOG = logging.getLogger(__name__) + +SYNTAX_ERROR_MESSAGE = _('Syntax error in server config file, aborting plugin') +METADATA_SERVER_IP = '169.254.169.254' + + +class 
AgentNotifierApi(rpc_compat.RpcProxy, + sg_rpc.SecurityGroupAgentRpcApiMixin): + + BASE_RPC_API_VERSION = '1.1' + + def __init__(self, topic): + super(AgentNotifierApi, self).__init__( + topic=topic, default_version=self.BASE_RPC_API_VERSION) + self.topic_port_update = topics.get_topic_name( + topic, topics.PORT, topics.UPDATE) + + def port_update(self, context, port): + self.fanout_cast(context, + self.make_msg('port_update', + port=port), + topic=self.topic_port_update) + + +class RestProxyCallbacks(rpc_compat.RpcCallback, + sg_rpc_base.SecurityGroupServerRpcCallbackMixin, + dhcp_rpc_base.DhcpRpcCallbackMixin): + + RPC_API_VERSION = '1.1' + + def get_port_from_device(self, device): + port_id = re.sub(r"^tap", "", device) + port = self.get_port_and_sgs(port_id) + if port: + port['device'] = device + return port + + def get_port_and_sgs(self, port_id): + """Get port from database with security group info.""" + + LOG.debug(_("get_port_and_sgs() called for port_id %s"), port_id) + session = db.get_session() + sg_binding_port = sg_db.SecurityGroupPortBinding.port_id + + with session.begin(subtransactions=True): + query = session.query( + models_v2.Port, + sg_db.SecurityGroupPortBinding.security_group_id + ) + query = query.outerjoin(sg_db.SecurityGroupPortBinding, + models_v2.Port.id == sg_binding_port) + query = query.filter(models_v2.Port.id.startswith(port_id)) + port_and_sgs = query.all() + if not port_and_sgs: + return + port = port_and_sgs[0][0] + plugin = manager.NeutronManager.get_plugin() + port_dict = plugin._make_port_dict(port) + port_dict['security_groups'] = [ + sg_id for port_, sg_id in port_and_sgs if sg_id] + port_dict['security_group_rules'] = [] + port_dict['security_group_source_groups'] = [] + port_dict['fixed_ips'] = [ip['ip_address'] + for ip in port['fixed_ips']] + return port_dict + + +class NeutronRestProxyV2Base(db_base_plugin_v2.NeutronDbPluginV2, + external_net_db.External_net_db_mixin, + routerrule_db.RouterRule_db_mixin): + + 
supported_extension_aliases = ["binding"] + servers = None + + def _get_all_data(self, get_ports=True, get_floating_ips=True, + get_routers=True): + admin_context = qcontext.get_admin_context() + networks = [] + # this method is used by the ML2 driver so it can't directly invoke + # the self.get_(ports|networks) methods + plugin = manager.NeutronManager.get_plugin() + all_networks = plugin.get_networks(admin_context) or [] + for net in all_networks: + mapped_network = self._get_mapped_network_with_subnets(net) + flips_n_ports = mapped_network + if get_floating_ips: + flips_n_ports = self._get_network_with_floatingips( + mapped_network) + + if get_ports: + ports = [] + net_filter = {'network_id': [net.get('id')]} + net_ports = plugin.get_ports(admin_context, + filters=net_filter) or [] + for port in net_ports: + mapped_port = self._map_state_and_status(port) + mapped_port['attachment'] = { + 'id': port.get('device_id'), + 'mac': port.get('mac_address'), + } + mapped_port = self._extend_port_dict_binding(admin_context, + mapped_port) + ports.append(mapped_port) + flips_n_ports['ports'] = ports + + if flips_n_ports: + networks.append(flips_n_ports) + + data = {'networks': networks} + + if get_routers: + routers = [] + all_routers = self.get_routers(admin_context) or [] + for router in all_routers: + interfaces = [] + mapped_router = self._map_state_and_status(router) + router_filter = { + 'device_owner': [const.DEVICE_OWNER_ROUTER_INTF], + 'device_id': [router.get('id')] + } + router_ports = self.get_ports(admin_context, + filters=router_filter) or [] + for port in router_ports: + net_id = port.get('network_id') + subnet_id = port['fixed_ips'][0]['subnet_id'] + intf_details = self._get_router_intf_details(admin_context, + net_id, + subnet_id) + interfaces.append(intf_details) + mapped_router['interfaces'] = interfaces + + routers.append(mapped_router) + + data.update({'routers': routers}) + return data + + def _send_all_data(self, send_ports=True, 
send_floating_ips=True, + send_routers=True, timeout=None, + triggered_by_tenant=None): + """Pushes all data to network ctrl (networks/ports, ports/attachments). + + This gives the controller an option to re-sync it's persistent store + with neutron's current view of that data. + """ + data = self._get_all_data(send_ports, send_floating_ips, send_routers) + data['triggered_by_tenant'] = triggered_by_tenant + errstr = _("Unable to update remote topology: %s") + return self.servers.rest_action('PUT', servermanager.TOPOLOGY_PATH, + data, errstr, timeout=timeout) + + def _get_network_with_floatingips(self, network, context=None): + if context is None: + context = qcontext.get_admin_context() + + net_id = network['id'] + net_filter = {'floating_network_id': [net_id]} + fl_ips = self.get_floatingips(context, + filters=net_filter) or [] + network['floatingips'] = fl_ips + + return network + + def _get_all_subnets_json_for_network(self, net_id, context=None): + if context is None: + context = qcontext.get_admin_context() + # start a sub-transaction to avoid breaking parent transactions + with context.session.begin(subtransactions=True): + subnets = self._get_subnets_by_network(context, + net_id) + subnets_details = [] + if subnets: + for subnet in subnets: + subnet_dict = self._make_subnet_dict(subnet) + mapped_subnet = self._map_state_and_status(subnet_dict) + subnets_details.append(mapped_subnet) + + return subnets_details + + def _get_mapped_network_with_subnets(self, network, context=None): + # if context is not provided, admin context is used + if context is None: + context = qcontext.get_admin_context() + network = self._map_state_and_status(network) + subnets = self._get_all_subnets_json_for_network(network['id'], + context) + network['subnets'] = subnets + for subnet in (subnets or []): + if subnet['gateway_ip']: + # FIX: For backward compatibility with wire protocol + network['gateway'] = subnet['gateway_ip'] + break + else: + network['gateway'] = '' + 
network[external_net.EXTERNAL] = self._network_is_external( + context, network['id']) + # include ML2 segmentation types + network['segmentation_types'] = getattr(self, "segmentation_types", "") + return network + + def _send_create_network(self, network, context=None): + tenant_id = network['tenant_id'] + mapped_network = self._get_mapped_network_with_subnets(network, + context) + self.servers.rest_create_network(tenant_id, mapped_network) + + def _send_update_network(self, network, context=None): + net_id = network['id'] + tenant_id = network['tenant_id'] + mapped_network = self._get_mapped_network_with_subnets(network, + context) + net_fl_ips = self._get_network_with_floatingips(mapped_network, + context) + self.servers.rest_update_network(tenant_id, net_id, net_fl_ips) + + def _send_delete_network(self, network, context=None): + net_id = network['id'] + tenant_id = network['tenant_id'] + self.servers.rest_delete_network(tenant_id, net_id) + + def _map_state_and_status(self, resource): + resource = copy.copy(resource) + + resource['state'] = ('UP' if resource.pop('admin_state_up', + True) else 'DOWN') + resource.pop('status', None) + + return resource + + def _warn_on_state_status(self, resource): + if resource.get('admin_state_up', True) is False: + LOG.warning(_("Setting admin_state_up=False is not supported " + "in this plugin version. Ignoring setting for " + "resource: %s"), resource) + + if 'status' in resource: + if resource['status'] != const.NET_STATUS_ACTIVE: + LOG.warning(_("Operational status is internally set by the " + "plugin. 
Ignoring setting status=%s."), + resource['status']) + + def _get_router_intf_details(self, context, intf_id, subnet_id): + + # we will use the network id as interface's id + net_id = intf_id + network = self.get_network(context, net_id) + subnet = self.get_subnet(context, subnet_id) + mapped_network = self._get_mapped_network_with_subnets(network) + mapped_subnet = self._map_state_and_status(subnet) + + data = { + 'id': intf_id, + "network": mapped_network, + "subnet": mapped_subnet + } + + return data + + def _extend_port_dict_binding(self, context, port): + cfg_vif_type = cfg.CONF.NOVA.vif_type.lower() + if not cfg_vif_type in (portbindings.VIF_TYPE_OVS, + portbindings.VIF_TYPE_IVS): + LOG.warning(_("Unrecognized vif_type in configuration " + "[%s]. Defaulting to ovs."), + cfg_vif_type) + cfg_vif_type = portbindings.VIF_TYPE_OVS + # In ML2, the host_id is already populated + if portbindings.HOST_ID in port: + hostid = port[portbindings.HOST_ID] + else: + hostid = porttracker_db.get_port_hostid(context, port['id']) + if hostid: + port[portbindings.HOST_ID] = hostid + override = self._check_hostvif_override(hostid) + if override: + cfg_vif_type = override + port[portbindings.VIF_TYPE] = cfg_vif_type + + port[portbindings.VIF_DETAILS] = { + # TODO(rkukura): Replace with new VIF security details + portbindings.CAP_PORT_FILTER: + 'security-group' in self.supported_extension_aliases, + portbindings.OVS_HYBRID_PLUG: True + } + return port + + def _check_hostvif_override(self, hostid): + for v in cfg.CONF.NOVA.vif_types: + if hostid in getattr(cfg.CONF.NOVA, "node_override_vif_" + v, []): + return v + return False + + def _get_port_net_tenantid(self, context, port): + net = super(NeutronRestProxyV2Base, + self).get_network(context, port["network_id"]) + return net['tenant_id'] + + def async_port_create(self, tenant_id, net_id, port): + try: + self.servers.rest_create_port(tenant_id, net_id, port) + except servermanager.RemoteRestError as e: + # 404 should never be 
received on a port create unless + # there are inconsistencies between the data in neutron + # and the data in the backend. + # Run a sync to get it consistent. + if (cfg.CONF.RESTPROXY.auto_sync_on_failure and + e.status == httplib.NOT_FOUND and + servermanager.NXNETWORK in e.reason): + LOG.error(_("Iconsistency with backend controller " + "triggering full synchronization.")) + # args depend on if we are operating in ML2 driver + # or as the full plugin + topoargs = self.servers.get_topo_function_args + self._send_all_data( + send_ports=topoargs['get_ports'], + send_floating_ips=topoargs['get_floating_ips'], + send_routers=topoargs['get_routers'], + triggered_by_tenant=tenant_id + ) + # If the full sync worked, the port will be created + # on the controller so it can be safely marked as active + else: + # Any errors that don't result in a successful auto-sync + # require that the port be placed into the error state. + LOG.error( + _("NeutronRestProxyV2: Unable to create port: %s"), e) + try: + self._set_port_status(port['id'], const.PORT_STATUS_ERROR) + except exceptions.PortNotFound: + # If port is already gone from DB and there was an error + # creating on the backend, everything is already consistent + pass + return + new_status = (const.PORT_STATUS_ACTIVE if port['state'] == 'UP' + else const.PORT_STATUS_DOWN) + try: + self._set_port_status(port['id'], new_status) + except exceptions.PortNotFound: + # This port was deleted before the create made it to the controller + # so it now needs to be deleted since the normal delete request + # would have deleted an non-existent port. 
+ self.servers.rest_delete_port(tenant_id, net_id, port['id']) + + # NOTE(kevinbenton): workaround for eventlet/mysql deadlock + @utils.synchronized('bsn-port-barrier') + def _set_port_status(self, port_id, status): + session = db.get_session() + try: + port = session.query(models_v2.Port).filter_by(id=port_id).one() + port['status'] = status + session.flush() + except sqlexc.NoResultFound: + raise exceptions.PortNotFound(port_id=port_id) + + +class NeutronRestProxyV2(NeutronRestProxyV2Base, + addr_pair_db.AllowedAddressPairsMixin, + extradhcpopt_db.ExtraDhcpOptMixin, + agentschedulers_db.DhcpAgentSchedulerDbMixin, + sg_rpc_base.SecurityGroupServerRpcMixin): + + _supported_extension_aliases = ["external-net", "router", "binding", + "router_rules", "extra_dhcp_opt", "quotas", + "dhcp_agent_scheduler", "agent", + "security-group", "allowed-address-pairs"] + + @property + def supported_extension_aliases(self): + if not hasattr(self, '_aliases'): + aliases = self._supported_extension_aliases[:] + sg_rpc.disable_security_group_extension_by_config(aliases) + self._aliases = aliases + return self._aliases + + def __init__(self): + super(NeutronRestProxyV2, self).__init__() + LOG.info(_('NeutronRestProxy: Starting plugin. 
Version=%s'), + version.version_string_with_vcs()) + pl_config.register_config() + self.evpool = eventlet.GreenPool(cfg.CONF.RESTPROXY.thread_pool_size) + + # Include the Big Switch Extensions path in the api_extensions + neutron_extensions.append_api_extensions_path(extensions.__path__) + + self.add_meta_server_route = cfg.CONF.RESTPROXY.add_meta_server_route + + # init network ctrl connections + self.servers = servermanager.ServerPool() + self.servers.get_topo_function = self._get_all_data + self.servers.get_topo_function_args = {'get_ports': True, + 'get_floating_ips': True, + 'get_routers': True} + + self.network_scheduler = importutils.import_object( + cfg.CONF.network_scheduler_driver + ) + + # setup rpc for security and DHCP agents + self._setup_rpc() + + if cfg.CONF.RESTPROXY.sync_data: + self._send_all_data() + + LOG.debug(_("NeutronRestProxyV2: initialization done")) + + def _setup_rpc(self): + self.conn = rpc_compat.create_connection(new=True) + self.topic = topics.PLUGIN + self.notifier = AgentNotifierApi(topics.AGENT) + # init dhcp agent support + self._dhcp_agent_notifier = dhcp_rpc_agent_api.DhcpAgentNotifyAPI() + self.agent_notifiers[const.AGENT_TYPE_DHCP] = ( + self._dhcp_agent_notifier + ) + self.endpoints = [RestProxyCallbacks(), + agents_db.AgentExtRpcCallback()] + self.conn.create_consumer(self.topic, self.endpoints, + fanout=False) + # Consume from all consumers in threads + self.conn.consume_in_threads() + + def create_network(self, context, network): + """Create a network. + + Network represents an L2 network segment which can have a set of + subnets and ports associated with it. + + :param context: neutron api request context + :param network: dictionary describing the network + + :returns: a sequence of mappings with the following signature: + { + "id": UUID representing the network. + "name": Human-readable name identifying the network. + "tenant_id": Owner of network. NOTE: only admin user can specify + a tenant_id other than its own. 
+ "admin_state_up": Sets admin state of network. + if down, network does not forward packets. + "status": Indicates whether network is currently operational + (values are "ACTIVE", "DOWN", "BUILD", and "ERROR") + "subnets": Subnets associated with this network. + } + + :raises: RemoteRestError + """ + LOG.debug(_("NeutronRestProxyV2: create_network() called")) + + self._warn_on_state_status(network['network']) + + with context.session.begin(subtransactions=True): + self._ensure_default_security_group( + context, + network['network']["tenant_id"] + ) + # create network in DB + new_net = super(NeutronRestProxyV2, self).create_network(context, + network) + self._process_l3_create(context, new_net, network['network']) + # create network on the network controller + self._send_create_network(new_net, context) + + # return created network + return new_net + + def update_network(self, context, net_id, network): + """Updates the properties of a particular Virtual Network. + + :param context: neutron api request context + :param net_id: uuid of the network to update + :param network: dictionary describing the updates + + :returns: a sequence of mappings with the following signature: + { + "id": UUID representing the network. + "name": Human-readable name identifying the network. + "tenant_id": Owner of network. NOTE: only admin user can + specify a tenant_id other than its own. + "admin_state_up": Sets admin state of network. + if down, network does not forward packets. + "status": Indicates whether network is currently operational + (values are "ACTIVE", "DOWN", "BUILD", and "ERROR") + "subnets": Subnets associated with this network. 
+ } + + :raises: exceptions.NetworkNotFound + :raises: RemoteRestError + """ + LOG.debug(_("NeutronRestProxyV2.update_network() called")) + + self._warn_on_state_status(network['network']) + + session = context.session + with session.begin(subtransactions=True): + new_net = super(NeutronRestProxyV2, self).update_network( + context, net_id, network) + self._process_l3_update(context, new_net, network['network']) + + # update network on network controller + self._send_update_network(new_net, context) + return new_net + + # NOTE(kevinbenton): workaround for eventlet/mysql deadlock + @utils.synchronized('bsn-port-barrier') + def delete_network(self, context, net_id): + """Delete a network. + :param context: neutron api request context + :param id: UUID representing the network to delete. + + :returns: None + + :raises: exceptions.NetworkInUse + :raises: exceptions.NetworkNotFound + :raises: RemoteRestError + """ + LOG.debug(_("NeutronRestProxyV2: delete_network() called")) + + # Validate args + orig_net = super(NeutronRestProxyV2, self).get_network(context, net_id) + with context.session.begin(subtransactions=True): + self._process_l3_delete(context, net_id) + ret_val = super(NeutronRestProxyV2, self).delete_network(context, + net_id) + self._send_delete_network(orig_net, context) + return ret_val + + def create_port(self, context, port): + """Create a port, which is a connection point of a device + (e.g., a VM NIC) to attach to a L2 Neutron network. + :param context: neutron api request context + :param port: dictionary describing the port + + :returns: + { + "id": uuid represeting the port. + "network_id": uuid of network. + "tenant_id": tenant_id + "mac_address": mac address to use on this port. + "admin_state_up": Sets admin state of port. if down, port + does not forward packets. 
+ "status": dicates whether port is currently operational + (limit values to "ACTIVE", "DOWN", "BUILD", and "ERROR") + "fixed_ips": list of subnet ID"s and IP addresses to be used on + this port + "device_id": identifies the device (e.g., virtual server) using + this port. + } + + :raises: exceptions.NetworkNotFound + :raises: exceptions.StateInvalid + :raises: RemoteRestError + """ + LOG.debug(_("NeutronRestProxyV2: create_port() called")) + + # Update DB in new session so exceptions rollback changes + with context.session.begin(subtransactions=True): + self._ensure_default_security_group_on_port(context, port) + sgids = self._get_security_groups_on_port(context, port) + # non-router port status is set to pending. it is then updated + # after the async rest call completes. router ports are synchronous + if port['port']['device_owner'] == l3_db.DEVICE_OWNER_ROUTER_INTF: + port['port']['status'] = const.PORT_STATUS_ACTIVE + else: + port['port']['status'] = const.PORT_STATUS_BUILD + dhcp_opts = port['port'].get(edo_ext.EXTRADHCPOPTS, []) + new_port = super(NeutronRestProxyV2, self).create_port(context, + port) + self._process_port_create_security_group(context, new_port, sgids) + if (portbindings.HOST_ID in port['port'] + and 'id' in new_port): + host_id = port['port'][portbindings.HOST_ID] + porttracker_db.put_port_hostid(context, new_port['id'], + host_id) + new_port[addr_pair.ADDRESS_PAIRS] = ( + self._process_create_allowed_address_pairs( + context, new_port, + port['port'].get(addr_pair.ADDRESS_PAIRS))) + self._process_port_create_extra_dhcp_opts(context, new_port, + dhcp_opts) + new_port = self._extend_port_dict_binding(context, new_port) + net = super(NeutronRestProxyV2, + self).get_network(context, new_port["network_id"]) + if self.add_meta_server_route: + if new_port['device_owner'] == const.DEVICE_OWNER_DHCP: + destination = METADATA_SERVER_IP + '/32' + self._add_host_route(context, destination, new_port) + + # create on network ctrl + mapped_port = 
self._map_state_and_status(new_port) + # ports have to be created synchronously when creating a router + # port since adding router interfaces is a multi-call process + if mapped_port['device_owner'] == l3_db.DEVICE_OWNER_ROUTER_INTF: + self.servers.rest_create_port(net["tenant_id"], + new_port["network_id"], + mapped_port) + else: + self.evpool.spawn_n(self.async_port_create, net["tenant_id"], + new_port["network_id"], mapped_port) + self.notify_security_groups_member_updated(context, new_port) + return new_port + + def get_port(self, context, id, fields=None): + with context.session.begin(subtransactions=True): + port = super(NeutronRestProxyV2, self).get_port(context, id, + fields) + self._extend_port_dict_binding(context, port) + return self._fields(port, fields) + + def get_ports(self, context, filters=None, fields=None): + with context.session.begin(subtransactions=True): + ports = super(NeutronRestProxyV2, self).get_ports(context, filters, + fields) + for port in ports: + self._extend_port_dict_binding(context, port) + return [self._fields(port, fields) for port in ports] + + def update_port(self, context, port_id, port): + """Update values of a port. + + :param context: neutron api request context + :param id: UUID representing the port to update. + :param port: dictionary with keys indicating fields to update. + + :returns: a mapping sequence with the following signature: + { + "id": uuid represeting the port. + "network_id": uuid of network. + "tenant_id": tenant_id + "mac_address": mac address to use on this port. + "admin_state_up": sets admin state of port. if down, port + does not forward packets. + "status": dicates whether port is currently operational + (limit values to "ACTIVE", "DOWN", "BUILD", and "ERROR") + "fixed_ips": list of subnet ID's and IP addresses to be used on + this port + "device_id": identifies the device (e.g., virtual server) using + this port. 
+ } + + :raises: exceptions.StateInvalid + :raises: exceptions.PortNotFound + :raises: RemoteRestError + """ + LOG.debug(_("NeutronRestProxyV2: update_port() called")) + + self._warn_on_state_status(port['port']) + + # Validate Args + orig_port = super(NeutronRestProxyV2, self).get_port(context, port_id) + with context.session.begin(subtransactions=True): + # Update DB + new_port = super(NeutronRestProxyV2, + self).update_port(context, port_id, port) + ctrl_update_required = False + if addr_pair.ADDRESS_PAIRS in port['port']: + ctrl_update_required |= ( + self.update_address_pairs_on_port(context, port_id, port, + orig_port, new_port)) + self._update_extra_dhcp_opts_on_port(context, port_id, port, + new_port) + old_host_id = porttracker_db.get_port_hostid(context, + orig_port['id']) + if (portbindings.HOST_ID in port['port'] + and 'id' in new_port): + host_id = port['port'][portbindings.HOST_ID] + porttracker_db.put_port_hostid(context, new_port['id'], + host_id) + if old_host_id != host_id: + ctrl_update_required = True + + if (new_port.get("device_id") != orig_port.get("device_id") and + orig_port.get("device_id")): + ctrl_update_required = True + + if ctrl_update_required: + # tenant_id must come from network in case network is shared + net_tenant_id = self._get_port_net_tenantid(context, new_port) + new_port = self._extend_port_dict_binding(context, new_port) + mapped_port = self._map_state_and_status(new_port) + self.servers.rest_update_port(net_tenant_id, + new_port["network_id"], + mapped_port) + agent_update_required = self.update_security_group_on_port( + context, port_id, port, orig_port, new_port) + agent_update_required |= self.is_security_group_member_updated( + context, orig_port, new_port) + + # return new_port + return new_port + + # NOTE(kevinbenton): workaround for eventlet/mysql deadlock + @utils.synchronized('bsn-port-barrier') + def delete_port(self, context, port_id, l3_port_check=True): + """Delete a port. 
+ :param context: neutron api request context + :param id: UUID representing the port to delete. + + :raises: exceptions.PortInUse + :raises: exceptions.PortNotFound + :raises: exceptions.NetworkNotFound + :raises: RemoteRestError + """ + LOG.debug(_("NeutronRestProxyV2: delete_port() called")) + + # if needed, check to see if this is a port owned by + # and l3-router. If so, we should prevent deletion. + if l3_port_check: + self.prevent_l3_port_deletion(context, port_id) + with context.session.begin(subtransactions=True): + self.disassociate_floatingips(context, port_id) + self._delete_port_security_group_bindings(context, port_id) + port = super(NeutronRestProxyV2, self).get_port(context, port_id) + # Tenant ID must come from network in case the network is shared + tenid = self._get_port_net_tenantid(context, port) + self._delete_port(context, port_id) + self.servers.rest_delete_port(tenid, port['network_id'], port_id) + + def create_subnet(self, context, subnet): + LOG.debug(_("NeutronRestProxyV2: create_subnet() called")) + + self._warn_on_state_status(subnet['subnet']) + + with context.session.begin(subtransactions=True): + # create subnet in DB + new_subnet = super(NeutronRestProxyV2, + self).create_subnet(context, subnet) + net_id = new_subnet['network_id'] + orig_net = super(NeutronRestProxyV2, + self).get_network(context, net_id) + # update network on network controller + self._send_update_network(orig_net, context) + return new_subnet + + def update_subnet(self, context, id, subnet): + LOG.debug(_("NeutronRestProxyV2: update_subnet() called")) + + self._warn_on_state_status(subnet['subnet']) + + with context.session.begin(subtransactions=True): + # update subnet in DB + new_subnet = super(NeutronRestProxyV2, + self).update_subnet(context, id, subnet) + net_id = new_subnet['network_id'] + orig_net = super(NeutronRestProxyV2, + self).get_network(context, net_id) + # update network on network controller + self._send_update_network(orig_net, context) + return 
new_subnet + + # NOTE(kevinbenton): workaround for eventlet/mysql deadlock + @utils.synchronized('bsn-port-barrier') + def delete_subnet(self, context, id): + LOG.debug(_("NeutronRestProxyV2: delete_subnet() called")) + orig_subnet = super(NeutronRestProxyV2, self).get_subnet(context, id) + net_id = orig_subnet['network_id'] + with context.session.begin(subtransactions=True): + # delete subnet in DB + super(NeutronRestProxyV2, self).delete_subnet(context, id) + orig_net = super(NeutronRestProxyV2, self).get_network(context, + net_id) + # update network on network controller - exception will rollback + self._send_update_network(orig_net, context) + + def _get_tenant_default_router_rules(self, tenant): + rules = cfg.CONF.ROUTER.tenant_default_router_rule + defaultset = [] + tenantset = [] + for rule in rules: + items = rule.split(':') + if len(items) == 5: + (tenantid, source, destination, action, nexthops) = items + elif len(items) == 4: + (tenantid, source, destination, action) = items + nexthops = '' + else: + continue + parsedrule = {'source': source, + 'destination': destination, 'action': action, + 'nexthops': nexthops.split(',')} + if parsedrule['nexthops'][0] == '': + parsedrule['nexthops'] = [] + if tenantid == '*': + defaultset.append(parsedrule) + if tenantid == tenant: + tenantset.append(parsedrule) + if tenantset: + return tenantset + return defaultset + + def create_router(self, context, router): + LOG.debug(_("NeutronRestProxyV2: create_router() called")) + + self._warn_on_state_status(router['router']) + + tenant_id = self._get_tenant_id_for_create(context, router["router"]) + + # set default router rules + rules = self._get_tenant_default_router_rules(tenant_id) + router['router']['router_rules'] = rules + + with context.session.begin(subtransactions=True): + # create router in DB + new_router = super(NeutronRestProxyV2, self).create_router(context, + router) + mapped_router = self._map_state_and_status(new_router) + 
self.servers.rest_create_router(tenant_id, mapped_router) + + # return created router + return new_router + + def update_router(self, context, router_id, router): + + LOG.debug(_("NeutronRestProxyV2.update_router() called")) + + self._warn_on_state_status(router['router']) + + orig_router = super(NeutronRestProxyV2, self).get_router(context, + router_id) + tenant_id = orig_router["tenant_id"] + with context.session.begin(subtransactions=True): + new_router = super(NeutronRestProxyV2, + self).update_router(context, router_id, router) + router = self._map_state_and_status(new_router) + + # update router on network controller + self.servers.rest_update_router(tenant_id, router, router_id) + + # return updated router + return new_router + + # NOTE(kevinbenton): workaround for eventlet/mysql deadlock. + # delete_router ends up calling _delete_port instead of delete_port. + @utils.synchronized('bsn-port-barrier') + def delete_router(self, context, router_id): + LOG.debug(_("NeutronRestProxyV2: delete_router() called")) + + with context.session.begin(subtransactions=True): + orig_router = self._get_router(context, router_id) + tenant_id = orig_router["tenant_id"] + + # Ensure that the router is not used + router_filter = {'router_id': [router_id]} + fips = self.get_floatingips_count(context.elevated(), + filters=router_filter) + if fips: + raise l3.RouterInUse(router_id=router_id) + + device_owner = l3_db.DEVICE_OWNER_ROUTER_INTF + device_filter = {'device_id': [router_id], + 'device_owner': [device_owner]} + ports = self.get_ports_count(context.elevated(), + filters=device_filter) + if ports: + raise l3.RouterInUse(router_id=router_id) + ret_val = super(NeutronRestProxyV2, + self).delete_router(context, router_id) + + # delete from network ctrl + self.servers.rest_delete_router(tenant_id, router_id) + return ret_val + + def add_router_interface(self, context, router_id, interface_info): + + LOG.debug(_("NeutronRestProxyV2: add_router_interface() called")) + + # Validate 
args + router = self._get_router(context, router_id) + tenant_id = router['tenant_id'] + + with context.session.begin(subtransactions=True): + # create interface in DB + new_intf_info = super(NeutronRestProxyV2, + self).add_router_interface(context, + router_id, + interface_info) + port = self._get_port(context, new_intf_info['port_id']) + net_id = port['network_id'] + subnet_id = new_intf_info['subnet_id'] + # we will use the port's network id as interface's id + interface_id = net_id + intf_details = self._get_router_intf_details(context, + interface_id, + subnet_id) + + # create interface on the network controller + self.servers.rest_add_router_interface(tenant_id, router_id, + intf_details) + return new_intf_info + + def remove_router_interface(self, context, router_id, interface_info): + + LOG.debug(_("NeutronRestProxyV2: remove_router_interface() called")) + + # Validate args + router = self._get_router(context, router_id) + tenant_id = router['tenant_id'] + + # we will first get the interface identifier before deleting in the DB + if not interface_info: + msg = _("Either subnet_id or port_id must be specified") + raise exceptions.BadRequest(resource='router', msg=msg) + if 'port_id' in interface_info: + port = self._get_port(context, interface_info['port_id']) + interface_id = port['network_id'] + elif 'subnet_id' in interface_info: + subnet = self._get_subnet(context, interface_info['subnet_id']) + interface_id = subnet['network_id'] + else: + msg = _("Either subnet_id or port_id must be specified") + raise exceptions.BadRequest(resource='router', msg=msg) + + with context.session.begin(subtransactions=True): + # remove router in DB + del_ret = super(NeutronRestProxyV2, + self).remove_router_interface(context, + router_id, + interface_info) + + # create router on the network controller + self.servers.rest_remove_router_interface(tenant_id, router_id, + interface_id) + return del_ret + + def create_floatingip(self, context, floatingip): + 
LOG.debug(_("NeutronRestProxyV2: create_floatingip() called")) + + with context.session.begin(subtransactions=True): + # create floatingip in DB + new_fl_ip = super(NeutronRestProxyV2, + self).create_floatingip(context, floatingip) + + # create floatingip on the network controller + try: + if 'floatingip' in self.servers.get_capabilities(): + self.servers.rest_create_floatingip( + new_fl_ip['tenant_id'], new_fl_ip) + else: + self._send_floatingip_update(context) + except servermanager.RemoteRestError as e: + with excutils.save_and_reraise_exception(): + LOG.error( + _("NeutronRestProxyV2: Unable to create remote " + "floating IP: %s"), e) + # return created floating IP + return new_fl_ip + + def update_floatingip(self, context, id, floatingip): + LOG.debug(_("NeutronRestProxyV2: update_floatingip() called")) + + with context.session.begin(subtransactions=True): + # update floatingip in DB + new_fl_ip = super(NeutronRestProxyV2, + self).update_floatingip(context, id, floatingip) + + # update network on network controller + if 'floatingip' in self.servers.get_capabilities(): + self.servers.rest_update_floatingip(new_fl_ip['tenant_id'], + new_fl_ip, id) + else: + self._send_floatingip_update(context) + return new_fl_ip + + def delete_floatingip(self, context, id): + LOG.debug(_("NeutronRestProxyV2: delete_floatingip() called")) + + with context.session.begin(subtransactions=True): + # delete floating IP in DB + old_fip = super(NeutronRestProxyV2, self).get_floatingip(context, + id) + super(NeutronRestProxyV2, self).delete_floatingip(context, id) + + # update network on network controller + if 'floatingip' in self.servers.get_capabilities(): + self.servers.rest_delete_floatingip(old_fip['tenant_id'], id) + else: + self._send_floatingip_update(context) + + def disassociate_floatingips(self, context, port_id): + LOG.debug(_("NeutronRestProxyV2: diassociate_floatingips() called")) + super(NeutronRestProxyV2, self).disassociate_floatingips(context, + port_id) + 
self._send_floatingip_update(context) + + # overriding method from l3_db as original method calls + # self.delete_floatingip() which in turn calls self.delete_port() which + # is locked with 'bsn-port-barrier' + def delete_disassociated_floatingips(self, context, network_id): + query = self._model_query(context, l3_db.FloatingIP) + query = query.filter_by(floating_network_id=network_id, + fixed_port_id=None, + router_id=None) + for fip in query: + context.session.delete(fip) + self._delete_port(context.elevated(), fip['floating_port_id']) + + def _send_floatingip_update(self, context): + try: + ext_net_id = self.get_external_network_id(context) + if ext_net_id: + # Use the elevated state of the context for the ext_net query + admin_context = context.elevated() + ext_net = super(NeutronRestProxyV2, + self).get_network(admin_context, ext_net_id) + # update external network on network controller + self._send_update_network(ext_net, admin_context) + except exceptions.TooManyExternalNetworks: + # get_external_network can raise errors when multiple external + # networks are detected, which isn't supported by the Plugin + LOG.error(_("NeutronRestProxyV2: too many external networks")) + + def _add_host_route(self, context, destination, port): + subnet = {} + for fixed_ip in port['fixed_ips']: + subnet_id = fixed_ip['subnet_id'] + nexthop = fixed_ip['ip_address'] + subnet['host_routes'] = [{'destination': destination, + 'nexthop': nexthop}] + updated_subnet = self.update_subnet(context, + subnet_id, + {'subnet': subnet}) + payload = {'subnet': updated_subnet} + self._dhcp_agent_notifier.notify(context, payload, + 'subnet.update.end') + LOG.debug(_("Adding host route: ")) + LOG.debug(_("Destination:%(dst)s nexthop:%(next)s"), + {'dst': destination, 'next': nexthop}) diff --git a/neutron/plugins/bigswitch/routerrule_db.py b/neutron/plugins/bigswitch/routerrule_db.py new file mode 100644 index 000000000..e947a1f9d --- /dev/null +++ b/neutron/plugins/bigswitch/routerrule_db.py 
@@ -0,0 +1,148 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013, Big Switch Networks +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo.config import cfg +import sqlalchemy as sa +from sqlalchemy import orm + +from neutron.db import l3_db +from neutron.db import model_base +from neutron.openstack.common import log as logging +from neutron.plugins.bigswitch.extensions import routerrule + + +LOG = logging.getLogger(__name__) + + +class RouterRule(model_base.BASEV2): + id = sa.Column(sa.Integer, primary_key=True) + source = sa.Column(sa.String(64), nullable=False) + destination = sa.Column(sa.String(64), nullable=False) + nexthops = orm.relationship('NextHop', cascade='all,delete') + action = sa.Column(sa.String(10), nullable=False) + router_id = sa.Column(sa.String(36), + sa.ForeignKey('routers.id', + ondelete="CASCADE")) + + +class NextHop(model_base.BASEV2): + rule_id = sa.Column(sa.Integer, + sa.ForeignKey('routerrules.id', + ondelete="CASCADE"), + primary_key=True) + nexthop = sa.Column(sa.String(64), nullable=False, primary_key=True) + + +class RouterRule_db_mixin(l3_db.L3_NAT_db_mixin): + """ Mixin class to support route rule configuration on a router""" + def update_router(self, context, id, router): + r = router['router'] + with context.session.begin(subtransactions=True): + router_db = self._get_router(context, id) + if 'router_rules' in r: + self._update_router_rules(context, + router_db, + 
r['router_rules']) + updated = super(RouterRule_db_mixin, self).update_router( + context, id, router) + updated['router_rules'] = self._get_router_rules_by_router_id( + context, id) + + return updated + + def create_router(self, context, router): + r = router['router'] + with context.session.begin(subtransactions=True): + router_db = super(RouterRule_db_mixin, self).create_router( + context, router) + if 'router_rules' in r: + self._update_router_rules(context, + router_db, + r['router_rules']) + else: + LOG.debug(_('No rules in router')) + router_db['router_rules'] = self._get_router_rules_by_router_id( + context, router_db['id']) + + return router_db + + def _update_router_rules(self, context, router, rules): + if len(rules) > cfg.CONF.ROUTER.max_router_rules: + raise routerrule.RulesExhausted( + router_id=router['id'], + quota=cfg.CONF.ROUTER.max_router_rules) + del_context = context.session.query(RouterRule) + del_context.filter_by(router_id=router['id']).delete() + context.session.expunge_all() + LOG.debug(_('Updating router rules to %s'), rules) + for rule in rules: + router_rule = RouterRule( + router_id=router['id'], + destination=rule['destination'], + source=rule['source'], + action=rule['action']) + router_rule.nexthops = [NextHop(nexthop=hop) + for hop in rule['nexthops']] + context.session.add(router_rule) + context.session.flush() + + def _make_router_rule_list(self, router_rules): + ruleslist = [] + for rule in router_rules: + hops = [hop['nexthop'] for hop in rule['nexthops']] + ruleslist.append({'id': rule['id'], + 'destination': rule['destination'], + 'source': rule['source'], + 'action': rule['action'], + 'nexthops': hops}) + return ruleslist + + def _get_router_rules_by_router_id(self, context, id): + query = context.session.query(RouterRule) + router_rules = query.filter_by(router_id=id).all() + return self._make_router_rule_list(router_rules) + + def get_router(self, context, id, fields=None): + with 
context.session.begin(subtransactions=True): + router = super(RouterRule_db_mixin, self).get_router( + context, id, fields) + router['router_rules'] = self._get_router_rules_by_router_id( + context, id) + return router + + def get_routers(self, context, filters=None, fields=None, + sorts=None, limit=None, marker=None, + page_reverse=False): + with context.session.begin(subtransactions=True): + routers = super(RouterRule_db_mixin, self).get_routers( + context, filters, fields, sorts=sorts, limit=limit, + marker=marker, page_reverse=page_reverse) + for router in routers: + router['router_rules'] = self._get_router_rules_by_router_id( + context, router['id']) + return routers + + def get_sync_data(self, context, router_ids=None, active=None): + """Query routers and their related floating_ips, interfaces.""" + with context.session.begin(subtransactions=True): + routers = super(RouterRule_db_mixin, + self).get_sync_data(context, router_ids, + active=active) + for router in routers: + router['router_rules'] = self._get_router_rules_by_router_id( + context, router['id']) + return routers diff --git a/neutron/plugins/bigswitch/servermanager.py b/neutron/plugins/bigswitch/servermanager.py new file mode 100644 index 000000000..caaa10133 --- /dev/null +++ b/neutron/plugins/bigswitch/servermanager.py @@ -0,0 +1,595 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# Copyright 2014 Big Switch Networks, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# +# @author: Mandeep Dhami, Big Switch Networks, Inc. +# @author: Sumit Naiksatam, sumitnaiksatam@gmail.com, Big Switch Networks, Inc. +# @author: Kevin Benton, Big Switch Networks, Inc. + +""" +This module manages the HTTP and HTTPS connections to the backend controllers. + +The main class it provides for external use is ServerPool which manages a set +of ServerProxy objects that correspond to individual backend controllers. + +The following functionality is handled by this module: +- Translation of rest_* function calls to HTTP/HTTPS calls to the controllers +- Automatic failover between controllers +- SSL Certificate enforcement +- HTTP Authentication + +""" +import base64 +import httplib +import os +import socket +import ssl + +import eventlet +from oslo.config import cfg + +from neutron.common import exceptions +from neutron.common import utils +from neutron.openstack.common import excutils +from neutron.openstack.common import jsonutils as json +from neutron.openstack.common import log as logging +from neutron.plugins.bigswitch.db import consistency_db as cdb + +LOG = logging.getLogger(__name__) + +# The following are used to invoke the API on the external controller +CAPABILITIES_PATH = "/capabilities" +NET_RESOURCE_PATH = "/tenants/%s/networks" +PORT_RESOURCE_PATH = "/tenants/%s/networks/%s/ports" +ROUTER_RESOURCE_PATH = "/tenants/%s/routers" +ROUTER_INTF_OP_PATH = "/tenants/%s/routers/%s/interfaces" +NETWORKS_PATH = "/tenants/%s/networks/%s" +FLOATINGIPS_PATH = "/tenants/%s/floatingips/%s" +PORTS_PATH = "/tenants/%s/networks/%s/ports/%s" +ATTACHMENT_PATH = "/tenants/%s/networks/%s/ports/%s/attachment" +ROUTERS_PATH = "/tenants/%s/routers/%s" +ROUTER_INTF_PATH = "/tenants/%s/routers/%s/interfaces/%s" +TOPOLOGY_PATH = "/topology" +HEALTH_PATH = "/health" +SUCCESS_CODES = range(200, 207) +FAILURE_CODES = [0, 301, 302, 303, 400, 401, 403, 404, 500, 501, 502, 503, + 504, 505] +BASE_URI = '/networkService/v1.1' +ORCHESTRATION_SERVICE_ID = 'Neutron v2.0' 
+HASH_MATCH_HEADER = 'X-BSN-BVS-HASH-MATCH' +# error messages +NXNETWORK = 'NXVNS' + + +class RemoteRestError(exceptions.NeutronException): + message = _("Error in REST call to remote network " + "controller: %(reason)s") + status = None + + def __init__(self, **kwargs): + self.status = kwargs.pop('status', None) + self.reason = kwargs.get('reason') + super(RemoteRestError, self).__init__(**kwargs) + + +class ServerProxy(object): + """REST server proxy to a network controller.""" + + def __init__(self, server, port, ssl, auth, neutron_id, timeout, + base_uri, name, mypool, combined_cert): + self.server = server + self.port = port + self.ssl = ssl + self.base_uri = base_uri + self.timeout = timeout + self.name = name + self.success_codes = SUCCESS_CODES + self.auth = None + self.neutron_id = neutron_id + self.failed = False + self.capabilities = [] + # enable server to reference parent pool + self.mypool = mypool + # cache connection here to avoid a SSL handshake for every connection + self.currentconn = None + if auth: + self.auth = 'Basic ' + base64.encodestring(auth).strip() + self.combined_cert = combined_cert + + def get_capabilities(self): + try: + body = self.rest_call('GET', CAPABILITIES_PATH)[2] + self.capabilities = json.loads(body) + except Exception: + LOG.exception(_("Couldn't retrieve capabilities. 
" + "Newer API calls won't be supported.")) + LOG.info(_("The following capabilities were received " + "for %(server)s: %(cap)s"), {'server': self.server, + 'cap': self.capabilities}) + return self.capabilities + + def rest_call(self, action, resource, data='', headers={}, timeout=False, + reconnect=False): + uri = self.base_uri + resource + body = json.dumps(data) + if not headers: + headers = {} + headers['Content-type'] = 'application/json' + headers['Accept'] = 'application/json' + headers['NeutronProxy-Agent'] = self.name + headers['Instance-ID'] = self.neutron_id + headers['Orchestration-Service-ID'] = ORCHESTRATION_SERVICE_ID + headers[HASH_MATCH_HEADER] = self.mypool.consistency_hash or '' + if 'keep-alive' in self.capabilities: + headers['Connection'] = 'keep-alive' + else: + reconnect = True + if self.auth: + headers['Authorization'] = self.auth + + LOG.debug(_("ServerProxy: server=%(server)s, port=%(port)d, " + "ssl=%(ssl)r"), + {'server': self.server, 'port': self.port, 'ssl': self.ssl}) + LOG.debug(_("ServerProxy: resource=%(resource)s, data=%(data)r, " + "headers=%(headers)r, action=%(action)s"), + {'resource': resource, 'data': data, 'headers': headers, + 'action': action}) + + # unspecified timeout is False because a timeout can be specified as + # None to indicate no timeout. 
+ if timeout is False: + timeout = self.timeout + + if timeout != self.timeout: + # need a new connection if timeout has changed + reconnect = True + + if not self.currentconn or reconnect: + if self.currentconn: + self.currentconn.close() + if self.ssl: + self.currentconn = HTTPSConnectionWithValidation( + self.server, self.port, timeout=timeout) + if self.currentconn is None: + LOG.error(_('ServerProxy: Could not establish HTTPS ' + 'connection')) + return 0, None, None, None + self.currentconn.combined_cert = self.combined_cert + else: + self.currentconn = httplib.HTTPConnection( + self.server, self.port, timeout=timeout) + if self.currentconn is None: + LOG.error(_('ServerProxy: Could not establish HTTP ' + 'connection')) + return 0, None, None, None + + try: + self.currentconn.request(action, uri, body, headers) + response = self.currentconn.getresponse() + newhash = response.getheader(HASH_MATCH_HEADER) + if newhash: + self._put_consistency_hash(newhash) + respstr = response.read() + respdata = respstr + if response.status in self.success_codes: + try: + respdata = json.loads(respstr) + except ValueError: + # response was not JSON, ignore the exception + pass + ret = (response.status, response.reason, respstr, respdata) + except httplib.HTTPException: + # If we were using a cached connection, try again with a new one. 
+ with excutils.save_and_reraise_exception() as ctxt: + self.currentconn.close() + if reconnect: + # if reconnect is true, this was on a fresh connection so + # reraise since this server seems to be broken + ctxt.reraise = True + else: + # if reconnect is false, it was a cached connection so + # try one more time before re-raising + ctxt.reraise = False + return self.rest_call(action, resource, data, headers, + timeout=timeout, reconnect=True) + except (socket.timeout, socket.error) as e: + self.currentconn.close() + LOG.error(_('ServerProxy: %(action)s failure, %(e)r'), + {'action': action, 'e': e}) + ret = 0, None, None, None + LOG.debug(_("ServerProxy: status=%(status)d, reason=%(reason)r, " + "ret=%(ret)s, data=%(data)r"), {'status': ret[0], + 'reason': ret[1], + 'ret': ret[2], + 'data': ret[3]}) + return ret + + def _put_consistency_hash(self, newhash): + self.mypool.consistency_hash = newhash + cdb.put_consistency_hash(newhash) + + +class ServerPool(object): + + def __init__(self, timeout=False, + base_uri=BASE_URI, name='NeutronRestProxy'): + LOG.debug(_("ServerPool: initializing")) + # 'servers' is the list of network controller REST end-points + # (used in order specified till one succeeds, and it is sticky + # till next failure). Use 'server_auth' to encode api-key + servers = cfg.CONF.RESTPROXY.servers + self.auth = cfg.CONF.RESTPROXY.server_auth + self.ssl = cfg.CONF.RESTPROXY.server_ssl + self.neutron_id = cfg.CONF.RESTPROXY.neutron_id + self.base_uri = base_uri + self.name = name + self.timeout = cfg.CONF.RESTPROXY.server_timeout + self.always_reconnect = not cfg.CONF.RESTPROXY.cache_connections + default_port = 8000 + if timeout is not False: + self.timeout = timeout + + # Function to use to retrieve topology for consistency syncs. + # Needs to be set by module that uses the servermanager. + self.get_topo_function = None + self.get_topo_function_args = {} + + # Hash to send to backend with request as expected previous + # state to verify consistency. 
+ self.consistency_hash = cdb.get_consistency_hash() + + if not servers: + raise cfg.Error(_('Servers not defined. Aborting server manager.')) + servers = [s if len(s.rsplit(':', 1)) == 2 + else "%s:%d" % (s, default_port) + for s in servers] + if any((len(spl) != 2 or not spl[1].isdigit()) + for spl in [sp.rsplit(':', 1) + for sp in servers]): + raise cfg.Error(_('Servers must be defined as :. ' + 'Configuration was %s') % servers) + self.servers = [ + self.server_proxy_for(server, int(port)) + for server, port in (s.rsplit(':', 1) for s in servers) + ] + eventlet.spawn(self._consistency_watchdog, + cfg.CONF.RESTPROXY.consistency_interval) + LOG.debug(_("ServerPool: initialization done")) + + def get_capabilities(self): + # lookup on first try + try: + return self.capabilities + except AttributeError: + # each server should return a list of capabilities it supports + # e.g. ['floatingip'] + capabilities = [set(server.get_capabilities()) + for server in self.servers] + # Pool only supports what all of the servers support + self.capabilities = set.intersection(*capabilities) + return self.capabilities + + def server_proxy_for(self, server, port): + combined_cert = self._get_combined_cert_for_server(server, port) + return ServerProxy(server, port, self.ssl, self.auth, self.neutron_id, + self.timeout, self.base_uri, self.name, mypool=self, + combined_cert=combined_cert) + + def _get_combined_cert_for_server(self, server, port): + # The ssl library requires a combined file with all trusted certs + # so we make one containing the trusted CAs and the corresponding + # host cert for this server + combined_cert = None + if self.ssl and not cfg.CONF.RESTPROXY.no_ssl_validation: + base_ssl = cfg.CONF.RESTPROXY.ssl_cert_directory + host_dir = os.path.join(base_ssl, 'host_certs') + ca_dir = os.path.join(base_ssl, 'ca_certs') + combined_dir = os.path.join(base_ssl, 'combined') + combined_cert = os.path.join(combined_dir, '%s.pem' % server) + if not os.path.exists(base_ssl): + 
raise cfg.Error(_('ssl_cert_directory [%s] does not exist. ' + 'Create it or disable ssl.') % base_ssl) + for automake in [combined_dir, ca_dir, host_dir]: + if not os.path.exists(automake): + os.makedirs(automake) + + # get all CA certs + certs = self._get_ca_cert_paths(ca_dir) + + # check for a host specific cert + hcert, exists = self._get_host_cert_path(host_dir, server) + if exists: + certs.append(hcert) + elif cfg.CONF.RESTPROXY.ssl_sticky: + self._fetch_and_store_cert(server, port, hcert) + certs.append(hcert) + if not certs: + raise cfg.Error(_('No certificates were found to verify ' + 'controller %s') % (server)) + self._combine_certs_to_file(certs, combined_cert) + return combined_cert + + def _combine_certs_to_file(self, certs, cfile): + ''' + Concatenates the contents of each certificate in a list of + certificate paths to one combined location for use with ssl + sockets. + ''' + with open(cfile, 'w') as combined: + for c in certs: + with open(c, 'r') as cert_handle: + combined.write(cert_handle.read()) + + def _get_host_cert_path(self, host_dir, server): + ''' + returns full path and boolean indicating existence + ''' + hcert = os.path.join(host_dir, '%s.pem' % server) + if os.path.exists(hcert): + return hcert, True + return hcert, False + + def _get_ca_cert_paths(self, ca_dir): + certs = [os.path.join(root, name) + for name in [ + name for (root, dirs, files) in os.walk(ca_dir) + for name in files + ] + if name.endswith('.pem')] + return certs + + def _fetch_and_store_cert(self, server, port, path): + ''' + Grabs a certificate from a server and writes it to + a given path. + ''' + try: + cert = ssl.get_server_certificate((server, port)) + except Exception as e: + raise cfg.Error(_('Could not retrieve initial ' + 'certificate from controller %(server)s. 
' + 'Error details: %(error)s') % + {'server': server, 'error': str(e)}) + + LOG.warning(_("Storing to certificate for host %(server)s " + "at %(path)s") % {'server': server, + 'path': path}) + self._file_put_contents(path, cert) + + return cert + + def _file_put_contents(self, path, contents): + # Simple method to write to file. + # Created for easy Mocking + with open(path, 'w') as handle: + handle.write(contents) + + def server_failure(self, resp, ignore_codes=[]): + """Define failure codes as required. + + Note: We assume 301-303 is a failure, and try the next server in + the server pool. + """ + return (resp[0] in FAILURE_CODES and resp[0] not in ignore_codes) + + def action_success(self, resp): + """Defining success codes as required. + + Note: We assume any valid 2xx as being successful response. + """ + return resp[0] in SUCCESS_CODES + + @utils.synchronized('bsn-rest-call') + def rest_call(self, action, resource, data, headers, ignore_codes, + timeout=False): + good_first = sorted(self.servers, key=lambda x: x.failed) + first_response = None + for active_server in good_first: + ret = active_server.rest_call(action, resource, data, headers, + timeout, + reconnect=self.always_reconnect) + # If inconsistent, do a full synchronization + if ret[0] == httplib.CONFLICT: + if not self.get_topo_function: + raise cfg.Error(_('Server requires synchronization, ' + 'but no topology function was defined.')) + data = self.get_topo_function(**self.get_topo_function_args) + active_server.rest_call('PUT', TOPOLOGY_PATH, data, + timeout=None) + # Store the first response as the error to be bubbled up to the + # user since it was a good server. Subsequent servers will most + # likely be cluster slaves and won't have a useful error for the + # user (e.g. 
302 redirect to master) + if not first_response: + first_response = ret + if not self.server_failure(ret, ignore_codes): + active_server.failed = False + return ret + else: + LOG.error(_('ServerProxy: %(action)s failure for servers: ' + '%(server)r Response: %(response)s'), + {'action': action, + 'server': (active_server.server, + active_server.port), + 'response': ret[3]}) + LOG.error(_("ServerProxy: Error details: status=%(status)d, " + "reason=%(reason)r, ret=%(ret)s, data=%(data)r"), + {'status': ret[0], 'reason': ret[1], 'ret': ret[2], + 'data': ret[3]}) + active_server.failed = True + + # All servers failed, reset server list and try again next time + LOG.error(_('ServerProxy: %(action)s failure for all servers: ' + '%(server)r'), + {'action': action, + 'server': tuple((s.server, + s.port) for s in self.servers)}) + return first_response + + def rest_action(self, action, resource, data='', errstr='%s', + ignore_codes=[], headers={}, timeout=False): + """ + Wrapper for rest_call that verifies success and raises a + RemoteRestError on failure with a provided error string + By default, 404 errors on DELETE calls are ignored because + they already do not exist on the backend. 
+ """ + if not ignore_codes and action == 'DELETE': + ignore_codes = [404] + resp = self.rest_call(action, resource, data, headers, ignore_codes, + timeout) + if self.server_failure(resp, ignore_codes): + LOG.error(errstr, resp[2]) + raise RemoteRestError(reason=resp[2], status=resp[0]) + if resp[0] in ignore_codes: + LOG.warning(_("NeutronRestProxyV2: Received and ignored error " + "code %(code)s on %(action)s action to resource " + "%(resource)s"), + {'code': resp[2], 'action': action, + 'resource': resource}) + return resp + + def rest_create_router(self, tenant_id, router): + resource = ROUTER_RESOURCE_PATH % tenant_id + data = {"router": router} + errstr = _("Unable to create remote router: %s") + self.rest_action('POST', resource, data, errstr) + + def rest_update_router(self, tenant_id, router, router_id): + resource = ROUTERS_PATH % (tenant_id, router_id) + data = {"router": router} + errstr = _("Unable to update remote router: %s") + self.rest_action('PUT', resource, data, errstr) + + def rest_delete_router(self, tenant_id, router_id): + resource = ROUTERS_PATH % (tenant_id, router_id) + errstr = _("Unable to delete remote router: %s") + self.rest_action('DELETE', resource, errstr=errstr) + + def rest_add_router_interface(self, tenant_id, router_id, intf_details): + resource = ROUTER_INTF_OP_PATH % (tenant_id, router_id) + data = {"interface": intf_details} + errstr = _("Unable to add router interface: %s") + self.rest_action('POST', resource, data, errstr) + + def rest_remove_router_interface(self, tenant_id, router_id, interface_id): + resource = ROUTER_INTF_PATH % (tenant_id, router_id, interface_id) + errstr = _("Unable to delete remote intf: %s") + self.rest_action('DELETE', resource, errstr=errstr) + + def rest_create_network(self, tenant_id, network): + resource = NET_RESOURCE_PATH % tenant_id + data = {"network": network} + errstr = _("Unable to create remote network: %s") + self.rest_action('POST', resource, data, errstr) + + def 
rest_update_network(self, tenant_id, net_id, network): + resource = NETWORKS_PATH % (tenant_id, net_id) + data = {"network": network} + errstr = _("Unable to update remote network: %s") + self.rest_action('PUT', resource, data, errstr) + + def rest_delete_network(self, tenant_id, net_id): + resource = NETWORKS_PATH % (tenant_id, net_id) + errstr = _("Unable to update remote network: %s") + self.rest_action('DELETE', resource, errstr=errstr) + + def rest_create_port(self, tenant_id, net_id, port): + resource = ATTACHMENT_PATH % (tenant_id, net_id, port["id"]) + data = {"port": port} + device_id = port.get("device_id") + if not port["mac_address"] or not device_id: + # controller only cares about ports attached to devices + LOG.warning(_("No device MAC attached to port %s. " + "Skipping notification to controller."), port["id"]) + return + data["attachment"] = {"id": device_id, + "mac": port["mac_address"]} + errstr = _("Unable to create remote port: %s") + self.rest_action('PUT', resource, data, errstr) + + def rest_delete_port(self, tenant_id, network_id, port_id): + resource = ATTACHMENT_PATH % (tenant_id, network_id, port_id) + errstr = _("Unable to delete remote port: %s") + self.rest_action('DELETE', resource, errstr=errstr) + + def rest_update_port(self, tenant_id, net_id, port): + # Controller has no update operation for the port endpoint + # the create PUT method will replace + self.rest_create_port(tenant_id, net_id, port) + + def rest_create_floatingip(self, tenant_id, floatingip): + resource = FLOATINGIPS_PATH % (tenant_id, floatingip['id']) + errstr = _("Unable to create floating IP: %s") + self.rest_action('PUT', resource, errstr=errstr) + + def rest_update_floatingip(self, tenant_id, floatingip, oldid): + resource = FLOATINGIPS_PATH % (tenant_id, oldid) + errstr = _("Unable to update floating IP: %s") + self.rest_action('PUT', resource, errstr=errstr) + + def rest_delete_floatingip(self, tenant_id, oldid): + resource = FLOATINGIPS_PATH % (tenant_id, 
oldid) + errstr = _("Unable to delete floating IP: %s") + self.rest_action('DELETE', resource, errstr=errstr) + + def _consistency_watchdog(self, polling_interval=60): + if 'consistency' not in self.get_capabilities(): + LOG.warning(_("Backend server(s) do not support automated " + "consitency checks.")) + return + if not polling_interval: + LOG.warning(_("Consistency watchdog disabled by polling interval " + "setting of %s."), polling_interval) + return + while True: + # If consistency is supported, all we have to do is make any + # rest call and the consistency header will be added. If it + # doesn't match, the backend will return a synchronization error + # that will be handled by the rest_action. + eventlet.sleep(polling_interval) + try: + self.rest_action('GET', HEALTH_PATH) + except Exception: + LOG.exception(_("Encountered an error checking controller " + "health.")) + + +class HTTPSConnectionWithValidation(httplib.HTTPSConnection): + + # If combined_cert is None, the connection will continue without + # any certificate validation. 
+ combined_cert = None + + def connect(self): + try: + sock = socket.create_connection((self.host, self.port), + self.timeout, self.source_address) + except AttributeError: + # python 2.6 doesn't have the source_address attribute + sock = socket.create_connection((self.host, self.port), + self.timeout) + if self._tunnel_host: + self.sock = sock + self._tunnel() + + if self.combined_cert: + self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file, + cert_reqs=ssl.CERT_REQUIRED, + ca_certs=self.combined_cert) + else: + self.sock = ssl.wrap_socket(sock, self.key_file, + self.cert_file, + cert_reqs=ssl.CERT_NONE) diff --git a/neutron/plugins/bigswitch/tests/__init__.py b/neutron/plugins/bigswitch/tests/__init__.py new file mode 100644 index 000000000..2a2421616 --- /dev/null +++ b/neutron/plugins/bigswitch/tests/__init__.py @@ -0,0 +1,16 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2012 Big Switch Networks, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# diff --git a/neutron/plugins/bigswitch/tests/test_server.py b/neutron/plugins/bigswitch/tests/test_server.py new file mode 100755 index 000000000..ee0c2be3d --- /dev/null +++ b/neutron/plugins/bigswitch/tests/test_server.py @@ -0,0 +1,188 @@ +#!/usr/bin/env python +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012, Big Switch Networks, Inc. +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Mandeep Dhami, Big Switch Networks, Inc. + +"""Test server mocking a REST based network ctrl. + +Used for NeutronRestProxy tests +""" +from __future__ import print_function + +import re + +from six import moves +from wsgiref import simple_server + +from neutron.openstack.common import jsonutils as json + + +class TestNetworkCtrl(object): + + def __init__(self, host='', port=8000, + default_status='404 Not Found', + default_response='404 Not Found', + debug=False): + self.host = host + self.port = port + self.default_status = default_status + self.default_response = default_response + self.debug = debug + self.debug_env = False + self.debug_resp = False + self.matches = [] + + def match(self, prior, method_regexp, uri_regexp, handler, data=None, + multi=True): + """Add to the list of exptected inputs. + + The incoming request is matched in the order of priority. For same + priority, match the oldest match request first. + + :param prior: intgere priority of this match (e.g. 100) + :param method_regexp: regexp to match method (e.g. 'PUT|POST') + :param uri_regexp: regexp to match uri (e.g. 
'/quantum/v?.?/') + :param handler: function with signature: + lambda(method, uri, body, **kwargs) : status, body + where + - method: HTTP method for this request + - uri: URI for this HTTP request + - body: body of this HTTP request + - kwargs are: + - data: data object that was in the match call + - node: TestNetworkCtrl object itself + - id: offset of the matching tuple + and return values is: + (status, body) where: + - status: HTTP resp status (e.g. '200 OK'). + If None, use default_status + - body: HTTP resp body. If None, use '' + """ + assert int(prior) == prior, 'Priority should an integer be >= 0' + assert prior >= 0, 'Priority should an integer be >= 0' + + lo, hi = 0, len(self.matches) + while lo < hi: + mid = (lo + hi) // 2 + if prior < self.matches[mid]: + hi = mid + else: + lo = mid + 1 + self.matches.insert(lo, (prior, method_regexp, uri_regexp, handler, + data, multi)) + + def remove_id(self, id_): + assert id_ >= 0, 'remove_id: id < 0' + assert id_ <= len(self.matches), 'remove_id: id > len()' + self.matches.pop(id_) + + def request_handler(self, method, uri, body): + retstatus = self.default_status + retbody = self.default_response + for i in moves.xrange(len(self.matches)): + (prior, method_regexp, uri_regexp, handler, data, multi) = \ + self.matches[i] + if re.match(method_regexp, method) and re.match(uri_regexp, uri): + kwargs = { + 'data': data, + 'node': self, + 'id': i, + } + retstatus, retbody = handler(method, uri, body, **kwargs) + if multi is False: + self.remove_id(i) + break + if retbody is None: + retbody = '' + return (retstatus, retbody) + + def server(self): + def app(environ, start_response): + uri = environ['PATH_INFO'] + method = environ['REQUEST_METHOD'] + headers = [('Content-type', 'text/json')] + content_len_str = environ['CONTENT_LENGTH'] + + content_len = 0 + request_data = None + if content_len_str: + content_len = int(content_len_str) + request_data = environ.get('wsgi.input').read(content_len) + if request_data: + try: 
+ request_data = json.loads(request_data) + except Exception: + # OK for it not to be json! Ignore it + pass + + if self.debug: + print('\n') + if self.debug_env: + print('environ:') + for (key, value) in sorted(environ.iteritems()): + print(' %16s : %s' % (key, value)) + + print('%s %s' % (method, uri)) + if request_data: + print('%s' % + json.dumps(request_data, sort_keys=True, indent=4)) + + status, body = self.request_handler(method, uri, None) + body_data = None + if body: + try: + body_data = json.loads(body) + except Exception: + # OK for it not to be json! Ignore it + pass + + start_response(status, headers) + if self.debug: + if self.debug_env: + print('%s: %s' % ('Response', + json.dumps(body_data, sort_keys=True, indent=4))) + return body + return simple_server.make_server(self.host, self.port, app) + + def run(self): + print("Serving on port %d ..." % self.port) + try: + self.server().serve_forever() + except KeyboardInterrupt: + pass + + +if __name__ == "__main__": + import sys + + port = 8899 + if len(sys.argv) > 1: + port = int(sys.argv[1]) + + debug = False + if len(sys.argv) > 2: + if sys.argv[2].lower() in ['debug', 'true']: + debug = True + + ctrl = TestNetworkCtrl(port=port, + default_status='200 OK', + default_response='{"status":"200 OK"}', + debug=debug) + ctrl.match(100, 'GET', '/test', + lambda m, u, b, **k: ('200 OK', '["200 OK"]')) + ctrl.run() diff --git a/neutron/plugins/bigswitch/vcsversion.py b/neutron/plugins/bigswitch/vcsversion.py new file mode 100644 index 000000000..6ed5e2680 --- /dev/null +++ b/neutron/plugins/bigswitch/vcsversion.py @@ -0,0 +1,27 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013 Big Switch Networks, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Sumit Naiksatam, sumitnaiksatam@gmail.com +# +version_info = {'branch_nick': u'neutron/trunk', + 'revision_id': u'1', + 'revno': 0} + + +NEUTRONRESTPROXY_VERSION = ['2013', '1', None] + + +FINAL = False # This becomes true at Release Candidate time diff --git a/neutron/plugins/bigswitch/version.py b/neutron/plugins/bigswitch/version.py new file mode 100755 index 000000000..2069d0bc8 --- /dev/null +++ b/neutron/plugins/bigswitch/version.py @@ -0,0 +1,53 @@ +#!/usr/bin/env python +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 OpenStack Foundation +# Copyright 2012, Big Switch Networks, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# Based on openstack generic code +# @author: Mandeep Dhami, Big Switch Networks, Inc. 
+ +"""Determine version of NeutronRestProxy plugin""" +from __future__ import print_function + +from neutron.plugins.bigswitch import vcsversion + + +YEAR, COUNT, REVISION = vcsversion.NEUTRONRESTPROXY_VERSION + + +def canonical_version_string(): + return '.'.join(filter(None, + vcsversion.NEUTRONRESTPROXY_VERSION)) + + +def version_string(): + if vcsversion.FINAL: + return canonical_version_string() + else: + return '%s-dev' % (canonical_version_string(),) + + +def vcs_version_string(): + return "%s:%s" % (vcsversion.version_info['branch_nick'], + vcsversion.version_info['revision_id']) + + +def version_string_with_vcs(): + return "%s-%s" % (canonical_version_string(), vcs_version_string()) + + +if __name__ == "__main__": + print(version_string_with_vcs()) diff --git a/neutron/plugins/brocade/NeutronPlugin.py b/neutron/plugins/brocade/NeutronPlugin.py new file mode 100644 index 000000000..c633085d0 --- /dev/null +++ b/neutron/plugins/brocade/NeutronPlugin.py @@ -0,0 +1,497 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013 Brocade Communications System, Inc. +# All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# +# Authors: +# Shiv Haris (sharis@brocade.com) +# Varma Bhupatiraju (vbhupati@brocade.com) +# +# (Some parts adapted from LinuxBridge Plugin) +# TODO(shiv) need support for security groups + + +"""Implementation of Brocade Neutron Plugin.""" + +from oslo.config import cfg + +from neutron.agent import securitygroups_rpc as sg_rpc +from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api +from neutron.api.rpc.agentnotifiers import l3_rpc_agent_api +from neutron.common import constants as q_const +from neutron.common import rpc_compat +from neutron.common import topics +from neutron.common import utils +from neutron.db import agents_db +from neutron.db import agentschedulers_db +from neutron.db import api as db +from neutron.db import db_base_plugin_v2 +from neutron.db import dhcp_rpc_base +from neutron.db import external_net_db +from neutron.db import extraroute_db +from neutron.db import l3_agentschedulers_db +from neutron.db import l3_rpc_base +from neutron.db import portbindings_base +from neutron.db import securitygroups_rpc_base as sg_db_rpc +from neutron.extensions import portbindings +from neutron.extensions import securitygroup as ext_sg +from neutron.openstack.common import context +from neutron.openstack.common import importutils +from neutron.openstack.common import log as logging +from neutron.plugins.brocade.db import models as brocade_db +from neutron.plugins.brocade import vlanbm as vbm +from neutron.plugins.common import constants as svc_constants + + +LOG = logging.getLogger(__name__) +PLUGIN_VERSION = 0.88 +AGENT_OWNER_PREFIX = "network:" +NOS_DRIVER = 'neutron.plugins.brocade.nos.nosdriver.NOSdriver' + +SWITCH_OPTS = [cfg.StrOpt('address', default='', + help=_('The address of the host to SSH to')), + cfg.StrOpt('username', default='', + help=_('The SSH username to use')), + cfg.StrOpt('password', default='', secret=True, + help=_('The SSH password to use')), + cfg.StrOpt('ostype', default='NOS', + help=_('Currently unused')) + ] + 
+PHYSICAL_INTERFACE_OPTS = [cfg.StrOpt('physical_interface', default='eth0', + help=_('The network interface to use when creating' + 'a port')) + ] + +cfg.CONF.register_opts(SWITCH_OPTS, "SWITCH") +cfg.CONF.register_opts(PHYSICAL_INTERFACE_OPTS, "PHYSICAL_INTERFACE") + + +class BridgeRpcCallbacks(rpc_compat.RpcCallback, + dhcp_rpc_base.DhcpRpcCallbackMixin, + l3_rpc_base.L3RpcCallbackMixin, + sg_db_rpc.SecurityGroupServerRpcCallbackMixin): + """Agent callback.""" + + RPC_API_VERSION = '1.1' + # Device names start with "tap" + # history + # 1.1 Support Security Group RPC + TAP_PREFIX_LEN = 3 + + @classmethod + def get_port_from_device(cls, device): + """Get port from the brocade specific db.""" + + # TODO(shh) context is not being passed as + # an argument to this function; + # + # need to be fixed in: + # file: neutron/db/securtygroups_rpc_base.py + # function: securitygroup_rules_for_devices() + # which needs to pass context to us + + # Doing what other plugins are doing + session = db.get_session() + port = brocade_db.get_port_from_device( + session, device[cls.TAP_PREFIX_LEN:]) + + # TODO(shiv): need to extend the db model to include device owners + # make it appears that the device owner is of type network + if port: + port['device'] = device + port['device_owner'] = AGENT_OWNER_PREFIX + port['binding:vif_type'] = 'bridge' + return port + + def get_device_details(self, rpc_context, **kwargs): + """Agent requests device details.""" + + agent_id = kwargs.get('agent_id') + device = kwargs.get('device') + LOG.debug(_("Device %(device)s details requested from %(agent_id)s"), + {'device': device, 'agent_id': agent_id}) + port = brocade_db.get_port(rpc_context, device[self.TAP_PREFIX_LEN:]) + if port: + entry = {'device': device, + 'vlan_id': port.vlan_id, + 'network_id': port.network_id, + 'port_id': port.port_id, + 'physical_network': port.physical_interface, + 'admin_state_up': port.admin_state_up + } + + else: + entry = {'device': device} + LOG.debug(_("%s can not 
be found in database"), device) + return entry + + def update_device_down(self, rpc_context, **kwargs): + """Device no longer exists on agent.""" + + device = kwargs.get('device') + port = self.get_port_from_device(device) + if port: + entry = {'device': device, + 'exists': True} + # Set port status to DOWN + port_id = port['port_id'] + brocade_db.update_port_state(rpc_context, port_id, False) + else: + entry = {'device': device, + 'exists': False} + LOG.debug(_("%s can not be found in database"), device) + return entry + + +class AgentNotifierApi(rpc_compat.RpcProxy, + sg_rpc.SecurityGroupAgentRpcApiMixin): + """Agent side of the linux bridge rpc API. + + API version history: + 1.0 - Initial version. + 1.1 - Added get_active_networks_info, create_dhcp_port, + and update_dhcp_port methods. + + """ + + BASE_RPC_API_VERSION = '1.1' + + def __init__(self, topic): + super(AgentNotifierApi, self).__init__( + topic=topic, default_version=self.BASE_RPC_API_VERSION) + self.topic = topic + self.topic_network_delete = topics.get_topic_name(topic, + topics.NETWORK, + topics.DELETE) + self.topic_port_update = topics.get_topic_name(topic, + topics.PORT, + topics.UPDATE) + + def network_delete(self, context, network_id): + self.fanout_cast(context, + self.make_msg('network_delete', + network_id=network_id), + topic=self.topic_network_delete) + + def port_update(self, context, port, physical_network, vlan_id): + self.fanout_cast(context, + self.make_msg('port_update', + port=port, + physical_network=physical_network, + vlan_id=vlan_id), + topic=self.topic_port_update) + + +class BrocadePluginV2(db_base_plugin_v2.NeutronDbPluginV2, + external_net_db.External_net_db_mixin, + extraroute_db.ExtraRoute_db_mixin, + sg_db_rpc.SecurityGroupServerRpcMixin, + l3_agentschedulers_db.L3AgentSchedulerDbMixin, + agentschedulers_db.DhcpAgentSchedulerDbMixin, + portbindings_base.PortBindingBaseMixin): + """BrocadePluginV2 is a Neutron plugin. 
+ + Provides L2 Virtual Network functionality using VDX. Upper + layer driver class that interfaces to NETCONF layer below. + + """ + + def __init__(self): + """Initialize Brocade Plugin. + + Specify switch address and db configuration. + """ + + super(BrocadePluginV2, self).__init__() + self.supported_extension_aliases = ["binding", "security-group", + "external-net", "router", + "extraroute", "agent", + "l3_agent_scheduler", + "dhcp_agent_scheduler"] + + self.physical_interface = (cfg.CONF.PHYSICAL_INTERFACE. + physical_interface) + self.base_binding_dict = self._get_base_binding_dict() + portbindings_base.register_port_dict_function() + self.ctxt = context.get_admin_context() + self.ctxt.session = db.get_session() + self._vlan_bitmap = vbm.VlanBitmap(self.ctxt) + self._setup_rpc() + self.network_scheduler = importutils.import_object( + cfg.CONF.network_scheduler_driver + ) + self.router_scheduler = importutils.import_object( + cfg.CONF.router_scheduler_driver + ) + self.brocade_init() + + def brocade_init(self): + """Brocade specific initialization.""" + + self._switch = {'address': cfg.CONF.SWITCH.address, + 'username': cfg.CONF.SWITCH.username, + 'password': cfg.CONF.SWITCH.password + } + self._driver = importutils.import_object(NOS_DRIVER) + + def _setup_rpc(self): + # RPC support + self.service_topics = {svc_constants.CORE: topics.PLUGIN, + svc_constants.L3_ROUTER_NAT: topics.L3PLUGIN} + self.rpc_context = context.RequestContext('neutron', 'neutron', + is_admin=False) + self.conn = rpc_compat.create_connection(new=True) + self.endpoints = [BridgeRpcCallbacks(), + agents_db.AgentExtRpcCallback()] + for svc_topic in self.service_topics.values(): + self.conn.create_consumer(svc_topic, self.endpoints, fanout=False) + # Consume from all consumers in threads + self.conn.consume_in_threads() + self.notifier = AgentNotifierApi(topics.AGENT) + self.agent_notifiers[q_const.AGENT_TYPE_DHCP] = ( + dhcp_rpc_agent_api.DhcpAgentNotifyAPI() + ) + 
self.agent_notifiers[q_const.AGENT_TYPE_L3] = ( + l3_rpc_agent_api.L3AgentNotifyAPI() + ) + + def create_network(self, context, network): + """Create network. + + This call to create network translates to creation of port-profile on + the physical switch. + """ + + with context.session.begin(subtransactions=True): + net = super(BrocadePluginV2, self).create_network(context, network) + net_uuid = net['id'] + vlan_id = self._vlan_bitmap.get_next_vlan(None) + switch = self._switch + try: + self._driver.create_network(switch['address'], + switch['username'], + switch['password'], + vlan_id) + except Exception: + # Proper formatting + LOG.exception(_("Brocade NOS driver error")) + LOG.debug(_("Returning the allocated vlan (%d) to the pool"), + vlan_id) + self._vlan_bitmap.release_vlan(int(vlan_id)) + raise Exception(_("Brocade plugin raised exception, " + "check logs")) + + brocade_db.create_network(context, net_uuid, vlan_id) + self._process_l3_create(context, net, network['network']) + + LOG.info(_("Allocated vlan (%d) from the pool"), vlan_id) + return net + + def delete_network(self, context, net_id): + """Delete network. + + This call to delete the network translates to removing the + port-profile on the physical switch. 
+ """ + + with context.session.begin(subtransactions=True): + self._process_l3_delete(context, net_id) + result = super(BrocadePluginV2, self).delete_network(context, + net_id) + # we must delete all ports in db first (foreign key constraint) + # there is no need to delete port in the driver (its a no-op) + # (actually: note there is no such call to the driver) + bports = brocade_db.get_ports(context, net_id) + for bport in bports: + brocade_db.delete_port(context, bport['port_id']) + + # find the vlan for this network + net = brocade_db.get_network(context, net_id) + vlan_id = net['vlan'] + + # Tell hw to do remove PP + switch = self._switch + try: + self._driver.delete_network(switch['address'], + switch['username'], + switch['password'], + vlan_id) + except Exception: + # Proper formatting + LOG.exception(_("Brocade NOS driver error")) + raise Exception(_("Brocade plugin raised exception, " + "check logs")) + + # now ok to delete the network + brocade_db.delete_network(context, net_id) + + # relinquish vlan in bitmap + self._vlan_bitmap.release_vlan(int(vlan_id)) + return result + + def update_network(self, context, id, network): + + session = context.session + with session.begin(subtransactions=True): + net = super(BrocadePluginV2, self).update_network(context, id, + network) + self._process_l3_update(context, net, network['network']) + return net + + def create_port(self, context, port): + """Create logical port on the switch.""" + + tenant_id = port['port']['tenant_id'] + network_id = port['port']['network_id'] + admin_state_up = port['port']['admin_state_up'] + + physical_interface = self.physical_interface + + with context.session.begin(subtransactions=True): + bnet = brocade_db.get_network(context, network_id) + vlan_id = bnet['vlan'] + + neutron_port = super(BrocadePluginV2, self).create_port(context, + port) + self._process_portbindings_create_and_update(context, + port['port'], + neutron_port) + interface_mac = neutron_port['mac_address'] + port_id = 
neutron_port['id'] + + switch = self._switch + + # convert mac format: xx:xx:xx:xx:xx:xx -> xxxx.xxxx.xxxx + mac = self.mac_reformat_62to34(interface_mac) + try: + self._driver.associate_mac_to_network(switch['address'], + switch['username'], + switch['password'], + vlan_id, + mac) + except Exception: + # Proper formatting + LOG.exception(_("Brocade NOS driver error")) + raise Exception(_("Brocade plugin raised exception, " + "check logs")) + + # save to brocade persistent db + brocade_db.create_port(context, port_id, network_id, + physical_interface, + vlan_id, tenant_id, admin_state_up) + + # apply any extensions + return neutron_port + + def delete_port(self, context, port_id): + with context.session.begin(subtransactions=True): + neutron_port = self.get_port(context, port_id) + interface_mac = neutron_port['mac_address'] + # convert mac format: xx:xx:xx:xx:xx:xx -> xxxx.xxxx.xxxx + mac = self.mac_reformat_62to34(interface_mac) + + brocade_port = brocade_db.get_port(context, port_id) + vlan_id = brocade_port['vlan_id'] + + switch = self._switch + try: + self._driver.dissociate_mac_from_network(switch['address'], + switch['username'], + switch['password'], + vlan_id, + mac) + except Exception: + LOG.exception(_("Brocade NOS driver error")) + raise Exception( + _("Brocade plugin raised exception, check logs")) + + super(BrocadePluginV2, self).delete_port(context, port_id) + brocade_db.delete_port(context, port_id) + + def update_port(self, context, port_id, port): + original_port = self.get_port(context, port_id) + session = context.session + port_updated = False + with session.begin(subtransactions=True): + # delete the port binding and read it with the new rules + if ext_sg.SECURITYGROUPS in port['port']: + port['port'][ext_sg.SECURITYGROUPS] = ( + self._get_security_groups_on_port(context, port)) + self._delete_port_security_group_bindings(context, port_id) + # process_port_create_security_group also needs port id + port['port']['id'] = port_id + 
self._process_port_create_security_group( + context, + port['port'], + port['port'][ext_sg.SECURITYGROUPS]) + port_updated = True + port_data = port['port'] + port = super(BrocadePluginV2, self).update_port( + context, port_id, port) + self._process_portbindings_create_and_update(context, + port_data, + port) + if original_port['admin_state_up'] != port['admin_state_up']: + port_updated = True + + if (original_port['fixed_ips'] != port['fixed_ips'] or + not utils.compare_elements( + original_port.get(ext_sg.SECURITYGROUPS), + port.get(ext_sg.SECURITYGROUPS))): + self.notifier.security_groups_member_updated( + context, port.get(ext_sg.SECURITYGROUPS)) + + if port_updated: + self._notify_port_updated(context, port) + + return port + + def _notify_port_updated(self, context, port): + port_id = port['id'] + bport = brocade_db.get_port(context, port_id) + self.notifier.port_update(context, port, + bport.physical_interface, + bport.vlan_id) + + def _get_base_binding_dict(self): + binding = { + portbindings.VIF_TYPE: portbindings.VIF_TYPE_BRIDGE, + portbindings.VIF_DETAILS: { + # TODO(rkukura): Replace with new VIF security details + portbindings.CAP_PORT_FILTER: + 'security-group' in self.supported_extension_aliases}} + return binding + + def get_plugin_version(self): + """Get version number of the plugin.""" + return PLUGIN_VERSION + + @staticmethod + def mac_reformat_62to34(interface_mac): + """Transform MAC address format. + + Transforms from 6 groups of 2 hexadecimal numbers delimited by ":" + to 3 groups of 4 hexadecimals numbers delimited by ".". + + :param interface_mac: MAC address in the format xx:xx:xx:xx:xx:xx + :type interface_mac: string + :returns: MAC address in the format xxxx.xxxx.xxxx + :rtype: string + """ + + mac = interface_mac.replace(":", "") + mac = mac[0:4] + "." + mac[4:8] + "." 
+ mac[8:12] + return mac diff --git a/neutron/plugins/brocade/README.md b/neutron/plugins/brocade/README.md new file mode 100644 index 000000000..82b3ad89d --- /dev/null +++ b/neutron/plugins/brocade/README.md @@ -0,0 +1,112 @@ +Brocade Openstack Neutron Plugin +================================ + +* up-to-date version of these instructions are located at: + http://wiki.openstack.org/brocade-neutron-plugin + +* N.B.: Please see Prerequisites section regarding ncclient (netconf client library) + +* Supports VCS (Virtual Cluster of Switches) + + +Openstack Brocade Neutron Plugin implements the Neutron v2.0 API. + +This plugin is meant to orchestrate Brocade VCS switches running NOS, examples of these are: + + 1. VDX 67xx series of switches + 2. VDX 87xx series of switches + +Brocade Neutron plugin implements the Neutron v2.0 API. It uses NETCONF at the backend +to configure the Brocade switch. + + +------------+ +------------+ +-------------+ + | | | | | | + | | | | | Brocade | + | Openstack | v2.0 | Brocade | NETCONF | VCS Switch | + | Neutron +--------+ Neutron +----------+ | + | | | Plugin | | VDX 67xx | + | | | | | VDX 87xx | + | | | | | | + | | | | | | + +------------+ +------------+ +-------------+ + + +Directory Structure +=================== + +Normally you will have your Openstack directory structure as follows: + + /opt/stack/nova/ + /opt/stack/horizon/ + ... + /opt/stack/neutron/neutron/plugins/ + +Within this structure, Brocade plugin resides at: + + /opt/stack/neutron/neutron/plugins/brocade + + +Prerequsites +============ + +This plugin requires installation of the python netconf client (ncclient) library: + +ncclient v0.3.1 - Python library for NETCONF clients available at http://github.com/brocade/ncclient + + % git clone https://www.github.com/brocade/ncclient + % cd ncclient; sudo python ./setup.py install + + +Configuration +============= + +1. 
Specify to Neutron that you will be using the Brocade Plugin - this is done +by setting the parameter core_plugin in Neutron: + + core_plugin = neutron.plugins.brocade.NeutronPlugin.BrocadePluginV2 + +2. Physical switch configuration parameters and Brocade specific database configuration is specified in +the configuration file specified in the brocade.ini files: + + % cat /etc/neutron/plugins/brocade/brocade.ini + [SWITCH] + username = admin + password = password + address = + ostype = NOS + + [database] + connection = mysql://root:pass@localhost/brocade_neutron?charset=utf8 + + (please see list of more configuration parameters in the brocade.ini file) + +Running Setup.py +================ + +Running setup.py with appropriate permissions will copy the default configuration +file to /etc/neutron/plugins/brocade/brocade.ini. This file MUST be edited to +suit your setup/environment. + + % cd /opt/stack/neutron/neutron/plugins/brocade + % python setup.py + + +Devstack +======== + +Please see special notes for devstack at: +http://wiki.openstack.org/brocade-neutron-plugin + +In order to use Brocade Neutron Plugin, add the following lines in localrc, if localrc file does + not exist create one: + +ENABLED_SERVICES=g-api,g-reg,key,n-api,n-crt,n-obj,n-cpu,n-net,n-cond,cinder,c-sch,c-api,c-vol,n-sch,n-novnc,n-xvnc,n-cauth,horizon,rabbit,neutron,q-svc,q-agt +Q_PLUGIN=brocade + +As part of running devstack/stack.sh, the configuration file is copied as: + + % cp /opt/stack/neutron/etc/neutron/plugins/brocade/brocade.ini /etc/neutron/plugins/brocade/brocade.ini + +(hence it is important to make any changes to the configuration in: +/opt/stack/neutron/etc/neutron/plugins/brocade/brocade.ini) + diff --git a/neutron/plugins/brocade/__init__.py b/neutron/plugins/brocade/__init__.py new file mode 100644 index 000000000..c22f863e3 --- /dev/null +++ b/neutron/plugins/brocade/__init__.py @@ -0,0 +1,16 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013 Brocade 
Communications System, Inc. +# All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/neutron/plugins/brocade/db/__init__.py b/neutron/plugins/brocade/db/__init__.py new file mode 100644 index 000000000..c22f863e3 --- /dev/null +++ b/neutron/plugins/brocade/db/__init__.py @@ -0,0 +1,16 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013 Brocade Communications System, Inc. +# All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/neutron/plugins/brocade/db/models.py b/neutron/plugins/brocade/db/models.py new file mode 100644 index 000000000..d9b3663a1 --- /dev/null +++ b/neutron/plugins/brocade/db/models.py @@ -0,0 +1,151 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013 Brocade Communications System, Inc. +# All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# Authors: +# Shiv Haris (sharis@brocade.com) +# Varma Bhupatiraju (vbhupati@#brocade.com) + + +"""Brocade specific database schema/model.""" + +import sqlalchemy as sa + +from neutron.db import model_base +from neutron.db import models_v2 + + +class BrocadeNetwork(model_base.BASEV2, models_v2.HasId): + """Schema for brocade network.""" + + vlan = sa.Column(sa.String(10)) + + +class BrocadePort(model_base.BASEV2): + """Schema for brocade port.""" + + port_id = sa.Column(sa.String(36), primary_key=True, default="") + network_id = sa.Column(sa.String(36), + sa.ForeignKey("brocadenetworks.id"), + nullable=False) + admin_state_up = sa.Column(sa.Boolean, nullable=False) + physical_interface = sa.Column(sa.String(36)) + vlan_id = sa.Column(sa.String(36)) + tenant_id = sa.Column(sa.String(36)) + + +def create_network(context, net_id, vlan): + """Create a brocade specific network/port-profiles.""" + + session = context.session + with session.begin(subtransactions=True): + net = BrocadeNetwork(id=net_id, vlan=vlan) + session.add(net) + + return net + + +def delete_network(context, net_id): + """Delete a brocade specific network/port-profiles.""" + + session = context.session + with session.begin(subtransactions=True): + net = (session.query(BrocadeNetwork).filter_by(id=net_id).first()) + if net is not None: + session.delete(net) + + +def get_network(context, net_id, fields=None): + """Get brocade specific network, with vlan extension.""" + + session = context.session + return (session.query(BrocadeNetwork).filter_by(id=net_id).first()) + + +def 
get_networks(context, filters=None, fields=None): + """Get all brocade specific networks.""" + + session = context.session + try: + nets = session.query(BrocadeNetwork).all() + return nets + except sa.exc.SQLAlchemyError: + return None + + +def create_port(context, port_id, network_id, physical_interface, + vlan_id, tenant_id, admin_state_up): + """Create a brocade specific port, has policy like vlan.""" + + # port_id is truncated: since the linux-bridge tap device names are + # based on truncated port id, this enables port lookups using + # tap devices + port_id = port_id[0:11] + session = context.session + with session.begin(subtransactions=True): + port = BrocadePort(port_id=port_id, + network_id=network_id, + physical_interface=physical_interface, + vlan_id=vlan_id, + admin_state_up=admin_state_up, + tenant_id=tenant_id) + session.add(port) + return port + + +def get_port(context, port_id): + """get a brocade specific port.""" + + port_id = port_id[0:11] + session = context.session + port = (session.query(BrocadePort).filter_by(port_id=port_id).first()) + return port + + +def get_ports(context, network_id=None): + """get a brocade specific port.""" + + session = context.session + ports = (session.query(BrocadePort).filter_by(network_id=network_id).all()) + return ports + + +def delete_port(context, port_id): + """delete brocade specific port.""" + + port_id = port_id[0:11] + session = context.session + with session.begin(subtransactions=True): + port = (session.query(BrocadePort).filter_by(port_id=port_id).first()) + if port is not None: + session.delete(port) + + +def get_port_from_device(session, port_id): + """get port from the tap device.""" + + # device is same as truncated port_id + port = (session.query(BrocadePort).filter_by(port_id=port_id).first()) + return port + + +def update_port_state(context, port_id, admin_state_up): + """Update port attributes.""" + + port_id = port_id[0:11] + session = context.session + session.query(BrocadePort).filter_by( + 
port_id=port_id).update({'admin_state_up': admin_state_up}) diff --git a/neutron/plugins/brocade/nos/__init__.py b/neutron/plugins/brocade/nos/__init__.py new file mode 100644 index 000000000..9d4562b0d --- /dev/null +++ b/neutron/plugins/brocade/nos/__init__.py @@ -0,0 +1,16 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright (c) 2013 Brocade Communications Systems, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/neutron/plugins/brocade/nos/fake_nosdriver.py b/neutron/plugins/brocade/nos/fake_nosdriver.py new file mode 100644 index 000000000..8984768d5 --- /dev/null +++ b/neutron/plugins/brocade/nos/fake_nosdriver.py @@ -0,0 +1,117 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013 Brocade Communications System, Inc. +# All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# +# Authors: +# Varma Bhupatiraju (vbhupati@#brocade.com) +# Shiv Haris (sharis@brocade.com) + + +"""FAKE DRIVER, for unit tests purposes. + +Brocade NOS Driver implements NETCONF over SSHv2 for +Neutron network life-cycle management. +""" + + +class NOSdriver(): + """NOS NETCONF interface driver for Neutron network. + + Fake: Handles life-cycle management of Neutron network, + leverages AMPP on NOS + (for use by unit tests, avoids touching any hardware) + """ + + def __init__(self): + pass + + def connect(self, host, username, password): + """Connect via SSH and initialize the NETCONF session.""" + pass + + def create_network(self, host, username, password, net_id): + """Creates a new virtual network.""" + pass + + def delete_network(self, host, username, password, net_id): + """Deletes a virtual network.""" + pass + + def associate_mac_to_network(self, host, username, password, + net_id, mac): + """Associates a MAC address to virtual network.""" + pass + + def dissociate_mac_from_network(self, host, username, password, + net_id, mac): + """Dissociates a MAC address from virtual network.""" + pass + + def create_vlan_interface(self, mgr, vlan_id): + """Configures a VLAN interface.""" + pass + + def delete_vlan_interface(self, mgr, vlan_id): + """Deletes a VLAN interface.""" + pass + + def get_port_profiles(self, mgr): + """Retrieves all port profiles.""" + pass + + def get_port_profile(self, mgr, name): + """Retrieves a port profile.""" + pass + + def create_port_profile(self, mgr, name): + """Creates a port profile.""" + pass + + def delete_port_profile(self, mgr, name): + """Deletes a port profile.""" + pass + + def activate_port_profile(self, mgr, name): + """Activates a port profile.""" + pass + + def deactivate_port_profile(self, mgr, name): + """Deactivates a port profile.""" + pass + + def associate_mac_to_port_profile(self, mgr, name, mac_address): + """Associates a MAC address to a port profile.""" + pass + + def dissociate_mac_from_port_profile(self, 
mgr, name, mac_address): + """Dissociates a MAC address from a port profile.""" + pass + + def create_vlan_profile_for_port_profile(self, mgr, name): + """Creates VLAN sub-profile for port profile.""" + pass + + def configure_l2_mode_for_vlan_profile(self, mgr, name): + """Configures L2 mode for VLAN sub-profile.""" + pass + + def configure_trunk_mode_for_vlan_profile(self, mgr, name): + """Configures trunk mode for VLAN sub-profile.""" + pass + + def configure_allowed_vlans_for_vlan_profile(self, mgr, name, vlan_id): + """Configures allowed VLANs for VLAN sub-profile.""" + pass diff --git a/neutron/plugins/brocade/nos/nctemplates.py b/neutron/plugins/brocade/nos/nctemplates.py new file mode 100644 index 000000000..48071dbcd --- /dev/null +++ b/neutron/plugins/brocade/nos/nctemplates.py @@ -0,0 +1,204 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright (c) 2013 Brocade Communications Systems, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# Authors: +# Varma Bhupatiraju (vbhupati@#brocade.com) +# Shiv Haris (sharis@brocade.com) + + +"""NOS NETCONF XML Configuration Command Templates. 
+ +Interface Configuration Commands +""" + +# Create VLAN (vlan_id) +CREATE_VLAN_INTERFACE = """ + + + + + {vlan_id} + + + + +""" + +# Delete VLAN (vlan_id) +DELETE_VLAN_INTERFACE = """ + + + + + {vlan_id} + + + + +""" + +# +# AMPP Life-cycle Management Configuration Commands +# + +# Create AMPP port-profile (port_profile_name) +CREATE_PORT_PROFILE = """ + + + {name} + + +""" + +# Create VLAN sub-profile for port-profile (port_profile_name) +CREATE_VLAN_PROFILE_FOR_PORT_PROFILE = """ + + + {name} + + + +""" + +# Configure L2 mode for VLAN sub-profile (port_profile_name) +CONFIGURE_L2_MODE_FOR_VLAN_PROFILE = """ + + + {name} + + + + + +""" + +# Configure trunk mode for VLAN sub-profile (port_profile_name) +CONFIGURE_TRUNK_MODE_FOR_VLAN_PROFILE = """ + + + {name} + + + + trunk + + + + + +""" + +# Configure allowed VLANs for VLAN sub-profile +# (port_profile_name, allowed_vlan, native_vlan) +CONFIGURE_ALLOWED_VLANS_FOR_VLAN_PROFILE = """ + + + {name} + + + + + + {vlan_id} + + + + + + + +""" + +# Delete port-profile (port_profile_name) +DELETE_PORT_PROFILE = """ + + + {name} + + +""" + +# Activate port-profile (port_profile_name) +ACTIVATE_PORT_PROFILE = """ + + + + {name} + + + + +""" + +# Deactivate port-profile (port_profile_name) +DEACTIVATE_PORT_PROFILE = """ + + + + {name} + + + + +""" + +# Associate MAC address to port-profile (port_profile_name, mac_address) +ASSOCIATE_MAC_TO_PORT_PROFILE = """ + + + + {name} + + {mac_address} + + + + +""" + +# Dissociate MAC address from port-profile (port_profile_name, mac_address) +DISSOCIATE_MAC_FROM_PORT_PROFILE = """ + + + + {name} + + {mac_address} + + + + +""" + +# +# Custom RPC Commands +# + + +# +# Constants +# + +# Port profile naming convention for Neutron networks +OS_PORT_PROFILE_NAME = "openstack-profile-{id}" + +# Port profile filter expressions +PORT_PROFILE_XPATH_FILTER = "/port-profile" +PORT_PROFILE_NAME_XPATH_FILTER = "/port-profile[name='{name}']" diff --git a/neutron/plugins/brocade/nos/nosdriver.py 
b/neutron/plugins/brocade/nos/nosdriver.py new file mode 100644 index 000000000..ce4c86110 --- /dev/null +++ b/neutron/plugins/brocade/nos/nosdriver.py @@ -0,0 +1,233 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013 Brocade Communications System, Inc. +# All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# Authors: +# Varma Bhupatiraju (vbhupati@#brocade.com) +# Shiv Haris (sharis@brocade.com) + + +"""Brocade NOS Driver implements NETCONF over SSHv2 for +Neutron network life-cycle management. +""" + +from ncclient import manager + +from neutron.openstack.common import excutils +from neutron.openstack.common import log as logging +from neutron.plugins.brocade.nos import nctemplates as template + + +LOG = logging.getLogger(__name__) +SSH_PORT = 22 + + +def nos_unknown_host_cb(host, fingerprint): + """An unknown host callback. + + Returns `True` if it finds the key acceptable, + and `False` if not. This default callback for NOS always returns 'True' + (i.e. trusts all hosts for now). + """ + return True + + +class NOSdriver(): + """NOS NETCONF interface driver for Neutron network. 
+ + Handles life-cycle management of Neutron network (leverages AMPP on NOS) + """ + + def __init__(self): + self.mgr = None + + def connect(self, host, username, password): + """Connect via SSH and initialize the NETCONF session.""" + + # Use the persisted NETCONF connection + if self.mgr and self.mgr.connected: + return self.mgr + + # Open new NETCONF connection + try: + self.mgr = manager.connect(host=host, port=SSH_PORT, + username=username, password=password, + unknown_host_cb=nos_unknown_host_cb) + except Exception as e: + with excutils.save_and_reraise_exception(): + LOG.error(_("Connect failed to switch: %s"), e) + + LOG.debug(_("Connect success to host %(host)s:%(ssh_port)d"), + dict(host=host, ssh_port=SSH_PORT)) + return self.mgr + + def close_session(self): + """Close NETCONF session.""" + if self.mgr: + self.mgr.close_session() + self.mgr = None + + def create_network(self, host, username, password, net_id): + """Creates a new virtual network.""" + + name = template.OS_PORT_PROFILE_NAME.format(id=net_id) + try: + mgr = self.connect(host, username, password) + self.create_vlan_interface(mgr, net_id) + self.create_port_profile(mgr, name) + self.create_vlan_profile_for_port_profile(mgr, name) + self.configure_l2_mode_for_vlan_profile(mgr, name) + self.configure_trunk_mode_for_vlan_profile(mgr, name) + self.configure_allowed_vlans_for_vlan_profile(mgr, name, net_id) + self.activate_port_profile(mgr, name) + except Exception as ex: + with excutils.save_and_reraise_exception(): + LOG.exception(_("NETCONF error: %s"), ex) + self.close_session() + + def delete_network(self, host, username, password, net_id): + """Deletes a virtual network.""" + + name = template.OS_PORT_PROFILE_NAME.format(id=net_id) + try: + mgr = self.connect(host, username, password) + self.deactivate_port_profile(mgr, name) + self.delete_port_profile(mgr, name) + self.delete_vlan_interface(mgr, net_id) + except Exception as ex: + with excutils.save_and_reraise_exception(): + 
LOG.exception(_("NETCONF error: %s"), ex) + self.close_session() + + def associate_mac_to_network(self, host, username, password, + net_id, mac): + """Associates a MAC address to virtual network.""" + + name = template.OS_PORT_PROFILE_NAME.format(id=net_id) + try: + mgr = self.connect(host, username, password) + self.associate_mac_to_port_profile(mgr, name, mac) + except Exception as ex: + with excutils.save_and_reraise_exception(): + LOG.exception(_("NETCONF error: %s"), ex) + self.close_session() + + def dissociate_mac_from_network(self, host, username, password, + net_id, mac): + """Dissociates a MAC address from virtual network.""" + + name = template.OS_PORT_PROFILE_NAME.format(id=net_id) + try: + mgr = self.connect(host, username, password) + self.dissociate_mac_from_port_profile(mgr, name, mac) + except Exception as ex: + with excutils.save_and_reraise_exception(): + LOG.exception(_("NETCONF error: %s"), ex) + self.close_session() + + def create_vlan_interface(self, mgr, vlan_id): + """Configures a VLAN interface.""" + + confstr = template.CREATE_VLAN_INTERFACE.format(vlan_id=vlan_id) + mgr.edit_config(target='running', config=confstr) + + def delete_vlan_interface(self, mgr, vlan_id): + """Deletes a VLAN interface.""" + + confstr = template.DELETE_VLAN_INTERFACE.format(vlan_id=vlan_id) + mgr.edit_config(target='running', config=confstr) + + def get_port_profiles(self, mgr): + """Retrieves all port profiles.""" + + filterstr = template.PORT_PROFILE_XPATH_FILTER + response = mgr.get_config(source='running', + filter=('xpath', filterstr)).data_xml + return response + + def get_port_profile(self, mgr, name): + """Retrieves a port profile.""" + + filterstr = template.PORT_PROFILE_NAME_XPATH_FILTER.format(name=name) + response = mgr.get_config(source='running', + filter=('xpath', filterstr)).data_xml + return response + + def create_port_profile(self, mgr, name): + """Creates a port profile.""" + + confstr = template.CREATE_PORT_PROFILE.format(name=name) + 
mgr.edit_config(target='running', config=confstr) + + def delete_port_profile(self, mgr, name): + """Deletes a port profile.""" + + confstr = template.DELETE_PORT_PROFILE.format(name=name) + mgr.edit_config(target='running', config=confstr) + + def activate_port_profile(self, mgr, name): + """Activates a port profile.""" + + confstr = template.ACTIVATE_PORT_PROFILE.format(name=name) + mgr.edit_config(target='running', config=confstr) + + def deactivate_port_profile(self, mgr, name): + """Deactivates a port profile.""" + + confstr = template.DEACTIVATE_PORT_PROFILE.format(name=name) + mgr.edit_config(target='running', config=confstr) + + def associate_mac_to_port_profile(self, mgr, name, mac_address): + """Associates a MAC address to a port profile.""" + + confstr = template.ASSOCIATE_MAC_TO_PORT_PROFILE.format( + name=name, mac_address=mac_address) + mgr.edit_config(target='running', config=confstr) + + def dissociate_mac_from_port_profile(self, mgr, name, mac_address): + """Dissociates a MAC address from a port profile.""" + + confstr = template.DISSOCIATE_MAC_FROM_PORT_PROFILE.format( + name=name, mac_address=mac_address) + mgr.edit_config(target='running', config=confstr) + + def create_vlan_profile_for_port_profile(self, mgr, name): + """Creates VLAN sub-profile for port profile.""" + + confstr = template.CREATE_VLAN_PROFILE_FOR_PORT_PROFILE.format( + name=name) + mgr.edit_config(target='running', config=confstr) + + def configure_l2_mode_for_vlan_profile(self, mgr, name): + """Configures L2 mode for VLAN sub-profile.""" + + confstr = template.CONFIGURE_L2_MODE_FOR_VLAN_PROFILE.format( + name=name) + mgr.edit_config(target='running', config=confstr) + + def configure_trunk_mode_for_vlan_profile(self, mgr, name): + """Configures trunk mode for VLAN sub-profile.""" + + confstr = template.CONFIGURE_TRUNK_MODE_FOR_VLAN_PROFILE.format( + name=name) + mgr.edit_config(target='running', config=confstr) + + def configure_allowed_vlans_for_vlan_profile(self, mgr, name, 
vlan_id): + """Configures allowed VLANs for VLAN sub-profile.""" + + confstr = template.CONFIGURE_ALLOWED_VLANS_FOR_VLAN_PROFILE.format( + name=name, vlan_id=vlan_id) + mgr.edit_config(target='running', config=confstr) diff --git a/neutron/plugins/brocade/tests/README b/neutron/plugins/brocade/tests/README new file mode 100644 index 000000000..476ca0535 --- /dev/null +++ b/neutron/plugins/brocade/tests/README @@ -0,0 +1,24 @@ +Start the neutron-server with IP address of switch configured in brocade.ini: +(for configuration instruction please see README.md in the above directory) + +nostest.py: +This tests two things: + 1. Creates port-profile on the physical switch when a neutron 'network' is created + 2. Associates the MAC address with the created port-profile + +noscli.py: + CLI interface to create/delete/associate MAC/dissociate MAC + Commands: + % noscli.py create + (after running check that PP is created on the switch) + + % noscli.py delete + (after running check that PP is deleted from the switch) + + % noscli.py associate + (after running check that MAC is associated with PP) + + % noscli.py dissociate + (after running check that MAC is dissociated from the PP) + + diff --git a/neutron/plugins/brocade/tests/noscli.py b/neutron/plugins/brocade/tests/noscli.py new file mode 100644 index 000000000..81e988e3c --- /dev/null +++ b/neutron/plugins/brocade/tests/noscli.py @@ -0,0 +1,93 @@ +#!/usr/bin/env python +# +# Copyright (c) 2013 Brocade Communications Systems, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. +# +# Authors: +# Varma Bhupatiraju (vbhupati@#brocade.com) +# Shiv Haris (sharis@brocade.com) + + +"""Brocade NOS Driver CLI.""" +from __future__ import print_function + +import argparse + +from neutron.openstack.common import log as logging +from neutron.plugins.brocade.nos import nosdriver as nos + +LOG = logging.getLogger(__name__) + + +class NOSCli(object): + + def __init__(self, host, username, password): + self.host = host + self.username = username + self.password = password + self.driver = nos.NOSdriver() + + def execute(self, cmd): + numargs = len(args.otherargs) + + if args.cmd == 'create' and numargs == 1: + self._create(args.otherargs[0]) + elif args.cmd == 'delete' and numargs == 1: + self._delete(args.otherargs[0]) + elif args.cmd == 'associate' and numargs == 2: + self._associate(args.otherargs[0], args.otherargs[1]) + elif args.cmd == 'dissociate' and numargs == 2: + self._dissociate(args.otherargs[0], args.otherargs[1]) + else: + print(usage_desc) + exit(0) + + def _create(self, net_id): + self.driver.create_network(self.host, self.username, self.password, + net_id) + + def _delete(self, net_id): + self.driver.delete_network(self.host, self.username, self.password, + net_id) + + def _associate(self, net_id, mac): + self.driver.associate_mac_to_network( + self.host, self.username, self.password, net_id, mac) + + def _dissociate(self, net_id, mac): + self.driver.dissociate_mac_from_network( + self.host, self.username, self.password, net_id, mac) + + +usage_desc = """ +Command descriptions: + + create + delete + associate + dissociate +""" + +parser = argparse.ArgumentParser(description='process args', + usage=usage_desc, epilog='foo bar help') +parser.add_argument('--ip', default='localhost') +parser.add_argument('--username', default='admin') +parser.add_argument('--password', default='password') +parser.add_argument('cmd') 
+parser.add_argument('otherargs', nargs='*') +args = parser.parse_args() + +noscli = NOSCli(args.ip, args.username, args.password) +noscli.execute(args.cmd) diff --git a/neutron/plugins/brocade/tests/nostest.py b/neutron/plugins/brocade/tests/nostest.py new file mode 100644 index 000000000..72a21ae8b --- /dev/null +++ b/neutron/plugins/brocade/tests/nostest.py @@ -0,0 +1,48 @@ +# Copyright (c) 2013 Brocade Communications Systems, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# +# Authors: +# Varma Bhupatiraju (vbhupati@#brocade.com) +# Shiv Haris (sharis@brocade.com) + + +"""Brocade NOS Driver Test.""" +from __future__ import print_function + +import sys + +from neutron.plugins.brocade.nos import nosdriver as nos + + +def nostest(host, username, password): + # Driver + driver = nos.NOSdriver() + + # Neutron operations + vlan = 1001 + mac = '0050.56bf.0001' + driver.create_network(host, username, password, vlan) + driver.associate_mac_to_network(host, username, password, vlan, mac) + driver.dissociate_mac_from_network(host, username, password, vlan, mac) + driver.delete_network(host, username, password, vlan) + + # AMPP enumeration + with driver.connect(host, username, password) as mgr: + print(driver.get_port_profiles(mgr)) + print(driver.get_port_profile(mgr, 'default')) + + +if __name__ == '__main__': + nostest(sys.argv[1], sys.argv[2], sys.argv[3]) diff --git a/neutron/plugins/brocade/vlanbm.py b/neutron/plugins/brocade/vlanbm.py new file mode 100644 index 000000000..3c4b3ccb6 --- /dev/null +++ b/neutron/plugins/brocade/vlanbm.py @@ -0,0 +1,60 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013 Brocade Communications System, Inc. +# All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# +# Authors: +# Shiv Haris (sharis@brocade.com) +# Varma Bhupatiraju (vbhupati@#brocade.com) + + +"""A Vlan Bitmap class to handle allocation/de-allocation of vlan ids.""" +from six import moves + +from neutron.common import constants +from neutron.plugins.brocade.db import models as brocade_db + + +MIN_VLAN = constants.MIN_VLAN_TAG + 1 +MAX_VLAN = constants.MAX_VLAN_TAG + + +class VlanBitmap(object): + """Setup a vlan bitmap for allocation/de-allocation.""" + + # Keep track of the vlans that have been allocated/de-allocated + # uses a bitmap to do this + + def __init__(self, ctxt): + """Initialize the vlan as a set.""" + self.vlans = set(int(net['vlan']) + for net in brocade_db.get_networks(ctxt) + if net['vlan'] + ) + + def get_next_vlan(self, vlan_id=None): + """Try to get a specific vlan if requested or get the next vlan.""" + min_vlan_search = vlan_id or MIN_VLAN + max_vlan_search = (vlan_id and vlan_id + 1) or MAX_VLAN + + for vlan in moves.xrange(min_vlan_search, max_vlan_search): + if vlan not in self.vlans: + self.vlans.add(vlan) + return vlan + + def release_vlan(self, vlan_id): + """Return the vlan to the pool.""" + if vlan_id in self.vlans: + self.vlans.remove(vlan_id) diff --git a/neutron/plugins/cisco/README b/neutron/plugins/cisco/README new file mode 100644 index 000000000..2bedb75b1 --- /dev/null +++ b/neutron/plugins/cisco/README @@ -0,0 +1,7 @@ +Cisco Neutron Virtual Network Plugin + +This plugin implements Neutron v2 APIs and helps configure +topologies consisting of virtual and physical switches. + +For more details on use please refer to: +http://wiki.openstack.org/cisco-neutron diff --git a/neutron/plugins/cisco/__init__.py b/neutron/plugins/cisco/__init__.py new file mode 100644 index 000000000..db695fb0a --- /dev/null +++ b/neutron/plugins/cisco/__init__.py @@ -0,0 +1,18 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2011 Cisco Systems, Inc. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Sumit Naiksatam, Cisco Systems, Inc. +# diff --git a/neutron/plugins/cisco/common/__init__.py b/neutron/plugins/cisco/common/__init__.py new file mode 100644 index 000000000..833357b73 --- /dev/null +++ b/neutron/plugins/cisco/common/__init__.py @@ -0,0 +1,17 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2011 Cisco Systems, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Sumit Naiksatam, Cisco Systems, Inc. diff --git a/neutron/plugins/cisco/common/cisco_constants.py b/neutron/plugins/cisco/common/cisco_constants.py new file mode 100644 index 000000000..2f1992108 --- /dev/null +++ b/neutron/plugins/cisco/common/cisco_constants.py @@ -0,0 +1,111 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2011 Cisco Systems, Inc. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Sumit Naiksatam, Cisco Systems, Inc. + + +# Attachment attributes +INSTANCE_ID = 'instance_id' +TENANT_ID = 'tenant_id' +TENANT_NAME = 'tenant_name' +HOST_NAME = 'host_name' + +# Network attributes +NET_ID = 'id' +NET_NAME = 'name' +NET_VLAN_ID = 'vlan_id' +NET_VLAN_NAME = 'vlan_name' +NET_PORTS = 'ports' + +CREDENTIAL_ID = 'credential_id' +CREDENTIAL_NAME = 'credential_name' +CREDENTIAL_USERNAME = 'user_name' +CREDENTIAL_PASSWORD = 'password' +CREDENTIAL_TYPE = 'type' +MASKED_PASSWORD = '********' + +USERNAME = 'username' +PASSWORD = 'password' + +LOGGER_COMPONENT_NAME = "cisco_plugin" + +NEXUS_PLUGIN = 'nexus_plugin' +VSWITCH_PLUGIN = 'vswitch_plugin' + +DEVICE_IP = 'device_ip' + +NETWORK_ADMIN = 'network_admin' + +NETWORK = 'network' +PORT = 'port' +BASE_PLUGIN_REF = 'base_plugin_ref' +CONTEXT = 'context' +SUBNET = 'subnet' + +#### N1Kv CONSTANTS +# Special vlan_id value in n1kv_vlan_allocations table indicating flat network +FLAT_VLAN_ID = -1 + +# Topic for tunnel notifications between the plugin and agent +TUNNEL = 'tunnel' + +# Maximum VXLAN range configurable for one network profile. 
+MAX_VXLAN_RANGE = 1000000 + +# Values for network_type +NETWORK_TYPE_FLAT = 'flat' +NETWORK_TYPE_VLAN = 'vlan' +NETWORK_TYPE_VXLAN = 'vxlan' +NETWORK_TYPE_LOCAL = 'local' +NETWORK_TYPE_NONE = 'none' +NETWORK_TYPE_TRUNK = 'trunk' +NETWORK_TYPE_MULTI_SEGMENT = 'multi-segment' + +# Values for network sub_type +NETWORK_TYPE_OVERLAY = 'overlay' +NETWORK_SUBTYPE_NATIVE_VXLAN = 'native_vxlan' +NETWORK_SUBTYPE_TRUNK_VLAN = NETWORK_TYPE_VLAN +NETWORK_SUBTYPE_TRUNK_VXLAN = NETWORK_TYPE_OVERLAY + +# Prefix for VM Network name +VM_NETWORK_NAME_PREFIX = 'vmn_' + +DEFAULT_HTTP_TIMEOUT = 15 +SET = 'set' +INSTANCE = 'instance' +PROPERTIES = 'properties' +NAME = 'name' +ID = 'id' +POLICY = 'policy' +TENANT_ID_NOT_SET = 'TENANT_ID_NOT_SET' +ENCAPSULATIONS = 'encapsulations' +STATE = 'state' +ONLINE = 'online' +MAPPINGS = 'mappings' +MAPPING = 'mapping' +SEGMENTS = 'segments' +SEGMENT = 'segment' +BRIDGE_DOMAIN_SUFFIX = '_bd' +LOGICAL_NETWORK_SUFFIX = '_log_net' +ENCAPSULATION_PROFILE_SUFFIX = '_profile' + +UUID_LENGTH = 36 + +# Nexus vlan and vxlan segment range +NEXUS_VLAN_RESERVED_MIN = 3968 +NEXUS_VLAN_RESERVED_MAX = 4047 +NEXUS_VXLAN_MIN = 4096 +NEXUS_VXLAN_MAX = 16000000 diff --git a/neutron/plugins/cisco/common/cisco_credentials_v2.py b/neutron/plugins/cisco/common/cisco_credentials_v2.py new file mode 100644 index 000000000..5d8fc8ff5 --- /dev/null +++ b/neutron/plugins/cisco/common/cisco_credentials_v2.py @@ -0,0 +1,61 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2012 Cisco Systems, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Sumit Naiksatam, Cisco Systems, Inc. + +import logging as LOG + +from neutron.plugins.cisco.common import cisco_constants as const +from neutron.plugins.cisco.common import cisco_exceptions as cexc +from neutron.plugins.cisco.common import config +from neutron.plugins.cisco.db import network_db_v2 as cdb + +LOG.basicConfig(level=LOG.WARN) +LOG.getLogger(const.LOGGER_COMPONENT_NAME) + + +class Store(object): + """Credential Store.""" + + @staticmethod + def initialize(): + dev_dict = config.get_device_dictionary() + for key in dev_dict: + dev_id, dev_ip, dev_key = key + if dev_key == const.USERNAME: + try: + cdb.add_credential( + dev_ip, + dev_dict[dev_id, dev_ip, const.USERNAME], + dev_dict[dev_id, dev_ip, const.PASSWORD], + dev_id) + except cexc.CredentialAlreadyExists: + # We are quietly ignoring this, since it only happens + # if this class module is loaded more than once, in + # which case, the credentials are already populated + pass + + @staticmethod + def get_username(cred_name): + """Get the username.""" + credential = cdb.get_credential_name(cred_name) + return credential[const.CREDENTIAL_USERNAME] + + @staticmethod + def get_password(cred_name): + """Get the password.""" + credential = cdb.get_credential_name(cred_name) + return credential[const.CREDENTIAL_PASSWORD] diff --git a/neutron/plugins/cisco/common/cisco_exceptions.py b/neutron/plugins/cisco/common/cisco_exceptions.py new file mode 100644 index 000000000..be50e7665 --- /dev/null +++ b/neutron/plugins/cisco/common/cisco_exceptions.py @@ -0,0 +1,236 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2011 Cisco Systems, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Sumit Naiksatam, Cisco Systems, Inc. +# @author: Rohit Agarwalla, Cisco Systems, Inc. + +"""Exceptions used by the Cisco plugin.""" + +from neutron.common import exceptions + + +class NetworkSegmentIDNotFound(exceptions.NeutronException): + """Segmentation ID for network is not found.""" + message = _("Segmentation ID for network %(net_id)s is not found.") + + +class NoMoreNics(exceptions.NeutronException): + """No more dynamic NICs are available in the system.""" + message = _("Unable to complete operation. No more dynamic NICs are " + "available in the system.") + + +class NetworkVlanBindingAlreadyExists(exceptions.NeutronException): + """Binding cannot be created, since it already exists.""" + message = _("NetworkVlanBinding for %(vlan_id)s and network " + "%(network_id)s already exists.") + + +class VlanIDNotFound(exceptions.NeutronException): + """VLAN ID cannot be found.""" + message = _("Vlan ID %(vlan_id)s not found.") + + +class VlanIDOutsidePool(exceptions.NeutronException): + """VLAN ID cannot be allocated, since it is outside the configured pool.""" + message = _("Unable to complete operation. 
VLAN ID exists outside of the " + "configured network segment range.") + + +class VlanIDNotAvailable(exceptions.NeutronException): + """No VLAN ID available.""" + message = _("No Vlan ID available.") + + +class QosNotFound(exceptions.NeutronException): + """QoS level with this ID cannot be found.""" + message = _("QoS level %(qos_id)s could not be found " + "for tenant %(tenant_id)s.") + + +class QosNameAlreadyExists(exceptions.NeutronException): + """QoS Name already exists.""" + message = _("QoS level with name %(qos_name)s already exists " + "for tenant %(tenant_id)s.") + + +class CredentialNotFound(exceptions.NeutronException): + """Credential with this ID cannot be found.""" + message = _("Credential %(credential_id)s could not be found.") + + +class CredentialNameNotFound(exceptions.NeutronException): + """Credential Name could not be found.""" + message = _("Credential %(credential_name)s could not be found.") + + +class CredentialAlreadyExists(exceptions.NeutronException): + """Credential already exists.""" + message = _("Credential %(credential_name)s already exists.") + + +class ProviderNetworkExists(exceptions.NeutronException): + """Provider network already exists.""" + message = _("Provider network %s already exists") + + +class NexusComputeHostNotConfigured(exceptions.NeutronException): + """Connection to compute host is not configured.""" + message = _("Connection to %(host)s is not configured.") + + +class NexusConnectFailed(exceptions.NeutronException): + """Failed to connect to Nexus switch.""" + message = _("Unable to connect to Nexus %(nexus_host)s. Reason: %(exc)s.") + + +class NexusConfigFailed(exceptions.NeutronException): + """Failed to configure Nexus switch.""" + message = _("Failed to configure Nexus: %(config)s. 
Reason: %(exc)s.") + + +class NexusPortBindingNotFound(exceptions.NeutronException): + """NexusPort Binding is not present.""" + message = _("Nexus Port Binding (%(filters)s) is not present.") + + def __init__(self, **kwargs): + filters = ','.join('%s=%s' % i for i in kwargs.items()) + super(NexusPortBindingNotFound, self).__init__(filters=filters) + + +class NoNexusSviSwitch(exceptions.NeutronException): + """No usable nexus switch found.""" + message = _("No usable Nexus switch found to create SVI interface.") + + +class PortVnicBindingAlreadyExists(exceptions.NeutronException): + """PortVnic Binding already exists.""" + message = _("PortVnic Binding %(port_id)s already exists.") + + +class PortVnicNotFound(exceptions.NeutronException): + """PortVnic Binding is not present.""" + message = _("PortVnic Binding %(port_id)s is not present.") + + +class SubnetNotSpecified(exceptions.NeutronException): + """Subnet id not specified.""" + message = _("No subnet_id specified for router gateway.") + + +class SubnetInterfacePresent(exceptions.NeutronException): + """Subnet SVI interface already exists.""" + message = _("Subnet %(subnet_id)s has an interface on %(router_id)s.") + + +class PortIdForNexusSvi(exceptions.NeutronException): + """Port Id specified for Nexus SVI.""" + message = _('Nexus hardware router gateway only uses Subnet Ids.') + + +class InvalidDetach(exceptions.NeutronException): + message = _("Unable to unplug the attachment %(att_id)s from port " + "%(port_id)s for network %(net_id)s. 
The attachment " + "%(att_id)s does not exist.") + + +class PolicyProfileAlreadyExists(exceptions.NeutronException): + """Policy Profile cannot be created since it already exists.""" + message = _("Policy Profile %(profile_id)s " + "already exists.") + + +class PolicyProfileIdNotFound(exceptions.NotFound): + """Policy Profile with the given UUID cannot be found.""" + message = _("Policy Profile %(profile_id)s could not be found.") + + +class NetworkProfileAlreadyExists(exceptions.NeutronException): + """Network Profile cannot be created since it already exists.""" + message = _("Network Profile %(profile_id)s " + "already exists.") + + +class NetworkProfileNotFound(exceptions.NotFound): + """Network Profile with the given UUID/name cannot be found.""" + message = _("Network Profile %(profile)s could not be found.") + + +class NetworkProfileInUse(exceptions.InUse): + """Network Profile with the given UUID is in use.""" + message = _("One or more network segments belonging to network " + "profile %(profile)s is in use.") + + +class NoMoreNetworkSegments(exceptions.NoNetworkAvailable): + """Network segments exhausted for the given network profile.""" + message = _("No more segments available in network segment pool " + "%(network_profile_name)s.") + + +class VMNetworkNotFound(exceptions.NotFound): + """VM Network with the given name cannot be found.""" + message = _("VM Network %(name)s could not be found.") + + +class VxlanIDInUse(exceptions.InUse): + """VXLAN ID is in use.""" + message = _("Unable to create the network. " + "The VXLAN ID %(vxlan_id)s is in use.") + + +class VxlanIDNotFound(exceptions.NotFound): + """VXLAN ID cannot be found.""" + message = _("Vxlan ID %(vxlan_id)s not found.") + + +class VxlanIDOutsidePool(exceptions.NeutronException): + """VXLAN ID cannot be allocated, as it is outside the configured pool.""" + message = _("Unable to complete operation. 
VXLAN ID exists outside of the " + "configured network segment range.") + + +class VSMConnectionFailed(exceptions.ServiceUnavailable): + """Connection to VSM failed.""" + message = _("Connection to VSM failed: %(reason)s.") + + +class VSMError(exceptions.NeutronException): + """Error has occurred on the VSM.""" + message = _("Internal VSM Error: %(reason)s.") + + +class NetworkBindingNotFound(exceptions.NotFound): + """Network Binding for network cannot be found.""" + message = _("Network Binding for network %(network_id)s could " + "not be found.") + + +class PortBindingNotFound(exceptions.NotFound): + """Port Binding for port cannot be found.""" + message = _("Port Binding for port %(port_id)s could " + "not be found.") + + +class ProfileTenantBindingNotFound(exceptions.NotFound): + """Profile to Tenant binding for given profile ID cannot be found.""" + message = _("Profile-Tenant binding for profile %(profile_id)s could " + "not be found.") + + +class NoClusterFound(exceptions.NotFound): + """No service cluster found to perform multi-segment bridging.""" + message = _("No service cluster found to perform multi-segment bridging.") diff --git a/neutron/plugins/cisco/common/cisco_faults.py b/neutron/plugins/cisco/common/cisco_faults.py new file mode 100644 index 000000000..80e787e41 --- /dev/null +++ b/neutron/plugins/cisco/common/cisco_faults.py @@ -0,0 +1,138 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2011 Cisco Systems, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Ying Liu, Cisco Systems, Inc. + +import webob.dec + +from neutron import wsgi + + +class Fault(webob.exc.HTTPException): + """Error codes for API faults.""" + + _fault_names = { + 400: "malformedRequest", + 401: "unauthorized", + 451: "CredentialNotFound", + 452: "QoSNotFound", + 453: "NovatenantNotFound", + 454: "MultiportNotFound", + 470: "serviceUnavailable", + 471: "pluginFault" + } + + def __init__(self, exception): + """Create a Fault for the given webob.exc.exception.""" + self.wrapped_exc = exception + + @webob.dec.wsgify(RequestClass=wsgi.Request) + def __call__(self, req): + """Generate a WSGI response. + + Response is generated based on the exception passed to constructor. + """ + # Replace the body with fault details. + code = self.wrapped_exc.status_int + fault_name = self._fault_names.get(code, "neutronServiceFault") + fault_data = { + fault_name: { + 'code': code, + 'message': self.wrapped_exc.explanation}} + # 'code' is an attribute on the fault tag itself + content_type = req.best_match_content_type() + self.wrapped_exc.body = wsgi.Serializer().serialize( + fault_data, content_type) + self.wrapped_exc.content_type = content_type + return self.wrapped_exc + + +class PortNotFound(webob.exc.HTTPClientError): + """PortNotFound exception. + + subclass of :class:`~HTTPClientError` + + This indicates that the server did not find the port specified + in the HTTP request for a given network + + code: 430, title: Port not Found + """ + code = 430 + title = _('Port not Found') + explanation = _('Unable to find a port with the specified identifier.') + + +class CredentialNotFound(webob.exc.HTTPClientError): + """CredentialNotFound exception. 
+ + subclass of :class:`~HTTPClientError` + + This indicates that the server did not find the Credential specified + in the HTTP request + + code: 451, title: Credential not Found + """ + code = 451 + title = _('Credential Not Found') + explanation = _('Unable to find a Credential with' + ' the specified identifier.') + + +class QosNotFound(webob.exc.HTTPClientError): + """QosNotFound exception. + + subclass of :class:`~HTTPClientError` + + This indicates that the server did not find the QoS specified + in the HTTP request + + code: 452, title: QoS not Found + """ + code = 452 + title = _('QoS Not Found') + explanation = _('Unable to find a QoS with' + ' the specified identifier.') + + +class NovatenantNotFound(webob.exc.HTTPClientError): + """NovatenantNotFound exception. + + subclass of :class:`~HTTPClientError` + + This indicates that the server did not find the Novatenant specified + in the HTTP request + + code: 453, title: Nova tenant not Found + """ + code = 453 + title = _('Nova tenant Not Found') + explanation = _('Unable to find a Novatenant with' + ' the specified identifier.') + + +class RequestedStateInvalid(webob.exc.HTTPClientError): + """RequestedStateInvalid exception. + + subclass of :class:`~HTTPClientError` + + This indicates that the server could not update the port state to + to the request value + + code: 431, title: Requested State Invalid + """ + code = 431 + title = _('Requested State Invalid') + explanation = _('Unable to update port state with specified value.') diff --git a/neutron/plugins/cisco/common/config.py b/neutron/plugins/cisco/common/config.py new file mode 100644 index 000000000..f13569cea --- /dev/null +++ b/neutron/plugins/cisco/common/config.py @@ -0,0 +1,151 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 Cisco Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo.config import cfg + +from neutron.agent.common import config + + +cisco_plugins_opts = [ + cfg.StrOpt('vswitch_plugin', + default='neutron.plugins.openvswitch.ovs_neutron_plugin.' + 'OVSNeutronPluginV2', + help=_("Virtual Switch to use")), + cfg.StrOpt('nexus_plugin', + default='neutron.plugins.cisco.nexus.cisco_nexus_plugin_v2.' + 'NexusPlugin', + help=_("Nexus Switch to use")), +] + +cisco_opts = [ + cfg.StrOpt('vlan_name_prefix', default='q-', + help=_("VLAN Name prefix")), + cfg.StrOpt('provider_vlan_name_prefix', default='p-', + help=_("VLAN Name prefix for provider vlans")), + cfg.BoolOpt('provider_vlan_auto_create', default=True, + help=_('Provider VLANs are automatically created as needed ' + 'on the Nexus switch')), + cfg.BoolOpt('provider_vlan_auto_trunk', default=True, + help=_('Provider VLANs are automatically trunked as needed ' + 'on the ports of the Nexus switch')), + cfg.BoolOpt('nexus_l3_enable', default=False, + help=_("Enable L3 support on the Nexus switches")), + cfg.BoolOpt('svi_round_robin', default=False, + help=_("Distribute SVI interfaces over all switches")), + cfg.StrOpt('model_class', + default='neutron.plugins.cisco.models.virt_phy_sw_v2.' + 'VirtualPhysicalSwitchModelV2', + help=_("Model Class")), + cfg.StrOpt('nexus_driver', + default='neutron.plugins.cisco.test.nexus.' 
+ 'fake_nexus_driver.CiscoNEXUSFakeDriver', + help=_("Nexus Driver Name")), +] + +cisco_n1k_opts = [ + cfg.StrOpt('integration_bridge', default='br-int', + help=_("N1K Integration Bridge")), + cfg.BoolOpt('enable_tunneling', default=True, + help=_("N1K Enable Tunneling")), + cfg.StrOpt('tunnel_bridge', default='br-tun', + help=_("N1K Tunnel Bridge")), + cfg.StrOpt('local_ip', default='10.0.0.3', + help=_("N1K Local IP")), + cfg.StrOpt('tenant_network_type', default='local', + help=_("N1K Tenant Network Type")), + cfg.StrOpt('bridge_mappings', default='', + help=_("N1K Bridge Mappings")), + cfg.StrOpt('vxlan_id_ranges', default='5000:10000', + help=_("N1K VXLAN ID Ranges")), + cfg.StrOpt('network_vlan_ranges', default='vlan:1:4095', + help=_("N1K Network VLAN Ranges")), + cfg.StrOpt('default_network_profile', default='default_network_profile', + help=_("N1K default network profile")), + cfg.StrOpt('default_policy_profile', default='service_profile', + help=_("N1K default policy profile")), + cfg.StrOpt('network_node_policy_profile', default='dhcp_pp', + help=_("N1K policy profile for network node")), + cfg.IntOpt('poll_duration', default=10, + help=_("N1K Policy profile polling duration in seconds")), + cfg.IntOpt('http_pool_size', default=4, + help=_("Number of threads to use to make HTTP requests")), +] + +cfg.CONF.register_opts(cisco_opts, "CISCO") +cfg.CONF.register_opts(cisco_n1k_opts, "CISCO_N1K") +cfg.CONF.register_opts(cisco_plugins_opts, "CISCO_PLUGINS") +config.register_root_helper(cfg.CONF) + +# shortcuts +CONF = cfg.CONF +CISCO = cfg.CONF.CISCO +CISCO_N1K = cfg.CONF.CISCO_N1K +CISCO_PLUGINS = cfg.CONF.CISCO_PLUGINS + +# +# device_dictionary - Contains all external device configuration. 
+# +# When populated the device dictionary format is: +# {('', '', ''): '', ...} +# +# Example: +# {('NEXUS_SWITCH', '1.1.1.1', 'username'): 'admin', +# ('NEXUS_SWITCH', '1.1.1.1', 'password'): 'mySecretPassword', +# ('NEXUS_SWITCH', '1.1.1.1', 'compute1'): '1/1', ...} +# +device_dictionary = {} + +# +# first_device_ip - IP address of first switch discovered in config +# +# Used for SVI placement when round-robin placement is disabled +# +first_device_ip = None + + +class CiscoConfigOptions(): + """Cisco Configuration Options Class.""" + + def __init__(self): + self._create_device_dictionary() + + def _create_device_dictionary(self): + """ + Create the device dictionary from the cisco_plugins.ini + device supported sections. Ex. NEXUS_SWITCH, N1KV. + """ + + global first_device_ip + + multi_parser = cfg.MultiConfigParser() + read_ok = multi_parser.read(CONF.config_file) + + if len(read_ok) != len(CONF.config_file): + raise cfg.Error(_("Some config files were not parsed properly")) + + first_device_ip = None + for parsed_file in multi_parser.parsed: + for parsed_item in parsed_file.keys(): + dev_id, sep, dev_ip = parsed_item.partition(':') + if dev_id.lower() in ['nexus_switch', 'n1kv']: + for dev_key, value in parsed_file[parsed_item].items(): + if dev_ip and not first_device_ip: + first_device_ip = dev_ip + device_dictionary[dev_id, dev_ip, dev_key] = value[0] + + +def get_device_dictionary(): + return device_dictionary diff --git a/neutron/plugins/cisco/db/__init__.py b/neutron/plugins/cisco/db/__init__.py new file mode 100644 index 000000000..db695fb0a --- /dev/null +++ b/neutron/plugins/cisco/db/__init__.py @@ -0,0 +1,18 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2011 Cisco Systems, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Sumit Naiksatam, Cisco Systems, Inc. +# diff --git a/neutron/plugins/cisco/db/n1kv_db_v2.py b/neutron/plugins/cisco/db/n1kv_db_v2.py new file mode 100644 index 000000000..d924af9b3 --- /dev/null +++ b/neutron/plugins/cisco/db/n1kv_db_v2.py @@ -0,0 +1,1621 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 Cisco Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Aruna Kushwaha, Cisco Systems Inc. +# @author: Abhishek Raut, Cisco Systems Inc. +# @author: Rudrajit Tapadar, Cisco Systems Inc. +# @author: Sergey Sudakovich, Cisco Systems Inc. 
+ +import netaddr +import re +from sqlalchemy.orm import exc +from sqlalchemy import sql + +from neutron.api.v2 import attributes +from neutron.common import constants +from neutron.common import exceptions as n_exc +import neutron.db.api as db +from neutron.db import models_v2 +from neutron.openstack.common import log as logging +from neutron.plugins.cisco.common import cisco_constants as c_const +from neutron.plugins.cisco.common import cisco_exceptions as c_exc +from neutron.plugins.cisco.db import n1kv_models_v2 + +LOG = logging.getLogger(__name__) + + +def del_trunk_segment_binding(db_session, trunk_segment_id, segment_pairs): + """ + Delete a trunk network binding. + + :param db_session: database session + :param trunk_segment_id: UUID representing the trunk network + :param segment_pairs: List of segment UUIDs in pair + representing the segments that are trunked + """ + with db_session.begin(subtransactions=True): + for (segment_id, dot1qtag) in segment_pairs: + (db_session.query(n1kv_models_v2.N1kvTrunkSegmentBinding). + filter_by(trunk_segment_id=trunk_segment_id, + segment_id=segment_id, + dot1qtag=dot1qtag).delete()) + alloc = (db_session.query(n1kv_models_v2. + N1kvTrunkSegmentBinding). + filter_by(trunk_segment_id=trunk_segment_id).first()) + if not alloc: + binding = get_network_binding(db_session, trunk_segment_id) + binding.physical_network = None + + +def del_multi_segment_binding(db_session, multi_segment_id, segment_pairs): + """ + Delete a multi-segment network binding. + + :param db_session: database session + :param multi_segment_id: UUID representing the multi-segment network + :param segment_pairs: List of segment UUIDs in pair + representing the segments that are bridged + """ + with db_session.begin(subtransactions=True): + for (segment1_id, segment2_id) in segment_pairs: + (db_session.query(n1kv_models_v2. 
+ N1kvMultiSegmentNetworkBinding).filter_by( + multi_segment_id=multi_segment_id, + segment1_id=segment1_id, + segment2_id=segment2_id).delete()) + + +def add_trunk_segment_binding(db_session, trunk_segment_id, segment_pairs): + """ + Create a trunk network binding. + + :param db_session: database session + :param trunk_segment_id: UUID representing the multi-segment network + :param segment_pairs: List of segment UUIDs in pair + representing the segments to be trunked + """ + with db_session.begin(subtransactions=True): + binding = get_network_binding(db_session, trunk_segment_id) + for (segment_id, tag) in segment_pairs: + if not binding.physical_network: + member_seg_binding = get_network_binding(db_session, + segment_id) + binding.physical_network = member_seg_binding.physical_network + trunk_segment_binding = ( + n1kv_models_v2.N1kvTrunkSegmentBinding( + trunk_segment_id=trunk_segment_id, + segment_id=segment_id, dot1qtag=tag)) + db_session.add(trunk_segment_binding) + + +def add_multi_segment_binding(db_session, multi_segment_id, segment_pairs): + """ + Create a multi-segment network binding. + + :param db_session: database session + :param multi_segment_id: UUID representing the multi-segment network + :param segment_pairs: List of segment UUIDs in pair + representing the segments to be bridged + """ + with db_session.begin(subtransactions=True): + for (segment1_id, segment2_id) in segment_pairs: + multi_segment_binding = ( + n1kv_models_v2.N1kvMultiSegmentNetworkBinding( + multi_segment_id=multi_segment_id, + segment1_id=segment1_id, + segment2_id=segment2_id)) + db_session.add(multi_segment_binding) + + +def add_multi_segment_encap_profile_name(db_session, multi_segment_id, + segment_pair, profile_name): + """ + Add the encapsulation profile name to the multi-segment network binding. 
+ + :param db_session: database session + :param multi_segment_id: UUID representing the multi-segment network + :param segment_pair: set containing the segment UUIDs that are bridged + """ + with db_session.begin(subtransactions=True): + binding = get_multi_segment_network_binding(db_session, + multi_segment_id, + segment_pair) + binding.encap_profile_name = profile_name + + +def get_multi_segment_network_binding(db_session, + multi_segment_id, segment_pair): + """ + Retrieve multi-segment network binding. + + :param db_session: database session + :param multi_segment_id: UUID representing the trunk network whose binding + is to fetch + :param segment_pair: set containing the segment UUIDs that are bridged + :returns: binding object + """ + try: + (segment1_id, segment2_id) = segment_pair + return (db_session.query( + n1kv_models_v2.N1kvMultiSegmentNetworkBinding). + filter_by(multi_segment_id=multi_segment_id, + segment1_id=segment1_id, + segment2_id=segment2_id)).one() + except exc.NoResultFound: + raise c_exc.NetworkBindingNotFound(network_id=multi_segment_id) + + +def get_multi_segment_members(db_session, multi_segment_id): + """ + Retrieve all the member segments of a multi-segment network. + + :param db_session: database session + :param multi_segment_id: UUID representing the multi-segment network + :returns: a list of tuples representing the mapped segments + """ + with db_session.begin(subtransactions=True): + allocs = (db_session.query( + n1kv_models_v2.N1kvMultiSegmentNetworkBinding). + filter_by(multi_segment_id=multi_segment_id)) + return [(a.segment1_id, a.segment2_id) for a in allocs] + + +def get_multi_segment_encap_dict(db_session, multi_segment_id): + """ + Retrieve the encapsulation profiles for every segment pairs bridged. 
+ + :param db_session: database session + :param multi_segment_id: UUID representing the multi-segment network + :returns: a dictionary of lists containing the segment pairs in sets + """ + with db_session.begin(subtransactions=True): + encap_dict = {} + allocs = (db_session.query( + n1kv_models_v2.N1kvMultiSegmentNetworkBinding). + filter_by(multi_segment_id=multi_segment_id)) + for alloc in allocs: + if alloc.encap_profile_name not in encap_dict: + encap_dict[alloc.encap_profile_name] = [] + seg_pair = (alloc.segment1_id, alloc.segment2_id) + encap_dict[alloc.encap_profile_name].append(seg_pair) + return encap_dict + + +def get_trunk_network_binding(db_session, trunk_segment_id, segment_pair): + """ + Retrieve trunk network binding. + + :param db_session: database session + :param trunk_segment_id: UUID representing the trunk network whose binding + is to fetch + :param segment_pair: set containing the segment_id and dot1qtag + :returns: binding object + """ + try: + (segment_id, dot1qtag) = segment_pair + return (db_session.query(n1kv_models_v2.N1kvTrunkSegmentBinding). + filter_by(trunk_segment_id=trunk_segment_id, + segment_id=segment_id, + dot1qtag=dot1qtag)).one() + except exc.NoResultFound: + raise c_exc.NetworkBindingNotFound(network_id=trunk_segment_id) + + +def get_trunk_members(db_session, trunk_segment_id): + """ + Retrieve all the member segments of a trunk network. + + :param db_session: database session + :param trunk_segment_id: UUID representing the trunk network + :returns: a list of tuples representing the segment and their + corresponding dot1qtag + """ + with db_session.begin(subtransactions=True): + allocs = (db_session.query(n1kv_models_v2.N1kvTrunkSegmentBinding). + filter_by(trunk_segment_id=trunk_segment_id)) + return [(a.segment_id, a.dot1qtag) for a in allocs] + + +def is_trunk_member(db_session, segment_id): + """ + Checks if a segment is a member of a trunk segment. 
+ + :param db_session: database session + :param segment_id: UUID of the segment to be checked + :returns: boolean + """ + with db_session.begin(subtransactions=True): + ret = (db_session.query(n1kv_models_v2.N1kvTrunkSegmentBinding). + filter_by(segment_id=segment_id).first()) + return bool(ret) + + +def is_multi_segment_member(db_session, segment_id): + """ + Checks if a segment is a member of a multi-segment network. + + :param db_session: database session + :param segment_id: UUID of the segment to be checked + :returns: boolean + """ + with db_session.begin(subtransactions=True): + ret1 = (db_session.query( + n1kv_models_v2.N1kvMultiSegmentNetworkBinding). + filter_by(segment1_id=segment_id).first()) + ret2 = (db_session.query( + n1kv_models_v2.N1kvMultiSegmentNetworkBinding). + filter_by(segment2_id=segment_id).first()) + return bool(ret1 or ret2) + + +def get_network_binding(db_session, network_id): + """ + Retrieve network binding. + + :param db_session: database session + :param network_id: UUID representing the network whose binding is + to fetch + :returns: binding object + """ + try: + return (db_session.query(n1kv_models_v2.N1kvNetworkBinding). + filter_by(network_id=network_id). + one()) + except exc.NoResultFound: + raise c_exc.NetworkBindingNotFound(network_id=network_id) + + +def add_network_binding(db_session, network_id, network_type, + physical_network, segmentation_id, + multicast_ip, network_profile_id, add_segments): + """ + Create network binding. + + :param db_session: database session + :param network_id: UUID representing the network + :param network_type: string representing type of network (VLAN, OVERLAY, + MULTI_SEGMENT or TRUNK) + :param physical_network: Only applicable for VLAN networks. It + represents a L2 Domain + :param segmentation_id: integer representing VLAN or VXLAN ID + :param multicast_ip: Native VXLAN technology needs a multicast IP to be + associated with every VXLAN ID to deal with broadcast + packets. 
A single multicast IP can be shared by + multiple VXLAN IDs. + :param network_profile_id: network profile ID based on which this network + is created + :param add_segments: List of segment UUIDs in pairs to be added to either a + multi-segment or trunk network + """ + with db_session.begin(subtransactions=True): + binding = n1kv_models_v2.N1kvNetworkBinding( + network_id=network_id, + network_type=network_type, + physical_network=physical_network, + segmentation_id=segmentation_id, + multicast_ip=multicast_ip, + profile_id=network_profile_id) + db_session.add(binding) + if add_segments is None: + pass + elif network_type == c_const.NETWORK_TYPE_MULTI_SEGMENT: + add_multi_segment_binding(db_session, network_id, add_segments) + elif network_type == c_const.NETWORK_TYPE_TRUNK: + add_trunk_segment_binding(db_session, network_id, add_segments) + + +def get_segment_range(network_profile): + """ + Get the segment range min and max for a network profile. + + :params network_profile: object of type network profile + :returns: integer values representing minimum and maximum segment + range value + """ + # Sort the range to ensure min, max is in order + seg_min, seg_max = sorted( + int(i) for i in network_profile.segment_range.split('-')) + LOG.debug(_("seg_min %(seg_min)s, seg_max %(seg_max)s"), + {'seg_min': seg_min, 'seg_max': seg_max}) + return seg_min, seg_max + + +def get_multicast_ip(network_profile): + """ + Retrieve a multicast ip from the defined pool. 
+ + :params network_profile: object of type network profile + :returns: string representing multicast IP + """ + # Round robin multicast ip allocation + min_ip, max_ip = _get_multicast_ip_range(network_profile) + addr_list = list((netaddr.iter_iprange(min_ip, max_ip))) + mul_ip_str = str(addr_list[network_profile.multicast_ip_index]) + + network_profile.multicast_ip_index += 1 + if network_profile.multicast_ip_index == len(addr_list): + network_profile.multicast_ip_index = 0 + return mul_ip_str + + +def _get_multicast_ip_range(network_profile): + """ + Helper method to retrieve minimum and maximum multicast ip. + + :params network_profile: object of type network profile + :returns: two strings representing minimum multicast ip and + maximum multicast ip + """ + # Assumption: ip range belongs to the same subnet + # Assumption: ip range is already sorted + return network_profile.multicast_ip_range.split('-') + + +def get_port_binding(db_session, port_id): + """ + Retrieve port binding. + + :param db_session: database session + :param port_id: UUID representing the port whose binding is to fetch + :returns: port binding object + """ + try: + return (db_session.query(n1kv_models_v2.N1kvPortBinding). + filter_by(port_id=port_id). + one()) + except exc.NoResultFound: + raise c_exc.PortBindingNotFound(port_id=port_id) + + +def add_port_binding(db_session, port_id, policy_profile_id): + """ + Create port binding. + + Bind the port with policy profile. + :param db_session: database session + :param port_id: UUID of the port + :param policy_profile_id: UUID of the policy profile + """ + with db_session.begin(subtransactions=True): + binding = n1kv_models_v2.N1kvPortBinding(port_id=port_id, + profile_id=policy_profile_id) + db_session.add(binding) + + +def delete_segment_allocations(db_session, net_p): + """ + Delete the segment allocation entry from the table. 
+ + :params db_session: database session + :params net_p: network profile object + """ + with db_session.begin(subtransactions=True): + seg_min, seg_max = get_segment_range(net_p) + if net_p['segment_type'] == c_const.NETWORK_TYPE_VLAN: + db_session.query(n1kv_models_v2.N1kvVlanAllocation).filter( + (n1kv_models_v2.N1kvVlanAllocation.physical_network == + net_p['physical_network']), + (n1kv_models_v2.N1kvVlanAllocation.vlan_id >= seg_min), + (n1kv_models_v2.N1kvVlanAllocation.vlan_id <= + seg_max)).delete() + elif net_p['segment_type'] == c_const.NETWORK_TYPE_OVERLAY: + db_session.query(n1kv_models_v2.N1kvVxlanAllocation).filter( + (n1kv_models_v2.N1kvVxlanAllocation.vxlan_id >= seg_min), + (n1kv_models_v2.N1kvVxlanAllocation.vxlan_id <= + seg_max)).delete() + + +def sync_vlan_allocations(db_session, net_p): + """ + Synchronize vlan_allocations table with configured VLAN ranges. + + Sync the network profile range with the vlan_allocations table for each + physical network. + :param db_session: database session + :param net_p: network profile dictionary + """ + with db_session.begin(subtransactions=True): + seg_min, seg_max = get_segment_range(net_p) + for vlan_id in range(seg_min, seg_max + 1): + try: + get_vlan_allocation(db_session, + net_p['physical_network'], + vlan_id) + except c_exc.VlanIDNotFound: + alloc = n1kv_models_v2.N1kvVlanAllocation( + physical_network=net_p['physical_network'], + vlan_id=vlan_id, + network_profile_id=net_p['id']) + db_session.add(alloc) + + +def get_vlan_allocation(db_session, physical_network, vlan_id): + """ + Retrieve vlan allocation. + + :param db_session: database session + :param physical network: string name for the physical network + :param vlan_id: integer representing the VLAN ID. + :returns: allocation object for given physical network and VLAN ID + """ + try: + return (db_session.query(n1kv_models_v2.N1kvVlanAllocation). 
+ filter_by(physical_network=physical_network, + vlan_id=vlan_id).one()) + except exc.NoResultFound: + raise c_exc.VlanIDNotFound(vlan_id=vlan_id) + + +def reserve_vlan(db_session, network_profile): + """ + Reserve a VLAN ID within the range of the network profile. + + :param db_session: database session + :param network_profile: network profile object + """ + seg_min, seg_max = get_segment_range(network_profile) + segment_type = c_const.NETWORK_TYPE_VLAN + + with db_session.begin(subtransactions=True): + alloc = (db_session.query(n1kv_models_v2.N1kvVlanAllocation). + filter(sql.and_( + n1kv_models_v2.N1kvVlanAllocation.vlan_id >= seg_min, + n1kv_models_v2.N1kvVlanAllocation.vlan_id <= seg_max, + n1kv_models_v2.N1kvVlanAllocation.physical_network == + network_profile['physical_network'], + n1kv_models_v2.N1kvVlanAllocation.allocated == + sql.false()) + )).first() + if alloc: + segment_id = alloc.vlan_id + physical_network = alloc.physical_network + alloc.allocated = True + return (physical_network, segment_type, segment_id, "0.0.0.0") + raise c_exc.NoMoreNetworkSegments( + network_profile_name=network_profile.name) + + +def reserve_vxlan(db_session, network_profile): + """ + Reserve a VXLAN ID within the range of the network profile. + + :param db_session: database session + :param network_profile: network profile object + """ + seg_min, seg_max = get_segment_range(network_profile) + segment_type = c_const.NETWORK_TYPE_OVERLAY + physical_network = "" + + with db_session.begin(subtransactions=True): + alloc = (db_session.query(n1kv_models_v2.N1kvVxlanAllocation). + filter(sql.and_( + n1kv_models_v2.N1kvVxlanAllocation.vxlan_id >= + seg_min, + n1kv_models_v2.N1kvVxlanAllocation.vxlan_id <= + seg_max, + n1kv_models_v2.N1kvVxlanAllocation.allocated == + sql.false()) + ).first()) + if alloc: + segment_id = alloc.vxlan_id + alloc.allocated = True + if network_profile.sub_type == (c_const. 
+ NETWORK_SUBTYPE_NATIVE_VXLAN): + return (physical_network, segment_type, + segment_id, get_multicast_ip(network_profile)) + else: + return (physical_network, segment_type, segment_id, "0.0.0.0") + raise n_exc.NoNetworkAvailable() + + +def alloc_network(db_session, network_profile_id): + """ + Allocate network using first available free segment ID in segment range. + + :param db_session: database session + :param network_profile_id: UUID representing the network profile + """ + with db_session.begin(subtransactions=True): + network_profile = get_network_profile(db_session, + network_profile_id) + if network_profile.segment_type == c_const.NETWORK_TYPE_VLAN: + return reserve_vlan(db_session, network_profile) + if network_profile.segment_type == c_const.NETWORK_TYPE_OVERLAY: + return reserve_vxlan(db_session, network_profile) + return (None, network_profile.segment_type, 0, "0.0.0.0") + + +def reserve_specific_vlan(db_session, physical_network, vlan_id): + """ + Reserve a specific VLAN ID for the network. + + :param db_session: database session + :param physical_network: string representing the name of physical network + :param vlan_id: integer value of the segmentation ID to be reserved + """ + with db_session.begin(subtransactions=True): + try: + alloc = (db_session.query(n1kv_models_v2.N1kvVlanAllocation). + filter_by(physical_network=physical_network, + vlan_id=vlan_id). + one()) + if alloc.allocated: + if vlan_id == c_const.FLAT_VLAN_ID: + raise n_exc.FlatNetworkInUse( + physical_network=physical_network) + else: + raise n_exc.VlanIdInUse(vlan_id=vlan_id, + physical_network=physical_network) + LOG.debug(_("Reserving specific vlan %(vlan)s on physical " + "network %(network)s from pool"), + {"vlan": vlan_id, "network": physical_network}) + alloc.allocated = True + db_session.add(alloc) + except exc.NoResultFound: + raise c_exc.VlanIDOutsidePool + + +def release_vlan(db_session, physical_network, vlan_id): + """ + Release a given VLAN ID. 
+ + :param db_session: database session + :param physical_network: string representing the name of physical network + :param vlan_id: integer value of the segmentation ID to be released + """ + with db_session.begin(subtransactions=True): + try: + alloc = (db_session.query(n1kv_models_v2.N1kvVlanAllocation). + filter_by(physical_network=physical_network, + vlan_id=vlan_id). + one()) + alloc.allocated = False + except exc.NoResultFound: + LOG.warning(_("vlan_id %(vlan)s on physical network %(network)s " + "not found"), + {"vlan": vlan_id, "network": physical_network}) + + +def sync_vxlan_allocations(db_session, net_p): + """ + Synchronize vxlan_allocations table with configured vxlan ranges. + + :param db_session: database session + :param net_p: network profile dictionary + """ + seg_min, seg_max = get_segment_range(net_p) + if seg_max + 1 - seg_min > c_const.MAX_VXLAN_RANGE: + msg = (_("Unreasonable vxlan ID range %(vxlan_min)s - %(vxlan_max)s"), + {"vxlan_min": seg_min, "vxlan_max": seg_max}) + raise n_exc.InvalidInput(error_message=msg) + with db_session.begin(subtransactions=True): + for vxlan_id in range(seg_min, seg_max + 1): + try: + get_vxlan_allocation(db_session, vxlan_id) + except c_exc.VxlanIDNotFound: + alloc = n1kv_models_v2.N1kvVxlanAllocation( + network_profile_id=net_p['id'], vxlan_id=vxlan_id) + db_session.add(alloc) + + +def get_vxlan_allocation(db_session, vxlan_id): + """ + Retrieve VXLAN allocation for the given VXLAN ID. + + :param db_session: database session + :param vxlan_id: integer value representing the segmentation ID + :returns: allocation object + """ + try: + return (db_session.query(n1kv_models_v2.N1kvVxlanAllocation). + filter_by(vxlan_id=vxlan_id).one()) + except exc.NoResultFound: + raise c_exc.VxlanIDNotFound(vxlan_id=vxlan_id) + + +def reserve_specific_vxlan(db_session, vxlan_id): + """ + Reserve a specific VXLAN ID. 
+ + :param db_session: database session + :param vxlan_id: integer value representing the segmentation ID + """ + with db_session.begin(subtransactions=True): + try: + alloc = (db_session.query(n1kv_models_v2.N1kvVxlanAllocation). + filter_by(vxlan_id=vxlan_id). + one()) + if alloc.allocated: + raise c_exc.VxlanIDInUse(vxlan_id=vxlan_id) + LOG.debug(_("Reserving specific vxlan %s from pool"), vxlan_id) + alloc.allocated = True + db_session.add(alloc) + except exc.NoResultFound: + raise c_exc.VxlanIDOutsidePool + + +def release_vxlan(db_session, vxlan_id): + """ + Release a given VXLAN ID. + + :param db_session: database session + :param vxlan_id: integer value representing the segmentation ID + """ + with db_session.begin(subtransactions=True): + try: + alloc = (db_session.query(n1kv_models_v2.N1kvVxlanAllocation). + filter_by(vxlan_id=vxlan_id). + one()) + alloc.allocated = False + except exc.NoResultFound: + LOG.warning(_("vxlan_id %s not found"), vxlan_id) + + +def set_port_status(port_id, status): + """ + Set the status of the port. + + :param port_id: UUID representing the port + :param status: string representing the new status + """ + db_session = db.get_session() + try: + port = db_session.query(models_v2.Port).filter_by(id=port_id).one() + port.status = status + except exc.NoResultFound: + raise n_exc.PortNotFound(port_id=port_id) + + +def get_vm_network(db_session, policy_profile_id, network_id): + """ + Retrieve a vm_network based on policy profile and network id. + + :param db_session: database session + :param policy_profile_id: UUID representing policy profile + :param network_id: UUID representing network + :returns: VM network object + """ + try: + return (db_session.query(n1kv_models_v2.N1kVmNetwork). 
+ filter_by(profile_id=policy_profile_id, + network_id=network_id).one()) + except exc.NoResultFound: + name = (c_const.VM_NETWORK_NAME_PREFIX + policy_profile_id + + "_" + network_id) + raise c_exc.VMNetworkNotFound(name=name) + + +def add_vm_network(db_session, + name, + policy_profile_id, + network_id, + port_count): + """ + Create a VM network. + + Add a VM network for a unique combination of network and + policy profile. All ports having the same policy profile + on one network will be associated with one VM network. + :param db_session: database session + :param name: string representing the name of the VM network + :param policy_profile_id: UUID representing policy profile + :param network_id: UUID representing a network + :param port_count: integer representing the number of ports on vm network + """ + with db_session.begin(subtransactions=True): + vm_network = n1kv_models_v2.N1kVmNetwork( + name=name, + profile_id=policy_profile_id, + network_id=network_id, + port_count=port_count) + db_session.add(vm_network) + + +def update_vm_network_port_count(db_session, name, port_count): + """ + Update a VM network with new port count. + + :param db_session: database session + :param name: string representing the name of the VM network + :param port_count: integer representing the number of ports on VM network + """ + try: + with db_session.begin(subtransactions=True): + vm_network = (db_session.query(n1kv_models_v2.N1kVmNetwork). + filter_by(name=name).one()) + if port_count is not None: + vm_network.port_count = port_count + return vm_network + except exc.NoResultFound: + raise c_exc.VMNetworkNotFound(name=name) + + +def delete_vm_network(db_session, policy_profile_id, network_id): + """ + Delete a VM network. 
+ + :param db_session: database session + :param policy_profile_id: UUID representing a policy profile + :param network_id: UUID representing a network + :returns: deleted VM network object + """ + with db_session.begin(subtransactions=True): + try: + vm_network = get_vm_network(db_session, + policy_profile_id, + network_id) + db_session.delete(vm_network) + db_session.query(n1kv_models_v2.N1kVmNetwork).filter_by( + name=vm_network["name"]).delete() + return vm_network + except exc.NoResultFound: + name = (c_const.VM_NETWORK_NAME_PREFIX + policy_profile_id + + "_" + network_id) + raise c_exc.VMNetworkNotFound(name=name) + + +def create_network_profile(db_session, network_profile): + """Create a network profile.""" + LOG.debug(_("create_network_profile()")) + with db_session.begin(subtransactions=True): + kwargs = {"name": network_profile["name"], + "segment_type": network_profile["segment_type"]} + if network_profile["segment_type"] == c_const.NETWORK_TYPE_VLAN: + kwargs["physical_network"] = network_profile["physical_network"] + kwargs["segment_range"] = network_profile["segment_range"] + elif network_profile["segment_type"] == c_const.NETWORK_TYPE_OVERLAY: + kwargs["multicast_ip_index"] = 0 + kwargs["multicast_ip_range"] = network_profile[ + "multicast_ip_range"] + kwargs["segment_range"] = network_profile["segment_range"] + kwargs["sub_type"] = network_profile["sub_type"] + elif network_profile["segment_type"] == c_const.NETWORK_TYPE_TRUNK: + kwargs["sub_type"] = network_profile["sub_type"] + net_profile = n1kv_models_v2.NetworkProfile(**kwargs) + db_session.add(net_profile) + return net_profile + + +def delete_network_profile(db_session, id): + """Delete Network Profile.""" + LOG.debug(_("delete_network_profile()")) + with db_session.begin(subtransactions=True): + try: + network_profile = get_network_profile(db_session, id) + db_session.delete(network_profile) + (db_session.query(n1kv_models_v2.ProfileBinding). 
+ filter_by(profile_id=id).delete()) + return network_profile + except exc.NoResultFound: + raise c_exc.ProfileTenantBindingNotFound(profile_id=id) + + +def update_network_profile(db_session, id, network_profile): + """Update Network Profile.""" + LOG.debug(_("update_network_profile()")) + with db_session.begin(subtransactions=True): + profile = get_network_profile(db_session, id) + profile.update(network_profile) + return profile + + +def get_network_profile(db_session, id): + """Get Network Profile.""" + LOG.debug(_("get_network_profile()")) + try: + return db_session.query( + n1kv_models_v2.NetworkProfile).filter_by(id=id).one() + except exc.NoResultFound: + raise c_exc.NetworkProfileNotFound(profile=id) + + +def _get_network_profiles(db_session=None, physical_network=None): + """ + Retrieve all network profiles. + + Get Network Profiles on a particular physical network, if physical + network is specified. If no physical network is specified, return + all network profiles. + """ + db_session = db_session or db.get_session() + if physical_network: + return (db_session.query(n1kv_models_v2.NetworkProfile). 
+ filter_by(physical_network=physical_network)) + return db_session.query(n1kv_models_v2.NetworkProfile) + + +def create_policy_profile(policy_profile): + """Create Policy Profile.""" + LOG.debug(_("create_policy_profile()")) + db_session = db.get_session() + with db_session.begin(subtransactions=True): + p_profile = n1kv_models_v2.PolicyProfile(id=policy_profile["id"], + name=policy_profile["name"]) + db_session.add(p_profile) + return p_profile + + +def delete_policy_profile(id): + """Delete Policy Profile.""" + LOG.debug(_("delete_policy_profile()")) + db_session = db.get_session() + with db_session.begin(subtransactions=True): + policy_profile = get_policy_profile(db_session, id) + db_session.delete(policy_profile) + + +def update_policy_profile(db_session, id, policy_profile): + """Update a policy profile.""" + LOG.debug(_("update_policy_profile()")) + with db_session.begin(subtransactions=True): + _profile = get_policy_profile(db_session, id) + _profile.update(policy_profile) + return _profile + + +def get_policy_profile(db_session, id): + """Get Policy Profile.""" + LOG.debug(_("get_policy_profile()")) + try: + return db_session.query( + n1kv_models_v2.PolicyProfile).filter_by(id=id).one() + except exc.NoResultFound: + raise c_exc.PolicyProfileIdNotFound(profile_id=id) + + +def get_policy_profiles(): + """Retrieve all policy profiles.""" + db_session = db.get_session() + with db_session.begin(subtransactions=True): + return db_session.query(n1kv_models_v2.PolicyProfile) + + +def create_profile_binding(db_session, tenant_id, profile_id, profile_type): + """Create Network/Policy Profile association with a tenant.""" + db_session = db_session or db.get_session() + if profile_type not in ["network", "policy"]: + raise n_exc.NeutronException(_("Invalid profile type")) + + if _profile_binding_exists(db_session, + tenant_id, + profile_id, + profile_type): + return get_profile_binding(db_session, tenant_id, profile_id) + + with 
db_session.begin(subtransactions=True): + binding = n1kv_models_v2.ProfileBinding(profile_type=profile_type, + profile_id=profile_id, + tenant_id=tenant_id) + db_session.add(binding) + return binding + + +def _profile_binding_exists(db_session, tenant_id, profile_id, profile_type): + LOG.debug(_("_profile_binding_exists()")) + return (db_session.query(n1kv_models_v2.ProfileBinding). + filter_by(tenant_id=tenant_id, profile_id=profile_id, + profile_type=profile_type).first()) + + +def get_profile_binding(db_session, tenant_id, profile_id): + """Get Network/Policy Profile - Tenant binding.""" + LOG.debug(_("get_profile_binding()")) + try: + return (db_session.query(n1kv_models_v2.ProfileBinding).filter_by( + tenant_id=tenant_id, profile_id=profile_id).one()) + except exc.NoResultFound: + raise c_exc.ProfileTenantBindingNotFound(profile_id=profile_id) + + +def delete_profile_binding(db_session, tenant_id, profile_id): + """Delete Policy Binding.""" + LOG.debug(_("delete_profile_binding()")) + db_session = db_session or db.get_session() + try: + binding = get_profile_binding(db_session, tenant_id, profile_id) + with db_session.begin(subtransactions=True): + db_session.delete(binding) + except c_exc.ProfileTenantBindingNotFound: + LOG.debug(_("Profile-Tenant binding missing for profile ID " + "%(profile_id)s and tenant ID %(tenant_id)s"), + {"profile_id": profile_id, "tenant_id": tenant_id}) + return + + +def _get_profile_bindings(db_session, profile_type=None): + """ + Retrieve a list of profile bindings. + + Get all profile-tenant bindings based on profile type. + If profile type is None, return profile-tenant binding for all + profile types. + """ + LOG.debug(_("_get_profile_bindings()")) + if profile_type: + profile_bindings = (db_session.query(n1kv_models_v2.ProfileBinding). 
+ filter_by(profile_type=profile_type)) + return profile_bindings + return db_session.query(n1kv_models_v2.ProfileBinding) + + +class NetworkProfile_db_mixin(object): + + """Network Profile Mixin.""" + + def _replace_fake_tenant_id_with_real(self, context): + """ + Replace default tenant-id with admin tenant-ids. + + Default tenant-ids are populated in profile bindings when plugin is + initialized. Replace these tenant-ids with admin's tenant-id. + :param context: neutron api request context + """ + if context.is_admin and context.tenant_id: + tenant_id = context.tenant_id + db_session = context.session + with db_session.begin(subtransactions=True): + (db_session.query(n1kv_models_v2.ProfileBinding). + filter_by(tenant_id=c_const.TENANT_ID_NOT_SET). + update({'tenant_id': tenant_id})) + + def _get_network_collection_for_tenant(self, db_session, model, tenant_id): + net_profile_ids = (db_session.query(n1kv_models_v2.ProfileBinding. + profile_id). + filter_by(tenant_id=tenant_id). + filter_by(profile_type=c_const.NETWORK)) + network_profiles = (db_session.query(model).filter(model.id.in_( + pid[0] for pid in net_profile_ids))) + return [self._make_network_profile_dict(p) for p in network_profiles] + + def _make_profile_bindings_dict(self, profile_binding, fields=None): + res = {"profile_id": profile_binding["profile_id"], + "tenant_id": profile_binding["tenant_id"]} + return self._fields(res, fields) + + def _make_network_profile_dict(self, network_profile, fields=None): + res = {"id": network_profile["id"], + "name": network_profile["name"], + "segment_type": network_profile["segment_type"], + "sub_type": network_profile["sub_type"], + "segment_range": network_profile["segment_range"], + "multicast_ip_index": network_profile["multicast_ip_index"], + "multicast_ip_range": network_profile["multicast_ip_range"], + "physical_network": network_profile["physical_network"]} + return self._fields(res, fields) + + def _segment_in_use(self, db_session, network_profile): + 
"""Verify whether a segment is allocated for given network profile.""" + with db_session.begin(subtransactions=True): + return (db_session.query(n1kv_models_v2.N1kvNetworkBinding). + filter_by(profile_id=network_profile['id'])).first() + + def get_network_profile_bindings(self, context, filters=None, fields=None): + """ + Retrieve a list of profile bindings for network profiles. + + :param context: neutron api request context + :param filters: a dictionary with keys that are valid keys for a + profile bindings object. Values in this dictiontary are + an iterable containing values that will be used for an + exact match comparison for that value. Each result + returned by this function will have matched one of the + values for each key in filters + :params fields: a list of strings that are valid keys in a profile + bindings dictionary. Only these fields will be returned + :returns: list of profile bindings + """ + if context.is_admin: + profile_bindings = _get_profile_bindings( + context.session, + profile_type=c_const.NETWORK) + return [self._make_profile_bindings_dict(pb) + for pb in profile_bindings] + + def create_network_profile(self, context, network_profile): + """ + Create a network profile. 
+ + :param context: neutron api request context + :param network_profile: network profile dictionary + :returns: network profile dictionary + """ + self._replace_fake_tenant_id_with_real(context) + p = network_profile["network_profile"] + self._validate_network_profile_args(context, p) + with context.session.begin(subtransactions=True): + net_profile = create_network_profile(context.session, p) + if net_profile.segment_type == c_const.NETWORK_TYPE_VLAN: + sync_vlan_allocations(context.session, net_profile) + elif net_profile.segment_type == c_const.NETWORK_TYPE_OVERLAY: + sync_vxlan_allocations(context.session, net_profile) + create_profile_binding(context.session, + context.tenant_id, + net_profile.id, + c_const.NETWORK) + if p.get("add_tenant"): + self.add_network_profile_tenant(context.session, + net_profile.id, + p["add_tenant"]) + return self._make_network_profile_dict(net_profile) + + def delete_network_profile(self, context, id): + """ + Delete a network profile. + + :param context: neutron api request context + :param id: UUID representing network profile to delete + :returns: deleted network profile dictionary + """ + # Check whether the network profile is in use. + if self._segment_in_use(context.session, + get_network_profile(context.session, id)): + raise c_exc.NetworkProfileInUse(profile=id) + # Delete and return the network profile if it is not in use. + _profile = delete_network_profile(context.session, id) + return self._make_network_profile_dict(_profile) + + def update_network_profile(self, context, id, network_profile): + """ + Update a network profile. + + Add/remove network profile to tenant-id binding for the corresponding + options and if user is admin. + :param context: neutron api request context + :param id: UUID representing network profile to update + :param network_profile: network profile dictionary + :returns: updated network profile dictionary + """ + # Flag to check whether network profile is updated or not. 
+ is_updated = False + p = network_profile["network_profile"] + original_net_p = get_network_profile(context.session, id) + # Update network profile to tenant id binding. + if context.is_admin and "add_tenant" in p: + self.add_network_profile_tenant(context.session, id, + p["add_tenant"]) + is_updated = True + if context.is_admin and "remove_tenant" in p: + delete_profile_binding(context.session, p["remove_tenant"], id) + is_updated = True + if original_net_p.segment_type == c_const.NETWORK_TYPE_TRUNK: + #TODO(abhraut): Remove check when Trunk supports segment range. + if p.get('segment_range'): + msg = _("segment_range not required for TRUNK") + LOG.error(msg) + raise n_exc.InvalidInput(error_message=msg) + if original_net_p.segment_type in [c_const.NETWORK_TYPE_VLAN, + c_const.NETWORK_TYPE_TRUNK]: + if p.get("multicast_ip_range"): + msg = _("multicast_ip_range not required") + LOG.error(msg) + raise n_exc.InvalidInput(error_message=msg) + # Update segment range if network profile is not in use. 
+ if (p.get("segment_range") and + p.get("segment_range") != original_net_p.segment_range): + if not self._segment_in_use(context.session, original_net_p): + delete_segment_allocations(context.session, original_net_p) + updated_net_p = update_network_profile(context.session, id, p) + self._validate_segment_range_uniqueness(context, + updated_net_p, id) + if original_net_p.segment_type == c_const.NETWORK_TYPE_VLAN: + sync_vlan_allocations(context.session, updated_net_p) + if original_net_p.segment_type == c_const.NETWORK_TYPE_OVERLAY: + sync_vxlan_allocations(context.session, updated_net_p) + is_updated = True + else: + raise c_exc.NetworkProfileInUse(profile=id) + if (p.get('multicast_ip_range') and + (p.get("multicast_ip_range") != + original_net_p.get("multicast_ip_range"))): + self._validate_multicast_ip_range(p) + if not self._segment_in_use(context.session, original_net_p): + is_updated = True + else: + raise c_exc.NetworkProfileInUse(profile=id) + # Update network profile if name is updated and the network profile + # is not yet updated. + if "name" in p and not is_updated: + is_updated = True + # Return network profile if it is successfully updated. + if is_updated: + return self._make_network_profile_dict( + update_network_profile(context.session, id, p)) + + def get_network_profile(self, context, id, fields=None): + """ + Retrieve a network profile. + + :param context: neutron api request context + :param id: UUID representing the network profile to retrieve + :params fields: a list of strings that are valid keys in a network + profile dictionary. Only these fields will be returned + :returns: network profile dictionary + """ + profile = get_network_profile(context.session, id) + return self._make_network_profile_dict(profile, fields) + + def get_network_profiles(self, context, filters=None, fields=None): + """ + Retrieve a list of all network profiles. + + Retrieve all network profiles if tenant is admin. 
For a non-admin + tenant, retrieve all network profiles belonging to this tenant only. + :param context: neutron api request context + :param filters: a dictionary with keys that are valid keys for a + network profile object. Values in this dictiontary are + an iterable containing values that will be used for an + exact match comparison for that value. Each result + returned by this function will have matched one of the + values for each key in filters + :params fields: a list of strings that are valid keys in a network + profile dictionary. Only these fields will be returned + :returns: list of all network profiles + """ + if context.is_admin: + return self._get_collection(context, n1kv_models_v2.NetworkProfile, + self._make_network_profile_dict, + filters=filters, fields=fields) + return self._get_network_collection_for_tenant(context.session, + n1kv_models_v2. + NetworkProfile, + context.tenant_id) + + def add_network_profile_tenant(self, + db_session, + network_profile_id, + tenant_id): + """ + Add a tenant to a network profile. + + :param db_session: database session + :param network_profile_id: UUID representing network profile + :param tenant_id: UUID representing the tenant + :returns: profile binding object + """ + return create_profile_binding(db_session, + tenant_id, + network_profile_id, + c_const.NETWORK) + + def network_profile_exists(self, context, id): + """ + Verify whether a network profile for given id exists. + + :param context: neutron api request context + :param id: UUID representing network profile + :returns: true if network profile exist else False + """ + try: + get_network_profile(context.session, id) + return True + except c_exc.NetworkProfileNotFound(profile=id): + return False + + def _get_segment_range(self, data): + return (int(seg) for seg in data.split("-")[:2]) + + def _validate_network_profile_args(self, context, p): + """ + Validate completeness of Nexus1000V network profile arguments. 
+ + :param context: neutron api request context + :param p: network profile object + """ + self._validate_network_profile(p) + segment_type = p['segment_type'].lower() + if segment_type != c_const.NETWORK_TYPE_TRUNK: + self._validate_segment_range_uniqueness(context, p) + + def _validate_segment_range(self, network_profile): + """ + Validate segment range values. + + :param network_profile: network profile object + """ + if not re.match(r"(\d+)\-(\d+)", network_profile["segment_range"]): + msg = _("Invalid segment range. example range: 500-550") + raise n_exc.InvalidInput(error_message=msg) + + def _validate_multicast_ip_range(self, network_profile): + """ + Validate multicast ip range values. + + :param network_profile: network profile object + """ + try: + min_ip, max_ip = (network_profile + ['multicast_ip_range'].split('-', 1)) + except ValueError: + msg = _("Invalid multicast ip address range. " + "example range: 224.1.1.1-224.1.1.10") + LOG.error(msg) + raise n_exc.InvalidInput(error_message=msg) + for ip in [min_ip, max_ip]: + try: + if not netaddr.IPAddress(ip).is_multicast(): + msg = _("%s is not a valid multicast ip address") % ip + LOG.error(msg) + raise n_exc.InvalidInput(error_message=msg) + if netaddr.IPAddress(ip) <= netaddr.IPAddress('224.0.0.255'): + msg = _("%s is reserved multicast ip address") % ip + LOG.error(msg) + raise n_exc.InvalidInput(error_message=msg) + except netaddr.AddrFormatError: + msg = _("%s is not a valid ip address") % ip + LOG.error(msg) + raise n_exc.InvalidInput(error_message=msg) + if netaddr.IPAddress(min_ip) > netaddr.IPAddress(max_ip): + msg = (_("Invalid multicast IP range '%(min_ip)s-%(max_ip)s':" + " Range should be from low address to high address") % + {'min_ip': min_ip, 'max_ip': max_ip}) + LOG.error(msg) + raise n_exc.InvalidInput(error_message=msg) + + def _validate_network_profile(self, net_p): + """ + Validate completeness of a network profile arguments. 
+ + :param net_p: network profile object + """ + if any(net_p[arg] == "" for arg in ["segment_type"]): + msg = _("Arguments segment_type missing" + " for network profile") + LOG.error(msg) + raise n_exc.InvalidInput(error_message=msg) + segment_type = net_p["segment_type"].lower() + if segment_type not in [c_const.NETWORK_TYPE_VLAN, + c_const.NETWORK_TYPE_OVERLAY, + c_const.NETWORK_TYPE_TRUNK, + c_const.NETWORK_TYPE_MULTI_SEGMENT]: + msg = _("segment_type should either be vlan, overlay, " + "multi-segment or trunk") + LOG.error(msg) + raise n_exc.InvalidInput(error_message=msg) + if segment_type == c_const.NETWORK_TYPE_VLAN: + if "physical_network" not in net_p: + msg = _("Argument physical_network missing " + "for network profile") + LOG.error(msg) + raise n_exc.InvalidInput(error_message=msg) + if segment_type == c_const.NETWORK_TYPE_TRUNK: + if net_p["segment_range"]: + msg = _("segment_range not required for trunk") + LOG.error(msg) + raise n_exc.InvalidInput(error_message=msg) + if segment_type in [c_const.NETWORK_TYPE_TRUNK, + c_const.NETWORK_TYPE_OVERLAY]: + if not attributes.is_attr_set(net_p.get("sub_type")): + msg = _("Argument sub_type missing " + "for network profile") + LOG.error(msg) + raise n_exc.InvalidInput(error_message=msg) + if segment_type in [c_const.NETWORK_TYPE_VLAN, + c_const.NETWORK_TYPE_OVERLAY]: + if "segment_range" not in net_p: + msg = _("Argument segment_range missing " + "for network profile") + LOG.error(msg) + raise n_exc.InvalidInput(error_message=msg) + self._validate_segment_range(net_p) + if segment_type == c_const.NETWORK_TYPE_OVERLAY: + if net_p['sub_type'] != c_const.NETWORK_SUBTYPE_NATIVE_VXLAN: + net_p['multicast_ip_range'] = '0.0.0.0' + else: + multicast_ip_range = net_p.get("multicast_ip_range") + if not attributes.is_attr_set(multicast_ip_range): + msg = _("Argument multicast_ip_range missing" + " for VXLAN multicast network profile") + LOG.error(msg) + raise n_exc.InvalidInput(error_message=msg) + 
self._validate_multicast_ip_range(net_p) + else: + net_p['multicast_ip_range'] = '0.0.0.0' + + def _validate_segment_range_uniqueness(self, context, net_p, id=None): + """ + Validate that segment range doesn't overlap. + + :param context: neutron api request context + :param net_p: network profile dictionary + :param id: UUID representing the network profile being updated + """ + segment_type = net_p["segment_type"].lower() + seg_min, seg_max = self._get_segment_range(net_p['segment_range']) + if segment_type == c_const.NETWORK_TYPE_VLAN: + if not ((seg_min <= seg_max) and + ((seg_min in range(constants.MIN_VLAN_TAG, + c_const.NEXUS_VLAN_RESERVED_MIN) and + seg_max in range(constants.MIN_VLAN_TAG, + c_const.NEXUS_VLAN_RESERVED_MIN)) or + (seg_min in range(c_const.NEXUS_VLAN_RESERVED_MAX + 1, + constants.MAX_VLAN_TAG) and + seg_max in range(c_const.NEXUS_VLAN_RESERVED_MAX + 1, + constants.MAX_VLAN_TAG)))): + msg = (_("Segment range is invalid, select from " + "%(min)s-%(nmin)s, %(nmax)s-%(max)s") % + {"min": constants.MIN_VLAN_TAG, + "nmin": c_const.NEXUS_VLAN_RESERVED_MIN - 1, + "nmax": c_const.NEXUS_VLAN_RESERVED_MAX + 1, + "max": constants.MAX_VLAN_TAG - 1}) + LOG.error(msg) + raise n_exc.InvalidInput(error_message=msg) + profiles = _get_network_profiles( + db_session=context.session, + physical_network=net_p["physical_network"] + ) + elif segment_type in [c_const.NETWORK_TYPE_OVERLAY, + c_const.NETWORK_TYPE_MULTI_SEGMENT, + c_const.NETWORK_TYPE_TRUNK]: + if (seg_min > seg_max or + seg_min < c_const.NEXUS_VXLAN_MIN or + seg_max > c_const.NEXUS_VXLAN_MAX): + msg = (_("segment range is invalid. 
Valid range is : " + "%(min)s-%(max)s") % + {"min": c_const.NEXUS_VXLAN_MIN, + "max": c_const.NEXUS_VXLAN_MAX}) + LOG.error(msg) + raise n_exc.InvalidInput(error_message=msg) + profiles = _get_network_profiles(db_session=context.session) + if profiles: + for profile in profiles: + if id and profile.id == id: + continue + name = profile.name + segment_range = profile.segment_range + if net_p["name"] == name: + msg = (_("NetworkProfile name %s already exists"), + net_p["name"]) + LOG.error(msg) + raise n_exc.InvalidInput(error_message=msg) + if (c_const.NETWORK_TYPE_MULTI_SEGMENT in + [profile.segment_type, net_p["segment_type"]] or + c_const.NETWORK_TYPE_TRUNK in + [profile.segment_type, net_p["segment_type"]]): + continue + seg_min, seg_max = self._get_segment_range( + net_p["segment_range"]) + profile_seg_min, profile_seg_max = self._get_segment_range( + segment_range) + if ((profile_seg_min <= seg_min <= profile_seg_max) or + (profile_seg_min <= seg_max <= profile_seg_max) or + ((seg_min <= profile_seg_min) and + (seg_max >= profile_seg_max))): + msg = _("Segment range overlaps with another profile") + LOG.error(msg) + raise n_exc.InvalidInput(error_message=msg) + + def _get_network_profile_by_name(self, db_session, name): + """ + Retrieve network profile based on name. + + :param db_session: database session + :param name: string representing the name for the network profile + :returns: network profile object + """ + with db_session.begin(subtransactions=True): + try: + return (db_session.query(n1kv_models_v2.NetworkProfile). + filter_by(name=name).one()) + except exc.NoResultFound: + raise c_exc.NetworkProfileNotFound(profile=name) + + +class PolicyProfile_db_mixin(object): + + """Policy Profile Mixin.""" + + def _get_policy_collection_for_tenant(self, db_session, model, tenant_id): + profile_ids = (db_session.query(n1kv_models_v2. + ProfileBinding.profile_id) + .filter_by(tenant_id=tenant_id). 
+ filter_by(profile_type=c_const.POLICY).all()) + profiles = db_session.query(model).filter(model.id.in_( + pid[0] for pid in profile_ids)) + return [self._make_policy_profile_dict(p) for p in profiles] + + def _make_policy_profile_dict(self, policy_profile, fields=None): + res = {"id": policy_profile["id"], "name": policy_profile["name"]} + return self._fields(res, fields) + + def _make_profile_bindings_dict(self, profile_binding, fields=None): + res = {"profile_id": profile_binding["profile_id"], + "tenant_id": profile_binding["tenant_id"]} + return self._fields(res, fields) + + def _policy_profile_exists(self, id): + db_session = db.get_session() + return (db_session.query(n1kv_models_v2.PolicyProfile). + filter_by(id=id).first()) + + def get_policy_profile(self, context, id, fields=None): + """ + Retrieve a policy profile for the given UUID. + + :param context: neutron api request context + :param id: UUID representing policy profile to fetch + :params fields: a list of strings that are valid keys in a policy + profile dictionary. Only these fields will be returned + :returns: policy profile dictionary + """ + profile = get_policy_profile(context.session, id) + return self._make_policy_profile_dict(profile, fields) + + def get_policy_profiles(self, context, filters=None, fields=None): + """ + Retrieve a list of policy profiles. + + Retrieve all policy profiles if tenant is admin. For a non-admin + tenant, retrieve all policy profiles belonging to this tenant only. + :param context: neutron api request context + :param filters: a dictionary with keys that are valid keys for a + policy profile object. Values in this dictiontary are + an iterable containing values that will be used for an + exact match comparison for that value. Each result + returned by this function will have matched one of the + values for each key in filters + :params fields: a list of strings that are valid keys in a policy + profile dictionary. 
Only these fields will be returned + :returns: list of all policy profiles + """ + if context.is_admin: + return self._get_collection(context, n1kv_models_v2.PolicyProfile, + self._make_policy_profile_dict, + filters=filters, fields=fields) + else: + return self._get_policy_collection_for_tenant(context.session, + n1kv_models_v2. + PolicyProfile, + context.tenant_id) + + def get_policy_profile_bindings(self, context, filters=None, fields=None): + """ + Retrieve a list of profile bindings for policy profiles. + + :param context: neutron api request context + :param filters: a dictionary with keys that are valid keys for a + profile bindings object. Values in this dictiontary are + an iterable containing values that will be used for an + exact match comparison for that value. Each result + returned by this function will have matched one of the + values for each key in filters + :params fields: a list of strings that are valid keys in a profile + bindings dictionary. Only these fields will be returned + :returns: list of profile bindings + """ + if context.is_admin: + profile_bindings = _get_profile_bindings( + context.session, + profile_type=c_const.POLICY) + return [self._make_profile_bindings_dict(pb) + for pb in profile_bindings] + + def update_policy_profile(self, context, id, policy_profile): + """ + Update a policy profile. + + Add/remove policy profile to tenant-id binding for the corresponding + option and if user is admin. 
+ :param context: neutron api request context + :param id: UUID representing policy profile to update + :param policy_profile: policy profile dictionary + :returns: updated policy profile dictionary + """ + p = policy_profile["policy_profile"] + if context.is_admin and "add_tenant" in p: + self.add_policy_profile_tenant(context.session, + id, + p["add_tenant"]) + return self._make_policy_profile_dict(get_policy_profile( + context.session, id)) + if context.is_admin and "remove_tenant" in p: + delete_profile_binding(context.session, p["remove_tenant"], id) + return self._make_policy_profile_dict(get_policy_profile( + context.session, id)) + return self._make_policy_profile_dict( + update_policy_profile(context.session, id, p)) + + def add_policy_profile_tenant(self, + db_session, + policy_profile_id, + tenant_id): + """ + Add a tenant to a policy profile binding. + + :param db_session: database session + :param policy_profile_id: UUID representing policy profile + :param tenant_id: UUID representing the tenant + :returns: profile binding object + """ + return create_profile_binding(db_session, + tenant_id, + policy_profile_id, + c_const.POLICY) + + def remove_policy_profile_tenant(self, policy_profile_id, tenant_id): + """ + Remove a tenant from a policy profile binding. + + :param policy_profile_id: UUID representing policy profile + :param tenant_id: UUID representing the tenant + """ + delete_profile_binding(None, tenant_id, policy_profile_id) + + def _delete_policy_profile(self, policy_profile_id): + """Delete policy profile and associated binding.""" + db_session = db.get_session() + with db_session.begin(subtransactions=True): + (db_session.query(n1kv_models_v2.PolicyProfile). + filter_by(id=policy_profile_id).delete()) + + def _get_policy_profile_by_name(self, name): + """ + Retrieve policy profile based on name. 
+ + :param name: string representing the name for the policy profile + :returns: policy profile object + """ + db_session = db.get_session() + with db_session.begin(subtransactions=True): + return (db_session.query(n1kv_models_v2.PolicyProfile). + filter_by(name=name).one()) + + def _remove_all_fake_policy_profiles(self): + """ + Remove all policy profiles associated with fake tenant id. + + This will find all Profile ID where tenant is not set yet - set A + and profiles where tenant was already set - set B + and remove what is in both and no tenant id set + """ + db_session = db.get_session() + with db_session.begin(subtransactions=True): + a_set_q = (db_session.query(n1kv_models_v2.ProfileBinding). + filter_by(tenant_id=c_const.TENANT_ID_NOT_SET, + profile_type=c_const.POLICY)) + a_set = set(i.profile_id for i in a_set_q) + b_set_q = (db_session.query(n1kv_models_v2.ProfileBinding). + filter(sql.and_(n1kv_models_v2.ProfileBinding. + tenant_id != c_const.TENANT_ID_NOT_SET, + n1kv_models_v2.ProfileBinding. + profile_type == c_const.POLICY))) + b_set = set(i.profile_id for i in b_set_q) + (db_session.query(n1kv_models_v2.ProfileBinding). + filter(sql.and_(n1kv_models_v2.ProfileBinding.profile_id. + in_(a_set & b_set), + n1kv_models_v2.ProfileBinding.tenant_id == + c_const.TENANT_ID_NOT_SET)). + delete(synchronize_session="fetch")) + + def _add_policy_profile(self, + policy_profile_name, + policy_profile_id, + tenant_id=None): + """ + Add Policy profile and tenant binding. 
+ + :param policy_profile_name: string representing the name for the + policy profile + :param policy_profile_id: UUID representing the policy profile + :param tenant_id: UUID representing the tenant + """ + policy_profile = {"id": policy_profile_id, "name": policy_profile_name} + tenant_id = tenant_id or c_const.TENANT_ID_NOT_SET + if not self._policy_profile_exists(policy_profile_id): + create_policy_profile(policy_profile) + create_profile_binding(None, + tenant_id, + policy_profile["id"], + c_const.POLICY) diff --git a/neutron/plugins/cisco/db/n1kv_models_v2.py b/neutron/plugins/cisco/db/n1kv_models_v2.py new file mode 100644 index 000000000..6c81aabba --- /dev/null +++ b/neutron/plugins/cisco/db/n1kv_models_v2.py @@ -0,0 +1,185 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 Cisco Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Abhishek Raut, Cisco Systems Inc. +# @author: Rudrajit Tapadar, Cisco Systems Inc. 
+ +import sqlalchemy as sa + +from neutron.db import model_base +from neutron.db import models_v2 +from neutron.openstack.common import log as logging +from neutron.plugins.cisco.common import cisco_constants + + +LOG = logging.getLogger(__name__) + + +class N1kvVlanAllocation(model_base.BASEV2): + + """Represents allocation state of vlan_id on physical network.""" + __tablename__ = 'cisco_n1kv_vlan_allocations' + + physical_network = sa.Column(sa.String(64), + nullable=False, + primary_key=True) + vlan_id = sa.Column(sa.Integer, nullable=False, primary_key=True, + autoincrement=False) + allocated = sa.Column(sa.Boolean, nullable=False, default=False) + network_profile_id = sa.Column(sa.String(36), + sa.ForeignKey('cisco_network_profiles.id', + ondelete="CASCADE"), + nullable=False) + + +class N1kvVxlanAllocation(model_base.BASEV2): + + """Represents allocation state of vxlan_id.""" + __tablename__ = 'cisco_n1kv_vxlan_allocations' + + vxlan_id = sa.Column(sa.Integer, nullable=False, primary_key=True, + autoincrement=False) + allocated = sa.Column(sa.Boolean, nullable=False, default=False) + network_profile_id = sa.Column(sa.String(36), + sa.ForeignKey('cisco_network_profiles.id', + ondelete="CASCADE"), + nullable=False) + + +class N1kvPortBinding(model_base.BASEV2): + + """Represents binding of ports to policy profile.""" + __tablename__ = 'cisco_n1kv_port_bindings' + + port_id = sa.Column(sa.String(36), + sa.ForeignKey('ports.id', ondelete="CASCADE"), + primary_key=True) + profile_id = sa.Column(sa.String(36), + sa.ForeignKey('cisco_policy_profiles.id')) + + +class N1kvNetworkBinding(model_base.BASEV2): + + """Represents binding of virtual network to physical realization.""" + __tablename__ = 'cisco_n1kv_network_bindings' + + network_id = sa.Column(sa.String(36), + sa.ForeignKey('networks.id', ondelete="CASCADE"), + primary_key=True) + network_type = sa.Column(sa.String(32), nullable=False) + physical_network = sa.Column(sa.String(64)) + segmentation_id = 
sa.Column(sa.Integer) + multicast_ip = sa.Column(sa.String(32)) + profile_id = sa.Column(sa.String(36), + sa.ForeignKey('cisco_network_profiles.id')) + + +class N1kVmNetwork(model_base.BASEV2): + + """Represents VM Network information.""" + __tablename__ = 'cisco_n1kv_vmnetworks' + + name = sa.Column(sa.String(80), primary_key=True) + profile_id = sa.Column(sa.String(36), + sa.ForeignKey('cisco_policy_profiles.id')) + network_id = sa.Column(sa.String(36)) + port_count = sa.Column(sa.Integer) + + +class NetworkProfile(model_base.BASEV2, models_v2.HasId): + + """ + Nexus1000V Network Profiles + + segment_type - VLAN, OVERLAY, TRUNK, MULTI_SEGMENT + sub_type - TRUNK_VLAN, TRUNK_VXLAN, native_vxlan, enhanced_vxlan + segment_range - '-' + multicast_ip_index - + multicast_ip_range - '-' + physical_network - Name for the physical network + """ + __tablename__ = 'cisco_network_profiles' + + name = sa.Column(sa.String(255)) + segment_type = sa.Column(sa.Enum(cisco_constants.NETWORK_TYPE_VLAN, + cisco_constants.NETWORK_TYPE_OVERLAY, + cisco_constants.NETWORK_TYPE_TRUNK, + cisco_constants. 
+ NETWORK_TYPE_MULTI_SEGMENT, + name='segment_type'), + nullable=False) + sub_type = sa.Column(sa.String(255)) + segment_range = sa.Column(sa.String(255)) + multicast_ip_index = sa.Column(sa.Integer, default=0) + multicast_ip_range = sa.Column(sa.String(255)) + physical_network = sa.Column(sa.String(255)) + + +class PolicyProfile(model_base.BASEV2): + + """ + Nexus1000V Network Profiles + + Both 'id' and 'name' are coming from Nexus1000V switch + """ + __tablename__ = 'cisco_policy_profiles' + + id = sa.Column(sa.String(36), primary_key=True) + name = sa.Column(sa.String(255)) + + +class ProfileBinding(model_base.BASEV2): + + """ + Represents a binding of Network Profile + or Policy Profile to tenant_id + """ + __tablename__ = 'cisco_n1kv_profile_bindings' + + profile_type = sa.Column(sa.Enum(cisco_constants.NETWORK, + cisco_constants.POLICY, + name='profile_type')) + tenant_id = sa.Column(sa.String(36), + primary_key=True, + default=cisco_constants.TENANT_ID_NOT_SET) + profile_id = sa.Column(sa.String(36), primary_key=True) + + +class N1kvTrunkSegmentBinding(model_base.BASEV2): + + """Represents binding of segments in trunk networks.""" + __tablename__ = 'cisco_n1kv_trunk_segments' + + trunk_segment_id = sa.Column(sa.String(36), + sa.ForeignKey('networks.id', + ondelete="CASCADE"), + primary_key=True) + segment_id = sa.Column(sa.String(36), nullable=False, primary_key=True) + dot1qtag = sa.Column(sa.String(36), nullable=False, primary_key=True) + + +class N1kvMultiSegmentNetworkBinding(model_base.BASEV2): + + """Represents binding of segments in multi-segment networks.""" + __tablename__ = 'cisco_n1kv_multi_segments' + + multi_segment_id = sa.Column(sa.String(36), + sa.ForeignKey('networks.id', + ondelete="CASCADE"), + primary_key=True) + segment1_id = sa.Column(sa.String(36), nullable=False, primary_key=True) + segment2_id = sa.Column(sa.String(36), nullable=False, primary_key=True) + encap_profile_name = sa.Column(sa.String(36)) diff --git 
a/neutron/plugins/cisco/db/network_db_v2.py b/neutron/plugins/cisco/db/network_db_v2.py new file mode 100644 index 000000000..94c5a37de --- /dev/null +++ b/neutron/plugins/cisco/db/network_db_v2.py @@ -0,0 +1,290 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2012, Cisco Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Rohit Agarwalla, Cisco Systems, Inc. + +from sqlalchemy.orm import exc + +from neutron.db import api as db +from neutron.openstack.common import log as logging +from neutron.openstack.common import uuidutils +from neutron.plugins.cisco.common import cisco_constants as const +from neutron.plugins.cisco.common import cisco_exceptions as c_exc +from neutron.plugins.cisco.db import network_models_v2 +# Do NOT remove this import. It is required for all the models to be seen +# by db.initialize() when called from VirtualPhysicalSwitchModelV2.__init__. +from neutron.plugins.cisco.db import nexus_models_v2 # noqa +from neutron.plugins.openvswitch import ovs_models_v2 + + +LOG = logging.getLogger(__name__) + + +def get_all_qoss(tenant_id): + """Lists all the qos to tenant associations.""" + LOG.debug(_("get_all_qoss() called")) + session = db.get_session() + return (session.query(network_models_v2.QoS). 
+ filter_by(tenant_id=tenant_id).all()) + + +def get_qos(tenant_id, qos_id): + """Lists the qos given a tenant_id and qos_id.""" + LOG.debug(_("get_qos() called")) + session = db.get_session() + try: + return (session.query(network_models_v2.QoS). + filter_by(tenant_id=tenant_id). + filter_by(qos_id=qos_id).one()) + except exc.NoResultFound: + raise c_exc.QosNotFound(qos_id=qos_id, + tenant_id=tenant_id) + + +def add_qos(tenant_id, qos_name, qos_desc): + """Adds a qos to tenant association.""" + LOG.debug(_("add_qos() called")) + session = db.get_session() + try: + qos = (session.query(network_models_v2.QoS). + filter_by(tenant_id=tenant_id). + filter_by(qos_name=qos_name).one()) + raise c_exc.QosNameAlreadyExists(qos_name=qos_name, + tenant_id=tenant_id) + except exc.NoResultFound: + qos = network_models_v2.QoS(qos_id=uuidutils.generate_uuid(), + tenant_id=tenant_id, + qos_name=qos_name, + qos_desc=qos_desc) + session.add(qos) + session.flush() + return qos + + +def remove_qos(tenant_id, qos_id): + """Removes a qos to tenant association.""" + session = db.get_session() + try: + qos = (session.query(network_models_v2.QoS). + filter_by(tenant_id=tenant_id). + filter_by(qos_id=qos_id).one()) + session.delete(qos) + session.flush() + return qos + except exc.NoResultFound: + pass + + +def update_qos(tenant_id, qos_id, new_qos_name=None): + """Updates a qos to tenant association.""" + session = db.get_session() + try: + qos = (session.query(network_models_v2.QoS). + filter_by(tenant_id=tenant_id). 
+ filter_by(qos_id=qos_id).one()) + if new_qos_name: + qos["qos_name"] = new_qos_name + session.merge(qos) + session.flush() + return qos + except exc.NoResultFound: + raise c_exc.QosNotFound(qos_id=qos_id, + tenant_id=tenant_id) + + +def get_all_credentials(): + """Lists all the creds for a tenant.""" + session = db.get_session() + return (session.query(network_models_v2.Credential).all()) + + +def get_credential(credential_id): + """Lists the creds for given a cred_id.""" + session = db.get_session() + try: + return (session.query(network_models_v2.Credential). + filter_by(credential_id=credential_id).one()) + except exc.NoResultFound: + raise c_exc.CredentialNotFound(credential_id=credential_id) + + +def get_credential_name(credential_name): + """Lists the creds for given a cred_name.""" + session = db.get_session() + try: + return (session.query(network_models_v2.Credential). + filter_by(credential_name=credential_name).one()) + except exc.NoResultFound: + raise c_exc.CredentialNameNotFound(credential_name=credential_name) + + +def add_credential(credential_name, user_name, password, type): + """Create a credential.""" + session = db.get_session() + try: + cred = (session.query(network_models_v2.Credential). + filter_by(credential_name=credential_name).one()) + raise c_exc.CredentialAlreadyExists(credential_name=credential_name) + except exc.NoResultFound: + cred = network_models_v2.Credential( + credential_id=uuidutils.generate_uuid(), + credential_name=credential_name, + user_name=user_name, + password=password, + type=type) + session.add(cred) + session.flush() + return cred + + +def remove_credential(credential_id): + """Removes a credential.""" + session = db.get_session() + try: + cred = (session.query(network_models_v2.Credential). 
+ filter_by(credential_id=credential_id).one()) + session.delete(cred) + session.flush() + return cred + except exc.NoResultFound: + pass + + +def update_credential(credential_id, + new_user_name=None, new_password=None): + """Updates a credential for a tenant.""" + session = db.get_session() + try: + cred = (session.query(network_models_v2.Credential). + filter_by(credential_id=credential_id).one()) + if new_user_name: + cred["user_name"] = new_user_name + if new_password: + cred["password"] = new_password + session.merge(cred) + session.flush() + return cred + except exc.NoResultFound: + raise c_exc.CredentialNotFound(credential_id=credential_id) + + +def get_all_n1kv_credentials(): + session = db.get_session() + return (session.query(network_models_v2.Credential). + filter_by(type='n1kv')) + + +def add_provider_network(network_id, network_type, segmentation_id): + """Add a network to the provider network table.""" + session = db.get_session() + if session.query(network_models_v2.ProviderNetwork).filter_by( + network_id=network_id).first(): + raise c_exc.ProviderNetworkExists(network_id) + pnet = network_models_v2.ProviderNetwork(network_id=network_id, + network_type=network_type, + segmentation_id=segmentation_id) + session.add(pnet) + session.flush() + + +def remove_provider_network(network_id): + """Remove network_id from the provider network table. + + :param network_id: Any network id. If it is not in the table, do nothing. + :return: network_id if it was in the table and successfully removed. + """ + session = db.get_session() + pnet = (session.query(network_models_v2.ProviderNetwork). 
+ filter_by(network_id=network_id).first()) + if pnet: + session.delete(pnet) + session.flush() + return network_id + + +def is_provider_network(network_id): + """Return True if network_id is in the provider network table.""" + session = db.get_session() + if session.query(network_models_v2.ProviderNetwork).filter_by( + network_id=network_id).first(): + return True + + +def is_provider_vlan(vlan_id): + """Check for a vlan provider network with the specified vlan_id. + + Returns True if the provider network table contains a vlan network + with the specified vlan_id. + """ + session = db.get_session() + if (session.query(network_models_v2.ProviderNetwork). + filter_by(network_type=const.NETWORK_TYPE_VLAN, + segmentation_id=vlan_id).first()): + return True + + +def get_ovs_vlans(): + session = db.get_session() + bindings = (session.query(ovs_models_v2.VlanAllocation.vlan_id). + filter_by(allocated=True)) + return [binding.vlan_id for binding in bindings] + + +class Credential_db_mixin(object): + + """Mixin class for Cisco Credentials as a resource.""" + + def _make_credential_dict(self, credential, fields=None): + res = {'credential_id': credential['credential_id'], + 'credential_name': credential['credential_name'], + 'user_name': credential['user_name'], + 'password': credential['password'], + 'type': credential['type']} + return self._fields(res, fields) + + def create_credential(self, context, credential): + """Create a credential.""" + c = credential['credential'] + cred = add_credential(c['credential_name'], + c['user_name'], + c['password'], + c['type']) + return self._make_credential_dict(cred) + + def get_credentials(self, context, filters=None, fields=None): + """Retrieve a list of credentials.""" + return self._get_collection(context, + network_models_v2.Credential, + self._make_credential_dict, + filters=filters, + fields=fields) + + def get_credential(self, context, id, fields=None): + """Retrieve the requested credential based on its id.""" + 
credential = get_credential(id) + return self._make_credential_dict(credential, fields) + + def update_credential(self, context, id, credential): + """Update a credential based on its id.""" + c = credential['credential'] + cred = update_credential(id, + c['user_name'], + c['password']) + return self._make_credential_dict(cred) + + def delete_credential(self, context, id): + """Delete a credential based on its id.""" + return remove_credential(id) diff --git a/neutron/plugins/cisco/db/network_models_v2.py b/neutron/plugins/cisco/db/network_models_v2.py new file mode 100644 index 000000000..49768371d --- /dev/null +++ b/neutron/plugins/cisco/db/network_models_v2.py @@ -0,0 +1,56 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012, Cisco Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Rohit Agarwalla, Cisco Systems, Inc. 
+ +import sqlalchemy as sa + +from neutron.db import model_base + + +class QoS(model_base.BASEV2): + """Represents QoS policies for a tenant.""" + + __tablename__ = 'cisco_qos_policies' + + qos_id = sa.Column(sa.String(255)) + tenant_id = sa.Column(sa.String(255), primary_key=True) + qos_name = sa.Column(sa.String(255), primary_key=True) + qos_desc = sa.Column(sa.String(255)) + + +class Credential(model_base.BASEV2): + """Represents credentials for a tenant to control Cisco switches.""" + + __tablename__ = 'cisco_credentials' + + credential_id = sa.Column(sa.String(255)) + credential_name = sa.Column(sa.String(255), primary_key=True) + user_name = sa.Column(sa.String(255)) + password = sa.Column(sa.String(255)) + type = sa.Column(sa.String(255)) + + +class ProviderNetwork(model_base.BASEV2): + """Represents networks that were created as provider networks.""" + + __tablename__ = 'cisco_provider_networks' + + network_id = sa.Column(sa.String(36), + sa.ForeignKey('networks.id', ondelete="CASCADE"), + primary_key=True) + network_type = sa.Column(sa.String(255), nullable=False) + segmentation_id = sa.Column(sa.Integer, nullable=False) diff --git a/neutron/plugins/cisco/db/nexus_db_v2.py b/neutron/plugins/cisco/db/nexus_db_v2.py new file mode 100644 index 000000000..a11a8a041 --- /dev/null +++ b/neutron/plugins/cisco/db/nexus_db_v2.py @@ -0,0 +1,154 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012, Cisco Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Rohit Agarwalla, Cisco Systems, Inc. +# @author: Arvind Somya, Cisco Systems, Inc. (asomya@cisco.com) +# + +import sqlalchemy.orm.exc as sa_exc + +import neutron.db.api as db +from neutron.openstack.common import log as logging +from neutron.plugins.cisco.common import cisco_exceptions as c_exc +from neutron.plugins.cisco.db import nexus_models_v2 + + +LOG = logging.getLogger(__name__) + + +def get_nexusport_binding(port_id, vlan_id, switch_ip, instance_id): + """Lists a nexusport binding.""" + LOG.debug(_("get_nexusport_binding() called")) + return _lookup_all_nexus_bindings(port_id=port_id, + vlan_id=vlan_id, + switch_ip=switch_ip, + instance_id=instance_id) + + +def get_nexusvlan_binding(vlan_id, switch_ip): + """Lists a vlan and switch binding.""" + LOG.debug(_("get_nexusvlan_binding() called")) + return _lookup_all_nexus_bindings(vlan_id=vlan_id, switch_ip=switch_ip) + + +def add_nexusport_binding(port_id, vlan_id, switch_ip, instance_id): + """Adds a nexusport binding.""" + LOG.debug(_("add_nexusport_binding() called")) + session = db.get_session() + binding = nexus_models_v2.NexusPortBinding(port_id=port_id, + vlan_id=vlan_id, + switch_ip=switch_ip, + instance_id=instance_id) + session.add(binding) + session.flush() + return binding + + +def remove_nexusport_binding(port_id, vlan_id, switch_ip, instance_id): + """Removes a nexusport binding.""" + LOG.debug(_("remove_nexusport_binding() called")) + session = db.get_session() + binding = _lookup_all_nexus_bindings(session=session, + vlan_id=vlan_id, + switch_ip=switch_ip, + port_id=port_id, + instance_id=instance_id) + for bind in binding: + session.delete(bind) + session.flush() + return binding + + +def update_nexusport_binding(port_id, new_vlan_id): + """Updates nexusport binding.""" + if not new_vlan_id: + LOG.warning(_("update_nexusport_binding called with no vlan")) + return + 
LOG.debug(_("update_nexusport_binding called")) + session = db.get_session() + binding = _lookup_one_nexus_binding(session=session, port_id=port_id) + binding.vlan_id = new_vlan_id + session.merge(binding) + session.flush() + return binding + + +def get_nexusvm_bindings(vlan_id, instance_id): + """Lists nexusvm bindings.""" + LOG.debug(_("get_nexusvm_binding() called")) + + return _lookup_all_nexus_bindings(vlan_id=vlan_id, + instance_id=instance_id) + + +def get_port_vlan_switch_binding(port_id, vlan_id, switch_ip): + """Lists nexusvm bindings.""" + LOG.debug(_("get_port_vlan_switch_binding() called")) + return _lookup_all_nexus_bindings(port_id=port_id, + switch_ip=switch_ip, + vlan_id=vlan_id) + + +def get_port_switch_bindings(port_id, switch_ip): + """List all vm/vlan bindings on a Nexus switch port.""" + LOG.debug(_("get_port_switch_bindings() called, " + "port:'%(port_id)s', switch:'%(switch_ip)s'"), + {'port_id': port_id, 'switch_ip': switch_ip}) + try: + return _lookup_all_nexus_bindings(port_id=port_id, + switch_ip=switch_ip) + except c_exc.NexusPortBindingNotFound: + pass + + +def get_nexussvi_bindings(): + """Lists nexus svi bindings.""" + LOG.debug(_("get_nexussvi_bindings() called")) + return _lookup_all_nexus_bindings(port_id='router') + + +def _lookup_nexus_bindings(query_type, session=None, **bfilter): + """Look up 'query_type' Nexus bindings matching the filter. + + :param query_type: 'all', 'one' or 'first' + :param session: db session + :param bfilter: filter for bindings query + :return: bindings if query gave a result, else + raise NexusPortBindingNotFound. 
+ """ + if session is None: + session = db.get_session() + query_method = getattr(session.query( + nexus_models_v2.NexusPortBinding).filter_by(**bfilter), query_type) + try: + bindings = query_method() + if bindings: + return bindings + except sa_exc.NoResultFound: + pass + raise c_exc.NexusPortBindingNotFound(**bfilter) + + +def _lookup_all_nexus_bindings(session=None, **bfilter): + return _lookup_nexus_bindings('all', session, **bfilter) + + +def _lookup_one_nexus_binding(session=None, **bfilter): + return _lookup_nexus_bindings('one', session, **bfilter) + + +def _lookup_first_nexus_binding(session=None, **bfilter): + return _lookup_nexus_bindings('first', session, **bfilter) diff --git a/neutron/plugins/cisco/db/nexus_models_v2.py b/neutron/plugins/cisco/db/nexus_models_v2.py new file mode 100644 index 000000000..e639e47c6 --- /dev/null +++ b/neutron/plugins/cisco/db/nexus_models_v2.py @@ -0,0 +1,46 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012, Cisco Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# @author: Rohit Agarwalla, Cisco Systems, Inc. 
+ +import sqlalchemy as sa + +from neutron.db import model_base + + +class NexusPortBinding(model_base.BASEV2): + """Represents a binding of VM's to nexus ports.""" + + __tablename__ = "cisco_nexusport_bindings" + + id = sa.Column(sa.Integer, primary_key=True, autoincrement=True) + port_id = sa.Column(sa.String(255)) + vlan_id = sa.Column(sa.Integer, nullable=False) + switch_ip = sa.Column(sa.String(255), nullable=False) + instance_id = sa.Column(sa.String(255), nullable=False) + + def __repr__(self): + """Just the binding, without the id key.""" + return ("" % + (self.port_id, self.vlan_id, self.switch_ip, self.instance_id)) + + def __eq__(self, other): + """Compare only the binding, without the id key.""" + return ( + self.port_id == other.port_id and + self.vlan_id == other.vlan_id and + self.switch_ip == other.switch_ip and + self.instance_id == other.instance_id + ) diff --git a/neutron/plugins/cisco/extensions/__init__.py b/neutron/plugins/cisco/extensions/__init__.py new file mode 100644 index 000000000..63082aba2 --- /dev/null +++ b/neutron/plugins/cisco/extensions/__init__.py @@ -0,0 +1,16 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 OpenStack Foundation. +# All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
diff --git a/neutron/plugins/cisco/extensions/_credential_view.py b/neutron/plugins/cisco/extensions/_credential_view.py new file mode 100644 index 000000000..9dcbbc81e --- /dev/null +++ b/neutron/plugins/cisco/extensions/_credential_view.py @@ -0,0 +1,52 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2011 Cisco Systems, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Ying Liu, Cisco Systems, Inc. +# + + +def get_view_builder(req): + base_url = req.application_url + return ViewBuilder(base_url) + + +class ViewBuilder(object): + """ViewBuilder for Credential, derived from neutron.views.networks.""" + + def __init__(self, base_url): + """Initialize builder. 
+ + :param base_url: url of the root wsgi application + """ + self.base_url = base_url + + def build(self, credential_data, is_detail=False): + """Generic method used to generate a credential entity.""" + if is_detail: + credential = self._build_detail(credential_data) + else: + credential = self._build_simple(credential_data) + return credential + + def _build_simple(self, credential_data): + """Return a simple description of credential.""" + return dict(credential=dict(id=credential_data['credential_id'])) + + def _build_detail(self, credential_data): + """Return a detailed description of credential.""" + return dict(credential=dict(id=credential_data['credential_id'], + name=credential_data['user_name'], + password=credential_data['password'])) diff --git a/neutron/plugins/cisco/extensions/_qos_view.py b/neutron/plugins/cisco/extensions/_qos_view.py new file mode 100644 index 000000000..81ef5fef6 --- /dev/null +++ b/neutron/plugins/cisco/extensions/_qos_view.py @@ -0,0 +1,52 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2011 Cisco Systems, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Ying Liu, Cisco Systems, Inc. +# + + +def get_view_builder(req): + base_url = req.application_url + return ViewBuilder(base_url) + + +class ViewBuilder(object): + """ViewBuilder for QoS, derived from neutron.views.networks.""" + + def __init__(self, base_url): + """Initialize builder. 
+ + :param base_url: url of the root wsgi application + """ + self.base_url = base_url + + def build(self, qos_data, is_detail=False): + """Generic method used to generate a QoS entity.""" + if is_detail: + qos = self._build_detail(qos_data) + else: + qos = self._build_simple(qos_data) + return qos + + def _build_simple(self, qos_data): + """Return a simple description of qos.""" + return dict(qos=dict(id=qos_data['qos_id'])) + + def _build_detail(self, qos_data): + """Return a detailed description of qos.""" + return dict(qos=dict(id=qos_data['qos_id'], + name=qos_data['qos_name'], + description=qos_data['qos_desc'])) diff --git a/neutron/plugins/cisco/extensions/credential.py b/neutron/plugins/cisco/extensions/credential.py new file mode 100644 index 000000000..8838136c1 --- /dev/null +++ b/neutron/plugins/cisco/extensions/credential.py @@ -0,0 +1,84 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 Cisco Systems, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Ying Liu, Cisco Systems, Inc. 
+# @author: Abhishek Raut, Cisco Systems, Inc + +from neutron.api import extensions +from neutron.api.v2 import attributes +from neutron.api.v2 import base +from neutron import manager + + +# Attribute Map +RESOURCE_ATTRIBUTE_MAP = { + 'credentials': { + 'credential_id': {'allow_post': False, 'allow_put': False, + 'validate': {'type:regex': attributes.UUID_PATTERN}, + 'is_visible': True}, + 'credential_name': {'allow_post': True, 'allow_put': True, + 'is_visible': True, 'default': ''}, + 'tenant_id': {'allow_post': True, 'allow_put': False, + 'is_visible': False, 'default': ''}, + 'type': {'allow_post': True, 'allow_put': True, + 'is_visible': True, 'default': ''}, + 'user_name': {'allow_post': True, 'allow_put': True, + 'is_visible': True, 'default': ''}, + 'password': {'allow_post': True, 'allow_put': True, + 'is_visible': True, 'default': ''}, + }, +} + + +class Credential(extensions.ExtensionDescriptor): + + @classmethod + def get_name(cls): + """Returns Extended Resource Name.""" + return "Cisco Credential" + + @classmethod + def get_alias(cls): + """Returns Extended Resource Alias.""" + return "credential" + + @classmethod + def get_description(cls): + """Returns Extended Resource Description.""" + return "Credential include username and password" + + @classmethod + def get_namespace(cls): + """Returns Extended Resource Namespace.""" + return "http://docs.ciscocloud.com/api/ext/credential/v2.0" + + @classmethod + def get_updated(cls): + """Returns Extended Resource Update Time.""" + return "2011-07-25T13:25:27-06:00" + + @classmethod + def get_resources(cls): + """Returns Extended Resources.""" + resource_name = "credential" + collection_name = resource_name + "s" + plugin = manager.NeutronManager.get_plugin() + params = RESOURCE_ATTRIBUTE_MAP.get(collection_name, dict()) + controller = base.create_resource(collection_name, + resource_name, + plugin, params) + return [extensions.ResourceExtension(collection_name, + controller)] diff --git 
a/neutron/plugins/cisco/extensions/n1kv.py b/neutron/plugins/cisco/extensions/n1kv.py new file mode 100644 index 000000000..352ad816a --- /dev/null +++ b/neutron/plugins/cisco/extensions/n1kv.py @@ -0,0 +1,106 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 Cisco Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Abhishek Raut, Cisco Systems, Inc. +# @author: Rudrajit Tapadar, Cisco Systems, Inc. +# @author: Aruna Kushwaha, Cisco Systems, Inc. +# @author: Sergey Sudakovich, Cisco Systems, Inc. 
+ +from neutron.api import extensions +from neutron.api.v2 import attributes + + +PROFILE_ID = 'n1kv:profile_id' +MULTICAST_IP = 'n1kv:multicast_ip' +SEGMENT_ADD = 'n1kv:segment_add' +SEGMENT_DEL = 'n1kv:segment_del' +MEMBER_SEGMENTS = 'n1kv:member_segments' + +EXTENDED_ATTRIBUTES_2_0 = { + 'networks': { + PROFILE_ID: {'allow_post': True, 'allow_put': False, + 'validate': {'type:regex': attributes.UUID_PATTERN}, + 'default': attributes.ATTR_NOT_SPECIFIED, + 'is_visible': True}, + MULTICAST_IP: {'allow_post': True, 'allow_put': True, + 'default': attributes.ATTR_NOT_SPECIFIED, + 'is_visible': True}, + SEGMENT_ADD: {'allow_post': True, 'allow_put': True, + 'default': attributes.ATTR_NOT_SPECIFIED, + 'is_visible': True}, + SEGMENT_DEL: {'allow_post': True, 'allow_put': True, + 'default': attributes.ATTR_NOT_SPECIFIED, + 'is_visible': True}, + MEMBER_SEGMENTS: {'allow_post': True, 'allow_put': True, + 'default': attributes.ATTR_NOT_SPECIFIED, + 'is_visible': True}, + }, + 'ports': { + PROFILE_ID: {'allow_post': True, 'allow_put': False, + 'validate': {'type:regex': attributes.UUID_PATTERN}, + 'default': attributes.ATTR_NOT_SPECIFIED, + 'is_visible': True} + } +} + + +class N1kv(extensions.ExtensionDescriptor): + + """Extension class supporting N1kv profiles. + + This class is used by neutron's extension framework to make + metadata about the n1kv profile extension available to + clients. No new resources are defined by this extension. Instead, + the existing network resource's request and response messages are + extended with attributes in the n1kv profile namespace. + + To create a network based on n1kv profile using the CLI with admin rights: + + (shell) net-create --tenant_id \ + --n1kv:profile_id + (shell) port-create --tenant_id \ + --n1kv:profile_id + + + With admin rights, network dictionaries returned from CLI commands + will also include n1kv profile attributes. 
+ """ + + @classmethod + def get_name(cls): + return "n1kv" + + @classmethod + def get_alias(cls): + return "n1kv" + + @classmethod + def get_description(cls): + return "Expose network profile" + + @classmethod + def get_namespace(cls): + return "http://docs.openstack.org/ext/n1kv/api/v2.0" + + @classmethod + def get_updated(cls): + return "2012-11-15T10:00:00-00:00" + + def get_extended_resources(self, version): + if version == "2.0": + return EXTENDED_ATTRIBUTES_2_0 + else: + return {} diff --git a/neutron/plugins/cisco/extensions/network_profile.py b/neutron/plugins/cisco/extensions/network_profile.py new file mode 100644 index 000000000..bb05bd944 --- /dev/null +++ b/neutron/plugins/cisco/extensions/network_profile.py @@ -0,0 +1,103 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 Cisco Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Abhishek Raut, Cisco Systems, Inc. +# @author: Sergey Sudakovich, Cisco Systems, Inc. +# @author: Rudrajit Tapadar, Cisco Systems, Inc. 
+ +from neutron.api import extensions +from neutron.api.v2 import attributes +from neutron.api.v2 import base +from neutron import manager + + +# Attribute Map +RESOURCE_ATTRIBUTE_MAP = { + 'network_profiles': { + 'id': {'allow_post': False, 'allow_put': False, + 'validate': {'type:regex': attributes.UUID_PATTERN}, + 'is_visible': True}, + 'name': {'allow_post': True, 'allow_put': True, + 'is_visible': True, 'default': ''}, + 'segment_type': {'allow_post': True, 'allow_put': False, + 'is_visible': True, 'default': ''}, + 'sub_type': {'allow_post': True, 'allow_put': False, + 'is_visible': True, + 'default': attributes.ATTR_NOT_SPECIFIED}, + 'segment_range': {'allow_post': True, 'allow_put': True, + 'is_visible': True, 'default': ''}, + 'multicast_ip_range': {'allow_post': True, 'allow_put': True, + 'is_visible': True, + 'default': attributes.ATTR_NOT_SPECIFIED}, + 'multicast_ip_index': {'allow_post': False, 'allow_put': False, + 'is_visible': False, 'default': '0'}, + 'physical_network': {'allow_post': True, 'allow_put': False, + 'is_visible': True, 'default': ''}, + 'tenant_id': {'allow_post': True, 'allow_put': False, + 'is_visible': False, 'default': ''}, + 'add_tenant': {'allow_post': True, 'allow_put': True, + 'is_visible': True, 'default': None}, + 'remove_tenant': {'allow_post': True, 'allow_put': True, + 'is_visible': True, 'default': None}, + }, + 'network_profile_bindings': { + 'profile_id': {'allow_post': False, 'allow_put': False, + 'validate': {'type:regex': attributes.UUID_PATTERN}, + 'is_visible': True}, + 'tenant_id': {'allow_post': True, 'allow_put': False, + 'is_visible': True}, + }, +} + + +class Network_profile(extensions.ExtensionDescriptor): + + @classmethod + def get_name(cls): + return "Cisco N1kv Network Profiles" + + @classmethod + def get_alias(cls): + return 'network_profile' + + @classmethod + def get_description(cls): + return ("Profile includes the type of profile for N1kv") + + @classmethod + def get_namespace(cls): + return 
"http://docs.openstack.org/ext/n1kv/network-profile/api/v2.0" + + @classmethod + def get_updated(cls): + return "2012-07-20T10:00:00-00:00" + + @classmethod + def get_resources(cls): + """Returns Extended Resources.""" + exts = [] + plugin = manager.NeutronManager.get_plugin() + for resource_name in ['network_profile', 'network_profile_binding']: + collection_name = resource_name + "s" + controller = base.create_resource( + collection_name, + resource_name, + plugin, + RESOURCE_ATTRIBUTE_MAP.get(collection_name)) + ex = extensions.ResourceExtension(collection_name, + controller) + exts.append(ex) + return exts diff --git a/neutron/plugins/cisco/extensions/policy_profile.py b/neutron/plugins/cisco/extensions/policy_profile.py new file mode 100644 index 000000000..af3c25083 --- /dev/null +++ b/neutron/plugins/cisco/extensions/policy_profile.py @@ -0,0 +1,85 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 Cisco Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Abhishek Raut, Cisco Systems, Inc. +# @author: Sergey Sudakovich, Cisco Systems, Inc. 
+ +from neutron.api import extensions +from neutron.api.v2 import attributes +from neutron.api.v2 import base +from neutron import manager + +# Attribute Map +RESOURCE_ATTRIBUTE_MAP = { + 'policy_profiles': { + 'id': {'allow_post': False, 'allow_put': False, + 'validate': {'type:regex': attributes.UUID_PATTERN}, + 'is_visible': True}, + 'name': {'allow_post': False, 'allow_put': False, + 'is_visible': True, 'default': ''}, + 'add_tenant': {'allow_post': True, 'allow_put': True, + 'is_visible': True, 'default': None}, + 'remove_tenant': {'allow_post': True, 'allow_put': True, + 'is_visible': True, 'default': None}, + }, + 'policy_profile_bindings': { + 'profile_id': {'allow_post': False, 'allow_put': False, + 'validate': {'type:regex': attributes.UUID_PATTERN}, + 'is_visible': True}, + 'tenant_id': {'allow_post': True, 'allow_put': False, + 'is_visible': True}, + }, +} + + +class Policy_profile(extensions.ExtensionDescriptor): + + @classmethod + def get_name(cls): + return "Cisco Nexus1000V Policy Profiles" + + @classmethod + def get_alias(cls): + return 'policy_profile' + + @classmethod + def get_description(cls): + return "Profile includes the type of profile for N1kv" + + @classmethod + def get_namespace(cls): + return "http://docs.openstack.org/ext/n1kv/policy-profile/api/v2.0" + + @classmethod + def get_updated(cls): + return "2012-07-20T10:00:00-00:00" + + @classmethod + def get_resources(cls): + """Returns Extended Resources.""" + exts = [] + plugin = manager.NeutronManager.get_plugin() + for resource_name in ['policy_profile', 'policy_profile_binding']: + collection_name = resource_name + "s" + controller = base.create_resource( + collection_name, + resource_name, + plugin, + RESOURCE_ATTRIBUTE_MAP.get(collection_name)) + ex = extensions.ResourceExtension(collection_name, + controller) + exts.append(ex) + return exts diff --git a/neutron/plugins/cisco/extensions/qos.py b/neutron/plugins/cisco/extensions/qos.py new file mode 100644 index 000000000..255601b5b 
--- /dev/null +++ b/neutron/plugins/cisco/extensions/qos.py @@ -0,0 +1,156 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 Cisco Systems, Inc. +# All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Ying Liu, Cisco Systems, Inc. +# + +from webob import exc + +from neutron.api import api_common as common +from neutron.api import extensions +from neutron import manager +from neutron.plugins.cisco.common import cisco_exceptions as exception +from neutron.plugins.cisco.common import cisco_faults as faults +from neutron.plugins.cisco.extensions import _qos_view as qos_view +from neutron import wsgi + + +class Qos(extensions.ExtensionDescriptor): + """Qos extension file.""" + + @classmethod + def get_name(cls): + """Returns Ext Resource Name.""" + return "Cisco qos" + + @classmethod + def get_alias(cls): + """Returns Ext Resource Alias.""" + return "Cisco qos" + + @classmethod + def get_description(cls): + """Returns Ext Resource Description.""" + return "qos includes qos_name and qos_desc" + + @classmethod + def get_namespace(cls): + """Returns Ext Resource Namespace.""" + return "http://docs.ciscocloud.com/api/ext/qos/v1.0" + + @classmethod + def get_updated(cls): + """Returns Ext Resource update.""" + return "2011-07-25T13:25:27-06:00" + + @classmethod + def get_resources(cls): + """Returns Ext Resources.""" + parent_resource = dict(member_name="tenant", + collection_name="extensions/csco/tenants") + + controller = 
QosController(manager.NeutronManager.get_plugin()) + return [extensions.ResourceExtension('qoss', controller, + parent=parent_resource)] + + +class QosController(common.NeutronController, wsgi.Controller): + """qos API controller based on NeutronController.""" + + _qos_ops_param_list = [ + {'param-name': 'qos_name', 'required': True}, + {'param-name': 'qos_desc', 'required': True}, + ] + + _serialization_metadata = { + "application/xml": { + "attributes": { + "qos": ["id", "name"], + }, + }, + } + + def __init__(self, plugin): + self._resource_name = 'qos' + self._plugin = plugin + + def index(self, request, tenant_id): + """Returns a list of qos ids.""" + return self._items(request, tenant_id, is_detail=False) + + def _items(self, request, tenant_id, is_detail): + """Returns a list of qoss.""" + qoss = self._plugin.get_all_qoss(tenant_id) + builder = qos_view.get_view_builder(request) + result = [builder.build(qos, is_detail)['qos'] for qos in qoss] + return dict(qoss=result) + + # pylint: disable-msg=E1101 + def show(self, request, tenant_id, id): + """Returns qos details for the given qos id.""" + try: + qos = self._plugin.get_qos_details(tenant_id, id) + builder = qos_view.get_view_builder(request) + #build response with details + result = builder.build(qos, True) + return dict(qoss=result) + except exception.QosNotFound as exp: + return faults.Fault(faults.QosNotFound(exp)) + + def create(self, request, tenant_id): + """Creates a new qos for a given tenant.""" + #look for qos name in request + try: + body = self._deserialize(request.body, request.get_content_type()) + req_body = self._prepare_request_body(body, + self._qos_ops_param_list) + req_params = req_body[self._resource_name] + except exc.HTTPError as exp: + return faults.Fault(exp) + qos = self._plugin.create_qos(tenant_id, + req_params['qos_name'], + req_params['qos_desc']) + builder = qos_view.get_view_builder(request) + result = builder.build(qos) + return dict(qoss=result) + + def update(self, 
request, tenant_id, id): + """Updates the name for the qos with the given id.""" + try: + body = self._deserialize(request.body, request.get_content_type()) + req_body = self._prepare_request_body(body, + self._qos_ops_param_list) + req_params = req_body[self._resource_name] + except exc.HTTPError as exp: + return faults.Fault(exp) + try: + qos = self._plugin.rename_qos(tenant_id, id, + req_params['qos_name']) + + builder = qos_view.get_view_builder(request) + result = builder.build(qos, True) + return dict(qoss=result) + except exception.QosNotFound as exp: + return faults.Fault(faults.QosNotFound(exp)) + + def delete(self, request, tenant_id, id): + """Destroys the qos with the given id.""" + try: + self._plugin.delete_qos(tenant_id, id) + return exc.HTTPOk() + except exception.QosNotFound as exp: + return faults.Fault(faults.QosNotFound(exp)) diff --git a/neutron/plugins/cisco/l2device_plugin_base.py b/neutron/plugins/cisco/l2device_plugin_base.py new file mode 100644 index 000000000..ef75e1188 --- /dev/null +++ b/neutron/plugins/cisco/l2device_plugin_base.py @@ -0,0 +1,175 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2012 Cisco Systems, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Sumit Naiksatam, Cisco Systems, Inc. + +import abc +import inspect +import six + + +@six.add_metaclass(abc.ABCMeta) +class L2DevicePluginBase(object): + """Base class for a device-specific plugin. 
+ + An example of a device-specific plugin is a Nexus switch plugin. + The network model relies on device-category-specific plugins to perform + the configuration on each device. + """ + + @abc.abstractmethod + def create_network(self, tenant_id, net_name, net_id, vlan_name, vlan_id, + **kwargs): + """Create network. + + :returns: + :raises: + """ + pass + + @abc.abstractmethod + def delete_network(self, tenant_id, net_id, **kwargs): + """Delete network. + + :returns: + :raises: + """ + pass + + @abc.abstractmethod + def update_network(self, tenant_id, net_id, name, **kwargs): + """Update network. + + :returns: + :raises: + """ + pass + + @abc.abstractmethod + def create_port(self, tenant_id, net_id, port_state, port_id, **kwargs): + """Create port. + + :returns: + :raises: + """ + pass + + @abc.abstractmethod + def delete_port(self, tenant_id, net_id, port_id, **kwargs): + """Delete port. + + :returns: + :raises: + """ + pass + + @abc.abstractmethod + def update_port(self, tenant_id, net_id, port_id, **kwargs): + """Update port. + + :returns: + :raises: + """ + pass + + @abc.abstractmethod + def plug_interface(self, tenant_id, net_id, port_id, remote_interface_id, + **kwargs): + """Plug interface. + + :returns: + :raises: + """ + pass + + @abc.abstractmethod + def unplug_interface(self, tenant_id, net_id, port_id, **kwargs): + """Unplug interface. + + :returns: + :raises: + """ + pass + + def create_subnet(self, tenant_id, net_id, ip_version, + subnet_cidr, **kwargs): + """Create subnet. + + :returns: + :raises: + """ + pass + + def get_subnets(self, tenant_id, net_id, **kwargs): + """Get subnets. + + :returns: + :raises: + """ + pass + + def get_subnet(self, tenant_id, net_id, subnet_id, **kwargs): + """Get subnet. + + :returns: + :raises: + """ + pass + + def update_subnet(self, tenant_id, net_id, subnet_id, **kwargs): + """Update subnet. 
+ + :returns: + :raises: + """ + pass + + def delete_subnet(self, tenant_id, net_id, subnet_id, **kwargs): + """Delete subnet. + + :returns: + :raises: + """ + pass + + @classmethod + def __subclasshook__(cls, klass): + """Check plugin class. + + The __subclasshook__ method is a class method + that will be called every time a class is tested + using issubclass(klass, Plugin). + In that case, it will check that every method + marked with the abstractmethod decorator is + provided by the plugin class. + """ + if cls is L2DevicePluginBase: + for method in cls.__abstractmethods__: + method_ok = False + for base in klass.__mro__: + if method in base.__dict__: + fn_obj = base.__dict__[method] + if inspect.isfunction(fn_obj): + abstract_fn_obj = cls.__dict__[method] + arg_count = fn_obj.func_code.co_argcount + expected_arg_count = \ + abstract_fn_obj.func_code.co_argcount + method_ok = arg_count == expected_arg_count + if method_ok: + continue + return NotImplemented + return True + return NotImplemented diff --git a/neutron/plugins/cisco/models/__init__.py b/neutron/plugins/cisco/models/__init__.py new file mode 100644 index 000000000..833357b73 --- /dev/null +++ b/neutron/plugins/cisco/models/__init__.py @@ -0,0 +1,17 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2011 Cisco Systems, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Sumit Naiksatam, Cisco Systems, Inc. 
diff --git a/neutron/plugins/cisco/models/virt_phy_sw_v2.py b/neutron/plugins/cisco/models/virt_phy_sw_v2.py new file mode 100644 index 000000000..b7452f363 --- /dev/null +++ b/neutron/plugins/cisco/models/virt_phy_sw_v2.py @@ -0,0 +1,553 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 Cisco Systems, Inc. +# All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Sumit Naiksatam, Cisco Systems, Inc. +# @author: Rohit Agarwalla, Cisco Systems, Inc. +# + +import inspect +import logging +import sys + +from neutron.api.v2 import attributes +from neutron.db import api as db_api +from neutron.extensions import portbindings +from neutron.extensions import providernet as provider +from neutron import neutron_plugin_base_v2 +from neutron.openstack.common import importutils +from neutron.plugins.cisco.common import cisco_constants as const +from neutron.plugins.cisco.common import cisco_credentials_v2 as cred +from neutron.plugins.cisco.common import cisco_exceptions as cexc +from neutron.plugins.cisco.common import config as conf +from neutron.plugins.cisco.db import network_db_v2 as cdb +from neutron.plugins.openvswitch import ovs_db_v2 as odb + + +LOG = logging.getLogger(__name__) + + +class VirtualPhysicalSwitchModelV2(neutron_plugin_base_v2.NeutronPluginBaseV2): + """Virtual Physical Switch Model. + + This implementation works with OVS and Nexus plugin for the + following topology: + One or more servers to a nexus switch. 
+ """ + __native_bulk_support = True + supported_extension_aliases = ["provider", "binding"] + _methods_to_delegate = ['create_network_bulk', + 'get_network', 'get_networks', + 'create_port_bulk', + 'get_port', 'get_ports', + 'create_subnet', 'create_subnet_bulk', + 'delete_subnet', 'update_subnet', + 'get_subnet', 'get_subnets', + 'create_or_update_agent', 'report_state'] + + def __init__(self): + """Initialize the segmentation manager. + + Checks which device plugins are configured, and load the inventories + those device plugins for which the inventory is configured. + """ + conf.CiscoConfigOptions() + + self._plugins = {} + for key in conf.CISCO_PLUGINS.keys(): + plugin_obj = conf.CISCO_PLUGINS[key] + if plugin_obj is not None: + self._plugins[key] = importutils.import_object(plugin_obj) + LOG.debug(_("Loaded device plugin %s"), + conf.CISCO_PLUGINS[key]) + + if ((const.VSWITCH_PLUGIN in self._plugins) and + hasattr(self._plugins[const.VSWITCH_PLUGIN], + "supported_extension_aliases")): + self.supported_extension_aliases.extend( + self._plugins[const.VSWITCH_PLUGIN]. + supported_extension_aliases) + # At this point, all the database models should have been loaded. It's + # possible that configure_db() may have been called by one of the + # plugins loaded in above. Otherwise, this call is to make sure that + # the database is initialized + db_api.configure_db() + + # Initialize credential store after database initialization + cred.Store.initialize() + LOG.debug(_("%(module)s.%(name)s init done"), + {'module': __name__, + 'name': self.__class__.__name__}) + + # Check whether we have a valid Nexus driver loaded + self.is_nexus_plugin = False + nexus_driver = conf.CISCO.nexus_driver + if nexus_driver.endswith('CiscoNEXUSDriver'): + self.is_nexus_plugin = True + + def __getattribute__(self, name): + """Delegate calls to OVS sub-plugin. + + This delegates the calls to the methods implemented only by the OVS + sub-plugin. 
Note: Currently, bulking is handled by the caller + (PluginV2), and this model class expects to receive only non-bulking + calls. If, however, a bulking call is made, this will method will + delegate the call to the OVS plugin. + """ + super_getattribute = super(VirtualPhysicalSwitchModelV2, + self).__getattribute__ + methods = super_getattribute('_methods_to_delegate') + + if name in methods: + plugin = super_getattribute('_plugins')[const.VSWITCH_PLUGIN] + return getattr(plugin, name) + + try: + return super_getattribute(name) + except AttributeError: + plugin = super_getattribute('_plugins')[const.VSWITCH_PLUGIN] + return getattr(plugin, name) + + def _func_name(self, offset=0): + """Get the name of the calling function.""" + frame_record = inspect.stack()[1 + offset] + func_name = frame_record[3] + return func_name + + def _invoke_plugin_per_device(self, plugin_key, function_name, + args, **kwargs): + """Invoke plugin per device. + + Invokes a device plugin's relevant functions (based on the + plugin implementation) for completing this operation. 
+ """ + if plugin_key not in self._plugins: + LOG.info(_("No %s Plugin loaded"), plugin_key) + LOG.info(_("%(plugin_key)s: %(function_name)s with args %(args)s " + "ignored"), + {'plugin_key': plugin_key, + 'function_name': function_name, + 'args': args}) + else: + func = getattr(self._plugins[plugin_key], function_name) + return func(*args, **kwargs) + + def _get_segmentation_id(self, network_id): + binding_seg_id = odb.get_network_binding(None, network_id) + if not binding_seg_id: + raise cexc.NetworkSegmentIDNotFound(net_id=network_id) + return binding_seg_id.segmentation_id + + def _get_provider_vlan_id(self, network): + if (all(attributes.is_attr_set(network.get(attr)) + for attr in (provider.NETWORK_TYPE, + provider.PHYSICAL_NETWORK, + provider.SEGMENTATION_ID)) + and + network[provider.NETWORK_TYPE] == const.NETWORK_TYPE_VLAN): + return network[provider.SEGMENTATION_ID] + + def create_network(self, context, network): + """Create network. + + Perform this operation in the context of the configured device + plugins. + """ + LOG.debug(_("create_network() called")) + provider_vlan_id = self._get_provider_vlan_id(network[const.NETWORK]) + args = [context, network] + ovs_output = self._invoke_plugin_per_device(const.VSWITCH_PLUGIN, + self._func_name(), + args) + # The vswitch plugin did all the verification. If it's a provider + # vlan network, save it for the nexus plugin to use later. + if provider_vlan_id: + network_id = ovs_output[const.NET_ID] + cdb.add_provider_network(network_id, + const.NETWORK_TYPE_VLAN, + provider_vlan_id) + LOG.debug(_("Provider network added to DB: %(network_id)s, " + "%(vlan_id)s"), + {'network_id': network_id, 'vlan_id': provider_vlan_id}) + return ovs_output + + def update_network(self, context, id, network): + """Update network. + + Perform this operation in the context of the configured device + plugins. 
+ + Note that the Nexus sub-plugin does not need to be notified + (and the Nexus switch does not need to be [re]configured) + for an update network operation because the Nexus sub-plugin + is agnostic of all network-level attributes except the + segmentation ID. Furthermore, updating of the segmentation ID + is not supported by the OVS plugin since it is considered a + provider attribute, so it is not supported by this method. + """ + LOG.debug(_("update_network() called")) + + # We can only support updating of provider attributes if all the + # configured sub-plugins support it. Currently we have no method + # in place for checking whether a sub-plugin supports it, + # so assume not. + provider._raise_if_updates_provider_attributes(network['network']) + + args = [context, id, network] + return self._invoke_plugin_per_device(const.VSWITCH_PLUGIN, + self._func_name(), + args) + + def delete_network(self, context, id): + """Delete network. + + Perform this operation in the context of the configured device + plugins. + """ + args = [context, id] + ovs_output = self._invoke_plugin_per_device(const.VSWITCH_PLUGIN, + self._func_name(), + args) + if cdb.remove_provider_network(id): + LOG.debug(_("Provider network removed from DB: %s"), id) + return ovs_output + + def get_network(self, context, id, fields=None): + """Get network. This method is delegated to the vswitch plugin. + + This method is included here to satisfy abstract method requirements. + """ + pass # pragma no cover + + def get_networks(self, context, filters=None, fields=None, + sorts=None, limit=None, marker=None, page_reverse=False): + """Get networks. This method is delegated to the vswitch plugin. + + This method is included here to satisfy abstract method requirements. 
+ """ + pass # pragma no cover + + def _invoke_nexus_for_net_create(self, context, tenant_id, net_id, + instance_id, host_id): + if not self.is_nexus_plugin: + return False + + network = self.get_network(context, net_id) + vlan_id = self._get_segmentation_id(net_id) + vlan_name = conf.CISCO.vlan_name_prefix + str(vlan_id) + network[const.NET_VLAN_ID] = vlan_id + network[const.NET_VLAN_NAME] = vlan_name + attachment = { + const.TENANT_ID: tenant_id, + const.INSTANCE_ID: instance_id, + const.HOST_NAME: host_id, + } + self._invoke_plugin_per_device( + const.NEXUS_PLUGIN, + 'create_network', + [network, attachment]) + + def _check_valid_port_device_owner(self, port): + """Check the port for valid device_owner. + + Don't call the nexus plugin for router and dhcp + port owners. + """ + return port['device_owner'].startswith('compute') + + def _get_port_host_id_from_bindings(self, port): + """Get host_id from portbindings.""" + host_id = None + + if (portbindings.HOST_ID in port and + attributes.is_attr_set(port[portbindings.HOST_ID])): + host_id = port[portbindings.HOST_ID] + + return host_id + + def create_port(self, context, port): + """Create port. + + Perform this operation in the context of the configured device + plugins. 
+ """ + LOG.debug(_("create_port() called")) + args = [context, port] + ovs_output = self._invoke_plugin_per_device(const.VSWITCH_PLUGIN, + self._func_name(), + args) + instance_id = port['port']['device_id'] + + # Only call nexus plugin if there's a valid instance_id, host_id + # and device_owner + try: + host_id = self._get_port_host_id_from_bindings(port['port']) + if (instance_id and host_id and + self._check_valid_port_device_owner(port['port'])): + net_id = port['port']['network_id'] + tenant_id = port['port']['tenant_id'] + self._invoke_nexus_for_net_create( + context, tenant_id, net_id, instance_id, host_id) + except Exception: + # Create network on the Nexus plugin has failed, so we need + # to rollback the port creation on the VSwitch plugin. + exc_info = sys.exc_info() + try: + id = ovs_output['id'] + args = [context, id] + ovs_output = self._invoke_plugin_per_device( + const.VSWITCH_PLUGIN, + 'delete_port', + args) + finally: + # Re-raise the original exception + raise exc_info[0], exc_info[1], exc_info[2] + return ovs_output + + def get_port(self, context, id, fields=None): + """Get port. This method is delegated to the vswitch plugin. + + This method is included here to satisfy abstract method requirements. + """ + pass # pragma no cover + + def get_ports(self, context, filters=None, fields=None): + """Get ports. This method is delegated to the vswitch plugin. + + This method is included here to satisfy abstract method requirements. + """ + pass # pragma no cover + + def _check_nexus_net_create_needed(self, new_port, old_port): + """Check if nexus plugin should be invoked for net_create. + + In the following cases, the plugin should be invoked: + -- a port is attached to a VM instance. The old host id is None + -- VM migration. The old host id has a valid value + + When the plugin needs to be invoked, return the old_host_id, + and a list of calling arguments. 
+ Otherwise, return '' for old host id and an empty list + """ + old_device_id = old_port['device_id'] + new_device_id = new_port.get('device_id') + new_host_id = self._get_port_host_id_from_bindings(new_port) + tenant_id = old_port['tenant_id'] + net_id = old_port['network_id'] + old_host_id = self._get_port_host_id_from_bindings(old_port) + + LOG.debug(_("tenant_id: %(tid)s, net_id: %(nid)s, " + "old_device_id: %(odi)s, new_device_id: %(ndi)s, " + "old_host_id: %(ohi)s, new_host_id: %(nhi)s, " + "old_device_owner: %(odo)s, new_device_owner: %(ndo)s"), + {'tid': tenant_id, 'nid': net_id, + 'odi': old_device_id, 'ndi': new_device_id, + 'ohi': old_host_id, 'nhi': new_host_id, + 'odo': old_port.get('device_owner'), + 'ndo': new_port.get('device_owner')}) + + # A port is attached to an instance + if (new_device_id and not old_device_id and new_host_id and + self._check_valid_port_device_owner(new_port)): + return '', [tenant_id, net_id, new_device_id, new_host_id] + + # An instance is being migrated + if (old_device_id and old_host_id and new_host_id != old_host_id and + self._check_valid_port_device_owner(old_port)): + return old_host_id, [tenant_id, net_id, old_device_id, new_host_id] + + # no need to invoke the plugin + return '', [] + + def update_port(self, context, id, port): + """Update port. + + Perform this operation in the context of the configured device + plugins. 
+ """ + LOG.debug(_("update_port() called")) + old_port = self.get_port(context, id) + args = [context, id, port] + ovs_output = self._invoke_plugin_per_device(const.VSWITCH_PLUGIN, + self._func_name(), + args) + try: + # Check if the nexus plugin needs to be invoked + old_host_id, create_args = self._check_nexus_net_create_needed( + port['port'], old_port) + + # In the case of migration, invoke it to remove + # the previous port binding + if old_host_id: + vlan_id = self._get_segmentation_id(old_port['network_id']) + delete_args = [old_port['device_id'], vlan_id] + self._invoke_plugin_per_device(const.NEXUS_PLUGIN, + "delete_port", + delete_args) + + # Invoke the Nexus plugin to create a net and/or new port binding + if create_args: + self._invoke_nexus_for_net_create(context, *create_args) + + return ovs_output + except Exception: + exc_info = sys.exc_info() + LOG.error(_("Unable to update port '%s' on Nexus switch"), + old_port['name'], exc_info=exc_info) + try: + # Roll back vSwitch plugin to original port attributes. + args = [context, id, {'port': old_port}] + self._invoke_plugin_per_device( + const.VSWITCH_PLUGIN, + self._func_name(), + args) + finally: + # Re-raise the original exception + raise exc_info[0], exc_info[1], exc_info[2] + + def delete_port(self, context, id, l3_port_check=True): + """Delete port. + + Perform this operation in the context of the configured device + plugins. 
+ """ + LOG.debug(_("delete_port() called")) + port = self.get_port(context, id) + + host_id = self._get_port_host_id_from_bindings(port) + + if (self.is_nexus_plugin and host_id and + self._check_valid_port_device_owner(port)): + vlan_id = self._get_segmentation_id(port['network_id']) + n_args = [port['device_id'], vlan_id] + self._invoke_plugin_per_device(const.NEXUS_PLUGIN, + self._func_name(), + n_args) + try: + args = [context, id] + ovs_output = self._invoke_plugin_per_device( + const.VSWITCH_PLUGIN, self._func_name(), + args, l3_port_check=l3_port_check) + except Exception: + exc_info = sys.exc_info() + # Roll back the delete port on the Nexus plugin + try: + tenant_id = port['tenant_id'] + net_id = port['network_id'] + instance_id = port['device_id'] + host_id = port[portbindings.HOST_ID] + self._invoke_nexus_for_net_create(context, tenant_id, net_id, + instance_id, host_id) + finally: + # Raise the original exception. + raise exc_info[0], exc_info[1], exc_info[2] + + return ovs_output + + def add_router_interface(self, context, router_id, interface_info): + """Add a router interface on a subnet. 
+ + Only invoke the Nexus plugin to create SVI if L3 support on + the Nexus switches is enabled and a Nexus plugin is loaded, + otherwise send it to the vswitch plugin + """ + if (conf.CISCO.nexus_l3_enable and self.is_nexus_plugin): + LOG.debug(_("L3 enabled on Nexus plugin, create SVI on switch")) + if 'subnet_id' not in interface_info: + raise cexc.SubnetNotSpecified() + if 'port_id' in interface_info: + raise cexc.PortIdForNexusSvi() + subnet = self.get_subnet(context, interface_info['subnet_id']) + gateway_ip = subnet['gateway_ip'] + # Get gateway IP address and netmask + cidr = subnet['cidr'] + netmask = cidr.split('/', 1)[1] + gateway_ip = gateway_ip + '/' + netmask + network_id = subnet['network_id'] + vlan_id = self._get_segmentation_id(network_id) + vlan_name = conf.CISCO.vlan_name_prefix + str(vlan_id) + + n_args = [vlan_name, vlan_id, subnet['id'], gateway_ip, router_id] + return self._invoke_plugin_per_device(const.NEXUS_PLUGIN, + self._func_name(), + n_args) + else: + LOG.debug(_("L3 disabled or not Nexus plugin, send to vswitch")) + n_args = [context, router_id, interface_info] + return self._invoke_plugin_per_device(const.VSWITCH_PLUGIN, + self._func_name(), + n_args) + + def remove_router_interface(self, context, router_id, interface_info): + """Remove a router interface. 
+ + Only invoke the Nexus plugin to delete SVI if L3 support on + the Nexus switches is enabled and a Nexus plugin is loaded, + otherwise send it to the vswitch plugin + """ + if (conf.CISCO.nexus_l3_enable and self.is_nexus_plugin): + LOG.debug(_("L3 enabled on Nexus plugin, delete SVI from switch")) + + subnet = self.get_subnet(context, interface_info['subnet_id']) + network_id = subnet['network_id'] + vlan_id = self._get_segmentation_id(network_id) + n_args = [vlan_id, router_id] + + return self._invoke_plugin_per_device(const.NEXUS_PLUGIN, + self._func_name(), + n_args) + else: + LOG.debug(_("L3 disabled or not Nexus plugin, send to vswitch")) + n_args = [context, router_id, interface_info] + return self._invoke_plugin_per_device(const.VSWITCH_PLUGIN, + self._func_name(), + n_args) + + def create_subnet(self, context, subnet): + """Create subnet. This method is delegated to the vswitch plugin. + + This method is included here to satisfy abstract method requirements. + """ + pass # pragma no cover + + def update_subnet(self, context, id, subnet): + """Update subnet. This method is delegated to the vswitch plugin. + + This method is included here to satisfy abstract method requirements. + """ + pass # pragma no cover + + def get_subnet(self, context, id, fields=None): + """Get subnet. This method is delegated to the vswitch plugin. + + This method is included here to satisfy abstract method requirements. + """ + pass # pragma no cover + + def delete_subnet(self, context, id, kwargs): + """Delete subnet. This method is delegated to the vswitch plugin. + + This method is included here to satisfy abstract method requirements. + """ + pass # pragma no cover + + def get_subnets(self, context, filters=None, fields=None, + sorts=None, limit=None, marker=None, page_reverse=False): + """Get subnets. This method is delegated to the vswitch plugin. + + This method is included here to satisfy abstract method requirements. 
+ """ + pass # pragma no cover diff --git a/neutron/plugins/cisco/n1kv/__init__.py b/neutron/plugins/cisco/n1kv/__init__.py new file mode 100644 index 000000000..59a411933 --- /dev/null +++ b/neutron/plugins/cisco/n1kv/__init__.py @@ -0,0 +1,18 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 Cisco Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Abhishek Raut, Cisco Systems, Inc. +# diff --git a/neutron/plugins/cisco/n1kv/n1kv_client.py b/neutron/plugins/cisco/n1kv/n1kv_client.py new file mode 100644 index 000000000..541750835 --- /dev/null +++ b/neutron/plugins/cisco/n1kv/n1kv_client.py @@ -0,0 +1,541 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 Cisco Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Abhishek Raut, Cisco Systems, Inc. +# @author: Rudrajit Tapadar, Cisco Systems, Inc. 
+ +import base64 +import eventlet +import netaddr +import requests + +from neutron.common import exceptions as n_exc +from neutron.extensions import providernet +from neutron.openstack.common import jsonutils +from neutron.openstack.common import log as logging +from neutron.plugins.cisco.common import cisco_constants as c_const +from neutron.plugins.cisco.common import cisco_credentials_v2 as c_cred +from neutron.plugins.cisco.common import cisco_exceptions as c_exc +from neutron.plugins.cisco.common import config as c_conf +from neutron.plugins.cisco.db import network_db_v2 +from neutron.plugins.cisco.extensions import n1kv + +LOG = logging.getLogger(__name__) + + +class Client(object): + + """ + Client for the Cisco Nexus1000V Neutron Plugin. + + This client implements functions to communicate with + Cisco Nexus1000V VSM. + + For every Neutron objects, Cisco Nexus1000V Neutron Plugin + creates a corresponding object in the controller (Cisco + Nexus1000V VSM). + + CONCEPTS: + + Following are few concepts used in Nexus1000V VSM: + + port-profiles: + Policy profiles correspond to port profiles on Nexus1000V VSM. + Port profiles are the primary mechanism by which network policy is + defined and applied to switch interfaces in a Nexus 1000V system. + + network-segment: + Each network-segment represents a broadcast domain. + + network-segment-pool: + A network-segment-pool contains one or more network-segments. + + logical-network: + A logical-network contains one or more network-segment-pools. + + bridge-domain: + A bridge-domain is created when the network-segment is of type VXLAN. + Each VXLAN <--> VLAN combination can be thought of as a bridge domain. + + ip-pool: + Each ip-pool represents a subnet on the Nexus1000V VSM. + + vm-network: + vm-network refers to a network-segment and policy-profile. + It maintains a list of ports that uses the network-segment and + policy-profile this vm-network refers to. 
+ + events: + Events correspond to commands that are logged on Nexus1000V VSM. + Events are used to poll for a certain resource on Nexus1000V VSM. + Event type of port_profile: Return all updates/create/deletes + of port profiles from the VSM. + Event type of port_profile_update: Return only updates regarding + policy-profiles. + Event type of port_profile_delete: Return only deleted policy profiles. + + + WORK FLOW: + + For every network profile a corresponding logical-network and + a network-segment-pool, under this logical-network, will be created. + + For every network created from a given network profile, a + network-segment will be added to the network-segment-pool corresponding + to that network profile. + + A port is created on a network and associated with a policy-profile. + Hence for every unique combination of a network and a policy-profile, a + unique vm-network will be created and a reference to the port will be + added. If the same combination of network and policy-profile is used by + another port, the references to that port will be added to the same + vm-network. + + + """ + + # Define paths for the URI where the client connects for HTTP requests. 
+ port_profiles_path = "/virtual-port-profile" + network_segment_path = "/network-segment/%s" + network_segment_pool_path = "/network-segment-pool/%s" + ip_pool_path = "/ip-pool-template/%s" + ports_path = "/kvm/vm-network/%s/ports" + port_path = "/kvm/vm-network/%s/ports/%s" + vm_networks_path = "/kvm/vm-network" + vm_network_path = "/kvm/vm-network/%s" + bridge_domains_path = "/kvm/bridge-domain" + bridge_domain_path = "/kvm/bridge-domain/%s" + logical_network_path = "/logical-network/%s" + events_path = "/kvm/events" + clusters_path = "/cluster" + encap_profiles_path = "/encapsulation-profile" + encap_profile_path = "/encapsulation-profile/%s" + + pool = eventlet.GreenPool(c_conf.CISCO_N1K.http_pool_size) + + def __init__(self, **kwargs): + """Initialize a new client for the plugin.""" + self.format = 'json' + self.hosts = self._get_vsm_hosts() + self.action_prefix = 'http://%s/api/n1k' % self.hosts[0] + self.timeout = c_const.DEFAULT_HTTP_TIMEOUT + + def list_port_profiles(self): + """ + Fetch all policy profiles from the VSM. + + :returns: JSON string + """ + return self._get(self.port_profiles_path) + + def create_bridge_domain(self, network, overlay_subtype): + """ + Create a bridge domain on VSM. + + :param network: network dict + :param overlay_subtype: string representing subtype of overlay network + """ + body = {'name': network['id'] + c_const.BRIDGE_DOMAIN_SUFFIX, + 'segmentId': network[providernet.SEGMENTATION_ID], + 'subType': overlay_subtype, + 'tenantId': network['tenant_id']} + if overlay_subtype == c_const.NETWORK_SUBTYPE_NATIVE_VXLAN: + body['groupIp'] = network[n1kv.MULTICAST_IP] + return self._post(self.bridge_domains_path, + body=body) + + def delete_bridge_domain(self, name): + """ + Delete a bridge domain on VSM. + + :param name: name of the bridge domain to be deleted + """ + return self._delete(self.bridge_domain_path % name) + + def create_network_segment(self, network, network_profile): + """ + Create a network segment on the VSM. 
+ + :param network: network dict + :param network_profile: network profile dict + """ + body = {'publishName': network['id'], + 'description': network['name'], + 'id': network['id'], + 'tenantId': network['tenant_id'], + 'networkSegmentPool': network_profile['id'], } + if network[providernet.NETWORK_TYPE] == c_const.NETWORK_TYPE_VLAN: + body['vlan'] = network[providernet.SEGMENTATION_ID] + elif network[providernet.NETWORK_TYPE] == c_const.NETWORK_TYPE_OVERLAY: + body['bridgeDomain'] = (network['id'] + + c_const.BRIDGE_DOMAIN_SUFFIX) + if network_profile['segment_type'] == c_const.NETWORK_TYPE_TRUNK: + body['mode'] = c_const.NETWORK_TYPE_TRUNK + body['segmentType'] = network_profile['sub_type'] + if network_profile['sub_type'] == c_const.NETWORK_TYPE_VLAN: + body['addSegments'] = network['add_segment_list'] + body['delSegments'] = network['del_segment_list'] + else: + body['encapProfile'] = (network['id'] + + c_const.ENCAPSULATION_PROFILE_SUFFIX) + else: + body['mode'] = 'access' + body['segmentType'] = network_profile['segment_type'] + return self._post(self.network_segment_path % network['id'], + body=body) + + def update_network_segment(self, network_segment_id, body): + """ + Update a network segment on the VSM. + + Network segment on VSM can be updated to associate it with an ip-pool + or update its description and segment id. + + :param network_segment_id: UUID representing the network segment + :param body: dict of arguments to be updated + """ + return self._post(self.network_segment_path % network_segment_id, + body=body) + + def delete_network_segment(self, network_segment_id): + """ + Delete a network segment on the VSM. + + :param network_segment_id: UUID representing the network segment + """ + return self._delete(self.network_segment_path % network_segment_id) + + def create_logical_network(self, network_profile, tenant_id): + """ + Create a logical network on the VSM. 
+ + :param network_profile: network profile dict + :param tenant_id: UUID representing the tenant + """ + LOG.debug(_("Logical network")) + body = {'description': network_profile['name'], + 'tenantId': tenant_id} + logical_network_name = (network_profile['id'] + + c_const.LOGICAL_NETWORK_SUFFIX) + return self._post(self.logical_network_path % logical_network_name, + body=body) + + def delete_logical_network(self, logical_network_name): + """ + Delete a logical network on VSM. + + :param logical_network_name: string representing name of the logical + network + """ + return self._delete( + self.logical_network_path % logical_network_name) + + def create_network_segment_pool(self, network_profile, tenant_id): + """ + Create a network segment pool on the VSM. + + :param network_profile: network profile dict + :param tenant_id: UUID representing the tenant + """ + LOG.debug(_("network_segment_pool")) + logical_network_name = (network_profile['id'] + + c_const.LOGICAL_NETWORK_SUFFIX) + body = {'name': network_profile['name'], + 'description': network_profile['name'], + 'id': network_profile['id'], + 'logicalNetwork': logical_network_name, + 'tenantId': tenant_id} + return self._post( + self.network_segment_pool_path % network_profile['id'], + body=body) + + def update_network_segment_pool(self, network_profile): + """ + Update a network segment pool on the VSM. + + :param network_profile: network profile dict + """ + body = {'name': network_profile['name'], + 'description': network_profile['name']} + return self._post(self.network_segment_pool_path % + network_profile['id'], body=body) + + def delete_network_segment_pool(self, network_segment_pool_id): + """ + Delete a network segment pool on the VSM. + + :param network_segment_pool_id: UUID representing the network + segment pool + """ + return self._delete(self.network_segment_pool_path % + network_segment_pool_id) + + def create_ip_pool(self, subnet): + """ + Create an ip-pool on the VSM. 
+ + :param subnet: subnet dict + """ + if subnet['cidr']: + try: + ip = netaddr.IPNetwork(subnet['cidr']) + netmask = str(ip.netmask) + network_address = str(ip.network) + except (ValueError, netaddr.AddrFormatError): + msg = _("Invalid input for CIDR") + raise n_exc.InvalidInput(error_message=msg) + else: + netmask = network_address = "" + + if subnet['allocation_pools']: + address_range_start = subnet['allocation_pools'][0]['start'] + address_range_end = subnet['allocation_pools'][0]['end'] + else: + address_range_start = None + address_range_end = None + + body = {'addressRangeStart': address_range_start, + 'addressRangeEnd': address_range_end, + 'ipAddressSubnet': netmask, + 'description': subnet['name'], + 'gateway': subnet['gateway_ip'], + 'dhcp': subnet['enable_dhcp'], + 'dnsServersList': subnet['dns_nameservers'], + 'networkAddress': network_address, + 'tenantId': subnet['tenant_id']} + return self._post(self.ip_pool_path % subnet['id'], + body=body) + + def update_ip_pool(self, subnet): + """ + Update an ip-pool on the VSM. + + :param subnet: subnet dictionary + """ + body = {'description': subnet['name'], + 'dhcp': subnet['enable_dhcp'], + 'dnsServersList': subnet['dns_nameservers']} + return self._post(self.ip_pool_path % subnet['id'], + body=body) + + def delete_ip_pool(self, subnet_id): + """ + Delete an ip-pool on the VSM. + + :param subnet_id: UUID representing the subnet + """ + return self._delete(self.ip_pool_path % subnet_id) + + def create_vm_network(self, + port, + vm_network_name, + policy_profile): + """ + Create a VM network on the VSM. 
+ + :param port: port dict + :param vm_network_name: name of the VM network + :param policy_profile: policy profile dict + """ + body = {'name': vm_network_name, + 'networkSegmentId': port['network_id'], + 'networkSegment': port['network_id'], + 'portProfile': policy_profile['name'], + 'portProfileId': policy_profile['id'], + 'tenantId': port['tenant_id'], + 'portId': port['id'], + 'macAddress': port['mac_address'], + } + if port.get('fixed_ips'): + body['ipAddress'] = port['fixed_ips'][0]['ip_address'] + body['subnetId'] = port['fixed_ips'][0]['subnet_id'] + return self._post(self.vm_networks_path, + body=body) + + def delete_vm_network(self, vm_network_name): + """ + Delete a VM network on the VSM. + + :param vm_network_name: name of the VM network + """ + return self._delete(self.vm_network_path % vm_network_name) + + def create_n1kv_port(self, port, vm_network_name): + """ + Create a port on the VSM. + + :param port: port dict + :param vm_network_name: name of the VM network which imports this port + """ + body = {'id': port['id'], + 'macAddress': port['mac_address']} + if port.get('fixed_ips'): + body['ipAddress'] = port['fixed_ips'][0]['ip_address'] + body['subnetId'] = port['fixed_ips'][0]['subnet_id'] + return self._post(self.ports_path % vm_network_name, + body=body) + + def update_n1kv_port(self, vm_network_name, port_id, body): + """ + Update a port on the VSM. + + Update the mac address associated with the port + + :param vm_network_name: name of the VM network which imports this port + :param port_id: UUID of the port + :param body: dict of the arguments to be updated + """ + return self._post(self.port_path % (vm_network_name, port_id), + body=body) + + def delete_n1kv_port(self, vm_network_name, port_id): + """ + Delete a port on the VSM. 
+ + :param vm_network_name: name of the VM network which imports this port + :param port_id: UUID of the port + """ + return self._delete(self.port_path % (vm_network_name, port_id)) + + def _do_request(self, method, action, body=None, + headers=None): + """ + Perform the HTTP request. + + The response is in either JSON format or plain text. A GET method will + invoke a JSON response while a PUT/POST/DELETE returns message from the + VSM in plain text format. + Exception is raised when VSM replies with an INTERNAL SERVER ERROR HTTP + status code (500) i.e. an error has occurred on the VSM or SERVICE + UNAVAILABLE (503) i.e. VSM is not reachable. + + :param method: type of the HTTP request. POST, GET, PUT or DELETE + :param action: path to which the client makes request + :param body: dict for arguments which are sent as part of the request + :param headers: header for the HTTP request + :returns: JSON or plain text in HTTP response + """ + action = self.action_prefix + action + if not headers and self.hosts: + headers = self._get_auth_header(self.hosts[0]) + headers['Content-Type'] = self._set_content_type('json') + headers['Accept'] = self._set_content_type('json') + if body: + body = jsonutils.dumps(body, indent=2) + LOG.debug(_("req: %s"), body) + try: + resp = self.pool.spawn(requests.request, + method, + url=action, + data=body, + headers=headers, + timeout=self.timeout).wait() + except Exception as e: + raise c_exc.VSMConnectionFailed(reason=e) + LOG.debug(_("status_code %s"), resp.status_code) + if resp.status_code == requests.codes.OK: + if 'application/json' in resp.headers['content-type']: + try: + return resp.json() + except ValueError: + return {} + elif 'text/plain' in resp.headers['content-type']: + LOG.debug(_("VSM: %s"), resp.text) + else: + raise c_exc.VSMError(reason=resp.text) + + def _set_content_type(self, format=None): + """ + Set the mime-type to either 'xml' or 'json'. + + :param format: format to be set. 
+ :return: mime-type string + """ + if not format: + format = self.format + return "application/%s" % format + + def _delete(self, action, body=None, headers=None): + return self._do_request("DELETE", action, body=body, + headers=headers) + + def _get(self, action, body=None, headers=None): + return self._do_request("GET", action, body=body, + headers=headers) + + def _post(self, action, body=None, headers=None): + return self._do_request("POST", action, body=body, + headers=headers) + + def _put(self, action, body=None, headers=None): + return self._do_request("PUT", action, body=body, + headers=headers) + + def _get_vsm_hosts(self): + """ + Retrieve a list of VSM ip addresses. + + :return: list of host ip addresses + """ + return [cr[c_const.CREDENTIAL_NAME] for cr in + network_db_v2.get_all_n1kv_credentials()] + + def _get_auth_header(self, host_ip): + """ + Retrieve header with auth info for the VSM. + + :param host_ip: IP address of the VSM + :return: authorization header dict + """ + username = c_cred.Store.get_username(host_ip) + password = c_cred.Store.get_password(host_ip) + auth = base64.encodestring("%s:%s" % (username, password)).rstrip() + header = {"Authorization": "Basic %s" % auth} + return header + + def get_clusters(self): + """Fetches a list of all vxlan gateway clusters.""" + return self._get(self.clusters_path) + + def create_encapsulation_profile(self, encap): + """ + Create an encapsulation profile on VSM. + + :param encap: encapsulation dict + """ + body = {'name': encap['name'], + 'addMappings': encap['add_segment_list'], + 'delMappings': encap['del_segment_list']} + return self._post(self.encap_profiles_path, + body=body) + + def update_encapsulation_profile(self, context, profile_name, body): + """ + Adds a vlan to bridge-domain mapping to an encapsulation profile. 
+ + :param profile_name: Name of the encapsulation profile + :param body: mapping dictionary + """ + return self._post(self.encap_profile_path + % profile_name, body=body) + + def delete_encapsulation_profile(self, name): + """ + Delete an encapsulation profile on VSM. + + :param name: name of the encapsulation profile to be deleted + """ + return self._delete(self.encap_profile_path % name) diff --git a/neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py b/neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py new file mode 100644 index 000000000..6ef51f3d0 --- /dev/null +++ b/neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py @@ -0,0 +1,1438 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 Cisco Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Aruna Kushwaha, Cisco Systems, Inc. +# @author: Rudrajit Tapadar, Cisco Systems, Inc. +# @author: Abhishek Raut, Cisco Systems, Inc. +# @author: Sergey Sudakovich, Cisco Systems, Inc. 
+ +import eventlet + +from oslo.config import cfg as q_conf + +from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api +from neutron.api.rpc.agentnotifiers import l3_rpc_agent_api +from neutron.api.v2 import attributes +from neutron.common import constants +from neutron.common import exceptions as n_exc +from neutron.common import rpc_compat +from neutron.common import topics +from neutron.common import utils +from neutron.db import agents_db +from neutron.db import agentschedulers_db +from neutron.db import db_base_plugin_v2 +from neutron.db import dhcp_rpc_base +from neutron.db import external_net_db +from neutron.db import extraroute_db +from neutron.db import l3_agentschedulers_db +from neutron.db import l3_rpc_base +from neutron.db import portbindings_db +from neutron.extensions import portbindings +from neutron.extensions import providernet +from neutron.openstack.common import importutils +from neutron.openstack.common import log as logging +from neutron.openstack.common import uuidutils as uuidutils +from neutron.plugins.cisco.common import cisco_constants as c_const +from neutron.plugins.cisco.common import cisco_credentials_v2 as c_cred +from neutron.plugins.cisco.common import cisco_exceptions +from neutron.plugins.cisco.common import config as c_conf +from neutron.plugins.cisco.db import n1kv_db_v2 +from neutron.plugins.cisco.db import network_db_v2 +from neutron.plugins.cisco.extensions import n1kv +from neutron.plugins.cisco.n1kv import n1kv_client +from neutron.plugins.common import constants as svc_constants + + +LOG = logging.getLogger(__name__) + + +class N1kvRpcCallbacks(rpc_compat.RpcCallback, + dhcp_rpc_base.DhcpRpcCallbackMixin, + l3_rpc_base.L3RpcCallbackMixin): + + """Class to handle agent RPC calls.""" + + # Set RPC API version to 1.1 by default. 
+ RPC_API_VERSION = '1.1' + + +class N1kvNeutronPluginV2(db_base_plugin_v2.NeutronDbPluginV2, + external_net_db.External_net_db_mixin, + extraroute_db.ExtraRoute_db_mixin, + portbindings_db.PortBindingMixin, + n1kv_db_v2.NetworkProfile_db_mixin, + n1kv_db_v2.PolicyProfile_db_mixin, + network_db_v2.Credential_db_mixin, + l3_agentschedulers_db.L3AgentSchedulerDbMixin, + agentschedulers_db.DhcpAgentSchedulerDbMixin): + + """ + Implement the Neutron abstractions using Cisco Nexus1000V. + + Refer README file for the architecture, new features, and + workflow + + """ + + # This attribute specifies whether the plugin supports or not + # bulk operations. + __native_bulk_support = False + supported_extension_aliases = ["provider", "agent", + "n1kv", "network_profile", + "policy_profile", "external-net", "router", + "binding", "credential", + "l3_agent_scheduler", + "dhcp_agent_scheduler"] + + def __init__(self, configfile=None): + """ + Initialize Nexus1000V Neutron plugin. + + 1. Initialize VIF type to OVS + 2. Initialize Nexus1000v and Credential DB + 3. 
Establish communication with Cisco Nexus1000V + """ + super(N1kvNeutronPluginV2, self).__init__() + self.base_binding_dict = { + portbindings.VIF_TYPE: portbindings.VIF_TYPE_OVS, + portbindings.VIF_DETAILS: { + # TODO(rkukura): Replace with new VIF security details + portbindings.CAP_PORT_FILTER: + 'security-group' in self.supported_extension_aliases}} + c_cred.Store.initialize() + self._setup_vsm() + self._setup_rpc() + self.network_scheduler = importutils.import_object( + q_conf.CONF.network_scheduler_driver + ) + self.router_scheduler = importutils.import_object( + q_conf.CONF.router_scheduler_driver + ) + + def _setup_rpc(self): + # RPC support + self.service_topics = {svc_constants.CORE: topics.PLUGIN, + svc_constants.L3_ROUTER_NAT: topics.L3PLUGIN} + self.conn = rpc_compat.create_connection(new=True) + self.endpoints = [N1kvRpcCallbacks(), agents_db.AgentExtRpcCallback()] + for svc_topic in self.service_topics.values(): + self.conn.create_consumer(svc_topic, self.endpoints, fanout=False) + self.dhcp_agent_notifier = dhcp_rpc_agent_api.DhcpAgentNotifyAPI() + self.l3_agent_notifier = l3_rpc_agent_api.L3AgentNotifyAPI() + # Consume from all consumers in threads + self.conn.consume_in_threads() + + def _setup_vsm(self): + """ + Setup Cisco Nexus 1000V related parameters and pull policy profiles. + + Retrieve all the policy profiles from the VSM when the plugin is + is instantiated for the first time and then continue to poll for + policy profile updates. + """ + LOG.debug(_('_setup_vsm')) + self.agent_vsm = True + # Poll VSM for create/delete of policy profile. + eventlet.spawn(self._poll_policy_profiles) + + def _poll_policy_profiles(self): + """Start a green thread to pull policy profiles from VSM.""" + while True: + self._populate_policy_profiles() + eventlet.sleep(c_conf.CISCO_N1K.poll_duration) + + def _populate_policy_profiles(self): + """ + Populate all the policy profiles from VSM. 
+ + The tenant id is not available when the policy profiles are polled + from the VSM. Hence we associate the policy profiles with fake + tenant-ids. + """ + LOG.debug(_('_populate_policy_profiles')) + try: + n1kvclient = n1kv_client.Client() + policy_profiles = n1kvclient.list_port_profiles() + vsm_profiles = {} + plugin_profiles_set = set() + # Fetch policy profiles from VSM + for profile_name in policy_profiles: + profile_id = (policy_profiles + [profile_name][c_const.PROPERTIES][c_const.ID]) + vsm_profiles[profile_id] = profile_name + # Fetch policy profiles previously populated + for profile in n1kv_db_v2.get_policy_profiles(): + plugin_profiles_set.add(profile.id) + vsm_profiles_set = set(vsm_profiles) + # Update database if the profile sets differ. + if vsm_profiles_set ^ plugin_profiles_set: + # Add profiles in database if new profiles were created in VSM + for pid in vsm_profiles_set - plugin_profiles_set: + self._add_policy_profile(vsm_profiles[pid], pid) + + # Delete profiles from database if profiles were deleted in VSM + for pid in plugin_profiles_set - vsm_profiles_set: + self._delete_policy_profile(pid) + self._remove_all_fake_policy_profiles() + except (cisco_exceptions.VSMError, + cisco_exceptions.VSMConnectionFailed): + LOG.warning(_('No policy profile populated from VSM')) + + def _extend_network_dict_provider(self, context, network): + """Add extended network parameters.""" + binding = n1kv_db_v2.get_network_binding(context.session, + network['id']) + network[providernet.NETWORK_TYPE] = binding.network_type + if binding.network_type == c_const.NETWORK_TYPE_OVERLAY: + network[providernet.PHYSICAL_NETWORK] = None + network[providernet.SEGMENTATION_ID] = binding.segmentation_id + network[n1kv.MULTICAST_IP] = binding.multicast_ip + elif binding.network_type == c_const.NETWORK_TYPE_VLAN: + network[providernet.PHYSICAL_NETWORK] = binding.physical_network + network[providernet.SEGMENTATION_ID] = binding.segmentation_id + elif binding.network_type == 
c_const.NETWORK_TYPE_TRUNK: + network[providernet.PHYSICAL_NETWORK] = binding.physical_network + network[providernet.SEGMENTATION_ID] = None + network[n1kv.MULTICAST_IP] = None + elif binding.network_type == c_const.NETWORK_TYPE_MULTI_SEGMENT: + network[providernet.PHYSICAL_NETWORK] = None + network[providernet.SEGMENTATION_ID] = None + network[n1kv.MULTICAST_IP] = None + + def _process_provider_create(self, context, attrs): + network_type = attrs.get(providernet.NETWORK_TYPE) + physical_network = attrs.get(providernet.PHYSICAL_NETWORK) + segmentation_id = attrs.get(providernet.SEGMENTATION_ID) + + network_type_set = attributes.is_attr_set(network_type) + physical_network_set = attributes.is_attr_set(physical_network) + segmentation_id_set = attributes.is_attr_set(segmentation_id) + + if not (network_type_set or physical_network_set or + segmentation_id_set): + return (None, None, None) + + if not network_type_set: + msg = _("provider:network_type required") + raise n_exc.InvalidInput(error_message=msg) + elif network_type == c_const.NETWORK_TYPE_VLAN: + if not segmentation_id_set: + msg = _("provider:segmentation_id required") + raise n_exc.InvalidInput(error_message=msg) + if segmentation_id < 1 or segmentation_id > 4094: + msg = _("provider:segmentation_id out of range " + "(1 through 4094)") + raise n_exc.InvalidInput(error_message=msg) + elif network_type == c_const.NETWORK_TYPE_OVERLAY: + if physical_network_set: + msg = _("provider:physical_network specified for Overlay " + "network") + raise n_exc.InvalidInput(error_message=msg) + else: + physical_network = None + if not segmentation_id_set: + msg = _("provider:segmentation_id required") + raise n_exc.InvalidInput(error_message=msg) + if segmentation_id < 5000: + msg = _("provider:segmentation_id out of range " + "(5000+)") + raise n_exc.InvalidInput(error_message=msg) + else: + msg = _("provider:network_type %s not supported"), network_type + raise n_exc.InvalidInput(error_message=msg) + + if network_type 
== c_const.NETWORK_TYPE_VLAN: + if physical_network_set: + network_profiles = n1kv_db_v2.get_network_profiles() + for network_profile in network_profiles: + if physical_network == network_profile[ + 'physical_network']: + break + else: + msg = (_("Unknown provider:physical_network %s"), + physical_network) + raise n_exc.InvalidInput(error_message=msg) + else: + msg = _("provider:physical_network required") + raise n_exc.InvalidInput(error_message=msg) + + return (network_type, physical_network, segmentation_id) + + def _check_provider_update(self, context, attrs): + """Handle Provider network updates.""" + network_type = attrs.get(providernet.NETWORK_TYPE) + physical_network = attrs.get(providernet.PHYSICAL_NETWORK) + segmentation_id = attrs.get(providernet.SEGMENTATION_ID) + + network_type_set = attributes.is_attr_set(network_type) + physical_network_set = attributes.is_attr_set(physical_network) + segmentation_id_set = attributes.is_attr_set(segmentation_id) + + if not (network_type_set or physical_network_set or + segmentation_id_set): + return + + # TBD : Need to handle provider network updates + msg = _("Plugin does not support updating provider attributes") + raise n_exc.InvalidInput(error_message=msg) + + def _get_cluster(self, segment1, segment2, clusters): + """ + Returns a cluster to apply the segment mapping + + :param segment1: UUID of segment to be mapped + :param segment2: UUID of segment to be mapped + :param clusters: List of clusters + """ + for cluster in sorted(clusters, key=lambda k: k['size']): + for mapping in cluster[c_const.MAPPINGS]: + for segment in mapping[c_const.SEGMENTS]: + if segment1 in segment or segment2 in segment: + break + else: + cluster['size'] += 2 + return cluster['encapProfileName'] + break + return + + def _extend_mapping_dict(self, context, mapping_dict, segment): + """ + Extend a mapping dictionary with dot1q tag and bridge-domain name. 
+ + :param context: neutron api request context + :param mapping_dict: dictionary to populate values + :param segment: id of the segment being populated + """ + net = self.get_network(context, segment) + if net[providernet.NETWORK_TYPE] == c_const.NETWORK_TYPE_VLAN: + mapping_dict['dot1q'] = str(net[providernet.SEGMENTATION_ID]) + else: + mapping_dict['bridgeDomain'] = (net['name'] + + c_const.BRIDGE_DOMAIN_SUFFIX) + + def _send_add_multi_segment_request(self, context, net_id, segment_pairs): + """ + Send Add multi-segment network request to VSM. + + :param context: neutron api request context + :param net_id: UUID of the multi-segment network + :param segment_pairs: List of segments in UUID pairs + that need to be bridged + """ + + if not segment_pairs: + return + + session = context.session + n1kvclient = n1kv_client.Client() + clusters = n1kvclient.get_clusters() + online_clusters = [] + encap_dict = {} + for cluster in clusters['body'][c_const.SET]: + cluster = cluster[c_const.PROPERTIES] + if cluster[c_const.STATE] == c_const.ONLINE: + cluster['size'] = 0 + for mapping in cluster[c_const.MAPPINGS]: + cluster['size'] += ( + len(mapping[c_const.SEGMENTS])) + online_clusters.append(cluster) + for (segment1, segment2) in segment_pairs: + encap_profile = self._get_cluster(segment1, segment2, + online_clusters) + if encap_profile is not None: + if encap_profile in encap_dict: + profile_dict = encap_dict[encap_profile] + else: + profile_dict = {'name': encap_profile, + 'addMappings': [], + 'delMappings': []} + encap_dict[encap_profile] = profile_dict + mapping_dict = {} + self._extend_mapping_dict(context, + mapping_dict, segment1) + self._extend_mapping_dict(context, + mapping_dict, segment2) + profile_dict['addMappings'].append(mapping_dict) + n1kv_db_v2.add_multi_segment_encap_profile_name(session, + net_id, + (segment1, + segment2), + encap_profile) + else: + raise cisco_exceptions.NoClusterFound + + for profile in encap_dict: + 
n1kvclient.update_encapsulation_profile(context, profile, + encap_dict[profile]) + + def _send_del_multi_segment_request(self, context, net_id, segment_pairs): + """ + Send Delete multi-segment network request to VSM. + + :param context: neutron api request context + :param net_id: UUID of the multi-segment network + :param segment_pairs: List of segments in UUID pairs + whose bridging needs to be removed + """ + if not segment_pairs: + return + session = context.session + encap_dict = {} + n1kvclient = n1kv_client.Client() + for (segment1, segment2) in segment_pairs: + binding = ( + n1kv_db_v2.get_multi_segment_network_binding(session, net_id, + (segment1, + segment2))) + encap_profile = binding['encap_profile_name'] + if encap_profile in encap_dict: + profile_dict = encap_dict[encap_profile] + else: + profile_dict = {'name': encap_profile, + 'addMappings': [], + 'delMappings': []} + encap_dict[encap_profile] = profile_dict + mapping_dict = {} + self._extend_mapping_dict(context, + mapping_dict, segment1) + self._extend_mapping_dict(context, + mapping_dict, segment2) + profile_dict['delMappings'].append(mapping_dict) + + for profile in encap_dict: + n1kvclient.update_encapsulation_profile(context, profile, + encap_dict[profile]) + + def _get_encap_segments(self, context, segment_pairs): + """ + Get the list of segments in encapsulation profile format. + + :param context: neutron api request context + :param segment_pairs: List of segments that need to be bridged + """ + member_list = [] + for pair in segment_pairs: + (segment, dot1qtag) = pair + member_dict = {} + net = self.get_network(context, segment) + member_dict['bridgeDomain'] = (net['name'] + + c_const.BRIDGE_DOMAIN_SUFFIX) + member_dict['dot1q'] = dot1qtag + member_list.append(member_dict) + return member_list + + def _populate_member_segments(self, context, network, segment_pairs, oper): + """ + Populate trunk network dict with member segments. 
+ + :param context: neutron api request context + :param network: Dictionary containing the trunk network information + :param segment_pairs: List of segments in UUID pairs + that needs to be trunked + :param oper: Operation to be performed + """ + LOG.debug(_('_populate_member_segments %s'), segment_pairs) + trunk_list = [] + for (segment, dot1qtag) in segment_pairs: + net = self.get_network(context, segment) + member_dict = {'segment': net['name'], + 'dot1qtag': dot1qtag} + trunk_list.append(member_dict) + if oper == n1kv.SEGMENT_ADD: + network['add_segment_list'] = trunk_list + elif oper == n1kv.SEGMENT_DEL: + network['del_segment_list'] = trunk_list + + def _parse_multi_segments(self, context, attrs, param): + """ + Parse the multi-segment network attributes. + + :param context: neutron api request context + :param attrs: Attributes of the network + :param param: Additional parameter indicating an add + or del operation + :returns: List of segment UUIDs in set pairs + """ + pair_list = [] + valid_seg_types = [c_const.NETWORK_TYPE_VLAN, + c_const.NETWORK_TYPE_OVERLAY] + segments = attrs.get(param) + if not attributes.is_attr_set(segments): + return pair_list + for pair in segments.split(','): + segment1, sep, segment2 = pair.partition(':') + if (uuidutils.is_uuid_like(segment1) and + uuidutils.is_uuid_like(segment2)): + binding1 = n1kv_db_v2.get_network_binding(context.session, + segment1) + binding2 = n1kv_db_v2.get_network_binding(context.session, + segment2) + if (binding1.network_type not in valid_seg_types or + binding2.network_type not in valid_seg_types or + binding1.network_type == binding2.network_type): + msg = _("Invalid pairing supplied") + raise n_exc.InvalidInput(error_message=msg) + else: + pair_list.append((segment1, segment2)) + else: + LOG.debug(_('Invalid UUID supplied in %s'), pair) + msg = _("Invalid UUID supplied") + raise n_exc.InvalidInput(error_message=msg) + return pair_list + + def _parse_trunk_segments(self, context, attrs, param, 
physical_network, + sub_type): + """ + Parse the trunk network attributes. + + :param context: neutron api request context + :param attrs: Attributes of the network + :param param: Additional parameter indicating an add + or del operation + :param physical_network: Physical network of the trunk segment + :param sub_type: Sub-type of the trunk segment + :returns: List of segment UUIDs and dot1qtag (for vxlan) in set pairs + """ + pair_list = [] + segments = attrs.get(param) + if not attributes.is_attr_set(segments): + return pair_list + for pair in segments.split(','): + segment, sep, dot1qtag = pair.partition(':') + if sub_type == c_const.NETWORK_TYPE_VLAN: + dot1qtag = '' + if uuidutils.is_uuid_like(segment): + binding = n1kv_db_v2.get_network_binding(context.session, + segment) + if binding.network_type == c_const.NETWORK_TYPE_TRUNK: + msg = _("Cannot add a trunk segment '%s' as a member of " + "another trunk segment") % segment + raise n_exc.InvalidInput(error_message=msg) + elif binding.network_type == c_const.NETWORK_TYPE_VLAN: + if sub_type == c_const.NETWORK_TYPE_OVERLAY: + msg = _("Cannot add vlan segment '%s' as a member of " + "a vxlan trunk segment") % segment + raise n_exc.InvalidInput(error_message=msg) + if not physical_network: + physical_network = binding.physical_network + elif physical_network != binding.physical_network: + msg = _("Network UUID '%s' belongs to a different " + "physical network") % segment + raise n_exc.InvalidInput(error_message=msg) + elif binding.network_type == c_const.NETWORK_TYPE_OVERLAY: + if sub_type == c_const.NETWORK_TYPE_VLAN: + msg = _("Cannot add vxlan segment '%s' as a member of " + "a vlan trunk segment") % segment + raise n_exc.InvalidInput(error_message=msg) + try: + if not utils.is_valid_vlan_tag(int(dot1qtag)): + msg = _("Vlan tag '%s' is out of range") % dot1qtag + raise n_exc.InvalidInput(error_message=msg) + except ValueError: + msg = _("Vlan tag '%s' is not an integer " + "value") % dot1qtag + raise 
n_exc.InvalidInput(error_message=msg) + pair_list.append((segment, dot1qtag)) + else: + LOG.debug(_('%s is not a valid uuid'), segment) + msg = _("'%s' is not a valid UUID") % segment + raise n_exc.InvalidInput(error_message=msg) + return pair_list + + def _extend_network_dict_member_segments(self, context, network): + """Add the extended parameter member segments to the network.""" + members = [] + binding = n1kv_db_v2.get_network_binding(context.session, + network['id']) + if binding.network_type == c_const.NETWORK_TYPE_TRUNK: + members = n1kv_db_v2.get_trunk_members(context.session, + network['id']) + elif binding.network_type == c_const.NETWORK_TYPE_MULTI_SEGMENT: + members = n1kv_db_v2.get_multi_segment_members(context.session, + network['id']) + network[n1kv.MEMBER_SEGMENTS] = members + + def _extend_network_dict_profile(self, context, network): + """Add the extended parameter network profile to the network.""" + binding = n1kv_db_v2.get_network_binding(context.session, + network['id']) + network[n1kv.PROFILE_ID] = binding.profile_id + + def _extend_port_dict_profile(self, context, port): + """Add the extended parameter port profile to the port.""" + binding = n1kv_db_v2.get_port_binding(context.session, + port['id']) + port[n1kv.PROFILE_ID] = binding.profile_id + + def _process_network_profile(self, context, network): + """Validate network profile exists.""" + profile_id = network.get(n1kv.PROFILE_ID) + profile_id_set = attributes.is_attr_set(profile_id) + if not profile_id_set: + profile_name = c_conf.CISCO_N1K.default_network_profile + net_p = self._get_network_profile_by_name(context.session, + profile_name) + profile_id = net_p['id'] + network['n1kv:profile_id'] = profile_id + return profile_id + + def _process_policy_profile(self, context, attrs): + """Validates whether policy profile exists.""" + profile_id = attrs.get(n1kv.PROFILE_ID) + profile_id_set = attributes.is_attr_set(profile_id) + if not profile_id_set: + msg = _("n1kv:profile_id does not 
exist") + raise n_exc.InvalidInput(error_message=msg) + if not self._policy_profile_exists(profile_id): + msg = _("n1kv:profile_id does not exist") + raise n_exc.InvalidInput(error_message=msg) + + return profile_id + + def _send_create_logical_network_request(self, network_profile, tenant_id): + """ + Send create logical network request to VSM. + + :param network_profile: network profile dictionary + :param tenant_id: UUID representing the tenant + """ + LOG.debug(_('_send_create_logical_network')) + n1kvclient = n1kv_client.Client() + n1kvclient.create_logical_network(network_profile, tenant_id) + + def _send_delete_logical_network_request(self, network_profile): + """ + Send delete logical network request to VSM. + + :param network_profile: network profile dictionary + """ + LOG.debug('_send_delete_logical_network') + n1kvclient = n1kv_client.Client() + logical_network_name = (network_profile['id'] + + c_const.LOGICAL_NETWORK_SUFFIX) + n1kvclient.delete_logical_network(logical_network_name) + + def _send_create_network_profile_request(self, context, profile): + """ + Send create network profile request to VSM. + + :param context: neutron api request context + :param profile: network profile dictionary + """ + LOG.debug(_('_send_create_network_profile_request: %s'), profile['id']) + n1kvclient = n1kv_client.Client() + n1kvclient.create_network_segment_pool(profile, context.tenant_id) + + def _send_update_network_profile_request(self, profile): + """ + Send update network profile request to VSM. + + :param profile: network profile dictionary + """ + LOG.debug(_('_send_update_network_profile_request: %s'), profile['id']) + n1kvclient = n1kv_client.Client() + n1kvclient.update_network_segment_pool(profile) + + def _send_delete_network_profile_request(self, profile): + """ + Send delete network profile request to VSM. 
+ + :param profile: network profile dictionary + """ + LOG.debug(_('_send_delete_network_profile_request: %s'), + profile['name']) + n1kvclient = n1kv_client.Client() + n1kvclient.delete_network_segment_pool(profile['id']) + + def _send_create_network_request(self, context, network, segment_pairs): + """ + Send create network request to VSM. + + Create a bridge domain for network of type Overlay. + :param context: neutron api request context + :param network: network dictionary + :param segment_pairs: List of segments in UUID pairs + that need to be bridged + """ + LOG.debug(_('_send_create_network_request: %s'), network['id']) + profile = self.get_network_profile(context, + network[n1kv.PROFILE_ID]) + n1kvclient = n1kv_client.Client() + if network[providernet.NETWORK_TYPE] == c_const.NETWORK_TYPE_OVERLAY: + n1kvclient.create_bridge_domain(network, profile['sub_type']) + if network[providernet.NETWORK_TYPE] == c_const.NETWORK_TYPE_TRUNK: + self._populate_member_segments(context, network, segment_pairs, + n1kv.SEGMENT_ADD) + network['del_segment_list'] = [] + if profile['sub_type'] == c_const.NETWORK_TYPE_OVERLAY: + encap_dict = {'name': (network['name'] + + c_const.ENCAPSULATION_PROFILE_SUFFIX), + 'add_segment_list': ( + self._get_encap_segments(context, + segment_pairs)), + 'del_segment_list': []} + n1kvclient.create_encapsulation_profile(encap_dict) + n1kvclient.create_network_segment(network, profile) + + def _send_update_network_request(self, context, network, add_segments, + del_segments): + """ + Send update network request to VSM. 
+ + :param context: neutron api request context + :param network: network dictionary + :param add_segments: List of segments bindings + that need to be deleted + :param del_segments: List of segments bindings + that need to be deleted + """ + LOG.debug(_('_send_update_network_request: %s'), network['id']) + db_session = context.session + profile = n1kv_db_v2.get_network_profile( + db_session, network[n1kv.PROFILE_ID]) + n1kvclient = n1kv_client.Client() + body = {'description': network['name'], + 'id': network['id'], + 'networkSegmentPool': profile['id'], + 'vlan': network[providernet.SEGMENTATION_ID], + 'mode': 'access', + 'segmentType': profile['segment_type'], + 'addSegments': [], + 'delSegments': []} + if network[providernet.NETWORK_TYPE] == c_const.NETWORK_TYPE_TRUNK: + self._populate_member_segments(context, network, add_segments, + n1kv.SEGMENT_ADD) + self._populate_member_segments(context, network, del_segments, + n1kv.SEGMENT_DEL) + body['mode'] = c_const.NETWORK_TYPE_TRUNK + body['segmentType'] = profile['sub_type'] + body['addSegments'] = network['add_segment_list'] + body['delSegments'] = network['del_segment_list'] + LOG.debug(_('add_segments=%s'), body['addSegments']) + LOG.debug(_('del_segments=%s'), body['delSegments']) + if profile['sub_type'] == c_const.NETWORK_TYPE_OVERLAY: + encap_profile = (network['id'] + + c_const.ENCAPSULATION_PROFILE_SUFFIX) + encap_dict = {'name': encap_profile, + 'addMappings': ( + self._get_encap_segments(context, + add_segments)), + 'delMappings': ( + self._get_encap_segments(context, + del_segments))} + n1kvclient.update_encapsulation_profile(context, encap_profile, + encap_dict) + n1kvclient.update_network_segment(network['id'], body) + + def _send_delete_network_request(self, context, network): + """ + Send delete network request to VSM. + + Delete bridge domain if network is of type Overlay. + Delete encapsulation profile if network is of type OVERLAY Trunk. 
+ :param context: neutron api request context + :param network: network dictionary + """ + LOG.debug(_('_send_delete_network_request: %s'), network['id']) + n1kvclient = n1kv_client.Client() + session = context.session + if network[providernet.NETWORK_TYPE] == c_const.NETWORK_TYPE_OVERLAY: + name = network['id'] + c_const.BRIDGE_DOMAIN_SUFFIX + n1kvclient.delete_bridge_domain(name) + elif network[providernet.NETWORK_TYPE] == c_const.NETWORK_TYPE_TRUNK: + profile = self.get_network_profile( + context, network[n1kv.PROFILE_ID]) + if profile['sub_type'] == c_const.NETWORK_TYPE_OVERLAY: + profile_name = (network['id'] + + c_const.ENCAPSULATION_PROFILE_SUFFIX) + n1kvclient.delete_encapsulation_profile(profile_name) + elif (network[providernet.NETWORK_TYPE] == + c_const.NETWORK_TYPE_MULTI_SEGMENT): + encap_dict = n1kv_db_v2.get_multi_segment_encap_dict(session, + network['id']) + for profile in encap_dict: + profile_dict = {'name': profile, + 'addSegments': [], + 'delSegments': []} + for segment_pair in encap_dict[profile]: + mapping_dict = {} + (segment1, segment2) = segment_pair + self._extend_mapping_dict(context, + mapping_dict, segment1) + self._extend_mapping_dict(context, + mapping_dict, segment2) + profile_dict['delSegments'].append(mapping_dict) + n1kvclient.update_encapsulation_profile(context, profile, + profile_dict) + n1kvclient.delete_network_segment(network['id']) + + def _send_create_subnet_request(self, context, subnet): + """ + Send create subnet request to VSM. + + :param context: neutron api request context + :param subnet: subnet dictionary + """ + LOG.debug(_('_send_create_subnet_request: %s'), subnet['id']) + n1kvclient = n1kv_client.Client() + n1kvclient.create_ip_pool(subnet) + + def _send_update_subnet_request(self, subnet): + """ + Send update subnet request to VSM. 
+ + :param subnet: subnet dictionary + """ + LOG.debug(_('_send_update_subnet_request: %s'), subnet['name']) + n1kvclient = n1kv_client.Client() + n1kvclient.update_ip_pool(subnet) + + def _send_delete_subnet_request(self, context, subnet): + """ + Send delete subnet request to VSM. + + :param context: neutron api request context + :param subnet: subnet dictionary + """ + LOG.debug(_('_send_delete_subnet_request: %s'), subnet['name']) + body = {'ipPool': subnet['id'], 'deleteSubnet': True} + n1kvclient = n1kv_client.Client() + n1kvclient.update_network_segment(subnet['network_id'], body=body) + n1kvclient.delete_ip_pool(subnet['id']) + + def _send_create_port_request(self, + context, + port, + port_count, + policy_profile, + vm_network_name): + """ + Send create port request to VSM. + + Create a VM network for a network and policy profile combination. + If the VM network already exists, bind this port to the existing + VM network on the VSM. + :param context: neutron api request context + :param port: port dictionary + :param port_count: integer representing the number of ports in one + VM Network + :param policy_profile: object of type policy profile + :param vm_network_name: string representing the name of the VM + network + """ + LOG.debug(_('_send_create_port_request: %s'), port) + n1kvclient = n1kv_client.Client() + if port_count == 1: + n1kvclient.create_vm_network(port, + vm_network_name, + policy_profile) + else: + n1kvclient.create_n1kv_port(port, vm_network_name) + + def _send_update_port_request(self, port_id, mac_address, vm_network_name): + """ + Send update port request to VSM. 
+ + :param port_id: UUID representing port to update + :param mac_address: string representing the mac address + :param vm_network_name: VM network name to which the port is bound + """ + LOG.debug(_('_send_update_port_request: %s'), port_id) + body = {'portId': port_id, + 'macAddress': mac_address} + n1kvclient = n1kv_client.Client() + n1kvclient.update_n1kv_port(vm_network_name, port_id, body) + + def _send_delete_port_request(self, context, port, vm_network): + """ + Send delete port request to VSM. + + Delete the port on the VSM. If it is the last port on the VM Network, + delete the VM Network. + :param context: neutron api request context + :param port: port object which is to be deleted + :param vm_network: VM network object with which the port is associated + """ + LOG.debug(_('_send_delete_port_request: %s'), port['id']) + n1kvclient = n1kv_client.Client() + n1kvclient.delete_n1kv_port(vm_network['name'], port['id']) + if vm_network['port_count'] == 0: + n1kvclient.delete_vm_network(vm_network['name']) + + def _get_segmentation_id(self, context, id): + """ + Retrieve segmentation ID for a given network. + + :param context: neutron api request context + :param id: UUID of the network + :returns: segmentation ID for the network + """ + session = context.session + binding = n1kv_db_v2.get_network_binding(session, id) + return binding.segmentation_id + + def create_network(self, context, network): + """ + Create network based on network profile. 
+ + :param context: neutron api request context + :param network: network dictionary + :returns: network object + """ + (network_type, physical_network, + segmentation_id) = self._process_provider_create(context, + network['network']) + profile_id = self._process_network_profile(context, network['network']) + segment_pairs = None + LOG.debug(_('Create network: profile_id=%s'), profile_id) + session = context.session + with session.begin(subtransactions=True): + if not network_type: + # tenant network + (physical_network, network_type, segmentation_id, + multicast_ip) = n1kv_db_v2.alloc_network(session, + profile_id) + LOG.debug(_('Physical_network %(phy_net)s, ' + 'seg_type %(net_type)s, ' + 'seg_id %(seg_id)s, ' + 'multicast_ip %(multicast_ip)s'), + {'phy_net': physical_network, + 'net_type': network_type, + 'seg_id': segmentation_id, + 'multicast_ip': multicast_ip}) + if network_type == c_const.NETWORK_TYPE_MULTI_SEGMENT: + segment_pairs = ( + self._parse_multi_segments(context, network['network'], + n1kv.SEGMENT_ADD)) + LOG.debug(_('Seg list %s '), segment_pairs) + elif network_type == c_const.NETWORK_TYPE_TRUNK: + network_profile = self.get_network_profile(context, + profile_id) + segment_pairs = ( + self._parse_trunk_segments(context, network['network'], + n1kv.SEGMENT_ADD, + physical_network, + network_profile['sub_type'] + )) + LOG.debug(_('Seg list %s '), segment_pairs) + else: + if not segmentation_id: + raise n_exc.TenantNetworksDisabled() + else: + # provider network + if network_type == c_const.NETWORK_TYPE_VLAN: + network_profile = self.get_network_profile(context, + profile_id) + seg_min, seg_max = self._get_segment_range( + network_profile['segment_range']) + if not seg_min <= segmentation_id <= seg_max: + raise cisco_exceptions.VlanIDOutsidePool + n1kv_db_v2.reserve_specific_vlan(session, + physical_network, + segmentation_id) + multicast_ip = "0.0.0.0" + net = super(N1kvNeutronPluginV2, self).create_network(context, + network) + 
n1kv_db_v2.add_network_binding(session, + net['id'], + network_type, + physical_network, + segmentation_id, + multicast_ip, + profile_id, + segment_pairs) + self._process_l3_create(context, net, network['network']) + self._extend_network_dict_provider(context, net) + self._extend_network_dict_profile(context, net) + try: + if network_type == c_const.NETWORK_TYPE_MULTI_SEGMENT: + self._send_add_multi_segment_request(context, net['id'], + segment_pairs) + else: + self._send_create_network_request(context, net, segment_pairs) + except(cisco_exceptions.VSMError, + cisco_exceptions.VSMConnectionFailed): + super(N1kvNeutronPluginV2, self).delete_network(context, net['id']) + else: + LOG.debug(_("Created network: %s"), net['id']) + return net + + def update_network(self, context, id, network): + """ + Update network parameters. + + :param context: neutron api request context + :param id: UUID representing the network to update + :returns: updated network object + """ + self._check_provider_update(context, network['network']) + add_segments = [] + del_segments = [] + + session = context.session + with session.begin(subtransactions=True): + net = super(N1kvNeutronPluginV2, self).update_network(context, id, + network) + self._process_l3_update(context, net, network['network']) + binding = n1kv_db_v2.get_network_binding(session, id) + if binding.network_type == c_const.NETWORK_TYPE_MULTI_SEGMENT: + add_segments = ( + self._parse_multi_segments(context, network['network'], + n1kv.SEGMENT_ADD)) + n1kv_db_v2.add_multi_segment_binding(session, + net['id'], add_segments) + del_segments = ( + self._parse_multi_segments(context, network['network'], + n1kv.SEGMENT_DEL)) + self._send_add_multi_segment_request(context, net['id'], + add_segments) + self._send_del_multi_segment_request(context, net['id'], + del_segments) + n1kv_db_v2.del_multi_segment_binding(session, + net['id'], del_segments) + elif binding.network_type == c_const.NETWORK_TYPE_TRUNK: + network_profile = 
self.get_network_profile(context, + binding.profile_id) + add_segments = ( + self._parse_trunk_segments(context, network['network'], + n1kv.SEGMENT_ADD, + binding.physical_network, + network_profile['sub_type'])) + n1kv_db_v2.add_trunk_segment_binding(session, + net['id'], add_segments) + del_segments = ( + self._parse_trunk_segments(context, network['network'], + n1kv.SEGMENT_DEL, + binding.physical_network, + network_profile['sub_type'])) + n1kv_db_v2.del_trunk_segment_binding(session, + net['id'], del_segments) + self._extend_network_dict_provider(context, net) + self._extend_network_dict_profile(context, net) + if binding.network_type != c_const.NETWORK_TYPE_MULTI_SEGMENT: + self._send_update_network_request(context, net, add_segments, + del_segments) + LOG.debug(_("Updated network: %s"), net['id']) + return net + + def delete_network(self, context, id): + """ + Delete a network. + + :param context: neutron api request context + :param id: UUID representing the network to delete + """ + session = context.session + with session.begin(subtransactions=True): + binding = n1kv_db_v2.get_network_binding(session, id) + network = self.get_network(context, id) + if n1kv_db_v2.is_trunk_member(session, id): + msg = _("Cannot delete network '%s' " + "that is member of a trunk segment") % network['name'] + raise n_exc.InvalidInput(error_message=msg) + if n1kv_db_v2.is_multi_segment_member(session, id): + msg = _("Cannot delete network '%s' that is a member of a " + "multi-segment network") % network['name'] + raise n_exc.InvalidInput(error_message=msg) + if binding.network_type == c_const.NETWORK_TYPE_OVERLAY: + n1kv_db_v2.release_vxlan(session, binding.segmentation_id) + elif binding.network_type == c_const.NETWORK_TYPE_VLAN: + n1kv_db_v2.release_vlan(session, binding.physical_network, + binding.segmentation_id) + self._process_l3_delete(context, id) + super(N1kvNeutronPluginV2, self).delete_network(context, id) + # the network_binding record is deleted via cascade from + 
# the network record, so explicit removal is not necessary + self._send_delete_network_request(context, network) + LOG.debug(_("Deleted network: %s"), id) + + def get_network(self, context, id, fields=None): + """ + Retrieve a Network. + + :param context: neutron api request context + :param id: UUID representing the network to fetch + :returns: requested network dictionary + """ + LOG.debug(_("Get network: %s"), id) + net = super(N1kvNeutronPluginV2, self).get_network(context, id, None) + self._extend_network_dict_provider(context, net) + self._extend_network_dict_profile(context, net) + self._extend_network_dict_member_segments(context, net) + return self._fields(net, fields) + + def get_networks(self, context, filters=None, fields=None): + """ + Retrieve a list of networks. + + :param context: neutron api request context + :param filters: a dictionary with keys that are valid keys for a + network object. Values in this dictiontary are an + iterable containing values that will be used for an + exact match comparison for that value. Each result + returned by this function will have matched one of the + values for each key in filters + :params fields: a list of strings that are valid keys in a network + dictionary. Only these fields will be returned. + :returns: list of network dictionaries. + """ + LOG.debug(_("Get networks")) + nets = super(N1kvNeutronPluginV2, self).get_networks(context, filters, + None) + for net in nets: + self._extend_network_dict_provider(context, net) + self._extend_network_dict_profile(context, net) + + return [self._fields(net, fields) for net in nets] + + def create_port(self, context, port): + """ + Create neutron port. + + Create a port. Use a default policy profile for ports created for dhcp + and router interface. Default policy profile name is configured in the + /etc/neutron/cisco_plugins.ini file. 
+ + :param context: neutron api request context + :param port: port dictionary + :returns: port object + """ + p_profile = None + port_count = None + vm_network_name = None + profile_id_set = False + + # Set the network policy profile id for auto generated L3/DHCP ports + if ('device_id' in port['port'] and port['port']['device_owner'] in + [constants.DEVICE_OWNER_DHCP, constants.DEVICE_OWNER_ROUTER_INTF, + constants.DEVICE_OWNER_ROUTER_GW, + constants.DEVICE_OWNER_FLOATINGIP]): + p_profile_name = c_conf.CISCO_N1K.network_node_policy_profile + p_profile = self._get_policy_profile_by_name(p_profile_name) + if p_profile: + port['port']['n1kv:profile_id'] = p_profile['id'] + + if n1kv.PROFILE_ID in port['port']: + profile_id = port['port'].get(n1kv.PROFILE_ID) + profile_id_set = attributes.is_attr_set(profile_id) + + # Set the default policy profile id for ports if no id is set + if not profile_id_set: + p_profile_name = c_conf.CISCO_N1K.default_policy_profile + p_profile = self._get_policy_profile_by_name(p_profile_name) + if p_profile: + port['port']['n1kv:profile_id'] = p_profile['id'] + profile_id_set = True + + profile_id = self._process_policy_profile(context, + port['port']) + LOG.debug(_('Create port: profile_id=%s'), profile_id) + session = context.session + with session.begin(subtransactions=True): + pt = super(N1kvNeutronPluginV2, self).create_port(context, + port) + n1kv_db_v2.add_port_binding(session, pt['id'], profile_id) + self._extend_port_dict_profile(context, pt) + try: + vm_network = n1kv_db_v2.get_vm_network( + context.session, + profile_id, + pt['network_id']) + except cisco_exceptions.VMNetworkNotFound: + # Create a VM Network if no VM network exists. + vm_network_name = "%s%s_%s" % (c_const.VM_NETWORK_NAME_PREFIX, + profile_id, + pt['network_id']) + port_count = 1 + n1kv_db_v2.add_vm_network(context.session, + vm_network_name, + profile_id, + pt['network_id'], + port_count) + else: + # Update port count of the VM network. 
+ vm_network_name = vm_network['name'] + port_count = vm_network['port_count'] + 1 + n1kv_db_v2.update_vm_network_port_count(context.session, + vm_network_name, + port_count) + self._process_portbindings_create_and_update(context, + port['port'], + pt) + # Extract policy profile for VM network create in VSM. + if not p_profile: + p_profile = n1kv_db_v2.get_policy_profile(session, profile_id) + try: + self._send_create_port_request(context, + pt, + port_count, + p_profile, + vm_network_name) + except(cisco_exceptions.VSMError, + cisco_exceptions.VSMConnectionFailed): + super(N1kvNeutronPluginV2, self).delete_port(context, pt['id']) + else: + LOG.debug(_("Created port: %s"), pt) + return pt + + def update_port(self, context, id, port): + """ + Update port parameters. + + :param context: neutron api request context + :param id: UUID representing the port to update + :returns: updated port object + """ + LOG.debug(_("Update port: %s"), id) + with context.session.begin(subtransactions=True): + updated_port = super(N1kvNeutronPluginV2, + self).update_port(context, id, port) + self._process_portbindings_create_and_update(context, + port['port'], + updated_port) + self._extend_port_dict_profile(context, updated_port) + return updated_port + + def delete_port(self, context, id, l3_port_check=True): + """ + Delete a port. + + :param context: neutron api request context + :param id: UUID representing the port to delete + """ + # if needed, check to see if this is a port owned by + # and l3-router. If so, we should prevent deletion. 
+ if l3_port_check: + self.prevent_l3_port_deletion(context, id) + with context.session.begin(subtransactions=True): + port = self.get_port(context, id) + vm_network = n1kv_db_v2.get_vm_network(context.session, + port[n1kv.PROFILE_ID], + port['network_id']) + vm_network['port_count'] -= 1 + n1kv_db_v2.update_vm_network_port_count(context.session, + vm_network['name'], + vm_network['port_count']) + if vm_network['port_count'] == 0: + n1kv_db_v2.delete_vm_network(context.session, + port[n1kv.PROFILE_ID], + port['network_id']) + self.disassociate_floatingips(context, id) + super(N1kvNeutronPluginV2, self).delete_port(context, id) + self._send_delete_port_request(context, port, vm_network) + + def get_port(self, context, id, fields=None): + """ + Retrieve a port. + :param context: neutron api request context + :param id: UUID representing the port to retrieve + :param fields: a list of strings that are valid keys in a port + dictionary. Only these fields will be returned. + :returns: port dictionary + """ + LOG.debug(_("Get port: %s"), id) + port = super(N1kvNeutronPluginV2, self).get_port(context, id, None) + self._extend_port_dict_profile(context, port) + return self._fields(port, fields) + + def get_ports(self, context, filters=None, fields=None): + """ + Retrieve a list of ports. + + :param context: neutron api request context + :param filters: a dictionary with keys that are valid keys for a + port object. Values in this dictiontary are an + iterable containing values that will be used for an + exact match comparison for that value. Each result + returned by this function will have matched one of the + values for each key in filters + :params fields: a list of strings that are valid keys in a port + dictionary. Only these fields will be returned. 
+ :returns: list of port dictionaries + """ + LOG.debug(_("Get ports")) + ports = super(N1kvNeutronPluginV2, self).get_ports(context, filters, + None) + for port in ports: + self._extend_port_dict_profile(context, port) + + return [self._fields(port, fields) for port in ports] + + def create_subnet(self, context, subnet): + """ + Create subnet for a given network. + + :param context: neutron api request context + :param subnet: subnet dictionary + :returns: subnet object + """ + LOG.debug(_('Create subnet')) + sub = super(N1kvNeutronPluginV2, self).create_subnet(context, subnet) + try: + self._send_create_subnet_request(context, sub) + except(cisco_exceptions.VSMError, + cisco_exceptions.VSMConnectionFailed): + super(N1kvNeutronPluginV2, self).delete_subnet(context, sub['id']) + else: + LOG.debug(_("Created subnet: %s"), sub['id']) + return sub + + def update_subnet(self, context, id, subnet): + """ + Update a subnet. + + :param context: neutron api request context + :param id: UUID representing subnet to update + :returns: updated subnet object + """ + LOG.debug(_('Update subnet')) + sub = super(N1kvNeutronPluginV2, self).update_subnet(context, + id, + subnet) + self._send_update_subnet_request(sub) + return sub + + def delete_subnet(self, context, id): + """ + Delete a subnet. + + :param context: neutron api request context + :param id: UUID representing subnet to delete + :returns: deleted subnet object + """ + LOG.debug(_('Delete subnet: %s'), id) + subnet = self.get_subnet(context, id) + self._send_delete_subnet_request(context, subnet) + return super(N1kvNeutronPluginV2, self).delete_subnet(context, id) + + def get_subnet(self, context, id, fields=None): + """ + Retrieve a subnet. + + :param context: neutron api request context + :param id: UUID representing subnet to retrieve + :params fields: a list of strings that are valid keys in a subnet + dictionary. Only these fields will be returned. 
+ :returns: subnet object + """ + LOG.debug(_("Get subnet: %s"), id) + subnet = super(N1kvNeutronPluginV2, self).get_subnet(context, id, + None) + return self._fields(subnet, fields) + + def get_subnets(self, context, filters=None, fields=None): + """ + Retrieve a list of subnets. + + :param context: neutron api request context + :param filters: a dictionary with keys that are valid keys for a + subnet object. Values in this dictiontary are an + iterable containing values that will be used for an + exact match comparison for that value. Each result + returned by this function will have matched one of the + values for each key in filters + :params fields: a list of strings that are valid keys in a subnet + dictionary. Only these fields will be returned. + :returns: list of dictionaries of subnets + """ + LOG.debug(_("Get subnets")) + subnets = super(N1kvNeutronPluginV2, self).get_subnets(context, + filters, + None) + return [self._fields(subnet, fields) for subnet in subnets] + + def create_network_profile(self, context, network_profile): + """ + Create a network profile. + + Create a network profile, which represents a pool of networks + belonging to one type (VLAN or Overlay). On creation of network + profile, we retrieve the admin tenant-id which we use to replace + the previously stored fake tenant-id in tenant-profile bindings. 
+ :param context: neutron api request context + :param network_profile: network profile dictionary + :returns: network profile object + """ + self._replace_fake_tenant_id_with_real(context) + with context.session.begin(subtransactions=True): + net_p = super(N1kvNeutronPluginV2, + self).create_network_profile(context, + network_profile) + try: + self._send_create_logical_network_request(net_p, + context.tenant_id) + except(cisco_exceptions.VSMError, + cisco_exceptions.VSMConnectionFailed): + n1kv_db_v2.delete_profile_binding(context.session, + context.tenant_id, + net_p['id']) + try: + self._send_create_network_profile_request(context, net_p) + except(cisco_exceptions.VSMError, + cisco_exceptions.VSMConnectionFailed): + n1kv_db_v2.delete_profile_binding(context.session, + context.tenant_id, + net_p['id']) + self._send_delete_logical_network_request(net_p) + return net_p + + def delete_network_profile(self, context, id): + """ + Delete a network profile. + + :param context: neutron api request context + :param id: UUID of the network profile to delete + :returns: deleted network profile object + """ + with context.session.begin(subtransactions=True): + net_p = super(N1kvNeutronPluginV2, + self).delete_network_profile(context, id) + self._send_delete_network_profile_request(net_p) + self._send_delete_logical_network_request(net_p) + + def update_network_profile(self, context, net_profile_id, network_profile): + """ + Update a network profile. + + :param context: neutron api request context + :param net_profile_id: UUID of the network profile to update + :param network_profile: dictionary containing network profile object + """ + session = context.session + with session.begin(subtransactions=True): + net_p = (super(N1kvNeutronPluginV2, self). + update_network_profile(context, + net_profile_id, + network_profile)) + self._send_update_network_profile_request(net_p) + return net_p + + def create_router(self, context, router): + """ + Handle creation of router. 
+ + Schedule router to L3 agent as part of the create handling. + :param context: neutron api request context + :param router: router dictionary + :returns: router object + """ + session = context.session + with session.begin(subtransactions=True): + rtr = (super(N1kvNeutronPluginV2, self). + create_router(context, router)) + LOG.debug(_("Scheduling router %s"), rtr['id']) + self.schedule_router(context, rtr['id']) + return rtr diff --git a/neutron/plugins/cisco/network_plugin.py b/neutron/plugins/cisco/network_plugin.py new file mode 100644 index 000000000..ee35fec81 --- /dev/null +++ b/neutron/plugins/cisco/network_plugin.py @@ -0,0 +1,176 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2012 Cisco Systems, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Sumit Naiksatam, Cisco Systems, Inc. 
+ +import logging + +import webob.exc as wexc + +from neutron.api import extensions as neutron_extensions +from neutron.api.v2 import base +from neutron.db import db_base_plugin_v2 +from neutron.openstack.common import importutils +from neutron.plugins.cisco.common import cisco_exceptions as cexc +from neutron.plugins.cisco.common import config +from neutron.plugins.cisco.db import network_db_v2 as cdb +from neutron.plugins.cisco import extensions + +LOG = logging.getLogger(__name__) + + +class PluginV2(db_base_plugin_v2.NeutronDbPluginV2): + """Meta-Plugin with v2 API support for multiple sub-plugins.""" + _supported_extension_aliases = ["credential", "Cisco qos"] + _methods_to_delegate = ['create_network', + 'delete_network', 'update_network', 'get_network', + 'get_networks', + 'create_port', 'delete_port', + 'update_port', 'get_port', 'get_ports', + 'create_subnet', + 'delete_subnet', 'update_subnet', + 'get_subnet', 'get_subnets', ] + + CISCO_FAULT_MAP = { + cexc.CredentialAlreadyExists: wexc.HTTPBadRequest, + cexc.CredentialNameNotFound: wexc.HTTPNotFound, + cexc.CredentialNotFound: wexc.HTTPNotFound, + cexc.NetworkSegmentIDNotFound: wexc.HTTPNotFound, + cexc.NetworkVlanBindingAlreadyExists: wexc.HTTPBadRequest, + cexc.NexusComputeHostNotConfigured: wexc.HTTPNotFound, + cexc.NexusConfigFailed: wexc.HTTPBadRequest, + cexc.NexusConnectFailed: wexc.HTTPServiceUnavailable, + cexc.NexusPortBindingNotFound: wexc.HTTPNotFound, + cexc.NoMoreNics: wexc.HTTPBadRequest, + cexc.PortIdForNexusSvi: wexc.HTTPBadRequest, + cexc.PortVnicBindingAlreadyExists: wexc.HTTPBadRequest, + cexc.PortVnicNotFound: wexc.HTTPNotFound, + cexc.QosNameAlreadyExists: wexc.HTTPBadRequest, + cexc.QosNotFound: wexc.HTTPNotFound, + cexc.SubnetNotSpecified: wexc.HTTPBadRequest, + cexc.VlanIDNotAvailable: wexc.HTTPNotFound, + cexc.VlanIDNotFound: wexc.HTTPNotFound, + } + + @property + def supported_extension_aliases(self): + if not hasattr(self, '_aliases'): + aliases = 
self._supported_extension_aliases[:] + if hasattr(self._model, "supported_extension_aliases"): + aliases.extend(self._model.supported_extension_aliases) + self._aliases = aliases + return self._aliases + + def __init__(self): + """Load the model class.""" + self._model_name = config.CISCO.model_class + self._model = importutils.import_object(self._model_name) + native_bulk_attr_name = ("_%s__native_bulk_support" + % self._model.__class__.__name__) + self.__native_bulk_support = getattr(self._model, + native_bulk_attr_name, False) + + neutron_extensions.append_api_extensions_path(extensions.__path__) + + # Extend the fault map + self._extend_fault_map() + + LOG.debug(_("Plugin initialization complete")) + + def __getattribute__(self, name): + """Delegate core API calls to the model class. + + Core API calls are delegated directly to the configured model class. + Note: Bulking calls will be handled by this class, and turned into + non-bulking calls to be considered for delegation. + """ + methods = object.__getattribute__(self, "_methods_to_delegate") + if name in methods: + return getattr(object.__getattribute__(self, "_model"), + name) + else: + return object.__getattribute__(self, name) + + def __getattr__(self, name): + """Delegate calls to the extensions. + + This delegates the calls to the extensions explicitly implemented by + the model. + """ + if hasattr(self._model, name): + return getattr(self._model, name) + else: + # Must make sure we re-raise the error that led us here, since + # otherwise getattr() and even hasattr() doesn't work correctly. + raise AttributeError( + _("'%(model)s' object has no attribute '%(name)s'") % + {'model': self._model_name, 'name': name}) + + def _extend_fault_map(self): + """Extend the Neutron Fault Map for Cisco exceptions. + + Map exceptions which are specific to the Cisco Plugin + to standard HTTP exceptions. 
+ + """ + base.FAULT_MAP.update(self.CISCO_FAULT_MAP) + + """ + Extension API implementation + """ + def get_all_qoss(self, tenant_id): + """Get all QoS levels.""" + LOG.debug(_("get_all_qoss() called")) + qoslist = cdb.get_all_qoss(tenant_id) + return qoslist + + def get_qos_details(self, tenant_id, qos_id): + """Get QoS Details.""" + LOG.debug(_("get_qos_details() called")) + return cdb.get_qos(tenant_id, qos_id) + + def create_qos(self, tenant_id, qos_name, qos_desc): + """Create a QoS level.""" + LOG.debug(_("create_qos() called")) + qos = cdb.add_qos(tenant_id, qos_name, str(qos_desc)) + return qos + + def delete_qos(self, tenant_id, qos_id): + """Delete a QoS level.""" + LOG.debug(_("delete_qos() called")) + return cdb.remove_qos(tenant_id, qos_id) + + def rename_qos(self, tenant_id, qos_id, new_name): + """Rename QoS level.""" + LOG.debug(_("rename_qos() called")) + return cdb.update_qos(tenant_id, qos_id, new_name) + + def get_all_credentials(self): + """Get all credentials.""" + LOG.debug(_("get_all_credentials() called")) + credential_list = cdb.get_all_credentials() + return credential_list + + def get_credential_details(self, credential_id): + """Get a particular credential.""" + LOG.debug(_("get_credential_details() called")) + return cdb.get_credential(credential_id) + + def rename_credential(self, credential_id, new_name, new_password): + """Rename the particular credential resource.""" + LOG.debug(_("rename_credential() called")) + return cdb.update_credential(credential_id, new_name, + new_password=new_password) diff --git a/neutron/plugins/cisco/nexus/__init__.py b/neutron/plugins/cisco/nexus/__init__.py new file mode 100644 index 000000000..963eb547f --- /dev/null +++ b/neutron/plugins/cisco/nexus/__init__.py @@ -0,0 +1,21 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2011 Cisco Systems, Inc. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Sumit Naiksatam, Cisco Systems, Inc. +# @author: Edgar Magana, Cisco Systems, Inc. +""" +Init module for Nexus Driver +""" diff --git a/neutron/plugins/cisco/nexus/cisco_nexus_network_driver_v2.py b/neutron/plugins/cisco/nexus/cisco_nexus_network_driver_v2.py new file mode 100644 index 000000000..bef145f03 --- /dev/null +++ b/neutron/plugins/cisco/nexus/cisco_nexus_network_driver_v2.py @@ -0,0 +1,196 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2011 Cisco Systems, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Debojyoti Dutta, Cisco Systems, Inc. +# @author: Edgar Magana, Cisco Systems Inc. 
+# +""" +Implements a Nexus-OS NETCONF over SSHv2 API Client +""" + +import logging + +from ncclient import manager + +from neutron.openstack.common import excutils +from neutron.plugins.cisco.common import cisco_constants as const +from neutron.plugins.cisco.common import cisco_credentials_v2 as cred +from neutron.plugins.cisco.common import cisco_exceptions as cexc +from neutron.plugins.cisco.common import config as conf +from neutron.plugins.cisco.db import nexus_db_v2 +from neutron.plugins.cisco.nexus import cisco_nexus_snippets as snipp + +LOG = logging.getLogger(__name__) + + +class CiscoNEXUSDriver(): + """Nexus Driver Main Class.""" + def __init__(self): + cisco_switches = conf.get_device_dictionary() + self.nexus_switches = dict(((key[1], key[2]), val) + for key, val in cisco_switches.items() + if key[0] == 'NEXUS_SWITCH') + self.credentials = {} + self.connections = {} + + def _edit_config(self, nexus_host, target='running', config='', + allowed_exc_strs=None): + """Modify switch config for a target config type. + + :param nexus_host: IP address of switch to configure + :param target: Target config type + :param config: Configuration string in XML format + :param allowed_exc_strs: Exceptions which have any of these strings + as a subset of their exception message + (str(exception)) can be ignored + + :raises: NexusConfigFailed + + """ + if not allowed_exc_strs: + allowed_exc_strs = [] + mgr = self.nxos_connect(nexus_host) + try: + mgr.edit_config(target, config=config) + except Exception as e: + for exc_str in allowed_exc_strs: + if exc_str in str(e): + break + else: + # Raise a Neutron exception. Include a description of + # the original ncclient exception. 
No need to preserve T/B + raise cexc.NexusConfigFailed(config=config, exc=e) + + def get_credential(self, nexus_ip): + if nexus_ip not in self.credentials: + nexus_username = cred.Store.get_username(nexus_ip) + nexus_password = cred.Store.get_password(nexus_ip) + self.credentials[nexus_ip] = { + const.USERNAME: nexus_username, + const.PASSWORD: nexus_password + } + return self.credentials[nexus_ip] + + def nxos_connect(self, nexus_host): + """Make SSH connection to the Nexus Switch.""" + if getattr(self.connections.get(nexus_host), 'connected', None): + return self.connections[nexus_host] + + nexus_ssh_port = int(self.nexus_switches[nexus_host, 'ssh_port']) + nexus_creds = self.get_credential(nexus_host) + nexus_user = nexus_creds[const.USERNAME] + nexus_password = nexus_creds[const.PASSWORD] + try: + man = manager.connect(host=nexus_host, + port=nexus_ssh_port, + username=nexus_user, + password=nexus_password) + self.connections[nexus_host] = man + except Exception as e: + # Raise a Neutron exception. Include a description of + # the original ncclient exception. No need to preserve T/B. + raise cexc.NexusConnectFailed(nexus_host=nexus_host, exc=e) + + return self.connections[nexus_host] + + def create_xml_snippet(self, cutomized_config): + """Create XML snippet. + + Creates the Proper XML structure for the Nexus Switch Configuration. + """ + conf_xml_snippet = snipp.EXEC_CONF_SNIPPET % (cutomized_config) + return conf_xml_snippet + + def create_vlan(self, nexus_host, vlanid, vlanname): + """Create a VLAN on Nexus Switch given the VLAN ID and Name.""" + confstr = self.create_xml_snippet( + snipp.CMD_VLAN_CONF_SNIPPET % (vlanid, vlanname)) + self._edit_config(nexus_host, target='running', config=confstr) + + # Enable VLAN active and no-shutdown states. Some versions of + # Nexus switch do not allow state changes for the extended VLAN + # range (1006-4094), but these errors can be ignored (default + # values are appropriate). 
+ state_config = [snipp.CMD_VLAN_ACTIVE_SNIPPET, + snipp.CMD_VLAN_NO_SHUTDOWN_SNIPPET] + for snippet in state_config: + try: + confstr = self.create_xml_snippet(snippet % vlanid) + self._edit_config( + nexus_host, + target='running', + config=confstr, + allowed_exc_strs=["Can't modify state for extended", + "Command is only allowed on VLAN"]) + except cexc.NexusConfigFailed: + with excutils.save_and_reraise_exception(): + self.delete_vlan(nexus_host, vlanid) + + def delete_vlan(self, nexus_host, vlanid): + """Delete a VLAN on Nexus Switch given the VLAN ID.""" + confstr = snipp.CMD_NO_VLAN_CONF_SNIPPET % vlanid + confstr = self.create_xml_snippet(confstr) + self._edit_config(nexus_host, target='running', config=confstr) + + def enable_vlan_on_trunk_int(self, nexus_host, vlanid, etype, interface): + """Enable a VLAN on a trunk interface.""" + # If one or more VLANs are already configured on this interface, + # include the 'add' keyword. + if nexus_db_v2.get_port_switch_bindings('%s:%s' % (etype, interface), + nexus_host): + snippet = snipp.CMD_INT_VLAN_ADD_SNIPPET + else: + snippet = snipp.CMD_INT_VLAN_SNIPPET + confstr = snippet % (etype, interface, vlanid, etype) + confstr = self.create_xml_snippet(confstr) + LOG.debug(_("NexusDriver: %s"), confstr) + self._edit_config(nexus_host, target='running', config=confstr) + + def disable_vlan_on_trunk_int(self, nexus_host, vlanid, etype, interface): + """Disable a VLAN on a trunk interface.""" + confstr = snipp.CMD_NO_VLAN_INT_SNIPPET % (etype, interface, + vlanid, etype) + confstr = self.create_xml_snippet(confstr) + LOG.debug(_("NexusDriver: %s"), confstr) + self._edit_config(nexus_host, target='running', config=confstr) + + def create_and_trunk_vlan(self, nexus_host, vlan_id, vlan_name, + etype, nexus_port): + """Create VLAN and trunk it on the specified ports.""" + self.create_vlan(nexus_host, vlan_id, vlan_name) + LOG.debug(_("NexusDriver created VLAN: %s"), vlan_id) + if nexus_port: + 
self.enable_vlan_on_trunk_int(nexus_host, vlan_id, + etype, nexus_port) + + def delete_and_untrunk_vlan(self, nexus_host, vlan_id, etype, nexus_port): + """Delete VLAN and untrunk it from the specified ports.""" + self.delete_vlan(nexus_host, vlan_id) + if nexus_port: + self.disable_vlan_on_trunk_int(nexus_host, vlan_id, + etype, nexus_port) + + def create_vlan_svi(self, nexus_host, vlan_id, gateway_ip): + confstr = snipp.CMD_VLAN_SVI_SNIPPET % (vlan_id, gateway_ip) + confstr = self.create_xml_snippet(confstr) + LOG.debug(_("NexusDriver: %s"), confstr) + self._edit_config(nexus_host, target='running', config=confstr) + + def delete_vlan_svi(self, nexus_host, vlan_id): + confstr = snipp.CMD_NO_VLAN_SVI_SNIPPET % vlan_id + confstr = self.create_xml_snippet(confstr) + LOG.debug(_("NexusDriver: %s"), confstr) + self._edit_config(nexus_host, target='running', config=confstr) diff --git a/neutron/plugins/cisco/nexus/cisco_nexus_plugin_v2.py b/neutron/plugins/cisco/nexus/cisco_nexus_plugin_v2.py new file mode 100644 index 000000000..e9e34811a --- /dev/null +++ b/neutron/plugins/cisco/nexus/cisco_nexus_plugin_v2.py @@ -0,0 +1,347 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 Cisco Systems, Inc. +# All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Sumit Naiksatam, Cisco Systems, Inc. +# @author: Edgar Magana, Cisco Systems, Inc. +# @author: Arvind Somya, Cisco Systems, Inc. 
(asomya@cisco.com) +# + +""" +PlugIn for Nexus OS driver +""" + +import logging + +from neutron.openstack.common import excutils +from neutron.openstack.common import importutils +from neutron.plugins.cisco.common import cisco_constants as const +from neutron.plugins.cisco.common import cisco_exceptions as cisco_exc +from neutron.plugins.cisco.common import config as conf +from neutron.plugins.cisco.db import network_db_v2 as cdb +from neutron.plugins.cisco.db import nexus_db_v2 as nxos_db +from neutron.plugins.cisco import l2device_plugin_base + + +LOG = logging.getLogger(__name__) + + +class NexusPlugin(l2device_plugin_base.L2DevicePluginBase): + """Nexus PlugIn Main Class.""" + _networks = {} + + def __init__(self): + """Extract configuration parameters from the configuration file.""" + self._client = importutils.import_object(conf.CISCO.nexus_driver) + LOG.debug(_("Loaded driver %s"), conf.CISCO.nexus_driver) + self._nexus_switches = conf.get_device_dictionary() + + def create_network(self, network, attachment): + """Create or update a network when an attachment is changed. + + This method is not invoked at the usual plugin create_network() time. + Instead, it is invoked on create/update port. + + :param network: Network on which the port operation is happening + :param attachment: Details about the owner of the port + + Create a VLAN in the appropriate switch/port, and configure the + appropriate interfaces for this VLAN. + """ + LOG.debug(_("NexusPlugin:create_network() called")) + # Grab the switch IPs and ports for this host + host_connections = [] + host = attachment['host_name'] + for switch_type, switch_ip, attr in self._nexus_switches: + if str(attr) == str(host): + port = self._nexus_switches[switch_type, switch_ip, attr] + # Get ether type for port, assume an ethernet type + # if none specified. 
+ if ':' in port: + etype, port_id = port.split(':') + else: + etype, port_id = 'ethernet', port + host_connections.append((switch_ip, etype, port_id)) + if not host_connections: + raise cisco_exc.NexusComputeHostNotConfigured(host=host) + + vlan_id = network[const.NET_VLAN_ID] + vlan_name = network[const.NET_VLAN_NAME] + auto_create = True + auto_trunk = True + if cdb.is_provider_vlan(vlan_id): + vlan_name = ''.join([conf.CISCO.provider_vlan_name_prefix, + str(vlan_id)]) + auto_create = conf.CISCO.provider_vlan_auto_create + auto_trunk = conf.CISCO.provider_vlan_auto_trunk + + # Check if this network is already in the DB + for switch_ip, etype, port_id in host_connections: + vlan_created = False + vlan_trunked = False + eport_id = '%s:%s' % (etype, port_id) + # Check for switch vlan bindings + try: + # This vlan has already been created on this switch + # via another operation, like SVI bindings. + nxos_db.get_nexusvlan_binding(vlan_id, switch_ip) + vlan_created = True + auto_create = False + except cisco_exc.NexusPortBindingNotFound: + # No changes, proceed as normal + pass + + try: + nxos_db.get_port_vlan_switch_binding(eport_id, vlan_id, + switch_ip) + except cisco_exc.NexusPortBindingNotFound: + if auto_create and auto_trunk: + # Create vlan and trunk vlan on the port + LOG.debug(_("Nexus: create & trunk vlan %s"), vlan_name) + self._client.create_and_trunk_vlan( + switch_ip, vlan_id, vlan_name, etype, port_id) + vlan_created = True + vlan_trunked = True + elif auto_create: + # Create vlan but do not trunk it on the port + LOG.debug(_("Nexus: create vlan %s"), vlan_name) + self._client.create_vlan(switch_ip, vlan_id, vlan_name) + vlan_created = True + elif auto_trunk: + # Only trunk vlan on the port + LOG.debug(_("Nexus: trunk vlan %s"), vlan_name) + self._client.enable_vlan_on_trunk_int( + switch_ip, vlan_id, etype, port_id) + vlan_trunked = True + + try: + instance = attachment[const.INSTANCE_ID] + nxos_db.add_nexusport_binding(eport_id, str(vlan_id), + 
switch_ip, instance) + except Exception: + with excutils.save_and_reraise_exception(): + # Add binding failed, roll back any vlan creation/enabling + if vlan_created and vlan_trunked: + LOG.debug(_("Nexus: delete & untrunk vlan %s"), + vlan_name) + self._client.delete_and_untrunk_vlan(switch_ip, + vlan_id, + etype, port_id) + elif vlan_created: + LOG.debug(_("Nexus: delete vlan %s"), vlan_name) + self._client.delete_vlan(switch_ip, vlan_id) + elif vlan_trunked: + LOG.debug(_("Nexus: untrunk vlan %s"), vlan_name) + self._client.disable_vlan_on_trunk_int(switch_ip, + vlan_id, + etype, + port_id) + + net_id = network[const.NET_ID] + new_net_dict = {const.NET_ID: net_id, + const.NET_NAME: network[const.NET_NAME], + const.NET_PORTS: {}, + const.NET_VLAN_NAME: vlan_name, + const.NET_VLAN_ID: vlan_id} + self._networks[net_id] = new_net_dict + return new_net_dict + + def add_router_interface(self, vlan_name, vlan_id, subnet_id, + gateway_ip, router_id): + """Create VLAN SVI on the Nexus switch.""" + # Find a switch to create the SVI on + switch_ip = self._find_switch_for_svi() + if not switch_ip: + raise cisco_exc.NoNexusSviSwitch() + + # Check if this vlan exists on the switch already + try: + nxos_db.get_nexusvlan_binding(vlan_id, switch_ip) + except cisco_exc.NexusPortBindingNotFound: + # Create vlan and trunk vlan on the port + self._client.create_and_trunk_vlan( + switch_ip, vlan_id, vlan_name, etype=None, nexus_port=None) + # Check if a router interface has already been created + try: + nxos_db.get_nexusvm_bindings(vlan_id, router_id) + raise cisco_exc.SubnetInterfacePresent(subnet_id=subnet_id, + router_id=router_id) + except cisco_exc.NexusPortBindingNotFound: + self._client.create_vlan_svi(switch_ip, vlan_id, gateway_ip) + nxos_db.add_nexusport_binding('router', str(vlan_id), + switch_ip, router_id) + + return True + + def remove_router_interface(self, vlan_id, router_id): + """Remove VLAN SVI from the Nexus Switch.""" + # Grab switch_ip from database + switch_ip 
= nxos_db.get_nexusvm_bindings(vlan_id, + router_id)[0].switch_ip + + # Delete the SVI interface from the switch + self._client.delete_vlan_svi(switch_ip, vlan_id) + + # Invoke delete_port to delete this row + # And delete vlan if required + return self.delete_port(router_id, vlan_id) + + def _find_switch_for_svi(self): + """Get a switch to create the SVI on.""" + LOG.debug(_("Grabbing a switch to create SVI")) + nexus_switches = self._client.nexus_switches + if conf.CISCO.svi_round_robin: + LOG.debug(_("Using round robin to create SVI")) + switch_dict = dict( + (switch_ip, 0) for switch_ip, _ in nexus_switches) + try: + bindings = nxos_db.get_nexussvi_bindings() + # Build a switch dictionary with weights + for binding in bindings: + switch_ip = binding.switch_ip + if switch_ip not in switch_dict: + switch_dict[switch_ip] = 1 + else: + switch_dict[switch_ip] += 1 + # Search for the lowest value in the dict + if switch_dict: + switch_ip = min(switch_dict, key=switch_dict.get) + return switch_ip + except cisco_exc.NexusPortBindingNotFound: + pass + + LOG.debug(_("No round robin or zero weights, using first switch")) + # Return the first switch in the config + return conf.first_device_ip + + def delete_network(self, tenant_id, net_id, **kwargs): + """Delete network. + + Not applicable to Nexus plugin. Defined here to satisfy abstract + method requirements. + """ + LOG.debug(_("NexusPlugin:delete_network() called")) # pragma no cover + + def update_network(self, tenant_id, net_id, **kwargs): + """Update the properties of a particular Virtual Network. + + Not applicable to Nexus plugin. Defined here to satisfy abstract + method requirements. + """ + LOG.debug(_("NexusPlugin:update_network() called")) # pragma no cover + + def create_port(self, tenant_id, net_id, port_state, port_id, **kwargs): + """Create port. + + Not applicable to Nexus plugin. Defined here to satisfy abstract + method requirements. 
+ """ + LOG.debug(_("NexusPlugin:create_port() called")) # pragma no cover + + def delete_port(self, device_id, vlan_id): + """Delete port. + + Delete port bindings from the database and scan whether the network + is still required on the interfaces trunked. + """ + LOG.debug(_("NexusPlugin:delete_port() called")) + # Delete DB row(s) for this port + try: + rows = nxos_db.get_nexusvm_bindings(vlan_id, device_id) + except cisco_exc.NexusPortBindingNotFound: + return + + auto_delete = True + auto_untrunk = True + if cdb.is_provider_vlan(vlan_id): + auto_delete = conf.CISCO.provider_vlan_auto_create + auto_untrunk = conf.CISCO.provider_vlan_auto_trunk + LOG.debug(_("delete_network(): provider vlan %s"), vlan_id) + + instance_id = False + for row in rows: + instance_id = row['instance_id'] + switch_ip = row.switch_ip + etype, nexus_port = '', '' + if row['port_id'] == 'router': + etype, nexus_port = 'vlan', row['port_id'] + auto_untrunk = False + else: + etype, nexus_port = row['port_id'].split(':') + + nxos_db.remove_nexusport_binding(row.port_id, row.vlan_id, + row.switch_ip, + row.instance_id) + # Check whether there are any remaining instances using this + # vlan on this Nexus port. + try: + nxos_db.get_port_vlan_switch_binding(row.port_id, + row.vlan_id, + row.switch_ip) + except cisco_exc.NexusPortBindingNotFound: + try: + if nexus_port and auto_untrunk: + # Untrunk the vlan from this Nexus interface + self._client.disable_vlan_on_trunk_int( + switch_ip, row.vlan_id, etype, nexus_port) + + # Check whether there are any remaining instances + # using this vlan on the Nexus switch. + if auto_delete: + try: + nxos_db.get_nexusvlan_binding(row.vlan_id, + row.switch_ip) + except cisco_exc.NexusPortBindingNotFound: + # Delete this vlan from this switch + self._client.delete_vlan(switch_ip, row.vlan_id) + except Exception: + # The delete vlan operation on the Nexus failed, + # so this delete_port request has failed. 
For + # consistency, roll back the Nexus database to what + # it was before this request. + with excutils.save_and_reraise_exception(): + nxos_db.add_nexusport_binding(row.port_id, + row.vlan_id, + row.switch_ip, + row.instance_id) + + return instance_id + + def update_port(self, tenant_id, net_id, port_id, port_state, **kwargs): + """Update port. + + Not applicable to Nexus plugin. Defined here to satisfy abstract + method requirements. + """ + LOG.debug(_("NexusPlugin:update_port() called")) # pragma no cover + + def plug_interface(self, tenant_id, net_id, port_id, remote_interface_id, + **kwargs): + """Plug interfaces. + + Not applicable to Nexus plugin. Defined here to satisfy abstract + method requirements. + """ + LOG.debug(_("NexusPlugin:plug_interface() called")) # pragma no cover + + def unplug_interface(self, tenant_id, net_id, port_id, **kwargs): + """Unplug interface. + + Not applicable to Nexus plugin. Defined here to satisfy abstract + method requirements. + """ + LOG.debug(_("NexusPlugin:unplug_interface() called") + ) # pragma no cover diff --git a/neutron/plugins/cisco/nexus/cisco_nexus_snippets.py b/neutron/plugins/cisco/nexus/cisco_nexus_snippets.py new file mode 100644 index 000000000..90d265443 --- /dev/null +++ b/neutron/plugins/cisco/nexus/cisco_nexus_snippets.py @@ -0,0 +1,180 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2011 Cisco Systems, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# +# @author: Edgar Magana, Cisco Systems, Inc. +# @author: Arvind Somya (asomya@cisco.com) Cisco Systems, Inc. + +""" +Nexus-OS XML-based configuration snippets +""" + +import logging + + +LOG = logging.getLogger(__name__) + + +# The following are standard strings, messages used to communicate with Nexus, +EXEC_CONF_SNIPPET = """ + + + <__XML__MODE__exec_configure>%s + + + +""" + +CMD_VLAN_CONF_SNIPPET = """ + + + <__XML__PARAM_value>%s + <__XML__MODE_vlan> + + %s + + + + +""" + +CMD_VLAN_ACTIVE_SNIPPET = """ + + + <__XML__PARAM_value>%s + <__XML__MODE_vlan> + + active + + + + +""" + +CMD_VLAN_NO_SHUTDOWN_SNIPPET = """ + + + <__XML__PARAM_value>%s + <__XML__MODE_vlan> + + + + + + +""" + +CMD_NO_VLAN_CONF_SNIPPET = """ + + + + <__XML__PARAM_value>%s + + + +""" + +CMD_INT_VLAN_HEADER = """ + + <%s> + %s + <__XML__MODE_if-ethernet-switch> + + + + """ + +CMD_VLAN_ID = """ + %s""" + +CMD_VLAN_ADD_ID = """ + %s + """ % CMD_VLAN_ID + +CMD_INT_VLAN_TRAILER = """ + + + + + + + +""" + +CMD_INT_VLAN_SNIPPET = (CMD_INT_VLAN_HEADER + + CMD_VLAN_ID + + CMD_INT_VLAN_TRAILER) + +CMD_INT_VLAN_ADD_SNIPPET = (CMD_INT_VLAN_HEADER + + CMD_VLAN_ADD_ID + + CMD_INT_VLAN_TRAILER) + +CMD_NO_VLAN_INT_SNIPPET = """ + + <%s> + %s + <__XML__MODE_if-ethernet-switch> + + + + + + + %s + + + + + + + + +""" + +FILTER_SHOW_VLAN_BRIEF_SNIPPET = """ + + + + + +""" + +CMD_VLAN_SVI_SNIPPET = """ + + + %s + <__XML__MODE_vlan> + + + + +
+
%s
+
+
+ +
+
+""" + +CMD_NO_VLAN_SVI_SNIPPET = """ + + + + %s + + + +""" diff --git a/neutron/plugins/cisco/test/__init__.py b/neutron/plugins/cisco/test/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/neutron/plugins/cisco/test/nexus/__init__.py b/neutron/plugins/cisco/test/nexus/__init__.py new file mode 100644 index 000000000..a68ed41ea --- /dev/null +++ b/neutron/plugins/cisco/test/nexus/__init__.py @@ -0,0 +1,19 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import __builtin__ +setattr(__builtin__, '_', lambda x: x) diff --git a/neutron/plugins/cisco/test/nexus/fake_nexus_driver.py b/neutron/plugins/cisco/test/nexus/fake_nexus_driver.py new file mode 100644 index 000000000..b40cbef14 --- /dev/null +++ b/neutron/plugins/cisco/test/nexus/fake_nexus_driver.py @@ -0,0 +1,101 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2012 Cisco Systems, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Sumit Naiksatam, Cisco Systems, Inc. +# @author: Rohit Agarwalla, Cisco Systems, Inc. + + +class CiscoNEXUSFakeDriver(): + """Nexus Driver Fake Class.""" + + def __init__(self): + pass + + def nxos_connect(self, nexus_host, nexus_ssh_port, nexus_user, + nexus_password): + """Make the fake connection to the Nexus Switch.""" + pass + + def create_xml_snippet(self, cutomized_config): + """Create XML snippet. + + Creates the Proper XML structure for the Nexus Switch + Configuration. + """ + pass + + def enable_vlan(self, mgr, vlanid, vlanname): + """Create a VLAN on Nexus Switch given the VLAN ID and Name.""" + pass + + def disable_vlan(self, mgr, vlanid): + """Delete a VLAN on Nexus Switch given the VLAN ID.""" + pass + + def disable_switch_port(self, mgr, interface): + """Disable trunk mode an interface on Nexus Switch.""" + pass + + def enable_vlan_on_trunk_int(self, mgr, etype, interface, vlanid): + """Enable vlan on trunk interface. + + Enable trunk mode vlan access an interface on Nexus Switch given + VLANID. + """ + pass + + def disable_vlan_on_trunk_int(self, mgr, interface, vlanid): + """Disables vlan in trunk interface. + + Enables trunk mode vlan access an interface on Nexus Switch given + VLANID. + """ + pass + + def create_vlan(self, vlan_name, vlan_id, nexus_host, nexus_user, + nexus_password, nexus_ports, nexus_ssh_port, vlan_ids): + """Create VLAN and enable it on interface. + + Creates a VLAN and Enable on trunk mode an interface on Nexus Switch + given the VLAN ID and Name and Interface Number. + """ + pass + + def delete_vlan(self, vlan_id, nexus_host, nexus_user, nexus_password, + nexus_ports, nexus_ssh_port): + """Delete VLAN. + + Delete a VLAN and Disables trunk mode an interface on Nexus Switch + given the VLAN ID and Interface Number. 
+ """ + pass + + def build_vlans_cmd(self): + """Build a string with all the VLANs on the same Switch.""" + pass + + def add_vlan_int(self, vlan_id, nexus_host, nexus_user, nexus_password, + nexus_ports, nexus_ssh_port, vlan_ids=None): + """Add a vlan from interfaces on the Nexus switch given the VLAN ID.""" + pass + + def remove_vlan_int(self, vlan_id, nexus_host, nexus_user, nexus_password, + nexus_ports, nexus_ssh_port): + """Remove vlan from interfaces. + + Removes a vlan from interfaces on the Nexus switch given the VLAN ID. + """ + pass diff --git a/neutron/plugins/common/__init__.py b/neutron/plugins/common/__init__.py new file mode 100644 index 000000000..7e503debd --- /dev/null +++ b/neutron/plugins/common/__init__.py @@ -0,0 +1,16 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/neutron/plugins/common/constants.py b/neutron/plugins/common/constants.py new file mode 100644 index 000000000..366945ad0 --- /dev/null +++ b/neutron/plugins/common/constants.py @@ -0,0 +1,85 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +# service type constants: +CORE = "CORE" +DUMMY = "DUMMY" +LOADBALANCER = "LOADBALANCER" +FIREWALL = "FIREWALL" +VPN = "VPN" +METERING = "METERING" +L3_ROUTER_NAT = "L3_ROUTER_NAT" + + +#maps extension alias to service type +EXT_TO_SERVICE_MAPPING = { + 'dummy': DUMMY, + 'lbaas': LOADBALANCER, + 'fwaas': FIREWALL, + 'vpnaas': VPN, + 'metering': METERING, + 'router': L3_ROUTER_NAT +} + +# TODO(salvatore-orlando): Move these (or derive them) from conf file +ALLOWED_SERVICES = [CORE, DUMMY, LOADBALANCER, FIREWALL, VPN, METERING, + L3_ROUTER_NAT] + +COMMON_PREFIXES = { + CORE: "", + DUMMY: "/dummy_svc", + LOADBALANCER: "/lb", + FIREWALL: "/fw", + VPN: "/vpn", + METERING: "/metering", + L3_ROUTER_NAT: "", +} + +# Service operation status constants +ACTIVE = "ACTIVE" +DOWN = "DOWN" +PENDING_CREATE = "PENDING_CREATE" +PENDING_UPDATE = "PENDING_UPDATE" +PENDING_DELETE = "PENDING_DELETE" +INACTIVE = "INACTIVE" +ERROR = "ERROR" + +ACTIVE_PENDING_STATUSES = ( + ACTIVE, + PENDING_CREATE, + PENDING_UPDATE +) + +# FWaaS firewall rule action +FWAAS_ALLOW = "allow" +FWAAS_DENY = "deny" + +# L3 Protocol name constants +TCP = "tcp" +UDP = "udp" +ICMP = "icmp" + +# Network Type constants +TYPE_FLAT = 'flat' +TYPE_GRE = 'gre' +TYPE_LOCAL = 'local' +TYPE_VXLAN = 'vxlan' +TYPE_VLAN = 'vlan' +TYPE_NONE = 'none' + +# The maximum length of an interface name (in Linux) +MAX_DEV_NAME_LEN = 16 diff --git a/neutron/plugins/common/utils.py b/neutron/plugins/common/utils.py new file mode 100644 index 000000000..fce131234 --- /dev/null +++ b/neutron/plugins/common/utils.py @@ -0,0 
+1,69 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 Cisco Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Common utilities and helper functions for Openstack Networking Plugins. +""" + +from neutron.common import exceptions as n_exc +from neutron.common import utils +from neutron.plugins.common import constants + + +def verify_vlan_range(vlan_range): + """Raise an exception for invalid tags or malformed range.""" + for vlan_tag in vlan_range: + if not utils.is_valid_vlan_tag(vlan_tag): + raise n_exc.NetworkVlanRangeError( + vlan_range=vlan_range, + error=_("%s is not a valid VLAN tag") % vlan_tag) + if vlan_range[1] < vlan_range[0]: + raise n_exc.NetworkVlanRangeError( + vlan_range=vlan_range, + error=_("End of VLAN range is less than start of VLAN range")) + + +def parse_network_vlan_range(network_vlan_range): + """Interpret a string as network[:vlan_begin:vlan_end].""" + entry = network_vlan_range.strip() + if ':' in entry: + try: + network, vlan_min, vlan_max = entry.split(':') + vlan_range = (int(vlan_min), int(vlan_max)) + except ValueError as ex: + raise n_exc.NetworkVlanRangeError(vlan_range=entry, error=ex) + verify_vlan_range(vlan_range) + return network, vlan_range + else: + return entry, None + + +def parse_network_vlan_ranges(network_vlan_ranges_cfg_entries): + """Interpret a list of strings as network[:vlan_begin:vlan_end] entries.""" + networks = {} + for entry in network_vlan_ranges_cfg_entries: + network, 
vlan_range = parse_network_vlan_range(entry) + if vlan_range: + networks.setdefault(network, []).append(vlan_range) + else: + networks.setdefault(network, []) + return networks + + +def in_pending_status(status): + return status in (constants.PENDING_CREATE, + constants.PENDING_UPDATE, + constants.PENDING_DELETE) diff --git a/neutron/plugins/embrane/README b/neutron/plugins/embrane/README new file mode 100644 index 000000000..15ad1abbd --- /dev/null +++ b/neutron/plugins/embrane/README @@ -0,0 +1,9 @@ +Embrane Neutron Plugin + +This plugin interfaces OpenStack Neutron with Embrane's heleos platform, which +provides layer 3-7 network services for cloud environments. + +L2 connectivity is leveraged by one of the supported existing plugins. + +For more details on use, configuration and implementation please refer to: +http://wiki.openstack.org/wiki/Neutron/EmbraneNeutronPlugin \ No newline at end of file diff --git a/neutron/plugins/embrane/__init__.py b/neutron/plugins/embrane/__init__.py new file mode 100644 index 000000000..1fac4725b --- /dev/null +++ b/neutron/plugins/embrane/__init__.py @@ -0,0 +1,18 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 Embrane, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Ivar Lazzaro, Embrane, Inc. 
diff --git a/neutron/plugins/embrane/agent/__init__.py b/neutron/plugins/embrane/agent/__init__.py new file mode 100644 index 000000000..1fac4725b --- /dev/null +++ b/neutron/plugins/embrane/agent/__init__.py @@ -0,0 +1,18 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 Embrane, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Ivar Lazzaro, Embrane, Inc. diff --git a/neutron/plugins/embrane/agent/dispatcher.py b/neutron/plugins/embrane/agent/dispatcher.py new file mode 100644 index 000000000..121abe9ac --- /dev/null +++ b/neutron/plugins/embrane/agent/dispatcher.py @@ -0,0 +1,134 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 Embrane, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Ivar Lazzaro, Embrane, Inc. 
+ +from eventlet import greenthread +from eventlet import queue +from heleosapi import constants as h_con +from heleosapi import exceptions as h_exc + +from neutron.openstack.common import log as logging +from neutron.plugins.embrane.agent.operations import router_operations +from neutron.plugins.embrane.common import constants as p_con +from neutron.plugins.embrane.common import contexts as ctx + +LOG = logging.getLogger(__name__) + + +class Dispatcher(object): + + def __init__(self, plugin, async=True): + self._async = async + self._plugin = plugin + self.sync_items = dict() + + def dispatch_l3(self, d_context, args=(), kwargs={}): + item = d_context.item + event = d_context.event + n_context = d_context.n_context + chain = d_context.chain + + item_id = item["id"] + handlers = router_operations.handlers + if event in handlers: + for f in handlers[event]: + first_run = False + if item_id not in self.sync_items: + self.sync_items[item_id] = (queue.Queue(),) + first_run = True + self.sync_items[item_id][0].put( + ctx.OperationContext(event, n_context, item, chain, f, + args, kwargs)) + t = None + if first_run: + t = greenthread.spawn(self._consume_l3, + item_id, + self.sync_items[item_id][0], + self._plugin, + self._async) + self.sync_items[item_id] += (t,) + if not self._async: + t = self.sync_items[item_id][1] + t.wait() + + def _consume_l3(self, sync_item, sync_queue, plugin, a_sync): + current_state = None + while True: + try: + # If the DVA is deleted, the thread (and the associated queue) + # can die as well + if current_state == p_con.Status.DELETED: + del self.sync_items[sync_item] + return + try: + # If synchronous op, empty the queue as fast as possible + operation_context = sync_queue.get( + block=a_sync, + timeout=p_con.QUEUE_TIMEOUT) + except queue.Empty: + del self.sync_items[sync_item] + return + # Execute the preliminary operations + (operation_context.chain and + operation_context.chain.execute_all()) + # Execute the main operation, a transient 
state is maintained + # so that the consumer can decide if it has + # to be burned to the DB + transient_state = None + try: + dva_state = operation_context.function( + plugin._esm_api, + operation_context.n_context.tenant_id, + operation_context.item, + *operation_context.args, + **operation_context.kwargs) + if dva_state == p_con.Status.DELETED: + transient_state = dva_state + else: + if not dva_state: + transient_state = p_con.Status.ERROR + elif dva_state == h_con.DvaState.POWER_ON: + transient_state = p_con.Status.ACTIVE + else: + transient_state = p_con.Status.READY + + except (h_exc.PendingDva, h_exc.DvaNotFound, + h_exc.BrokenInterface, h_exc.DvaCreationFailed, + h_exc.DvaCreationPending, h_exc.BrokenDva, + h_exc.ConfigurationFailed) as ex: + LOG.warning(p_con.error_map[type(ex)] % ex.message) + transient_state = p_con.Status.ERROR + except h_exc.DvaDeleteFailed as ex: + LOG.warning(p_con.error_map[type(ex)] % ex.message) + transient_state = p_con.Status.DELETED + finally: + # if the returned transient state is None, no operations + # are required on the DVA status + if transient_state: + if transient_state == p_con.Status.DELETED: + current_state = plugin._delete_router( + operation_context.n_context, + operation_context.item["id"]) + # Error state cannot be reverted + elif transient_state != p_con.Status.ERROR: + current_state = plugin._update_neutron_state( + operation_context.n_context, + operation_context.item, + transient_state) + except Exception: + LOG.exception(_("Unhandled exception occurred")) diff --git a/neutron/plugins/embrane/agent/operations/__init__.py b/neutron/plugins/embrane/agent/operations/__init__.py new file mode 100644 index 000000000..1fac4725b --- /dev/null +++ b/neutron/plugins/embrane/agent/operations/__init__.py @@ -0,0 +1,18 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 Embrane, Inc. +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Ivar Lazzaro, Embrane, Inc. diff --git a/neutron/plugins/embrane/agent/operations/router_operations.py b/neutron/plugins/embrane/agent/operations/router_operations.py new file mode 100644 index 000000000..a9d35bfd4 --- /dev/null +++ b/neutron/plugins/embrane/agent/operations/router_operations.py @@ -0,0 +1,156 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 Embrane, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Ivar Lazzaro, Embrane, Inc. 
+ +import functools + +from heleosapi import exceptions as h_exc + +from neutron.openstack.common import log as logging +from neutron.plugins.embrane.common import constants as p_con + +LOG = logging.getLogger(__name__) +handlers = dict() + + +def handler(event, handler): + def wrap(f): + if event not in handler.keys(): + new_func_list = [f] + handler[event] = new_func_list + else: + handler[event].append(f) + + @functools.wraps(f) + def wrapped_f(*args, **kwargs): + return f(*args, **kwargs) + return wrapped_f + return wrap + + +@handler(p_con.Events.CREATE_ROUTER, handlers) +def _create_dva_and_assign_address(api, tenant_id, neutron_router, + flavor, utif_info=None, + ip_allocation_info=None): + """Creates a new router, and assign the gateway interface if any.""" + + dva = api.create_router(tenant_id=tenant_id, + router_id=neutron_router["id"], + name=neutron_router["name"], + flavor=flavor, + up=neutron_router["admin_state_up"]) + try: + if utif_info: + api.grow_interface(utif_info, neutron_router["admin_state_up"], + tenant_id, neutron_router["id"]) + if ip_allocation_info: + dva = api.allocate_address(neutron_router["id"], + neutron_router["admin_state_up"], + ip_allocation_info) + except h_exc.PreliminaryOperationsFailed as ex: + raise h_exc.BrokenInterface(err_msg=ex.message) + + state = api.extract_dva_state(dva) + return state + + +@handler(p_con.Events.UPDATE_ROUTER, handlers) +def _update_dva_and_assign_address(api, tenant_id, neutron_router, + utif_info=None, ip_allocation_info=None, + routes_info=[]): + name = neutron_router["name"] + up = neutron_router["admin_state_up"] + r_id = neutron_router["id"] + if ip_allocation_info or routes_info: + up = True + dva = api.update_dva(tenant_id=tenant_id, router_id=r_id, name=name, + up=up, utif_info=utif_info) + if ip_allocation_info: + api.allocate_address(r_id, up, ip_allocation_info) + + if routes_info: + api.delete_extra_routes(r_id, up) + api.set_extra_routes(r_id, neutron_router["admin_state_up"], + 
routes_info) + + return api.extract_dva_state(dva) + + +@handler(p_con.Events.DELETE_ROUTER, handlers) +def _delete_dva(api, tenant_id, neutron_router): + try: + api.delete_dva(tenant_id, neutron_router["id"]) + except h_exc.DvaNotFound: + LOG.warning(_("The router %s had no physical representation," + "likely already deleted"), neutron_router["id"]) + return p_con.Status.DELETED + + +@handler(p_con.Events.GROW_ROUTER_IF, handlers) +def _grow_dva_iface_and_assign_address(api, tenant_id, neutron_router, + utif_info=None, + ip_allocation_info=None): + try: + dva = api.grow_interface(utif_info, neutron_router["admin_state_up"], + tenant_id, neutron_router["id"]) + if ip_allocation_info: + dva = api.allocate_address(neutron_router["id"], + neutron_router["admin_state_up"], + ip_allocation_info) + except h_exc.PreliminaryOperationsFailed as ex: + raise h_exc.BrokenInterface(err_msg=ex.message) + + state = api.extract_dva_state(dva) + return state + + +@handler(p_con.Events.SHRINK_ROUTER_IF, handlers) +def _shrink_dva_iface(api, tenant_id, neutron_router, port_id): + try: + dva = api.shrink_interface(tenant_id, neutron_router["id"], + neutron_router["admin_state_up"], port_id) + except h_exc.InterfaceNotFound: + LOG.warning(_("Interface %s not found in the heleos back-end," + "likely already deleted"), port_id) + return (p_con.Status.ACTIVE if neutron_router["admin_state_up"] else + p_con.Status.READY) + except h_exc.PreliminaryOperationsFailed as ex: + raise h_exc.BrokenInterface(err_msg=ex.message) + state = api.extract_dva_state(dva) + return state + + +@handler(p_con.Events.SET_NAT_RULE, handlers) +def _create_nat_rule(api, tenant_id, neutron_router, nat_info=None): + + dva = api.create_nat_entry(neutron_router["id"], + neutron_router["admin_state_up"], nat_info) + + state = api.extract_dva_state(dva) + return state + + +@handler(p_con.Events.RESET_NAT_RULE, handlers) +def _delete_nat_rule(api, tenant_id, neutron_router, floating_ip_id): + + dva = 
api.remove_nat_entry(neutron_router["id"], + neutron_router["admin_state_up"], + floating_ip_id) + + state = api.extract_dva_state(dva) + return state diff --git a/neutron/plugins/embrane/base_plugin.py b/neutron/plugins/embrane/base_plugin.py new file mode 100644 index 000000000..33d213888 --- /dev/null +++ b/neutron/plugins/embrane/base_plugin.py @@ -0,0 +1,375 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 Embrane, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Ivar Lazzaro, Embrane, Inc. 
+ +from heleosapi import backend_operations as h_op +from heleosapi import constants as h_con +from heleosapi import exceptions as h_exc +from oslo.config import cfg +from sqlalchemy.orm import exc + +from neutron.common import constants as l3_constants +from neutron.common import exceptions as neutron_exc +from neutron.db import extraroute_db +from neutron.db import l3_db +from neutron.db import models_v2 +from neutron.extensions import l3 +from neutron.openstack.common import log as logging +from neutron.plugins.embrane.agent import dispatcher +from neutron.plugins.embrane.common import config # noqa +from neutron.plugins.embrane.common import constants as p_con +from neutron.plugins.embrane.common import contexts as embrane_ctx +from neutron.plugins.embrane.common import operation +from neutron.plugins.embrane.common import utils + +LOG = logging.getLogger(__name__) +conf = cfg.CONF.heleos + + +class EmbranePlugin(object): + """Embrane Neutron plugin. + + uses the heleos(c) platform and a support L2 plugin to leverage networking + in cloud environments. 
+ + """ + _l3super = extraroute_db.ExtraRoute_db_mixin + + def __init__(self): + pass + + def _run_embrane_config(self): + # read configurations + config_esm_mgmt = conf.esm_mgmt + config_admin_username = conf.admin_username + config_admin_password = conf.admin_password + config_router_image_id = conf.router_image + config_security_zones = {h_con.SzType.IB: conf.inband_id, + h_con.SzType.OOB: conf.oob_id, + h_con.SzType.MGMT: conf.mgmt_id, + h_con.SzType.DUMMY: conf.dummy_utif_id} + config_resource_pool = conf.resource_pool_id + self._embrane_async = conf.async_requests + self._esm_api = h_op.BackendOperations( + esm_mgmt=config_esm_mgmt, + admin_username=config_admin_username, + admin_password=config_admin_password, + router_image_id=config_router_image_id, + security_zones=config_security_zones, + resource_pool=config_resource_pool) + self._dispatcher = dispatcher.Dispatcher(self, self._embrane_async) + + def _make_router_dict(self, *args, **kwargs): + return self._l3super._make_router_dict(self, *args, **kwargs) + + def _delete_router(self, context, router_id): + self._l3super.delete_router(self, context, router_id) + + def _update_db_router_state(self, context, neutron_router, dva_state): + if not dva_state: + new_state = p_con.Status.ERROR + elif dva_state == h_con.DvaState.POWER_ON: + new_state = p_con.Status.ACTIVE + else: + new_state = p_con.Status.READY + self._set_db_router_state(context, neutron_router, new_state) + return new_state + + def _set_db_router_state(self, context, neutron_router, new_state): + return utils.set_db_item_state(context, neutron_router, new_state) + + def _update_db_interfaces_state(self, context, neutron_router): + router_ports = self.get_ports(context, + {"device_id": [neutron_router["id"]]}) + self._esm_api.update_ports_status(neutron_router["id"], router_ports) + for port in router_ports: + db_port = self._get_port(context, port["id"]) + db_port["status"] = port["status"] + context.session.merge(db_port) + + def 
_update_neutron_state(self, context, neutron_router, state): + try: + self._update_db_interfaces_state(context, neutron_router) + except Exception: + LOG.exception(_("Unhandled exception occurred")) + return self._set_db_router_state(context, neutron_router, state) + + def _retrieve_prefix_from_port(self, context, neutron_port): + subnet_id = neutron_port["fixed_ips"][0]["subnet_id"] + subnet = utils.retrieve_subnet(context, subnet_id) + prefix = subnet["cidr"].split("/")[1] + return prefix + + # L3 extension + def create_router(self, context, router): + r = router["router"] + self._get_tenant_id_for_create(context, r) + db_router = self._l3super.create_router(self, context, router) + neutron_router = self._get_router(context, db_router['id']) + gw_port = neutron_router.gw_port + # For now, only small flavor is used + utif_info = (self._plugin_support.retrieve_utif_info(context, + gw_port) + if gw_port else None) + ip_allocation_info = (utils.retrieve_ip_allocation_info(context, + gw_port) + if gw_port else None) + neutron_router = self._l3super._get_router(self, context, + neutron_router["id"]) + neutron_router["status"] = p_con.Status.CREATING + self._dispatcher.dispatch_l3( + d_context=embrane_ctx.DispatcherContext( + p_con.Events.CREATE_ROUTER, neutron_router, context, None), + args=(h_con.Flavor.SMALL, utif_info, ip_allocation_info)) + return self._make_router_dict(neutron_router) + + def update_router(self, context, id, router): + db_router = self._l3super.update_router(self, context, id, router) + neutron_router = self._get_router(context, db_router['id']) + gw_port = neutron_router.gw_port + utif_info = (self._plugin_support.retrieve_utif_info(context, + gw_port) + if gw_port else None) + ip_allocation_info = (utils.retrieve_ip_allocation_info(context, + gw_port) + if gw_port else None) + + routes_info = router["router"].get("routes") + + neutron_router = self._l3super._get_router(self, context, id) + state_change = operation.Operation( + 
LOG.error(_("The following routers have no physical match: %s"), + id)
+ device_owner_router_intf = l3_constants.DEVICE_OWNER_ROUTER_INTF + fips = self.get_floatingips_count(context.elevated(), + filters={"router_id": [id]}) + if fips: + raise l3.RouterInUse(router_id=id) + + device_filter = {"device_id": [id], + "device_owner": [device_owner_router_intf]} + ports = self.get_ports_count(context.elevated(), + filters=device_filter) + if ports: + raise l3.RouterInUse(router_id=id) + neutron_router = self._get_router(context, id) + state_change = operation.Operation(self._set_db_router_state, + args=(context, neutron_router, + p_con.Status.DELETING)) + self._dispatcher.dispatch_l3( + d_context=embrane_ctx.DispatcherContext( + p_con.Events.DELETE_ROUTER, neutron_router, context, + state_change), args=()) + LOG.debug(_("Deleting router=%s"), neutron_router) + return neutron_router + + def add_router_interface(self, context, router_id, interface_info): + """Grows DVA interface in the specified subnet.""" + neutron_router = self._get_router(context, router_id) + rport_qry = context.session.query(models_v2.Port) + ports = rport_qry.filter_by( + device_id=router_id).all() + if len(ports) >= p_con.UTIF_LIMIT: + raise neutron_exc.BadRequest( + resource=router_id, + msg=("this router doesn't support more than " + + str(p_con.UTIF_LIMIT) + " interfaces")) + neutron_router_iface = self._l3super.add_router_interface( + self, context, router_id, interface_info) + port = self._get_port(context, neutron_router_iface["port_id"]) + utif_info = self._plugin_support.retrieve_utif_info(context, port) + ip_allocation_info = utils.retrieve_ip_allocation_info(context, + port) + state_change = operation.Operation(self._set_db_router_state, + args=(context, neutron_router, + p_con.Status.UPDATING)) + self._dispatcher.dispatch_l3( + d_context=embrane_ctx.DispatcherContext( + p_con.Events.GROW_ROUTER_IF, neutron_router, context, + state_change), + args=(utif_info, ip_allocation_info)) + return neutron_router_iface + + def remove_router_interface(self, context, 
router_id, interface_info): + port_id = None + if "port_id" in interface_info: + port_id = interface_info["port_id"] + elif "subnet_id" in interface_info: + subnet_id = interface_info["subnet_id"] + subnet = utils.retrieve_subnet(context, subnet_id) + rport_qry = context.session.query(models_v2.Port) + ports = rport_qry.filter_by( + device_id=router_id, + device_owner=l3_constants.DEVICE_OWNER_ROUTER_INTF, + network_id=subnet["network_id"]) + for p in ports: + if p["fixed_ips"][0]["subnet_id"] == subnet_id: + port_id = p["id"] + break + neutron_router = self._get_router(context, router_id) + self._l3super.remove_router_interface(self, context, router_id, + interface_info) + state_change = operation.Operation(self._set_db_router_state, + args=(context, neutron_router, + p_con.Status.UPDATING)) + self._dispatcher.dispatch_l3( + d_context=embrane_ctx.DispatcherContext( + p_con.Events.SHRINK_ROUTER_IF, neutron_router, context, + state_change), + args=(port_id,)) + + def create_floatingip(self, context, floatingip): + result = self._l3super.create_floatingip( + self, context, floatingip) + + if result["port_id"]: + neutron_router = self._get_router(context, result["router_id"]) + db_fixed_port = self._get_port(context, result["port_id"]) + fixed_prefix = self._retrieve_prefix_from_port(context, + db_fixed_port) + db_floating_port = neutron_router["gw_port"] + floating_prefix = self._retrieve_prefix_from_port( + context, db_floating_port) + nat_info = utils.retrieve_nat_info(context, result, + fixed_prefix, + floating_prefix, + neutron_router) + state_change = operation.Operation( + self._set_db_router_state, + args=(context, neutron_router, p_con.Status.UPDATING)) + + self._dispatcher.dispatch_l3( + d_context=embrane_ctx.DispatcherContext( + p_con.Events.SET_NAT_RULE, neutron_router, context, + state_change), + args=(nat_info,)) + return result + + def update_floatingip(self, context, id, floatingip): + db_fip = self._l3super.get_floatingip(self, context, id) + result = 
self._l3super.update_floatingip(self, context, id, + floatingip) + + if db_fip["port_id"] and db_fip["port_id"] != result["port_id"]: + neutron_router = self._get_router(context, db_fip["router_id"]) + fip_id = db_fip["id"] + state_change = operation.Operation( + self._set_db_router_state, + args=(context, neutron_router, p_con.Status.UPDATING)) + + self._dispatcher.dispatch_l3( + d_context=embrane_ctx.DispatcherContext( + p_con.Events.RESET_NAT_RULE, neutron_router, context, + state_change), + args=(fip_id,)) + if result["port_id"]: + neutron_router = self._get_router(context, result["router_id"]) + db_fixed_port = self._get_port(context, result["port_id"]) + fixed_prefix = self._retrieve_prefix_from_port(context, + db_fixed_port) + db_floating_port = neutron_router["gw_port"] + floating_prefix = self._retrieve_prefix_from_port( + context, db_floating_port) + nat_info = utils.retrieve_nat_info(context, result, + fixed_prefix, + floating_prefix, + neutron_router) + state_change = operation.Operation( + self._set_db_router_state, + args=(context, neutron_router, p_con.Status.UPDATING)) + + self._dispatcher.dispatch_l3( + d_context=embrane_ctx.DispatcherContext( + p_con.Events.SET_NAT_RULE, neutron_router, context, + state_change), + args=(nat_info,)) + return result + + def disassociate_floatingips(self, context, port_id): + try: + fip_qry = context.session.query(l3_db.FloatingIP) + floating_ip = fip_qry.filter_by(fixed_port_id=port_id).one() + router_id = floating_ip["router_id"] + except exc.NoResultFound: + return + self._l3super.disassociate_floatingips(self, context, port_id) + if router_id: + neutron_router = self._get_router(context, router_id) + fip_id = floating_ip["id"] + state_change = operation.Operation( + self._set_db_router_state, + args=(context, neutron_router, p_con.Status.UPDATING)) + + self._dispatcher.dispatch_l3( + d_context=embrane_ctx.DispatcherContext( + p_con.Events.RESET_NAT_RULE, neutron_router, context, + state_change), + args=(fip_id,)) 
diff --git a/neutron/plugins/embrane/common/__init__.py b/neutron/plugins/embrane/common/__init__.py new file mode 100644 index 000000000..1fac4725b --- /dev/null +++ b/neutron/plugins/embrane/common/__init__.py @@ -0,0 +1,18 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 Embrane, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Ivar Lazzaro, Embrane, Inc. diff --git a/neutron/plugins/embrane/common/config.py b/neutron/plugins/embrane/common/config.py new file mode 100644 index 000000000..54c9153f3 --- /dev/null +++ b/neutron/plugins/embrane/common/config.py @@ -0,0 +1,49 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 Embrane, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Ivar Lazzaro, Embrane, Inc. 
+ +from oslo.config import cfg + + +heleos_opts = [ + cfg.StrOpt('esm_mgmt', + help=_('ESM management root address')), + cfg.StrOpt('admin_username', default='admin', + help=_('ESM admin username.')), + cfg.StrOpt('admin_password', + secret=True, + help=_('ESM admin password.')), + cfg.StrOpt('router_image', + help=_('Router image id (Embrane FW/VPN)')), + cfg.StrOpt('inband_id', + help=_('In band Security Zone id')), + cfg.StrOpt('oob_id', + help=_('Out of band Security Zone id')), + cfg.StrOpt('mgmt_id', + help=_('Management Security Zone id')), + cfg.StrOpt('dummy_utif_id', + help=_('Dummy user traffic Security Zone id')), + cfg.StrOpt('resource_pool_id', default='default', + help=_('Shared resource pool id')), + cfg.BoolOpt('async_requests', default=True, + help=_('Define if the requests have ' + 'run asynchronously or not')), +] + + +cfg.CONF.register_opts(heleos_opts, "heleos") diff --git a/neutron/plugins/embrane/common/constants.py b/neutron/plugins/embrane/common/constants.py new file mode 100644 index 000000000..65f3818a2 --- /dev/null +++ b/neutron/plugins/embrane/common/constants.py @@ -0,0 +1,72 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 Embrane, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Ivar Lazzaro, Embrane, Inc. 
+ +from heleosapi import exceptions as h_exc + +from neutron.plugins.common import constants + + +# Router specific constants +UTIF_LIMIT = 7 +QUEUE_TIMEOUT = 300 + + +class Status: + # Transient + CREATING = constants.PENDING_CREATE + UPDATING = constants.PENDING_UPDATE + DELETING = constants.PENDING_DELETE + # Final + ACTIVE = constants.ACTIVE + ERROR = constants.ERROR + READY = constants.INACTIVE + DELETED = "DELETED" # not visible + + +class Events: + CREATE_ROUTER = "create_router" + UPDATE_ROUTER = "update_router" + DELETE_ROUTER = "delete_router" + GROW_ROUTER_IF = "grow_router_if" + SHRINK_ROUTER_IF = "shrink_router_if" + SET_NAT_RULE = "set_nat_rule" + RESET_NAT_RULE = "reset_nat_rule" + +_DVA_PENDING_ERROR_MSG = _("Dva is pending for the following reason: %s") +_DVA_NOT_FOUNT_ERROR_MSG = _("Dva can't be found to execute the operation, " + "probably was cancelled through the heleos UI") +_DVA_BROKEN_ERROR_MSG = _("Dva seems to be broken for reason %s") +_DVA_BROKEN_INTERFACE_ERROR_MSG = _("Dva interface seems to be broken " + "for reason %s") +_DVA_CREATION_FAILED_ERROR_MSG = _("Dva creation failed reason %s") +_DVA_CREATION_PENDING_ERROR_MSG = _("Dva creation is in pending state " + "for reason %s") +_CFG_FAILED_ERROR_MSG = _("Dva configuration failed for reason %s") +_DVA_DEL_FAILED_ERROR_MSG = _("Failed to delete the backend " + "router for reason %s. 
Please remove " + "it manually through the heleos UI") + +error_map = {h_exc.PendingDva: _DVA_PENDING_ERROR_MSG, + h_exc.DvaNotFound: _DVA_NOT_FOUNT_ERROR_MSG, + h_exc.BrokenDva: _DVA_BROKEN_ERROR_MSG, + h_exc.BrokenInterface: _DVA_BROKEN_INTERFACE_ERROR_MSG, + h_exc.DvaCreationFailed: _DVA_CREATION_FAILED_ERROR_MSG, + h_exc.DvaCreationPending: _DVA_CREATION_PENDING_ERROR_MSG, + h_exc.ConfigurationFailed: _CFG_FAILED_ERROR_MSG, + h_exc.DvaDeleteFailed: _DVA_DEL_FAILED_ERROR_MSG} diff --git a/neutron/plugins/embrane/common/contexts.py b/neutron/plugins/embrane/common/contexts.py new file mode 100644 index 000000000..f35a02427 --- /dev/null +++ b/neutron/plugins/embrane/common/contexts.py @@ -0,0 +1,40 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 Embrane, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Ivar Lazzaro, Embrane, Inc. + + +class DispatcherContext(object): + + def __init__(self, event, item, neutron_context, chain=None): + self.event = event + self.item = item + self.n_context = neutron_context + self.chain = chain + + +class OperationContext(DispatcherContext): + """Operational context. 
+class EmbranePluginException(neutron_exec.NeutronException): + message = _("An unexpected error occurred: %(err_msg)s")
+ def execute_all(self): + nextop = self.execute() + while nextop: + nextop = nextop.execute()
See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Ivar Lazzaro, Embrane, Inc. + +from heleosapi import info as h_info + +from neutron.common import constants +from neutron.db import models_v2 +from neutron.openstack.common import log as logging + +LOG = logging.getLogger(__name__) + + +def set_db_item_state(context, neutron_item, new_state): + with context.session.begin(subtransactions=True): + if neutron_item["status"] != new_state: + neutron_item["status"] = new_state + context.session.merge(neutron_item) + + +def retrieve_subnet(context, subnet_id): + return (context.session.query( + models_v2.Subnet).filter(models_v2.Subnet.id == subnet_id).one()) + + +def retrieve_ip_allocation_info(context, neutron_port): + """Retrieves ip allocation info for a specific port if any.""" + + try: + subnet_id = neutron_port["fixed_ips"][0]["subnet_id"] + except (KeyError, IndexError): + LOG.info(_("No ip allocation set")) + return + subnet = retrieve_subnet(context, subnet_id) + allocated_ip = neutron_port["fixed_ips"][0]["ip_address"] + is_gw_port = (neutron_port["device_owner"] == + constants.DEVICE_OWNER_ROUTER_GW) + gateway_ip = subnet["gateway_ip"] + + ip_allocation_info = h_info.IpAllocationInfo( + is_gw=is_gw_port, + ip_version=subnet["ip_version"], + prefix=subnet["cidr"].split("/")[1], + ip_address=allocated_ip, + port_id=neutron_port["id"], + gateway_ip=gateway_ip) + + return ip_allocation_info + + +def retrieve_nat_info(context, fip, fixed_prefix, floating_prefix, router): + nat_info = h_info.NatInfo(source_address=fip["floating_ip_address"], + source_prefix=floating_prefix, + destination_address=fip["fixed_ip_address"], + destination_prefix=fixed_prefix, + floating_ip_id=fip["id"], + fixed_port_id=fip["port_id"]) + return nat_info diff --git a/neutron/plugins/embrane/l2base/__init__.py b/neutron/plugins/embrane/l2base/__init__.py new file mode 100644 index 000000000..1fac4725b --- /dev/null +++ 
b/neutron/plugins/embrane/l2base/__init__.py @@ -0,0 +1,18 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 Embrane, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Ivar Lazzaro, Embrane, Inc. diff --git a/neutron/plugins/embrane/l2base/fake/__init__.py b/neutron/plugins/embrane/l2base/fake/__init__.py new file mode 100644 index 000000000..1fac4725b --- /dev/null +++ b/neutron/plugins/embrane/l2base/fake/__init__.py @@ -0,0 +1,18 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 Embrane, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Ivar Lazzaro, Embrane, Inc. 
diff --git a/neutron/plugins/embrane/l2base/fake/fake_l2_plugin.py b/neutron/plugins/embrane/l2base/fake/fake_l2_plugin.py new file mode 100644 index 000000000..5cf68df28 --- /dev/null +++ b/neutron/plugins/embrane/l2base/fake/fake_l2_plugin.py @@ -0,0 +1,24 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 Embrane, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Ivar Lazzaro, Embrane, Inc. + +from neutron.db import db_base_plugin_v2 + + +class FakeL2Plugin(db_base_plugin_v2.NeutronDbPluginV2): + supported_extension_aliases = [] diff --git a/neutron/plugins/embrane/l2base/fake/fakeplugin_support.py b/neutron/plugins/embrane/l2base/fake/fakeplugin_support.py new file mode 100644 index 000000000..7818d28de --- /dev/null +++ b/neutron/plugins/embrane/l2base/fake/fakeplugin_support.py @@ -0,0 +1,45 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 Embrane, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Ivar Lazzaro, Embrane, Inc. + +from heleosapi import info as h_info + +from neutron.common import constants +from neutron import manager +from neutron.plugins.embrane.l2base import support_base as base + + +class FakePluginSupport(base.SupportBase): + + def __init__(self): + super(FakePluginSupport, self).__init__() + + def retrieve_utif_info(self, context, neutron_port): + plugin = manager.NeutronManager.get_plugin() + network_id = neutron_port["network_id"] + network = plugin._get_network(context, network_id) + is_gw = (neutron_port["device_owner"] == + constants.DEVICE_OWNER_ROUTER_GW) + result = h_info.UtifInfo(vlan=0, + network_name=network["name"], + network_id=network["id"], + is_gw=is_gw, + owner_tenant=network["tenant_id"], + port_id=neutron_port["id"], + mac_address=neutron_port["mac_address"]) + return result diff --git a/neutron/plugins/embrane/l2base/openvswitch/__init__.py b/neutron/plugins/embrane/l2base/openvswitch/__init__.py new file mode 100644 index 000000000..1fac4725b --- /dev/null +++ b/neutron/plugins/embrane/l2base/openvswitch/__init__.py @@ -0,0 +1,18 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 Embrane, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Ivar Lazzaro, Embrane, Inc. 
class OpenvswitchSupport(base.SupportBase):
    """Open vSwitch plugin support.

    Collects the information needed to build the user traffic security
    zones: the VLAN binding of the network, its ownership, and whether
    the port is a router gateway port.
    """

    def __init__(self):
        super(OpenvswitchSupport, self).__init__()

    def retrieve_utif_info(self, context, neutron_port):
        """Return a heleosapi UtifInfo describing *neutron_port*.

        :raises: UtifInfoError when the network has no VLAN binding
        """
        core_plugin = manager.NeutronManager.get_plugin()
        net_id = neutron_port["network_id"]
        binding = ovs_db_v2.get_network_binding(context.session, net_id)
        if not binding["segmentation_id"]:
            raise exc.UtifInfoError(
                err_msg=_("No segmentation_id found for the network, "
                          "please be sure that tenant_network_type is vlan"))
        net = core_plugin._get_network(context, net_id)
        gateway_port = (neutron_port["device_owner"] ==
                        constants.DEVICE_OWNER_ROUTER_GW)
        return h_info.UtifInfo(vlan=binding["segmentation_id"],
                               network_name=net["name"],
                               network_id=net["id"],
                               is_gw=gateway_port,
                               owner_tenant=net["tenant_id"],
                               port_id=neutron_port["id"],
                               mac_address=neutron_port["mac_address"])
@six.add_metaclass(abc.ABCMeta)
class SupportBase(object):
    """Abstract L2 support base class.

    Defines the methods a plugin support should implement to be used as
    the L2 base for the Embrane plugin.
    """

    @abc.abstractmethod
    def __init__(self):
        pass

    @abc.abstractmethod
    def retrieve_utif_info(self, context, neutron_port=None, network=None):
        """Retrieve specific network info.

        Each plugin support, querying its own DB, can collect all the
        information needed by the ESM in order to create the user
        traffic security zone.

        :param context: neutron request context
        :param neutron_port: the port to collect UTIF info for
        :param network: the network to collect UTIF info for
        :returns: heleosapi.info.UtifInfo -- specific network info
        :raises: UtifInfoError
        """
class UtifInfoError(embrane_exc.EmbranePluginException):
    """Raised when UTIF information cannot be collected for a port."""

    message = _("Cannot retrieve utif info for the following reason: "
                "%(err_msg)s")
class EmbraneFakePlugin(base.EmbranePlugin, extraroute_db.ExtraRoute_db_mixin,
                        l2.FakeL2Plugin):
    """Embrane plugin wired on top of the fake L2 plugin."""

    _plugin_support = sup.FakePluginSupport()

    def __init__(self):
        """First run plugin specific initialization, then Embrane's."""
        # `+=` rebinds an instance-level copy extended with the router
        # extensions this plugin provides itself.
        self.supported_extension_aliases += ["extraroute", "router"]
        l2.FakeL2Plugin.__init__(self)
        self._run_embrane_config()
class EmbraneOvsPlugin(base.EmbranePlugin, l2.OVSNeutronPluginV2):
    """EmbraneOvsPlugin.

    Uses the Open vSwitch L2 plugin for providing L2 networks and the
    base EmbranePlugin for L3.
    """

    _plugin_support = openvswitch_support.OpenvswitchSupport()

    def __init__(self):
        """First run plugin specific initialization, then Embrane's."""
        # L3 is delegated to the ESM, so the L3 agent scheduler
        # extension inherited from the OVS plugin must not be exposed.
        self._supported_extension_aliases.remove("l3_agent_scheduler")
        l2.OVSNeutronPluginV2.__init__(self)
        self._run_embrane_config()
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/neutron/plugins/hyperv/agent/hyperv_neutron_agent.py b/neutron/plugins/hyperv/agent/hyperv_neutron_agent.py new file mode 100644 index 000000000..07a5ed776 --- /dev/null +++ b/neutron/plugins/hyperv/agent/hyperv_neutron_agent.py @@ -0,0 +1,475 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +#Copyright 2013 Cloudbase Solutions SRL +#Copyright 2013 Pedro Navarro Perez +#All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
# @author: Pedro Navarro Perez
# @author: Alessandro Pilotti, Cloudbase Solutions Srl

import platform
import re
import sys
import time

import eventlet
eventlet.monkey_patch()

from oslo.config import cfg

from neutron.agent.common import config
from neutron.agent import rpc as agent_rpc
from neutron.agent import securitygroups_rpc as sg_rpc
from neutron.common import config as common_config
from neutron.common import constants as n_const
from neutron.common import rpc_compat
from neutron.common import topics
from neutron import context
from neutron.openstack.common import log as logging
from neutron.openstack.common import loopingcall
from neutron.plugins.common import constants as p_const
from neutron.plugins.hyperv.agent import utils
from neutron.plugins.hyperv.agent import utilsfactory
from neutron.plugins.hyperv.common import constants

LOG = logging.getLogger(__name__)

agent_opts = [
    cfg.ListOpt(
        'physical_network_vswitch_mappings',
        default=[],
        # Restored the <physical_network>:<vswitch> placeholder, which
        # had been stripped from the help text ("List of : ").
        help=_('List of <physical_network>:<vswitch> '
               'where the physical networks can be expressed with '
               'wildcards, e.g.: ."*:external"')),
    cfg.StrOpt(
        'local_network_vswitch',
        default='private',
        help=_('Private vswitch name used for local networks')),
    cfg.IntOpt('polling_interval', default=2,
               help=_("The number of seconds the agent will wait between "
                      "polling for local device changes.")),
    cfg.BoolOpt('enable_metrics_collection',
                default=False,
                # Typo fix: "can by retrieved" -> "can be retrieved".
                help=_('Enables metrics collections for switch ports by using '
                       'Hyper-V\'s metric APIs. Collected data can be '
                       'retrieved by other apps and services, e.g.: '
                       'Ceilometer. Requires Hyper-V / Windows Server 2012 '
                       'and above')),
    cfg.IntOpt('metrics_max_retries',
               default=100,
               # Typo fix: "succeedes" -> "succeeds".
               help=_('Specifies the maximum number of retries to enable '
                      'Hyper-V\'s port metrics collection. The agent will try '
                      'to enable the feature once every polling_interval '
                      'period for at most metrics_max_retries or until it '
                      'succeeds.'))
]


CONF = cfg.CONF
CONF.register_opts(agent_opts, "AGENT")
config.register_agent_state_opts_helper(cfg.CONF)


class HyperVSecurityAgent(rpc_compat.RpcCallback,
                          sg_rpc.SecurityGroupAgentRpcMixin):
    """Agent-side consumer of security group updates for Hyper-V."""

    # Set RPC API version to 1.1 by default.
    RPC_API_VERSION = '1.1'

    def __init__(self, context, plugin_rpc):
        super(HyperVSecurityAgent, self).__init__()
        self.context = context
        self.plugin_rpc = plugin_rpc

        if sg_rpc.is_firewall_enabled():
            self.init_firewall()
            self._setup_rpc()

    def _setup_rpc(self):
        """Subscribe to security group update notifications."""
        self.topic = topics.AGENT
        self.endpoints = [HyperVSecurityCallbackMixin(self)]
        consumers = [[topics.SECURITY_GROUP, topics.UPDATE]]

        self.connection = agent_rpc.create_consumers(self.endpoints,
                                                     self.topic,
                                                     consumers)


class HyperVSecurityCallbackMixin(rpc_compat.RpcCallback,
                                  sg_rpc.SecurityGroupAgentRpcCallbackMixin):
    """RPC endpoint delegating security group callbacks to the agent."""

    # Set RPC API version to 1.1 by default.
    RPC_API_VERSION = '1.1'

    def __init__(self, sg_agent):
        super(HyperVSecurityCallbackMixin, self).__init__()
        self.sg_agent = sg_agent


class HyperVPluginApi(agent_rpc.PluginApi,
                      sg_rpc.SecurityGroupServerRpcApiMixin):
    """Plugin-side RPC API with security group server calls mixed in."""
    pass
class HyperVNeutronAgent(rpc_compat.RpcCallback):
    """Hyper-V L2 agent.

    Polls the hypervisor for vNIC changes and binds/unbinds ports on
    the local vswitches according to what the plugin reports.
    """

    # Set RPC API version to 1.0 by default.
    RPC_API_VERSION = '1.0'

    def __init__(self):
        super(HyperVNeutronAgent, self).__init__()
        self._utils = utilsfactory.get_hypervutils()
        self._polling_interval = CONF.AGENT.polling_interval
        self._load_physical_network_mappings()
        self._network_vswitch_map = {}
        self._port_metric_retries = {}
        self._set_agent_state()
        self._setup_rpc()

    def _set_agent_state(self):
        self.agent_state = {
            'binary': 'neutron-hyperv-agent',
            'host': cfg.CONF.host,
            'topic': n_const.L2_AGENT_TOPIC,
            'configurations': {'vswitch_mappings':
                               self._physical_network_mappings},
            'agent_type': n_const.AGENT_TYPE_HYPERV,
            'start_flag': True}

    def _report_state(self):
        try:
            self.state_rpc.report_state(self.context,
                                        self.agent_state)
            # start_flag is only advertised on the first successful report.
            self.agent_state.pop('start_flag', None)
        except Exception as ex:
            LOG.exception(_("Failed reporting state! %s"), ex)

    def _setup_rpc(self):
        self.agent_id = 'hyperv_%s' % platform.node()
        self.topic = topics.AGENT
        self.plugin_rpc = HyperVPluginApi(topics.PLUGIN)

        self.state_rpc = agent_rpc.PluginReportStateAPI(topics.PLUGIN)

        # RPC network init
        self.context = context.get_admin_context_without_session()
        # Handle updates from service
        self.endpoints = [self]
        # Define the listening consumers for the agent
        consumers = [[topics.PORT, topics.UPDATE],
                     [topics.NETWORK, topics.DELETE],
                     [topics.PORT, topics.DELETE],
                     [constants.TUNNEL, topics.UPDATE]]
        self.connection = agent_rpc.create_consumers(self.endpoints,
                                                     self.topic,
                                                     consumers)

        self.sec_groups_agent = HyperVSecurityAgent(
            self.context, self.plugin_rpc)
        report_interval = CONF.AGENT.report_interval
        if report_interval:
            heartbeat = loopingcall.FixedIntervalLoopingCall(
                self._report_state)
            heartbeat.start(interval=report_interval)

    def _load_physical_network_mappings(self):
        """Compile the physnet-pattern -> vswitch name map from config."""
        self._physical_network_mappings = {}
        for mapping in CONF.AGENT.physical_network_vswitch_mappings:
            parts = mapping.split(':')
            if len(parts) != 2:
                LOG.debug(_('Invalid physical network mapping: %s'), mapping)
            else:
                # '*' wildcards in the config become '.*' in the regex.
                pattern = re.escape(parts[0].strip()).replace('\\*', '.*')
                vswitch = parts[1].strip()
                self._physical_network_mappings[pattern] = vswitch

    def _get_vswitch_for_physical_network(self, phys_network_name):
        for pattern in self._physical_network_mappings:
            if phys_network_name is None:
                phys_network_name = ''
            if re.match(pattern, phys_network_name):
                return self._physical_network_mappings[pattern]
        # Not found in the mappings, the vswitch has the same name
        return phys_network_name

    def _get_network_vswitch_map_by_port_id(self, port_id):
        """Return (network_id, map) owning *port_id*, or (None, None)."""
        for network_id, vswitch_map in self._network_vswitch_map.iteritems():
            if port_id in vswitch_map['ports']:
                return (network_id, vswitch_map)
        # BUG FIX: return an explicit pair on miss. Callers unpack the
        # result, so the previous implicit None raised TypeError for
        # unknown ports instead of letting them detect the miss.
        return (None, None)

    def network_delete(self, context, network_id=None):
        LOG.debug(_("network_delete received. "
                    "Deleting network %s"), network_id)
        # The network may not be defined on this agent
        if network_id in self._network_vswitch_map:
            self._reclaim_local_network(network_id)
        else:
            LOG.debug(_("Network %s not defined on agent."), network_id)

    def port_delete(self, context, port_id=None):
        LOG.debug(_("port_delete received"))
        self._port_unbound(port_id)

    def port_update(self, context, port=None, network_type=None,
                    segmentation_id=None, physical_network=None):
        LOG.debug(_("port_update received"))
        if CONF.SECURITYGROUP.enable_security_group:
            if 'security_groups' in port:
                self.sec_groups_agent.refresh_firewall()

        self._treat_vif_port(
            port['id'], port['network_id'],
            network_type, physical_network,
            segmentation_id, port['admin_state_up'])

    def _get_vswitch_name(self, network_type, physical_network):
        if network_type != p_const.TYPE_LOCAL:
            vswitch_name = self._get_vswitch_for_physical_network(
                physical_network)
        else:
            vswitch_name = CONF.AGENT.local_network_vswitch
        return vswitch_name

    def _provision_network(self, port_id,
                           net_uuid, network_type,
                           physical_network,
                           segmentation_id):
        """Record the vswitch binding for a network seen for the first time."""
        LOG.info(_("Provisioning network %s"), net_uuid)

        vswitch_name = self._get_vswitch_name(network_type, physical_network)

        if network_type in [p_const.TYPE_VLAN, p_const.TYPE_FLAT]:
            # Nothing to do
            pass
        elif network_type == p_const.TYPE_LOCAL:
            # TODO(alexpilotti): Check that the switch type is private
            # or create it if not existing
            pass
        else:
            raise utils.HyperVException(
                msg=(_("Cannot provision unknown network type %(network_type)s"
                       " for network %(net_uuid)s") %
                     dict(network_type=network_type, net_uuid=net_uuid)))

        # Renamed from `map` to avoid shadowing the builtin.
        vswitch_map = {
            'network_type': network_type,
            'vswitch_name': vswitch_name,
            'ports': [],
            'vlan_id': segmentation_id}
        self._network_vswitch_map[net_uuid] = vswitch_map

    def _reclaim_local_network(self, net_uuid):
        LOG.info(_("Reclaiming local network %s"), net_uuid)
        del self._network_vswitch_map[net_uuid]

    def _port_bound(self, port_id,
                    net_uuid,
                    network_type,
                    physical_network,
                    segmentation_id):
        LOG.debug(_("Binding port %s"), port_id)

        if net_uuid not in self._network_vswitch_map:
            self._provision_network(
                port_id, net_uuid, network_type,
                physical_network, segmentation_id)

        vswitch_map = self._network_vswitch_map[net_uuid]
        vswitch_map['ports'].append(port_id)

        self._utils.connect_vnic_to_vswitch(vswitch_map['vswitch_name'],
                                            port_id)

        if network_type == p_const.TYPE_VLAN:
            LOG.info(_('Binding VLAN ID %(segmentation_id)s '
                       'to switch port %(port_id)s'),
                     dict(segmentation_id=segmentation_id, port_id=port_id))
            self._utils.set_vswitch_port_vlan_id(
                segmentation_id,
                port_id)
        elif network_type == p_const.TYPE_FLAT:
            # Nothing to do
            pass
        elif network_type == p_const.TYPE_LOCAL:
            # Nothing to do
            pass
        else:
            LOG.error(_('Unsupported network type %s'), network_type)

        if CONF.AGENT.enable_metrics_collection:
            self._utils.enable_port_metrics_collection(port_id)
            self._port_metric_retries[port_id] = CONF.AGENT.metrics_max_retries

    def _port_unbound(self, port_id):
        (net_uuid, vswitch_map) = self._get_network_vswitch_map_by_port_id(
            port_id)
        # BUG FIX: test the miss value returned by the lookup. The
        # original unpacked a bare None (TypeError) before its
        # membership check could run, so unknown ports crashed the
        # agent loop instead of being logged. Also fixed the
        # "avalailable" typo in the message.
        if not net_uuid:
            LOG.info(_('Network %s is not available on this agent'),
                     net_uuid)
            return

        LOG.debug(_("Unbinding port %s"), port_id)
        self._utils.disconnect_switch_port(vswitch_map['vswitch_name'],
                                           port_id, True)

        if not vswitch_map['ports']:
            self._reclaim_local_network(net_uuid)

    def _port_enable_control_metrics(self):
        if not CONF.AGENT.enable_metrics_collection:
            return

        for port_id in self._port_metric_retries.keys():
            if self._utils.can_enable_control_metrics(port_id):
                self._utils.enable_control_metrics(port_id)
                LOG.info(_('Port metrics enabled for port: %s'), port_id)
                del self._port_metric_retries[port_id]
            elif self._port_metric_retries[port_id] < 1:
                # Retries exhausted: force-enable as a last resort.
                self._utils.enable_control_metrics(port_id)
                LOG.error(_('Port metrics raw enabling for port: %s'), port_id)
                del self._port_metric_retries[port_id]
            else:
                self._port_metric_retries[port_id] -= 1

    def _update_ports(self, registered_ports):
        """Return the added/removed port delta, or None if unchanged."""
        ports = self._utils.get_vnic_ids()
        if ports == registered_ports:
            return
        added = ports - registered_ports
        removed = registered_ports - ports
        return {'current': ports,
                'added': added,
                'removed': removed}

    def _treat_vif_port(self, port_id, network_id, network_type,
                        physical_network, segmentation_id,
                        admin_state_up):
        if self._utils.vnic_port_exists(port_id):
            if admin_state_up:
                self._port_bound(port_id, network_id, network_type,
                                 physical_network, segmentation_id)
            else:
                self._port_unbound(port_id)
        else:
            LOG.debug(_("No port %s defined on agent."), port_id)

    def _treat_devices_added(self, devices):
        """Bind newly seen devices; return True if a resync is needed."""
        resync = False
        for device in devices:
            LOG.info(_("Adding port %s"), device)
            try:
                device_details = self.plugin_rpc.get_device_details(
                    self.context,
                    device,
                    self.agent_id)
            except Exception as e:
                LOG.debug(
                    _("Unable to get port details for "
                      "device %(device)s: %(e)s"),
                    {'device': device, 'e': e})
                resync = True
                continue
            if 'port_id' in device_details:
                LOG.info(
                    _("Port %(device)s updated. Details: %(device_details)s"),
                    {'device': device, 'device_details': device_details})
                self._treat_vif_port(
                    device_details['port_id'],
                    device_details['network_id'],
                    device_details['network_type'],
                    device_details['physical_network'],
                    device_details['segmentation_id'],
                    device_details['admin_state_up'])

                # check if security groups is enabled.
                # if not, teardown the security group rules
                if CONF.SECURITYGROUP.enable_security_group:
                    self.sec_groups_agent.prepare_devices_filter([device])
                else:
                    self._utils.remove_all_security_rules(
                        device_details['port_id'])
                self.plugin_rpc.update_device_up(self.context,
                                                 device,
                                                 self.agent_id,
                                                 cfg.CONF.host)
        return resync

    def _treat_devices_removed(self, devices):
        """Unbind removed devices; return True if a resync is needed."""
        resync = False
        for device in devices:
            LOG.info(_("Removing port %s"), device)
            try:
                self.plugin_rpc.update_device_down(self.context,
                                                   device,
                                                   self.agent_id,
                                                   cfg.CONF.host)
            except Exception as e:
                LOG.debug(
                    _("Removing port failed for device %(device)s: %(e)s"),
                    dict(device=device, e=e))
                resync = True
                continue
            self._port_unbound(device)
        return resync

    def _process_network_ports(self, port_info):
        resync_a = False
        resync_b = False
        if 'added' in port_info:
            resync_a = self._treat_devices_added(port_info['added'])
        if 'removed' in port_info:
            resync_b = self._treat_devices_removed(port_info['removed'])
        # If one of the above operations fails => resync with plugin
        return (resync_a | resync_b)

    def daemon_loop(self):
        """Main polling loop: sync device state with the plugin forever."""
        sync = True
        ports = set()

        while True:
            try:
                start = time.time()
                if sync:
                    LOG.info(_("Agent out of sync with plugin!"))
                    ports.clear()
                    sync = False

                port_info = self._update_ports(ports)

                # notify plugin about port deltas
                if port_info:
                    LOG.debug(_("Agent loop has new devices!"))
                    # If treat devices fails - must resync with plugin
                    sync = self._process_network_ports(port_info)
                    ports = port_info['current']

                self._port_enable_control_metrics()
            except Exception as e:
                LOG.exception(_("Error in agent event loop: %s"), e)
                sync = True

            # sleep till end of polling interval
            elapsed = (time.time() - start)
            if (elapsed < self._polling_interval):
                time.sleep(self._polling_interval - elapsed)
            else:
                LOG.debug(_("Loop iteration exceeded interval "
                            "(%(polling_interval)s vs. %(elapsed)s)"),
                          {'polling_interval': self._polling_interval,
                           'elapsed': elapsed})


def main():
    common_config.init(sys.argv[1:])
    common_config.setup_logging(cfg.CONF)

    plugin = HyperVNeutronAgent()

    # Start everything.
    LOG.info(_("Agent initialized successfully, now running... "))
    plugin.daemon_loop()
# @author: Claudiu Belu, Cloudbase Solutions Srl

from neutron.agent import firewall
from neutron.openstack.common import log as logging
from neutron.plugins.hyperv.agent import utilsfactory
from neutron.plugins.hyperv.agent import utilsv2

LOG = logging.getLogger(__name__)


class HyperVSecurityGroupsDriver(firewall.FirewallDriver):
    """Security Groups Driver.

    Security Groups implementation for Hyper-V VMs.
    """

    _ACL_PROP_MAP = {
        'direction': {'ingress': utilsv2.HyperVUtilsV2._ACL_DIR_IN,
                      'egress': utilsv2.HyperVUtilsV2._ACL_DIR_OUT},
        'ethertype': {'IPv4': utilsv2.HyperVUtilsV2._ACL_TYPE_IPV4,
                      'IPv6': utilsv2.HyperVUtilsV2._ACL_TYPE_IPV6},
        'protocol': {'icmp': utilsv2.HyperVUtilsV2._ICMP_PROTOCOL},
        'default': "ANY",
        'address_default': {'IPv4': '0.0.0.0/0', 'IPv6': '::/0'}
    }

    def __init__(self):
        self._utils = utilsfactory.get_hypervutils()
        # port['device'] -> last applied port dict, used to diff rule sets.
        self._security_ports = {}

    def prepare_port_filter(self, port):
        """Create ACL rules for *port*, adding defaults on first sight."""
        # Lazy %-args instead of eager '%': only formatted when debug is on.
        LOG.debug('Creating port %s rules', len(port['security_group_rules']))

        # newly created port, add default rules.
        if port['device'] not in self._security_ports:
            LOG.debug('Creating default reject rules.')
            self._utils.create_default_reject_all_rules(port['id'])

        self._security_ports[port['device']] = port
        self._create_port_rules(port['id'], port['security_group_rules'])

    def _create_port_rules(self, port_id, rules):
        for rule in rules:
            param_map = self._create_param_map(rule)
            try:
                self._utils.create_security_rule(port_id, **param_map)
            except Exception as ex:
                # Placeholder spelling fixed ('hyperv_exeption') together
                # with its dict key, so the format still matches.
                LOG.error(_('Hyper-V Exception: %(hyperv_exception)s while '
                            'adding rule: %(rule)s'),
                          dict(hyperv_exception=ex, rule=rule))

    def _remove_port_rules(self, port_id, rules):
        for rule in rules:
            param_map = self._create_param_map(rule)
            try:
                self._utils.remove_security_rule(port_id, **param_map)
            except Exception as ex:
                LOG.error(_('Hyper-V Exception: %(hyperv_exception)s while '
                            'removing rule: %(rule)s'),
                          dict(hyperv_exception=ex, rule=rule))

    def _create_param_map(self, rule):
        """Translate a Neutron security group rule into driver ACL params."""
        if 'port_range_min' in rule and 'port_range_max' in rule:
            local_port = '%s-%s' % (rule['port_range_min'],
                                    rule['port_range_max'])
        else:
            local_port = self._ACL_PROP_MAP['default']

        return {
            'direction': self._ACL_PROP_MAP['direction'][rule['direction']],
            'acl_type': self._ACL_PROP_MAP['ethertype'][rule['ethertype']],
            'local_port': local_port,
            'protocol': self._get_rule_protocol(rule),
            'remote_address': self._get_rule_remote_address(rule)
        }

    def apply_port_filter(self, port):
        # Typo fix: 'Aplying' -> 'Applying'.
        LOG.info(_('Applying port filter.'))

    def update_port_filter(self, port):
        """Diff the old and new rule sets and apply only the changes."""
        LOG.info(_('Updating port rules.'))

        if port['device'] not in self._security_ports:
            self.prepare_port_filter(port)
            return

        old_port = self._security_ports[port['device']]
        rules = old_port['security_group_rules']
        param_port_rules = port['security_group_rules']

        new_rules = [r for r in param_port_rules if r not in rules]
        remove_rules = [r for r in rules if r not in param_port_rules]

        LOG.info(_("Creating %(new)s new rules, removing %(old)s "
                   "old rules."),
                 {'new': len(new_rules),
                  'old': len(remove_rules)})

        self._remove_port_rules(old_port['id'], remove_rules)
        self._create_port_rules(port['id'], new_rules)

        self._security_ports[port['device']] = port

    def remove_port_filter(self, port):
        LOG.info(_('Removing port filter'))
        self._security_ports.pop(port['device'], None)

    @property
    def ports(self):
        return self._security_ports

    def _get_rule_remote_address(self, rule):
        # BUG FIX: '==' instead of 'is'. Identity comparison with a
        # string literal only worked through CPython's interning of
        # short strings and is not a language guarantee.
        if rule['direction'] == 'ingress':
            ip_prefix = 'source_ip_prefix'
        else:
            ip_prefix = 'dest_ip_prefix'

        if ip_prefix in rule:
            return rule[ip_prefix]
        return self._ACL_PROP_MAP['address_default'][rule['ethertype']]

    def _get_rule_protocol(self, rule):
        protocol = self._get_rule_prop_or_default(rule, 'protocol')
        # Membership test directly on the dict; '.keys()' was redundant.
        if protocol in self._ACL_PROP_MAP['protocol']:
            return self._ACL_PROP_MAP['protocol'][protocol]

        return protocol

    def _get_rule_prop_or_default(self, rule, prop):
        # dict.get expresses the original two-branch lookup directly.
        return rule.get(prop, self._ACL_PROP_MAP['default'])
+# @author: Pedro Navarro Perez +# @author: Alessandro Pilotti, Cloudbase Solutions Srl + +import sys +import time + +from oslo.config import cfg + +from neutron.common import exceptions as n_exc +from neutron.openstack.common import log as logging + +# Check needed for unit testing on Unix +if sys.platform == 'win32': + import wmi + +CONF = cfg.CONF +LOG = logging.getLogger(__name__) + + +class HyperVException(n_exc.NeutronException): + message = _('HyperVException: %(msg)s') + +WMI_JOB_STATE_STARTED = 4096 +WMI_JOB_STATE_RUNNING = 4 +WMI_JOB_STATE_COMPLETED = 7 + + +class HyperVUtils(object): + + _ETHERNET_SWITCH_PORT = 'Msvm_SwitchPort' + + _wmi_namespace = '//./root/virtualization' + + def __init__(self): + self._wmi_conn = None + + @property + def _conn(self): + if self._wmi_conn is None: + self._wmi_conn = wmi.WMI(moniker=self._wmi_namespace) + return self._wmi_conn + + def get_switch_ports(self, vswitch_name): + vswitch = self._get_vswitch(vswitch_name) + vswitch_ports = vswitch.associators( + wmi_result_class=self._ETHERNET_SWITCH_PORT) + return set(p.Name for p in vswitch_ports) + + def vnic_port_exists(self, port_id): + try: + self._get_vnic_settings(port_id) + except Exception: + return False + return True + + def get_vnic_ids(self): + return set( + p.ElementName + for p in self._conn.Msvm_SyntheticEthernetPortSettingData() + if p.ElementName is not None) + + def _get_vnic_settings(self, vnic_name): + vnic_settings = self._conn.Msvm_SyntheticEthernetPortSettingData( + ElementName=vnic_name) + if not vnic_settings: + raise HyperVException(msg=_('Vnic not found: %s') % vnic_name) + return vnic_settings[0] + + def connect_vnic_to_vswitch(self, vswitch_name, switch_port_name): + vnic_settings = self._get_vnic_settings(switch_port_name) + if not vnic_settings.Connection or not vnic_settings.Connection[0]: + port = self.get_port_by_id(switch_port_name, vswitch_name) + if port: + port_path = port.Path_() + else: + port_path = self._create_switch_port( + 
vswitch_name, switch_port_name) + vnic_settings.Connection = [port_path] + self._modify_virt_resource(vnic_settings) + + def _get_vm_from_res_setting_data(self, res_setting_data): + sd = res_setting_data.associators( + wmi_result_class='Msvm_VirtualSystemSettingData') + vm = sd[0].associators( + wmi_result_class='Msvm_ComputerSystem') + return vm[0] + + def _modify_virt_resource(self, res_setting_data): + vm = self._get_vm_from_res_setting_data(res_setting_data) + + vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0] + (job_path, ret_val) = vs_man_svc.ModifyVirtualSystemResources( + vm.Path_(), [res_setting_data.GetText_(1)]) + self._check_job_status(ret_val, job_path) + + def _check_job_status(self, ret_val, jobpath): + """Poll WMI job state for completion.""" + if not ret_val: + return + elif ret_val not in [WMI_JOB_STATE_STARTED, WMI_JOB_STATE_RUNNING]: + raise HyperVException(msg=_('Job failed with error %d') % ret_val) + + job_wmi_path = jobpath.replace('\\', '/') + job = wmi.WMI(moniker=job_wmi_path) + + while job.JobState == WMI_JOB_STATE_RUNNING: + time.sleep(0.1) + job = wmi.WMI(moniker=job_wmi_path) + if job.JobState != WMI_JOB_STATE_COMPLETED: + job_state = job.JobState + if job.path().Class == "Msvm_ConcreteJob": + err_sum_desc = job.ErrorSummaryDescription + err_desc = job.ErrorDescription + err_code = job.ErrorCode + data = {'job_state': job_state, + 'err_sum_desc': err_sum_desc, + 'err_desc': err_desc, + 'err_code': err_code} + raise HyperVException( + msg=_("WMI job failed with status %(job_state)d. " + "Error details: %(err_sum_desc)s - %(err_desc)s - " + "Error code: %(err_code)d") % data) + else: + (error, ret_val) = job.GetError() + if not ret_val and error: + data = {'job_state': job_state, + 'error': error} + raise HyperVException( + msg=_("WMI job failed with status %(job_state)d. " + "Error details: %(error)s") % data) + else: + raise HyperVException( + msg=_("WMI job failed with status %d. 
" + "No error description available") % job_state) + + desc = job.Description + elap = job.ElapsedTime + LOG.debug(_("WMI job succeeded: %(desc)s, Elapsed=%(elap)s"), + {'desc': desc, 'elap': elap}) + + def _create_switch_port(self, vswitch_name, switch_port_name): + """Creates a switch port.""" + switch_svc = self._conn.Msvm_VirtualSwitchManagementService()[0] + vswitch_path = self._get_vswitch(vswitch_name).path_() + (new_port, ret_val) = switch_svc.CreateSwitchPort( + Name=switch_port_name, + FriendlyName=switch_port_name, + ScopeOfResidence="", + VirtualSwitch=vswitch_path) + if ret_val != 0: + raise HyperVException( + msg=_('Failed creating port for %s') % vswitch_name) + return new_port + + def disconnect_switch_port( + self, vswitch_name, switch_port_name, delete_port): + """Disconnects the switch port.""" + switch_svc = self._conn.Msvm_VirtualSwitchManagementService()[0] + switch_port_path = self._get_switch_port_path_by_name( + switch_port_name) + if not switch_port_path: + # Port not found. It happens when the VM was already deleted. 
+ return + + (ret_val, ) = switch_svc.DisconnectSwitchPort( + SwitchPort=switch_port_path) + if ret_val != 0: + data = {'switch_port_name': switch_port_name, + 'vswitch_name': vswitch_name, + 'ret_val': ret_val} + raise HyperVException( + msg=_('Failed to disconnect port %(switch_port_name)s ' + 'from switch %(vswitch_name)s ' + 'with error %(ret_val)s') % data) + if delete_port: + (ret_val, ) = switch_svc.DeleteSwitchPort( + SwitchPort=switch_port_path) + if ret_val != 0: + data = {'switch_port_name': switch_port_name, + 'vswitch_name': vswitch_name, + 'ret_val': ret_val} + raise HyperVException( + msg=_('Failed to delete port %(switch_port_name)s ' + 'from switch %(vswitch_name)s ' + 'with error %(ret_val)s') % data) + + def _get_vswitch(self, vswitch_name): + vswitch = self._conn.Msvm_VirtualSwitch(ElementName=vswitch_name) + if not vswitch: + raise HyperVException(msg=_('VSwitch not found: %s') % + vswitch_name) + return vswitch[0] + + def _get_vswitch_external_port(self, vswitch): + vswitch_ports = vswitch.associators( + wmi_result_class=self._ETHERNET_SWITCH_PORT) + for vswitch_port in vswitch_ports: + lan_endpoints = vswitch_port.associators( + wmi_result_class='Msvm_SwitchLanEndpoint') + if lan_endpoints: + ext_port = lan_endpoints[0].associators( + wmi_result_class='Msvm_ExternalEthernetPort') + if ext_port: + return vswitch_port + + def set_vswitch_port_vlan_id(self, vlan_id, switch_port_name): + vlan_endpoint_settings = self._conn.Msvm_VLANEndpointSettingData( + ElementName=switch_port_name)[0] + if vlan_endpoint_settings.AccessVLAN != vlan_id: + vlan_endpoint_settings.AccessVLAN = vlan_id + vlan_endpoint_settings.put() + + def _get_switch_port_path_by_name(self, switch_port_name): + vswitch = self._conn.Msvm_SwitchPort(ElementName=switch_port_name) + if vswitch: + return vswitch[0].path_() + + def get_vswitch_id(self, vswitch_name): + vswitch = self._get_vswitch(vswitch_name) + return vswitch.Name + + def get_port_by_id(self, port_id, vswitch_name): + 
vswitch = self._get_vswitch(vswitch_name) + switch_ports = vswitch.associators( + wmi_result_class=self._ETHERNET_SWITCH_PORT) + for switch_port in switch_ports: + if (switch_port.ElementName == port_id): + return switch_port + + def enable_port_metrics_collection(self, switch_port_name): + raise NotImplementedError(_("Metrics collection is not supported on " + "this version of Hyper-V")) + + def enable_control_metrics(self, switch_port_name): + raise NotImplementedError(_("Metrics collection is not supported on " + "this version of Hyper-V")) + + def can_enable_control_metrics(self, switch_port_name): + return False diff --git a/neutron/plugins/hyperv/agent/utilsfactory.py b/neutron/plugins/hyperv/agent/utilsfactory.py new file mode 100644 index 000000000..5698255c3 --- /dev/null +++ b/neutron/plugins/hyperv/agent/utilsfactory.py @@ -0,0 +1,72 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 Cloudbase Solutions SRL +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# @author: Claudiu Belu, Cloudbase Solutions Srl + +import sys + +from oslo.config import cfg + +from neutron.openstack.common import log as logging +from neutron.plugins.hyperv.agent import utils +from neutron.plugins.hyperv.agent import utilsv2 + +# Check needed for unit testing on Unix +if sys.platform == 'win32': + import wmi + +hyper_opts = [ + cfg.BoolOpt('force_hyperv_utils_v1', + default=False, + help=_('Force V1 WMI utility classes')), +] + +CONF = cfg.CONF +CONF.register_opts(hyper_opts, 'hyperv') + +LOG = logging.getLogger(__name__) + + +def _get_windows_version(): + return wmi.WMI(moniker='//./root/cimv2').Win32_OperatingSystem()[0].Version + + +def _check_min_windows_version(major, minor, build=0): + version_str = _get_windows_version() + return map(int, version_str.split('.')) >= [major, minor, build] + + +def get_hypervutils(): + # V1 virtualization namespace features are supported up to + # Windows Server / Hyper-V Server 2012 + # V2 virtualization namespace features are supported starting with + # Windows Server / Hyper-V Server 2012 + # Windows Server / Hyper-V Server 2012 R2 uses the V2 namespace and + # introduces additional features + + force_v1_flag = CONF.hyperv.force_hyperv_utils_v1 + if _check_min_windows_version(6, 3): + if force_v1_flag: + LOG.warning(_('V1 virtualization namespace no longer supported on ' + 'Windows Server / Hyper-V Server 2012 R2 or above.')) + cls = utilsv2.HyperVUtilsV2R2 + elif not force_v1_flag and _check_min_windows_version(6, 2): + cls = utilsv2.HyperVUtilsV2 + else: + cls = utils.HyperVUtils + LOG.debug(_("Loading class: %(module_name)s.%(class_name)s"), + {'module_name': cls.__module__, 'class_name': cls.__name__}) + return cls() diff --git a/neutron/plugins/hyperv/agent/utilsv2.py b/neutron/plugins/hyperv/agent/utilsv2.py new file mode 100644 index 000000000..a55839487 --- /dev/null +++ b/neutron/plugins/hyperv/agent/utilsv2.py @@ -0,0 +1,439 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 
Cloudbase Solutions SRL +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# @author: Alessandro Pilotti, Cloudbase Solutions Srl +# @author: Claudiu Belu, Cloudbase Solutions Srl + +from neutron.plugins.hyperv.agent import utils + + +class HyperVUtilsV2(utils.HyperVUtils): + + _EXTERNAL_PORT = 'Msvm_ExternalEthernetPort' + _ETHERNET_SWITCH_PORT = 'Msvm_EthernetSwitchPort' + _PORT_ALLOC_SET_DATA = 'Msvm_EthernetPortAllocationSettingData' + _PORT_VLAN_SET_DATA = 'Msvm_EthernetSwitchPortVlanSettingData' + _PORT_SECURITY_SET_DATA = 'Msvm_EthernetSwitchPortSecuritySettingData' + _PORT_ALLOC_ACL_SET_DATA = 'Msvm_EthernetSwitchPortAclSettingData' + _PORT_EXT_ACL_SET_DATA = _PORT_ALLOC_ACL_SET_DATA + _LAN_ENDPOINT = 'Msvm_LANEndpoint' + _STATE_DISABLED = 3 + _OPERATION_MODE_ACCESS = 1 + + _VIRTUAL_SYSTEM_SETTING_DATA = 'Msvm_VirtualSystemSettingData' + _VM_SUMMARY_ENABLED_STATE = 100 + _HYPERV_VM_STATE_ENABLED = 2 + + _ACL_DIR_IN = 1 + _ACL_DIR_OUT = 2 + + _ACL_TYPE_IPV4 = 2 + _ACL_TYPE_IPV6 = 3 + + _ACL_ACTION_ALLOW = 1 + _ACL_ACTION_DENY = 2 + _ACL_ACTION_METER = 3 + + _METRIC_ENABLED = 2 + _NET_IN_METRIC_NAME = 'Filtered Incoming Network Traffic' + _NET_OUT_METRIC_NAME = 'Filtered Outgoing Network Traffic' + + _ACL_APPLICABILITY_LOCAL = 1 + _ACL_APPLICABILITY_REMOTE = 2 + + _ACL_DEFAULT = 'ANY' + _IPV4_ANY = '0.0.0.0/0' + _IPV6_ANY = '::/0' + _TCP_PROTOCOL = 'tcp' + _UDP_PROTOCOL = 'udp' + _ICMP_PROTOCOL = '1' + _MAX_WEIGHT = 65500 + + # 2 directions 
x 2 address types = 4 ACLs + _REJECT_ACLS_COUNT = 4 + + _wmi_namespace = '//./root/virtualization/v2' + + def __init__(self): + super(HyperVUtilsV2, self).__init__() + + def connect_vnic_to_vswitch(self, vswitch_name, switch_port_name): + vnic = self._get_vnic_settings(switch_port_name) + vswitch = self._get_vswitch(vswitch_name) + + port, found = self._get_switch_port_allocation(switch_port_name, True) + port.HostResource = [vswitch.path_()] + port.Parent = vnic.path_() + if not found: + vm = self._get_vm_from_res_setting_data(vnic) + self._add_virt_resource(vm, port) + else: + self._modify_virt_resource(port) + + def _modify_virt_resource(self, res_setting_data): + vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0] + (job_path, out_set_data, ret_val) = vs_man_svc.ModifyResourceSettings( + ResourceSettings=[res_setting_data.GetText_(1)]) + self._check_job_status(ret_val, job_path) + + def _add_virt_resource(self, vm, res_setting_data): + vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0] + (job_path, out_set_data, ret_val) = vs_man_svc.AddResourceSettings( + vm.path_(), [res_setting_data.GetText_(1)]) + self._check_job_status(ret_val, job_path) + + def _remove_virt_resource(self, res_setting_data): + vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0] + (job, ret_val) = vs_man_svc.RemoveResourceSettings( + ResourceSettings=[res_setting_data.path_()]) + self._check_job_status(ret_val, job) + + def _add_virt_feature(self, element, res_setting_data): + vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0] + (job_path, out_set_data, ret_val) = vs_man_svc.AddFeatureSettings( + element.path_(), [res_setting_data.GetText_(1)]) + self._check_job_status(ret_val, job_path) + + def _remove_virt_feature(self, feature_resource): + self._remove_multiple_virt_features([feature_resource]) + + def _remove_multiple_virt_features(self, feature_resources): + vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0] + 
(job_path, ret_val) = vs_man_svc.RemoveFeatureSettings( + FeatureSettings=[f.path_() for f in feature_resources]) + self._check_job_status(ret_val, job_path) + + def disconnect_switch_port( + self, vswitch_name, switch_port_name, delete_port): + """Disconnects the switch port.""" + sw_port, found = self._get_switch_port_allocation(switch_port_name) + if not sw_port: + # Port not found. It happens when the VM was already deleted. + return + + if delete_port: + self._remove_virt_resource(sw_port) + else: + sw_port.EnabledState = self._STATE_DISABLED + self._modify_virt_resource(sw_port) + + def _get_vswitch(self, vswitch_name): + vswitch = self._conn.Msvm_VirtualEthernetSwitch( + ElementName=vswitch_name) + if not len(vswitch): + raise utils.HyperVException(msg=_('VSwitch not found: %s') % + vswitch_name) + return vswitch[0] + + def _get_vswitch_external_port(self, vswitch): + vswitch_ports = vswitch.associators( + wmi_result_class=self._ETHERNET_SWITCH_PORT) + for vswitch_port in vswitch_ports: + lan_endpoints = vswitch_port.associators( + wmi_result_class=self._LAN_ENDPOINT) + if len(lan_endpoints): + lan_endpoints = lan_endpoints[0].associators( + wmi_result_class=self._LAN_ENDPOINT) + if len(lan_endpoints): + ext_port = lan_endpoints[0].associators( + wmi_result_class=self._EXTERNAL_PORT) + if ext_port: + return vswitch_port + + def set_vswitch_port_vlan_id(self, vlan_id, switch_port_name): + port_alloc, found = self._get_switch_port_allocation(switch_port_name) + if not found: + raise utils.HyperVException( + msg=_('Port Allocation not found: %s') % switch_port_name) + + vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0] + vlan_settings = self._get_vlan_setting_data_from_port_alloc(port_alloc) + if vlan_settings: + # Removing the feature because it cannot be modified + # due to a wmi exception. 
+ (job_path, ret_val) = vs_man_svc.RemoveFeatureSettings( + FeatureSettings=[vlan_settings.path_()]) + self._check_job_status(ret_val, job_path) + + (vlan_settings, found) = self._get_vlan_setting_data(switch_port_name) + vlan_settings.AccessVlanId = vlan_id + vlan_settings.OperationMode = self._OPERATION_MODE_ACCESS + (job_path, out, ret_val) = vs_man_svc.AddFeatureSettings( + port_alloc.path_(), [vlan_settings.GetText_(1)]) + self._check_job_status(ret_val, job_path) + + def _get_vlan_setting_data_from_port_alloc(self, port_alloc): + return self._get_first_item(port_alloc.associators( + wmi_result_class=self._PORT_VLAN_SET_DATA)) + + def _get_vlan_setting_data(self, switch_port_name, create=True): + return self._get_setting_data( + self._PORT_VLAN_SET_DATA, + switch_port_name, create) + + def _get_switch_port_allocation(self, switch_port_name, create=False): + return self._get_setting_data( + self._PORT_ALLOC_SET_DATA, + switch_port_name, create) + + def _get_setting_data(self, class_name, element_name, create=True): + element_name = element_name.replace("'", '"') + q = self._conn.query("SELECT * FROM %(class_name)s WHERE " + "ElementName = '%(element_name)s'" % + {"class_name": class_name, + "element_name": element_name}) + data = self._get_first_item(q) + found = data is not None + if not data and create: + data = self._get_default_setting_data(class_name) + data.ElementName = element_name + return data, found + + def _get_default_setting_data(self, class_name): + return self._conn.query("SELECT * FROM %s WHERE InstanceID " + "LIKE '%%\\Default'" % class_name)[0] + + def _get_first_item(self, obj): + if obj: + return obj[0] + + def enable_port_metrics_collection(self, switch_port_name): + port, found = self._get_switch_port_allocation(switch_port_name, False) + if not found: + return + + # Add the ACLs only if they don't already exist + acls = port.associators(wmi_result_class=self._PORT_ALLOC_ACL_SET_DATA) + for acl_type in [self._ACL_TYPE_IPV4, 
self._ACL_TYPE_IPV6]: + for acl_dir in [self._ACL_DIR_IN, self._ACL_DIR_OUT]: + _acls = self._filter_acls( + acls, self._ACL_ACTION_METER, acl_dir, acl_type) + + if not _acls: + acl = self._create_acl( + acl_dir, acl_type, self._ACL_ACTION_METER) + self._add_virt_feature(port, acl) + + def enable_control_metrics(self, switch_port_name): + port, found = self._get_switch_port_allocation(switch_port_name, False) + if not found: + return + + metric_svc = self._conn.Msvm_MetricService()[0] + metric_names = [self._NET_IN_METRIC_NAME, self._NET_OUT_METRIC_NAME] + + for metric_name in metric_names: + metric_def = self._conn.CIM_BaseMetricDefinition(Name=metric_name) + if metric_def: + metric_svc.ControlMetrics( + Subject=port.path_(), + Definition=metric_def[0].path_(), + MetricCollectionEnabled=self._METRIC_ENABLED) + + def can_enable_control_metrics(self, switch_port_name): + port, found = self._get_switch_port_allocation(switch_port_name, False) + if not found: + return False + + if not self._is_port_vm_started(port): + return False + + # all 4 meter ACLs must be existent first. 
(2 x direction) + acls = port.associators(wmi_result_class=self._PORT_ALLOC_ACL_SET_DATA) + acls = [a for a in acls if a.Action == self._ACL_ACTION_METER] + if len(acls) < 2: + return False + return True + + def _is_port_vm_started(self, port): + vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0] + vmsettings = port.associators( + wmi_result_class=self._VIRTUAL_SYSTEM_SETTING_DATA) + #See http://msdn.microsoft.com/en-us/library/cc160706%28VS.85%29.aspx + (ret_val, summary_info) = vs_man_svc.GetSummaryInformation( + [self._VM_SUMMARY_ENABLED_STATE], + [v.path_() for v in vmsettings]) + if ret_val or not summary_info: + raise utils.HyperVException(msg=_('Cannot get VM summary data ' + 'for: %s') % port.ElementName) + + return summary_info[0].EnabledState is self._HYPERV_VM_STATE_ENABLED + + def create_security_rule(self, switch_port_name, direction, acl_type, + local_port, protocol, remote_address): + port, found = self._get_switch_port_allocation(switch_port_name, False) + if not found: + return + + # Add the ACLs only if they don't already exist + acls = port.associators(wmi_result_class=self._PORT_EXT_ACL_SET_DATA) + weight = self._get_new_weight(acls) + self._bind_security_rule( + port, direction, acl_type, self._ACL_ACTION_ALLOW, local_port, + protocol, remote_address, weight) + + def remove_security_rule(self, switch_port_name, direction, acl_type, + local_port, protocol, remote_address): + port, found = self._get_switch_port_allocation(switch_port_name, False) + if not found: + # Port not found. It happens when the VM was already deleted. 
+ return + + acls = port.associators(wmi_result_class=self._PORT_EXT_ACL_SET_DATA) + filtered_acls = self._filter_security_acls( + acls, self._ACL_ACTION_ALLOW, direction, acl_type, local_port, + protocol, remote_address) + + for acl in filtered_acls: + self._remove_virt_feature(acl) + + def remove_all_security_rules(self, switch_port_name): + port, found = self._get_switch_port_allocation(switch_port_name, False) + if not found: + # Port not found. It happens when the VM was already deleted. + return + + acls = port.associators(wmi_result_class=self._PORT_EXT_ACL_SET_DATA) + filtered_acls = [a for a in acls if + a.Action is not self._ACL_ACTION_METER] + + if filtered_acls: + self._remove_multiple_virt_features(filtered_acls) + + def create_default_reject_all_rules(self, switch_port_name): + port, found = self._get_switch_port_allocation(switch_port_name, False) + if not found: + raise utils.HyperVException( + msg=_('Port Allocation not found: %s') % switch_port_name) + + acls = port.associators(wmi_result_class=self._PORT_EXT_ACL_SET_DATA) + filtered_acls = [v for v in acls if v.Action == self._ACL_ACTION_DENY] + + if len(filtered_acls) >= self._REJECT_ACLS_COUNT: + return + + for acl in filtered_acls: + self._remove_virt_feature(acl) + + weight = 0 + ipv4_pair = (self._ACL_TYPE_IPV4, self._IPV4_ANY) + ipv6_pair = (self._ACL_TYPE_IPV6, self._IPV6_ANY) + for direction in [self._ACL_DIR_IN, self._ACL_DIR_OUT]: + for acl_type, address in [ipv4_pair, ipv6_pair]: + for protocol in [self._TCP_PROTOCOL, + self._UDP_PROTOCOL, + self._ICMP_PROTOCOL]: + self._bind_security_rule( + port, direction, acl_type, self._ACL_ACTION_DENY, + self._ACL_DEFAULT, protocol, address, weight) + weight += 1 + + def _bind_security_rule(self, port, direction, acl_type, action, + local_port, protocol, remote_address, weight): + acls = port.associators(wmi_result_class=self._PORT_EXT_ACL_SET_DATA) + filtered_acls = self._filter_security_acls( + acls, action, direction, acl_type, local_port, 
protocol, + remote_address) + + for acl in filtered_acls: + self._remove_virt_feature(acl) + + acl = self._create_security_acl( + direction, acl_type, action, local_port, protocol, remote_address, + weight) + + self._add_virt_feature(port, acl) + + def _create_acl(self, direction, acl_type, action): + acl = self._get_default_setting_data(self._PORT_ALLOC_ACL_SET_DATA) + acl.set(Direction=direction, + AclType=acl_type, + Action=action, + Applicability=self._ACL_APPLICABILITY_LOCAL) + return acl + + def _create_security_acl(self, direction, acl_type, action, local_port, + protocol, remote_ip_address, weight): + acl = self._create_acl(direction, acl_type, action) + (remote_address, remote_prefix_length) = remote_ip_address.split('/') + acl.set(Applicability=self._ACL_APPLICABILITY_REMOTE, + RemoteAddress=remote_address, + RemoteAddressPrefixLength=remote_prefix_length) + return acl + + def _filter_acls(self, acls, action, direction, acl_type, remote_addr=""): + return [v for v in acls + if v.Action == action and + v.Direction == direction and + v.AclType == acl_type and + v.RemoteAddress == remote_addr] + + def _filter_security_acls(self, acls, acl_action, direction, acl_type, + local_port, protocol, remote_addr=""): + (remote_address, remote_prefix_length) = remote_addr.split('/') + remote_prefix_length = int(remote_prefix_length) + + return [v for v in acls + if v.Direction == direction and + v.Action in [self._ACL_ACTION_ALLOW, self._ACL_ACTION_DENY] and + v.AclType == acl_type and + v.RemoteAddress == remote_address and + v.RemoteAddressPrefixLength == remote_prefix_length] + + def _get_new_weight(self, acls): + return 0 + + +class HyperVUtilsV2R2(HyperVUtilsV2): + _PORT_EXT_ACL_SET_DATA = 'Msvm_EthernetSwitchPortExtendedAclSettingData' + _MAX_WEIGHT = 65500 + + # 2 directions x 2 address types x 3 protocols = 12 ACLs + _REJECT_ACLS_COUNT = 12 + + def _create_security_acl(self, direction, acl_type, action, local_port, + protocol, remote_addr, weight): + acl = 
self._get_default_setting_data(self._PORT_EXT_ACL_SET_DATA) + acl.set(Direction=direction, + Action=action, + LocalPort=str(local_port), + Protocol=protocol, + RemoteIPAddress=remote_addr, + IdleSessionTimeout=0, + Weight=weight) + return acl + + def _filter_security_acls(self, acls, action, direction, acl_type, + local_port, protocol, remote_addr=""): + return [v for v in acls + if v.Action == action and + v.Direction == direction and + v.LocalPort == str(local_port) and + v.Protocol == protocol and + v.RemoteIPAddress == remote_addr] + + def _get_new_weight(self, acls): + acls = [a for a in acls if a.Action is not self._ACL_ACTION_DENY] + if not acls: + return self._MAX_WEIGHT - 1 + + weights = [a.Weight for a in acls] + min_weight = min(weights) + for weight in range(min_weight, self._MAX_WEIGHT): + if weight not in weights: + return weight + + return min_weight - 1 diff --git a/neutron/plugins/hyperv/agent_notifier_api.py b/neutron/plugins/hyperv/agent_notifier_api.py new file mode 100644 index 000000000..058d96c4c --- /dev/null +++ b/neutron/plugins/hyperv/agent_notifier_api.py @@ -0,0 +1,80 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 Cloudbase Solutions SRL +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# @author: Alessandro Pilotti, Cloudbase Solutions Srl + +from neutron.common import rpc_compat +from neutron.common import topics +from neutron.openstack.common import log as logging +from neutron.plugins.hyperv.common import constants + +LOG = logging.getLogger(__name__) + + +class AgentNotifierApi(rpc_compat.RpcProxy): + '''Agent side of the openvswitch rpc API. + + API version history: + 1.0 - Initial version. + + ''' + + BASE_RPC_API_VERSION = '1.0' + + def __init__(self, topic): + super(AgentNotifierApi, self).__init__( + topic=topic, default_version=self.BASE_RPC_API_VERSION) + self.topic_network_delete = topics.get_topic_name(topic, + topics.NETWORK, + topics.DELETE) + self.topic_port_update = topics.get_topic_name(topic, + topics.PORT, + topics.UPDATE) + self.topic_port_delete = topics.get_topic_name(topic, + topics.PORT, + topics.DELETE) + self.topic_tunnel_update = topics.get_topic_name(topic, + constants.TUNNEL, + topics.UPDATE) + + def network_delete(self, context, network_id): + self.fanout_cast(context, + self.make_msg('network_delete', + network_id=network_id), + topic=self.topic_network_delete) + + def port_update(self, context, port, network_type, segmentation_id, + physical_network): + self.fanout_cast(context, + self.make_msg('port_update', + port=port, + network_type=network_type, + segmentation_id=segmentation_id, + physical_network=physical_network), + topic=self.topic_port_update) + + def port_delete(self, context, port_id): + self.fanout_cast(context, + self.make_msg('port_delete', + port_id=port_id), + topic=self.topic_port_delete) + + def tunnel_update(self, context, tunnel_ip, tunnel_id): + self.fanout_cast(context, + self.make_msg('tunnel_update', + tunnel_ip=tunnel_ip, + tunnel_id=tunnel_id), + topic=self.topic_tunnel_update) diff --git a/neutron/plugins/hyperv/common/__init__.py b/neutron/plugins/hyperv/common/__init__.py new file mode 100644 index 000000000..7ef4e09fa --- /dev/null +++ b/neutron/plugins/hyperv/common/__init__.py @@ 
-0,0 +1,16 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 Cloudbase Solutions SRL +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/neutron/plugins/hyperv/common/constants.py b/neutron/plugins/hyperv/common/constants.py new file mode 100644 index 000000000..b36d9b559 --- /dev/null +++ b/neutron/plugins/hyperv/common/constants.py @@ -0,0 +1,23 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 Cloudbase Solutions SRL +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# @author: Alessandro Pilotti, Cloudbase Solutions Srl + +# Topic for tunnel notifications between the plugin and agent +TUNNEL = 'tunnel' + +# Special vlan_id value in ovs_vlan_allocations table indicating flat network +FLAT_VLAN_ID = -1 diff --git a/neutron/plugins/hyperv/db.py b/neutron/plugins/hyperv/db.py new file mode 100644 index 000000000..159275a85 --- /dev/null +++ b/neutron/plugins/hyperv/db.py @@ -0,0 +1,219 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 Cloudbase Solutions SRL +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# @author: Alessandro Pilotti, Cloudbase Solutions Srl + +from six import moves +from sqlalchemy.orm import exc + +from neutron.common import exceptions as n_exc +import neutron.db.api as db_api +from neutron.db import models_v2 +from neutron.openstack.common import log as logging +from neutron.plugins.hyperv.common import constants +from neutron.plugins.hyperv import model as hyperv_model + +LOG = logging.getLogger(__name__) + + +class HyperVPluginDB(object): + def initialize(self): + db_api.configure_db() + + def reserve_vlan(self, session): + with session.begin(subtransactions=True): + alloc_q = session.query(hyperv_model.VlanAllocation) + alloc_q = alloc_q.filter_by(allocated=False) + alloc = alloc_q.first() + if alloc: + LOG.debug(_("Reserving vlan %(vlan_id)s on physical network " + "%(physical_network)s from pool"), + {'vlan_id': alloc.vlan_id, + 'physical_network': alloc.physical_network}) + alloc.allocated = True + return (alloc.physical_network, alloc.vlan_id) + raise n_exc.NoNetworkAvailable() + + def reserve_flat_net(self, session): + with session.begin(subtransactions=True): + alloc_q = session.query(hyperv_model.VlanAllocation) + alloc_q = alloc_q.filter_by(allocated=False, + vlan_id=constants.FLAT_VLAN_ID) + alloc = alloc_q.first() + if alloc: + LOG.debug(_("Reserving flat physical network " + "%(physical_network)s from pool"), + {'physical_network': alloc.physical_network}) + alloc.allocated = True + return alloc.physical_network + raise n_exc.NoNetworkAvailable() + + def reserve_specific_vlan(self, session, physical_network, vlan_id): + with session.begin(subtransactions=True): + try: + alloc_q = session.query(hyperv_model.VlanAllocation) + alloc_q = alloc_q.filter_by( + physical_network=physical_network, + vlan_id=vlan_id) + alloc = alloc_q.one() + if alloc.allocated: + if vlan_id == constants.FLAT_VLAN_ID: + raise n_exc.FlatNetworkInUse( + physical_network=physical_network) + else: + raise n_exc.VlanIdInUse( + vlan_id=vlan_id, + 
physical_network=physical_network) + LOG.debug(_("Reserving specific vlan %(vlan_id)s on physical " + "network %(physical_network)s from pool"), + {'vlan_id': vlan_id, + 'physical_network': physical_network}) + alloc.allocated = True + except exc.NoResultFound: + raise n_exc.NoNetworkAvailable() + + def reserve_specific_flat_net(self, session, physical_network): + return self.reserve_specific_vlan(session, physical_network, + constants.FLAT_VLAN_ID) + + def add_network_binding(self, session, network_id, network_type, + physical_network, segmentation_id): + with session.begin(subtransactions=True): + binding = hyperv_model.NetworkBinding( + network_id, network_type, + physical_network, + segmentation_id) + session.add(binding) + + def get_port(self, port_id): + session = db_api.get_session() + try: + port = session.query(models_v2.Port).filter_by(id=port_id).one() + except exc.NoResultFound: + port = None + return port + + def get_network_binding(self, session, network_id): + session = session or db_api.get_session() + try: + binding_q = session.query(hyperv_model.NetworkBinding) + binding_q = binding_q.filter_by(network_id=network_id) + return binding_q.one() + except exc.NoResultFound: + return + + def set_port_status(self, port_id, status): + session = db_api.get_session() + try: + port = session.query(models_v2.Port).filter_by(id=port_id).one() + port['status'] = status + session.merge(port) + session.flush() + except exc.NoResultFound: + raise n_exc.PortNotFound(port_id=port_id) + + def release_vlan(self, session, physical_network, vlan_id): + with session.begin(subtransactions=True): + try: + alloc_q = session.query(hyperv_model.VlanAllocation) + alloc_q = alloc_q.filter_by(physical_network=physical_network, + vlan_id=vlan_id) + alloc = alloc_q.one() + alloc.allocated = False + #session.delete(alloc) + LOG.debug(_("Releasing vlan %(vlan_id)s on physical network " + "%(physical_network)s"), + {'vlan_id': vlan_id, + 'physical_network': physical_network}) + 
except exc.NoResultFound: + LOG.warning(_("vlan_id %(vlan_id)s on physical network " + "%(physical_network)s not found"), + {'vlan_id': vlan_id, + 'physical_network': physical_network}) + + def _add_missing_allocatable_vlans(self, session, vlan_ids, + physical_network): + for vlan_id in sorted(vlan_ids): + alloc = hyperv_model.VlanAllocation( + physical_network, vlan_id) + session.add(alloc) + + def _remove_non_allocatable_vlans(self, session, + physical_network, + vlan_ids, + allocations): + if physical_network in allocations: + for alloc in allocations[physical_network]: + try: + # see if vlan is allocatable + vlan_ids.remove(alloc.vlan_id) + except KeyError: + # it's not allocatable, so check if its allocated + if not alloc.allocated: + # it's not, so remove it from table + LOG.debug(_( + "Removing vlan %(vlan_id)s on " + "physical network " + "%(physical_network)s from pool"), + {'vlan_id': alloc.vlan_id, + 'physical_network': physical_network}) + session.delete(alloc) + del allocations[physical_network] + + def _remove_unconfigured_vlans(self, session, allocations): + for allocs in allocations.itervalues(): + for alloc in allocs: + if not alloc.allocated: + LOG.debug(_("Removing vlan %(vlan_id)s on physical " + "network %(physical_network)s from pool"), + {'vlan_id': alloc.vlan_id, + 'physical_network': alloc.physical_network}) + session.delete(alloc) + + def sync_vlan_allocations(self, network_vlan_ranges): + """Synchronize vlan_allocations table with configured VLAN ranges.""" + + session = db_api.get_session() + with session.begin(): + # get existing allocations for all physical networks + allocations = dict() + allocs_q = session.query(hyperv_model.VlanAllocation) + for alloc in allocs_q: + allocations.setdefault(alloc.physical_network, + set()).add(alloc) + + # process vlan ranges for each configured physical network + for physical_network, vlan_ranges in network_vlan_ranges.items(): + # determine current configured allocatable vlans for this + # physical 
network + vlan_ids = set() + for vlan_range in vlan_ranges: + vlan_ids |= set(moves.xrange(vlan_range[0], + vlan_range[1] + 1)) + + # remove from table unallocated vlans not currently allocatable + self._remove_non_allocatable_vlans(session, + physical_network, + vlan_ids, + allocations) + + # add missing allocatable vlans to table + self._add_missing_allocatable_vlans(session, vlan_ids, + physical_network) + + # remove from table unallocated vlans for any unconfigured physical + # networks + self._remove_unconfigured_vlans(session, allocations) diff --git a/neutron/plugins/hyperv/hyperv_neutron_plugin.py b/neutron/plugins/hyperv/hyperv_neutron_plugin.py new file mode 100644 index 000000000..4307e5133 --- /dev/null +++ b/neutron/plugins/hyperv/hyperv_neutron_plugin.py @@ -0,0 +1,333 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 Cloudbase Solutions SRL +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# @author: Alessandro Pilotti, Cloudbase Solutions Srl
+
+from oslo.config import cfg
+
+from neutron.api.v2 import attributes
+from neutron.common import exceptions as n_exc
+from neutron.common import rpc_compat
+from neutron.common import topics
+from neutron.db import agents_db
+from neutron.db import db_base_plugin_v2
+from neutron.db import external_net_db
+from neutron.db import l3_gwmode_db
+from neutron.db import portbindings_base
+from neutron.db import quota_db  # noqa
+from neutron.extensions import portbindings
+from neutron.extensions import providernet as provider
+from neutron.openstack.common import log as logging
+from neutron.plugins.common import constants as svc_constants
+from neutron.plugins.common import utils as plugin_utils
+from neutron.plugins.hyperv import agent_notifier_api
+from neutron.plugins.hyperv.common import constants
+from neutron.plugins.hyperv import db as hyperv_db
+from neutron.plugins.hyperv import rpc_callbacks
+
+
+DEFAULT_VLAN_RANGES = []
+
+hyperv_opts = [
+    cfg.StrOpt('tenant_network_type', default='local',
+               help=_("Network type for tenant networks "
+                      "(local, flat, vlan or none)")),
+    cfg.ListOpt('network_vlan_ranges',
+                default=DEFAULT_VLAN_RANGES,
+                help=_("List of <physical_network>:<vlan_min>:<vlan_max> "
+                       "or <physical_network>")),
+]
+
+cfg.CONF.register_opts(hyperv_opts, "HYPERV")
+
+LOG = logging.getLogger(__name__)
+
+
+class BaseNetworkProvider(object):
+    def __init__(self):
+        self._db = hyperv_db.HyperVPluginDB()
+
+    def create_network(self, session, attrs):
+        pass
+
+    def delete_network(self, session, binding):
+        pass
+
+    def extend_network_dict(self, network, binding):
+        pass
+
+
+class LocalNetworkProvider(BaseNetworkProvider):
+    def create_network(self, session, attrs):
+        network_type = attrs.get(provider.NETWORK_TYPE)
+        segmentation_id = attrs.get(provider.SEGMENTATION_ID)
+        if attributes.is_attr_set(segmentation_id):
+            msg = _("segmentation_id specified "
+                    "for %s network") % network_type
+            raise n_exc.InvalidInput(error_message=msg)
+        
attrs[provider.SEGMENTATION_ID] = None + + physical_network = attrs.get(provider.PHYSICAL_NETWORK) + if attributes.is_attr_set(physical_network): + msg = _("physical_network specified " + "for %s network") % network_type + raise n_exc.InvalidInput(error_message=msg) + attrs[provider.PHYSICAL_NETWORK] = None + + def extend_network_dict(self, network, binding): + network[provider.PHYSICAL_NETWORK] = None + network[provider.SEGMENTATION_ID] = None + + +class FlatNetworkProvider(BaseNetworkProvider): + def create_network(self, session, attrs): + network_type = attrs.get(provider.NETWORK_TYPE) + segmentation_id = attrs.get(provider.SEGMENTATION_ID) + if attributes.is_attr_set(segmentation_id): + msg = _("segmentation_id specified " + "for %s network") % network_type + raise n_exc.InvalidInput(error_message=msg) + segmentation_id = constants.FLAT_VLAN_ID + attrs[provider.SEGMENTATION_ID] = segmentation_id + + physical_network = attrs.get(provider.PHYSICAL_NETWORK) + if not attributes.is_attr_set(physical_network): + physical_network = self._db.reserve_flat_net(session) + attrs[provider.PHYSICAL_NETWORK] = physical_network + else: + self._db.reserve_specific_flat_net(session, physical_network) + + def delete_network(self, session, binding): + self._db.release_vlan(session, binding.physical_network, + constants.FLAT_VLAN_ID) + + def extend_network_dict(self, network, binding): + network[provider.PHYSICAL_NETWORK] = binding.physical_network + + +class VlanNetworkProvider(BaseNetworkProvider): + def create_network(self, session, attrs): + segmentation_id = attrs.get(provider.SEGMENTATION_ID) + if attributes.is_attr_set(segmentation_id): + physical_network = attrs.get(provider.PHYSICAL_NETWORK) + if not attributes.is_attr_set(physical_network): + msg = _("physical_network not provided") + raise n_exc.InvalidInput(error_message=msg) + self._db.reserve_specific_vlan(session, physical_network, + segmentation_id) + else: + (physical_network, + segmentation_id) = 
self._db.reserve_vlan(session) + attrs[provider.SEGMENTATION_ID] = segmentation_id + attrs[provider.PHYSICAL_NETWORK] = physical_network + + def delete_network(self, session, binding): + self._db.release_vlan( + session, binding.physical_network, + binding.segmentation_id) + + def extend_network_dict(self, network, binding): + network[provider.PHYSICAL_NETWORK] = binding.physical_network + network[provider.SEGMENTATION_ID] = binding.segmentation_id + + +class HyperVNeutronPlugin(agents_db.AgentDbMixin, + db_base_plugin_v2.NeutronDbPluginV2, + external_net_db.External_net_db_mixin, + l3_gwmode_db.L3_NAT_db_mixin, + portbindings_base.PortBindingBaseMixin): + + # This attribute specifies whether the plugin supports or not + # bulk operations. Name mangling is used in order to ensure it + # is qualified by class + __native_bulk_support = True + supported_extension_aliases = ["provider", "external-net", "router", + "agent", "ext-gw-mode", "binding", "quotas"] + + def __init__(self, configfile=None): + self._db = hyperv_db.HyperVPluginDB() + self._db.initialize() + self.base_binding_dict = { + portbindings.VIF_TYPE: portbindings.VIF_TYPE_HYPERV} + portbindings_base.register_port_dict_function() + self._set_tenant_network_type() + + self._parse_network_vlan_ranges() + self._create_network_providers_map() + self._db.sync_vlan_allocations(self._network_vlan_ranges) + + self._setup_rpc() + + def _set_tenant_network_type(self): + tenant_network_type = cfg.CONF.HYPERV.tenant_network_type + if tenant_network_type not in [svc_constants.TYPE_LOCAL, + svc_constants.TYPE_FLAT, + svc_constants.TYPE_VLAN, + svc_constants.TYPE_NONE]: + msg = _( + "Invalid tenant_network_type: %s. 
" + "Agent terminated!") % tenant_network_type + raise n_exc.InvalidInput(error_message=msg) + self._tenant_network_type = tenant_network_type + + def _setup_rpc(self): + # RPC support + self.service_topics = {svc_constants.CORE: topics.PLUGIN, + svc_constants.L3_ROUTER_NAT: topics.L3PLUGIN} + self.conn = rpc_compat.create_connection(new=True) + self.notifier = agent_notifier_api.AgentNotifierApi( + topics.AGENT) + self.endpoints = [rpc_callbacks.HyperVRpcCallbacks(self.notifier), + agents_db.AgentExtRpcCallback()] + for svc_topic in self.service_topics.values(): + self.conn.create_consumer(svc_topic, self.endpoints, fanout=False) + # Consume from all consumers in threads + self.conn.consume_in_threads() + + def _parse_network_vlan_ranges(self): + self._network_vlan_ranges = plugin_utils.parse_network_vlan_ranges( + cfg.CONF.HYPERV.network_vlan_ranges) + LOG.info(_("Network VLAN ranges: %s"), self._network_vlan_ranges) + + def _check_vlan_id_in_range(self, physical_network, vlan_id): + for r in self._network_vlan_ranges[physical_network]: + if vlan_id >= r[0] and vlan_id <= r[1]: + return True + return False + + def _create_network_providers_map(self): + self._network_providers_map = { + svc_constants.TYPE_LOCAL: LocalNetworkProvider(), + svc_constants.TYPE_FLAT: FlatNetworkProvider(), + svc_constants.TYPE_VLAN: VlanNetworkProvider() + } + + def _process_provider_create(self, context, session, attrs): + network_type = attrs.get(provider.NETWORK_TYPE) + network_type_set = attributes.is_attr_set(network_type) + if not network_type_set: + if self._tenant_network_type == svc_constants.TYPE_NONE: + raise n_exc.TenantNetworksDisabled() + network_type = self._tenant_network_type + attrs[provider.NETWORK_TYPE] = network_type + + if network_type not in self._network_providers_map: + msg = _("Network type %s not supported") % network_type + raise n_exc.InvalidInput(error_message=msg) + p = self._network_providers_map[network_type] + # Provider specific network creation + 
p.create_network(session, attrs) + + def create_network(self, context, network): + session = context.session + with session.begin(subtransactions=True): + network_attrs = network['network'] + self._process_provider_create(context, session, network_attrs) + + net = super(HyperVNeutronPlugin, self).create_network( + context, network) + + network_type = network_attrs[provider.NETWORK_TYPE] + physical_network = network_attrs[provider.PHYSICAL_NETWORK] + segmentation_id = network_attrs[provider.SEGMENTATION_ID] + + self._db.add_network_binding( + session, net['id'], network_type, + physical_network, segmentation_id) + + self._process_l3_create(context, net, network['network']) + self._extend_network_dict_provider(context, net) + + LOG.debug(_("Created network: %s"), net['id']) + return net + + def _extend_network_dict_provider(self, context, network): + binding = self._db.get_network_binding( + context.session, network['id']) + network[provider.NETWORK_TYPE] = binding.network_type + p = self._network_providers_map[binding.network_type] + p.extend_network_dict(network, binding) + + def update_network(self, context, id, network): + provider._raise_if_updates_provider_attributes(network['network']) + + session = context.session + with session.begin(subtransactions=True): + net = super(HyperVNeutronPlugin, self).update_network(context, id, + network) + self._process_l3_update(context, net, network['network']) + self._extend_network_dict_provider(context, net) + return net + + def delete_network(self, context, id): + session = context.session + with session.begin(subtransactions=True): + binding = self._db.get_network_binding(session, id) + self._process_l3_delete(context, id) + super(HyperVNeutronPlugin, self).delete_network(context, id) + p = self._network_providers_map[binding.network_type] + p.delete_network(session, binding) + # the network_binding record is deleted via cascade from + # the network record, so explicit removal is not necessary + 
self.notifier.network_delete(context, id) + + def get_network(self, context, id, fields=None): + net = super(HyperVNeutronPlugin, self).get_network(context, id, None) + self._extend_network_dict_provider(context, net) + return self._fields(net, fields) + + def get_networks(self, context, filters=None, fields=None): + nets = super(HyperVNeutronPlugin, self).get_networks( + context, filters, None) + for net in nets: + self._extend_network_dict_provider(context, net) + + return [self._fields(net, fields) for net in nets] + + def create_port(self, context, port): + port_data = port['port'] + port = super(HyperVNeutronPlugin, self).create_port(context, port) + self._process_portbindings_create_and_update(context, + port_data, + port) + return port + + def update_port(self, context, id, port): + original_port = super(HyperVNeutronPlugin, self).get_port( + context, id) + port_data = port['port'] + port = super(HyperVNeutronPlugin, self).update_port(context, id, port) + self._process_portbindings_create_and_update(context, + port_data, + port) + if original_port['admin_state_up'] != port['admin_state_up']: + binding = self._db.get_network_binding( + None, port['network_id']) + self.notifier.port_update(context, port, + binding.network_type, + binding.segmentation_id, + binding.physical_network) + return port + + def delete_port(self, context, id, l3_port_check=True): + # if needed, check to see if this is a port owned by + # and l3-router. If so, we should prevent deletion. 
+ if l3_port_check: + self.prevent_l3_port_deletion(context, id) + self.disassociate_floatingips(context, id) + + super(HyperVNeutronPlugin, self).delete_port(context, id) + self.notifier.port_delete(context, id) diff --git a/neutron/plugins/hyperv/model.py b/neutron/plugins/hyperv/model.py new file mode 100644 index 000000000..808d2e591 --- /dev/null +++ b/neutron/plugins/hyperv/model.py @@ -0,0 +1,55 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 Cloudbase Solutions SRL +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# @author: Alessandro Pilotti, Cloudbase Solutions Srl + +from sqlalchemy import Boolean, Column, ForeignKey, Integer, String + +from neutron.db import model_base + + +class VlanAllocation(model_base.BASEV2): + """Represents allocation state of vlan_id on physical network.""" + __tablename__ = 'hyperv_vlan_allocations' + + physical_network = Column(String(64), nullable=False, primary_key=True) + vlan_id = Column(Integer, nullable=False, primary_key=True, + autoincrement=False) + allocated = Column(Boolean, nullable=False) + + def __init__(self, physical_network, vlan_id): + self.physical_network = physical_network + self.vlan_id = vlan_id + self.allocated = False + + +class NetworkBinding(model_base.BASEV2): + """Represents binding of virtual network to physical realization.""" + __tablename__ = 'hyperv_network_bindings' + + network_id = Column(String(36), + ForeignKey('networks.id', ondelete="CASCADE"), + primary_key=True) + network_type = Column(String(32), nullable=False) + physical_network = Column(String(64)) + segmentation_id = Column(Integer) + + def __init__(self, network_id, network_type, physical_network, + segmentation_id): + self.network_id = network_id + self.network_type = network_type + self.physical_network = physical_network + self.segmentation_id = segmentation_id diff --git a/neutron/plugins/hyperv/rpc_callbacks.py b/neutron/plugins/hyperv/rpc_callbacks.py new file mode 100644 index 000000000..874059a58 --- /dev/null +++ b/neutron/plugins/hyperv/rpc_callbacks.py @@ -0,0 +1,94 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 Cloudbase Solutions SRL +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+# @author: Alessandro Pilotti, Cloudbase Solutions Srl
+
+from neutron.common import constants as q_const
+from neutron.common import rpc_compat
+from neutron.db import dhcp_rpc_base
+from neutron.db import l3_rpc_base
+from neutron.openstack.common import log as logging
+from neutron.plugins.hyperv import db as hyperv_db
+
+
+LOG = logging.getLogger(__name__)
+
+
+class HyperVRpcCallbacks(
+        rpc_compat.RpcCallback,
+        dhcp_rpc_base.DhcpRpcCallbackMixin,
+        l3_rpc_base.L3RpcCallbackMixin):
+
+    # Set RPC API version to 1.1 by default.
+    RPC_API_VERSION = '1.1'
+
+    def __init__(self, notifier):
+        super(HyperVRpcCallbacks, self).__init__()
+        self.notifier = notifier
+        self._db = hyperv_db.HyperVPluginDB()
+
+    def get_device_details(self, rpc_context, **kwargs):
+        """Agent requests device details."""
+        agent_id = kwargs.get('agent_id')
+        device = kwargs.get('device')
+        LOG.debug(_("Device %(device)s details requested from %(agent_id)s"),
+                  {'device': device, 'agent_id': agent_id})
+        port = self._db.get_port(device)
+        if port:
+            binding = self._db.get_network_binding(None, port['network_id'])
+            entry = {'device': device,
+                     'network_id': port['network_id'],
+                     'port_id': port['id'],
+                     'admin_state_up': port['admin_state_up'],
+                     'network_type': binding.network_type,
+                     'segmentation_id': binding.segmentation_id,
+                     'physical_network': binding.physical_network}
+            # Set the port status to UP
+            self._db.set_port_status(port['id'], q_const.PORT_STATUS_ACTIVE)
+        else:
+            entry = {'device': device}
+            LOG.debug(_("%s can not be found in database"), device)
+        return entry
+
+    
def update_device_down(self, rpc_context, **kwargs): + """Device no longer exists on agent.""" + # TODO(garyk) - live migration and port status + agent_id = kwargs.get('agent_id') + device = kwargs.get('device') + LOG.debug(_("Device %(device)s no longer exists on %(agent_id)s"), + {'device': device, 'agent_id': agent_id}) + port = self._db.get_port(device) + if port: + entry = {'device': device, + 'exists': True} + # Set port status to DOWN + self._db.set_port_status(port['id'], q_const.PORT_STATUS_DOWN) + else: + entry = {'device': device, + 'exists': False} + LOG.debug(_("%s can not be found in database"), device) + return entry + + def tunnel_sync(self, rpc_context, **kwargs): + """Tunnel sync. + + Dummy function for ovs agent running on Linux to + work with Hyper-V plugin and agent. + """ + entry = dict() + entry['tunnels'] = {} + # Return the list of tunnels IP's to the agent + return entry diff --git a/neutron/plugins/ibm/README b/neutron/plugins/ibm/README new file mode 100644 index 000000000..732fd7776 --- /dev/null +++ b/neutron/plugins/ibm/README @@ -0,0 +1,6 @@ +IBM SDN-VE Neutron Plugin + +This plugin implements Neutron v2 APIs. + +For more details on how to use it please refer to the following page: +http://wiki.openstack.org/wiki/IBM-Neutron diff --git a/neutron/plugins/ibm/__init__.py b/neutron/plugins/ibm/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/neutron/plugins/ibm/agent/__init__.py b/neutron/plugins/ibm/agent/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/neutron/plugins/ibm/agent/sdnve_neutron_agent.py b/neutron/plugins/ibm/agent/sdnve_neutron_agent.py new file mode 100644 index 000000000..e1c8d3ed7 --- /dev/null +++ b/neutron/plugins/ibm/agent/sdnve_neutron_agent.py @@ -0,0 +1,270 @@ +# Copyright 2014 IBM Corp. +# +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Mohammad Banikazemi, IBM Corp. + + +import socket +import sys +import time + +import eventlet +eventlet.monkey_patch() + +from oslo.config import cfg + +from neutron.agent.linux import ip_lib +from neutron.agent.linux import ovs_lib +from neutron.agent import rpc as agent_rpc +from neutron.common import config as common_config +from neutron.common import constants as n_const +from neutron.common import rpc_compat +from neutron.common import topics +from neutron.common import utils as n_utils +from neutron import context +from neutron.openstack.common import log as logging +from neutron.openstack.common import loopingcall +from neutron.plugins.ibm.common import config # noqa +from neutron.plugins.ibm.common import constants + + +LOG = logging.getLogger(__name__) + + +class SdnvePluginApi(agent_rpc.PluginApi): + + def sdnve_info(self, context, info): + return self.call(context, + self.make_msg('sdnve_info', info=info), + topic=self.topic) + + +class SdnveNeutronAgent(rpc_compat.RpcCallback): + + RPC_API_VERSION = '1.1' + + def __init__(self, integ_br, interface_mappings, + info, root_helper, polling_interval, + controller_ip, reset_br, out_of_band): + '''The agent initialization. + + Sets the following parameters and sets up the integration + bridge and physical interfaces if need be. + :param integ_br: name of the integration bridge. + :param interface_mappings: interfaces to physical networks. + :param info: local IP address of this hypervisor. + :param root_helper: utility to use when running shell cmds. 
+ :param polling_interval: interval (secs) to poll DB. + :param controller_ip: Ip address of SDN-VE controller. + ''' + + super(SdnveNeutronAgent, self).__init__() + self.root_helper = root_helper + self.int_bridge_name = integ_br + self.controller_ip = controller_ip + self.interface_mappings = interface_mappings + self.polling_interval = polling_interval + self.info = info + self.reset_br = reset_br + self.out_of_band = out_of_band + + self.agent_state = { + 'binary': 'neutron-sdnve-agent', + 'host': cfg.CONF.host, + 'topic': n_const.L2_AGENT_TOPIC, + 'configurations': {'interface_mappings': interface_mappings, + 'reset_br': self.reset_br, + 'out_of_band': self.out_of_band, + 'controller_ip': self.controller_ip}, + 'agent_type': n_const.AGENT_TYPE_SDNVE, + 'start_flag': True} + + if self.int_bridge_name: + self.int_br = self.setup_integration_br(integ_br, reset_br, + out_of_band, + self.controller_ip) + self.setup_physical_interfaces(self.interface_mappings) + else: + self.int_br = None + + self.setup_rpc() + + def _report_state(self): + try: + self.state_rpc.report_state(self.context, + self.agent_state) + self.agent_state.pop('start_flag', None) + except Exception: + LOG.exception(_("Failed reporting state!")) + + def setup_rpc(self): + if self.int_br: + mac = self.int_br.get_local_port_mac() + self.agent_id = '%s%s' % ('sdnve', (mac.replace(":", ""))) + else: + nameaddr = socket.gethostbyname(socket.gethostname()) + self.agent_id = '%s%s' % ('sdnve_', (nameaddr.replace(".", "_"))) + + self.topic = topics.AGENT + self.plugin_rpc = SdnvePluginApi(topics.PLUGIN) + self.state_rpc = agent_rpc.PluginReportStateAPI(topics.PLUGIN) + + self.context = context.get_admin_context_without_session() + self.endpoints = [self] + consumers = [[constants.INFO, topics.UPDATE]] + + self.connection = agent_rpc.create_consumers(self.endpoints, + self.topic, + consumers) + if self.polling_interval: + heartbeat = loopingcall.FixedIntervalLoopingCall( + self._report_state) + 
heartbeat.start(interval=self.polling_interval)
+
+    # Plugin calls the agents through the following
+    def info_update(self, context, **kwargs):
+        LOG.debug(_("info_update received"))
+        info = kwargs.get('info', {})
+        new_controller = info.get('new_controller')
+        out_of_band = info.get('out_of_band')
+        if self.int_br and new_controller:
+            LOG.debug(_("info_update received. New controller "
+                        "is to be set to: %s"), new_controller)
+            self.int_br.run_vsctl(["set-controller",
+                                   self.int_bridge_name,
+                                   "tcp:" + new_controller])
+            if out_of_band:
+                LOG.debug(_("info_update received. New controller "
+                            "is set to be out of band"))
+                self.int_br.set_db_attribute("controller",
+                                             self.int_bridge_name,
+                                             "connection-mode",
+                                             "out-of-band")
+
+    def setup_integration_br(self, bridge_name, reset_br, out_of_band,
+                             controller_ip=None):
+        '''Sets up the integration bridge.
+
+        Create the bridge and remove all existing flows if reset_br is True.
+        Otherwise, creates the bridge if not already existing.
+        :param bridge_name: the name of the integration bridge.
+        :param reset_br: A boolean to reset the bridge if True.
+        :param out_of_band: A boolean indicating controller is out of band.
+        :param controller_ip: IP address to use as the bridge controller.
+        :returns: the integration bridge
+        '''
+
+        int_br = ovs_lib.OVSBridge(bridge_name, self.root_helper)
+        if reset_br:
+            int_br.reset_bridge()
+            int_br.remove_all_flows()
+        else:
+            int_br.create()
+
+        # set the controller
+        if controller_ip:
+            int_br.run_vsctl(
+                ["set-controller", bridge_name, "tcp:" + controller_ip])
+        if out_of_band:
+            int_br.set_db_attribute("controller", bridge_name,
+                                    "connection-mode", "out-of-band")
+
+        return int_br
+
+    def setup_physical_interfaces(self, interface_mappings):
+        '''Sets up the physical network interfaces.
+
+        Link physical interfaces to the integration bridge.
+        :param interface_mappings: map physical net names to interface names.
+ ''' + + for physical_network, interface in interface_mappings.iteritems(): + LOG.info(_("Mapping physical network %(physical_network)s to " + "interface %(interface)s"), + {'physical_network': physical_network, + 'interface': interface}) + # Connect the physical interface to the bridge + if not ip_lib.device_exists(interface, self.root_helper): + LOG.error(_("Interface %(interface)s for physical network " + "%(physical_network)s does not exist. Agent " + "terminated!"), + {'physical_network': physical_network, + 'interface': interface}) + raise SystemExit(1) + self.int_br.add_port(interface) + + def sdnve_info(self): + details = self.plugin_rpc.sdnve_info( + self.context, + {'info': self.info}) + return details + + def rpc_loop(self): + + while True: + start = time.time() + LOG.debug(_("Agent in the rpc loop.")) + + # sleep till end of polling interval + elapsed = (time.time() - start) + if (elapsed < self.polling_interval): + time.sleep(self.polling_interval - elapsed) + else: + LOG.info(_("Loop iteration exceeded interval " + "(%(polling_interval)s vs. 
%(elapsed)s)!"), + {'polling_interval': self.polling_interval, + 'elapsed': elapsed}) + + def daemon_loop(self): + self.rpc_loop() + + +def create_agent_config_map(config): + + interface_mappings = n_utils.parse_mappings( + config.SDNVE.interface_mappings) + + controller_ips = config.SDNVE.controller_ips + LOG.info(_("Controller IPs: %s"), controller_ips) + controller_ip = controller_ips[0] + + return { + 'integ_br': config.SDNVE.integration_bridge, + 'interface_mappings': interface_mappings, + 'controller_ip': controller_ip, + 'info': config.SDNVE.info, + 'root_helper': config.SDNVE_AGENT.root_helper, + 'polling_interval': config.SDNVE_AGENT.polling_interval, + 'reset_br': config.SDNVE.reset_bridge, + 'out_of_band': config.SDNVE.out_of_band} + + +def main(): + cfg.CONF.register_opts(ip_lib.OPTS) + common_config.init(sys.argv[1:]) + common_config.setup_logging(cfg.CONF) + + try: + agent_config = create_agent_config_map(cfg.CONF) + except ValueError as e: + LOG.exception(_("%s Agent terminated!"), e) + raise SystemExit(1) + + plugin = SdnveNeutronAgent(**agent_config) + + # Start everything. + LOG.info(_("Agent initialized successfully, now running... ")) + plugin.daemon_loop() diff --git a/neutron/plugins/ibm/common/__init__.py b/neutron/plugins/ibm/common/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/neutron/plugins/ibm/common/config.py b/neutron/plugins/ibm/common/config.py new file mode 100644 index 000000000..68e2dbd42 --- /dev/null +++ b/neutron/plugins/ibm/common/config.py @@ -0,0 +1,74 @@ +# Copyright 2014 IBM Corp. +# +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
# You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Mohammad Banikazemi, IBM Corp.


from oslo.config import cfg


# Default: no physical-network-to-interface mappings configured.
DEFAULT_INTERFACE_MAPPINGS = []
# Default: talk to a controller on the local host.
DEFAULT_CONTROLLER_IPS = ['127.0.0.1']

# Options shared by the SDN-VE plugin and agent; registered under the
# [SDNVE] section of the configuration file.
sdnve_opts = [
    cfg.BoolOpt('use_fake_controller', default=False,
                help=_("If set to True uses a fake controller.")),
    cfg.StrOpt('base_url', default='/one/nb/v2/',
               help=_("Base URL for SDN-VE controller REST API")),
    cfg.ListOpt('controller_ips', default=DEFAULT_CONTROLLER_IPS,
                help=_("List of IP addresses of SDN-VE controller(s)")),
    cfg.StrOpt('info', default='sdnve_info_string',
               help=_("SDN-VE RPC subject")),
    cfg.StrOpt('port', default='8443',
               help=_("SDN-VE controller port number")),
    cfg.StrOpt('format', default='json',
               help=_("SDN-VE request/response format")),
    cfg.StrOpt('userid', default='admin',
               help=_("SDN-VE administrator user id")),
    cfg.StrOpt('password', default='admin', secret=True,
               help=_("SDN-VE administrator password")),
    cfg.StrOpt('integration_bridge',
               help=_("Integration bridge to use")),
    cfg.BoolOpt('reset_bridge', default=True,
                help=_("Reset the integration bridge before use")),
    cfg.BoolOpt('out_of_band', default=True,
                help=_("Indicating if controller is out of band or not")),
    cfg.ListOpt('interface_mappings',
                default=DEFAULT_INTERFACE_MAPPINGS,
                # NOTE(review): the help text previously read "List of :";
                # the bracketed placeholders had been stripped by markup
                # processing.  Restored here -- confirm exact wording
                # against upstream neutron.
                help=_("List of <physical_network_name>:<interface_name> "
                       "mappings")),
    cfg.StrOpt('default_tenant_type', default='OVERLAY',
               help=_("Tenant type: OVERLAY (default) or OF")),
    cfg.StrOpt('overlay_signature', default='SDNVE-OVERLAY',
               help=_("The string in tenant description that indicates "
                      "the tenant is a OVERLAY tenant")),
    cfg.StrOpt('of_signature', default='SDNVE-OF',
               help=_("The string in tenant description that indicates "
                      "the tenant is a OF tenant")),
]

# Agent-only options; registered under the [SDNVE_AGENT] section.
sdnve_agent_opts = [
    cfg.IntOpt('polling_interval', default=2,
               help=_("Agent polling interval if necessary")),
    cfg.StrOpt('root_helper', default='sudo',
               help=_("Using root helper")),
    cfg.BoolOpt('rpc', default=True,
                help=_("Whether using rpc")),
]


cfg.CONF.register_opts(sdnve_opts, "SDNVE")
cfg.CONF.register_opts(sdnve_agent_opts, "SDNVE_AGENT")

# --- neutron/plugins/ibm/common/constants.py (new file in this patch) ---
# Copyright 2014 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Mohammad Banikazemi, IBM Corp.


import httplib

# Topic for info notifications between the plugin and agent
INFO = 'info'

# Tenant network types understood by the plugin.
TENANT_TYPE_OF = 'OF'
TENANT_TYPE_OVERLAY = 'OVERLAY'

# HTTP status codes that the REST client treats as success.
HTTP_ACCEPTABLE = [httplib.OK,
                   httplib.CREATED,
                   httplib.ACCEPTED,
                   httplib.NO_CONTENT
                   ]

# --- neutron/plugins/ibm/common/exceptions.py (new file in this patch) ---
# Copyright 2014 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Mohammad Banikazemi, IBM Corp.

from neutron.common import exceptions


class SdnveException(exceptions.NeutronException):
    '''Generic failure reported by the SDN-VE plugin.

    Raised with msg=... when a controller REST call fails.
    '''
    message = _("An unexpected error occurred in the SDN-VE Plugin. "
                "Here is the error message: %(msg)s")


class BadInputException(exceptions.BadRequest):
    '''Request rejected because required information is missing.'''
    # Fix: the original message misspelled "necessary" as "nececessary".
    message = _("The input does not contain necessary info: %(msg)s")

# --- neutron/plugins/ibm/sdnve_api.py (new file in this patch) ---
# Copyright 2014 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Mohammad Banikazemi, IBM Corp.
import httplib
import urllib

import httplib2
from keystoneclient.v2_0 import client as keyclient
from oslo.config import cfg

from neutron.api.v2 import attributes
from neutron.openstack.common import log as logging
from neutron.plugins.ibm.common import config  # noqa
from neutron.plugins.ibm.common import constants
from neutron import wsgi

LOG = logging.getLogger(__name__)

SDNVE_VERSION = '2.0'
SDNVE_ACTION_PREFIX = '/sdnve'
SDNVE_RETRIES = 0
# NOTE(review): name is misspelled ("RETRIY"); kept because it is a
# module-level name other code may import.  retries/retry_interval are
# stored on RequestHandler but not used in the code visible here.
SDNVE_RETRIY_INTERVAL = 1
# Tenant type string the controller uses for overlay tenants.
SDNVE_TENANT_TYPE_OVERLAY = u'DOVE'
# Controller endpoint template: https://<ip>:<port><base_url>
SDNVE_URL = 'https://%s:%s%s'


class RequestHandler(object):
    '''Handles processing requests to and responses from controller.'''

    def __init__(self, controller_ips=None, port=None, ssl=None,
                 base_url=None, userid=None, password=None,
                 timeout=10, formats=None):
        '''Initializes the RequestHandler for communication with controller

        Following keyword arguments are used; if not specified, default
        values are used.
        :param port: Port of the controller REST endpoint.
        :param timeout: Time out for http requests.
        :param userid: User id for accessing controller.
        :param password: Password for accessing the controller.
        :param base_url: The base url for the controller.
        :param controller_ips: List of controller IP addresses.
        :param formats: Supported formats.
        '''
        self.port = port or cfg.CONF.SDNVE.port
        self.timeout = timeout
        self._s_meta = None
        self.connection = None
        # NOTE(review): SSL certificate validation is disabled for the
        # controller connection.
        self.httpclient = httplib2.Http(
            disable_ssl_certificate_validation=True)
        self.cookie = None

        userid = userid or cfg.CONF.SDNVE.userid
        password = password or cfg.CONF.SDNVE.password
        if (userid and password):
            self.httpclient.add_credentials(userid, password)

        self.base_url = base_url or cfg.CONF.SDNVE.base_url
        self.controller_ips = controller_ips or cfg.CONF.SDNVE.controller_ips

        LOG.info(_("The IP addr of available SDN-VE controllers: %s"),
                 self.controller_ips)
        # The first configured controller is the initially active one;
        # do_request() may fail over and update self.controller_ip.
        self.controller_ip = self.controller_ips[0]
        LOG.info(_("The SDN-VE controller IP address: %s"),
                 self.controller_ip)

        self.new_controller = False
        self.format = formats or cfg.CONF.SDNVE.format

        self.version = SDNVE_VERSION
        self.action_prefix = SDNVE_ACTION_PREFIX
        self.retries = SDNVE_RETRIES
        self.retry_interval = SDNVE_RETRIY_INTERVAL

    def serialize(self, data):
        '''Serializes a dictionary with a single key.'''

        if isinstance(data, dict):
            return wsgi.Serializer().serialize(data, self.content_type())
        elif data:
            raise TypeError(_("unable to serialize object type: '%s'") %
                            type(data))

    def deserialize(self, data, status_code):
        '''Deserializes an xml or json string into a dictionary.'''

        # NOTE(mb): Temporary fix for backend controller requirement
        data = data.replace("router_external", "router:external")

        if status_code == httplib.NO_CONTENT:
            return data
        try:
            deserialized_data = wsgi.Serializer(
                metadata=self._s_meta).deserialize(data, self.content_type())
            deserialized_data = deserialized_data['body']
        except Exception:
            # Fall back to the raw payload when it is not parseable.
            deserialized_data = data

        return deserialized_data

    def content_type(self, format=None):
        '''Returns the mime-type for either 'xml' or 'json'.'''

        return 'application/%s' % (format or self.format)

    def delete(self, url, body=None, headers=None, params=None):
        return self.do_request("DELETE", url, body=body,
                               headers=headers, params=params)

    def get(self, url, body=None, headers=None, params=None):
        return self.do_request("GET", url, body=body,
                               headers=headers, params=params)

    def post(self, url, body=None, headers=None, params=None):
        return self.do_request("POST", url, body=body,
                               headers=headers, params=params)

    def put(self, url, body=None, headers=None, params=None):
        return self.do_request("PUT", url, body=body,
                               headers=headers, params=params)

    def do_request(self, method, url, body=None, headers=None,
                   params=None, connection_type=None):
        '''Issue a REST call, trying each controller until one answers.

        Returns a (status_code, deserialized_body) tuple.  On total
        failure returns (httplib.REQUEST_TIMEOUT, error string).
        '''

        status_code = -1
        replybody_deserialized = ''

        if body:
            body = self.serialize(body)

        self.headers = headers or {'Content-Type': self.content_type()}
        if self.cookie:
            self.headers['cookie'] = self.cookie

        # Try the currently active controller first (if we already failed
        # over away from the first configured one), then every configured
        # controller in order.
        if self.controller_ip != self.controller_ips[0]:
            controllers = [self.controller_ip]
        else:
            controllers = []
        controllers.extend(self.controller_ips)

        for controller_ip in controllers:
            serverurl = SDNVE_URL % (controller_ip, self.port, self.base_url)
            myurl = serverurl + url
            if params and isinstance(params, dict):
                myurl += '?' + urllib.urlencode(params, doseq=1)

            try:
                LOG.debug(_("Sending request to SDN-VE. url: "
                            "%(myurl)s method: %(method)s body: "
                            "%(body)s header: %(header)s "),
                          {'myurl': myurl, 'method': method,
                           'body': body, 'header': self.headers})
                resp, replybody = self.httpclient.request(
                    myurl, method=method, body=body, headers=self.headers)
                LOG.debug(("Response recd from SDN-VE. resp: %(resp)s"
                           "body: %(body)s"),
                          {'resp': resp.status, 'body': replybody})
                status_code = resp.status

            except Exception as e:
                LOG.error(_("Error: Could not reach server: %(url)s "
                            "Exception: %(excp)s."),
                          {'url': myurl, 'excp': e})
                # Drop the session cookie and try the next controller.
                self.cookie = None
                continue

            if status_code not in constants.HTTP_ACCEPTABLE:
                # Reachable controller but non-success status: fall
                # through and try the next controller in the list.
                LOG.debug(_("Error message: %(reply)s -- Status: %(status)s"),
                          {'reply': replybody, 'status': status_code})
            else:
                LOG.debug(_("Received response status: %s"), status_code)

                if resp.get('set-cookie'):
                    self.cookie = resp['set-cookie']
                replybody_deserialized = self.deserialize(
                    replybody,
                    status_code)
                LOG.debug(_("Deserialized body: %s"), replybody_deserialized)
                if controller_ip != self.controller_ip:
                    # bcast the change of controller
                    self.new_controller = True
                    self.controller_ip = controller_ip

                return (status_code, replybody_deserialized)

        return (httplib.REQUEST_TIMEOUT, 'Could not reach server(s)')


class Client(RequestHandler):
    '''Client for SDNVE controller.'''

    def __init__(self):
        '''Initialize a new SDNVE client.'''
        super(Client, self).__init__()

        self.keystoneclient = KeystoneClient()

    # Controller URL path (relative to base_url) for each Neutron resource.
    resource_path = {
        'network': "ln/networks/",
        'subnet': "ln/subnets/",
        'port': "ln/ports/",
        'tenant': "ln/tenants/",
        'router': "ln/routers/",
        'floatingip': "ln/floatingips/",
    }

    def process_request(self, body):
        '''Processes requests according to requirements of controller.'''
        if self.format == 'json':
            # Controller JSON keys use '_' where Neutron uses ':'
            # (e.g. "router:external"); unset attributes are dropped.
            body = dict(
                (k.replace(':', '_'), v) for k, v in body.items()
                if attributes.is_attr_set(v))
        return body

    def sdnve_list(self, resource, **params):
        '''Fetches a list of resources.'''

        res = self.resource_path.get(resource, None)
        if not res:
            LOG.info(_("Bad resource for forming a list request"))
            return 0, ''

        return self.get(res, params=params)

    def sdnve_show(self, resource, specific, **params):
        '''Fetches information of a certain resource.'''

        res = self.resource_path.get(resource, None)
        if not res:
            LOG.info(_("Bad resource for forming a show request"))
            return 0, ''

        return self.get(res + specific, params=params)

    def sdnve_create(self, resource, body):
        '''Creates a new resource.'''

        res = self.resource_path.get(resource, None)
        if not res:
            LOG.info(_("Bad resource for forming a create request"))
            return 0, ''

        body = self.process_request(body)
        status, data = self.post(res, body=body)
        return (status, data)

    def sdnve_update(self, resource, specific, body=None):
        '''Updates a resource.'''

        res = self.resource_path.get(resource, None)
        if not res:
            LOG.info(_("Bad resource for forming a update request"))
            return 0, ''

        body = self.process_request(body)
        return self.put(res + specific, body=body)

    def sdnve_delete(self, resource, specific):
        '''Deletes the specified resource.'''

        res = self.resource_path.get(resource, None)
        if not res:
            LOG.info(_("Bad resource for forming a delete request"))
            return 0, ''

        return self.delete(res + specific)

    def _tenant_id_conversion(self, osid):
        # Identity mapping today; hook for mapping OpenStack tenant ids
        # to controller tenant ids.
        return osid

    def sdnve_get_tenant_byid(self, os_tenant_id):
        '''Look up a controller tenant; returns (tenant_id, tenant_type).

        Returns (None, None) when the tenant is not found.
        '''
        sdnve_tenant_id = self._tenant_id_conversion(os_tenant_id)
        resp, content = self.sdnve_show('tenant', sdnve_tenant_id)
        if resp in constants.HTTP_ACCEPTABLE:
            tenant_id = content.get('id')
            tenant_type = content.get('network_type')
            if tenant_type == SDNVE_TENANT_TYPE_OVERLAY:
                # Map the controller's overlay label back to Neutron's.
                tenant_type = constants.TENANT_TYPE_OVERLAY
            return tenant_id, tenant_type
        return None, None

    def sdnve_check_and_create_tenant(self, os_tenant_id, network_type=None):
        '''Return the controller tenant id, creating the tenant if needed.

        Returns None on failure or on a tenant/network type mismatch.
        '''

        if not os_tenant_id:
            return
        tenant_id, tenant_type = self.sdnve_get_tenant_byid(os_tenant_id)
        if tenant_id:
            if not network_type:
                return tenant_id
            if tenant_type != network_type:
                LOG.info(_("Non matching tenant and network types: "
                           "%(ttype)s %(ntype)s"),
                         {'ttype': tenant_type, 'ntype': network_type})
                return
            return tenant_id

        # Have to create a new tenant
        sdnve_tenant_id = self._tenant_id_conversion(os_tenant_id)
        if not network_type:
            network_type = self.keystoneclient.get_tenant_type(os_tenant_id)
        if network_type == constants.TENANT_TYPE_OVERLAY:
            network_type = SDNVE_TENANT_TYPE_OVERLAY

        pinn_desc = ("Created by SDN-VE Neutron Plugin, OS project name = " +
                     self.keystoneclient.get_tenant_name(os_tenant_id))

        res, content = self.sdnve_create('tenant',
                                         {'id': sdnve_tenant_id,
                                          'name': os_tenant_id,
                                          'network_type': network_type,
                                          'description': pinn_desc})
        if res not in constants.HTTP_ACCEPTABLE:
            return

        return sdnve_tenant_id

    def sdnve_get_controller(self):
        '''Return the new controller IP after a failover, else None.

        Clears the new_controller flag, so a change is reported once.
        '''
        if self.new_controller:
            self.new_controller = False
            return self.controller_ip


class KeystoneClient(object):
    '''Thin wrapper around keystoneclient used for tenant lookups.'''

    def __init__(self, username=None, tenant_name=None, password=None,
                 auth_url=None):

        # Credentials default to the [keystone_authtoken] admin settings.
        keystone_conf = cfg.CONF.keystone_authtoken
        keystone_auth_url = ('%s://%s:%s/v2.0/' %
                             (keystone_conf.auth_protocol,
                              keystone_conf.auth_host,
                              keystone_conf.auth_port))

        username = username or keystone_conf.admin_user
        tenant_name = tenant_name or keystone_conf.admin_tenant_name
        password = password or keystone_conf.admin_password
        auth_url = auth_url or keystone_auth_url

        self.overlay_signature = cfg.CONF.SDNVE.overlay_signature
        self.of_signature = cfg.CONF.SDNVE.of_signature
        self.default_tenant_type = cfg.CONF.SDNVE.default_tenant_type

        self.client = keyclient.Client(username=username,
                                       password=password,
                                       tenant_name=tenant_name,
                                       auth_url=auth_url)

    def get_tenant_byid(self, id):
        '''Fetch a keystone tenant by id; returns None when not found.'''

        try:
            return self.client.tenants.get(id)
        except Exception:
            LOG.exception(_("Did not find tenant: %r"), id)

    def get_tenant_type(self, id):
        '''Derive the tenant type from signature strings in its description.

        Falls back to the configured default_tenant_type.
        '''

        tenant = self.get_tenant_byid(id)
        if tenant:
            description = tenant.description
            if description:
                if (description.find(self.overlay_signature) >= 0):
                    return constants.TENANT_TYPE_OVERLAY
                if (description.find(self.of_signature) >= 0):
                    return constants.TENANT_TYPE_OF
        return self.default_tenant_type

    def get_tenant_name(self, id):
        '''Return the tenant's name, or 'not found' when lookup fails.'''

        tenant = self.get_tenant_byid(id)
        if tenant:
            return tenant.name
        return 'not found'

# --- neutron/plugins/ibm/sdnve_api_fake.py (new file in this patch) ---
# Copyright 2014 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Mohammad Banikazemi, IBM Corp.
from neutron.openstack.common import log as logging
from neutron.plugins.ibm.common import constants

LOG = logging.getLogger(__name__)

HTTP_OK = 200

# Every fake operation succeeds with an empty HTTP_OK reply.
_OK_REPLY = (HTTP_OK, None)


class FakeClient():

    '''Stand-in for the SDN-VE controller client.

    Used when the fake controller is enabled: each operation is logged
    at INFO level and reported as successful without contacting any
    controller.
    '''

    def __init__(self, **kwargs):
        LOG.info(_('Fake SDNVE controller initialized'))

    def _ok(self, message):
        # Log the fake operation and return the canned success reply.
        LOG.info(message)
        return _OK_REPLY

    def sdnve_list(self, resource, **_params):
        return self._ok(_('Fake SDNVE controller: list'))

    def sdnve_show(self, resource, specific, **_params):
        return self._ok(_('Fake SDNVE controller: show'))

    def sdnve_create(self, resource, body):
        return self._ok(_('Fake SDNVE controller: create'))

    def sdnve_update(self, resource, specific, body=None):
        return self._ok(_('Fake SDNVE controller: update'))

    def sdnve_delete(self, resource, specific):
        return self._ok(_('Fake SDNVE controller: delete'))

    def sdnve_get_tenant_byid(self, id):
        # Pretend every tenant exists and is an OpenFlow tenant.
        LOG.info(_('Fake SDNVE controller: get tenant by id'))
        return id, constants.TENANT_TYPE_OF

    def sdnve_check_and_create_tenant(self, id, network_type=None):
        LOG.info(_('Fake SDNVE controller: check and create tenant'))
        return id

    def sdnve_get_controller(self):
        # No failover ever happens with the fake controller.
        LOG.info(_('Fake SDNVE controller: get controller'))
        return None

# --- neutron/plugins/ibm/sdnve_neutron_plugin.py (new file in this patch) ---
# Copyright 2014 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Mohammad Banikazemi, IBM Corp.


import functools

from oslo.config import cfg

from neutron.common import constants as n_const
from neutron.common import exceptions as n_exc
from neutron.common import rpc_compat
from neutron.common import topics
from neutron.db import agents_db
from neutron.db import db_base_plugin_v2
from neutron.db import external_net_db
from neutron.db import l3_gwmode_db
from neutron.db import portbindings_db
from neutron.db import quota_db  # noqa
from neutron.extensions import portbindings
from neutron.openstack.common import excutils
from neutron.openstack.common import log as logging
from neutron.plugins.ibm.common import config  # noqa
from neutron.plugins.ibm.common import constants
from neutron.plugins.ibm.common import exceptions as sdnve_exc
from neutron.plugins.ibm import sdnve_api as sdnve
from neutron.plugins.ibm import sdnve_api_fake as sdnve_fake

LOG = logging.getLogger(__name__)


class SdnveRpcCallbacks():
    '''RPC callbacks exposed by the plugin to agents.'''

    def __init__(self, notifier):
        self.notifier = notifier  # used to notify the agent

    def sdnve_info(self, rpc_context, **kwargs):
        '''Update new information.'''
        info = kwargs.get('info')
        # Notify all other listening agents
        self.notifier.info_update(rpc_context, info)
        return info


class AgentNotifierApi(rpc_compat.RpcProxy):
    '''Agent side of the SDN-VE rpc API.'''

    BASE_RPC_API_VERSION = '1.0'

    def __init__(self, topic):
        super(AgentNotifierApi, self).__init__(
            topic=topic, default_version=self.BASE_RPC_API_VERSION)

        self.topic_info_update = topics.get_topic_name(topic,
                                                       constants.INFO,
                                                       topics.UPDATE)

    def info_update(self, context, info):
        # Fan out the info payload to every listening agent.
        self.fanout_cast(context,
                         self.make_msg('info_update',
                                       info=info),
                         topic=self.topic_info_update)


def _ha(func):
    '''Supports the high availability feature of the controller.'''

    @functools.wraps(func)
    def hawrapper(self, *args, **kwargs):
        '''This wrapper sets the new controller if necessary

        When a controller is detected to be not responding, and a
        new controller is chosen to be used in its place, this decorator
        makes sure the existing integration bridges are set to point
        to the new controller by calling the set_controller method.
        '''
        # args[0] is the request context of the wrapped plugin method.
        ret_func = func(self, *args, **kwargs)
        self.set_controller(args[0])
        return ret_func
    return hawrapper


class SdnvePluginV2(db_base_plugin_v2.NeutronDbPluginV2,
                    external_net_db.External_net_db_mixin,
                    portbindings_db.PortBindingMixin,
                    l3_gwmode_db.L3_NAT_db_mixin,
                    agents_db.AgentDbMixin,
                    ):

    '''
    Implement the Neutron abstractions using SDN-VE SDN Controller.
    '''

    __native_bulk_support = False
    __native_pagination_support = False
    __native_sorting_support = False

    supported_extension_aliases = ["binding", "router", "external-net",
                                   "agent", "quotas"]

    def __init__(self, configfile=None):
        self.base_binding_dict = {
            portbindings.VIF_TYPE: portbindings.VIF_TYPE_OVS,
            portbindings.VIF_DETAILS: {portbindings.CAP_PORT_FILTER: False}}

        super(SdnvePluginV2, self).__init__()
        self.setup_rpc()
        self.sdnve_controller_select()
        # Either the real REST client or the fake one, per configuration.
        if self.fake_controller:
            self.sdnve_client = sdnve_fake.FakeClient()
        else:
            self.sdnve_client = sdnve.Client()

    def sdnve_controller_select(self):
        self.fake_controller = cfg.CONF.SDNVE.use_fake_controller

    def setup_rpc(self):
        # RPC support
        self.topic = topics.PLUGIN
        self.conn = rpc_compat.create_connection(new=True)
        self.notifier = AgentNotifierApi(topics.AGENT)
        self.endpoints = [SdnveRpcCallbacks(self.notifier),
                          agents_db.AgentExtRpcCallback()]
        self.conn.create_consumer(self.topic, self.endpoints,
                                  fanout=False)
        # Consume from all consumers in threads
        self.conn.consume_in_threads()

    def _update_base_binding_dict(self, tenant_type):
        # Overlay tenants get bridge VIFs; OpenFlow tenants get OVS VIFs.
        if tenant_type == constants.TENANT_TYPE_OVERLAY:
            self.base_binding_dict[
                portbindings.VIF_TYPE] = portbindings.VIF_TYPE_BRIDGE
        if tenant_type == constants.TENANT_TYPE_OF:
            self.base_binding_dict[
                portbindings.VIF_TYPE] = portbindings.VIF_TYPE_OVS

    def set_controller(self, context):
        '''Notify agents when the client failed over to a new controller.'''
        LOG.info(_("Set a new controller if needed."))
        new_controller = self.sdnve_client.sdnve_get_controller()
        if new_controller:
            self.notifier.info_update(
                context,
                {'new_controller': new_controller})
            LOG.info(_("Set the controller to a new controller: %s"),
                     new_controller)

    def _process_request(self, request, current):
        '''Return only the request keys whose values actually change.'''
        new_request = dict(
            (k, v) for k, v in request.items()
            if v != current.get(k))

        msg = _("Original SDN-VE HTTP request: %(orig)s; New request: %(new)s")
        LOG.debug(msg, {'orig': request, 'new': new_request})
        return new_request

    #
    # Network
    #

    @_ha
    def create_network(self, context, network):
        '''Create the network in the DB, then mirror it to the controller.

        Rolls the DB creation back if the controller call fails.
        '''
        LOG.debug(_("Create network in progress: %r"), network)
        session = context.session

        tenant_id = self._get_tenant_id_for_create(context, network['network'])
        # Create a new SDN-VE tenant if need be
        sdnve_tenant = self.sdnve_client.sdnve_check_and_create_tenant(
            tenant_id)
        if sdnve_tenant is None:
            raise sdnve_exc.SdnveException(
                msg=_('Create net failed: no SDN-VE tenant.'))

        with session.begin(subtransactions=True):
            net = super(SdnvePluginV2, self).create_network(context, network)
            self._process_l3_create(context, net, network['network'])

        # Create SDN-VE network
        (res, data) = self.sdnve_client.sdnve_create('network', net)
        if res not in constants.HTTP_ACCEPTABLE:
            super(SdnvePluginV2, self).delete_network(context, net['id'])
            raise sdnve_exc.SdnveException(
                msg=(_('Create net failed in SDN-VE: %s') % res))

        LOG.debug(_("Created network: %s"), net['id'])
        return net

    @_ha
    def update_network(self, context, id, network):
        '''Update the network in DB and controller; revert DB on failure.'''
        LOG.debug(_("Update network in progress: %r"), network)
        session = context.session

        processed_request = {}
        with session.begin(subtransactions=True):
            original_network = super(SdnvePluginV2, self).get_network(
                context, id)
            # Only the fields that actually change are sent to SDN-VE.
            processed_request['network'] = self._process_request(
                network['network'], original_network)
            net = super(SdnvePluginV2, self).update_network(
                context, id, network)
            self._process_l3_update(context, net, network['network'])

        if processed_request['network']:
            (res, data) = self.sdnve_client.sdnve_update(
                'network', id, processed_request['network'])
            if res not in constants.HTTP_ACCEPTABLE:
                net = super(SdnvePluginV2, self).update_network(
                    context, id, {'network': original_network})
                raise sdnve_exc.SdnveException(
                    msg=(_('Update net failed in SDN-VE: %s') % res))

        return net

    @_ha
    def delete_network(self, context, id):
        '''Delete from DB first; controller failure is logged, not raised.'''
        LOG.debug(_("Delete network in progress: %s"), id)
        session = context.session

        with session.begin(subtransactions=True):
            self._process_l3_delete(context, id)
            super(SdnvePluginV2, self).delete_network(context, id)

        (res, data) = self.sdnve_client.sdnve_delete('network', id)
        if res not in constants.HTTP_ACCEPTABLE:
            LOG.error(
                _("Delete net failed after deleting the network in DB: %s"),
                res)

    @_ha
    def get_network(self, context, id, fields=None):
        LOG.debug(_("Get network in progress: %s"), id)
        return super(SdnvePluginV2, self).get_network(context, id, fields)

    @_ha
    def get_networks(self, context, filters=None, fields=None, sorts=None,
                     limit=None, marker=None, page_reverse=False):
        LOG.debug(_("Get networks in progress"))
        return super(SdnvePluginV2, self).get_networks(
            context, filters, fields, sorts, limit, marker, page_reverse)

    #
    # Port
    #

    @_ha
    def create_port(self, context, port):
        '''Create the port in the DB, then mirror it to the controller.'''
        LOG.debug(_("Create port in progress: %r"), port)
        session = context.session

        # Set port status as 'ACTIVE' to avoid needing the agent
        port['port']['status'] = n_const.PORT_STATUS_ACTIVE
        port_data = port['port']

        with session.begin(subtransactions=True):
            port = super(SdnvePluginV2, self).create_port(context, port)
            if 'id' not in port:
                return port
            # If the tenant_id is set to '' by create_port, add the id to
            # the request being sent to the controller as the controller
            # requires a tenant id
            tenant_id = port.get('tenant_id')
            if not tenant_id:
                LOG.debug(_("Create port does not have tenant id info"))
                original_network = super(SdnvePluginV2, self).get_network(
                    context, port['network_id'])
                original_tenant_id = original_network['tenant_id']
                port['tenant_id'] = original_tenant_id
                LOG.debug(
                    _("Create port does not have tenant id info; "
                      "obtained is: %s"),
                    port['tenant_id'])

            # NOTE(review): this uses the pre-fixup tenant_id, which may
            # be '' in the branch above -- confirm against the controller
            # lookup semantics.
            os_tenant_id = tenant_id
            id_na, tenant_type = self.sdnve_client.sdnve_get_tenant_byid(
                os_tenant_id)
            self._update_base_binding_dict(tenant_type)
            self._process_portbindings_create_and_update(context,
                                                         port_data, port)

        # NOTE(mb): Remove this block when controller is updated
        # Remove the information that the controller does not accept
        sdnve_port = port.copy()
        sdnve_port.pop('device_id', None)
        sdnve_port.pop('device_owner', None)

        (res, data) = self.sdnve_client.sdnve_create('port', sdnve_port)
        if res not in constants.HTTP_ACCEPTABLE:
            super(SdnvePluginV2, self).delete_port(context, port['id'])
            raise sdnve_exc.SdnveException(
                msg=(_('Create port failed in SDN-VE: %s') % res))

        LOG.debug(_("Created port: %s"), port.get('id', 'id not found'))
        return port

    @_ha
    def update_port(self, context, id, port):
        '''Update the port in DB and controller; revert DB on failure.'''
        LOG.debug(_("Update port in progress: %r"), port)
        session = context.session

        processed_request = {}
        with session.begin(subtransactions=True):
            original_port = super(SdnvePluginV2, self).get_port(
                context, id)
            processed_request['port'] = self._process_request(
                port['port'], original_port)
            updated_port = super(SdnvePluginV2, self).update_port(
                context, id, port)

            os_tenant_id = updated_port['tenant_id']
            id_na, tenant_type = self.sdnve_client.sdnve_get_tenant_byid(
                os_tenant_id)
            self._update_base_binding_dict(tenant_type)
            self._process_portbindings_create_and_update(context,
                                                         port['port'],
                                                         updated_port)

        if processed_request['port']:
            (res, data) = self.sdnve_client.sdnve_update(
                'port', id, processed_request['port'])
            if res not in constants.HTTP_ACCEPTABLE:
                updated_port = super(SdnvePluginV2, self).update_port(
                    context, id, {'port': original_port})
                raise sdnve_exc.SdnveException(
                    msg=(_('Update port failed in SDN-VE: %s') % res))

        return updated_port

    @_ha
    def delete_port(self, context, id, l3_port_check=True):
        '''Delete from DB first; controller failure is logged, not raised.'''
        LOG.debug(_("Delete port in progress: %s"), id)

        # if needed, check to see if this is a port owned by
        # an l3-router. If so, we should prevent deletion.
        if l3_port_check:
            self.prevent_l3_port_deletion(context, id)
        self.disassociate_floatingips(context, id)

        super(SdnvePluginV2, self).delete_port(context, id)

        (res, data) = self.sdnve_client.sdnve_delete('port', id)
        if res not in constants.HTTP_ACCEPTABLE:
            LOG.error(
                _("Delete port operation failed in SDN-VE "
                  "after deleting the port from DB: %s"), res)

    #
    # Subnet
    #

    @_ha
    def create_subnet(self, context, subnet):
        '''Create the subnet in the DB, then mirror it to the controller.'''
        LOG.debug(_("Create subnet in progress: %r"), subnet)
        new_subnet = super(SdnvePluginV2, self).create_subnet(context, subnet)

        # Note(mb): Use of null string currently required by controller
        sdnve_subnet = new_subnet.copy()
        if subnet.get('gateway_ip') is None:
            sdnve_subnet['gateway_ip'] = 'null'
        (res, data) = self.sdnve_client.sdnve_create('subnet', sdnve_subnet)
        if res not in constants.HTTP_ACCEPTABLE:
            super(SdnvePluginV2, self).delete_subnet(context,
                                                     new_subnet['id'])
            raise sdnve_exc.SdnveException(
                msg=(_('Create subnet failed in SDN-VE: %s') % res))

        LOG.debug(_("Subnet created: %s"), new_subnet['id'])

        return new_subnet

    @_ha
    def update_subnet(self, context, id, subnet):
        '''Update the subnet in DB and controller; revert DB on failure.'''
        LOG.debug(_("Update subnet in progress: %r"), subnet)
        session = context.session

        processed_request = {}
        with session.begin(subtransactions=True):
            original_subnet = super(SdnvePluginV2, self).get_subnet(
                context, id)
            processed_request['subnet'] = self._process_request(
                subnet['subnet'], original_subnet)
            updated_subnet = super(SdnvePluginV2, self).update_subnet(
                context, id, subnet)

        if processed_request['subnet']:
            # Note(mb): Use of string containing null required by controller
            if 'gateway_ip' in processed_request['subnet']:
                if processed_request['subnet'].get('gateway_ip') is None:
                    processed_request['subnet']['gateway_ip'] = 'null'
            (res, data) = self.sdnve_client.sdnve_update(
                'subnet', id, processed_request['subnet'])
            if res not in constants.HTTP_ACCEPTABLE:
                # Restore the original values before reverting the DB row.
                for key in subnet['subnet'].keys():
                    subnet['subnet'][key] = original_subnet[key]
                super(SdnvePluginV2, self).update_subnet(
                    context, id, subnet)
                raise sdnve_exc.SdnveException(
                    msg=(_('Update subnet failed in SDN-VE: %s') % res))

        return updated_subnet

    @_ha
    def delete_subnet(self, context, id):
        '''Delete from DB first; controller failure is logged, not raised.'''
        LOG.debug(_("Delete subnet in progress: %s"), id)
        super(SdnvePluginV2, self).delete_subnet(context, id)

        (res, data) = self.sdnve_client.sdnve_delete('subnet', id)
        if res not in constants.HTTP_ACCEPTABLE:
            LOG.error(_("Delete subnet operation failed in SDN-VE after "
                        "deleting the subnet from DB: %s"), res)

    #
    # Router
    #

    @_ha
    def create_router(self, context, router):
        '''Create the router in the DB, then mirror it to the controller.

        admin_state_up=False is overridden to True (not supported).
        '''
        LOG.debug(_("Create router in progress: %r"), router)

        if router['router']['admin_state_up'] is False:
            LOG.warning(_('Ignoring admin_state_up=False for router=%r. '
                          'Overriding with True'), router)
            router['router']['admin_state_up'] = True

        tenant_id = self._get_tenant_id_for_create(context, router['router'])
        # Create a new SDN-VE tenant if need be
        sdnve_tenant = self.sdnve_client.sdnve_check_and_create_tenant(
            tenant_id)
        if sdnve_tenant is None:
            raise sdnve_exc.SdnveException(
                msg=_('Create router failed: no SDN-VE tenant.'))

        new_router = super(SdnvePluginV2, self).create_router(context, router)
        # Create SDN-VE router
        (res, data) = self.sdnve_client.sdnve_create('router', new_router)
        if res not in constants.HTTP_ACCEPTABLE:
            super(SdnvePluginV2, self).delete_router(context, new_router['id'])
            raise sdnve_exc.SdnveException(
                msg=(_('Create router failed in SDN-VE: %s') % res))

        LOG.debug(_("Router created: %r"), new_router)
        return new_router

    @_ha
    def update_router(self, context, id, router):
        '''Update the router in DB and controller; revert DB on failure.'''
        LOG.debug(_("Update router in progress: id=%(id)s "
                    "router=%(router)r"),
                  {'id': id, 'router': router})
        session = context.session

        processed_request = {}
        if not router['router'].get('admin_state_up', True):
            raise n_exc.NotImplementedError(_('admin_state_up=False '
                                              'routers are not '
                                              'supported.'))

        with session.begin(subtransactions=True):
            original_router = super(SdnvePluginV2, self).get_router(
                context, id)
            processed_request['router'] = self._process_request(
                router['router'], original_router)
            updated_router = super(SdnvePluginV2, self).update_router(
                context, id, router)

        if processed_request['router']:
            egw = processed_request['router'].get('external_gateway_info')
            # Check for existing empty set (different from None) in request
            if egw == {}:
                processed_request['router'][
                    'external_gateway_info'] = {'network_id': 'null'}
            (res, data) = self.sdnve_client.sdnve_update(
                'router', id, processed_request['router'])
            if res not in constants.HTTP_ACCEPTABLE:
                super(SdnvePluginV2, self).update_router(
                    context, id, {'router': original_router})
                raise sdnve_exc.SdnveException(
                    msg=(_('Update router failed in SDN-VE: %s') % res))

        return updated_router

    @_ha
    def delete_router(self, context, id):
        '''Delete from DB first; controller failure is logged, not raised.'''
        LOG.debug(_("Delete router in progress: %s"), id)

        super(SdnvePluginV2, self).delete_router(context, id)

        (res, data) = self.sdnve_client.sdnve_delete('router', id)
        if res not in constants.HTTP_ACCEPTABLE:
            LOG.error(
                _("Delete router operation failed in SDN-VE after "
                  "deleting the router in DB: %s"), res)

    @_ha
    def add_router_interface(self, context, router_id, interface_info):
        LOG.debug(_("Add router interface in progress: "
                    "router_id=%(router_id)s "
                    "interface_info=%(interface_info)r"),
                  {'router_id': router_id, 'interface_info': interface_info})

        new_interface = super(SdnvePluginV2, self).add_router_interface(
            context, router_id, interface_info)
        # NOTE(review): the remainder of this method (including a debug
        # log and the controller-side update) lies outside the reviewed
        # hunk and is intentionally not reproduced here.
Port info: %s"), + new_interface) + request_info = interface_info.copy() + request_info['port_id'] = new_interface['port_id'] + # Add the subnet_id to the request sent to the controller + if 'subnet_id' not in interface_info: + request_info['subnet_id'] = new_interface['subnet_id'] + + (res, data) = self.sdnve_client.sdnve_update( + 'router', router_id + '/add_router_interface', request_info) + if res not in constants.HTTP_ACCEPTABLE: + super(SdnvePluginV2, self).remove_router_interface( + context, router_id, interface_info) + raise sdnve_exc.SdnveException( + msg=(_('Update router-add-interface failed in SDN-VE: %s') % + res)) + + LOG.debug(_("Added router interface: %r"), new_interface) + return new_interface + + def _add_router_interface_only(self, context, router_id, interface_info): + LOG.debug(_("Add router interface only called: " + "router_id=%(router_id)s " + "interface_info=%(interface_info)r"), + {'router_id': router_id, 'interface_info': interface_info}) + + port_id = interface_info.get('port_id') + if port_id: + (res, data) = self.sdnve_client.sdnve_update( + 'router', router_id + '/add_router_interface', interface_info) + if res not in constants.HTTP_ACCEPTABLE: + LOG.error(_("SdnvePluginV2._add_router_interface_only: " + "failed to add the interface in the roll back." 
+ " of a remove_router_interface operation")) + + @_ha + def remove_router_interface(self, context, router_id, interface_info): + LOG.debug(_("Remove router interface in progress: " + "router_id=%(router_id)s " + "interface_info=%(interface_info)r"), + {'router_id': router_id, 'interface_info': interface_info}) + + subnet_id = interface_info.get('subnet_id') + port_id = interface_info.get('port_id') + if not subnet_id: + if not port_id: + raise sdnve_exc.BadInputException(msg=_('No port ID')) + myport = super(SdnvePluginV2, self).get_port(context, port_id) + LOG.debug(_("SdnvePluginV2.remove_router_interface port: %s"), + myport) + myfixed_ips = myport.get('fixed_ips') + if not myfixed_ips: + raise sdnve_exc.BadInputException(msg=_('No fixed IP')) + subnet_id = myfixed_ips[0].get('subnet_id') + if subnet_id: + interface_info['subnet_id'] = subnet_id + LOG.debug( + _("SdnvePluginV2.remove_router_interface subnet_id: %s"), + subnet_id) + else: + if not port_id: + # The backend requires port id info in the request + subnet = super(SdnvePluginV2, self).get_subnet(context, + subnet_id) + df = {'device_id': [router_id], + 'device_owner': [n_const.DEVICE_OWNER_ROUTER_INTF], + 'network_id': [subnet['network_id']]} + ports = self.get_ports(context, filters=df) + if ports: + pid = ports[0]['id'] + interface_info['port_id'] = pid + msg = ("SdnvePluginV2.remove_router_interface " + "subnet_id: %(sid)s port_id: %(pid)s") + LOG.debug(msg, {'sid': subnet_id, 'pid': pid}) + + (res, data) = self.sdnve_client.sdnve_update( + 'router', router_id + '/remove_router_interface', interface_info) + + if res not in constants.HTTP_ACCEPTABLE: + raise sdnve_exc.SdnveException( + msg=(_('Update router-remove-interface failed SDN-VE: %s') % + res)) + + session = context.session + with session.begin(subtransactions=True): + try: + info = super(SdnvePluginV2, self).remove_router_interface( + context, router_id, interface_info) + except Exception: + with excutils.save_and_reraise_exception(): + 
self._add_router_interface_only(context, + router_id, interface_info) + + return info + + # + # Floating Ip + # + + @_ha + def create_floatingip(self, context, floatingip): + LOG.debug(_("Create floatingip in progress: %r"), + floatingip) + new_floatingip = super(SdnvePluginV2, self).create_floatingip( + context, floatingip) + + (res, data) = self.sdnve_client.sdnve_create( + 'floatingip', {'floatingip': new_floatingip}) + if res not in constants.HTTP_ACCEPTABLE: + super(SdnvePluginV2, self).delete_floatingip( + context, new_floatingip['id']) + raise sdnve_exc.SdnveException( + msg=(_('Creating floating ip operation failed ' + 'in SDN-VE controller: %s') % res)) + + LOG.debug(_("Created floatingip : %r"), new_floatingip) + return new_floatingip + + @_ha + def update_floatingip(self, context, id, floatingip): + LOG.debug(_("Update floatingip in progress: %r"), floatingip) + session = context.session + + processed_request = {} + with session.begin(subtransactions=True): + original_floatingip = super( + SdnvePluginV2, self).get_floatingip(context, id) + processed_request['floatingip'] = self._process_request( + floatingip['floatingip'], original_floatingip) + updated_floatingip = super( + SdnvePluginV2, self).update_floatingip(context, id, floatingip) + + if processed_request['floatingip']: + (res, data) = self.sdnve_client.sdnve_update( + 'floatingip', id, + {'floatingip': processed_request['floatingip']}) + if res not in constants.HTTP_ACCEPTABLE: + super(SdnvePluginV2, self).update_floatingip( + context, id, {'floatingip': original_floatingip}) + raise sdnve_exc.SdnveException( + msg=(_('Update floating ip failed in SDN-VE: %s') % res)) + + return updated_floatingip + + @_ha + def delete_floatingip(self, context, id): + LOG.debug(_("Delete floatingip in progress: %s"), id) + super(SdnvePluginV2, self).delete_floatingip(context, id) + + (res, data) = self.sdnve_client.sdnve_delete('floatingip', id) + if res not in constants.HTTP_ACCEPTABLE: + LOG.error(_("Delete 
floatingip failed in SDN-VE: %s"), res) diff --git a/neutron/plugins/linuxbridge/README b/neutron/plugins/linuxbridge/README new file mode 100644 index 000000000..b7601205f --- /dev/null +++ b/neutron/plugins/linuxbridge/README @@ -0,0 +1,169 @@ +# -- Background + +The Neutron Linux Bridge plugin is a plugin that allows you to manage +connectivity between VMs on hosts that are capable of running a Linux Bridge. + +The Neutron Linux Bridge plugin consists of three components: + +1) The plugin itself: The plugin uses a database backend (mysql for + now) to store configuration and mappings that are used by the + agent. The mysql server runs on a central server (often the same + host as nova itself). + +2) The neutron service host which will be running neutron. This can + be run on the server running nova. + +3) An agent which runs on the host and communicates with the host operating + system. The agent gathers the configuration and mappings from + the mysql database running on the neutron host. + +The sections below describe how to configure and run the neutron +service with the Linux Bridge plugin. + +# -- Python library dependencies + + Make sure you have the following package(s) installed on neutron server + host as well as any hosts which run the agent: + python-configobj + bridge-utils + python-mysqldb + sqlite3 + +# -- Nova configuration (controller node) + +1) Ensure that the neutron network manager is configured in the + nova.conf on the node that will be running nova-network. 
+ +network_manager=nova.network.neutron.manager.NeutronManager + +# -- Nova configuration (compute node(s)) + +1) Configure the vif driver, and libvirt/vif type + +connection_type=libvirt +libvirt_type=qemu +libvirt_vif_type=ethernet +libvirt_vif_driver=nova.virt.libvirt.vif.NeutronLinuxBridgeVIFDriver +linuxnet_interface_driver=nova.network.linux_net.NeutronLinuxBridgeInterfaceDriver + +2) If you want a DHCP server to be run for the VMs to acquire IPs, + add the following flag to your nova.conf file: + +neutron_use_dhcp=true + +(Note: For more details on how to work with Neutron using Nova, i.e. how to create networks and such, + please refer to the top level Neutron README which points to the relevant documentation.) + +# -- Neutron configuration + +Make the Linux Bridge plugin the current neutron plugin + +- edit neutron.conf and change the core_plugin + +core_plugin = neutron.plugins.linuxbridge.lb_neutron_plugin.LinuxBridgePluginV2 + +# -- Database config. + +(Note: The plugin ships with a default SQLite in-memory database configuration, + and can be used to run tests without performing the suggested DB config below.) + +The Linux Bridge neutron plugin requires access to a mysql database in order +to store configuration and mappings that will be used by the agent. Here is +how to set up the database on the host that you will be running the neutron +service on. + +MySQL should be installed on the host, and all plugins and clients +must be configured with access to the database. + +To prep mysql, run: + +$ mysql -u root -p -e "create database neutron_linux_bridge" + +# log in to mysql service +$ mysql -u root -p +# The Linux Bridge Neutron agent running on each compute node must be able to +# make a mysql connection back to the main database server. 
+mysql> GRANT USAGE ON *.* to root@'yourremotehost' IDENTIFIED BY 'newpassword'; +# force update of authorization changes +mysql> FLUSH PRIVILEGES; + +(Note: If the remote connection fails to MySQL, you might need to add the IP address, + and/or fully-qualified hostname, and/or unqualified hostname in the above GRANT sql + command. Also, you might need to specify "ALL" instead of "USAGE".) + +# -- Plugin configuration + +- Edit the configuration file: + etc/neutron/plugins/linuxbridge/linuxbridge_conf.ini + Make sure it matches your mysql configuration. This file must be updated + with the addresses and credentials to access the database. + + Note: debug and logging information should be updated in etc/neutron.conf + + Note: When running the tests, set the connection type to sqlite, and when + actually running the server set it to mysql. At any given time, only one + of these should be active in the conf file (you can comment out the other). + +- On the neutron server, network_vlan_ranges must be configured in + linuxbridge_conf.ini to specify the names of the physical networks + managed by the linuxbridge plugin, along with the ranges of VLAN IDs + available on each physical network for allocation to virtual + networks. An entry of the form + "::" specifies a VLAN range on + the named physical network. An entry of the form + "" specifies a named network without making a + range of VLANs available for allocation. Networks specified using + either form are available for administrators to create provider flat + networks and provider VLANs. Multiple VLAN ranges can be specified + for the same physical network. 
+ + The following example linuxbridge_conf.ini entry shows three + physical networks that can be used to create provider networks, with + ranges of VLANs available for allocation on two of them: + + [VLANS] + network_vlan_ranges = physnet1:1000:2999,physnet1:3000:3999,physnet2,physnet3:1:4094 + + +# -- Agent configuration + +- Edit the configuration file: + etc/neutron/plugins/linuxbridge/linuxbridge_conf.ini + +- Copy neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py + and etc/neutron/plugins/linuxbridge/linuxbridge_conf.ini + to the compute node. + +- Copy the neutron.conf file to the compute node + + Note: debug and logging information should be updated in etc/neutron.conf + +- On each compute node, the network_interface_mappings must be + configured in linuxbridge_conf.ini to map each physical network name + to the physical interface connecting the node to that physical + network. Entries are of the form + ":". For example, one compute + node may use the following physical_interface_mappings entries: + + [LINUX_BRIDGE] + physical_interface_mappings = physnet1:eth1,physnet2:eth2,physnet3:eth3 + + while another might use: + + [LINUX_BRIDGE] + physical_interface_mappings = physnet1:em3,physnet2:em2,physnet3:em1 + + +$ Run the following: + python linuxbridge_neutron_agent.py --config-file neutron.conf + --config-file linuxbridge_conf.ini + + Note that the user running the agent must have sudo privileges + to run various networking commands. Also, the agent can be + configured to use neutron-rootwrap, limiting what commands it can + run via sudo. See http://wiki.openstack.org/Packager/Rootwrap for + details on rootwrap. + + As an alternative to copying the agent python file, if neutron is + installed on the compute node, the agent can be run as + bin/neutron-linuxbridge-agent. 
diff --git a/neutron/plugins/linuxbridge/__init__.py b/neutron/plugins/linuxbridge/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/neutron/plugins/linuxbridge/agent/__init__.py b/neutron/plugins/linuxbridge/agent/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py b/neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py new file mode 100755 index 000000000..5db728655 --- /dev/null +++ b/neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py @@ -0,0 +1,1026 @@ +#!/usr/bin/env python +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2012 Cisco Systems, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# +# Performs per host Linux Bridge configuration for Neutron. +# Based on the structure of the OpenVSwitch agent in the +# Neutron OpenVSwitch Plugin. +# @author: Sumit Naiksatam, Cisco Systems, Inc. 
+ +import os +import sys +import time + +import eventlet +eventlet.monkey_patch() + +from oslo.config import cfg + +from neutron.agent import l2population_rpc as l2pop_rpc +from neutron.agent.linux import ip_lib +from neutron.agent.linux import utils +from neutron.agent import rpc as agent_rpc +from neutron.agent import securitygroups_rpc as sg_rpc +from neutron.common import config as common_config +from neutron.common import constants +from neutron.common import exceptions +from neutron.common import rpc_compat +from neutron.common import topics +from neutron.common import utils as q_utils +from neutron import context +from neutron.openstack.common import log as logging +from neutron.openstack.common import loopingcall +from neutron.plugins.common import constants as p_const +from neutron.plugins.linuxbridge.common import config # noqa +from neutron.plugins.linuxbridge.common import constants as lconst + + +LOG = logging.getLogger(__name__) + +BRIDGE_NAME_PREFIX = "brq" +TAP_INTERFACE_PREFIX = "tap" +BRIDGE_FS = "/sys/devices/virtual/net/" +BRIDGE_NAME_PLACEHOLDER = "bridge_name" +BRIDGE_INTERFACES_FS = BRIDGE_FS + BRIDGE_NAME_PLACEHOLDER + "/brif/" +DEVICE_NAME_PLACEHOLDER = "device_name" +BRIDGE_PORT_FS_FOR_DEVICE = BRIDGE_FS + DEVICE_NAME_PLACEHOLDER + "/brport" +VXLAN_INTERFACE_PREFIX = "vxlan-" + + +class NetworkSegment: + def __init__(self, network_type, physical_network, segmentation_id): + self.network_type = network_type + self.physical_network = physical_network + self.segmentation_id = segmentation_id + + +class LinuxBridgeManager: + def __init__(self, interface_mappings, root_helper): + self.interface_mappings = interface_mappings + self.root_helper = root_helper + self.ip = ip_lib.IPWrapper(self.root_helper) + # VXLAN related parameters: + self.local_ip = cfg.CONF.VXLAN.local_ip + self.vxlan_mode = lconst.VXLAN_NONE + if cfg.CONF.VXLAN.enable_vxlan: + self.local_int = self.get_interface_by_ip(self.local_ip) + if self.local_int: + 
self.check_vxlan_support() + else: + LOG.warning(_('VXLAN is enabled, a valid local_ip ' + 'must be provided')) + # Store network mapping to segments + self.network_map = {} + + def interface_exists_on_bridge(self, bridge, interface): + directory = '/sys/class/net/%s/brif' % bridge + for filename in os.listdir(directory): + if filename == interface: + return True + return False + + def get_bridge_name(self, network_id): + if not network_id: + LOG.warning(_("Invalid Network ID, will lead to incorrect bridge" + "name")) + bridge_name = BRIDGE_NAME_PREFIX + network_id[0:11] + return bridge_name + + def get_subinterface_name(self, physical_interface, vlan_id): + if not vlan_id: + LOG.warning(_("Invalid VLAN ID, will lead to incorrect " + "subinterface name")) + subinterface_name = '%s.%s' % (physical_interface, vlan_id) + return subinterface_name + + def get_tap_device_name(self, interface_id): + if not interface_id: + LOG.warning(_("Invalid Interface ID, will lead to incorrect " + "tap device name")) + tap_device_name = TAP_INTERFACE_PREFIX + interface_id[0:11] + return tap_device_name + + def get_vxlan_device_name(self, segmentation_id): + if 0 <= int(segmentation_id) <= constants.MAX_VXLAN_VNI: + return VXLAN_INTERFACE_PREFIX + str(segmentation_id) + else: + LOG.warning(_("Invalid Segmentation ID: %s, will lead to " + "incorrect vxlan device name"), segmentation_id) + + def get_all_neutron_bridges(self): + neutron_bridge_list = [] + bridge_list = os.listdir(BRIDGE_FS) + for bridge in bridge_list: + if bridge.startswith(BRIDGE_NAME_PREFIX): + neutron_bridge_list.append(bridge) + return neutron_bridge_list + + def get_interfaces_on_bridge(self, bridge_name): + if ip_lib.device_exists(bridge_name, root_helper=self.root_helper): + bridge_interface_path = BRIDGE_INTERFACES_FS.replace( + BRIDGE_NAME_PLACEHOLDER, bridge_name) + return os.listdir(bridge_interface_path) + else: + return [] + + def get_tap_devices_count(self, bridge_name): + bridge_interface_path = 
BRIDGE_INTERFACES_FS.replace( + BRIDGE_NAME_PLACEHOLDER, bridge_name) + try: + if_list = os.listdir(bridge_interface_path) + return len([interface for interface in if_list if + interface.startswith(TAP_INTERFACE_PREFIX)]) + except OSError: + return 0 + + def get_interface_by_ip(self, ip): + for device in self.ip.get_devices(): + if device.addr.list(to=ip): + return device.name + + def get_bridge_for_tap_device(self, tap_device_name): + bridges = self.get_all_neutron_bridges() + for bridge in bridges: + interfaces = self.get_interfaces_on_bridge(bridge) + if tap_device_name in interfaces: + return bridge + + return None + + def is_device_on_bridge(self, device_name): + if not device_name: + return False + else: + bridge_port_path = BRIDGE_PORT_FS_FOR_DEVICE.replace( + DEVICE_NAME_PLACEHOLDER, device_name) + return os.path.exists(bridge_port_path) + + def ensure_vlan_bridge(self, network_id, physical_interface, vlan_id): + """Create a vlan and bridge unless they already exist.""" + interface = self.ensure_vlan(physical_interface, vlan_id) + bridge_name = self.get_bridge_name(network_id) + ips, gateway = self.get_interface_details(interface) + if self.ensure_bridge(bridge_name, interface, ips, gateway): + return interface + + def ensure_vxlan_bridge(self, network_id, segmentation_id): + """Create a vxlan and bridge unless they already exist.""" + interface = self.ensure_vxlan(segmentation_id) + if not interface: + LOG.error(_("Failed creating vxlan interface for " + "%(segmentation_id)s"), + {segmentation_id: segmentation_id}) + return + bridge_name = self.get_bridge_name(network_id) + self.ensure_bridge(bridge_name, interface) + return interface + + def get_interface_details(self, interface): + device = self.ip.device(interface) + ips = device.addr.list(scope='global') + + # Update default gateway if necessary + gateway = device.route.get_gateway(scope='global') + return ips, gateway + + def ensure_flat_bridge(self, network_id, physical_interface): + """Create a 
non-vlan bridge unless it already exists.""" + bridge_name = self.get_bridge_name(network_id) + ips, gateway = self.get_interface_details(physical_interface) + if self.ensure_bridge(bridge_name, physical_interface, ips, gateway): + return physical_interface + + def ensure_local_bridge(self, network_id): + """Create a local bridge unless it already exists.""" + bridge_name = self.get_bridge_name(network_id) + return self.ensure_bridge(bridge_name) + + def ensure_vlan(self, physical_interface, vlan_id): + """Create a vlan unless it already exists.""" + interface = self.get_subinterface_name(physical_interface, vlan_id) + if not ip_lib.device_exists(interface, root_helper=self.root_helper): + LOG.debug(_("Creating subinterface %(interface)s for " + "VLAN %(vlan_id)s on interface " + "%(physical_interface)s"), + {'interface': interface, 'vlan_id': vlan_id, + 'physical_interface': physical_interface}) + if utils.execute(['ip', 'link', 'add', 'link', + physical_interface, + 'name', interface, 'type', 'vlan', 'id', + vlan_id], root_helper=self.root_helper): + return + if utils.execute(['ip', 'link', 'set', + interface, 'up'], root_helper=self.root_helper): + return + LOG.debug(_("Done creating subinterface %s"), interface) + return interface + + def ensure_vxlan(self, segmentation_id): + """Create a vxlan unless it already exists.""" + interface = self.get_vxlan_device_name(segmentation_id) + if not ip_lib.device_exists(interface, root_helper=self.root_helper): + LOG.debug(_("Creating vxlan interface %(interface)s for " + "VNI %(segmentation_id)s"), + {'interface': interface, + 'segmentation_id': segmentation_id}) + args = {'dev': self.local_int} + if self.vxlan_mode == lconst.VXLAN_MCAST: + args['group'] = cfg.CONF.VXLAN.vxlan_group + if cfg.CONF.VXLAN.ttl: + args['ttl'] = cfg.CONF.VXLAN.ttl + if cfg.CONF.VXLAN.tos: + args['tos'] = cfg.CONF.VXLAN.tos + if cfg.CONF.VXLAN.l2_population: + args['proxy'] = True + int_vxlan = self.ip.add_vxlan(interface, segmentation_id, 
**args) + int_vxlan.link.set_up() + LOG.debug(_("Done creating vxlan interface %s"), interface) + return interface + + def update_interface_ip_details(self, destination, source, ips, + gateway): + if ips or gateway: + dst_device = self.ip.device(destination) + src_device = self.ip.device(source) + + # Append IP's to bridge if necessary + if ips: + for ip in ips: + dst_device.addr.add(ip_version=ip['ip_version'], + cidr=ip['cidr'], + broadcast=ip['broadcast']) + + if gateway: + # Ensure that the gateway can be updated by changing the metric + metric = 100 + if 'metric' in gateway: + metric = gateway['metric'] - 1 + dst_device.route.add_gateway(gateway=gateway['gateway'], + metric=metric) + src_device.route.delete_gateway(gateway=gateway['gateway']) + + # Remove IP's from interface + if ips: + for ip in ips: + src_device.addr.delete(ip_version=ip['ip_version'], + cidr=ip['cidr']) + + def _bridge_exists_and_ensure_up(self, bridge_name): + """Check if the bridge exists and make sure it is up.""" + br = ip_lib.IPDevice(bridge_name, self.root_helper) + try: + # If the device doesn't exist this will throw a RuntimeError + br.link.set_up() + except RuntimeError: + return False + return True + + def ensure_bridge(self, bridge_name, interface=None, ips=None, + gateway=None): + """Create a bridge unless it already exists.""" + # _bridge_exists_and_ensure_up instead of device_exists is used here + # because there are cases where the bridge exists but it's not UP, + # for example: + # 1) A greenthread was executing this function and had not yet executed + # "ip link set bridge_name up" before eventlet switched to this + # thread running the same function + # 2) The Nova VIF driver was running concurrently and had just created + # the bridge, but had not yet put it UP + if not self._bridge_exists_and_ensure_up(bridge_name): + LOG.debug(_("Starting bridge %(bridge_name)s for subinterface " + "%(interface)s"), + {'bridge_name': bridge_name, 'interface': interface}) + if 
utils.execute(['brctl', 'addbr', bridge_name], + root_helper=self.root_helper): + return + if utils.execute(['brctl', 'setfd', bridge_name, + str(0)], root_helper=self.root_helper): + return + if utils.execute(['brctl', 'stp', bridge_name, + 'off'], root_helper=self.root_helper): + return + if utils.execute(['ip', 'link', 'set', bridge_name, + 'up'], root_helper=self.root_helper): + return + LOG.debug(_("Done starting bridge %(bridge_name)s for " + "subinterface %(interface)s"), + {'bridge_name': bridge_name, 'interface': interface}) + + if not interface: + return bridge_name + + # Update IP info if necessary + self.update_interface_ip_details(bridge_name, interface, ips, gateway) + + # Check if the interface is part of the bridge + if not self.interface_exists_on_bridge(bridge_name, interface): + try: + # Check if the interface is not enslaved in another bridge + if self.is_device_on_bridge(interface): + bridge = self.get_bridge_for_tap_device(interface) + utils.execute(['brctl', 'delif', bridge, interface], + root_helper=self.root_helper) + + utils.execute(['brctl', 'addif', bridge_name, interface], + root_helper=self.root_helper) + except Exception as e: + LOG.error(_("Unable to add %(interface)s to %(bridge_name)s! 
" + "Exception: %(e)s"), + {'interface': interface, 'bridge_name': bridge_name, + 'e': e}) + return + return bridge_name + + def ensure_physical_in_bridge(self, network_id, + network_type, + physical_network, + segmentation_id): + if network_type == p_const.TYPE_VXLAN: + if self.vxlan_mode == lconst.VXLAN_NONE: + LOG.error(_("Unable to add vxlan interface for network %s"), + network_id) + return + return self.ensure_vxlan_bridge(network_id, segmentation_id) + + physical_interface = self.interface_mappings.get(physical_network) + if not physical_interface: + LOG.error(_("No mapping for physical network %s"), + physical_network) + return + if network_type == p_const.TYPE_FLAT: + return self.ensure_flat_bridge(network_id, physical_interface) + elif network_type == p_const.TYPE_VLAN: + return self.ensure_vlan_bridge(network_id, physical_interface, + segmentation_id) + else: + LOG.error(_("Unknown network_type %(network_type)s for network " + "%(network_id)s."), {network_type: network_type, + network_id: network_id}) + + def add_tap_interface(self, network_id, network_type, physical_network, + segmentation_id, tap_device_name): + """Add tap interface. + + If a VIF has been plugged into a network, this function will + add the corresponding tap device to the relevant bridge. 
+ """ + if not ip_lib.device_exists(tap_device_name, + root_helper=self.root_helper): + LOG.debug(_("Tap device: %s does not exist on " + "this host, skipped"), tap_device_name) + return False + + bridge_name = self.get_bridge_name(network_id) + if network_type == p_const.TYPE_LOCAL: + self.ensure_local_bridge(network_id) + elif not self.ensure_physical_in_bridge(network_id, + network_type, + physical_network, + segmentation_id): + return False + + # Check if device needs to be added to bridge + tap_device_in_bridge = self.get_bridge_for_tap_device(tap_device_name) + if not tap_device_in_bridge: + data = {'tap_device_name': tap_device_name, + 'bridge_name': bridge_name} + msg = _("Adding device %(tap_device_name)s to bridge " + "%(bridge_name)s") % data + LOG.debug(msg) + if utils.execute(['brctl', 'addif', bridge_name, tap_device_name], + root_helper=self.root_helper): + return False + else: + data = {'tap_device_name': tap_device_name, + 'bridge_name': bridge_name} + msg = _("%(tap_device_name)s already exists on bridge " + "%(bridge_name)s") % data + LOG.debug(msg) + return True + + def add_interface(self, network_id, network_type, physical_network, + segmentation_id, port_id): + self.network_map[network_id] = NetworkSegment(network_type, + physical_network, + segmentation_id) + tap_device_name = self.get_tap_device_name(port_id) + return self.add_tap_interface(network_id, network_type, + physical_network, segmentation_id, + tap_device_name) + + def delete_vlan_bridge(self, bridge_name): + if ip_lib.device_exists(bridge_name, root_helper=self.root_helper): + interfaces_on_bridge = self.get_interfaces_on_bridge(bridge_name) + for interface in interfaces_on_bridge: + self.remove_interface(bridge_name, interface) + + if interface.startswith(VXLAN_INTERFACE_PREFIX): + self.delete_vxlan(interface) + continue + + for physical_interface in self.interface_mappings.itervalues(): + if (interface.startswith(physical_interface)): + ips, gateway = 
self.get_interface_details(bridge_name) + if ips: + # This is a flat network or a VLAN interface that + # was setup outside of neutron => return IP's from + # bridge to interface + self.update_interface_ip_details(interface, + bridge_name, + ips, gateway) + elif physical_interface != interface: + self.delete_vlan(interface) + + LOG.debug(_("Deleting bridge %s"), bridge_name) + if utils.execute(['ip', 'link', 'set', bridge_name, 'down'], + root_helper=self.root_helper): + return + if utils.execute(['brctl', 'delbr', bridge_name], + root_helper=self.root_helper): + return + LOG.debug(_("Done deleting bridge %s"), bridge_name) + + else: + LOG.error(_("Cannot delete bridge %s, does not exist"), + bridge_name) + + def remove_empty_bridges(self): + for network_id in self.network_map.keys(): + bridge_name = self.get_bridge_name(network_id) + if not self.get_tap_devices_count(bridge_name): + self.delete_vlan_bridge(bridge_name) + del self.network_map[network_id] + + def remove_interface(self, bridge_name, interface_name): + if ip_lib.device_exists(bridge_name, root_helper=self.root_helper): + if not self.is_device_on_bridge(interface_name): + return True + LOG.debug(_("Removing device %(interface_name)s from bridge " + "%(bridge_name)s"), + {'interface_name': interface_name, + 'bridge_name': bridge_name}) + if utils.execute(['brctl', 'delif', bridge_name, interface_name], + root_helper=self.root_helper): + return False + LOG.debug(_("Done removing device %(interface_name)s from bridge " + "%(bridge_name)s"), + {'interface_name': interface_name, + 'bridge_name': bridge_name}) + return True + else: + LOG.debug(_("Cannot remove device %(interface_name)s bridge " + "%(bridge_name)s does not exist"), + {'interface_name': interface_name, + 'bridge_name': bridge_name}) + return False + + def delete_vlan(self, interface): + if ip_lib.device_exists(interface, root_helper=self.root_helper): + LOG.debug(_("Deleting subinterface %s for vlan"), interface) + if utils.execute(['ip', 
'link', 'set', interface, 'down'], + root_helper=self.root_helper): + return + if utils.execute(['ip', 'link', 'delete', interface], + root_helper=self.root_helper): + return + LOG.debug(_("Done deleting subinterface %s"), interface) + + def delete_vxlan(self, interface): + if ip_lib.device_exists(interface, root_helper=self.root_helper): + LOG.debug(_("Deleting vxlan interface %s for vlan"), + interface) + int_vxlan = self.ip.device(interface) + int_vxlan.link.set_down() + int_vxlan.link.delete() + LOG.debug(_("Done deleting vxlan interface %s"), interface) + + def get_tap_devices(self): + devices = set() + for device in os.listdir(BRIDGE_FS): + if device.startswith(TAP_INTERFACE_PREFIX): + devices.add(device) + return devices + + def vxlan_ucast_supported(self): + if not cfg.CONF.VXLAN.l2_population: + return False + if not ip_lib.iproute_arg_supported( + ['bridge', 'fdb'], 'append', self.root_helper): + LOG.warning(_('Option "%(option)s" must be supported by command ' + '"%(command)s" to enable %(mode)s mode') % + {'option': 'append', + 'command': 'bridge fdb', + 'mode': 'VXLAN UCAST'}) + return False + for segmentation_id in range(1, constants.MAX_VXLAN_VNI + 1): + if not ip_lib.device_exists( + self.get_vxlan_device_name(segmentation_id), + root_helper=self.root_helper): + break + else: + LOG.error(_('No valid Segmentation ID to perform UCAST test.')) + return False + + test_iface = self.ensure_vxlan(segmentation_id) + try: + utils.execute( + cmd=['bridge', 'fdb', 'append', constants.FLOODING_ENTRY[0], + 'dev', test_iface, 'dst', '1.1.1.1'], + root_helper=self.root_helper) + return True + except RuntimeError: + return False + finally: + self.delete_vxlan(test_iface) + + def vxlan_mcast_supported(self): + if not cfg.CONF.VXLAN.vxlan_group: + LOG.warning(_('VXLAN muticast group must be provided in ' + 'vxlan_group option to enable VXLAN MCAST mode')) + return False + if not ip_lib.iproute_arg_supported( + ['ip', 'link', 'add', 'type', 'vxlan'], + 'proxy', 
self.root_helper): + LOG.warning(_('Option "%(option)s" must be supported by command ' + '"%(command)s" to enable %(mode)s mode') % + {'option': 'proxy', + 'command': 'ip link add type vxlan', + 'mode': 'VXLAN MCAST'}) + + return False + return True + + def vxlan_module_supported(self): + try: + utils.execute(cmd=['modinfo', 'vxlan']) + return True + except RuntimeError: + return False + + def check_vxlan_support(self): + self.vxlan_mode = lconst.VXLAN_NONE + if not self.vxlan_module_supported(): + LOG.error(_('Linux kernel vxlan module and iproute2 3.8 or above ' + 'are required to enable VXLAN.')) + raise exceptions.VxlanNetworkUnsupported() + + if self.vxlan_ucast_supported(): + self.vxlan_mode = lconst.VXLAN_UCAST + elif self.vxlan_mcast_supported(): + self.vxlan_mode = lconst.VXLAN_MCAST + else: + raise exceptions.VxlanNetworkUnsupported() + LOG.debug(_('Using %s VXLAN mode'), self.vxlan_mode) + + def fdb_ip_entry_exists(self, mac, ip, interface): + entries = utils.execute(['ip', 'neigh', 'show', 'to', ip, + 'dev', interface], + root_helper=self.root_helper) + return mac in entries + + def fdb_bridge_entry_exists(self, mac, interface, agent_ip=None): + entries = utils.execute(['bridge', 'fdb', 'show', 'dev', interface], + root_helper=self.root_helper) + if not agent_ip: + return mac in entries + + return (agent_ip in entries and mac in entries) + + def add_fdb_ip_entry(self, mac, ip, interface): + utils.execute(['ip', 'neigh', 'replace', ip, 'lladdr', mac, + 'dev', interface, 'nud', 'permanent'], + root_helper=self.root_helper, + check_exit_code=False) + + def remove_fdb_ip_entry(self, mac, ip, interface): + utils.execute(['ip', 'neigh', 'del', ip, 'lladdr', mac, + 'dev', interface], + root_helper=self.root_helper, + check_exit_code=False) + + def add_fdb_bridge_entry(self, mac, agent_ip, interface, operation="add"): + utils.execute(['bridge', 'fdb', operation, mac, 'dev', interface, + 'dst', agent_ip], + root_helper=self.root_helper, + check_exit_code=False) 
+ + def remove_fdb_bridge_entry(self, mac, agent_ip, interface): + utils.execute(['bridge', 'fdb', 'del', mac, 'dev', interface, + 'dst', agent_ip], + root_helper=self.root_helper, + check_exit_code=False) + + def add_fdb_entries(self, agent_ip, ports, interface): + for mac, ip in ports: + if mac != constants.FLOODING_ENTRY[0]: + self.add_fdb_ip_entry(mac, ip, interface) + self.add_fdb_bridge_entry(mac, agent_ip, interface) + elif self.vxlan_mode == lconst.VXLAN_UCAST: + if self.fdb_bridge_entry_exists(mac, interface): + self.add_fdb_bridge_entry(mac, agent_ip, interface, + "append") + else: + self.add_fdb_bridge_entry(mac, agent_ip, interface) + + def remove_fdb_entries(self, agent_ip, ports, interface): + for mac, ip in ports: + if mac != constants.FLOODING_ENTRY[0]: + self.remove_fdb_ip_entry(mac, ip, interface) + self.remove_fdb_bridge_entry(mac, agent_ip, interface) + elif self.vxlan_mode == lconst.VXLAN_UCAST: + self.remove_fdb_bridge_entry(mac, agent_ip, interface) + + +class LinuxBridgeRpcCallbacks(rpc_compat.RpcCallback, + sg_rpc.SecurityGroupAgentRpcCallbackMixin, + l2pop_rpc.L2populationRpcCallBackMixin): + + # Set RPC API version to 1.0 by default. + # history + # 1.1 Support Security Group RPC + RPC_API_VERSION = '1.1' + + def __init__(self, context, agent): + super(LinuxBridgeRpcCallbacks, self).__init__() + self.context = context + self.agent = agent + self.sg_agent = agent + + def network_delete(self, context, **kwargs): + LOG.debug(_("network_delete received")) + network_id = kwargs.get('network_id') + bridge_name = self.agent.br_mgr.get_bridge_name(network_id) + LOG.debug(_("Delete %s"), bridge_name) + self.agent.br_mgr.delete_vlan_bridge(bridge_name) + + def port_update(self, context, **kwargs): + port_id = kwargs['port']['id'] + tap_name = self.agent.br_mgr.get_tap_device_name(port_id) + # Put the tap name in the updated_devices set. 
+ # Do not store port details, as if they're used for processing + # notifications there is no guarantee the notifications are + # processed in the same order as the relevant API requests. + self.agent.updated_devices.add(tap_name) + LOG.debug(_("port_update RPC received for port: %s"), port_id) + + def fdb_add(self, context, fdb_entries): + LOG.debug(_("fdb_add received")) + for network_id, values in fdb_entries.items(): + segment = self.agent.br_mgr.network_map.get(network_id) + if not segment: + return + + if segment.network_type != p_const.TYPE_VXLAN: + return + + interface = self.agent.br_mgr.get_vxlan_device_name( + segment.segmentation_id) + + agent_ports = values.get('ports') + for agent_ip, ports in agent_ports.items(): + if agent_ip == self.agent.br_mgr.local_ip: + continue + + self.agent.br_mgr.add_fdb_entries(agent_ip, + ports, + interface) + + def fdb_remove(self, context, fdb_entries): + LOG.debug(_("fdb_remove received")) + for network_id, values in fdb_entries.items(): + segment = self.agent.br_mgr.network_map.get(network_id) + if not segment: + return + + if segment.network_type != p_const.TYPE_VXLAN: + return + + interface = self.agent.br_mgr.get_vxlan_device_name( + segment.segmentation_id) + + agent_ports = values.get('ports') + for agent_ip, ports in agent_ports.items(): + if agent_ip == self.agent.br_mgr.local_ip: + continue + + self.agent.br_mgr.remove_fdb_entries(agent_ip, + ports, + interface) + + def _fdb_chg_ip(self, context, fdb_entries): + LOG.debug(_("update chg_ip received")) + for network_id, agent_ports in fdb_entries.items(): + segment = self.agent.br_mgr.network_map.get(network_id) + if not segment: + return + + if segment.network_type != p_const.TYPE_VXLAN: + return + + interface = self.agent.br_mgr.get_vxlan_device_name( + segment.segmentation_id) + + for agent_ip, state in agent_ports.items(): + if agent_ip == self.agent.br_mgr.local_ip: + continue + + after = state.get('after') + for mac, ip in after: + 
self.agent.br_mgr.add_fdb_ip_entry(mac, ip, interface) + + before = state.get('before') + for mac, ip in before: + self.agent.br_mgr.remove_fdb_ip_entry(mac, ip, interface) + + def fdb_update(self, context, fdb_entries): + LOG.debug(_("fdb_update received")) + for action, values in fdb_entries.items(): + method = '_fdb_' + action + if not hasattr(self, method): + raise NotImplementedError() + + getattr(self, method)(context, values) + + +class LinuxBridgePluginApi(agent_rpc.PluginApi, + sg_rpc.SecurityGroupServerRpcApiMixin): + pass + + +class LinuxBridgeNeutronAgentRPC(sg_rpc.SecurityGroupAgentRpcMixin): + + def __init__(self, interface_mappings, polling_interval, + root_helper): + self.polling_interval = polling_interval + self.root_helper = root_helper + self.setup_linux_bridge(interface_mappings) + configurations = {'interface_mappings': interface_mappings} + if self.br_mgr.vxlan_mode != lconst.VXLAN_NONE: + configurations['tunneling_ip'] = self.br_mgr.local_ip + configurations['tunnel_types'] = [p_const.TYPE_VXLAN] + configurations['l2_population'] = cfg.CONF.VXLAN.l2_population + self.agent_state = { + 'binary': 'neutron-linuxbridge-agent', + 'host': cfg.CONF.host, + 'topic': constants.L2_AGENT_TOPIC, + 'configurations': configurations, + 'agent_type': constants.AGENT_TYPE_LINUXBRIDGE, + 'start_flag': True} + + # stores received port_updates for processing by the main loop + self.updated_devices = set() + self.setup_rpc(interface_mappings.values()) + self.init_firewall() + + def _report_state(self): + try: + devices = len(self.br_mgr.get_tap_devices()) + self.agent_state.get('configurations')['devices'] = devices + self.state_rpc.report_state(self.context, + self.agent_state) + self.agent_state.pop('start_flag', None) + except Exception: + LOG.exception(_("Failed reporting state!")) + + def setup_rpc(self, physical_interfaces): + if physical_interfaces: + mac = utils.get_interface_mac(physical_interfaces[0]) + else: + devices = 
ip_lib.IPWrapper(self.root_helper).get_devices(True) + if devices: + mac = utils.get_interface_mac(devices[0].name) + else: + LOG.error(_("Unable to obtain MAC address for unique ID. " + "Agent terminated!")) + exit(1) + self.agent_id = '%s%s' % ('lb', (mac.replace(":", ""))) + LOG.info(_("RPC agent_id: %s"), self.agent_id) + + self.topic = topics.AGENT + self.plugin_rpc = LinuxBridgePluginApi(topics.PLUGIN) + self.state_rpc = agent_rpc.PluginReportStateAPI(topics.PLUGIN) + # RPC network init + self.context = context.get_admin_context_without_session() + # Handle updates from service + self.endpoints = [LinuxBridgeRpcCallbacks(self.context, self)] + # Define the listening consumers for the agent + consumers = [[topics.PORT, topics.UPDATE], + [topics.NETWORK, topics.DELETE], + [topics.SECURITY_GROUP, topics.UPDATE]] + if cfg.CONF.VXLAN.l2_population: + consumers.append([topics.L2POPULATION, + topics.UPDATE, cfg.CONF.host]) + self.connection = agent_rpc.create_consumers(self.endpoints, + self.topic, + consumers) + report_interval = cfg.CONF.AGENT.report_interval + if report_interval: + heartbeat = loopingcall.FixedIntervalLoopingCall( + self._report_state) + heartbeat.start(interval=report_interval) + + def setup_linux_bridge(self, interface_mappings): + self.br_mgr = LinuxBridgeManager(interface_mappings, self.root_helper) + + def remove_port_binding(self, network_id, interface_id): + bridge_name = self.br_mgr.get_bridge_name(network_id) + tap_device_name = self.br_mgr.get_tap_device_name(interface_id) + return self.br_mgr.remove_interface(bridge_name, tap_device_name) + + def process_network_devices(self, device_info): + resync_a = False + resync_b = False + + self.prepare_devices_filter(device_info.get('added')) + + if device_info.get('updated'): + self.refresh_firewall() + + # Updated devices are processed the same as new ones, as their + # admin_state_up may have changed. 
The set union prevents duplicating + # work when a device is new and updated in the same polling iteration. + devices_added_updated = (set(device_info.get('added')) + | set(device_info.get('updated'))) + if devices_added_updated: + resync_a = self.treat_devices_added_updated(devices_added_updated) + + if device_info.get('removed'): + resync_b = self.treat_devices_removed(device_info['removed']) + # If one of the above operations fails => resync with plugin + return (resync_a | resync_b) + + def treat_devices_added_updated(self, devices): + resync = False + + for device in devices: + LOG.debug(_("Treating added or updated device: %s"), device) + try: + details = self.plugin_rpc.get_device_details(self.context, + device, + self.agent_id) + except Exception as e: + LOG.debug(_("Unable to get port details for " + "%(device)s: %(e)s"), + {'device': device, 'e': e}) + resync = True + continue + if 'port_id' in details: + LOG.info(_("Port %(device)s updated. Details: %(details)s"), + {'device': device, 'details': details}) + if details['admin_state_up']: + # create the networking for the port + network_type = details.get('network_type') + if network_type: + segmentation_id = details.get('segmentation_id') + else: + # compatibility with pre-Havana RPC vlan_id encoding + vlan_id = details.get('vlan_id') + (network_type, + segmentation_id) = lconst.interpret_vlan_id(vlan_id) + if self.br_mgr.add_interface(details['network_id'], + network_type, + details['physical_network'], + segmentation_id, + details['port_id']): + + # update plugin about port status + self.plugin_rpc.update_device_up(self.context, + device, + self.agent_id, + cfg.CONF.host) + else: + self.plugin_rpc.update_device_down(self.context, + device, + self.agent_id, + cfg.CONF.host) + else: + self.remove_port_binding(details['network_id'], + details['port_id']) + else: + LOG.info(_("Device %s not defined on plugin"), device) + return resync + + def treat_devices_removed(self, devices): + resync = False + 
self.remove_devices_filter(devices) + for device in devices: + LOG.info(_("Attachment %s removed"), device) + details = None + try: + details = self.plugin_rpc.update_device_down(self.context, + device, + self.agent_id, + cfg.CONF.host) + except Exception as e: + LOG.debug(_("port_removed failed for %(device)s: %(e)s"), + {'device': device, 'e': e}) + resync = True + if details and details['exists']: + LOG.info(_("Port %s updated."), device) + else: + LOG.debug(_("Device %s not defined on plugin"), device) + self.br_mgr.remove_empty_bridges() + return resync + + def scan_devices(self, registered_devices, updated_devices): + curr_devices = self.br_mgr.get_tap_devices() + device_info = {} + device_info['current'] = curr_devices + device_info['added'] = curr_devices - registered_devices + # we don't want to process updates for devices that don't exist + device_info['updated'] = updated_devices & curr_devices + # we need to clean up after devices are removed + device_info['removed'] = registered_devices - curr_devices + return device_info + + def _device_info_has_changes(self, device_info): + return (device_info.get('added') + or device_info.get('updated') + or device_info.get('removed')) + + def daemon_loop(self): + sync = True + devices = set() + + LOG.info(_("LinuxBridge Agent RPC Daemon Started!")) + + while True: + start = time.time() + if sync: + LOG.info(_("Agent out of sync with plugin!")) + devices.clear() + sync = False + device_info = {} + # Save updated devices dict to perform rollback in case + # resync would be needed, and then clear self.updated_devices. + # As the greenthread should not yield between these + # two statements, this will should be thread-safe. + updated_devices_copy = self.updated_devices + self.updated_devices = set() + try: + device_info = self.scan_devices(devices, updated_devices_copy) + if self._device_info_has_changes(device_info): + LOG.debug(_("Agent loop found changes! 
%s"), device_info) + # If treat devices fails - indicates must resync with + # plugin + sync = self.process_network_devices(device_info) + devices = device_info['current'] + except Exception: + LOG.exception(_("Error in agent loop. Devices info: %s"), + device_info) + sync = True + # Restore devices that were removed from this set earlier + # without overwriting ones that may have arrived since. + self.updated_devices |= updated_devices_copy + + # sleep till end of polling interval + elapsed = (time.time() - start) + if (elapsed < self.polling_interval): + time.sleep(self.polling_interval - elapsed) + else: + LOG.debug(_("Loop iteration exceeded interval " + "(%(polling_interval)s vs. %(elapsed)s)!"), + {'polling_interval': self.polling_interval, + 'elapsed': elapsed}) + + +def main(): + common_config.init(sys.argv[1:]) + + common_config.setup_logging(cfg.CONF) + try: + interface_mappings = q_utils.parse_mappings( + cfg.CONF.LINUX_BRIDGE.physical_interface_mappings) + except ValueError as e: + LOG.error(_("Parsing physical_interface_mappings failed: %s." + " Agent terminated!"), e) + sys.exit(1) + LOG.info(_("Interface mappings: %s"), interface_mappings) + + polling_interval = cfg.CONF.AGENT.polling_interval + root_helper = cfg.CONF.AGENT.root_helper + agent = LinuxBridgeNeutronAgentRPC(interface_mappings, + polling_interval, + root_helper) + LOG.info(_("Agent initialized successfully, now running... ")) + agent.daemon_loop() + sys.exit(0) + + +if __name__ == "__main__": + main() diff --git a/neutron/plugins/linuxbridge/common/__init__.py b/neutron/plugins/linuxbridge/common/__init__.py new file mode 100644 index 000000000..5bb15232d --- /dev/null +++ b/neutron/plugins/linuxbridge/common/__init__.py @@ -0,0 +1,17 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2012 Cisco Systems, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Sumit Naiksatam, Cisco Systems, Inc. diff --git a/neutron/plugins/linuxbridge/common/config.py b/neutron/plugins/linuxbridge/common/config.py new file mode 100644 index 000000000..8736d63a6 --- /dev/null +++ b/neutron/plugins/linuxbridge/common/config.py @@ -0,0 +1,78 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2012 Cisco Systems, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Sumit Naiksatam, Cisco Systems, Inc. +# @author: Rohit Agarwalla, Cisco Systems, Inc. + +from oslo.config import cfg + +from neutron.agent.common import config + +DEFAULT_VLAN_RANGES = [] +DEFAULT_INTERFACE_MAPPINGS = [] +DEFAULT_VXLAN_GROUP = '224.0.0.1' + + +vlan_opts = [ + cfg.StrOpt('tenant_network_type', default='local', + help=_("Network type for tenant networks " + "(local, vlan, or none)")), + cfg.ListOpt('network_vlan_ranges', + default=DEFAULT_VLAN_RANGES, + help=_("List of :: " + "or ")), +] + +vxlan_opts = [ + cfg.BoolOpt('enable_vxlan', default=False, + help=_("Enable VXLAN on the agent. 
Can be enabled when " + "agent is managed by ml2 plugin using linuxbridge " + "mechanism driver")), + cfg.IntOpt('ttl', + help=_("TTL for vxlan interface protocol packets.")), + cfg.IntOpt('tos', + help=_("TOS for vxlan interface protocol packets.")), + cfg.StrOpt('vxlan_group', default=DEFAULT_VXLAN_GROUP, + help=_("Multicast group for vxlan interface.")), + cfg.StrOpt('local_ip', default='', + help=_("Local IP address of the VXLAN endpoints.")), + cfg.BoolOpt('l2_population', default=False, + help=_("Extension to use alongside ml2 plugin's l2population " + "mechanism driver. It enables the plugin to populate " + "VXLAN forwarding table.")), +] + +bridge_opts = [ + cfg.ListOpt('physical_interface_mappings', + default=DEFAULT_INTERFACE_MAPPINGS, + help=_("List of :")), +] + +agent_opts = [ + cfg.IntOpt('polling_interval', default=2, + help=_("The number of seconds the agent will wait between " + "polling for local device changes.")), + cfg.BoolOpt('rpc_support_old_agents', default=False, + help=_("Enable server RPC compatibility with old agents")), +] + + +cfg.CONF.register_opts(vlan_opts, "VLANS") +cfg.CONF.register_opts(vxlan_opts, "VXLAN") +cfg.CONF.register_opts(bridge_opts, "LINUX_BRIDGE") +cfg.CONF.register_opts(agent_opts, "AGENT") +config.register_agent_state_opts_helper(cfg.CONF) +config.register_root_helper(cfg.CONF) diff --git a/neutron/plugins/linuxbridge/common/constants.py b/neutron/plugins/linuxbridge/common/constants.py new file mode 100644 index 000000000..6dee88f40 --- /dev/null +++ b/neutron/plugins/linuxbridge/common/constants.py @@ -0,0 +1,42 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2012 Cisco Systems, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Sumit Naiksatam, Cisco Systems, Inc. + + +from neutron.plugins.common import constants as p_const + + +FLAT_VLAN_ID = -1 +LOCAL_VLAN_ID = -2 + +# Supported VXLAN features +VXLAN_NONE = 'not_supported' +VXLAN_MCAST = 'multicast_flooding' +VXLAN_UCAST = 'unicast_flooding' + + +# TODO(rkukura): Eventually remove this function, which provides +# temporary backward compatibility with pre-Havana RPC and DB vlan_id +# encoding. +def interpret_vlan_id(vlan_id): + """Return (network_type, segmentation_id) tuple for encoded vlan_id.""" + if vlan_id == LOCAL_VLAN_ID: + return (p_const.TYPE_LOCAL, None) + elif vlan_id == FLAT_VLAN_ID: + return (p_const.TYPE_FLAT, None) + else: + return (p_const.TYPE_VLAN, vlan_id) diff --git a/neutron/plugins/linuxbridge/db/__init__.py b/neutron/plugins/linuxbridge/db/__init__.py new file mode 100644 index 000000000..33daf1f33 --- /dev/null +++ b/neutron/plugins/linuxbridge/db/__init__.py @@ -0,0 +1,18 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2012 Cisco Systems, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Sumit Naiksatam, Cisco Systems, Inc. +# diff --git a/neutron/plugins/linuxbridge/db/l2network_db_v2.py b/neutron/plugins/linuxbridge/db/l2network_db_v2.py new file mode 100644 index 000000000..416bd2f59 --- /dev/null +++ b/neutron/plugins/linuxbridge/db/l2network_db_v2.py @@ -0,0 +1,238 @@ +# Copyright (c) 2012 OpenStack Foundation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from six import moves +from sqlalchemy.orm import exc + +from neutron.common import exceptions as n_exc +import neutron.db.api as db +from neutron.db import models_v2 +from neutron.db import securitygroups_db as sg_db +from neutron import manager +from neutron.openstack.common import log as logging +from neutron.plugins.linuxbridge.common import config # noqa +from neutron.plugins.linuxbridge.common import constants +from neutron.plugins.linuxbridge.db import l2network_models_v2 + +LOG = logging.getLogger(__name__) + + +def sync_network_states(network_vlan_ranges): + """Synchronize network_states table with current configured VLAN ranges.""" + + session = db.get_session() + with session.begin(): + # get existing allocations for all physical networks + allocations = dict() + states = (session.query(l2network_models_v2.NetworkState). 
+ all()) + for state in states: + if state.physical_network not in allocations: + allocations[state.physical_network] = set() + allocations[state.physical_network].add(state) + + # process vlan ranges for each configured physical network + for physical_network, vlan_ranges in network_vlan_ranges.iteritems(): + # determine current configured allocatable vlans for this + # physical network + vlan_ids = set() + for vlan_range in vlan_ranges: + vlan_ids |= set(moves.xrange(vlan_range[0], vlan_range[1] + 1)) + + # remove from table unallocated vlans not currently allocatable + if physical_network in allocations: + for state in allocations[physical_network]: + try: + # see if vlan is allocatable + vlan_ids.remove(state.vlan_id) + except KeyError: + # it's not allocatable, so check if its allocated + if not state.allocated: + # it's not, so remove it from table + LOG.debug(_("Removing vlan %(vlan_id)s on " + "physical network %(physical_network)s" + " from pool"), + {'vlan_id': state.vlan_id, + 'physical_network': physical_network}) + session.delete(state) + del allocations[physical_network] + + # add missing allocatable vlans to table + for vlan_id in sorted(vlan_ids): + state = l2network_models_v2.NetworkState(physical_network, + vlan_id) + session.add(state) + + # remove from table unallocated vlans for any unconfigured physical + # networks + for states in allocations.itervalues(): + for state in states: + if not state.allocated: + LOG.debug(_("Removing vlan %(vlan_id)s on physical " + "network %(physical_network)s" + " from pool"), + {'vlan_id': state.vlan_id, + 'physical_network': state.physical_network}) + session.delete(state) + + +def get_network_state(physical_network, vlan_id): + """Get state of specified network.""" + + session = db.get_session() + try: + state = (session.query(l2network_models_v2.NetworkState). + filter_by(physical_network=physical_network, + vlan_id=vlan_id). 
+ one()) + return state + except exc.NoResultFound: + return None + + +def reserve_network(session): + with session.begin(subtransactions=True): + state = (session.query(l2network_models_v2.NetworkState). + filter_by(allocated=False). + with_lockmode('update'). + first()) + if not state: + raise n_exc.NoNetworkAvailable() + LOG.debug(_("Reserving vlan %(vlan_id)s on physical network " + "%(physical_network)s from pool"), + {'vlan_id': state.vlan_id, + 'physical_network': state.physical_network}) + state.allocated = True + return (state.physical_network, state.vlan_id) + + +def reserve_specific_network(session, physical_network, vlan_id): + with session.begin(subtransactions=True): + try: + state = (session.query(l2network_models_v2.NetworkState). + filter_by(physical_network=physical_network, + vlan_id=vlan_id). + with_lockmode('update'). + one()) + if state.allocated: + if vlan_id == constants.FLAT_VLAN_ID: + raise n_exc.FlatNetworkInUse( + physical_network=physical_network) + else: + raise n_exc.VlanIdInUse(vlan_id=vlan_id, + physical_network=physical_network) + LOG.debug(_("Reserving specific vlan %(vlan_id)s on physical " + "network %(physical_network)s from pool"), + {'vlan_id': vlan_id, + 'physical_network': physical_network}) + state.allocated = True + except exc.NoResultFound: + LOG.debug(_("Reserving specific vlan %(vlan_id)s on physical " + "network %(physical_network)s outside pool"), + {'vlan_id': vlan_id, + 'physical_network': physical_network}) + state = l2network_models_v2.NetworkState(physical_network, vlan_id) + state.allocated = True + session.add(state) + + +def release_network(session, physical_network, vlan_id, network_vlan_ranges): + with session.begin(subtransactions=True): + try: + state = (session.query(l2network_models_v2.NetworkState). + filter_by(physical_network=physical_network, + vlan_id=vlan_id). + with_lockmode('update'). 
+ one()) + state.allocated = False + inside = False + for vlan_range in network_vlan_ranges.get(physical_network, []): + if vlan_id >= vlan_range[0] and vlan_id <= vlan_range[1]: + inside = True + break + if inside: + LOG.debug(_("Releasing vlan %(vlan_id)s on physical network " + "%(physical_network)s to pool"), + {'vlan_id': vlan_id, + 'physical_network': physical_network}) + else: + LOG.debug(_("Releasing vlan %(vlan_id)s on physical network " + "%(physical_network)s outside pool"), + {'vlan_id': vlan_id, + 'physical_network': physical_network}) + session.delete(state) + except exc.NoResultFound: + LOG.warning(_("vlan_id %(vlan_id)s on physical network " + "%(physical_network)s not found"), + {'vlan_id': vlan_id, + 'physical_network': physical_network}) + + +def add_network_binding(session, network_id, physical_network, vlan_id): + with session.begin(subtransactions=True): + binding = l2network_models_v2.NetworkBinding(network_id, + physical_network, vlan_id) + session.add(binding) + + +def get_network_binding(session, network_id): + try: + binding = (session.query(l2network_models_v2.NetworkBinding). + filter_by(network_id=network_id). 
+ one()) + return binding + except exc.NoResultFound: + return + + +def get_port_from_device(device): + """Get port from database.""" + LOG.debug(_("get_port_from_device() called")) + session = db.get_session() + sg_binding_port = sg_db.SecurityGroupPortBinding.port_id + + query = session.query(models_v2.Port, + sg_db.SecurityGroupPortBinding.security_group_id) + query = query.outerjoin(sg_db.SecurityGroupPortBinding, + models_v2.Port.id == sg_binding_port) + query = query.filter(models_v2.Port.id.startswith(device)) + port_and_sgs = query.all() + if not port_and_sgs: + return + port = port_and_sgs[0][0] + plugin = manager.NeutronManager.get_plugin() + port_dict = plugin._make_port_dict(port) + port_dict['security_groups'] = [] + for port_in_db, sg_id in port_and_sgs: + if sg_id: + port_dict['security_groups'].append(sg_id) + port_dict['security_group_rules'] = [] + port_dict['security_group_source_groups'] = [] + port_dict['fixed_ips'] = [ip['ip_address'] + for ip in port['fixed_ips']] + return port_dict + + +def set_port_status(port_id, status): + """Set the port status.""" + LOG.debug(_("set_port_status as %s called"), status) + session = db.get_session() + try: + port = session.query(models_v2.Port).filter_by(id=port_id).one() + port['status'] = status + session.merge(port) + session.flush() + except exc.NoResultFound: + raise n_exc.PortNotFound(port_id=port_id) diff --git a/neutron/plugins/linuxbridge/db/l2network_models_v2.py b/neutron/plugins/linuxbridge/db/l2network_models_v2.py new file mode 100644 index 000000000..0c08e29c5 --- /dev/null +++ b/neutron/plugins/linuxbridge/db/l2network_models_v2.py @@ -0,0 +1,59 @@ +# Copyright (c) 2012 OpenStack Foundation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import sqlalchemy as sa + +from neutron.db import model_base + + +class NetworkState(model_base.BASEV2): + """Represents state of vlan_id on physical network.""" + __tablename__ = 'network_states' + + physical_network = sa.Column(sa.String(64), nullable=False, + primary_key=True) + vlan_id = sa.Column(sa.Integer, nullable=False, primary_key=True, + autoincrement=False) + allocated = sa.Column(sa.Boolean, nullable=False) + + def __init__(self, physical_network, vlan_id): + self.physical_network = physical_network + self.vlan_id = vlan_id + self.allocated = False + + def __repr__(self): + return "" % (self.physical_network, + self.vlan_id, self.allocated) + + +class NetworkBinding(model_base.BASEV2): + """Represents binding of virtual network to physical network and vlan.""" + __tablename__ = 'network_bindings' + + network_id = sa.Column(sa.String(36), + sa.ForeignKey('networks.id', ondelete="CASCADE"), + primary_key=True) + physical_network = sa.Column(sa.String(64)) + vlan_id = sa.Column(sa.Integer, nullable=False) + + def __init__(self, network_id, physical_network, vlan_id): + self.network_id = network_id + self.physical_network = physical_network + self.vlan_id = vlan_id + + def __repr__(self): + return "" % (self.network_id, + self.physical_network, + self.vlan_id) diff --git a/neutron/plugins/linuxbridge/lb_neutron_plugin.py b/neutron/plugins/linuxbridge/lb_neutron_plugin.py new file mode 100644 index 000000000..412275d24 --- /dev/null +++ b/neutron/plugins/linuxbridge/lb_neutron_plugin.py @@ -0,0 +1,530 @@ +# Copyright (c) 2012 OpenStack 
Foundation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import sys + +from oslo.config import cfg + +from neutron.agent import securitygroups_rpc as sg_rpc +from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api +from neutron.api.rpc.agentnotifiers import l3_rpc_agent_api +from neutron.api.v2 import attributes +from neutron.common import constants as q_const +from neutron.common import exceptions as n_exc +from neutron.common import rpc_compat +from neutron.common import topics +from neutron.common import utils +from neutron.db import agents_db +from neutron.db import agentschedulers_db +from neutron.db import api as db_api +from neutron.db import db_base_plugin_v2 +from neutron.db import dhcp_rpc_base +from neutron.db import external_net_db +from neutron.db import extraroute_db +from neutron.db import l3_agentschedulers_db +from neutron.db import l3_gwmode_db +from neutron.db import l3_rpc_base +from neutron.db import portbindings_db +from neutron.db import quota_db # noqa +from neutron.db import securitygroups_rpc_base as sg_db_rpc +from neutron.extensions import portbindings +from neutron.extensions import providernet as provider +from neutron import manager +from neutron.openstack.common import importutils +from neutron.openstack.common import log as logging +from neutron.plugins.common import constants as svc_constants +from neutron.plugins.common import utils as plugin_utils +from neutron.plugins.linuxbridge.common import constants +from 
neutron.plugins.linuxbridge.db import l2network_db_v2 as db + + +LOG = logging.getLogger(__name__) + + +class LinuxBridgeRpcCallbacks(rpc_compat.RpcCallback, + dhcp_rpc_base.DhcpRpcCallbackMixin, + l3_rpc_base.L3RpcCallbackMixin, + sg_db_rpc.SecurityGroupServerRpcCallbackMixin + ): + + # history + # 1.1 Support Security Group RPC + RPC_API_VERSION = '1.1' + # Device names start with "tap" + TAP_PREFIX_LEN = 3 + + @classmethod + def get_port_from_device(cls, device): + port = db.get_port_from_device(device[cls.TAP_PREFIX_LEN:]) + if port: + port['device'] = device + return port + + def get_device_details(self, rpc_context, **kwargs): + """Agent requests device details.""" + agent_id = kwargs.get('agent_id') + device = kwargs.get('device') + LOG.debug(_("Device %(device)s details requested from %(agent_id)s"), + {'device': device, 'agent_id': agent_id}) + port = self.get_port_from_device(device) + if port: + binding = db.get_network_binding(db_api.get_session(), + port['network_id']) + (network_type, + segmentation_id) = constants.interpret_vlan_id(binding.vlan_id) + entry = {'device': device, + 'network_type': network_type, + 'physical_network': binding.physical_network, + 'segmentation_id': segmentation_id, + 'network_id': port['network_id'], + 'port_id': port['id'], + 'admin_state_up': port['admin_state_up']} + if cfg.CONF.AGENT.rpc_support_old_agents: + entry['vlan_id'] = binding.vlan_id + new_status = (q_const.PORT_STATUS_ACTIVE if port['admin_state_up'] + else q_const.PORT_STATUS_DOWN) + if port['status'] != new_status: + db.set_port_status(port['id'], new_status) + else: + entry = {'device': device} + LOG.debug(_("%s can not be found in database"), device) + return entry + + def update_device_down(self, rpc_context, **kwargs): + """Device no longer exists on agent.""" + # TODO(garyk) - live migration and port status + agent_id = kwargs.get('agent_id') + device = kwargs.get('device') + host = kwargs.get('host') + port = self.get_port_from_device(device) + 
LOG.debug(_("Device %(device)s no longer exists on %(agent_id)s"), + {'device': device, 'agent_id': agent_id}) + plugin = manager.NeutronManager.get_plugin() + if port: + entry = {'device': device, + 'exists': True} + if (host and not + plugin.get_port_host(rpc_context, port['id']) == host): + LOG.debug(_("Device %(device)s not bound to the" + " agent host %(host)s"), + {'device': device, 'host': host}) + elif port['status'] != q_const.PORT_STATUS_DOWN: + # Set port status to DOWN + db.set_port_status(port['id'], q_const.PORT_STATUS_DOWN) + else: + entry = {'device': device, + 'exists': False} + LOG.debug(_("%s can not be found in database"), device) + return entry + + def update_device_up(self, rpc_context, **kwargs): + """Device is up on agent.""" + agent_id = kwargs.get('agent_id') + device = kwargs.get('device') + host = kwargs.get('host') + port = self.get_port_from_device(device) + LOG.debug(_("Device %(device)s up on %(agent_id)s"), + {'device': device, 'agent_id': agent_id}) + plugin = manager.NeutronManager.get_plugin() + if port: + if (host and + not plugin.get_port_host(rpc_context, port['id']) == host): + LOG.debug(_("Device %(device)s not bound to the" + " agent host %(host)s"), + {'device': device, 'host': host}) + return + elif port['status'] != q_const.PORT_STATUS_ACTIVE: + db.set_port_status(port['id'], + q_const.PORT_STATUS_ACTIVE) + else: + LOG.debug(_("%s can not be found in database"), device) + + +class AgentNotifierApi(rpc_compat.RpcProxy, + sg_rpc.SecurityGroupAgentRpcApiMixin): + '''Agent side of the linux bridge rpc API. + + API version history: + 1.0 - Initial version. + 1.1 - Added get_active_networks_info, create_dhcp_port, + and update_dhcp_port methods. 
+ + + ''' + + BASE_RPC_API_VERSION = '1.1' + + def __init__(self, topic): + super(AgentNotifierApi, self).__init__( + topic=topic, default_version=self.BASE_RPC_API_VERSION) + self.topic = topic + self.topic_network_delete = topics.get_topic_name(topic, + topics.NETWORK, + topics.DELETE) + self.topic_port_update = topics.get_topic_name(topic, + topics.PORT, + topics.UPDATE) + + def network_delete(self, context, network_id): + self.fanout_cast(context, + self.make_msg('network_delete', + network_id=network_id), + topic=self.topic_network_delete) + + def port_update(self, context, port, physical_network, vlan_id): + network_type, segmentation_id = constants.interpret_vlan_id(vlan_id) + kwargs = {'port': port, + 'network_type': network_type, + 'physical_network': physical_network, + 'segmentation_id': segmentation_id} + if cfg.CONF.AGENT.rpc_support_old_agents: + kwargs['vlan_id'] = vlan_id + msg = self.make_msg('port_update', **kwargs) + self.fanout_cast(context, msg, + topic=self.topic_port_update) + + +class LinuxBridgePluginV2(db_base_plugin_v2.NeutronDbPluginV2, + external_net_db.External_net_db_mixin, + extraroute_db.ExtraRoute_db_mixin, + l3_gwmode_db.L3_NAT_db_mixin, + sg_db_rpc.SecurityGroupServerRpcMixin, + l3_agentschedulers_db.L3AgentSchedulerDbMixin, + agentschedulers_db.DhcpAgentSchedulerDbMixin, + portbindings_db.PortBindingMixin): + """Implement the Neutron abstractions using Linux bridging. + + A new VLAN is created for each network. An agent is relied upon + to perform the actual Linux bridge configuration on each host. + + The provider extension is also supported. As discussed in + https://bugs.launchpad.net/neutron/+bug/1023156, this class could + be simplified, and filtering on extended attributes could be + handled, by adding support for extended attributes to the + NeutronDbPluginV2 base class. When that occurs, this class should + be updated to take advantage of it. 
+ + The port binding extension enables an external application relay + information to and from the plugin. + """ + + # This attribute specifies whether the plugin supports or not + # bulk/pagination/sorting operations. Name mangling is used in + # order to ensure it is qualified by class + __native_bulk_support = True + __native_pagination_support = True + __native_sorting_support = True + + _supported_extension_aliases = ["provider", "external-net", "router", + "ext-gw-mode", "binding", "quotas", + "security-group", "agent", "extraroute", + "l3_agent_scheduler", + "dhcp_agent_scheduler"] + + @property + def supported_extension_aliases(self): + if not hasattr(self, '_aliases'): + aliases = self._supported_extension_aliases[:] + sg_rpc.disable_security_group_extension_by_config(aliases) + self._aliases = aliases + return self._aliases + + def __init__(self): + super(LinuxBridgePluginV2, self).__init__() + self.base_binding_dict = { + portbindings.VIF_TYPE: portbindings.VIF_TYPE_BRIDGE, + portbindings.VIF_DETAILS: { + # TODO(rkukura): Replace with new VIF security details + portbindings.CAP_PORT_FILTER: + 'security-group' in self.supported_extension_aliases}} + self._parse_network_vlan_ranges() + db.sync_network_states(self.network_vlan_ranges) + self.tenant_network_type = cfg.CONF.VLANS.tenant_network_type + if self.tenant_network_type not in [svc_constants.TYPE_LOCAL, + svc_constants.TYPE_VLAN, + svc_constants.TYPE_NONE]: + LOG.error(_("Invalid tenant_network_type: %s. 
" + "Service terminated!"), + self.tenant_network_type) + sys.exit(1) + self._setup_rpc() + self.network_scheduler = importutils.import_object( + cfg.CONF.network_scheduler_driver + ) + self.router_scheduler = importutils.import_object( + cfg.CONF.router_scheduler_driver + ) + LOG.debug(_("Linux Bridge Plugin initialization complete")) + + def _setup_rpc(self): + # RPC support + self.service_topics = {svc_constants.CORE: topics.PLUGIN, + svc_constants.L3_ROUTER_NAT: topics.L3PLUGIN} + self.conn = rpc_compat.create_connection(new=True) + self.endpoints = [LinuxBridgeRpcCallbacks(), + agents_db.AgentExtRpcCallback()] + for svc_topic in self.service_topics.values(): + self.conn.create_consumer(svc_topic, self.endpoints, fanout=False) + # Consume from all consumers in threads + self.conn.consume_in_threads() + self.notifier = AgentNotifierApi(topics.AGENT) + self.agent_notifiers[q_const.AGENT_TYPE_DHCP] = ( + dhcp_rpc_agent_api.DhcpAgentNotifyAPI() + ) + self.agent_notifiers[q_const.AGENT_TYPE_L3] = ( + l3_rpc_agent_api.L3AgentNotifyAPI() + ) + + def _parse_network_vlan_ranges(self): + try: + self.network_vlan_ranges = plugin_utils.parse_network_vlan_ranges( + cfg.CONF.VLANS.network_vlan_ranges) + except Exception as ex: + LOG.error(_("%s. 
Agent terminated!"), ex) + sys.exit(1) + LOG.info(_("Network VLAN ranges: %s"), self.network_vlan_ranges) + + def _add_network_vlan_range(self, physical_network, vlan_min, vlan_max): + self._add_network(physical_network) + self.network_vlan_ranges[physical_network].append((vlan_min, vlan_max)) + + def _add_network(self, physical_network): + if physical_network not in self.network_vlan_ranges: + self.network_vlan_ranges[physical_network] = [] + + def _extend_network_dict_provider(self, context, network): + binding = db.get_network_binding(context.session, network['id']) + if binding.vlan_id == constants.FLAT_VLAN_ID: + network[provider.NETWORK_TYPE] = svc_constants.TYPE_FLAT + network[provider.PHYSICAL_NETWORK] = binding.physical_network + network[provider.SEGMENTATION_ID] = None + elif binding.vlan_id == constants.LOCAL_VLAN_ID: + network[provider.NETWORK_TYPE] = svc_constants.TYPE_LOCAL + network[provider.PHYSICAL_NETWORK] = None + network[provider.SEGMENTATION_ID] = None + else: + network[provider.NETWORK_TYPE] = svc_constants.TYPE_VLAN + network[provider.PHYSICAL_NETWORK] = binding.physical_network + network[provider.SEGMENTATION_ID] = binding.vlan_id + + def _process_provider_create(self, context, attrs): + network_type = attrs.get(provider.NETWORK_TYPE) + physical_network = attrs.get(provider.PHYSICAL_NETWORK) + segmentation_id = attrs.get(provider.SEGMENTATION_ID) + + network_type_set = attributes.is_attr_set(network_type) + physical_network_set = attributes.is_attr_set(physical_network) + segmentation_id_set = attributes.is_attr_set(segmentation_id) + + if not (network_type_set or physical_network_set or + segmentation_id_set): + return (None, None, None) + + if not network_type_set: + msg = _("provider:network_type required") + raise n_exc.InvalidInput(error_message=msg) + elif network_type == svc_constants.TYPE_FLAT: + if segmentation_id_set: + msg = _("provider:segmentation_id specified for flat network") + raise n_exc.InvalidInput(error_message=msg) + 
else: + segmentation_id = constants.FLAT_VLAN_ID + elif network_type == svc_constants.TYPE_VLAN: + if not segmentation_id_set: + msg = _("provider:segmentation_id required") + raise n_exc.InvalidInput(error_message=msg) + if not utils.is_valid_vlan_tag(segmentation_id): + msg = (_("provider:segmentation_id out of range " + "(%(min_id)s through %(max_id)s)") % + {'min_id': q_const.MIN_VLAN_TAG, + 'max_id': q_const.MAX_VLAN_TAG}) + raise n_exc.InvalidInput(error_message=msg) + elif network_type == svc_constants.TYPE_LOCAL: + if physical_network_set: + msg = _("provider:physical_network specified for local " + "network") + raise n_exc.InvalidInput(error_message=msg) + else: + physical_network = None + if segmentation_id_set: + msg = _("provider:segmentation_id specified for local " + "network") + raise n_exc.InvalidInput(error_message=msg) + else: + segmentation_id = constants.LOCAL_VLAN_ID + else: + msg = _("provider:network_type %s not supported") % network_type + raise n_exc.InvalidInput(error_message=msg) + + if network_type in [svc_constants.TYPE_VLAN, svc_constants.TYPE_FLAT]: + if physical_network_set: + if physical_network not in self.network_vlan_ranges: + msg = (_("Unknown provider:physical_network %s") % + physical_network) + raise n_exc.InvalidInput(error_message=msg) + elif 'default' in self.network_vlan_ranges: + physical_network = 'default' + else: + msg = _("provider:physical_network required") + raise n_exc.InvalidInput(error_message=msg) + + return (network_type, physical_network, segmentation_id) + + def create_network(self, context, network): + (network_type, physical_network, + vlan_id) = self._process_provider_create(context, + network['network']) + + session = context.session + with session.begin(subtransactions=True): + #set up default security groups + tenant_id = self._get_tenant_id_for_create( + context, network['network']) + self._ensure_default_security_group(context, tenant_id) + + if not network_type: + # tenant network + network_type = 
self.tenant_network_type + if network_type == svc_constants.TYPE_NONE: + raise n_exc.TenantNetworksDisabled() + elif network_type == svc_constants.TYPE_VLAN: + physical_network, vlan_id = db.reserve_network(session) + else: # TYPE_LOCAL + vlan_id = constants.LOCAL_VLAN_ID + else: + # provider network + if network_type in [svc_constants.TYPE_VLAN, + svc_constants.TYPE_FLAT]: + db.reserve_specific_network(session, physical_network, + vlan_id) + # no reservation needed for TYPE_LOCAL + net = super(LinuxBridgePluginV2, self).create_network(context, + network) + db.add_network_binding(session, net['id'], + physical_network, vlan_id) + self._process_l3_create(context, net, network['network']) + self._extend_network_dict_provider(context, net) + # note - exception will rollback entire transaction + return net + + def update_network(self, context, id, network): + provider._raise_if_updates_provider_attributes(network['network']) + + session = context.session + with session.begin(subtransactions=True): + net = super(LinuxBridgePluginV2, self).update_network(context, id, + network) + self._process_l3_update(context, net, network['network']) + self._extend_network_dict_provider(context, net) + return net + + def delete_network(self, context, id): + session = context.session + with session.begin(subtransactions=True): + binding = db.get_network_binding(session, id) + self._process_l3_delete(context, id) + super(LinuxBridgePluginV2, self).delete_network(context, id) + if binding.vlan_id != constants.LOCAL_VLAN_ID: + db.release_network(session, binding.physical_network, + binding.vlan_id, self.network_vlan_ranges) + # the network_binding record is deleted via cascade from + # the network record, so explicit removal is not necessary + self.notifier.network_delete(context, id) + + def get_network(self, context, id, fields=None): + session = context.session + with session.begin(subtransactions=True): + net = super(LinuxBridgePluginV2, self).get_network(context, + id, None) + 
self._extend_network_dict_provider(context, net) + return self._fields(net, fields) + + def get_networks(self, context, filters=None, fields=None, + sorts=None, limit=None, marker=None, page_reverse=False): + session = context.session + with session.begin(subtransactions=True): + nets = super(LinuxBridgePluginV2, + self).get_networks(context, filters, None, sorts, + limit, marker, page_reverse) + for net in nets: + self._extend_network_dict_provider(context, net) + + return [self._fields(net, fields) for net in nets] + + def create_port(self, context, port): + session = context.session + port_data = port['port'] + with session.begin(subtransactions=True): + self._ensure_default_security_group_on_port(context, port) + sgids = self._get_security_groups_on_port(context, port) + # Set port status as 'DOWN'. This will be updated by agent + port['port']['status'] = q_const.PORT_STATUS_DOWN + + port = super(LinuxBridgePluginV2, + self).create_port(context, port) + self._process_portbindings_create_and_update(context, + port_data, + port) + self._process_port_create_security_group( + context, port, sgids) + self.notify_security_groups_member_updated(context, port) + return port + + def update_port(self, context, id, port): + original_port = self.get_port(context, id) + session = context.session + need_port_update_notify = False + + with session.begin(subtransactions=True): + updated_port = super(LinuxBridgePluginV2, self).update_port( + context, id, port) + self._process_portbindings_create_and_update(context, + port['port'], + updated_port) + need_port_update_notify = self.update_security_group_on_port( + context, id, port, original_port, updated_port) + + need_port_update_notify |= self.is_security_group_member_updated( + context, original_port, updated_port) + + if original_port['admin_state_up'] != updated_port['admin_state_up']: + need_port_update_notify = True + + if need_port_update_notify: + self._notify_port_updated(context, updated_port) + return updated_port + + 
def delete_port(self, context, id, l3_port_check=True): + + # if needed, check to see if this is a port owned by + # and l3-router. If so, we should prevent deletion. + if l3_port_check: + self.prevent_l3_port_deletion(context, id) + + session = context.session + with session.begin(subtransactions=True): + self.disassociate_floatingips(context, id) + port = self.get_port(context, id) + self._delete_port_security_group_bindings(context, id) + super(LinuxBridgePluginV2, self).delete_port(context, id) + + self.notify_security_groups_member_updated(context, port) + + def _notify_port_updated(self, context, port): + binding = db.get_network_binding(context.session, + port['network_id']) + self.notifier.port_update(context, port, + binding.physical_network, + binding.vlan_id) diff --git a/neutron/plugins/metaplugin/README b/neutron/plugins/metaplugin/README new file mode 100644 index 000000000..8dbc47756 --- /dev/null +++ b/neutron/plugins/metaplugin/README @@ -0,0 +1,92 @@ +# -- Background + +This plugin supports multiple plugin at same time. This plugin is for L3 connectivility +between networks which are realized by different plugins.This plugin adds new attributes 'flavor:network' and 'flavor:router". +flavor:network corresponds to specific l2 plugin ( flavor-plugin mapping could be configurable by plugin_list config. +flavor:router corresponds to specific l3 plugin ( flavor-plugin mapping could be configurable by l3_plugin_list config. Note that Metaplugin can provide l3 functionaliteis for l2 plugin which didn't support l3 extension yet. +This plugin also support extensions. We can map extension to plugin by using extension_map config. + +[database] +# This line MUST be changed to actually run the plugin. +# Example: +# connection = mysql://root:nova@127.0.0.1:3306/ovs_neutron +# Replace 127.0.0.1 above with the IP address of the database used by the +# main neutron server. (Leave it as is if the database runs on this host.) 
+connection = mysql://root:password@localhost/neutron_metaplugin?charset=utf8 + +# Database reconnection retry times - in event connectivity is lost +# set to -1 implies an infinite retry count +# max_retries = 10 +# Database reconnection interval in seconds - in event connectivity is lost +retry_interval = 2 + +[meta] +## This is list of flavor:neutron_plugins +# extension method is used in the order of this list +plugin_list= 'openvswitch:neutron.plugins.openvswitch.ovs_neutron_plugin.OVSneutronPluginV2,linuxbridge:neutron.plugins.linuxbridge.lb_neutron_plugin.LinuxBridgePluginV2' +# plugin for l3 +l3_plugin_list= 'openvswitch:neutron.plugins.openvswitch.ovs_neutron_plugin.OVSneutronPluginV2,linuxbridge:neutron.plugins.linuxbridge.lb_neutron_plugin.LinuxBridgePluginV2' + +# Default value of flavor +default_flavor = 'openvswitch' +# Default value for l3 +default_l3_flavor = 'openvswitch' + +# supported extensions +supported_extension_aliases = 'providernet' +# specific method map for each flavor to extensions +extension_map = 'get_port_stats:nvp' + +# -- BridgeDriver Configuration +# In order to use metaplugin, you should use MetaDriver. Following configuration is needed. + +[DEFAULT] +# Meta Plugin +# Mapping between flavor and driver +meta_flavor_driver_mappings = openvswitch:neutron.agent.linux.interface.OVSInterfaceDriver, linuxbridge:neutron.agent.linux.interface.BridgeInterfaceDriver +# interface driver for MetaPlugin +interface_driver = neutron.agent.linux.interface.MetaInterfaceDriver + +[proxy] +auth_url = http://10.0.0.1:35357/v2.0 +auth_region = RegionOne +admin_tenant_name = service +admin_user = neutron +admin_password = password + + +# -- Agent +Agents for Metaplugin are in neutron/plugins/metaplugin/agent +linuxbridge_neutron_agent and ovs_neutron_agent is available. + +# -- Extensions + +- flavor +MetaPlugin supports flavor and provider net extension. +Metaplugin select plugin_list using flavor. +One plugin may use multiple flavor value. 
If the plugin support flavor, it may provide +multiple flavor of network. + +- Attribute extension +Each plugin can use attribute extension such as provider_net, if you specify that in supported_extension_aliases. + +- providernet +Vlan ID range of each plugin should be different, since Metaplugin does not manage that. + +#- limitations + +Basically, All plugin should inherit NeutronDbPluginV2. +Metaplugin assumes all plugin share same Database especially for IPAM part in NeutronV2 API. +You can use another plugin if you use ProxyPluginV2, which proxies request to the another neutron server. + +Example flavor configuration for ProxyPluginV2 + +meta_flavor_driver_mappings = "openvswitch:neutron.agent.linux.interface.OVSInterfaceDriver,proxy:neutron.plugins.metaplugin.proxy_neutron_plugin.ProxyPluginV2" + +- Limited L3 support +In folsom version, l3 is an extension. There is no way to extend extension attributes. +so you can set flavor:router value but you can't get flavor:router value in API output. +L3 agent doesn't support flavor:router. + + + diff --git a/neutron/plugins/metaplugin/__init__.py b/neutron/plugins/metaplugin/__init__.py new file mode 100644 index 000000000..d8bce7745 --- /dev/null +++ b/neutron/plugins/metaplugin/__init__.py @@ -0,0 +1,16 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2012, Nachi Ueno, NTT MCL, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
diff --git a/neutron/plugins/metaplugin/common/__init__.py b/neutron/plugins/metaplugin/common/__init__.py new file mode 100644 index 000000000..d8bce7745 --- /dev/null +++ b/neutron/plugins/metaplugin/common/__init__.py @@ -0,0 +1,16 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2012, Nachi Ueno, NTT MCL, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/neutron/plugins/metaplugin/common/config.py b/neutron/plugins/metaplugin/common/config.py new file mode 100644 index 000000000..26978d71b --- /dev/null +++ b/neutron/plugins/metaplugin/common/config.py @@ -0,0 +1,80 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2012, Nachi Ueno, NTT MCL, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo.config import cfg + + +meta_plugin_opts = [ + cfg.StrOpt( + 'plugin_list', + default='', + help=_("Comma separated list of flavor:neutron_plugin for " + "plugins to load. 
Extension method is searched in the " + "list order and the first one is used.")), + cfg.StrOpt( + 'l3_plugin_list', + default='', + help=_("Comma separated list of flavor:neutron_plugin for L3 " + "service plugins to load. This is intended for specifying " + "L2 plugins which support L3 functions. If you use a router " + "service plugin, set this blank.")), + cfg.StrOpt( + 'default_flavor', + default='', + help=_("Default flavor to use, when flavor:network is not " + "specified at network creation.")), + cfg.StrOpt( + 'default_l3_flavor', + default='', + help=_("Default L3 flavor to use, when flavor:router is not " + "specified at router creation. Ignored if 'l3_plugin_list' " + "is blank.")), + cfg.StrOpt( + 'supported_extension_aliases', + default='', + help=_("Comma separated list of supported extension aliases.")), + cfg.StrOpt( + 'extension_map', + default='', + help=_("Comma separated list of method:flavor to select specific " + "plugin for a method. This has priority over method search " + "order based on 'plugin_list'.")), + cfg.StrOpt( + 'rpc_flavor', + default='', + help=_("Specifies flavor for plugin to handle 'q-plugin' RPC " + "requests.")), +] + +proxy_plugin_opts = [ + cfg.StrOpt('admin_user', + help=_("Admin user")), + cfg.StrOpt('admin_password', + help=_("Admin password"), + secret=True), + cfg.StrOpt('admin_tenant_name', + help=_("Admin tenant name")), + cfg.StrOpt('auth_url', + help=_("Authentication URL")), + cfg.StrOpt('auth_strategy', default='keystone', + help=_("The type of authentication to use")), + cfg.StrOpt('auth_region', + help=_("Authentication region")), +] + +cfg.CONF.register_opts(meta_plugin_opts, "META") +cfg.CONF.register_opts(proxy_plugin_opts, "PROXY") diff --git a/neutron/plugins/metaplugin/meta_db_v2.py b/neutron/plugins/metaplugin/meta_db_v2.py new file mode 100644 index 000000000..68c9055ff --- /dev/null +++ b/neutron/plugins/metaplugin/meta_db_v2.py @@ -0,0 +1,52 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# 
Copyright 2012, Nachi Ueno, NTT MCL, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy.orm import exc + +from neutron.plugins.metaplugin import meta_models_v2 + + +def get_flavor_by_network(session, net_id): + try: + binding = (session.query(meta_models_v2.NetworkFlavor). + filter_by(network_id=net_id). + one()) + except exc.NoResultFound: + return None + return binding.flavor + + +def add_network_flavor_binding(session, flavor, net_id): + binding = meta_models_v2.NetworkFlavor(flavor=flavor, network_id=net_id) + session.add(binding) + return binding + + +def get_flavor_by_router(session, router_id): + try: + binding = (session.query(meta_models_v2.RouterFlavor). + filter_by(router_id=router_id). + one()) + except exc.NoResultFound: + return None + return binding.flavor + + +def add_router_flavor_binding(session, flavor, router_id): + binding = meta_models_v2.RouterFlavor(flavor=flavor, router_id=router_id) + session.add(binding) + return binding diff --git a/neutron/plugins/metaplugin/meta_models_v2.py b/neutron/plugins/metaplugin/meta_models_v2.py new file mode 100644 index 000000000..566d1d8d8 --- /dev/null +++ b/neutron/plugins/metaplugin/meta_models_v2.py @@ -0,0 +1,43 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2012, Nachi Ueno, NTT MCL, Inc. +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import sqlalchemy as sa +from sqlalchemy import Column, String + +from neutron.db import models_v2 + + +class NetworkFlavor(models_v2.model_base.BASEV2): + """Represents a binding of network_id to flavor.""" + flavor = Column(String(255)) + network_id = sa.Column(sa.String(36), sa.ForeignKey('networks.id', + ondelete="CASCADE"), + primary_key=True) + + def __repr__(self): + return "<NetworkFlavor(%s,%s)>" % (self.flavor, self.network_id) + + +class RouterFlavor(models_v2.model_base.BASEV2): + """Represents a binding of router_id to flavor.""" + flavor = Column(String(255)) + router_id = sa.Column(sa.String(36), sa.ForeignKey('routers.id', + ondelete="CASCADE"), + primary_key=True) + + def __repr__(self): + return "<RouterFlavor(%s,%s)>" % (self.flavor, self.router_id) diff --git a/neutron/plugins/metaplugin/meta_neutron_plugin.py b/neutron/plugins/metaplugin/meta_neutron_plugin.py new file mode 100644 index 000000000..92a962846 --- /dev/null +++ b/neutron/plugins/metaplugin/meta_neutron_plugin.py @@ -0,0 +1,419 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012, Nachi Ueno, NTT MCL, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License.
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo.config import cfg + +from neutron.common import exceptions as exc +from neutron.common import topics +from neutron import context as neutron_context +from neutron.db import api as db +from neutron.db import db_base_plugin_v2 +from neutron.db import external_net_db +from neutron.db import extraroute_db +from neutron.db import l3_db +from neutron.db import models_v2 +from neutron.extensions import flavor as ext_flavor +from neutron.openstack.common import importutils +from neutron.openstack.common import log as logging +from neutron.plugins.metaplugin.common import config # noqa +from neutron.plugins.metaplugin import meta_db_v2 +from neutron.plugins.metaplugin import meta_models_v2 + + +LOG = logging.getLogger(__name__) + + +# Hooks used to select records which belong a target plugin. 
+def _meta_network_model_hook(context, original_model, query): + return query.outerjoin(meta_models_v2.NetworkFlavor, + meta_models_v2.NetworkFlavor.network_id == + models_v2.Network.id) + + +def _meta_port_model_hook(context, original_model, query): + return query.join(meta_models_v2.NetworkFlavor, + meta_models_v2.NetworkFlavor.network_id == + models_v2.Port.network_id) + + +def _meta_flavor_filter_hook(query, filters): + if ext_flavor.FLAVOR_NETWORK in filters: + return query.filter(meta_models_v2.NetworkFlavor.flavor == + filters[ext_flavor.FLAVOR_NETWORK][0]) + return query + + +# Metaplugin Exceptions +class FlavorNotFound(exc.NotFound): + message = _("Flavor %(flavor)s could not be found") + + +class FaildToAddFlavorBinding(exc.NeutronException): + message = _("Failed to add flavor binding") + + +class MetaPluginV2(db_base_plugin_v2.NeutronDbPluginV2, + external_net_db.External_net_db_mixin, + extraroute_db.ExtraRoute_db_mixin): + + def __init__(self, configfile=None): + super(MetaPluginV2, self).__init__() + LOG.debug(_("Start initializing metaplugin")) + self.supported_extension_aliases = ['flavor', 'external-net'] + if cfg.CONF.META.supported_extension_aliases: + cfg_aliases = cfg.CONF.META.supported_extension_aliases.split(',') + self.supported_extension_aliases += cfg_aliases + + # Ignore config option overapping + def _is_opt_registered(opts, opt): + if opt.dest in opts: + return True + else: + return False + + cfg._is_opt_registered = _is_opt_registered + + # Keep existing tables if multiple plugin use same table name. 
+ db.model_base.NeutronBase.__table_args__ = {'keep_existing': True} + + self.plugins = {} + + plugin_list = [plugin_set.split(':') + for plugin_set + in cfg.CONF.META.plugin_list.split(',')] + self.rpc_flavor = cfg.CONF.META.rpc_flavor + topic_save = topics.PLUGIN + topic_fake = topic_save + '-metaplugin' + for flavor, plugin_provider in plugin_list: + # Rename topic used by a plugin other than rpc_flavor during + # loading the plugin instance if rpc_flavor is specified. + # This enforces the plugin specified by rpc_flavor is only + # consumer of 'q-plugin'. It is a bit tricky but there is no + # bad effect. + if self.rpc_flavor and self.rpc_flavor != flavor: + topics.PLUGIN = topic_fake + self.plugins[flavor] = self._load_plugin(plugin_provider) + topics.PLUGIN = topic_save + + self.l3_plugins = {} + if cfg.CONF.META.l3_plugin_list: + l3_plugin_list = [plugin_set.split(':') + for plugin_set + in cfg.CONF.META.l3_plugin_list.split(',')] + for flavor, plugin_provider in l3_plugin_list: + if flavor in self.plugins: + self.l3_plugins[flavor] = self.plugins[flavor] + else: + # For l3 only plugin + self.l3_plugins[flavor] = self._load_plugin( + plugin_provider) + + self.default_flavor = cfg.CONF.META.default_flavor + if self.default_flavor not in self.plugins: + raise exc.Invalid(_('default_flavor %s is not plugin list') % + self.default_flavor) + + if self.l3_plugins: + self.default_l3_flavor = cfg.CONF.META.default_l3_flavor + if self.default_l3_flavor not in self.l3_plugins: + raise exc.Invalid(_('default_l3_flavor %s is not plugin list') + % self.default_l3_flavor) + self.supported_extension_aliases += ['router', 'ext-gw-mode', + 'extraroute'] + + if self.rpc_flavor and self.rpc_flavor not in self.plugins: + raise exc.Invalid(_('rpc_flavor %s is not plugin list') % + self.rpc_flavor) + + self.extension_map = {} + if not cfg.CONF.META.extension_map == '': + extension_list = [method_set.split(':') + for method_set + in cfg.CONF.META.extension_map.split(',')] + for 
method_name, flavor in extension_list: + self.extension_map[method_name] = flavor + + # Register hooks. + # The hooks are applied for each target plugin instance when + # calling the base class to get networks/ports so that only records + # which belong to the plugin are selected. + #NOTE: Doing registration here (within __init__()) is to avoid + # registration when merely importing this file. This is only + # for running whole unit tests. + db_base_plugin_v2.NeutronDbPluginV2.register_model_query_hook( + models_v2.Network, + 'metaplugin_net', + _meta_network_model_hook, + None, + _meta_flavor_filter_hook) + db_base_plugin_v2.NeutronDbPluginV2.register_model_query_hook( + models_v2.Port, + 'metaplugin_port', + _meta_port_model_hook, + None, + _meta_flavor_filter_hook) + + def _load_plugin(self, plugin_provider): + LOG.debug(_("Plugin location: %s"), plugin_provider) + plugin_klass = importutils.import_class(plugin_provider) + return plugin_klass() + + def _get_plugin(self, flavor): + if flavor not in self.plugins: + raise FlavorNotFound(flavor=flavor) + return self.plugins[flavor] + + def _get_l3_plugin(self, flavor): + if flavor not in self.l3_plugins: + raise FlavorNotFound(flavor=flavor) + return self.l3_plugins[flavor] + + def __getattr__(self, key): + # At first, try to pickup extension command from extension_map + + if key in self.extension_map: + flavor = self.extension_map[key] + plugin = self._get_plugin(flavor) + if plugin and hasattr(plugin, key): + return getattr(plugin, key) + + # Second, try to match extension method in order of plugin list + for flavor, plugin in self.plugins.items(): + if hasattr(plugin, key): + return getattr(plugin, key) + + # if no plugin support the method, then raise + raise AttributeError + + def _extend_network_dict(self, context, network): + flavor = self._get_flavor_by_network_id(context, network['id']) + network[ext_flavor.FLAVOR_NETWORK] = flavor + + def start_rpc_listeners(self): + return 
self.plugins[self.rpc_flavor].start_rpc_listeners() + + def rpc_workers_supported(self): + #NOTE: If a plugin which supports multiple RPC workers is desired + # to handle RPC, rpc_flavor must be specified. + return (self.rpc_flavor and + self.plugins[self.rpc_flavor].rpc_workers_supported()) + + def create_network(self, context, network): + n = network['network'] + flavor = n.get(ext_flavor.FLAVOR_NETWORK) + if str(flavor) not in self.plugins: + flavor = self.default_flavor + plugin = self._get_plugin(flavor) + net = plugin.create_network(context, network) + LOG.debug(_("Created network: %(net_id)s with flavor " + "%(flavor)s"), {'net_id': net['id'], 'flavor': flavor}) + try: + meta_db_v2.add_network_flavor_binding(context.session, + flavor, str(net['id'])) + except Exception: + LOG.exception(_('Failed to add flavor bindings')) + plugin.delete_network(context, net['id']) + raise FaildToAddFlavorBinding() + + LOG.debug(_("Created network: %s"), net['id']) + self._extend_network_dict(context, net) + return net + + def update_network(self, context, id, network): + flavor = meta_db_v2.get_flavor_by_network(context.session, id) + plugin = self._get_plugin(flavor) + return plugin.update_network(context, id, network) + + def delete_network(self, context, id): + flavor = meta_db_v2.get_flavor_by_network(context.session, id) + plugin = self._get_plugin(flavor) + return plugin.delete_network(context, id) + + def get_network(self, context, id, fields=None): + flavor = meta_db_v2.get_flavor_by_network(context.session, id) + plugin = self._get_plugin(flavor) + net = plugin.get_network(context, id, fields) + net['id'] = id + if not fields or ext_flavor.FLAVOR_NETWORK in fields: + self._extend_network_dict(context, net) + if fields and 'id' not in fields: + del net['id'] + return net + + def get_networks(self, context, filters=None, fields=None): + nets = [] + for flavor, plugin in self.plugins.items(): + if (filters and ext_flavor.FLAVOR_NETWORK in filters and + not flavor in 
filters[ext_flavor.FLAVOR_NETWORK]): + continue + if filters: + #NOTE: copy each time since a target plugin may modify + # plugin_filters. + plugin_filters = filters.copy() + else: + plugin_filters = {} + plugin_filters[ext_flavor.FLAVOR_NETWORK] = [flavor] + plugin_nets = plugin.get_networks(context, plugin_filters, fields) + for net in plugin_nets: + if not fields or ext_flavor.FLAVOR_NETWORK in fields: + net[ext_flavor.FLAVOR_NETWORK] = flavor + nets.append(net) + return nets + + def _get_flavor_by_network_id(self, context, network_id): + return meta_db_v2.get_flavor_by_network(context.session, network_id) + + def _get_flavor_by_router_id(self, context, router_id): + return meta_db_v2.get_flavor_by_router(context.session, router_id) + + def _get_plugin_by_network_id(self, context, network_id): + flavor = self._get_flavor_by_network_id(context, network_id) + return self._get_plugin(flavor) + + def create_port(self, context, port): + p = port['port'] + if 'network_id' not in p: + raise exc.NotFound + plugin = self._get_plugin_by_network_id(context, p['network_id']) + return plugin.create_port(context, port) + + def update_port(self, context, id, port): + port_in_db = self._get_port(context, id) + plugin = self._get_plugin_by_network_id(context, + port_in_db['network_id']) + return plugin.update_port(context, id, port) + + def delete_port(self, context, id, l3_port_check=True): + port_in_db = self._get_port(context, id) + plugin = self._get_plugin_by_network_id(context, + port_in_db['network_id']) + return plugin.delete_port(context, id, l3_port_check) + + # This is necessary since there is a case that + # NeutronManager.get_plugin()._make_port_dict is called. 
+ def _make_port_dict(self, port): + context = neutron_context.get_admin_context() + plugin = self._get_plugin_by_network_id(context, + port['network_id']) + return plugin._make_port_dict(port) + + def get_port(self, context, id, fields=None): + port_in_db = self._get_port(context, id) + plugin = self._get_plugin_by_network_id(context, + port_in_db['network_id']) + return plugin.get_port(context, id, fields) + + def get_ports(self, context, filters=None, fields=None): + all_ports = [] + for flavor, plugin in self.plugins.items(): + if filters: + #NOTE: copy each time since a target plugin may modify + # plugin_filters. + plugin_filters = filters.copy() + else: + plugin_filters = {} + plugin_filters[ext_flavor.FLAVOR_NETWORK] = [flavor] + ports = plugin.get_ports(context, plugin_filters, fields) + all_ports += ports + return all_ports + + def create_subnet(self, context, subnet): + s = subnet['subnet'] + if 'network_id' not in s: + raise exc.NotFound + plugin = self._get_plugin_by_network_id(context, + s['network_id']) + return plugin.create_subnet(context, subnet) + + def update_subnet(self, context, id, subnet): + s = self.get_subnet(context, id) + plugin = self._get_plugin_by_network_id(context, + s['network_id']) + return plugin.update_subnet(context, id, subnet) + + def delete_subnet(self, context, id): + s = self.get_subnet(context, id) + plugin = self._get_plugin_by_network_id(context, + s['network_id']) + return plugin.delete_subnet(context, id) + + def _extend_router_dict(self, context, router): + flavor = self._get_flavor_by_router_id(context, router['id']) + router[ext_flavor.FLAVOR_ROUTER] = flavor + + def create_router(self, context, router): + r = router['router'] + flavor = r.get(ext_flavor.FLAVOR_ROUTER) + if str(flavor) not in self.l3_plugins: + flavor = self.default_l3_flavor + plugin = self._get_l3_plugin(flavor) + r_in_db = plugin.create_router(context, router) + LOG.debug(_("Created router: %(router_id)s with flavor " + "%(flavor)s"), + 
{'router_id': r_in_db['id'], 'flavor': flavor}) + try: + meta_db_v2.add_router_flavor_binding(context.session, + flavor, str(r_in_db['id'])) + except Exception: + LOG.exception(_('Failed to add flavor bindings')) + plugin.delete_router(context, r_in_db['id']) + raise FaildToAddFlavorBinding() + + LOG.debug(_("Created router: %s"), r_in_db['id']) + self._extend_router_dict(context, r_in_db) + return r_in_db + + def update_router(self, context, id, router): + flavor = meta_db_v2.get_flavor_by_router(context.session, id) + plugin = self._get_l3_plugin(flavor) + return plugin.update_router(context, id, router) + + def delete_router(self, context, id): + flavor = meta_db_v2.get_flavor_by_router(context.session, id) + plugin = self._get_l3_plugin(flavor) + return plugin.delete_router(context, id) + + def get_router(self, context, id, fields=None): + flavor = meta_db_v2.get_flavor_by_router(context.session, id) + plugin = self._get_l3_plugin(flavor) + router = plugin.get_router(context, id, fields) + if not fields or ext_flavor.FLAVOR_ROUTER in fields: + self._extend_router_dict(context, router) + return router + + def get_routers_with_flavor(self, context, filters=None, + fields=None): + collection = self._model_query(context, l3_db.Router) + r_model = meta_models_v2.RouterFlavor + collection = collection.join(r_model, + l3_db.Router.id == r_model.router_id) + if filters: + for key, value in filters.iteritems(): + if key == ext_flavor.FLAVOR_ROUTER: + column = meta_models_v2.RouterFlavor.flavor + else: + column = getattr(l3_db.Router, key, None) + if column: + collection = collection.filter(column.in_(value)) + return [self._make_router_dict(c, fields) for c in collection] + + def get_routers(self, context, filters=None, fields=None): + routers = self.get_routers_with_flavor(context, filters, + None) + return [self.get_router(context, router['id'], + fields) + for router in routers] diff --git a/neutron/plugins/metaplugin/proxy_neutron_plugin.py 
b/neutron/plugins/metaplugin/proxy_neutron_plugin.py new file mode 100644 index 000000000..61cc34026 --- /dev/null +++ b/neutron/plugins/metaplugin/proxy_neutron_plugin.py @@ -0,0 +1,136 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012, Nachi Ueno, NTT MCL, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo.config import cfg + +from neutron.db import db_base_plugin_v2 +from neutron.db import external_net_db +from neutron.db import l3_db +from neutron.openstack.common import log as logging +from neutronclient.common import exceptions +from neutronclient.v2_0 import client + + +LOG = logging.getLogger(__name__) + + +class ProxyPluginV2(db_base_plugin_v2.NeutronDbPluginV2, + external_net_db.External_net_db_mixin, + l3_db.L3_NAT_db_mixin): + supported_extension_aliases = ["external-net", "router"] + + def __init__(self, configfile=None): + super(ProxyPluginV2, self).__init__() + self.neutron = client.Client( + username=cfg.CONF.PROXY.admin_user, + password=cfg.CONF.PROXY.admin_password, + tenant_name=cfg.CONF.PROXY.admin_tenant_name, + auth_url=cfg.CONF.PROXY.auth_url, + auth_strategy=cfg.CONF.PROXY.auth_strategy, + region_name=cfg.CONF.PROXY.auth_region + ) + + def _get_client(self): + return self.neutron + + def create_subnet(self, context, subnet): + subnet_remote = self._get_client().create_subnet(subnet) + subnet['subnet']['id'] = subnet_remote['id'] + tenant_id = self._get_tenant_id_for_create(context, 
subnet['subnet']) + subnet['subnet']['tenant_id'] = tenant_id + try: + subnet_in_db = super(ProxyPluginV2, self).create_subnet( + context, subnet) + except Exception: + self._get_client().delete_subnet(subnet_remote['id']) + return subnet_in_db + + def update_subnet(self, context, id, subnet): + subnet_in_db = super(ProxyPluginV2, self).update_subnet( + context, id, subnet) + try: + self._get_client().update_subnet(id, subnet) + except Exception as e: + LOG.error(_("Update subnet failed: %s"), e) + return subnet_in_db + + def delete_subnet(self, context, id): + try: + self._get_client().delete_subnet(id) + except exceptions.NotFound: + LOG.warn(_("Subnet in remote have already deleted")) + return super(ProxyPluginV2, self).delete_subnet(context, id) + + def create_network(self, context, network): + network_remote = self._get_client().create_network(network) + network['network']['id'] = network_remote['id'] + tenant_id = self._get_tenant_id_for_create(context, network['network']) + network['network']['tenant_id'] = tenant_id + try: + network_in_db = super(ProxyPluginV2, self).create_network( + context, network) + except Exception: + self._get_client().delete_network(network_remote['id']) + return network_in_db + + def update_network(self, context, id, network): + network_in_db = super(ProxyPluginV2, self).update_network( + context, id, network) + try: + self._get_client().update_network(id, network) + except Exception as e: + LOG.error(_("Update network failed: %s"), e) + return network_in_db + + def delete_network(self, context, id): + try: + self._get_client().delete_network(id) + except exceptions.NetworkNotFoundClient: + LOG.warn(_("Network in remote have already deleted")) + return super(ProxyPluginV2, self).delete_network(context, id) + + def create_port(self, context, port): + port_remote = self._get_client().create_port(port) + port['port']['id'] = port_remote['id'] + tenant_id = self._get_tenant_id_for_create(context, port['port']) + 
port['port']['tenant_id'] = tenant_id + try: + port_in_db = super(ProxyPluginV2, self).create_port( + context, port) + except Exception: + self._get_client().delete_port(port_remote['id']) + return port_in_db + + def update_port(self, context, id, port): + port_in_db = super(ProxyPluginV2, self).update_port( + context, id, port) + try: + self._get_client().update_port(id, port) + except Exception as e: + LOG.error(_("Update port failed: %s"), e) + return port_in_db + + def delete_port(self, context, id, l3_port_check=True): + if l3_port_check: + self.prevent_l3_port_deletion(context, id) + self.disassociate_floatingips(context, id) + + try: + self._get_client().delete_port(id) + except exceptions.PortNotFoundClient: + LOG.warn(_("Port in remote have already deleted")) + return super(ProxyPluginV2, self).delete_port(context, id) diff --git a/neutron/plugins/midonet/__init__.py b/neutron/plugins/midonet/__init__.py new file mode 100644 index 000000000..439ff6594 --- /dev/null +++ b/neutron/plugins/midonet/__init__.py @@ -0,0 +1,17 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (C) 2012 Midokura Japan K.K. +# Copyright (C) 2013 Midokura PTE LTD +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
diff --git a/neutron/plugins/midonet/agent/__init__.py b/neutron/plugins/midonet/agent/__init__.py new file mode 100644 index 000000000..9fddc1976 --- /dev/null +++ b/neutron/plugins/midonet/agent/__init__.py @@ -0,0 +1,16 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (C) 2013 Midokura PTE LTD +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/neutron/plugins/midonet/agent/midonet_driver.py b/neutron/plugins/midonet/agent/midonet_driver.py new file mode 100644 index 000000000..ada98a3d1 --- /dev/null +++ b/neutron/plugins/midonet/agent/midonet_driver.py @@ -0,0 +1,52 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (C) 2013 Midokura PTE LTD +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# +# @author: Rossella Sblendido, Midokura Japan KK +# @author: Tomoe Sugihara, Midokura Japan KK +# @author: Ryu Ishimoto, Midokura Japan KK + +from neutron.agent.linux import dhcp +from neutron.openstack.common import log as logging +from neutron.plugins.midonet.common import config # noqa + +LOG = logging.getLogger(__name__) + + +class DhcpNoOpDriver(dhcp.DhcpLocalProcess): + + @classmethod + def existing_dhcp_networks(cls, conf, root_helper): + """Return a list of existing networks ids that we have configs for.""" + return [] + + @classmethod + def check_version(cls): + """Execute version checks on DHCP server.""" + return float(1.0) + + def disable(self, retain_port=False): + """Disable DHCP for this network.""" + if not retain_port: + self.device_manager.destroy(self.network, self.interface_name) + self._remove_config_files() + + def reload_allocations(self): + """Force the DHCP server to reload the assignment database.""" + pass + + def spawn_process(self): + pass diff --git a/neutron/plugins/midonet/common/__init__.py b/neutron/plugins/midonet/common/__init__.py new file mode 100644 index 000000000..9fddc1976 --- /dev/null +++ b/neutron/plugins/midonet/common/__init__.py @@ -0,0 +1,16 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (C) 2013 Midokura PTE LTD +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
diff --git a/neutron/plugins/midonet/common/config.py b/neutron/plugins/midonet/common/config.py new file mode 100644 index 000000000..924474f5b --- /dev/null +++ b/neutron/plugins/midonet/common/config.py @@ -0,0 +1,46 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (C) 2012 Midokura Japan K.K. +# Copyright (C) 2013 Midokura PTE LTD +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Tomoe Sugihara, Midokura Japan KK + +from oslo.config import cfg + +midonet_opts = [ + cfg.StrOpt('midonet_uri', default='http://localhost:8080/midonet-api', + help=_('MidoNet API server URI.')), + cfg.StrOpt('username', default='admin', + help=_('MidoNet admin username.')), + cfg.StrOpt('password', default='passw0rd', + secret=True, + help=_('MidoNet admin password.')), + cfg.StrOpt('project_id', + default='77777777-7777-7777-7777-777777777777', + help=_('ID of the project that MidoNet admin user' + 'belongs to.')), + cfg.StrOpt('provider_router_id', + help=_('Virtual provider router ID.')), + cfg.StrOpt('mode', + default='dev', + help=_('Operational mode. 
Internal dev use only.')), + cfg.StrOpt('midonet_host_uuid_path', + default='/etc/midolman/host_uuid.properties', + help=_('Path to midonet host uuid file')) +] + + +cfg.CONF.register_opts(midonet_opts, "MIDONET") diff --git a/neutron/plugins/midonet/common/net_util.py b/neutron/plugins/midonet/common/net_util.py new file mode 100644 index 000000000..884048675 --- /dev/null +++ b/neutron/plugins/midonet/common/net_util.py @@ -0,0 +1,68 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (C) 2013 Midokura PTE LTD +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# +# @author: Ryu Ishimoto, Midokura Japan KK + + +from neutron.common import constants + + +def subnet_str(cidr): + """Convert the cidr string to x.x.x.x_y format + + :param cidr: CIDR in x.x.x.x/y format + """ + if cidr is None: + return None + return cidr.replace("/", "_") + + +def net_addr(addr): + """Get network address prefix and length from a given address.""" + if addr is None: + return (None, None) + nw_addr, nw_len = addr.split('/') + nw_len = int(nw_len) + return nw_addr, nw_len + + +def get_ethertype_value(ethertype): + """Convert string representation of ethertype to the numerical.""" + if ethertype is None: + return None + mapping = { + 'ipv4': 0x0800, + 'ipv6': 0x86DD, + 'arp': 0x806 + } + return mapping.get(ethertype.lower()) + + +def get_protocol_value(protocol): + """Convert string representation of protocol to the numerical.""" + if protocol is None: + return None + + if isinstance(protocol, int): + return protocol + + mapping = { + constants.PROTO_NAME_TCP: constants.PROTO_NUM_TCP, + constants.PROTO_NAME_UDP: constants.PROTO_NUM_UDP, + constants.PROTO_NAME_ICMP: constants.PROTO_NUM_ICMP + } + return mapping.get(protocol.lower()) diff --git a/neutron/plugins/midonet/midonet_lib.py b/neutron/plugins/midonet/midonet_lib.py new file mode 100644 index 000000000..74d2bae6a --- /dev/null +++ b/neutron/plugins/midonet/midonet_lib.py @@ -0,0 +1,696 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (C) 2012 Midokura Japan K.K. +# Copyright (C) 2013 Midokura PTE LTD +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Tomoe Sugihara, Midokura Japan KK +# @author: Ryu Ishimoto, Midokura Japan KK +# @author: Rossella Sblendido, Midokura Japan KK +# @author: Duarte Nunes, Midokura Japan KK + +from midonetclient import exc +from webob import exc as w_exc + +from neutron.common import exceptions as n_exc +from neutron.openstack.common import log as logging +from neutron.plugins.midonet.common import net_util + +LOG = logging.getLogger(__name__) + + +def handle_api_error(fn): + """Wrapper for methods that throws custom exceptions.""" + def wrapped(*args, **kwargs): + try: + return fn(*args, **kwargs) + except (w_exc.HTTPException, + exc.MidoApiConnectionError) as ex: + raise MidonetApiException(msg=ex) + return wrapped + + +class MidonetResourceNotFound(n_exc.NotFound): + message = _('MidoNet %(resource_type)s %(id)s could not be found') + + +class MidonetApiException(n_exc.NeutronException): + message = _("MidoNet API error: %(msg)s") + + +class MidoClient: + + def __init__(self, mido_api): + self.mido_api = mido_api + + @classmethod + def _fill_dto(cls, dto, fields): + for field_name, field_value in fields.iteritems(): + # We assume the setters are named the + # same way as the attributes themselves. 
+ try: + getattr(dto, field_name)(field_value) + except AttributeError: + pass + return dto + + @classmethod + def _create_dto(cls, dto, fields): + return cls._fill_dto(dto, fields).create() + + @classmethod + def _update_dto(cls, dto, fields): + return cls._fill_dto(dto, fields).update() + + @handle_api_error + def create_bridge(self, **kwargs): + """Create a new bridge + + :param \**kwargs: configuration of the new bridge + :returns: newly created bridge + """ + LOG.debug(_("MidoClient.create_bridge called: " + "kwargs=%(kwargs)s"), {'kwargs': kwargs}) + return self._create_dto(self.mido_api.add_bridge(), kwargs) + + @handle_api_error + def delete_bridge(self, id): + """Delete a bridge + + :param id: id of the bridge + """ + LOG.debug(_("MidoClient.delete_bridge called: id=%(id)s"), {'id': id}) + return self.mido_api.delete_bridge(id) + + @handle_api_error + def get_bridge(self, id): + """Get a bridge + + :param id: id of the bridge + :returns: requested bridge. None if bridge does not exist. 
+ """ + LOG.debug(_("MidoClient.get_bridge called: id=%s"), id) + try: + return self.mido_api.get_bridge(id) + except w_exc.HTTPNotFound: + raise MidonetResourceNotFound(resource_type='Bridge', id=id) + + @handle_api_error + def update_bridge(self, id, **kwargs): + """Update a bridge of the given id with the new fields + + :param id: id of the bridge + :param \**kwargs: the fields to update and their values + :returns: bridge object + """ + LOG.debug(_("MidoClient.update_bridge called: " + "id=%(id)s, kwargs=%(kwargs)s"), + {'id': id, 'kwargs': kwargs}) + try: + return self._update_dto(self.mido_api.get_bridge(id), kwargs) + except w_exc.HTTPNotFound: + raise MidonetResourceNotFound(resource_type='Bridge', id=id) + + @handle_api_error + def create_dhcp(self, bridge, gateway_ip, cidr, host_rts=None, + dns_servers=None): + """Create a new DHCP entry + + :param bridge: bridge object to add dhcp to + :param gateway_ip: IP address of gateway + :param cidr: subnet represented as x.x.x.x/y + :param host_rts: list of routes set in the host + :param dns_servers: list of dns servers + :returns: newly created dhcp + """ + LOG.debug(_("MidoClient.create_dhcp called: bridge=%(bridge)s, " + "cidr=%(cidr)s, gateway_ip=%(gateway_ip)s, " + "host_rts=%(host_rts)s, dns_servers=%(dns_servers)s"), + {'bridge': bridge, 'cidr': cidr, 'gateway_ip': gateway_ip, + 'host_rts': host_rts, 'dns_servers': dns_servers}) + self.mido_api.add_bridge_dhcp(bridge, gateway_ip, cidr, + host_rts=host_rts, + dns_nservers=dns_servers) + + @handle_api_error + def add_dhcp_host(self, bridge, cidr, ip, mac): + """Add DHCP host entry + + :param bridge: bridge the DHCP is configured for + :param cidr: subnet represented as x.x.x.x/y + :param ip: IP address + :param mac: MAC address + """ + LOG.debug(_("MidoClient.add_dhcp_host called: bridge=%(bridge)s, " + "cidr=%(cidr)s, ip=%(ip)s, mac=%(mac)s"), + {'bridge': bridge, 'cidr': cidr, 'ip': ip, 'mac': mac}) + subnet = 
bridge.get_dhcp_subnet(net_util.subnet_str(cidr)) + if subnet is None: + raise MidonetApiException(msg=_("Tried to add to " + "non-existent DHCP")) + + subnet.add_dhcp_host().ip_addr(ip).mac_addr(mac).create() + + @handle_api_error + def remove_dhcp_host(self, bridge, cidr, ip, mac): + """Remove DHCP host entry + + :param bridge: bridge the DHCP is configured for + :param cidr: subnet represented as x.x.x.x/y + :param ip: IP address + :param mac: MAC address + """ + LOG.debug(_("MidoClient.remove_dhcp_host called: bridge=%(bridge)s, " + "cidr=%(cidr)s, ip=%(ip)s, mac=%(mac)s"), + {'bridge': bridge, 'cidr': cidr, 'ip': ip, 'mac': mac}) + subnet = bridge.get_dhcp_subnet(net_util.subnet_str(cidr)) + if subnet is None: + LOG.warn(_("Tried to delete mapping from non-existent subnet")) + return + + for dh in subnet.get_dhcp_hosts(): + if dh.get_mac_addr() == mac and dh.get_ip_addr() == ip: + LOG.debug(_("MidoClient.remove_dhcp_host: Deleting %(dh)r"), + {"dh": dh}) + dh.delete() + + @handle_api_error + def delete_dhcp_host(self, bridge_id, cidr, ip, mac): + """Delete DHCP host entry + + :param bridge_id: id of the bridge of the DHCP + :param cidr: subnet represented as x.x.x.x/y + :param ip: IP address + :param mac: MAC address + """ + LOG.debug(_("MidoClient.delete_dhcp_host called: " + "bridge_id=%(bridge_id)s, cidr=%(cidr)s, ip=%(ip)s, " + "mac=%(mac)s"), {'bridge_id': bridge_id, + 'cidr': cidr, + 'ip': ip, 'mac': mac}) + bridge = self.get_bridge(bridge_id) + self.remove_dhcp_host(bridge, net_util.subnet_str(cidr), ip, mac) + + @handle_api_error + def delete_dhcp(self, bridge, cidr): + """Delete a DHCP entry + + :param bridge: bridge to remove DHCP from + :param cidr: subnet represented as x.x.x.x/y + """ + LOG.debug(_("MidoClient.delete_dhcp called: bridge=%(bridge)s, " + "cidr=%(cidr)s"), + {'bridge': bridge, 'cidr': cidr}) + dhcp_subnets = bridge.get_dhcp_subnets() + net_addr, net_len = net_util.net_addr(cidr) + if not dhcp_subnets: + raise MidonetApiException( + 
msg=_("Tried to delete non-existent DHCP")) + for dhcp in dhcp_subnets: + if dhcp.get_subnet_prefix() == net_addr: + dhcp.delete() + break + + @handle_api_error + def delete_port(self, id, delete_chains=False): + """Delete a port + + :param id: id of the port + """ + LOG.debug(_("MidoClient.delete_port called: id=%(id)s, " + "delete_chains=%(delete_chains)s"), + {'id': id, 'delete_chains': delete_chains}) + if delete_chains: + self.delete_port_chains(id) + + self.mido_api.delete_port(id) + + @handle_api_error + def get_port(self, id): + """Get a port + + :param id: id of the port + :returns: requested port. None if it does not exist + """ + LOG.debug(_("MidoClient.get_port called: id=%(id)s"), {'id': id}) + try: + return self.mido_api.get_port(id) + except w_exc.HTTPNotFound: + raise MidonetResourceNotFound(resource_type='Port', id=id) + + @handle_api_error + def add_bridge_port(self, bridge, **kwargs): + """Add a port on a bridge + + :param bridge: bridge to add a new port to + :param \**kwargs: configuration of the new port + :returns: newly created port + """ + LOG.debug(_("MidoClient.add_bridge_port called: " + "bridge=%(bridge)s, kwargs=%(kwargs)s"), + {'bridge': bridge, 'kwargs': kwargs}) + return self._create_dto(self.mido_api.add_bridge_port(bridge), kwargs) + + @handle_api_error + def update_port(self, id, **kwargs): + """Update a port of the given id with the new fields + + :param id: id of the port + :param \**kwargs: the fields to update and their values + """ + LOG.debug(_("MidoClient.update_port called: " + "id=%(id)s, kwargs=%(kwargs)s"), + {'id': id, 'kwargs': kwargs}) + try: + return self._update_dto(self.mido_api.get_port(id), kwargs) + except w_exc.HTTPNotFound: + raise MidonetResourceNotFound(resource_type='Port', id=id) + + @handle_api_error + def add_router_port(self, router, **kwargs): + """Add a new port to an existing router. 
+ + :param router: router to add a new port to + :param \**kwargs: configuration of the new port + :returns: newly created port + """ + return self._create_dto(self.mido_api.add_router_port(router), kwargs) + + @handle_api_error + def create_router(self, **kwargs): + """Create a new router + + :param \**kwargs: configuration of the new router + :returns: newly created router + """ + LOG.debug(_("MidoClient.create_router called: " + "kwargs=%(kwargs)s"), {'kwargs': kwargs}) + return self._create_dto(self.mido_api.add_router(), kwargs) + + @handle_api_error + def delete_router(self, id): + """Delete a router + + :param id: id of the router + """ + LOG.debug(_("MidoClient.delete_router called: id=%(id)s"), {'id': id}) + return self.mido_api.delete_router(id) + + @handle_api_error + def get_router(self, id): + """Get a router with the given id + + :param id: id of the router + :returns: requested router object. None if it does not exist. + """ + LOG.debug(_("MidoClient.get_router called: id=%(id)s"), {'id': id}) + try: + return self.mido_api.get_router(id) + except w_exc.HTTPNotFound: + raise MidonetResourceNotFound(resource_type='Router', id=id) + + @handle_api_error + def update_router(self, id, **kwargs): + """Update a router of the given id with the new name + + :param id: id of the router + :param \**kwargs: the fields to update and their values + :returns: router object + """ + LOG.debug(_("MidoClient.update_router called: " + "id=%(id)s, kwargs=%(kwargs)s"), + {'id': id, 'kwargs': kwargs}) + try: + return self._update_dto(self.mido_api.get_router(id), kwargs) + except w_exc.HTTPNotFound: + raise MidonetResourceNotFound(resource_type='Router', id=id) + + @handle_api_error + def delete_route(self, id): + return self.mido_api.delete_route(id) + + @handle_api_error + def add_dhcp_route_option(self, bridge, cidr, gw_ip, dst_ip): + """Add Option121 route to subnet + + :param bridge: Bridge to add the option route to + :param cidr: subnet represented as x.x.x.x/y + 
:param gw_ip: IP address of the next hop + :param dst_ip: IP address of the destination, in x.x.x.x/y format + """ + LOG.debug(_("MidoClient.add_dhcp_route_option called: " + "bridge=%(bridge)s, cidr=%(cidr)s, gw_ip=%(gw_ip)s" + "dst_ip=%(dst_ip)s"), + {"bridge": bridge, "cidr": cidr, "gw_ip": gw_ip, + "dst_ip": dst_ip}) + subnet = bridge.get_dhcp_subnet(net_util.subnet_str(cidr)) + if subnet is None: + raise MidonetApiException( + msg=_("Tried to access non-existent DHCP")) + prefix, length = dst_ip.split("/") + routes = [{'destinationPrefix': prefix, 'destinationLength': length, + 'gatewayAddr': gw_ip}] + cur_routes = subnet.get_opt121_routes() + if cur_routes: + routes = routes + cur_routes + subnet.opt121_routes(routes).update() + + @handle_api_error + def link(self, port, peer_id): + """Link a port to a given peerId.""" + self.mido_api.link(port, peer_id) + + @handle_api_error + def delete_port_routes(self, routes, port_id): + """Remove routes whose next hop port is the given port ID.""" + for route in routes: + if route.get_next_hop_port() == port_id: + self.mido_api.delete_route(route.get_id()) + + @handle_api_error + def get_router_routes(self, router_id): + """Get all routes for the given router.""" + return self.mido_api.get_router_routes(router_id) + + @handle_api_error + def unlink(self, port): + """Unlink a port + + :param port: port object + """ + LOG.debug(_("MidoClient.unlink called: port=%(port)s"), + {'port': port}) + if port.get_peer_id(): + self.mido_api.unlink(port) + else: + LOG.warn(_("Attempted to unlink a port that was not linked. 
%s"), + port.get_id()) + + @handle_api_error + def remove_rules_by_property(self, tenant_id, chain_name, key, value): + """Remove all the rules that match the provided key and value.""" + LOG.debug(_("MidoClient.remove_rules_by_property called: " + "tenant_id=%(tenant_id)s, chain_name=%(chain_name)s" + "key=%(key)s, value=%(value)s"), + {'tenant_id': tenant_id, 'chain_name': chain_name, + 'key': key, 'value': value}) + chain = self.get_chain_by_name(tenant_id, chain_name) + if chain is None: + raise MidonetResourceNotFound(resource_type='Chain', + id=chain_name) + + for r in chain.get_rules(): + if key in r.get_properties(): + if r.get_properties()[key] == value: + self.mido_api.delete_rule(r.get_id()) + + @handle_api_error + def add_router_chains(self, router, inbound_chain_name, + outbound_chain_name): + """Create chains for a new router. + + Creates inbound and outbound chains for the router with the given + names, and the new chains are set on the router. + + :param router: router to set chains for + :param inbound_chain_name: Name of the inbound chain + :param outbound_chain_name: Name of the outbound chain + """ + LOG.debug(_("MidoClient.create_router_chains called: " + "router=%(router)s, inbound_chain_name=%(in_chain)s, " + "outbound_chain_name=%(out_chain)s"), + {"router": router, "in_chain": inbound_chain_name, + "out_chain": outbound_chain_name}) + tenant_id = router.get_tenant_id() + + inbound_chain = self.mido_api.add_chain().tenant_id(tenant_id).name( + inbound_chain_name,).create() + outbound_chain = self.mido_api.add_chain().tenant_id(tenant_id).name( + outbound_chain_name).create() + + # set chains to in/out filters + router.inbound_filter_id(inbound_chain.get_id()).outbound_filter_id( + outbound_chain.get_id()).update() + return inbound_chain, outbound_chain + + @handle_api_error + def delete_router_chains(self, id): + """Deletes chains of a router. 
+ + :param id: router ID to delete chains of + """ + LOG.debug(_("MidoClient.delete_router_chains called: " + "id=%(id)s"), {'id': id}) + router = self.get_router(id) + if (router.get_inbound_filter_id()): + self.mido_api.delete_chain(router.get_inbound_filter_id()) + + if (router.get_outbound_filter_id()): + self.mido_api.delete_chain(router.get_outbound_filter_id()) + + @handle_api_error + def delete_port_chains(self, id): + """Deletes chains of a port. + + :param id: port ID to delete chains of + """ + LOG.debug(_("MidoClient.delete_port_chains called: " + "id=%(id)s"), {'id': id}) + port = self.get_port(id) + if (port.get_inbound_filter_id()): + self.mido_api.delete_chain(port.get_inbound_filter_id()) + + if (port.get_outbound_filter_id()): + self.mido_api.delete_chain(port.get_outbound_filter_id()) + + @handle_api_error + def get_link_port(self, router, peer_router_id): + """Setup a route on the router to the next hop router.""" + LOG.debug(_("MidoClient.get_link_port called: " + "router=%(router)s, peer_router_id=%(peer_router_id)s"), + {'router': router, 'peer_router_id': peer_router_id}) + # Find the port linked between the two routers + link_port = None + for p in router.get_peer_ports(): + if p.get_device_id() == peer_router_id: + link_port = p + break + return link_port + + @handle_api_error + def add_router_route(self, router, type='Normal', + src_network_addr=None, src_network_length=None, + dst_network_addr=None, dst_network_length=None, + next_hop_port=None, next_hop_gateway=None, + weight=100): + """Setup a route on the router.""" + return self.mido_api.add_router_route( + router, type=type, src_network_addr=src_network_addr, + src_network_length=src_network_length, + dst_network_addr=dst_network_addr, + dst_network_length=dst_network_length, + next_hop_port=next_hop_port, next_hop_gateway=next_hop_gateway, + weight=weight) + + @handle_api_error + def add_static_nat(self, tenant_id, chain_name, from_ip, to_ip, port_id, + nat_type='dnat', **kwargs): 
+ """Add a static NAT entry + + :param tenant_id: owner of the chain to add a NAT to + :param chain_name: name of the chain to add a NAT to + :param from_ip: IP to translate from + :param to_ip: IP to translate to + :param port_id: port to match on + :param nat_type: 'dnat' or 'snat' + """ + LOG.debug(_("MidoClient.add_static_nat called: " + "tenant_id=%(tenant_id)s, chain_name=%(chain_name)s, " + "from_ip=%(from_ip)s, to_ip=%(to_ip)s, " + "port_id=%(port_id)s, nat_type=%(nat_type)s"), + {'tenant_id': tenant_id, 'chain_name': chain_name, + 'from_ip': from_ip, 'to_ip': to_ip, + 'port_id': port_id, 'nat_type': nat_type}) + if nat_type not in ['dnat', 'snat']: + raise ValueError(_("Invalid NAT type passed in %s") % nat_type) + + chain = self.get_chain_by_name(tenant_id, chain_name) + nat_targets = [] + nat_targets.append( + {'addressFrom': to_ip, 'addressTo': to_ip, + 'portFrom': 0, 'portTo': 0}) + + rule = chain.add_rule().type(nat_type).flow_action('accept').position( + 1).nat_targets(nat_targets).properties(kwargs) + + if nat_type == 'dnat': + rule = rule.nw_dst_address(from_ip).nw_dst_length(32).in_ports( + [port_id]) + else: + rule = rule.nw_src_address(from_ip).nw_src_length(32).out_ports( + [port_id]) + + return rule.create() + + @handle_api_error + def add_dynamic_snat(self, tenant_id, pre_chain_name, post_chain_name, + snat_ip, port_id, **kwargs): + """Add SNAT masquerading rule + + MidoNet requires two rules on the router, one to do NAT to a range of + ports, and another to retrieve back the original IP in the return + flow. 
+ """ + pre_chain = self.get_chain_by_name(tenant_id, pre_chain_name) + post_chain = self.get_chain_by_name(tenant_id, post_chain_name) + + pre_chain.add_rule().nw_dst_address(snat_ip).nw_dst_length( + 32).type('rev_snat').flow_action('accept').in_ports( + [port_id]).properties(kwargs).position(1).create() + + nat_targets = [] + nat_targets.append( + {'addressFrom': snat_ip, 'addressTo': snat_ip, + 'portFrom': 1, 'portTo': 65535}) + + post_chain.add_rule().type('snat').flow_action( + 'accept').nat_targets(nat_targets).out_ports( + [port_id]).properties(kwargs).position(1).create() + + @handle_api_error + def remove_static_route(self, router, ip): + """Remove static route for the IP + + :param router: next hop router to remove the routes to + :param ip: IP address of the route to remove + """ + LOG.debug(_("MidoClient.remove_static_route called: " + "router=%(router)s, ip=%(ip)s"), + {'router': router, 'ip': ip}) + for r in router.get_routes(): + if (r.get_dst_network_addr() == ip and + r.get_dst_network_length() == 32): + self.mido_api.delete_route(r.get_id()) + + @handle_api_error + def update_port_chains(self, port, inbound_chain_id, outbound_chain_id): + """Bind inbound and outbound chains to the port.""" + LOG.debug(_("MidoClient.update_port_chains called: port=%(port)s, " + "inbound_chain_id=%(inbound_chain_id)s, " + "outbound_chain_id=%(outbound_chain_id)s"), + {"port": port, "inbound_chain_id": inbound_chain_id, + "outbound_chain_id": outbound_chain_id}) + port.inbound_filter_id(inbound_chain_id).outbound_filter_id( + outbound_chain_id).update() + + @handle_api_error + def create_chain(self, tenant_id, name): + """Create a new chain.""" + LOG.debug(_("MidoClient.create_chain called: tenant_id=%(tenant_id)s " + " name=%(name)s"), {"tenant_id": tenant_id, "name": name}) + return self.mido_api.add_chain().tenant_id(tenant_id).name( + name).create() + + @handle_api_error + def delete_chain(self, id): + """Delete chain matching the ID.""" + 
LOG.debug(_("MidoClient.delete_chain called: id=%(id)s"), {"id": id}) + self.mido_api.delete_chain(id) + + @handle_api_error + def delete_chains_by_names(self, tenant_id, names): + """Delete chains matching the names given for a tenant.""" + LOG.debug(_("MidoClient.delete_chains_by_names called: " + "tenant_id=%(tenant_id)s names=%(names)s "), + {"tenant_id": tenant_id, "names": names}) + chains = self.mido_api.get_chains({'tenant_id': tenant_id}) + for c in chains: + if c.get_name() in names: + self.mido_api.delete_chain(c.get_id()) + + @handle_api_error + def get_chain_by_name(self, tenant_id, name): + """Get the chain by its name.""" + LOG.debug(_("MidoClient.get_chain_by_name called: " + "tenant_id=%(tenant_id)s name=%(name)s "), + {"tenant_id": tenant_id, "name": name}) + for c in self.mido_api.get_chains({'tenant_id': tenant_id}): + if c.get_name() == name: + return c + return None + + @handle_api_error + def get_port_group_by_name(self, tenant_id, name): + """Get the port group by name.""" + LOG.debug(_("MidoClient.get_port_group_by_name called: " + "tenant_id=%(tenant_id)s name=%(name)s "), + {"tenant_id": tenant_id, "name": name}) + for p in self.mido_api.get_port_groups({'tenant_id': tenant_id}): + if p.get_name() == name: + return p + return None + + @handle_api_error + def create_port_group(self, tenant_id, name): + """Create a port group + + Create a new port group for a given name and ID. 
+ """ + LOG.debug(_("MidoClient.create_port_group called: " + "tenant_id=%(tenant_id)s name=%(name)s"), + {"tenant_id": tenant_id, "name": name}) + return self.mido_api.add_port_group().tenant_id(tenant_id).name( + name).create() + + @handle_api_error + def delete_port_group_by_name(self, tenant_id, name): + """Delete port group matching the name given for a tenant.""" + LOG.debug(_("MidoClient.delete_port_group_by_name called: " + "tenant_id=%(tenant_id)s name=%(name)s "), + {"tenant_id": tenant_id, "name": name}) + pgs = self.mido_api.get_port_groups({'tenant_id': tenant_id}) + for pg in pgs: + if pg.get_name() == name: + LOG.debug(_("Deleting pg %(id)s"), {"id": pg.get_id()}) + self.mido_api.delete_port_group(pg.get_id()) + + @handle_api_error + def add_port_to_port_group_by_name(self, tenant_id, name, port_id): + """Add a port to a port group with the given name.""" + LOG.debug(_("MidoClient.add_port_to_port_group_by_name called: " + "tenant_id=%(tenant_id)s name=%(name)s " + "port_id=%(port_id)s"), + {"tenant_id": tenant_id, "name": name, "port_id": port_id}) + pg = self.get_port_group_by_name(tenant_id, name) + if pg is None: + raise MidonetResourceNotFound(resource_type='PortGroup', id=name) + + pg = pg.add_port_group_port().port_id(port_id).create() + return pg + + @handle_api_error + def remove_port_from_port_groups(self, port_id): + """Remove a port binding from all the port groups.""" + LOG.debug(_("MidoClient.remove_port_from_port_groups called: " + "port_id=%(port_id)s"), {"port_id": port_id}) + port = self.get_port(port_id) + for pg in port.get_port_groups(): + pg.delete() + + @handle_api_error + def add_chain_rule(self, chain, action='accept', **kwargs): + """Create a new accept chain rule.""" + self.mido_api.add_chain_rule(chain, action, **kwargs) diff --git a/neutron/plugins/midonet/plugin.py b/neutron/plugins/midonet/plugin.py new file mode 100644 index 000000000..9a706d4a5 --- /dev/null +++ b/neutron/plugins/midonet/plugin.py @@ -0,0 +1,1258 @@ 
+# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (C) 2012 Midokura Japan K.K. +# Copyright (C) 2013 Midokura PTE LTD +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Takaaki Suzuki, Midokura Japan KK +# @author: Tomoe Sugihara, Midokura Japan KK +# @author: Ryu Ishimoto, Midokura Japan KK +# @author: Rossella Sblendido, Midokura Japan KK +# @author: Duarte Nunes, Midokura Japan KK + +from midonetclient import api +from oslo.config import cfg +from sqlalchemy.orm import exc as sa_exc + +from neutron.api.v2 import attributes +from neutron.common import constants +from neutron.common import exceptions as n_exc +from neutron.common import rpc_compat +from neutron.common import topics +from neutron.db import agents_db +from neutron.db import agentschedulers_db +from neutron.db import db_base_plugin_v2 +from neutron.db import dhcp_rpc_base +from neutron.db import external_net_db +from neutron.db import l3_db +from neutron.db import models_v2 +from neutron.db import portbindings_db +from neutron.db import securitygroups_db +from neutron.extensions import external_net as ext_net +from neutron.extensions import l3 +from neutron.extensions import portbindings +from neutron.extensions import securitygroup as ext_sg +from neutron.openstack.common import excutils +from neutron.openstack.common import log as logging +from neutron.plugins.midonet.common import config # noqa +from neutron.plugins.midonet.common import net_util +from 
neutron.plugins.midonet import midonet_lib + +LOG = logging.getLogger(__name__) + +EXTERNAL_GW_INFO = l3.EXTERNAL_GW_INFO + +METADATA_DEFAULT_IP = "169.254.169.254/32" +OS_FLOATING_IP_RULE_KEY = 'OS_FLOATING_IP' +OS_SG_RULE_KEY = 'OS_SG_RULE_ID' +OS_TENANT_ROUTER_RULE_KEY = 'OS_TENANT_ROUTER_RULE' +PRE_ROUTING_CHAIN_NAME = "OS_PRE_ROUTING_%s" +PORT_INBOUND_CHAIN_NAME = "OS_PORT_%s_INBOUND" +PORT_OUTBOUND_CHAIN_NAME = "OS_PORT_%s_OUTBOUND" +POST_ROUTING_CHAIN_NAME = "OS_POST_ROUTING_%s" +SG_INGRESS_CHAIN_NAME = "OS_SG_%s_INGRESS" +SG_EGRESS_CHAIN_NAME = "OS_SG_%s_EGRESS" +SG_PORT_GROUP_NAME = "OS_PG_%s" +SNAT_RULE = 'SNAT' + + +def _get_nat_ips(type, fip): + """Get NAT IP address information. + + From the route type given, determine the source and target IP addresses + from the provided floating IP DB object. + """ + if type == 'pre-routing': + return fip["floating_ip_address"], fip["fixed_ip_address"] + elif type == 'post-routing': + return fip["fixed_ip_address"], fip["floating_ip_address"] + else: + raise ValueError(_("Invalid nat_type %s") % type) + + +def _nat_chain_names(router_id): + """Get the chain names for NAT. + + These names are used to associate MidoNet chains to the NAT rules + applied to the router. For each of these, there are two NAT types, + 'dnat' and 'snat' that are returned as keys, and the corresponding + chain names as their values. + """ + pre_routing_name = PRE_ROUTING_CHAIN_NAME % router_id + post_routing_name = POST_ROUTING_CHAIN_NAME % router_id + return {'pre-routing': pre_routing_name, 'post-routing': post_routing_name} + + +def _sg_chain_names(sg_id): + """Get the chain names for security group. + + These names are used to associate a security group to MidoNet chains. + There are two names for ingress and egress security group directions. 
+ """ + ingress = SG_INGRESS_CHAIN_NAME % sg_id + egress = SG_EGRESS_CHAIN_NAME % sg_id + return {'ingress': ingress, 'egress': egress} + + +def _port_chain_names(port_id): + """Get the chain names for a port. + + These are chains to hold security group chains. + """ + inbound = PORT_INBOUND_CHAIN_NAME % port_id + outbound = PORT_OUTBOUND_CHAIN_NAME % port_id + return {'inbound': inbound, 'outbound': outbound} + + +def _sg_port_group_name(sg_id): + """Get the port group name for security group.. + + This name is used to associate a security group to MidoNet port groups. + """ + return SG_PORT_GROUP_NAME % sg_id + + +def _rule_direction(sg_direction): + """Convert the SG direction to MidoNet direction + + MidoNet terms them 'inbound' and 'outbound' instead of 'ingress' and + 'egress'. Also, the direction is reversed since MidoNet sees it + from the network port's point of view, not the VM's. + """ + if sg_direction == 'ingress': + return 'outbound' + elif sg_direction == 'egress': + return 'inbound' + else: + raise ValueError(_("Unrecognized direction %s") % sg_direction) + + +def _is_router_interface_port(port): + """Check whether the given port is a router interface port.""" + device_owner = port['device_owner'] + return (device_owner in l3_db.DEVICE_OWNER_ROUTER_INTF) + + +def _is_router_gw_port(port): + """Check whether the given port is a router gateway port.""" + device_owner = port['device_owner'] + return (device_owner in l3_db.DEVICE_OWNER_ROUTER_GW) + + +def _is_vif_port(port): + """Check whether the given port is a standard VIF port.""" + device_owner = port['device_owner'] + return (not _is_dhcp_port(port) and + device_owner not in (l3_db.DEVICE_OWNER_ROUTER_GW, + l3_db.DEVICE_OWNER_ROUTER_INTF)) + + +def _is_dhcp_port(port): + """Check whether the given port is a DHCP port.""" + device_owner = port['device_owner'] + return device_owner.startswith(constants.DEVICE_OWNER_DHCP) + + +def _check_resource_exists(func, id, name, raise_exc=False): + """Check 
whether the given resource exists in MidoNet data store.""" + try: + func(id) + except midonet_lib.MidonetResourceNotFound as exc: + LOG.error(_("There is no %(name)s with ID %(id)s in MidoNet."), + {"name": name, "id": id}) + if raise_exc: + raise MidonetPluginException(msg=exc) + + +class MidoRpcCallbacks(rpc_compat.RpcCallback, + dhcp_rpc_base.DhcpRpcCallbackMixin): + RPC_API_VERSION = '1.1' + + +class MidonetPluginException(n_exc.NeutronException): + message = _("%(msg)s") + + +class MidonetPluginV2(db_base_plugin_v2.NeutronDbPluginV2, + portbindings_db.PortBindingMixin, + external_net_db.External_net_db_mixin, + l3_db.L3_NAT_db_mixin, + agentschedulers_db.DhcpAgentSchedulerDbMixin, + securitygroups_db.SecurityGroupDbMixin): + + supported_extension_aliases = ['external-net', 'router', 'security-group', + 'agent', 'dhcp_agent_scheduler', 'binding'] + __native_bulk_support = False + + def __init__(self): + super(MidonetPluginV2, self).__init__() + # Read config values + midonet_conf = cfg.CONF.MIDONET + midonet_uri = midonet_conf.midonet_uri + admin_user = midonet_conf.username + admin_pass = midonet_conf.password + admin_project_id = midonet_conf.project_id + self.provider_router_id = midonet_conf.provider_router_id + self.provider_router = None + + self.mido_api = api.MidonetApi(midonet_uri, admin_user, + admin_pass, + project_id=admin_project_id) + self.client = midonet_lib.MidoClient(self.mido_api) + + # self.provider_router_id should have been set. 
+ if self.provider_router_id is None: + msg = _('provider_router_id should be configured in the plugin ' + 'config file') + LOG.exception(msg) + raise MidonetPluginException(msg=msg) + + self.setup_rpc() + + self.base_binding_dict = { + portbindings.VIF_TYPE: portbindings.VIF_TYPE_MIDONET, + portbindings.VIF_DETAILS: { + # TODO(rkukura): Replace with new VIF security details + portbindings.CAP_PORT_FILTER: + 'security-group' in self.supported_extension_aliases}} + + def _get_provider_router(self): + if self.provider_router is None: + self.provider_router = self.client.get_router( + self.provider_router_id) + return self.provider_router + + def _dhcp_mappings(self, context, fixed_ips, mac): + for fixed_ip in fixed_ips: + subnet = self._get_subnet(context, fixed_ip["subnet_id"]) + if subnet["ip_version"] == 6: + # TODO(ryu) handle IPv6 + continue + if not subnet["enable_dhcp"]: + # Skip if DHCP is disabled + continue + yield subnet['cidr'], fixed_ip["ip_address"], mac + + def _metadata_subnets(self, context, fixed_ips): + for fixed_ip in fixed_ips: + subnet = self._get_subnet(context, fixed_ip["subnet_id"]) + if subnet["ip_version"] == 6: + continue + yield subnet['cidr'], fixed_ip["ip_address"] + + def _initialize_port_chains(self, port, in_chain, out_chain, sg_ids): + + tenant_id = port["tenant_id"] + + position = 1 + # mac spoofing protection + self._add_chain_rule(in_chain, action='drop', + dl_src=port["mac_address"], inv_dl_src=True, + position=position) + + # ip spoofing protection + for fixed_ip in port["fixed_ips"]: + position += 1 + self._add_chain_rule(in_chain, action="drop", + src_addr=fixed_ip["ip_address"] + "/32", + inv_nw_src=True, dl_type=0x0800, # IPv4 + position=position) + + # conntrack + position += 1 + self._add_chain_rule(in_chain, action='accept', + match_forward_flow=True, + position=position) + + # Reset the position to process egress + position = 1 + + # Add rule for SGs + if sg_ids: + for sg_id in sg_ids: + chain_name = 
_sg_chain_names(sg_id)["ingress"] + chain = self.client.get_chain_by_name(tenant_id, chain_name) + self._add_chain_rule(out_chain, action='jump', + jump_chain_id=chain.get_id(), + jump_chain_name=chain_name, + position=position) + position += 1 + + # add reverse flow matching at the end + self._add_chain_rule(out_chain, action='accept', + match_return_flow=True, + position=position) + position += 1 + + # fall back DROP rule at the end except for ARP + self._add_chain_rule(out_chain, action='drop', + dl_type=0x0806, # ARP + inv_dl_type=True, position=position) + + def _bind_port_to_sgs(self, context, port, sg_ids): + self._process_port_create_security_group(context, port, sg_ids) + if sg_ids is not None: + for sg_id in sg_ids: + pg_name = _sg_port_group_name(sg_id) + self.client.add_port_to_port_group_by_name( + port["tenant_id"], pg_name, port["id"]) + + def _unbind_port_from_sgs(self, context, port_id): + self._delete_port_security_group_bindings(context, port_id) + self.client.remove_port_from_port_groups(port_id) + + def _create_accept_chain_rule(self, context, sg_rule, chain=None): + direction = sg_rule["direction"] + tenant_id = sg_rule["tenant_id"] + sg_id = sg_rule["security_group_id"] + chain_name = _sg_chain_names(sg_id)[direction] + + if chain is None: + chain = self.client.get_chain_by_name(tenant_id, chain_name) + + pg_id = None + if sg_rule["remote_group_id"] is not None: + pg_name = _sg_port_group_name(sg_id) + pg = self.client.get_port_group_by_name(tenant_id, pg_name) + pg_id = pg.get_id() + + props = {OS_SG_RULE_KEY: str(sg_rule["id"])} + + # Determine source or destination address by looking at direction + src_pg_id = dst_pg_id = None + src_addr = dst_addr = None + src_port_to = dst_port_to = None + src_port_from = dst_port_from = None + if direction == "egress": + dst_pg_id = pg_id + dst_addr = sg_rule["remote_ip_prefix"] + dst_port_from = sg_rule["port_range_min"] + dst_port_to = sg_rule["port_range_max"] + else: + src_pg_id = pg_id + src_addr = 
sg_rule["remote_ip_prefix"] + src_port_from = sg_rule["port_range_min"] + src_port_to = sg_rule["port_range_max"] + + return self._add_chain_rule( + chain, action='accept', port_group_src=src_pg_id, + port_group_dst=dst_pg_id, + src_addr=src_addr, src_port_from=src_port_from, + src_port_to=src_port_to, + dst_addr=dst_addr, dst_port_from=dst_port_from, + dst_port_to=dst_port_to, + nw_proto=net_util.get_protocol_value(sg_rule["protocol"]), + dl_type=net_util.get_ethertype_value(sg_rule["ethertype"]), + properties=props) + + def _remove_nat_rules(self, context, fip): + router = self.client.get_router(fip["router_id"]) + self.client.remove_static_route(self._get_provider_router(), + fip["floating_ip_address"]) + + chain_names = _nat_chain_names(router.get_id()) + for _type, name in chain_names.iteritems(): + self.client.remove_rules_by_property( + router.get_tenant_id(), name, + OS_FLOATING_IP_RULE_KEY, fip["id"]) + + def setup_rpc(self): + # RPC support + self.topic = topics.PLUGIN + self.conn = rpc_compat.create_connection(new=True) + self.endpoints = [MidoRpcCallbacks(), + agents_db.AgentExtRpcCallback()] + self.conn.create_consumer(self.topic, self.endpoints, + fanout=False) + # Consume from all consumers in threads + self.conn.consume_in_threads() + + def create_subnet(self, context, subnet): + """Create Neutron subnet. + + Creates a Neutron subnet and a DHCP entry in MidoNet bridge. 
+ """ + LOG.debug(_("MidonetPluginV2.create_subnet called: subnet=%r"), subnet) + + s = subnet["subnet"] + net = super(MidonetPluginV2, self).get_network( + context, subnet['subnet']['network_id'], fields=None) + + session = context.session + with session.begin(subtransactions=True): + sn_entry = super(MidonetPluginV2, self).create_subnet(context, + subnet) + bridge = self.client.get_bridge(sn_entry['network_id']) + + gateway_ip = s['gateway_ip'] + cidr = s['cidr'] + if s['enable_dhcp']: + dns_nameservers = None + host_routes = None + if s['dns_nameservers'] is not attributes.ATTR_NOT_SPECIFIED: + dns_nameservers = s['dns_nameservers'] + + if s['host_routes'] is not attributes.ATTR_NOT_SPECIFIED: + host_routes = s['host_routes'] + + self.client.create_dhcp(bridge, gateway_ip, cidr, + host_rts=host_routes, + dns_servers=dns_nameservers) + + # For external network, link the bridge to the provider router. + if net['router:external']: + self._link_bridge_to_gw_router( + bridge, self._get_provider_router(), gateway_ip, cidr) + + LOG.debug(_("MidonetPluginV2.create_subnet exiting: sn_entry=%r"), + sn_entry) + return sn_entry + + def delete_subnet(self, context, id): + """Delete Neutron subnet. + + Delete neutron network and its corresponding MidoNet bridge. 
+ """ + LOG.debug(_("MidonetPluginV2.delete_subnet called: id=%s"), id) + subnet = super(MidonetPluginV2, self).get_subnet(context, id, + fields=None) + net = super(MidonetPluginV2, self).get_network(context, + subnet['network_id'], + fields=None) + session = context.session + with session.begin(subtransactions=True): + + super(MidonetPluginV2, self).delete_subnet(context, id) + bridge = self.client.get_bridge(subnet['network_id']) + if subnet['enable_dhcp']: + self.client.delete_dhcp(bridge, subnet['cidr']) + + # If the network is external, clean up routes, links, ports + if net[ext_net.EXTERNAL]: + self._unlink_bridge_from_gw_router( + bridge, self._get_provider_router()) + + LOG.debug(_("MidonetPluginV2.delete_subnet exiting")) + + def create_network(self, context, network): + """Create Neutron network. + + Create a new Neutron network and its corresponding MidoNet bridge. + """ + LOG.debug(_('MidonetPluginV2.create_network called: network=%r'), + network) + net_data = network['network'] + tenant_id = self._get_tenant_id_for_create(context, net_data) + net_data['tenant_id'] = tenant_id + self._ensure_default_security_group(context, tenant_id) + + bridge = self.client.create_bridge(**net_data) + net_data['id'] = bridge.get_id() + + session = context.session + with session.begin(subtransactions=True): + net = super(MidonetPluginV2, self).create_network(context, network) + self._process_l3_create(context, net, net_data) + + LOG.debug(_("MidonetPluginV2.create_network exiting: net=%r"), net) + return net + + def update_network(self, context, id, network): + """Update Neutron network. + + Update an existing Neutron network and its corresponding MidoNet + bridge. 
+ """ + LOG.debug(_("MidonetPluginV2.update_network called: id=%(id)r, " + "network=%(network)r"), {'id': id, 'network': network}) + session = context.session + with session.begin(subtransactions=True): + net = super(MidonetPluginV2, self).update_network( + context, id, network) + self._process_l3_update(context, net, network['network']) + self.client.update_bridge(id, **network['network']) + + LOG.debug(_("MidonetPluginV2.update_network exiting: net=%r"), net) + return net + + def get_network(self, context, id, fields=None): + """Get Neutron network. + + Retrieves a Neutron network and its corresponding MidoNet bridge. + """ + LOG.debug(_("MidonetPluginV2.get_network called: id=%(id)r, " + "fields=%(fields)r"), {'id': id, 'fields': fields}) + qnet = super(MidonetPluginV2, self).get_network(context, id, fields) + self.client.get_bridge(id) + + LOG.debug(_("MidonetPluginV2.get_network exiting: qnet=%r"), qnet) + return qnet + + def delete_network(self, context, id): + """Delete a network and its corresponding MidoNet bridge.""" + LOG.debug(_("MidonetPluginV2.delete_network called: id=%r"), id) + self.client.delete_bridge(id) + try: + with context.session.begin(subtransactions=True): + self._process_l3_delete(context, id) + super(MidonetPluginV2, self).delete_network(context, id) + except Exception: + with excutils.save_and_reraise_exception(): + LOG.error(_('Failed to delete neutron db, while Midonet ' + 'bridge=%r had been deleted'), id) + + def create_port(self, context, port): + """Create a L2 port in Neutron/MidoNet.""" + LOG.debug(_("MidonetPluginV2.create_port called: port=%r"), port) + port_data = port['port'] + + # Create a bridge port in MidoNet and set the bridge port ID as the + # port ID in Neutron. 
+ bridge = self.client.get_bridge(port_data["network_id"]) + tenant_id = bridge.get_tenant_id() + asu = port_data.get("admin_state_up", True) + bridge_port = self.client.add_bridge_port(bridge, + admin_state_up=asu) + port_data["id"] = bridge_port.get_id() + + try: + session = context.session + with session.begin(subtransactions=True): + # Create a Neutron port + new_port = super(MidonetPluginV2, self).create_port(context, + port) + port_data.update(new_port) + self._ensure_default_security_group_on_port(context, + port) + if _is_vif_port(port_data): + # Bind security groups to the port + sg_ids = self._get_security_groups_on_port(context, port) + self._bind_port_to_sgs(context, new_port, sg_ids) + + # Create port chains + port_chains = {} + for d, name in _port_chain_names( + new_port["id"]).iteritems(): + port_chains[d] = self.client.create_chain(tenant_id, + name) + + self._initialize_port_chains(port_data, + port_chains['inbound'], + port_chains['outbound'], + sg_ids) + + # Update the port with the chain + self.client.update_port_chains( + bridge_port, port_chains["inbound"].get_id(), + port_chains["outbound"].get_id()) + + # DHCP mapping is only for VIF ports + for cidr, ip, mac in self._dhcp_mappings( + context, port_data["fixed_ips"], + port_data["mac_address"]): + self.client.add_dhcp_host(bridge, cidr, ip, mac) + + elif _is_dhcp_port(port_data): + # For DHCP port, add a metadata route + for cidr, ip in self._metadata_subnets( + context, port_data["fixed_ips"]): + self.client.add_dhcp_route_option(bridge, cidr, ip, + METADATA_DEFAULT_IP) + + self._process_portbindings_create_and_update(context, + port_data, new_port) + except Exception as ex: + # Try removing the MidoNet port before raising an exception. 
+ with excutils.save_and_reraise_exception(): + LOG.error(_("Failed to create a port on network %(net_id)s: " + "%(err)s"), + {"net_id": port_data["network_id"], "err": ex}) + self.client.delete_port(bridge_port.get_id()) + + LOG.debug(_("MidonetPluginV2.create_port exiting: port=%r"), new_port) + return new_port + + def get_port(self, context, id, fields=None): + """Retrieve port.""" + LOG.debug(_("MidonetPluginV2.get_port called: id=%(id)s " + "fields=%(fields)r"), {'id': id, 'fields': fields}) + port = super(MidonetPluginV2, self).get_port(context, id, fields) + "Check if the port exists in MidoNet DB""" + try: + self.client.get_port(id) + except midonet_lib.MidonetResourceNotFound as exc: + LOG.error(_("There is no port with ID %(id)s in MidoNet."), + {"id": id}) + port['status'] = constants.PORT_STATUS_ERROR + raise exc + LOG.debug(_("MidonetPluginV2.get_port exiting: port=%r"), port) + return port + + def get_ports(self, context, filters=None, fields=None): + """List neutron ports and verify that they exist in MidoNet.""" + LOG.debug(_("MidonetPluginV2.get_ports called: filters=%(filters)s " + "fields=%(fields)r"), + {'filters': filters, 'fields': fields}) + ports = super(MidonetPluginV2, self).get_ports(context, filters, + fields) + return ports + + def delete_port(self, context, id, l3_port_check=True): + """Delete a neutron port and corresponding MidoNet bridge port.""" + LOG.debug(_("MidonetPluginV2.delete_port called: id=%(id)s " + "l3_port_check=%(l3_port_check)r"), + {'id': id, 'l3_port_check': l3_port_check}) + # if needed, check to see if this is a port owned by + # and l3-router. If so, we should prevent deletion. + if l3_port_check: + self.prevent_l3_port_deletion(context, id) + + self.disassociate_floatingips(context, id) + port = self.get_port(context, id) + device_id = port['device_id'] + # If this port is for router interface/gw, unlink and delete. 
+ if _is_router_interface_port(port): + self._unlink_bridge_from_router(device_id, id) + elif _is_router_gw_port(port): + # Gateway removed + # Remove all the SNAT rules that are tagged. + router = self._get_router(context, device_id) + tenant_id = router["tenant_id"] + chain_names = _nat_chain_names(device_id) + for _type, name in chain_names.iteritems(): + self.client.remove_rules_by_property( + tenant_id, name, OS_TENANT_ROUTER_RULE_KEY, + SNAT_RULE) + # Remove the default routes and unlink + self._remove_router_gateway(port['device_id']) + + self.client.delete_port(id, delete_chains=True) + try: + for cidr, ip, mac in self._dhcp_mappings( + context, port["fixed_ips"], port["mac_address"]): + self.client.delete_dhcp_host(port["network_id"], cidr, ip, + mac) + except Exception: + LOG.error(_("Failed to delete DHCP mapping for port %(id)s"), + {"id": id}) + + super(MidonetPluginV2, self).delete_port(context, id) + + def update_port(self, context, id, port): + """Handle port update, including security groups and fixed IPs.""" + with context.session.begin(subtransactions=True): + + # Get the port and save the fixed IPs + old_port = self._get_port(context, id) + net_id = old_port["network_id"] + mac = old_port["mac_address"] + old_ips = old_port["fixed_ips"] + # update the port DB + p = super(MidonetPluginV2, self).update_port(context, id, port) + + if "admin_state_up" in port["port"]: + asu = port["port"]["admin_state_up"] + mido_port = self.client.update_port(id, admin_state_up=asu) + + # If we're changing the admin_state_up flag and the port is + # associated with a router, then we also need to update the + # peer port. 
+ if _is_router_interface_port(p): + self.client.update_port(mido_port.get_peer_id(), + admin_state_up=asu) + + new_ips = p["fixed_ips"] + if new_ips: + bridge = self.client.get_bridge(net_id) + # If it's a DHCP port, add a route to reach the MD server + if _is_dhcp_port(p): + for cidr, ip in self._metadata_subnets( + context, new_ips): + self.client.add_dhcp_route_option( + bridge, cidr, ip, METADATA_DEFAULT_IP) + else: + # IPs have changed. Re-map the DHCP entries + for cidr, ip, mac in self._dhcp_mappings( + context, old_ips, mac): + self.client.remove_dhcp_host( + bridge, cidr, ip, mac) + + for cidr, ip, mac in self._dhcp_mappings( + context, new_ips, mac): + self.client.add_dhcp_host( + bridge, cidr, ip, mac) + + if (self._check_update_deletes_security_groups(port) or + self._check_update_has_security_groups(port)): + self._unbind_port_from_sgs(context, p["id"]) + sg_ids = self._get_security_groups_on_port(context, port) + self._bind_port_to_sgs(context, p, sg_ids) + + self._process_portbindings_create_and_update(context, + port['port'], + p) + return p + + def create_router(self, context, router): + """Handle router creation. + + When a new Neutron router is created, its corresponding MidoNet router + is also created. In MidoNet, this router is initialized with chains + for inbound and outbound traffic, which will be used to hold other + chains that include various rules, such as NAT. + + :param router: Router information provided to create a new router. 
+ """ + + # NOTE(dcahill): Similar to the NSX plugin, we completely override + # this method in order to be able to use the MidoNet ID as Neutron ID + # TODO(dcahill): Propose upstream patch for allowing + # 3rd parties to specify IDs as we do with l2 plugin + LOG.debug(_("MidonetPluginV2.create_router called: router=%(router)s"), + {"router": router}) + r = router['router'] + tenant_id = self._get_tenant_id_for_create(context, r) + r['tenant_id'] = tenant_id + mido_router = self.client.create_router(**r) + mido_router_id = mido_router.get_id() + + try: + has_gw_info = False + if EXTERNAL_GW_INFO in r: + has_gw_info = True + gw_info = r.pop(EXTERNAL_GW_INFO) + with context.session.begin(subtransactions=True): + # pre-generate id so it will be available when + # configuring external gw port + router_db = l3_db.Router(id=mido_router_id, + tenant_id=tenant_id, + name=r['name'], + admin_state_up=r['admin_state_up'], + status="ACTIVE") + context.session.add(router_db) + if has_gw_info: + self._update_router_gw_info(context, router_db['id'], + gw_info) + + router_data = self._make_router_dict(router_db) + + except Exception: + # Try removing the midonet router + with excutils.save_and_reraise_exception(): + self.client.delete_router(mido_router_id) + + # Create router chains + chain_names = _nat_chain_names(mido_router_id) + try: + self.client.add_router_chains(mido_router, + chain_names["pre-routing"], + chain_names["post-routing"]) + except Exception: + # Set the router status to Error + with context.session.begin(subtransactions=True): + r = self._get_router(context, router_data["id"]) + router_data['status'] = constants.NET_STATUS_ERROR + r['status'] = router_data['status'] + context.session.add(r) + + LOG.debug(_("MidonetPluginV2.create_router exiting: " + "router_data=%(router_data)s."), + {"router_data": router_data}) + return router_data + + def _set_router_gateway(self, id, gw_router, gw_ip): + """Set router uplink gateway + + :param ID: ID of the router + 
:param gw_router: gateway router to link to + :param gw_ip: gateway IP address + """ + LOG.debug(_("MidonetPluginV2.set_router_gateway called: id=%(id)s, " + "gw_router=%(gw_router)s, gw_ip=%(gw_ip)s"), + {'id': id, 'gw_router': gw_router, 'gw_ip': gw_ip}), + + router = self.client.get_router(id) + + # Create a port in the gw router + gw_port = self.client.add_router_port(gw_router, + port_address='169.254.255.1', + network_address='169.254.255.0', + network_length=30) + + # Create a port in the router + port = self.client.add_router_port(router, + port_address='169.254.255.2', + network_address='169.254.255.0', + network_length=30) + + # Link them + self.client.link(gw_port, port.get_id()) + + # Add a route for gw_ip to bring it down to the router + self.client.add_router_route(gw_router, type='Normal', + src_network_addr='0.0.0.0', + src_network_length=0, + dst_network_addr=gw_ip, + dst_network_length=32, + next_hop_port=gw_port.get_id(), + weight=100) + + # Add default route to uplink in the router + self.client.add_router_route(router, type='Normal', + src_network_addr='0.0.0.0', + src_network_length=0, + dst_network_addr='0.0.0.0', + dst_network_length=0, + next_hop_port=port.get_id(), + weight=100) + + def _remove_router_gateway(self, id): + """Clear router gateway + + :param ID: ID of the router + """ + LOG.debug(_("MidonetPluginV2.remove_router_gateway called: " + "id=%(id)s"), {'id': id}) + router = self.client.get_router(id) + + # delete the port that is connected to the gateway router + for p in router.get_ports(): + if p.get_port_address() == '169.254.255.2': + peer_port_id = p.get_peer_id() + if peer_port_id is not None: + self.client.unlink(p) + self.client.delete_port(peer_port_id) + + # delete default route + for r in router.get_routes(): + if (r.get_dst_network_addr() == '0.0.0.0' and + r.get_dst_network_length() == 0): + self.client.delete_route(r.get_id()) + + def update_router(self, context, id, router): + """Handle router updates.""" + 
LOG.debug(_("MidonetPluginV2.update_router called: id=%(id)s " + "router=%(router)r"), {"id": id, "router": router}) + + router_data = router["router"] + + # Check if the update included changes to the gateway. + gw_updated = l3_db.EXTERNAL_GW_INFO in router_data + with context.session.begin(subtransactions=True): + + # Update the Neutron DB + r = super(MidonetPluginV2, self).update_router(context, id, + router) + tenant_id = r["tenant_id"] + if gw_updated: + if (l3_db.EXTERNAL_GW_INFO in r and + r[l3_db.EXTERNAL_GW_INFO] is not None): + # Gateway created + gw_port_neutron = self._get_port( + context.elevated(), r["gw_port_id"]) + gw_ip = gw_port_neutron['fixed_ips'][0]['ip_address'] + + # First link routers and set up the routes + self._set_router_gateway(r["id"], + self._get_provider_router(), + gw_ip) + gw_port_midonet = self.client.get_link_port( + self._get_provider_router(), r["id"]) + + # Get the NAT chains and add dynamic SNAT rules. + chain_names = _nat_chain_names(r["id"]) + props = {OS_TENANT_ROUTER_RULE_KEY: SNAT_RULE} + self.client.add_dynamic_snat(tenant_id, + chain_names['pre-routing'], + chain_names['post-routing'], + gw_ip, + gw_port_midonet.get_id(), + **props) + + self.client.update_router(id, **router_data) + + LOG.debug(_("MidonetPluginV2.update_router exiting: router=%r"), r) + return r + + def delete_router(self, context, id): + """Handler for router deletion. + + Deleting a router on Neutron simply means deleting its corresponding + router in MidoNet. 
+ + :param id: router ID to remove + """ + LOG.debug(_("MidonetPluginV2.delete_router called: id=%s"), id) + + self.client.delete_router_chains(id) + self.client.delete_router(id) + + super(MidonetPluginV2, self).delete_router(context, id) + + def _link_bridge_to_gw_router(self, bridge, gw_router, gw_ip, cidr): + """Link a bridge to the gateway router + + :param bridge: bridge + :param gw_router: gateway router to link to + :param gw_ip: IP address of gateway + :param cidr: network CIDR + """ + net_addr, net_len = net_util.net_addr(cidr) + + # create a port on the gateway router + gw_port = self.client.add_router_port(gw_router, port_address=gw_ip, + network_address=net_addr, + network_length=net_len) + + # create a bridge port, then link it to the router. + port = self.client.add_bridge_port(bridge) + self.client.link(gw_port, port.get_id()) + + # add a route for the subnet in the gateway router + self.client.add_router_route(gw_router, type='Normal', + src_network_addr='0.0.0.0', + src_network_length=0, + dst_network_addr=net_addr, + dst_network_length=net_len, + next_hop_port=gw_port.get_id(), + weight=100) + + def _unlink_bridge_from_gw_router(self, bridge, gw_router): + """Unlink a bridge from the gateway router + + :param bridge: bridge to unlink + :param gw_router: gateway router to unlink from + """ + # Delete routes and unlink the router and the bridge. 
+ routes = self.client.get_router_routes(gw_router.get_id()) + + bridge_ports_to_delete = [ + p for p in gw_router.get_peer_ports() + if p.get_device_id() == bridge.get_id()] + + for p in bridge.get_peer_ports(): + if p.get_device_id() == gw_router.get_id(): + # delete the routes going to the bridge + for r in routes: + if r.get_next_hop_port() == p.get_id(): + self.client.delete_route(r.get_id()) + self.client.unlink(p) + self.client.delete_port(p.get_id()) + + # delete bridge port + for port in bridge_ports_to_delete: + self.client.delete_port(port.get_id()) + + def _link_bridge_to_router(self, router, bridge_port, net_addr, net_len, + gw_ip, metadata_gw_ip): + router_port = self.client.add_router_port( + router, network_length=net_len, network_address=net_addr, + port_address=gw_ip, admin_state_up=bridge_port['admin_state_up']) + self.client.link(router_port, bridge_port['id']) + self.client.add_router_route(router, type='Normal', + src_network_addr='0.0.0.0', + src_network_length=0, + dst_network_addr=net_addr, + dst_network_length=net_len, + next_hop_port=router_port.get_id(), + weight=100) + + if metadata_gw_ip: + # Add a route for the metadata server. + # Not all VM images supports DHCP option 121. Add a route for the + # Metadata server in the router to forward the packet to the bridge + # that will send them to the Metadata Proxy. 
+ md_net_addr, md_net_len = net_util.net_addr(METADATA_DEFAULT_IP) + self.client.add_router_route( + router, type='Normal', src_network_addr=net_addr, + src_network_length=net_len, + dst_network_addr=md_net_addr, + dst_network_length=md_net_len, + next_hop_port=router_port.get_id(), + next_hop_gateway=metadata_gw_ip) + + def _unlink_bridge_from_router(self, router_id, bridge_port_id): + """Unlink a bridge from a router.""" + + # Remove the routes to the port and unlink the port + bridge_port = self.client.get_port(bridge_port_id) + routes = self.client.get_router_routes(router_id) + self.client.delete_port_routes(routes, bridge_port.get_peer_id()) + self.client.unlink(bridge_port) + + def add_router_interface(self, context, router_id, interface_info): + """Handle router linking with network.""" + LOG.debug(_("MidonetPluginV2.add_router_interface called: " + "router_id=%(router_id)s " + "interface_info=%(interface_info)r"), + {'router_id': router_id, 'interface_info': interface_info}) + + with context.session.begin(subtransactions=True): + info = super(MidonetPluginV2, self).add_router_interface( + context, router_id, interface_info) + + try: + subnet = self._get_subnet(context, info["subnet_id"]) + cidr = subnet["cidr"] + net_addr, net_len = net_util.net_addr(cidr) + router = self.client.get_router(router_id) + + # Get the metadata GW IP + metadata_gw_ip = None + rport_qry = context.session.query(models_v2.Port) + dhcp_ports = rport_qry.filter_by( + network_id=subnet["network_id"], + device_owner=constants.DEVICE_OWNER_DHCP).all() + if dhcp_ports and dhcp_ports[0].fixed_ips: + metadata_gw_ip = dhcp_ports[0].fixed_ips[0].ip_address + else: + LOG.warn(_("DHCP agent is not working correctly. 
No port " + "to reach the Metadata server on this network")) + # Link the router and the bridge + port = super(MidonetPluginV2, self).get_port(context, + info["port_id"]) + self._link_bridge_to_router(router, port, net_addr, + net_len, subnet["gateway_ip"], + metadata_gw_ip) + except Exception: + LOG.error(_("Failed to create MidoNet resources to add router " + "interface. info=%(info)s, router_id=%(router_id)s"), + {"info": info, "router_id": router_id}) + with excutils.save_and_reraise_exception(): + with context.session.begin(subtransactions=True): + self.remove_router_interface(context, router_id, info) + + LOG.debug(_("MidonetPluginV2.add_router_interface exiting: " + "info=%r"), info) + return info + + def _assoc_fip(self, fip): + router = self.client.get_router(fip["router_id"]) + link_port = self.client.get_link_port( + self._get_provider_router(), router.get_id()) + self.client.add_router_route( + self._get_provider_router(), + src_network_addr='0.0.0.0', + src_network_length=0, + dst_network_addr=fip["floating_ip_address"], + dst_network_length=32, + next_hop_port=link_port.get_peer_id()) + props = {OS_FLOATING_IP_RULE_KEY: fip['id']} + tenant_id = router.get_tenant_id() + chain_names = _nat_chain_names(router.get_id()) + for chain_type, name in chain_names.items(): + src_ip, target_ip = _get_nat_ips(chain_type, fip) + if chain_type == 'pre-routing': + nat_type = 'dnat' + else: + nat_type = 'snat' + self.client.add_static_nat(tenant_id, name, src_ip, + target_ip, + link_port.get_id(), + nat_type, **props) + + def create_floatingip(self, context, floatingip): + session = context.session + with session.begin(subtransactions=True): + fip = super(MidonetPluginV2, self).create_floatingip( + context, floatingip) + if fip['port_id']: + self._assoc_fip(fip) + return fip + + def update_floatingip(self, context, id, floatingip): + """Handle floating IP association and disassociation.""" + LOG.debug(_("MidonetPluginV2.update_floatingip called: id=%(id)s " + 
"floatingip=%(floatingip)s "), + {'id': id, 'floatingip': floatingip}) + + session = context.session + with session.begin(subtransactions=True): + if floatingip['floatingip']['port_id']: + fip = super(MidonetPluginV2, self).update_floatingip( + context, id, floatingip) + + self._assoc_fip(fip) + + # disassociate floating IP + elif floatingip['floatingip']['port_id'] is None: + fip = super(MidonetPluginV2, self).get_floatingip(context, id) + self._remove_nat_rules(context, fip) + super(MidonetPluginV2, self).update_floatingip(context, id, + floatingip) + + LOG.debug(_("MidonetPluginV2.update_floating_ip exiting: fip=%s"), fip) + return fip + + def disassociate_floatingips(self, context, port_id): + """Disassociate floating IPs (if any) from this port.""" + try: + fip_qry = context.session.query(l3_db.FloatingIP) + fip_dbs = fip_qry.filter_by(fixed_port_id=port_id) + for fip_db in fip_dbs: + self._remove_nat_rules(context, fip_db) + except sa_exc.NoResultFound: + pass + + super(MidonetPluginV2, self).disassociate_floatingips(context, port_id) + + def create_security_group(self, context, security_group, default_sg=False): + """Create security group. + + Create a new security group, including the default security group. + In MidoNet, this means creating a pair of chains, inbound and outbound, + as well as a new port group. 
+ """ + LOG.debug(_("MidonetPluginV2.create_security_group called: " + "security_group=%(security_group)s " + "default_sg=%(default_sg)s "), + {'security_group': security_group, 'default_sg': default_sg}) + + sg = security_group.get('security_group') + tenant_id = self._get_tenant_id_for_create(context, sg) + if not default_sg: + self._ensure_default_security_group(context, tenant_id) + + # Create the Neutron sg first + sg = super(MidonetPluginV2, self).create_security_group( + context, security_group, default_sg) + + try: + # Process the MidoNet side + self.client.create_port_group(tenant_id, + _sg_port_group_name(sg["id"])) + chain_names = _sg_chain_names(sg["id"]) + chains = {} + for direction, chain_name in chain_names.iteritems(): + c = self.client.create_chain(tenant_id, chain_name) + chains[direction] = c + + # Create all the rules for this SG. Only accept rules are created + for r in sg['security_group_rules']: + self._create_accept_chain_rule(context, r, + chain=chains[r['direction']]) + except Exception: + LOG.error(_("Failed to create MidoNet resources for sg %(sg)r"), + {"sg": sg}) + with excutils.save_and_reraise_exception(): + with context.session.begin(subtransactions=True): + sg = self._get_security_group(context, sg["id"]) + context.session.delete(sg) + + LOG.debug(_("MidonetPluginV2.create_security_group exiting: sg=%r"), + sg) + return sg + + def delete_security_group(self, context, id): + """Delete chains for Neutron security group.""" + LOG.debug(_("MidonetPluginV2.delete_security_group called: id=%s"), id) + + with context.session.begin(subtransactions=True): + sg = super(MidonetPluginV2, self).get_security_group(context, id) + if not sg: + raise ext_sg.SecurityGroupNotFound(id=id) + + if sg["name"] == 'default' and not context.is_admin: + raise ext_sg.SecurityGroupCannotRemoveDefault() + + sg_id = sg['id'] + filters = {'security_group_id': [sg_id]} + if super(MidonetPluginV2, self)._get_port_security_group_bindings( + context, filters): + 
raise ext_sg.SecurityGroupInUse(id=sg_id) + + # Delete MidoNet Chains and portgroup for the SG + tenant_id = sg['tenant_id'] + self.client.delete_chains_by_names( + tenant_id, _sg_chain_names(sg["id"]).values()) + + self.client.delete_port_group_by_name( + tenant_id, _sg_port_group_name(sg["id"])) + + super(MidonetPluginV2, self).delete_security_group(context, id) + + def create_security_group_rule(self, context, security_group_rule): + """Create a security group rule + + Create a security group rule in the Neutron DB and corresponding + MidoNet resources in its data store. + """ + LOG.debug(_("MidonetPluginV2.create_security_group_rule called: " + "security_group_rule=%(security_group_rule)r"), + {'security_group_rule': security_group_rule}) + + with context.session.begin(subtransactions=True): + rule = super(MidonetPluginV2, self).create_security_group_rule( + context, security_group_rule) + + self._create_accept_chain_rule(context, rule) + + LOG.debug(_("MidonetPluginV2.create_security_group_rule exiting: " + "rule=%r"), rule) + return rule + + def delete_security_group_rule(self, context, sg_rule_id): + """Delete a security group rule + + Delete a security group rule from the Neutron DB and corresponding + MidoNet resources from its data store. 
+ """ + LOG.debug(_("MidonetPluginV2.delete_security_group_rule called: " + "sg_rule_id=%s"), sg_rule_id) + with context.session.begin(subtransactions=True): + rule = super(MidonetPluginV2, self).get_security_group_rule( + context, sg_rule_id) + + if not rule: + raise ext_sg.SecurityGroupRuleNotFound(id=sg_rule_id) + + sg = self._get_security_group(context, + rule["security_group_id"]) + chain_name = _sg_chain_names(sg["id"])[rule["direction"]] + self.client.remove_rules_by_property(rule["tenant_id"], chain_name, + OS_SG_RULE_KEY, + str(rule["id"])) + super(MidonetPluginV2, self).delete_security_group_rule( + context, sg_rule_id) + + def _add_chain_rule(self, chain, action, **kwargs): + + nw_proto = kwargs.get("nw_proto") + src_addr = kwargs.pop("src_addr", None) + dst_addr = kwargs.pop("dst_addr", None) + src_port_from = kwargs.pop("src_port_from", None) + src_port_to = kwargs.pop("src_port_to", None) + dst_port_from = kwargs.pop("dst_port_from", None) + dst_port_to = kwargs.pop("dst_port_to", None) + + # Convert to the keys and values that midonet client understands + if src_addr: + kwargs["nw_src_addr"], kwargs["nw_src_length"] = net_util.net_addr( + src_addr) + + if dst_addr: + kwargs["nw_dst_addr"], kwargs["nw_dst_length"] = net_util.net_addr( + dst_addr) + + kwargs["tp_src"] = {"start": src_port_from, "end": src_port_to} + + kwargs["tp_dst"] = {"start": dst_port_from, "end": dst_port_to} + + if nw_proto == 1: # ICMP + # Overwrite port fields regardless of the direction + kwargs["tp_src"] = {"start": src_port_from, "end": src_port_from} + kwargs["tp_dst"] = {"start": dst_port_to, "end": dst_port_to} + + return self.client.add_chain_rule(chain, action=action, **kwargs) diff --git a/neutron/plugins/ml2/README b/neutron/plugins/ml2/README new file mode 100644 index 000000000..4dce789cb --- /dev/null +++ b/neutron/plugins/ml2/README @@ -0,0 +1,53 @@ +The Modular Layer 2 (ML2) plugin is a framework allowing OpenStack +Networking to simultaneously utilize the 
variety of layer 2 networking +technologies found in complex real-world data centers. It supports the +Open vSwitch, Linux bridge, and Hyper-V L2 agents, replacing and +deprecating the monolithic plugins previously associated with those +agents, and can also support hardware devices and SDN controllers. The +ML2 framework is intended to greatly simplify adding support for new +L2 networking technologies, requiring much less initial and ongoing +effort than would be required for an additional monolithic core +plugin. It is also intended to foster innovation through its +organization as optional driver modules. + +The ML2 plugin supports all the non-vendor-specific neutron API +extensions, and works with the standard neutron DHCP agent. It +utilizes the service plugin interface to implement the L3 router +abstraction, allowing use of either the standard neutron L3 agent or +alternative L3 solutions. Additional service plugins can also be used +with the ML2 core plugin. + +Drivers within ML2 implement separately extensible sets of network +types and of mechanisms for accessing networks of those types. Unlike +with the metaplugin, multiple mechanisms can be used simultaneously to +access different ports of the same virtual network. Mechanisms can +utilize L2 agents via RPC and/or interact with external devices or +controllers. By utilizing the multiprovidernet extension, virtual +networks can be composed of multiple segments of the same or different +types. Type and mechanism drivers are loaded as python entrypoints +using the stevedore library. + +Each available network type is managed by an ML2 type driver. Type +drivers maintain any needed type-specific network state, and perform +provider network validation and tenant network allocation. As of the +havana release, drivers for the local, flat, vlan, gre, and vxlan +network types are included. + +Each available networking mechanism is managed by an ML2 mechanism +driver. 
All registered mechanism drivers are called twice when +networks, subnets, and ports are created, updated, or deleted. They +are first called as part of the DB transaction, where they can +maintain any needed driver-specific state. Once the transaction has +been committed, they are called again, at which point they can +interact with external devices and controllers. Mechanism drivers are +also called as part of the port binding process, to determine whether +the associated mechanism can provide connectivity for the network, and +if so, the network segment and VIF driver to be used. The havana +release includes mechanism drivers for the Open vSwitch, Linux bridge, +and Hyper-V L2 agents, for Arista and Cisco switches, and for the +Tail-f NCS. It also includes an L2 Population mechanism driver that +can help optimize tunneled virtual network traffic. + +For additional information regarding the ML2 plugin and its collection +of type and mechanism drivers, see the OpenStack manuals and +http://wiki.openstack.org/wiki/Neutron/ML2. diff --git a/neutron/plugins/ml2/__init__.py b/neutron/plugins/ml2/__init__.py new file mode 100644 index 000000000..788cea1f7 --- /dev/null +++ b/neutron/plugins/ml2/__init__.py @@ -0,0 +1,14 @@ +# Copyright (c) 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
diff --git a/neutron/plugins/ml2/common/__init__.py b/neutron/plugins/ml2/common/__init__.py new file mode 100644 index 000000000..788cea1f7 --- /dev/null +++ b/neutron/plugins/ml2/common/__init__.py @@ -0,0 +1,14 @@ +# Copyright (c) 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/neutron/plugins/ml2/common/exceptions.py b/neutron/plugins/ml2/common/exceptions.py new file mode 100644 index 000000000..ed94b1e1f --- /dev/null +++ b/neutron/plugins/ml2/common/exceptions.py @@ -0,0 +1,23 @@ +# Copyright (c) 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +"""Exceptions used by ML2.""" + +from neutron.common import exceptions + + +class MechanismDriverError(exceptions.NeutronException): + """Mechanism driver call failed.""" + message = _("%(method)s failed.") diff --git a/neutron/plugins/ml2/config.py b/neutron/plugins/ml2/config.py new file mode 100644 index 000000000..afce63045 --- /dev/null +++ b/neutron/plugins/ml2/config.py @@ -0,0 +1,36 @@ +# Copyright (c) 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo.config import cfg + + +ml2_opts = [ + cfg.ListOpt('type_drivers', + default=['local', 'flat', 'vlan', 'gre', 'vxlan'], + help=_("List of network type driver entrypoints to be loaded " + "from the neutron.ml2.type_drivers namespace.")), + cfg.ListOpt('tenant_network_types', + default=['local'], + help=_("Ordered list of network_types to allocate as tenant " + "networks.")), + cfg.ListOpt('mechanism_drivers', + default=[], + help=_("An ordered list of networking mechanism driver " + "entrypoints to be loaded from the " + "neutron.ml2.mechanism_drivers namespace.")), +] + + +cfg.CONF.register_opts(ml2_opts, "ml2") diff --git a/neutron/plugins/ml2/db.py b/neutron/plugins/ml2/db.py new file mode 100644 index 000000000..4cf8eed32 --- /dev/null +++ b/neutron/plugins/ml2/db.py @@ -0,0 +1,136 @@ +# Copyright (c) 2013 OpenStack Foundation +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy.orm import exc + +from neutron.db import api as db_api +from neutron.db import models_v2 +from neutron.db import securitygroups_db as sg_db +from neutron.extensions import portbindings +from neutron import manager +from neutron.openstack.common import log +from neutron.openstack.common import uuidutils +from neutron.plugins.ml2 import driver_api as api +from neutron.plugins.ml2 import models + +LOG = log.getLogger(__name__) + + +def add_network_segment(session, network_id, segment): + with session.begin(subtransactions=True): + record = models.NetworkSegment( + id=uuidutils.generate_uuid(), + network_id=network_id, + network_type=segment.get(api.NETWORK_TYPE), + physical_network=segment.get(api.PHYSICAL_NETWORK), + segmentation_id=segment.get(api.SEGMENTATION_ID) + ) + session.add(record) + LOG.info(_("Added segment %(id)s of type %(network_type)s for network" + " %(network_id)s"), + {'id': record.id, + 'network_type': record.network_type, + 'network_id': record.network_id}) + + +def get_network_segments(session, network_id): + with session.begin(subtransactions=True): + records = (session.query(models.NetworkSegment). 
+ filter_by(network_id=network_id)) + return [{api.ID: record.id, + api.NETWORK_TYPE: record.network_type, + api.PHYSICAL_NETWORK: record.physical_network, + api.SEGMENTATION_ID: record.segmentation_id} + for record in records] + + +def ensure_port_binding(session, port_id): + with session.begin(subtransactions=True): + try: + record = (session.query(models.PortBinding). + filter_by(port_id=port_id). + one()) + except exc.NoResultFound: + record = models.PortBinding( + port_id=port_id, + vif_type=portbindings.VIF_TYPE_UNBOUND) + session.add(record) + return record + + +def get_port(session, port_id): + """Get port record for update within transaction.""" + + with session.begin(subtransactions=True): + try: + record = (session.query(models_v2.Port). + filter(models_v2.Port.id.startswith(port_id)). + one()) + return record + except exc.NoResultFound: + return + except exc.MultipleResultsFound: + LOG.error(_("Multiple ports have port_id starting with %s"), + port_id) + return + + +def get_port_from_device_mac(device_mac): + LOG.debug(_("get_port_from_device_mac() called for mac %s"), device_mac) + session = db_api.get_session() + qry = session.query(models_v2.Port).filter_by(mac_address=device_mac) + return qry.first() + + +def get_port_and_sgs(port_id): + """Get port from database with security group info.""" + + LOG.debug(_("get_port_and_sgs() called for port_id %s"), port_id) + session = db_api.get_session() + sg_binding_port = sg_db.SecurityGroupPortBinding.port_id + + with session.begin(subtransactions=True): + query = session.query(models_v2.Port, + sg_db.SecurityGroupPortBinding.security_group_id) + query = query.outerjoin(sg_db.SecurityGroupPortBinding, + models_v2.Port.id == sg_binding_port) + query = query.filter(models_v2.Port.id.startswith(port_id)) + port_and_sgs = query.all() + if not port_and_sgs: + return + port = port_and_sgs[0][0] + plugin = manager.NeutronManager.get_plugin() + port_dict = plugin._make_port_dict(port) + port_dict['security_groups'] 
= [ + sg_id for port_, sg_id in port_and_sgs if sg_id] + port_dict['security_group_rules'] = [] + port_dict['security_group_source_groups'] = [] + port_dict['fixed_ips'] = [ip['ip_address'] + for ip in port['fixed_ips']] + return port_dict + + +def get_port_binding_host(port_id): + session = db_api.get_session() + with session.begin(subtransactions=True): + try: + query = (session.query(models.PortBinding). + filter(models.PortBinding.port_id.startswith(port_id)). + one()) + except exc.NoResultFound: + LOG.debug(_("No binding found for port %(port_id)s"), + {'port_id': port_id}) + return + return query.host diff --git a/neutron/plugins/ml2/driver_api.py b/neutron/plugins/ml2/driver_api.py new file mode 100644 index 000000000..2384b0cf9 --- /dev/null +++ b/neutron/plugins/ml2/driver_api.py @@ -0,0 +1,597 @@ +# Copyright (c) 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import abc +import six + +# The following keys are used in the segment dictionaries passed via +# the driver API. These are defined separately from similar keys in +# neutron.extensions.providernet so that drivers don't need to change +# if/when providernet moves to the core API. +# +ID = 'id' +NETWORK_TYPE = 'network_type' +PHYSICAL_NETWORK = 'physical_network' +SEGMENTATION_ID = 'segmentation_id' + + +@six.add_metaclass(abc.ABCMeta) +class TypeDriver(object): + """Define stable abstract interface for ML2 type drivers. 
+ + ML2 type drivers each support a specific network_type for provider + and/or tenant network segments. Type drivers must implement this + abstract interface, which defines the API by which the plugin uses + the driver to manage the persistent type-specific resource + allocation state associated with network segments of that type. + + Network segments are represented by segment dictionaries using the + NETWORK_TYPE, PHYSICAL_NETWORK, and SEGMENTATION_ID keys defined + above, corresponding to the provider attributes. Future revisions + of the TypeDriver API may add additional segment dictionary + keys. Attributes not applicable for a particular network_type may + either be excluded or stored as None. + """ + + @abc.abstractmethod + def get_type(self): + """Get driver's network type. + + :returns network_type value handled by this driver + """ + pass + + @abc.abstractmethod + def initialize(self): + """Perform driver initialization. + + Called after all drivers have been loaded and the database has + been initialized. No abstract methods defined below will be + called prior to this method being called. + """ + pass + + @abc.abstractmethod + def validate_provider_segment(self, segment): + """Validate attributes of a provider network segment. + + :param segment: segment dictionary using keys defined above + :raises: neutron.common.exceptions.InvalidInput if invalid + + Called outside transaction context to validate the provider + attributes for a provider network segment. Raise InvalidInput + if: + + - any required attribute is missing + - any prohibited or unrecognized attribute is present + - any attribute value is not valid + + The network_type attribute is present in segment, but + need not be validated. + """ + pass + + @abc.abstractmethod + def reserve_provider_segment(self, session, segment): + """Reserve resource associated with a provider network segment. 
+ + :param session: database session + :param segment: segment dictionary using keys defined above + + Called inside transaction context on session to reserve the + type-specific resource for a provider network segment. The + segment dictionary passed in was returned by a previous + validate_provider_segment() call. + """ + pass + + @abc.abstractmethod + def allocate_tenant_segment(self, session): + """Allocate resource for a new tenant network segment. + + :param session: database session + :returns: segment dictionary using keys defined above + + Called inside transaction context on session to allocate a new + tenant network, typically from a type-specific resource + pool. If successful, return a segment dictionary describing + the segment. If tenant network segment cannot be allocated + (i.e. tenant networks not supported or resource pool is + exhausted), return None. + """ + pass + + @abc.abstractmethod + def release_segment(self, session, segment): + """Release network segment. + + :param session: database session + :param segment: segment dictionary using keys defined above + + Called inside transaction context on session to release a + tenant or provider network's type-specific resource. Runtime + errors are not expected, but raising an exception will result + in rollback of the transaction. + """ + pass + + +@six.add_metaclass(abc.ABCMeta) +class NetworkContext(object): + """Context passed to MechanismDrivers for changes to network resources. + + A NetworkContext instance wraps a network resource. It provides + helper methods for accessing other relevant information. Results + from expensive operations are cached so that other + MechanismDrivers can freely access the same information. + """ + + @abc.abstractproperty + def current(self): + """Return the current state of the network. + + Return the current state of the network, as defined by + NeutronPluginBaseV2.create_network and all extensions in the + ml2 plugin. 
+ """ + pass + + @abc.abstractproperty + def original(self): + """Return the original state of the network. + + Return the original state of the network, prior to a call to + update_network. Method is only valid within calls to + update_network_precommit and update_network_postcommit. + """ + pass + + @abc.abstractproperty + def network_segments(self): + """Return the segments associated with this network resource.""" + pass + + +@six.add_metaclass(abc.ABCMeta) +class SubnetContext(object): + """Context passed to MechanismDrivers for changes to subnet resources. + + A SubnetContext instance wraps a subnet resource. It provides + helper methods for accessing other relevant information. Results + from expensive operations are cached so that other + MechanismDrivers can freely access the same information. + """ + + @abc.abstractproperty + def current(self): + """Return the current state of the subnet. + + Return the current state of the subnet, as defined by + NeutronPluginBaseV2.create_subnet and all extensions in the + ml2 plugin. + """ + pass + + @abc.abstractproperty + def original(self): + """Return the original state of the subnet. + + Return the original state of the subnet, prior to a call to + update_subnet. Method is only valid within calls to + update_subnet_precommit and update_subnet_postcommit. + """ + pass + + +@six.add_metaclass(abc.ABCMeta) +class PortContext(object): + """Context passed to MechanismDrivers for changes to port resources. + + A PortContext instance wraps a port resource. It provides helper + methods for accessing other relevant information. Results from + expensive operations are cached so that other MechanismDrivers can + freely access the same information. + """ + + @abc.abstractproperty + def current(self): + """Return the current state of the port. + + Return the current state of the port, as defined by + NeutronPluginBaseV2.create_port and all extensions in the ml2 + plugin. 
+ """ + pass + + @abc.abstractproperty + def original(self): + """Return the original state of the port. + + Return the original state of the port, prior to a call to + update_port. Method is only valid within calls to + update_port_precommit and update_port_postcommit. + """ + pass + + @abc.abstractproperty + def network(self): + """Return the NetworkContext associated with this port.""" + pass + + @abc.abstractproperty + def bound_segment(self): + """Return the currently bound segment dictionary.""" + pass + + @abc.abstractproperty + def original_bound_segment(self): + """Return the original bound segment dictionary. + + Return the original bound segment dictionary, prior to a call + to update_port. Method is only valid within calls to + update_port_precommit and update_port_postcommit. + """ + pass + + @abc.abstractproperty + def bound_driver(self): + """Return the currently bound mechanism driver name.""" + pass + + @abc.abstractproperty + def original_bound_driver(self): + """Return the original bound mechanism driver name. + + Return the original bound mechanism driver name, prior to a + call to update_port. Method is only valid within calls to + update_port_precommit and update_port_postcommit. + """ + pass + + @abc.abstractmethod + def host_agents(self, agent_type): + """Get agents of the specified type on port's host. + + :param agent_type: Agent type identifier + :returns: List of agents_db.Agent records + """ + pass + + @abc.abstractmethod + def set_binding(self, segment_id, vif_type, vif_details, + status=None): + """Set the binding for the port. + + :param segment_id: Network segment bound for the port. + :param vif_type: The VIF type for the bound port. + :param vif_details: Dictionary with details for VIF driver. + :param status: Port status to set if not None. + + Called by MechanismDriver.bind_port to indicate success and + specify binding details to use for port. The segment_id must + identify an item in network.network_segments. 
+ """ + pass + + +@six.add_metaclass(abc.ABCMeta) +class MechanismDriver(object): + """Define stable abstract interface for ML2 mechanism drivers. + + A mechanism driver is called on the creation, update, and deletion + of networks and ports. For every event, there are two methods that + get called - one within the database transaction (method suffix of + _precommit), one right afterwards (method suffix of _postcommit). + + Exceptions raised by methods called inside the transaction can + rollback, but should not make any blocking calls (for example, + REST requests to an outside controller). Methods called after + transaction commits can make blocking external calls, though these + will block the entire process. Exceptions raised in calls after + the transaction commits may cause the associated resource to be + deleted. + + Because rollback outside of the transaction is not done in the + update network/port case, all data validation must be done within + methods that are part of the database transaction. + """ + + @abc.abstractmethod + def initialize(self): + """Perform driver initialization. + + Called after all drivers have been loaded and the database has + been initialized. No abstract methods defined below will be + called prior to this method being called. + """ + pass + + def create_network_precommit(self, context): + """Allocate resources for a new network. + + :param context: NetworkContext instance describing the new + network. + + Create a new network, allocating resources as necessary in the + database. Called inside transaction context on session. Call + cannot block. Raising an exception will result in a rollback + of the current transaction. + """ + pass + + def create_network_postcommit(self, context): + """Create a network. + + :param context: NetworkContext instance describing the new + network. + + Called after the transaction commits. 
Call can block, though + will block the entire process so care should be taken to not + drastically affect performance. Raising an exception will + cause the deletion of the resource. + """ + pass + + def update_network_precommit(self, context): + """Update resources of a network. + + :param context: NetworkContext instance describing the new + state of the network, as well as the original state prior + to the update_network call. + + Update values of a network, updating the associated resources + in the database. Called inside transaction context on session. + Raising an exception will result in rollback of the + transaction. + + update_network_precommit is called for all changes to the + network state. It is up to the mechanism driver to ignore + state or state changes that it does not know or care about. + """ + pass + + def update_network_postcommit(self, context): + """Update a network. + + :param context: NetworkContext instance describing the new + state of the network, as well as the original state prior + to the update_network call. + + Called after the transaction commits. Call can block, though + will block the entire process so care should be taken to not + drastically affect performance. Raising an exception will + cause the deletion of the resource. + + update_network_postcommit is called for all changes to the + network state. It is up to the mechanism driver to ignore + state or state changes that it does not know or care about. + """ + pass + + def delete_network_precommit(self, context): + """Delete resources for a network. + + :param context: NetworkContext instance describing the current + state of the network, prior to the call to delete it. + + Delete network resources previously allocated by this + mechanism driver for a network. Called inside transaction + context on session. Runtime errors are not expected, but + raising an exception will result in rollback of the + transaction. 
+ """ + pass + + def delete_network_postcommit(self, context): + """Delete a network. + + :param context: NetworkContext instance describing the current + state of the network, prior to the call to delete it. + + Called after the transaction commits. Call can block, though + will block the entire process so care should be taken to not + drastically affect performance. Runtime errors are not + expected, and will not prevent the resource from being + deleted. + """ + pass + + def create_subnet_precommit(self, context): + """Allocate resources for a new subnet. + + :param context: SubnetContext instance describing the new + subnet. + + Create a new subnet, allocating resources as necessary in the + database. Called inside transaction context on session. Call + cannot block. Raising an exception will result in a rollback + of the current transaction. + """ + pass + + def create_subnet_postcommit(self, context): + """Create a subnet. + + :param context: SubnetContext instance describing the new + subnet. + + Called after the transaction commits. Call can block, though + will block the entire process so care should be taken to not + drastically affect performance. Raising an exception will + cause the deletion of the resource. + """ + pass + + def update_subnet_precommit(self, context): + """Update resources of a subnet. + + :param context: SubnetContext instance describing the new + state of the subnet, as well as the original state prior + to the update_subnet call. + + Update values of a subnet, updating the associated resources + in the database. Called inside transaction context on session. + Raising an exception will result in rollback of the + transaction. + + update_subnet_precommit is called for all changes to the + subnet state. It is up to the mechanism driver to ignore + state or state changes that it does not know or care about. + """ + pass + + def update_subnet_postcommit(self, context): + """Update a subnet. 
+ + :param context: SubnetContext instance describing the new + state of the subnet, as well as the original state prior + to the update_subnet call. + + Called after the transaction commits. Call can block, though + will block the entire process so care should be taken to not + drastically affect performance. Raising an exception will + cause the deletion of the resource. + + update_subnet_postcommit is called for all changes to the + subnet state. It is up to the mechanism driver to ignore + state or state changes that it does not know or care about. + """ + pass + + def delete_subnet_precommit(self, context): + """Delete resources for a subnet. + + :param context: SubnetContext instance describing the current + state of the subnet, prior to the call to delete it. + + Delete subnet resources previously allocated by this + mechanism driver for a subnet. Called inside transaction + context on session. Runtime errors are not expected, but + raising an exception will result in rollback of the + transaction. + """ + pass + + def delete_subnet_postcommit(self, context): + """Delete a subnet. + + :param context: SubnetContext instance describing the current + state of the subnet, prior to the call to delete it. + + Called after the transaction commits. Call can block, though + will block the entire process so care should be taken to not + drastically affect performance. Runtime errors are not + expected, and will not prevent the resource from being + deleted. + """ + pass + + def create_port_precommit(self, context): + """Allocate resources for a new port. + + :param context: PortContext instance describing the port. + + Create a new port, allocating resources as necessary in the + database. Called inside transaction context on session. Call + cannot block. Raising an exception will result in a rollback + of the current transaction. + """ + pass + + def create_port_postcommit(self, context): + """Create a port. 
+ + :param context: PortContext instance describing the port. + + Called after the transaction completes. Call can block, though + will block the entire process so care should be taken to not + drastically affect performance. Raising an exception will + result in the deletion of the resource. + """ + pass + + def update_port_precommit(self, context): + """Update resources of a port. + + :param context: PortContext instance describing the new + state of the port, as well as the original state prior + to the update_port call. + + Called inside transaction context on session to complete a + port update as defined by this mechanism driver. Raising an + exception will result in rollback of the transaction. + + update_port_precommit is called for all changes to the port + state. It is up to the mechanism driver to ignore state or + state changes that it does not know or care about. + """ + pass + + def update_port_postcommit(self, context): + """Update a port. + + :param context: PortContext instance describing the new + state of the port, as well as the original state prior + to the update_port call. + + Called after the transaction completes. Call can block, though + will block the entire process so care should be taken to not + drastically affect performance. Raising an exception will + result in the deletion of the resource. + + update_port_postcommit is called for all changes to the port + state. It is up to the mechanism driver to ignore state or + state changes that it does not know or care about. + """ + pass + + def delete_port_precommit(self, context): + """Delete resources of a port. + + :param context: PortContext instance describing the current + state of the port, prior to the call to delete it. + + Called inside transaction context on session. Runtime errors + are not expected, but raising an exception will result in + rollback of the transaction. + """ + pass + + def delete_port_postcommit(self, context): + """Delete a port. 
+ + :param context: PortContext instance describing the current + state of the port, prior to the call to delete it. + + Called after the transaction completes. Call can block, though + will block the entire process so care should be taken to not + drastically affect performance. Runtime errors are not + expected, and will not prevent the resource from being + deleted. + """ + pass + + def bind_port(self, context): + """Attempt to bind a port. + + :param context: PortContext instance describing the port + + Called inside transaction context on session, prior to + create_port_precommit or update_port_precommit, to + attempt to establish a port binding. If the driver is able to + bind the port, it calls context.set_binding with the binding + details. + """ + pass diff --git a/neutron/plugins/ml2/driver_context.py b/neutron/plugins/ml2/driver_context.py new file mode 100644 index 000000000..0c1180619 --- /dev/null +++ b/neutron/plugins/ml2/driver_context.py @@ -0,0 +1,135 @@ +# Copyright (c) 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from neutron.openstack.common import jsonutils +from neutron.plugins.ml2 import db +from neutron.plugins.ml2 import driver_api as api + + +class MechanismDriverContext(object): + """MechanismDriver context base class.""" + def __init__(self, plugin, plugin_context): + self._plugin = plugin + # This temporarily creates a reference loop, but the + # lifetime of PortContext is limited to a single + # method call of the plugin. + self._plugin_context = plugin_context + + +class NetworkContext(MechanismDriverContext, api.NetworkContext): + + def __init__(self, plugin, plugin_context, network, + original_network=None): + super(NetworkContext, self).__init__(plugin, plugin_context) + self._network = network + self._original_network = original_network + self._segments = db.get_network_segments(plugin_context.session, + network['id']) + + @property + def current(self): + return self._network + + @property + def original(self): + return self._original_network + + @property + def network_segments(self): + return self._segments + + +class SubnetContext(MechanismDriverContext, api.SubnetContext): + + def __init__(self, plugin, plugin_context, subnet, original_subnet=None): + super(SubnetContext, self).__init__(plugin, plugin_context) + self._subnet = subnet + self._original_subnet = original_subnet + + @property + def current(self): + return self._subnet + + @property + def original(self): + return self._original_subnet + + +class PortContext(MechanismDriverContext, api.PortContext): + + def __init__(self, plugin, plugin_context, port, network, + original_port=None): + super(PortContext, self).__init__(plugin, plugin_context) + self._port = port + self._original_port = original_port + self._network_context = NetworkContext(plugin, plugin_context, + network) + self._binding = db.ensure_port_binding(plugin_context.session, + port['id']) + if original_port: + self._original_bound_segment_id = self._binding.segment + self._original_bound_driver = self._binding.driver + else: + 
self._original_bound_segment_id = None + self._original_bound_driver = None + self._new_port_status = None + + @property + def current(self): + return self._port + + @property + def original(self): + return self._original_port + + @property + def network(self): + return self._network_context + + @property + def bound_segment(self): + id = self._binding.segment + if id: + for segment in self._network_context.network_segments: + if segment[api.ID] == id: + return segment + + @property + def original_bound_segment(self): + if self._original_bound_segment_id: + for segment in self._network_context.network_segments: + if segment[api.ID] == self._original_bound_segment_id: + return segment + + @property + def bound_driver(self): + return self._binding.driver + + @property + def original_bound_driver(self): + return self._original_bound_driver + + def host_agents(self, agent_type): + return self._plugin.get_agents(self._plugin_context, + filters={'agent_type': [agent_type], + 'host': [self._binding.host]}) + + def set_binding(self, segment_id, vif_type, vif_details, + status=None): + # TODO(rkukura) Verify binding allowed, segment in network + self._binding.segment = segment_id + self._binding.vif_type = vif_type + self._binding.vif_details = jsonutils.dumps(vif_details) + self._new_port_status = status diff --git a/neutron/plugins/ml2/drivers/README.fslsdn b/neutron/plugins/ml2/drivers/README.fslsdn new file mode 100644 index 000000000..09017284c --- /dev/null +++ b/neutron/plugins/ml2/drivers/README.fslsdn @@ -0,0 +1,102 @@ +===================================================== +Freescale SDN Mechanism Driver for Neutron ML2 plugin +===================================================== + +Introduction +============ + +Freescale SDN (FSL-SDN) Mechanism Driver is an add-on support for ML2 plugin +for Neutron. + +It supports the Cloud Resource Discovery (CRD) service by updating +Network, Subnet and Port Create/Update/Delete data into the CRD database. 
+ +CRD service manages network nodes, virtual network appliances and openflow +controller based network applications. + +Basic work flow +--------------- + +:: + + +---------------------------------+ + | | + | Neutron Server | + | (with ML2 plugin) | + | | + | +-------------------------------+ + | | Freescale SDN | + | | Mechanism Driver | + +-+--------+----------------------+ + | + | ReST API + | + +----------+-------------+ + | CRD server | + +------------------------+ + + + +How does Freescale SDN Mechanism Driver work? +=========================================== + +- Freescale Mechanism driver handles the following postcommit operations. + - Network create/update/delete + - Subnet create/update/delete + - Port create/delete + +Sequence diagram : create_network +--------------------------------- + +:: + + create_network + { + neutron -> ML2_plugin + ML2_plugin -> FSL-SDN-MD + FSL-SDN-MD -> crd_service + FSL-SDN-MD <-- crd_service + ML2_plugin <-- FSL-SDN-MD + neutron <-- ML2_plugin + } + +- Supported network types by FSL OF Controller include vlan and vxlan. + +- Freescale SDN mechanism driver handles VM port binding within in the + mechanism driver (like ODL MD). + +- 'bind_port' function verifies the supported network types (vlan,vxlan) + and calls context.set_binding with binding details. + +- Flow management in OVS is handled by Freescale Openflow Controller. + + +How to use Freescale SDN Mechanism Driver? +========================================== + +Configuring ML2 Plugin +---------------------- + +In [ml2] section of /etc/neutron/plugins/ml2/ml2_conf.ini, +modify 'mechanism_drivers' attributes as: + +:: + + mechanism_drivers = fslsdn + +Configuring FSLSDN Mechanism Driver +----------------------------------- + +Update /etc/neutron/plugins/ml2/ml2_conf_fslsdn.ini, as below. 
+ +:: + + [ml2_fslsdn] + crd_auth_strategy = keystone + crd_url = http://127.0.0.1:9797 + crd_auth_url = http://127.0.0.1:5000/v2.0/ + crd_tenant_name = service + crd_password = <-service-password-> + crd_user_name = <-service-username-> + +CRD service must be running in the controller. diff --git a/neutron/plugins/ml2/drivers/README.odl b/neutron/plugins/ml2/drivers/README.odl new file mode 100644 index 000000000..eef8d4441 --- /dev/null +++ b/neutron/plugins/ml2/drivers/README.odl @@ -0,0 +1,41 @@ +OpenDaylight ML2 MechanismDriver +================================ +OpenDaylight is an Open Source SDN Controller developed by a plethora of +companies and hosted by the Linux Foundation. The OpenDaylight website +contains more information on the capabilities OpenDaylight provides: + + http://www.opendaylight.org + +Theory of operation +=================== +The OpenStack Neutron integration with OpenDaylight consists of the ML2 +MechanismDriver which acts as a REST proxy and passess all Neutron API +calls into OpenDaylight. OpenDaylight contains a NB REST service (called +the NeutronAPIService) which caches data from these proxied API calls and +makes it available to other services inside of OpenDaylight. One current +user of the SB side of the NeutronAPIService is the OVSDB code in +OpenDaylight. OVSDB uses the neutron information to isolate tenant networks +using GRE or VXLAN tunnels. + +How to use the OpenDaylight ML2 MechanismDriver +=============================================== +To use the ML2 MechanismDriver, you need to ensure you have it configured +as one of the "mechanism_drivers" in ML2: + + mechanism_drivers=opendaylight + +The next step is to setup the "[ml2_odl]" section in either the ml2_conf.ini +file or in a separate ml2_conf_odl.ini file. 
An example is shown below: + + [ml2_odl] + password = admin + username = admin + url = http://192.168.100.1:8080/controller/nb/v2/neutron + +When starting OpenDaylight, ensure you have the SimpleForwarding application +disabled or remove the .jar file from the plugins directory. Also ensure you +start OpenDaylight before you start OpenStack Neutron. + +There is devstack support for this which will automatically pull down OpenDaylight +and start it as part of devstack as well. The patch for this will likely merge +around the same time as this patch merges. diff --git a/neutron/plugins/ml2/drivers/__init__.py b/neutron/plugins/ml2/drivers/__init__.py new file mode 100644 index 000000000..788cea1f7 --- /dev/null +++ b/neutron/plugins/ml2/drivers/__init__.py @@ -0,0 +1,14 @@ +# Copyright (c) 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
diff --git a/neutron/plugins/ml2/drivers/brocade/README.md b/neutron/plugins/ml2/drivers/brocade/README.md new file mode 100644 index 000000000..5cf5a7571 --- /dev/null +++ b/neutron/plugins/ml2/drivers/brocade/README.md @@ -0,0 +1,60 @@ +Brocade ML2 Mechanism driver from ML2 plugin +============================================ + +* up-to-date version of these instructions are located at: + http://50.56.236.34/docs/brocade-ml2-mechanism.txt +* N.B.: Please see Prerequisites section regarding ncclient (netconf client library) +* Supports VCS (Virtual Cluster of Switches) +* Issues/Questions/Bugs: sharis@brocade.com + + + + 1. VDX 67xx series of switches + 2. VDX 87xx series of switches + +ML2 plugin requires mechanism driver to support configuring of hardware switches. +Brocade Mechanism for ML2 uses NETCONF at the backend to configure the Brocade switch. +Currently the mechanism drivers support VLANs only. + + +------------+ +------------+ +-------------+ + | | | | | | + Neutron | | | | | Brocade | + v2.0 | Openstack | | Brocade | NETCONF | VCS Switch | + ----+ Neutron +--------+ Mechanism +----------+ | + | ML2 | | Driver | | VDX 67xx | + | Plugin | | | | VDX 87xx | + | | | | | | + | | | | | | + +------------+ +------------+ +-------------+ + + +Configuration + +In order to use this mechnism the brocade configuration file needs to be edited with the appropriate +configuration information: + + % cat /etc/neutron/plugins/ml2/ml2_conf_brocade.ini + [switch] + username = admin + password = password + address = + ostype = NOS + physical_networks = phys1 + +Additionally the brocade mechanism driver needs to be enabled from the ml2 config file: + + % cat /etc/neutron/plugins/ml2/ml2_conf.ini + + [ml2] + tenant_network_types = vlan + type_drivers = local,flat,vlan,gre,vxlan + mechanism_drivers = openvswitch,brocade + # OR mechanism_drivers = openvswitch,linuxbridge,hyperv,brocade + ... + ... + ... 
+ + +Required L2 Agent + +This mechanism driver works in conjunction with an L2 Agent. The agent should be loaded as well in order for it to configure the virtual network int the host machine. Please see the configuration above. Atleast one of linuxbridge or openvswitch must be specified. diff --git a/neutron/plugins/ml2/drivers/brocade/__init__.py b/neutron/plugins/ml2/drivers/brocade/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/neutron/plugins/ml2/drivers/brocade/db/__init__.py b/neutron/plugins/ml2/drivers/brocade/db/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/neutron/plugins/ml2/drivers/brocade/db/models.py b/neutron/plugins/ml2/drivers/brocade/db/models.py new file mode 100644 index 000000000..249540527 --- /dev/null +++ b/neutron/plugins/ml2/drivers/brocade/db/models.py @@ -0,0 +1,139 @@ +# Copyright 2014 Brocade Communications System, Inc. +# All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
def create_network(context, net_id, vlan, segment_id, network_type, tenant_id):
    """Create a brocade specific network/port-profiles.

    Idempotent: if a row for net_id already exists it is returned
    unchanged instead of inserting a duplicate.

    :param context: neutron context carrying the DB session
    :param net_id: neutron network UUID, used as the row's primary key
    :param vlan: VLAN id associated with the network
    :param segment_id: ML2 segment id for the network
    :param network_type: requested type; NOTE(review): this argument is
        currently ignored — the row is always stored with
        network_type='vlan' (only vlan is supported). Confirm callers
        never pass anything else.
    :param tenant_id: owning tenant
    :returns: the new or existing ML2_BrocadeNetwork row
    """

    # only network_type of vlan is supported
    session = context.session
    with session.begin(subtransactions=True):
        net = get_network(context, net_id, None)
        if not net:
            net = ML2_BrocadeNetwork(id=net_id, vlan=vlan,
                                     segment_id=segment_id,
                                     network_type='vlan',
                                     tenant_id=tenant_id)
            session.add(net)
    return net
def update_port_state(context, port_id, admin_state_up):
    """Update the admin_state_up flag of a brocade-specific port row.

    Despite the generic name, only admin_state_up is touched. A no-op
    if no row matches port_id (Query.update matches zero rows).
    """

    session = context.session
    with session.begin(subtransactions=True):
        session.query(ML2_BrocadePort).filter_by(
            id=port_id).update({'admin_state_up': admin_state_up})
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# Author: +# Shiv Haris (shivharis@hotmail.com) + + +"""Implentation of Brocade ML2 Mechanism driver for ML2 Plugin.""" + +from oslo.config import cfg + +from neutron.openstack.common import importutils +from neutron.openstack.common import log as logging +from neutron.plugins.ml2 import driver_api +from neutron.plugins.ml2.drivers.brocade.db import models as brocade_db + +LOG = logging.getLogger(__name__) +MECHANISM_VERSION = 0.9 +NOS_DRIVER = 'neutron.plugins.ml2.drivers.brocade.nos.nosdriver.NOSdriver' + +ML2_BROCADE = [cfg.StrOpt('address', default='', + help=_('The address of the host to SSH to')), + cfg.StrOpt('username', default='admin', + help=_('The SSH username to use')), + cfg.StrOpt('password', default='password', secret=True, + help=_('The SSH password to use')), + cfg.StrOpt('physical_networks', default='', + help=_('Allowed physical networks')), + cfg.StrOpt('ostype', default='NOS', + help=_('Unused')) + ] + +cfg.CONF.register_opts(ML2_BROCADE, "ml2_brocade") + + +class BrocadeMechanism(driver_api.MechanismDriver): + """ML2 Mechanism driver for Brocade VDX switches. This is the upper + layer driver class that interfaces to lower layer (NETCONF) below. 
+ + """ + + def __init__(self): + self._driver = None + self._physical_networks = None + self._switch = None + self.initialize() + + def initialize(self): + """Initilize of variables needed by this class.""" + + self._physical_networks = cfg.CONF.ml2_brocade.physical_networks + self.brocade_init() + + def brocade_init(self): + """Brocade specific initialization for this class.""" + + self._switch = {'address': cfg.CONF.ml2_brocade.address, + 'username': cfg.CONF.ml2_brocade.username, + 'password': cfg.CONF.ml2_brocade.password + } + self._driver = importutils.import_object(NOS_DRIVER) + + def create_network_precommit(self, mech_context): + """Create Network in the mechanism specific database table.""" + + network = mech_context.current + context = mech_context._plugin_context + tenant_id = network['tenant_id'] + network_id = network['id'] + + segments = mech_context.network_segments + # currently supports only one segment per network + segment = segments[0] + + network_type = segment['network_type'] + vlan_id = segment['segmentation_id'] + segment_id = segment['id'] + + if segment['physical_network'] not in self._physical_networks: + raise Exception( + _("Brocade Mechanism: failed to create network, " + "network cannot be created in the configured " + "physical network")) + + if network_type != 'vlan': + raise Exception( + _("Brocade Mechanism: failed to create network, " + "only network type vlan is supported")) + + try: + brocade_db.create_network(context, network_id, vlan_id, + segment_id, network_type, tenant_id) + except Exception: + LOG.exception( + _("Brocade Mechanism: failed to create network in db")) + raise Exception( + _("Brocade Mechanism: create_network_precommit failed")) + + LOG.info(_("create network (precommit): %(network_id)s " + "of network type = %(network_type)s " + "with vlan = %(vlan_id)s " + "for tenant %(tenant_id)s"), + {'network_id': network_id, + 'network_type': network_type, + 'vlan_id': vlan_id, + 'tenant_id': tenant_id}) + + def 
create_network_postcommit(self, mech_context): + """Create Network as a portprofile on the switch.""" + + LOG.debug(_("create_network_postcommit: called")) + + network = mech_context.current + # use network_id to get the network attributes + # ONLY depend on our db for getting back network attributes + # this is so we can replay postcommit from db + context = mech_context._plugin_context + + network_id = network['id'] + network = brocade_db.get_network(context, network_id) + network_type = network['network_type'] + tenant_id = network['tenant_id'] + vlan_id = network['vlan'] + + try: + self._driver.create_network(self._switch['address'], + self._switch['username'], + self._switch['password'], + vlan_id) + except Exception: + LOG.exception(_("Brocade NOS driver: failed in create network")) + brocade_db.delete_network(context, network_id) + raise Exception( + _("Brocade Mechanism: create_network_postcommmit failed")) + + LOG.info(_("created network (postcommit): %(network_id)s" + " of network type = %(network_type)s" + " with vlan = %(vlan_id)s" + " for tenant %(tenant_id)s"), + {'network_id': network_id, + 'network_type': network_type, + 'vlan_id': vlan_id, + 'tenant_id': tenant_id}) + + def delete_network_precommit(self, mech_context): + """Delete Network from the plugin specific database table.""" + + LOG.debug(_("delete_network_precommit: called")) + + network = mech_context.current + network_id = network['id'] + vlan_id = network['provider:segmentation_id'] + tenant_id = network['tenant_id'] + + context = mech_context._plugin_context + + try: + brocade_db.delete_network(context, network_id) + except Exception: + LOG.exception( + _("Brocade Mechanism: failed to delete network in db")) + raise Exception( + _("Brocade Mechanism: delete_network_precommit failed")) + + LOG.info(_("delete network (precommit): %(network_id)s" + " with vlan = %(vlan_id)s" + " for tenant %(tenant_id)s"), + {'network_id': network_id, + 'vlan_id': vlan_id, + 'tenant_id': tenant_id}) + + 
def delete_network_postcommit(self, mech_context): + """Delete network which translates to removng portprofile + from the switch. + """ + + LOG.debug(_("delete_network_postcommit: called")) + network = mech_context.current + network_id = network['id'] + vlan_id = network['provider:segmentation_id'] + tenant_id = network['tenant_id'] + + try: + self._driver.delete_network(self._switch['address'], + self._switch['username'], + self._switch['password'], + vlan_id) + except Exception: + LOG.exception(_("Brocade NOS driver: failed to delete network")) + raise Exception( + _("Brocade switch exception, " + "delete_network_postcommit failed")) + + LOG.info(_("delete network (postcommit): %(network_id)s" + " with vlan = %(vlan_id)s" + " for tenant %(tenant_id)s"), + {'network_id': network_id, + 'vlan_id': vlan_id, + 'tenant_id': tenant_id}) + + def update_network_precommit(self, mech_context): + """Noop now, it is left here for future.""" + pass + + def update_network_postcommit(self, mech_context): + """Noop now, it is left here for future.""" + pass + + def create_port_precommit(self, mech_context): + """Create logical port on the switch (db update).""" + + LOG.debug(_("create_port_precommit: called")) + + port = mech_context.current + port_id = port['id'] + network_id = port['network_id'] + tenant_id = port['tenant_id'] + admin_state_up = port['admin_state_up'] + + context = mech_context._plugin_context + + network = brocade_db.get_network(context, network_id) + vlan_id = network['vlan'] + + try: + brocade_db.create_port(context, port_id, network_id, + None, + vlan_id, tenant_id, admin_state_up) + except Exception: + LOG.exception(_("Brocade Mechanism: failed to create port in db")) + raise Exception( + _("Brocade Mechanism: create_port_precommit failed")) + + def create_port_postcommit(self, mech_context): + """Associate the assigned MAC address to the portprofile.""" + + LOG.debug(_("create_port_postcommit: called")) + + port = mech_context.current + port_id = 
port['id'] + network_id = port['network_id'] + tenant_id = port['tenant_id'] + + context = mech_context._plugin_context + + network = brocade_db.get_network(context, network_id) + vlan_id = network['vlan'] + + interface_mac = port['mac_address'] + + # convert mac format: xx:xx:xx:xx:xx:xx -> xxxx.xxxx.xxxx + mac = self.mac_reformat_62to34(interface_mac) + try: + self._driver.associate_mac_to_network(self._switch['address'], + self._switch['username'], + self._switch['password'], + vlan_id, + mac) + except Exception: + LOG.exception( + _("Brocade NOS driver: failed to associate mac %s") + % interface_mac) + raise Exception( + _("Brocade switch exception: create_port_postcommit failed")) + + LOG.info( + _("created port (postcommit): port_id=%(port_id)s" + " network_id=%(network_id)s tenant_id=%(tenant_id)s"), + {'port_id': port_id, + 'network_id': network_id, 'tenant_id': tenant_id}) + + def delete_port_precommit(self, mech_context): + """Delete logical port on the switch (db update).""" + + LOG.debug(_("delete_port_precommit: called")) + port = mech_context.current + port_id = port['id'] + + context = mech_context._plugin_context + + try: + brocade_db.delete_port(context, port_id) + except Exception: + LOG.exception(_("Brocade Mechanism: failed to delete port in db")) + raise Exception( + _("Brocade Mechanism: delete_port_precommit failed")) + + def delete_port_postcommit(self, mech_context): + """Dissociate MAC address from the portprofile.""" + + LOG.debug(_("delete_port_postcommit: called")) + port = mech_context.current + port_id = port['id'] + network_id = port['network_id'] + tenant_id = port['tenant_id'] + + context = mech_context._plugin_context + + network = brocade_db.get_network(context, network_id) + vlan_id = network['vlan'] + + interface_mac = port['mac_address'] + + # convert mac format: xx:xx:xx:xx:xx:xx -> xxxx.xxxx.xxxx + mac = self.mac_reformat_62to34(interface_mac) + try: + self._driver.dissociate_mac_from_network( + self._switch['address'], + 
self._switch['username'], + self._switch['password'], + vlan_id, + mac) + except Exception: + LOG.exception( + _("Brocade NOS driver: failed to dissociate MAC %s") % + interface_mac) + raise Exception( + _("Brocade switch exception, delete_port_postcommit failed")) + + LOG.info( + _("delete port (postcommit): port_id=%(port_id)s" + " network_id=%(network_id)s tenant_id=%(tenant_id)s"), + {'port_id': port_id, + 'network_id': network_id, 'tenant_id': tenant_id}) + + def update_port_precommit(self, mech_context): + """Noop now, it is left here for future.""" + LOG.debug(_("update_port_precommit(self: called")) + + def update_port_postcommit(self, mech_context): + """Noop now, it is left here for future.""" + LOG.debug(_("update_port_postcommit: called")) + + def create_subnet_precommit(self, mech_context): + """Noop now, it is left here for future.""" + LOG.debug(_("create_subnetwork_precommit: called")) + + def create_subnet_postcommit(self, mech_context): + """Noop now, it is left here for future.""" + LOG.debug(_("create_subnetwork_postcommit: called")) + + def delete_subnet_precommit(self, mech_context): + """Noop now, it is left here for future.""" + LOG.debug(_("delete_subnetwork_precommit: called")) + + def delete_subnet_postcommit(self, mech_context): + """Noop now, it is left here for future.""" + LOG.debug(_("delete_subnetwork_postcommit: called")) + + def update_subnet_precommit(self, mech_context): + """Noop now, it is left here for future.""" + LOG.debug(_("update_subnet_precommit(self: called")) + + def update_subnet_postcommit(self, mech_context): + """Noop now, it is left here for future.""" + LOG.debug(_("update_subnet_postcommit: called")) + + @staticmethod + def mac_reformat_62to34(interface_mac): + """Transform MAC address format. + + Transforms from 6 groups of 2 hexadecimal numbers delimited by ":" + to 3 groups of 4 hexadecimals numbers delimited by ".". 
+ + :param interface_mac: MAC address in the format xx:xx:xx:xx:xx:xx + :type interface_mac: string + :returns: MAC address in the format xxxx.xxxx.xxxx + :rtype: string + """ + + mac = interface_mac.replace(":", "") + mac = mac[0:4] + "." + mac[4:8] + "." + mac[8:12] + return mac diff --git a/neutron/plugins/ml2/drivers/brocade/nos/__init__.py b/neutron/plugins/ml2/drivers/brocade/nos/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/neutron/plugins/ml2/drivers/brocade/nos/nctemplates.py b/neutron/plugins/ml2/drivers/brocade/nos/nctemplates.py new file mode 100644 index 000000000..dbf7575de --- /dev/null +++ b/neutron/plugins/ml2/drivers/brocade/nos/nctemplates.py @@ -0,0 +1,197 @@ +# Copyright (c) 2014 Brocade Communications Systems, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# Authors: +# Varma Bhupatiraju (vbhupati@#brocade.com) +# Shiv Haris (sharis@brocade.com) + + +"""NOS NETCONF XML Configuration Command Templates. 
+ +Interface Configuration Commands +""" + +# Create VLAN (vlan_id) +CREATE_VLAN_INTERFACE = """ + + + + + {vlan_id} + + + + +""" + +# Delete VLAN (vlan_id) +DELETE_VLAN_INTERFACE = """ + + + + + {vlan_id} + + + + +""" + +# +# AMPP Life-cycle Management Configuration Commands +# + +# Create AMPP port-profile (port_profile_name) +CREATE_PORT_PROFILE = """ + + + {name} + + +""" + +# Create VLAN sub-profile for port-profile (port_profile_name) +CREATE_VLAN_PROFILE_FOR_PORT_PROFILE = """ + + + {name} + + + +""" + +# Configure L2 mode for VLAN sub-profile (port_profile_name) +CONFIGURE_L2_MODE_FOR_VLAN_PROFILE = """ + + + {name} + + + + + +""" + +# Configure trunk mode for VLAN sub-profile (port_profile_name) +CONFIGURE_TRUNK_MODE_FOR_VLAN_PROFILE = """ + + + {name} + + + + trunk + + + + + +""" + +# Configure allowed VLANs for VLAN sub-profile +# (port_profile_name, allowed_vlan, native_vlan) +CONFIGURE_ALLOWED_VLANS_FOR_VLAN_PROFILE = """ + + + {name} + + + + + + {vlan_id} + + + + + + + +""" + +# Delete port-profile (port_profile_name) +DELETE_PORT_PROFILE = """ + + + {name} + + +""" + +# Activate port-profile (port_profile_name) +ACTIVATE_PORT_PROFILE = """ + + + + {name} + + + + +""" + +# Deactivate port-profile (port_profile_name) +DEACTIVATE_PORT_PROFILE = """ + + + + {name} + + + + +""" + +# Associate MAC address to port-profile (port_profile_name, mac_address) +ASSOCIATE_MAC_TO_PORT_PROFILE = """ + + + + {name} + + {mac_address} + + + + +""" + +# Dissociate MAC address from port-profile (port_profile_name, mac_address) +DISSOCIATE_MAC_FROM_PORT_PROFILE = """ + + + + {name} + + {mac_address} + + + + +""" + +# +# Constants +# + +# Port profile naming convention for Neutron networks +OS_PORT_PROFILE_NAME = "openstack-profile-{id}" + +# Port profile filter expressions +PORT_PROFILE_XPATH_FILTER = "/port-profile" +PORT_PROFILE_NAME_XPATH_FILTER = "/port-profile[name='{name}']" diff --git a/neutron/plugins/ml2/drivers/brocade/nos/nosdriver.py 
def nos_unknown_host_cb(host, fingerprint):
    """Unknown-host callback for the NETCONF SSH transport.

    Invoked by ncclient when the remote host key is not already known.
    The NOS driver currently trusts every switch, so the key is always
    accepted.

    :param host: address of the remote host
    :param fingerprint: host key fingerprint presented by the remote
    :returns: True, meaning the key is acceptable
    """
    return True
+ + Handles life-cycle management of Neutron network (leverages AMPP on NOS) + """ + + def __init__(self): + self.mgr = None + + def connect(self, host, username, password): + """Connect via SSH and initialize the NETCONF session.""" + + # Use the persisted NETCONF connection + if self.mgr and self.mgr.connected: + return self.mgr + + # check if someone forgot to edit the conf file with real values + if host == '': + raise Exception(_("Brocade Switch IP address is not set, " + "check config ml2_conf_brocade.ini file")) + + # Open new NETCONF connection + try: + self.mgr = manager.connect(host=host, port=SSH_PORT, + username=username, password=password, + unknown_host_cb=nos_unknown_host_cb) + except Exception: + with excutils.save_and_reraise_exception(): + LOG.exception(_("Connect failed to switch")) + + LOG.debug(_("Connect success to host %(host)s:%(ssh_port)d"), + dict(host=host, ssh_port=SSH_PORT)) + return self.mgr + + def close_session(self): + """Close NETCONF session.""" + if self.mgr: + self.mgr.close_session() + self.mgr = None + + def create_network(self, host, username, password, net_id): + """Creates a new virtual network.""" + + name = template.OS_PORT_PROFILE_NAME.format(id=net_id) + try: + mgr = self.connect(host, username, password) + self.create_vlan_interface(mgr, net_id) + self.create_port_profile(mgr, name) + self.create_vlan_profile_for_port_profile(mgr, name) + self.configure_l2_mode_for_vlan_profile(mgr, name) + self.configure_trunk_mode_for_vlan_profile(mgr, name) + self.configure_allowed_vlans_for_vlan_profile(mgr, name, net_id) + self.activate_port_profile(mgr, name) + except Exception: + with excutils.save_and_reraise_exception(): + LOG.exception(_("NETCONF error")) + self.close_session() + + def delete_network(self, host, username, password, net_id): + """Deletes a virtual network.""" + + name = template.OS_PORT_PROFILE_NAME.format(id=net_id) + try: + mgr = self.connect(host, username, password) + self.deactivate_port_profile(mgr, 
    def get_port_profile(self, mgr, name):
        """Retrieve a single port profile from the running config.

        :param mgr: an open ncclient NETCONF session
        :param name: port-profile name to select via XPath filter
        :returns: the matching configuration as an XML string
        """

        filterstr = template.PORT_PROFILE_NAME_XPATH_FILTER.format(name=name)
        response = mgr.get_config(source='running',
                                  filter=('xpath', filterstr)).data_xml
        return response
create_port_profile(self, mgr, name): + """Creates a port profile.""" + + confstr = template.CREATE_PORT_PROFILE.format(name=name) + mgr.edit_config(target='running', config=confstr) + + def delete_port_profile(self, mgr, name): + """Deletes a port profile.""" + + confstr = template.DELETE_PORT_PROFILE.format(name=name) + mgr.edit_config(target='running', config=confstr) + + def activate_port_profile(self, mgr, name): + """Activates a port profile.""" + + confstr = template.ACTIVATE_PORT_PROFILE.format(name=name) + mgr.edit_config(target='running', config=confstr) + + def deactivate_port_profile(self, mgr, name): + """Deactivates a port profile.""" + + confstr = template.DEACTIVATE_PORT_PROFILE.format(name=name) + mgr.edit_config(target='running', config=confstr) + + def associate_mac_to_port_profile(self, mgr, name, mac_address): + """Associates a MAC address to a port profile.""" + + confstr = template.ASSOCIATE_MAC_TO_PORT_PROFILE.format( + name=name, mac_address=mac_address) + mgr.edit_config(target='running', config=confstr) + + def dissociate_mac_from_port_profile(self, mgr, name, mac_address): + """Dissociates a MAC address from a port profile.""" + + confstr = template.DISSOCIATE_MAC_FROM_PORT_PROFILE.format( + name=name, mac_address=mac_address) + mgr.edit_config(target='running', config=confstr) + + def create_vlan_profile_for_port_profile(self, mgr, name): + """Creates VLAN sub-profile for port profile.""" + + confstr = template.CREATE_VLAN_PROFILE_FOR_PORT_PROFILE.format( + name=name) + mgr.edit_config(target='running', config=confstr) + + def configure_l2_mode_for_vlan_profile(self, mgr, name): + """Configures L2 mode for VLAN sub-profile.""" + + confstr = template.CONFIGURE_L2_MODE_FOR_VLAN_PROFILE.format( + name=name) + mgr.edit_config(target='running', config=confstr) + + def configure_trunk_mode_for_vlan_profile(self, mgr, name): + """Configures trunk mode for VLAN sub-profile.""" + + confstr = template.CONFIGURE_TRUNK_MODE_FOR_VLAN_PROFILE.format( 
+ name=name) + mgr.edit_config(target='running', config=confstr) + + def configure_allowed_vlans_for_vlan_profile(self, mgr, name, vlan_id): + """Configures allowed VLANs for VLAN sub-profile.""" + + confstr = template.CONFIGURE_ALLOWED_VLANS_FOR_VLAN_PROFILE.format( + name=name, vlan_id=vlan_id) + mgr.edit_config(target='running', config=confstr) diff --git a/neutron/plugins/ml2/drivers/cisco/__init__.py b/neutron/plugins/ml2/drivers/cisco/__init__.py new file mode 100644 index 000000000..788cea1f7 --- /dev/null +++ b/neutron/plugins/ml2/drivers/cisco/__init__.py @@ -0,0 +1,14 @@ +# Copyright (c) 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/neutron/plugins/ml2/drivers/cisco/apic/__init__.py b/neutron/plugins/ml2/drivers/cisco/apic/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/neutron/plugins/ml2/drivers/cisco/apic/apic_client.py b/neutron/plugins/ml2/drivers/cisco/apic/apic_client.py new file mode 100644 index 000000000..202e84c1c --- /dev/null +++ b/neutron/plugins/ml2/drivers/cisco/apic/apic_client.py @@ -0,0 +1,416 @@ +# Copyright (c) 2014 Cisco Systems +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Henry Gessau, Cisco Systems + +import collections +import time + +import requests +import requests.exceptions + +from neutron.openstack.common import jsonutils as json +from neutron.openstack.common import log as logging +from neutron.plugins.ml2.drivers.cisco.apic import exceptions as cexc + + +LOG = logging.getLogger(__name__) + +APIC_CODE_FORBIDDEN = str(requests.codes.forbidden) + + +# Info about a Managed Object's relative name (RN) and container. +class ManagedObjectName(collections.namedtuple( + 'MoPath', ['container', 'rn_fmt', 'can_create'])): + def __new__(cls, container, rn_fmt, can_create=True): + return super(ManagedObjectName, cls).__new__(cls, container, rn_fmt, + can_create) + + +class ManagedObjectClass(object): + + """Information about a Managed Object (MO) class. + + Constructs and keeps track of the distinguished name (DN) and relative + name (RN) of a managed object (MO) class. The DN is the RN of the MO + appended to the recursive RNs of its containers, i.e.: + DN = uni/container-RN/.../container-RN/object-RN + + Also keeps track of whether the MO can be created in the APIC, as some + MOs are read-only or used for specifying relationships. 
+ """ + + supported_mos = { + 'fvTenant': ManagedObjectName(None, 'tn-%s'), + 'fvBD': ManagedObjectName('fvTenant', 'BD-%s'), + 'fvRsBd': ManagedObjectName('fvAEPg', 'rsbd'), + 'fvSubnet': ManagedObjectName('fvBD', 'subnet-[%s]'), + 'fvCtx': ManagedObjectName('fvTenant', 'ctx-%s'), + 'fvRsCtx': ManagedObjectName('fvBD', 'rsctx'), + 'fvAp': ManagedObjectName('fvTenant', 'ap-%s'), + 'fvAEPg': ManagedObjectName('fvAp', 'epg-%s'), + 'fvRsProv': ManagedObjectName('fvAEPg', 'rsprov-%s'), + 'fvRsCons': ManagedObjectName('fvAEPg', 'rscons-%s'), + 'fvRsConsIf': ManagedObjectName('fvAEPg', 'rsconsif-%s'), + 'fvRsDomAtt': ManagedObjectName('fvAEPg', 'rsdomAtt-[%s]'), + 'fvRsPathAtt': ManagedObjectName('fvAEPg', 'rspathAtt-[%s]'), + + 'vzBrCP': ManagedObjectName('fvTenant', 'brc-%s'), + 'vzSubj': ManagedObjectName('vzBrCP', 'subj-%s'), + 'vzFilter': ManagedObjectName('fvTenant', 'flt-%s'), + 'vzRsFiltAtt': ManagedObjectName('vzSubj', 'rsfiltAtt-%s'), + 'vzEntry': ManagedObjectName('vzFilter', 'e-%s'), + 'vzInTerm': ManagedObjectName('vzSubj', 'intmnl'), + 'vzRsFiltAtt__In': ManagedObjectName('vzInTerm', 'rsfiltAtt-%s'), + 'vzOutTerm': ManagedObjectName('vzSubj', 'outtmnl'), + 'vzRsFiltAtt__Out': ManagedObjectName('vzOutTerm', 'rsfiltAtt-%s'), + 'vzCPIf': ManagedObjectName('fvTenant', 'cif-%s'), + 'vzRsIf': ManagedObjectName('vzCPIf', 'rsif'), + + 'vmmProvP': ManagedObjectName(None, 'vmmp-%s', False), + 'vmmDomP': ManagedObjectName('vmmProvP', 'dom-%s'), + 'vmmEpPD': ManagedObjectName('vmmDomP', 'eppd-[%s]'), + + 'physDomP': ManagedObjectName(None, 'phys-%s'), + + 'infra': ManagedObjectName(None, 'infra'), + 'infraNodeP': ManagedObjectName('infra', 'nprof-%s'), + 'infraLeafS': ManagedObjectName('infraNodeP', 'leaves-%s-typ-%s'), + 'infraNodeBlk': ManagedObjectName('infraLeafS', 'nodeblk-%s'), + 'infraRsAccPortP': ManagedObjectName('infraNodeP', 'rsaccPortP-[%s]'), + 'infraAccPortP': ManagedObjectName('infra', 'accportprof-%s'), + 'infraHPortS': 
ManagedObjectName('infraAccPortP', 'hports-%s-typ-%s'), + 'infraPortBlk': ManagedObjectName('infraHPortS', 'portblk-%s'), + 'infraRsAccBaseGrp': ManagedObjectName('infraHPortS', 'rsaccBaseGrp'), + 'infraFuncP': ManagedObjectName('infra', 'funcprof'), + 'infraAccPortGrp': ManagedObjectName('infraFuncP', 'accportgrp-%s'), + 'infraRsAttEntP': ManagedObjectName('infraAccPortGrp', 'rsattEntP'), + 'infraAttEntityP': ManagedObjectName('infra', 'attentp-%s'), + 'infraRsDomP': ManagedObjectName('infraAttEntityP', 'rsdomP-[%s]'), + 'infraRsVlanNs__phys': ManagedObjectName('physDomP', 'rsvlanNs'), + 'infraRsVlanNs__vmm': ManagedObjectName('vmmDomP', 'rsvlanNs'), + + 'fvnsVlanInstP': ManagedObjectName('infra', 'vlanns-%s-%s'), + 'fvnsEncapBlk__vlan': ManagedObjectName('fvnsVlanInstP', + 'from-%s-to-%s'), + 'fvnsVxlanInstP': ManagedObjectName('infra', 'vxlanns-%s'), + 'fvnsEncapBlk__vxlan': ManagedObjectName('fvnsVxlanInstP', + 'from-%s-to-%s'), + + # Read-only + 'fabricTopology': ManagedObjectName(None, 'topology', False), + 'fabricPod': ManagedObjectName('fabricTopology', 'pod-%s', False), + 'fabricPathEpCont': ManagedObjectName('fabricPod', 'paths-%s', False), + 'fabricPathEp': ManagedObjectName('fabricPathEpCont', 'pathep-%s', + False), + } + + # Note(Henry): The use of a mutable default argument _inst_cache is + # intentional. It persists for the life of MoClass to cache instances. 
+ # noinspection PyDefaultArgument + def __new__(cls, mo_class, _inst_cache={}): + """Ensure we create only one instance per mo_class.""" + try: + return _inst_cache[mo_class] + except KeyError: + new_inst = super(ManagedObjectClass, cls).__new__(cls) + new_inst.__init__(mo_class) + _inst_cache[mo_class] = new_inst + return new_inst + + def __init__(self, mo_class): + self.klass = mo_class + self.klass_name = mo_class.split('__')[0] + mo = self.supported_mos[mo_class] + self.container = mo.container + self.rn_fmt = mo.rn_fmt + self.dn_fmt, self.args = self._dn_fmt() + self.arg_count = self.dn_fmt.count('%s') + rn_has_arg = self.rn_fmt.count('%s') + self.can_create = rn_has_arg and mo.can_create + + def _dn_fmt(self): + """Build the distinguished name format using container and RN. + + DN = uni/container-RN/.../container-RN/object-RN + + Also make a list of the required name arguments. + Note: Call this method only once at init. + """ + arg = [self.klass] if '%s' in self.rn_fmt else [] + if self.container: + container = ManagedObjectClass(self.container) + dn_fmt = '%s/%s' % (container.dn_fmt, self.rn_fmt) + args = container.args + arg + return dn_fmt, args + return 'uni/%s' % self.rn_fmt, arg + + def dn(self, *args): + """Return the distinguished name for a managed object.""" + return self.dn_fmt % args + + +class ApicSession(object): + + """Manages a session with the APIC.""" + + def __init__(self, host, port, usr, pwd, ssl): + protocol = ssl and 'https' or 'http' + self.api_base = '%s://%s:%s/api' % (protocol, host, port) + self.session = requests.Session() + self.session_deadline = 0 + self.session_timeout = 0 + self.cookie = {} + + # Log in + self.authentication = None + self.username = None + self.password = None + if usr and pwd: + self.login(usr, pwd) + + @staticmethod + def _make_data(key, **attrs): + """Build the body for a msg out of a key and some attributes.""" + return json.dumps({key: {'attributes': attrs}}) + + def _api_url(self, api): + """Create 
the URL for a generic API.""" + return '%s/%s.json' % (self.api_base, api) + + def _mo_url(self, mo, *args): + """Create a URL for a MO lookup by DN.""" + dn = mo.dn(*args) + return '%s/mo/%s.json' % (self.api_base, dn) + + def _qry_url(self, mo): + """Create a URL for a query lookup by MO class.""" + return '%s/class/%s.json' % (self.api_base, mo.klass_name) + + def _check_session(self): + """Check that we are logged in and ensure the session is active.""" + if not self.authentication: + raise cexc.ApicSessionNotLoggedIn + if time.time() > self.session_deadline: + self.refresh() + + def _send(self, request, url, data=None, refreshed=None): + """Send a request and process the response.""" + if data is None: + response = request(url, cookies=self.cookie) + else: + response = request(url, data=data, cookies=self.cookie) + if response is None: + raise cexc.ApicHostNoResponse(url=url) + # Every request refreshes the timeout + self.session_deadline = time.time() + self.session_timeout + if data is None: + request_str = url + else: + request_str = '%s, data=%s' % (url, data) + LOG.debug(_("data = %s"), data) + # imdata is where the APIC returns the useful information + imdata = response.json().get('imdata') + LOG.debug(_("Response: %s"), imdata) + if response.status_code != requests.codes.ok: + try: + err_code = imdata[0]['error']['attributes']['code'] + err_text = imdata[0]['error']['attributes']['text'] + except (IndexError, KeyError): + err_code = '[code for APIC error not found]' + err_text = '[text for APIC error not found]' + # If invalid token then re-login and retry once + if (not refreshed and err_code == APIC_CODE_FORBIDDEN and + err_text.lower().startswith('token was invalid')): + self.login() + return self._send(request, url, data=data, refreshed=True) + raise cexc.ApicResponseNotOk(request=request_str, + status=response.status_code, + reason=response.reason, + err_text=err_text, err_code=err_code) + return imdata + + # REST requests + + def get_data(self, 
request): + """Retrieve generic data from the server.""" + self._check_session() + url = self._api_url(request) + return self._send(self.session.get, url) + + def get_mo(self, mo, *args): + """Retrieve a managed object by its distinguished name.""" + self._check_session() + url = self._mo_url(mo, *args) + '?query-target=self' + return self._send(self.session.get, url) + + def list_mo(self, mo): + """Retrieve the list of managed objects for a class.""" + self._check_session() + url = self._qry_url(mo) + return self._send(self.session.get, url) + + def post_data(self, request, data): + """Post generic data to the server.""" + self._check_session() + url = self._api_url(request) + return self._send(self.session.post, url, data=data) + + def post_mo(self, mo, *args, **kwargs): + """Post data for a managed object to the server.""" + self._check_session() + url = self._mo_url(mo, *args) + data = self._make_data(mo.klass_name, **kwargs) + return self._send(self.session.post, url, data=data) + + # Session management + + def _save_cookie(self, request, response): + """Save the session cookie and its expiration time.""" + imdata = response.json().get('imdata') + if response.status_code == requests.codes.ok: + attributes = imdata[0]['aaaLogin']['attributes'] + try: + self.cookie = {'APIC-Cookie': attributes['token']} + except KeyError: + raise cexc.ApicResponseNoCookie(request=request) + timeout = int(attributes['refreshTimeoutSeconds']) + LOG.debug(_("APIC session will expire in %d seconds"), timeout) + # Give ourselves a few seconds to refresh before timing out + self.session_timeout = timeout - 5 + self.session_deadline = time.time() + self.session_timeout + else: + attributes = imdata[0]['error']['attributes'] + return attributes + + def login(self, usr=None, pwd=None): + """Log in to controller. 
Save user name and authentication.""" + usr = usr or self.username + pwd = pwd or self.password + name_pwd = self._make_data('aaaUser', name=usr, pwd=pwd) + url = self._api_url('aaaLogin') + try: + response = self.session.post(url, data=name_pwd, timeout=10.0) + except requests.exceptions.Timeout: + raise cexc.ApicHostNoResponse(url=url) + attributes = self._save_cookie('aaaLogin', response) + if response.status_code == requests.codes.ok: + self.username = usr + self.password = pwd + self.authentication = attributes + else: + self.authentication = None + raise cexc.ApicResponseNotOk(request=url, + status=response.status_code, + reason=response.reason, + err_text=attributes['text'], + err_code=attributes['code']) + + def refresh(self): + """Called when a session has timed out or almost timed out.""" + url = self._api_url('aaaRefresh') + response = self.session.get(url, cookies=self.cookie) + attributes = self._save_cookie('aaaRefresh', response) + if response.status_code == requests.codes.ok: + # We refreshed before the session timed out. + self.authentication = attributes + else: + err_code = attributes['code'] + err_text = attributes['text'] + if (err_code == APIC_CODE_FORBIDDEN and + err_text.lower().startswith('token was invalid')): + # This means the token timed out, so log in again. 
+ LOG.debug(_("APIC session timed-out, logging in again.")) + self.login() + else: + self.authentication = None + raise cexc.ApicResponseNotOk(request=url, + status=response.status_code, + reason=response.reason, + err_text=err_text, + err_code=err_code) + + def logout(self): + """End session with controller.""" + if not self.username: + self.authentication = None + if self.authentication: + data = self._make_data('aaaUser', name=self.username) + self.post_data('aaaLogout', data=data) + self.authentication = None + + +class ManagedObjectAccess(object): + + """CRUD operations on APIC Managed Objects.""" + + def __init__(self, session, mo_class): + self.session = session + self.mo = ManagedObjectClass(mo_class) + + def _create_container(self, *args): + """Recursively create all container objects.""" + if self.mo.container: + container = ManagedObjectAccess(self.session, self.mo.container) + if container.mo.can_create: + container_args = args[0: container.mo.arg_count] + container._create_container(*container_args) + container.session.post_mo(container.mo, *container_args) + + def create(self, *args, **kwargs): + self._create_container(*args) + if self.mo.can_create and 'status' not in kwargs: + kwargs['status'] = 'created' + return self.session.post_mo(self.mo, *args, **kwargs) + + def _mo_attributes(self, obj_data): + if (self.mo.klass_name in obj_data and + 'attributes' in obj_data[self.mo.klass_name]): + return obj_data[self.mo.klass_name]['attributes'] + + def get(self, *args): + """Return a dict of the MO's attributes, or None.""" + imdata = self.session.get_mo(self.mo, *args) + if imdata: + return self._mo_attributes(imdata[0]) + + def list_all(self): + imdata = self.session.list_mo(self.mo) + return filter(None, [self._mo_attributes(obj) for obj in imdata]) + + def list_names(self): + return [obj['name'] for obj in self.list_all()] + + def update(self, *args, **kwargs): + return self.session.post_mo(self.mo, *args, **kwargs) + + def delete(self, *args): + 
return self.session.post_mo(self.mo, *args, status='deleted') + + +class RestClient(ApicSession): + + """APIC REST client for OpenStack Neutron.""" + + def __init__(self, host, port=80, usr=None, pwd=None, ssl=False): + """Establish a session with the APIC.""" + super(RestClient, self).__init__(host, port, usr, pwd, ssl) + + def __getattr__(self, mo_class): + """Add supported MOs as properties on demand.""" + if mo_class not in ManagedObjectClass.supported_mos: + raise cexc.ApicManagedObjectNotSupported(mo_class=mo_class) + self.__dict__[mo_class] = ManagedObjectAccess(self, mo_class) + return self.__dict__[mo_class] diff --git a/neutron/plugins/ml2/drivers/cisco/apic/apic_manager.py b/neutron/plugins/ml2/drivers/cisco/apic/apic_manager.py new file mode 100644 index 000000000..f86aa597d --- /dev/null +++ b/neutron/plugins/ml2/drivers/cisco/apic/apic_manager.py @@ -0,0 +1,559 @@ +# Copyright (c) 2014 Cisco Systems Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Arvind Somya (asomya@cisco.com), Cisco Systems Inc. 
+ +import itertools +import uuid + +from oslo.config import cfg + +from neutron.openstack.common import excutils +from neutron.plugins.ml2.drivers.cisco.apic import apic_client +from neutron.plugins.ml2.drivers.cisco.apic import apic_model +from neutron.plugins.ml2.drivers.cisco.apic import config +from neutron.plugins.ml2.drivers.cisco.apic import exceptions as cexc + +AP_NAME = 'openstack' +CONTEXT_ENFORCED = '1' +CONTEXT_UNENFORCED = '2' +CONTEXT_DEFAULT = 'default' +DN_KEY = 'dn' +PORT_DN_PATH = 'topology/pod-1/paths-%s/pathep-[eth%s]' +SCOPE_GLOBAL = 'global' +SCOPE_TENANT = 'tenant' +TENANT_COMMON = 'common' + + +def group_by_ranges(i): + """Group a list of numbers into tuples representing contiguous ranges.""" + for a, b in itertools.groupby(enumerate(sorted(i)), lambda (x, y): y - x): + b = list(b) + yield b[0][1], b[-1][1] + + +class APICManager(object): + """Class to manage APIC translations and workflow. + + This class manages translation from Neutron objects to APIC + managed objects and contains workflows to implement these + translations. + """ + def __init__(self): + self.db = apic_model.ApicDbModel() + + apic_conf = cfg.CONF.ml2_cisco_apic + self.switch_dict = config.create_switch_dictionary() + + # Connect to the the APIC + self.apic = apic_client.RestClient( + apic_conf.apic_host, + apic_conf.apic_port, + apic_conf.apic_username, + apic_conf.apic_password + ) + + self.port_profiles = {} + self.vmm_domain = None + self.phys_domain = None + self.vlan_ns = None + self.node_profiles = {} + self.entity_profile = None + self.function_profile = None + self.clear_node_profiles = apic_conf.apic_clear_node_profiles + + def ensure_infra_created_on_apic(self): + """Ensure the infrastructure is setup. 
+ + Loop over the switch dictionary from the config and + setup profiles for switches, modules and ports + """ + # Loop over switches + for switch in self.switch_dict: + # Create a node profile for this switch + self.ensure_node_profile_created_for_switch(switch) + + # Check if a port profile exists for this node + ppname = self.check_infra_port_profiles(switch) + + # Gather port ranges for this switch + modules = self.gather_infra_module_ports(switch) + + # Setup each module and port range + for module in modules: + profile = self.db.get_profile_for_module(switch, ppname, + module) + if not profile: + # Create host port selector for this module + hname = uuid.uuid4() + try: + self.apic.infraHPortS.create(ppname, hname, 'range') + # Add relation to the function profile + fpdn = self.function_profile[DN_KEY] + self.apic.infraRsAccBaseGrp.create(ppname, hname, + 'range', tDn=fpdn) + modules[module].sort() + except (cexc.ApicResponseNotOk, KeyError): + with excutils.save_and_reraise_exception(): + self.apic.infraHPortS.delete(ppname, hname, + 'range') + else: + hname = profile.hpselc_id + + ranges = group_by_ranges(modules[module]) + # Add this module and ports to the profile + for prange in ranges: + # Check if this port block is already added to the profile + if not self.db.get_profile_for_module_and_ports( + switch, ppname, module, prange[0], prange[-1]): + # Create port block for this port range + pbname = uuid.uuid4() + self.apic.infraPortBlk.create(ppname, hname, 'range', + pbname, fromCard=module, + toCard=module, + fromPort=str(prange[0]), + toPort=str(prange[-1])) + # Add DB row + self.db.add_profile_for_module_and_ports( + switch, ppname, hname, module, + prange[0], prange[-1]) + + def check_infra_port_profiles(self, switch): + """Check and create infra port profiles for a node.""" + sprofile = self.db.get_port_profile_for_node(switch) + ppname = None + if not sprofile: + # Generate uuid for port profile name + ppname = uuid.uuid4() + try: + # Create port 
profile for this switch + pprofile = self.ensure_port_profile_created_on_apic(ppname) + # Add port profile to node profile + ppdn = pprofile[DN_KEY] + self.apic.infraRsAccPortP.create(switch, ppdn) + except (cexc.ApicResponseNotOk, KeyError): + with excutils.save_and_reraise_exception(): + # Delete port profile + self.apic.infraAccPortP.delete(ppname) + else: + ppname = sprofile.profile_id + + return ppname + + def gather_infra_module_ports(self, switch): + """Build modules and ports per module dictionary.""" + ports = self.switch_dict[switch] + # Gather common modules + modules = {} + for port in ports: + module, sw_port = port.split('/') + if module not in modules: + modules[module] = [] + modules[module].append(int(sw_port)) + + return modules + + def ensure_context_unenforced(self, tenant_id=TENANT_COMMON, + name=CONTEXT_DEFAULT): + """Set the specified tenant's context to unenforced.""" + ctx = self.apic.fvCtx.get(tenant_id, name) + if not ctx: + self.apic.fvCtx.create(tenant_id, name, + pcEnfPref=CONTEXT_UNENFORCED) + elif ctx['pcEnfPref'] != CONTEXT_UNENFORCED: + self.apic.fvCtx.update(tenant_id, name, + pcEnfPref=CONTEXT_UNENFORCED) + + def ensure_context_enforced(self, tenant_id=TENANT_COMMON, + name=CONTEXT_DEFAULT): + """Set the specified tenant's context to enforced.""" + ctx = self.apic.fvCtx.get(tenant_id, name) + if not ctx: + self.apic.fvCtx.create(tenant_id, name, pcEnfPref=CONTEXT_ENFORCED) + elif ctx['pcEnfPref'] != CONTEXT_ENFORCED: + self.apic.fvCtx.update(tenant_id, name, pcEnfPref=CONTEXT_ENFORCED) + + def ensure_entity_profile_created_on_apic(self, name): + """Create the infrastructure entity profile.""" + if self.clear_node_profiles: + self.apic.infraAttEntityP.delete(name) + self.entity_profile = self.apic.infraAttEntityP.get(name) + if not self.entity_profile: + try: + phys_dn = self.phys_domain[DN_KEY] + self.apic.infraAttEntityP.create(name) + # Attach phys domain to entity profile + self.apic.infraRsDomP.create(name, phys_dn) + 
self.entity_profile = self.apic.infraAttEntityP.get(name) + except (cexc.ApicResponseNotOk, KeyError): + with excutils.save_and_reraise_exception(): + # Delete the created entity profile + self.apic.infraAttEntityP.delete(name) + + def ensure_function_profile_created_on_apic(self, name): + """Create the infrastructure function profile.""" + if self.clear_node_profiles: + self.apic.infraAccPortGrp.delete(name) + self.function_profile = self.apic.infraAccPortGrp.get(name) + if not self.function_profile: + try: + self.apic.infraAccPortGrp.create(name) + # Attach entity profile to function profile + entp_dn = self.entity_profile[DN_KEY] + self.apic.infraRsAttEntP.create(name, tDn=entp_dn) + self.function_profile = self.apic.infraAccPortGrp.get(name) + except (cexc.ApicResponseNotOk, KeyError): + with excutils.save_and_reraise_exception(): + # Delete the created function profile + self.apic.infraAccPortGrp.delete(name) + + def ensure_node_profile_created_for_switch(self, switch_id): + """Creates a switch node profile. 
+ + Create a node profile for a switch and add a switch + to the leaf node selector + """ + if self.clear_node_profiles: + self.apic.infraNodeP.delete(switch_id) + self.db.delete_profile_for_node(switch_id) + sobj = self.apic.infraNodeP.get(switch_id) + if not sobj: + try: + # Create Node profile + self.apic.infraNodeP.create(switch_id) + # Create leaf selector + lswitch_id = uuid.uuid4() + self.apic.infraLeafS.create(switch_id, lswitch_id, 'range') + # Add leaf nodes to the selector + name = uuid.uuid4() + self.apic.infraNodeBlk.create(switch_id, lswitch_id, 'range', + name, from_=switch_id, + to_=switch_id) + sobj = self.apic.infraNodeP.get(switch_id) + except (cexc.ApicResponseNotOk, KeyError): + with excutils.save_and_reraise_exception(): + # Remove the node profile + self.apic.infraNodeP.delete(switch_id) + + self.node_profiles[switch_id] = { + 'object': sobj + } + + def ensure_port_profile_created_on_apic(self, name): + """Create a port profile.""" + try: + self.apic.infraAccPortP.create(name) + return self.apic.infraAccPortP.get(name) + except (cexc.ApicResponseNotOk, KeyError): + with excutils.save_and_reraise_exception(): + self.apic.infraAccPortP.delete(name) + + def ensure_vmm_domain_created_on_apic(self, vmm_name, + vlan_ns=None, vxlan_ns=None): + """Create Virtual Machine Manager domain. + + Creates the VMM domain on the APIC and adds a VLAN or VXLAN + namespace to that VMM domain. 
+ TODO (asomya): Add VXLAN support + """ + provider = 'VMware' + if self.clear_node_profiles: + self.apic.vmmDomP.delete(provider, vmm_name) + self.vmm_domain = self.apic.vmmDomP.get(provider, vmm_name) + if not self.vmm_domain: + try: + self.apic.vmmDomP.create(provider, vmm_name) + if vlan_ns: + vlan_ns_dn = vlan_ns[DN_KEY] + self.apic.infraRsVlanNs__vmm.create(provider, vmm_name, + tDn=vlan_ns_dn) + self.vmm_domain = self.apic.vmmDomP.get(provider, vmm_name) + except (cexc.ApicResponseNotOk, KeyError): + with excutils.save_and_reraise_exception(): + # Delete the VMM domain + self.apic.vmmDomP.delete(provider, vmm_name) + + def ensure_phys_domain_created_on_apic(self, phys_name, + vlan_ns=None): + """Create Virtual Machine Manager domain. + + Creates the VMM domain on the APIC and adds a VLAN or VXLAN + namespace to that VMM domain. + TODO (asomya): Add VXLAN support + """ + if self.clear_node_profiles: + self.apic.physDomP.delete(phys_name) + self.phys_domain = self.apic.physDomP.get(phys_name) + if not self.phys_domain: + try: + self.apic.physDomP.create(phys_name) + if vlan_ns: + vlan_ns_dn = vlan_ns[DN_KEY] + self.apic.infraRsVlanNs__phys.create(phys_name, + tDn=vlan_ns_dn) + self.phys_domain = self.apic.physDomP.get(phys_name) + except (cexc.ApicResponseNotOk, KeyError): + with excutils.save_and_reraise_exception(): + # Delete the physical domain + self.apic.physDomP.delete(phys_name) + + def ensure_vlan_ns_created_on_apic(self, name, vlan_min, vlan_max): + """Creates a static VLAN namespace with the given vlan range.""" + ns_args = name, 'static' + if self.clear_node_profiles: + self.apic.fvnsVlanInstP.delete(name, 'dynamic') + self.apic.fvnsVlanInstP.delete(*ns_args) + self.vlan_ns = self.apic.fvnsVlanInstP.get(*ns_args) + if not self.vlan_ns: + try: + self.apic.fvnsVlanInstP.create(*ns_args) + vlan_min = 'vlan-' + vlan_min + vlan_max = 'vlan-' + vlan_max + ns_blk_args = name, 'static', vlan_min, vlan_max + vlan_encap = 
self.apic.fvnsEncapBlk__vlan.get(*ns_blk_args) + if not vlan_encap: + ns_kw_args = { + 'name': 'encap', + 'from': vlan_min, + 'to': vlan_max + } + self.apic.fvnsEncapBlk__vlan.create(*ns_blk_args, + **ns_kw_args) + self.vlan_ns = self.apic.fvnsVlanInstP.get(*ns_args) + return self.vlan_ns + except (cexc.ApicResponseNotOk, KeyError): + with excutils.save_and_reraise_exception(): + # Delete the vlan namespace + self.apic.fvnsVlanInstP.delete(*ns_args) + + def ensure_tenant_created_on_apic(self, tenant_id): + """Make sure a tenant exists on the APIC.""" + if not self.apic.fvTenant.get(tenant_id): + self.apic.fvTenant.create(tenant_id) + + def ensure_bd_created_on_apic(self, tenant_id, bd_id): + """Creates a Bridge Domain on the APIC.""" + if not self.apic.fvBD.get(tenant_id, bd_id): + try: + self.apic.fvBD.create(tenant_id, bd_id) + # Add default context to the BD + self.ensure_context_enforced() + self.apic.fvRsCtx.create(tenant_id, bd_id, + tnFvCtxName=CONTEXT_DEFAULT) + except (cexc.ApicResponseNotOk, KeyError): + with excutils.save_and_reraise_exception(): + # Delete the bridge domain + self.apic.fvBD.delete(tenant_id, bd_id) + + def delete_bd_on_apic(self, tenant_id, bd_id): + """Deletes a Bridge Domain from the APIC.""" + self.apic.fvBD.delete(tenant_id, bd_id) + + def ensure_subnet_created_on_apic(self, tenant_id, bd_id, gw_ip): + """Creates a subnet on the APIC + + The gateway ip (gw_ip) should be specified as a CIDR + e.g. 10.0.0.1/24 + """ + if not self.apic.fvSubnet.get(tenant_id, bd_id, gw_ip): + self.apic.fvSubnet.create(tenant_id, bd_id, gw_ip) + + def ensure_filter_created_on_apic(self, tenant_id, filter_id): + """Create a filter on the APIC.""" + if not self.apic.vzFilter.get(tenant_id, filter_id): + self.apic.vzFilter.create(tenant_id, filter_id) + + def ensure_epg_created_for_network(self, tenant_id, network_id, net_name): + """Creates an End Point Group on the APIC. + + Create a new EPG on the APIC for the network spcified. 
This information + is also tracked in the local DB and associate the bridge domain for the + network with the EPG created. + """ + # Check if an EPG is already present for this network + epg = self.db.get_epg_for_network(network_id) + if epg: + return epg + + # Create a new EPG on the APIC + epg_uid = '-'.join([str(net_name), str(uuid.uuid4())]) + try: + self.apic.fvAEPg.create(tenant_id, AP_NAME, epg_uid) + + # Add bd to EPG + bd = self.apic.fvBD.get(tenant_id, network_id) + bd_name = bd['name'] + + # Create fvRsBd + self.apic.fvRsBd.create(tenant_id, AP_NAME, epg_uid, + tnFvBDName=bd_name) + + # Add EPG to physical domain + phys_dn = self.phys_domain[DN_KEY] + self.apic.fvRsDomAtt.create(tenant_id, AP_NAME, epg_uid, phys_dn) + except (cexc.ApicResponseNotOk, KeyError): + with excutils.save_and_reraise_exception(): + # Delete the EPG + self.apic.fvAEPg.delete(tenant_id, AP_NAME, epg_uid) + + # Stick it in the DB + epg = self.db.write_epg_for_network(network_id, epg_uid) + + return epg + + def delete_epg_for_network(self, tenant_id, network_id): + """Deletes the EPG from the APIC and removes it from the DB.""" + # Check if an EPG is already present for this network + epg = self.db.get_epg_for_network(network_id) + if not epg: + return False + + # Delete this epg + self.apic.fvAEPg.delete(tenant_id, AP_NAME, epg.epg_id) + # Remove DB row + self.db.delete_epg(epg) + + def create_tenant_filter(self, tenant_id): + """Creates a tenant filter and a generic entry under it.""" + fuuid = uuid.uuid4() + try: + # Create a new tenant filter + self.apic.vzFilter.create(tenant_id, fuuid) + # Create a new entry + euuid = uuid.uuid4() + self.apic.vzEntry.create(tenant_id, fuuid, euuid) + return fuuid + except (cexc.ApicResponseNotOk, KeyError): + with excutils.save_and_reraise_exception(): + self.apic.vzFilter.delete(tenant_id, fuuid) + + def set_contract_for_epg(self, tenant_id, epg_id, + contract_id, provider=False): + """Set the contract for an EPG. 
+ + By default EPGs are consumers to a contract. Set provider flag + for a single EPG to act as a contract provider. + """ + if provider: + try: + self.apic.fvRsProv.create(tenant_id, AP_NAME, + epg_id, contract_id) + self.db.set_provider_contract(epg_id) + self.make_tenant_contract_global(tenant_id) + except (cexc.ApicResponseNotOk, KeyError): + with excutils.save_and_reraise_exception(): + self.make_tenant_contract_local(tenant_id) + self.apic.fvRsProv.delete(tenant_id, AP_NAME, + epg_id, contract_id) + else: + self.apic.fvRsCons.create(tenant_id, AP_NAME, epg_id, contract_id) + + def delete_contract_for_epg(self, tenant_id, epg_id, + contract_id, provider=False): + """Delete the contract for an End Point Group. + + Check if the EPG was a provider and attempt to grab another contract + consumer from the DB and set that as the new contract provider. + """ + if provider: + self.apic.fvRsProv.delete(tenant_id, AP_NAME, epg_id, contract_id) + self.db.unset_provider_contract(epg_id) + # Pick out another EPG to set as contract provider + epg = self.db.get_an_epg(epg_id) + self.update_contract_for_epg(tenant_id, epg.epg_id, + contract_id, True) + else: + self.apic.fvRsCons.delete(tenant_id, AP_NAME, epg_id, contract_id) + + def update_contract_for_epg(self, tenant_id, epg_id, + contract_id, provider=False): + """Updates the contract for an End Point Group.""" + self.apic.fvRsCons.delete(tenant_id, AP_NAME, epg_id, contract_id) + self.set_contract_for_epg(tenant_id, epg_id, contract_id, provider) + + def create_tenant_contract(self, tenant_id): + """Creates a tenant contract. + + Create a tenant contract if one doesn't exist. 
Also create a + subject, filter and entry and set the filters to allow all + protocol traffic on all ports + """ + contract = self.db.get_contract_for_tenant(tenant_id) + if not contract: + cuuid = uuid.uuid4() + try: + # Create contract + self.apic.vzBrCP.create(tenant_id, cuuid, scope=SCOPE_TENANT) + acontract = self.apic.vzBrCP.get(tenant_id, cuuid) + # Create subject + suuid = uuid.uuid4() + self.apic.vzSubj.create(tenant_id, cuuid, suuid) + # Create filter and entry + tfilter = self.create_tenant_filter(tenant_id) + # Create interm and outterm + self.apic.vzInTerm.create(tenant_id, cuuid, suuid) + self.apic.vzRsFiltAtt__In.create(tenant_id, cuuid, + suuid, tfilter) + self.apic.vzOutTerm.create(tenant_id, cuuid, suuid) + self.apic.vzRsFiltAtt__Out.create(tenant_id, cuuid, + suuid, tfilter) + # Create contract interface + iuuid = uuid.uuid4() + self.apic.vzCPIf.create(tenant_id, iuuid) + self.apic.vzRsIf.create(tenant_id, iuuid, + tDn=acontract[DN_KEY]) + # Store contract in DB + contract = self.db.write_contract_for_tenant(tenant_id, + cuuid, tfilter) + except (cexc.ApicResponseNotOk, KeyError): + with excutils.save_and_reraise_exception(): + # Delete tenant contract + self.apic.vzBrCP.delete(tenant_id, cuuid) + + return contract + + def make_tenant_contract_global(self, tenant_id): + """Mark the tenant contract's scope to global.""" + contract = self.db.get_contract_for_tenant(tenant_id) + self.apic.vzBrCP.update(tenant_id, contract.contract_id, + scope=SCOPE_GLOBAL) + + def make_tenant_contract_local(self, tenant_id): + """Mark the tenant contract's scope to tenant.""" + contract = self.db.get_contract_for_tenant(tenant_id) + self.apic.vzBrCP.update(tenant_id, contract.contract_id, + scope=SCOPE_TENANT) + + def ensure_path_created_for_port(self, tenant_id, network_id, + host_id, encap, net_name): + """Create path attribute for an End Point Group.""" + encap = 'vlan-' + str(encap) + epg = self.ensure_epg_created_for_network(tenant_id, network_id, + net_name) + 
eid = epg.epg_id + + # Get attached switch and port for this host + host_config = config.get_switch_and_port_for_host(host_id) + if not host_config: + raise cexc.ApicHostNotConfigured(host=host_id) + switch, port = host_config + pdn = PORT_DN_PATH % (switch, port) + + # Check if exists + patt = self.apic.fvRsPathAtt.get(tenant_id, AP_NAME, eid, pdn) + if not patt: + self.apic.fvRsPathAtt.create(tenant_id, AP_NAME, eid, pdn, + encap=encap, mode="regular", + instrImedcy="immediate") diff --git a/neutron/plugins/ml2/drivers/cisco/apic/apic_model.py b/neutron/plugins/ml2/drivers/cisco/apic/apic_model.py new file mode 100644 index 000000000..a3c05d630 --- /dev/null +++ b/neutron/plugins/ml2/drivers/cisco/apic/apic_model.py @@ -0,0 +1,177 @@ +# Copyright (c) 2014 Cisco Systems Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Arvind Somya (asomya@cisco.com), Cisco Systems Inc. 
+ +import sqlalchemy as sa + +from neutron.db import api as db_api +from neutron.db import model_base +from neutron.db import models_v2 + + +class NetworkEPG(model_base.BASEV2): + + """EPG's created on the apic per network.""" + + __tablename__ = 'cisco_ml2_apic_epgs' + + network_id = sa.Column(sa.String(255), nullable=False, primary_key=True) + epg_id = sa.Column(sa.String(64), nullable=False) + segmentation_id = sa.Column(sa.String(64), nullable=False) + provider = sa.Column(sa.Boolean, default=False, nullable=False) + + +class PortProfile(model_base.BASEV2): + + """Port profiles created on the APIC.""" + + __tablename__ = 'cisco_ml2_apic_port_profiles' + + node_id = sa.Column(sa.String(255), nullable=False, primary_key=True) + profile_id = sa.Column(sa.String(64), nullable=False) + hpselc_id = sa.Column(sa.String(64), nullable=False) + module = sa.Column(sa.String(10), nullable=False) + from_port = sa.Column(sa.Integer(), nullable=False) + to_port = sa.Column(sa.Integer(), nullable=False) + + +class TenantContract(model_base.BASEV2, models_v2.HasTenant): + + """Contracts (and Filters) created on the APIC.""" + + __tablename__ = 'cisco_ml2_apic_contracts' + + __table_args__ = (sa.PrimaryKeyConstraint('tenant_id'),) + contract_id = sa.Column(sa.String(64), nullable=False) + filter_id = sa.Column(sa.String(64), nullable=False) + + +class ApicDbModel(object): + + """DB Model to manage all APIC DB interactions.""" + + def __init__(self): + self.session = db_api.get_session() + + def get_port_profile_for_node(self, node_id): + """Returns a port profile for a switch if found in the DB.""" + return self.session.query(PortProfile).filter_by( + node_id=node_id).first() + + def get_profile_for_module_and_ports(self, node_id, profile_id, + module, from_port, to_port): + """Returns profile for module and ports. + + Grabs the profile row from the DB for the specified switch, + module (linecard) and from/to port combination. 
+ """ + return self.session.query(PortProfile).filter_by( + node_id=node_id, + module=module, + profile_id=profile_id, + from_port=from_port, + to_port=to_port).first() + + def get_profile_for_module(self, node_id, profile_id, module): + """Returns the first profile for a switch module from the DB.""" + return self.session.query(PortProfile).filter_by( + node_id=node_id, + profile_id=profile_id, + module=module).first() + + def add_profile_for_module_and_ports(self, node_id, profile_id, + hpselc_id, module, + from_port, to_port): + """Adds a profile for switch, module and port range.""" + row = PortProfile(node_id=node_id, profile_id=profile_id, + hpselc_id=hpselc_id, module=module, + from_port=from_port, to_port=to_port) + self.session.add(row) + self.session.flush() + + def get_provider_contract(self): + """Returns provider EPG from the DB if found.""" + return self.session.query(NetworkEPG).filter_by( + provider=True).first() + + def set_provider_contract(self, epg_id): + """Sets an EPG to be a contract provider.""" + epg = self.session.query(NetworkEPG).filter_by( + epg_id=epg_id).first() + if epg: + epg.provider = True + self.session.merge(epg) + self.session.flush() + + def unset_provider_contract(self, epg_id): + """Sets an EPG to be a contract consumer.""" + epg = self.session.query(NetworkEPG).filter_by( + epg_id=epg_id).first() + if epg: + epg.provider = False + self.session.merge(epg) + self.session.flush() + + def get_an_epg(self, exception): + """Returns an EPG from the DB that does not match the id specified.""" + return self.session.query(NetworkEPG).filter( + NetworkEPG.epg_id != exception).first() + + def get_epg_for_network(self, network_id): + """Returns an EPG for a give neutron network.""" + return self.session.query(NetworkEPG).filter_by( + network_id=network_id).first() + + def write_epg_for_network(self, network_id, epg_uid, segmentation_id='1'): + """Stores EPG details for a network. 
+ + NOTE: Segmentation_id is just a placeholder currently, it will be + populated with a proper segment id once segmentation mgmt is + moved to the APIC. + """ + epg = NetworkEPG(network_id=network_id, epg_id=epg_uid, + segmentation_id=segmentation_id) + self.session.add(epg) + self.session.flush() + return epg + + def delete_epg(self, epg): + """Deletes an EPG from the DB.""" + self.session.delete(epg) + self.session.flush() + + def get_contract_for_tenant(self, tenant_id): + """Returns the specified tenant's contract.""" + return self.session.query(TenantContract).filter_by( + tenant_id=tenant_id).first() + + def write_contract_for_tenant(self, tenant_id, contract_id, filter_id): + """Stores a new contract for the given tenant.""" + contract = TenantContract(tenant_id=tenant_id, + contract_id=contract_id, + filter_id=filter_id) + self.session.add(contract) + self.session.flush() + + return contract + + def delete_profile_for_node(self, node_id): + """Deletes the port profile for a node.""" + profile = self.session.query(PortProfile).filter_by( + node_id=node_id).first() + if profile: + self.session.delete(profile) + self.session.flush() diff --git a/neutron/plugins/ml2/drivers/cisco/apic/config.py b/neutron/plugins/ml2/drivers/cisco/apic/config.py new file mode 100644 index 000000000..c5c43f28f --- /dev/null +++ b/neutron/plugins/ml2/drivers/cisco/apic/config.py @@ -0,0 +1,82 @@ +# Copyright (c) 2014 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Arvind Somya (asomya@cisco.com), Cisco Systems Inc. + +from oslo.config import cfg + + +apic_opts = [ + cfg.StrOpt('apic_host', + help=_("Host name or IP Address of the APIC controller")), + cfg.StrOpt('apic_username', + help=_("Username for the APIC controller")), + cfg.StrOpt('apic_password', + help=_("Password for the APIC controller"), secret=True), + cfg.StrOpt('apic_port', + help=_("Communication port for the APIC controller")), + cfg.StrOpt('apic_vmm_provider', default='VMware', + help=_("Name for the VMM domain provider")), + cfg.StrOpt('apic_vmm_domain', default='openstack', + help=_("Name for the VMM domain to be created for Openstack")), + cfg.StrOpt('apic_vlan_ns_name', default='openstack_ns', + help=_("Name for the vlan namespace to be used for openstack")), + cfg.StrOpt('apic_vlan_range', default='2:4093', + help=_("Range of VLAN's to be used for Openstack")), + cfg.StrOpt('apic_node_profile', default='openstack_profile', + help=_("Name of the node profile to be created")), + cfg.StrOpt('apic_entity_profile', default='openstack_entity', + help=_("Name of the entity profile to be created")), + cfg.StrOpt('apic_function_profile', default='openstack_function', + help=_("Name of the function profile to be created")), + cfg.BoolOpt('apic_clear_node_profiles', default=False, + help=_("Clear the node profiles on the APIC at startup " + "(mainly used for testing)")), +] + + +cfg.CONF.register_opts(apic_opts, "ml2_cisco_apic") + + +def get_switch_and_port_for_host(host_id): + for switch, connected in _switch_dict.items(): + for port, hosts in connected.items(): + if host_id in hosts: + return switch, port + + +_switch_dict = {} + + +def create_switch_dictionary(): + multi_parser = cfg.MultiConfigParser() + read_ok = multi_parser.read(cfg.CONF.config_file) + + if len(read_ok) != len(cfg.CONF.config_file): + raise cfg.Error(_("Some config files 
were not parsed properly")) + + for parsed_file in multi_parser.parsed: + for parsed_item in parsed_file.keys(): + if parsed_item.startswith('apic_switch'): + switch, switch_id = parsed_item.split(':') + if switch.lower() == 'apic_switch': + _switch_dict[switch_id] = {} + port_cfg = parsed_file[parsed_item].items() + for host_list, port in port_cfg: + hosts = host_list.split(',') + port = port[0] + _switch_dict[switch_id][port] = hosts + + return _switch_dict diff --git a/neutron/plugins/ml2/drivers/cisco/apic/exceptions.py b/neutron/plugins/ml2/drivers/cisco/apic/exceptions.py new file mode 100644 index 000000000..b33abb17d --- /dev/null +++ b/neutron/plugins/ml2/drivers/cisco/apic/exceptions.py @@ -0,0 +1,59 @@ +# Copyright (c) 2014 Cisco Systems +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# +# @author: Henry Gessau, Cisco Systems + +"""Exceptions used by Cisco APIC ML2 mechanism driver.""" + +from neutron.common import exceptions + + +class ApicHostNoResponse(exceptions.NotFound): + """No response from the APIC via the specified URL.""" + message = _("No response from APIC at %(url)s") + + +class ApicResponseNotOk(exceptions.NeutronException): + """A response from the APIC was not HTTP OK.""" + message = _("APIC responded with HTTP status %(status)s: %(reason)s, " + "Request: '%(request)s', " + "APIC error code %(err_code)s: %(err_text)s") + + +class ApicResponseNoCookie(exceptions.NeutronException): + """A response from the APIC did not contain an expected cookie.""" + message = _("APIC failed to provide cookie for %(request)s request") + + +class ApicSessionNotLoggedIn(exceptions.NotAuthorized): + """Attempted APIC operation while not logged in to APIC.""" + message = _("Authorized APIC session not established") + + +class ApicHostNotConfigured(exceptions.NotAuthorized): + """The switch and port for the specified host are not configured.""" + message = _("The switch and port for host '%(host)s' are not configured") + + +class ApicManagedObjectNotSupported(exceptions.NeutronException): + """Attempted to use an unsupported Managed Object.""" + message = _("Managed Object '%(mo_class)s' is not supported") + + +class ApicMultipleVlanRanges(exceptions.NeutronException): + """Multiple VLAN ranges specified.""" + message = _("Multiple VLAN ranges are not supported in the APIC plugin. " + "Please specify a single VLAN range. " + "Current config: '%(vlan_ranges)s'") diff --git a/neutron/plugins/ml2/drivers/cisco/apic/mechanism_apic.py b/neutron/plugins/ml2/drivers/cisco/apic/mechanism_apic.py new file mode 100644 index 000000000..d5297df68 --- /dev/null +++ b/neutron/plugins/ml2/drivers/cisco/apic/mechanism_apic.py @@ -0,0 +1,150 @@ +# Copyright (c) 2014 Cisco Systems Inc. +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Arvind Somya (asomya@cisco.com), Cisco Systems Inc. + +import netaddr + +from oslo.config import cfg + +from neutron.extensions import portbindings +from neutron.openstack.common import log +from neutron.plugins.common import constants +from neutron.plugins.ml2 import driver_api as api +from neutron.plugins.ml2.drivers.cisco.apic import apic_manager +from neutron.plugins.ml2.drivers.cisco.apic import exceptions as apic_exc + + +LOG = log.getLogger(__name__) + + +class APICMechanismDriver(api.MechanismDriver): + + def initialize(self): + self.apic_manager = apic_manager.APICManager() + + # Create a Phys domain and VLAN namespace + # Get vlan ns name + ns_name = cfg.CONF.ml2_cisco_apic.apic_vlan_ns_name + + # Grab vlan ranges + if len(cfg.CONF.ml2_type_vlan.network_vlan_ranges) != 1: + raise apic_exc.ApicMultipleVlanRanges( + cfg.CONF.ml2_type_vlan.network_vlan_ranges) + vlan_ranges = cfg.CONF.ml2_type_vlan.network_vlan_ranges[0] + if ',' in vlan_ranges: + raise apic_exc.ApicMultipleVlanRanges(vlan_ranges) + (vlan_min, vlan_max) = vlan_ranges.split(':')[-2:] + + # Create VLAN namespace + vlan_ns = self.apic_manager.ensure_vlan_ns_created_on_apic(ns_name, + vlan_min, + vlan_max) + phys_name = cfg.CONF.ml2_cisco_apic.apic_vmm_domain + # Create Physical domain + self.apic_manager.ensure_phys_domain_created_on_apic(phys_name, + vlan_ns) + + # Create entity profile + ent_name = 
cfg.CONF.ml2_cisco_apic.apic_entity_profile + self.apic_manager.ensure_entity_profile_created_on_apic(ent_name) + + # Create function profile + func_name = cfg.CONF.ml2_cisco_apic.apic_function_profile + self.apic_manager.ensure_function_profile_created_on_apic(func_name) + + # Create infrastructure on apic + self.apic_manager.ensure_infra_created_on_apic() + + def _perform_port_operations(self, context): + # Get tenant details from port context + tenant_id = context.current['tenant_id'] + + # Get network + network = context.network.current['id'] + net_name = context.network.current['name'] + + # Get port + port = context.current + + # Get segmentation id + if not context.bound_segment: + LOG.debug(_("Port %s is not bound to a segment"), port) + return + seg = None + if (context.bound_segment.get(api.NETWORK_TYPE) in + [constants.TYPE_VLAN]): + seg = context.bound_segment.get(api.SEGMENTATION_ID) + + # Check if a compute port + if not port['device_owner'].startswith('compute'): + # Not a compute port, return + return + + host = port.get(portbindings.HOST_ID) + # Check host that the dhcp agent is running on + filters = {'device_owner': 'network:dhcp', + 'network_id': network} + dhcp_ports = context._plugin.get_ports(context._plugin_context, + filters=filters) + dhcp_hosts = [] + for dhcp_port in dhcp_ports: + dhcp_hosts.append(dhcp_port.get(portbindings.HOST_ID)) + + # Create a static path attachment for this host/epg/switchport combo + self.apic_manager.ensure_tenant_created_on_apic(tenant_id) + if dhcp_hosts: + for dhcp_host in dhcp_hosts: + self.apic_manager.ensure_path_created_for_port(tenant_id, + network, + dhcp_host, seg, + net_name) + if host not in dhcp_hosts: + self.apic_manager.ensure_path_created_for_port(tenant_id, network, + host, seg, net_name) + + def create_port_postcommit(self, context): + self._perform_port_operations(context) + + def update_port_postcommit(self, context): + self._perform_port_operations(context) + + def 
create_network_postcommit(self, context): + net_id = context.current['id'] + tenant_id = context.current['tenant_id'] + net_name = context.current['name'] + + self.apic_manager.ensure_bd_created_on_apic(tenant_id, net_id) + # Create EPG for this network + self.apic_manager.ensure_epg_created_for_network(tenant_id, net_id, + net_name) + + def delete_network_postcommit(self, context): + net_id = context.current['id'] + tenant_id = context.current['tenant_id'] + + self.apic_manager.delete_bd_on_apic(tenant_id, net_id) + self.apic_manager.delete_epg_for_network(tenant_id, net_id) + + def create_subnet_postcommit(self, context): + tenant_id = context.current['tenant_id'] + network_id = context.current['network_id'] + gateway_ip = context.current['gateway_ip'] + cidr = netaddr.IPNetwork(context.current['cidr']) + netmask = str(cidr.prefixlen) + gateway_ip = gateway_ip + '/' + netmask + + self.apic_manager.ensure_subnet_created_on_apic(tenant_id, network_id, + gateway_ip) diff --git a/neutron/plugins/ml2/drivers/cisco/nexus/README b/neutron/plugins/ml2/drivers/cisco/nexus/README new file mode 100644 index 000000000..21905b036 --- /dev/null +++ b/neutron/plugins/ml2/drivers/cisco/nexus/README @@ -0,0 +1,19 @@ +Neutron ML2 Cisco Nexus Mechanism Driver README + + +Notes: + +The initial version of this driver supports only a single physical +network. + +For provider networks, extended configuration options are not +currently supported. + +This driver's database may have duplicate entries also found in the +core ML2 database. Since the Cisco Nexus DB code is a port from the +plugins/cisco implementation this duplication will remain until the +plugins/cisco code is deprecated. 
+ + +For more details on using Cisco Nexus switches under ML2 please refer to: +http://wiki.openstack.org/wiki/Neutron/ML2/MechCiscoNexus diff --git a/neutron/plugins/ml2/drivers/cisco/nexus/__init__.py b/neutron/plugins/ml2/drivers/cisco/nexus/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/neutron/plugins/ml2/drivers/cisco/nexus/config.py b/neutron/plugins/ml2/drivers/cisco/nexus/config.py new file mode 100644 index 000000000..3be443088 --- /dev/null +++ b/neutron/plugins/ml2/drivers/cisco/nexus/config.py @@ -0,0 +1,65 @@ +# Copyright (c) 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from oslo.config import cfg + + +ml2_cisco_opts = [ + cfg.StrOpt('vlan_name_prefix', default='q-', + help=_("VLAN Name prefix")), + cfg.BoolOpt('svi_round_robin', default=False, + help=_("Distribute SVI interfaces over all switches")), + cfg.StrOpt('managed_physical_network', + help=_("The physical network managed by the switches.")), +] + + +cfg.CONF.register_opts(ml2_cisco_opts, "ml2_cisco") + +# +# Format for ml2_conf_cisco.ini 'ml2_mech_cisco_nexus' is: +# {('', ''): '', ...} +# +# Example: +# {('1.1.1.1', 'username'): 'admin', +# ('1.1.1.1', 'password'): 'mySecretPassword', +# ('1.1.1.1', 'compute1'): '1/1', ...} +# + + +class ML2MechCiscoConfig(object): + """ML2 Mechanism Driver Cisco Configuration class.""" + nexus_dict = {} + + def __init__(self): + self._create_ml2_mech_device_cisco_dictionary() + + def _create_ml2_mech_device_cisco_dictionary(self): + """Create the ML2 device cisco dictionary. + + Read data from the ml2_conf_cisco.ini device supported sections. + """ + multi_parser = cfg.MultiConfigParser() + read_ok = multi_parser.read(cfg.CONF.config_file) + + if len(read_ok) != len(cfg.CONF.config_file): + raise cfg.Error(_("Some config files were not parsed properly")) + + for parsed_file in multi_parser.parsed: + for parsed_item in parsed_file.keys(): + dev_id, sep, dev_ip = parsed_item.partition(':') + if dev_id.lower() == 'ml2_mech_cisco_nexus': + for dev_key, value in parsed_file[parsed_item].items(): + self.nexus_dict[dev_ip, dev_key] = value[0] diff --git a/neutron/plugins/ml2/drivers/cisco/nexus/constants.py b/neutron/plugins/ml2/drivers/cisco/nexus/constants.py new file mode 100644 index 000000000..f3191b0b2 --- /dev/null +++ b/neutron/plugins/ml2/drivers/cisco/nexus/constants.py @@ -0,0 +1,24 @@ +# Copyright 2011 OpenStack Foundation. +# All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + + +CREDENTIAL_USERNAME = 'user_name' +CREDENTIAL_PASSWORD = 'password' + +USERNAME = 'username' +PASSWORD = 'password' + +NETWORK_ADMIN = 'network_admin' diff --git a/neutron/plugins/ml2/drivers/cisco/nexus/exceptions.py b/neutron/plugins/ml2/drivers/cisco/nexus/exceptions.py new file mode 100644 index 000000000..9302f30de --- /dev/null +++ b/neutron/plugins/ml2/drivers/cisco/nexus/exceptions.py @@ -0,0 +1,84 @@ +# Copyright (c) 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +"""Exceptions used by Cisco Nexus ML2 mechanism driver.""" + +from neutron.common import exceptions + + +class CredentialNotFound(exceptions.NeutronException): + """Credential with this ID cannot be found.""" + message = _("Credential %(credential_id)s could not be found.") + + +class CredentialNameNotFound(exceptions.NeutronException): + """Credential Name could not be found.""" + message = _("Credential %(credential_name)s could not be found.") + + +class CredentialAlreadyExists(exceptions.NeutronException): + """Credential name already exists.""" + message = _("Credential %(credential_name)s already exists " + "for tenant %(tenant_id)s.") + + +class NexusComputeHostNotConfigured(exceptions.NeutronException): + """Connection to compute host is not configured.""" + message = _("Connection to %(host)s is not configured.") + + +class NexusConnectFailed(exceptions.NeutronException): + """Failed to connect to Nexus switch.""" + message = _("Unable to connect to Nexus %(nexus_host)s. Reason: %(exc)s.") + + +class NexusConfigFailed(exceptions.NeutronException): + """Failed to configure Nexus switch.""" + message = _("Failed to configure Nexus: %(config)s. 
Reason: %(exc)s.") + + +class NexusPortBindingNotFound(exceptions.NeutronException): + """NexusPort Binding is not present.""" + message = _("Nexus Port Binding (%(filters)s) is not present") + + def __init__(self, **kwargs): + filters = ','.join('%s=%s' % i for i in kwargs.items()) + super(NexusPortBindingNotFound, self).__init__(filters=filters) + + +class NexusMissingRequiredFields(exceptions.NeutronException): + """Missing required fields to configure nexus switch.""" + message = _("Missing required field(s) to configure nexus switch: " + "%(fields)s") + + +class NoNexusSviSwitch(exceptions.NeutronException): + """No usable nexus switch found.""" + message = _("No usable Nexus switch found to create SVI interface.") + + +class SubnetNotSpecified(exceptions.NeutronException): + """Subnet id not specified.""" + message = _("No subnet_id specified for router gateway.") + + +class SubnetInterfacePresent(exceptions.NeutronException): + """Subnet SVI interface already exists.""" + message = _("Subnet %(subnet_id)s has an interface on %(router_id)s.") + + +class PortIdForNexusSvi(exceptions.NeutronException): + """Port Id specified for Nexus SVI.""" + message = _('Nexus hardware router gateway only uses Subnet Ids.') diff --git a/neutron/plugins/ml2/drivers/cisco/nexus/mech_cisco_nexus.py b/neutron/plugins/ml2/drivers/cisco/nexus/mech_cisco_nexus.py new file mode 100644 index 000000000..8db752829 --- /dev/null +++ b/neutron/plugins/ml2/drivers/cisco/nexus/mech_cisco_nexus.py @@ -0,0 +1,219 @@ +# Copyright 2013 OpenStack Foundation +# All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +ML2 Mechanism Driver for Cisco Nexus platforms. +""" + +from oslo.config import cfg + +from neutron.common import constants as n_const +from neutron.extensions import portbindings +from neutron.openstack.common import log as logging +from neutron.plugins.common import constants as p_const +from neutron.plugins.ml2 import driver_api as api +from neutron.plugins.ml2.drivers.cisco.nexus import config as conf +from neutron.plugins.ml2.drivers.cisco.nexus import exceptions as excep +from neutron.plugins.ml2.drivers.cisco.nexus import nexus_db_v2 as nxos_db +from neutron.plugins.ml2.drivers.cisco.nexus import nexus_network_driver + +LOG = logging.getLogger(__name__) + + +class CiscoNexusMechanismDriver(api.MechanismDriver): + + """Cisco Nexus ML2 Mechanism Driver.""" + + def initialize(self): + # Create ML2 device dictionary from ml2_conf.ini entries. + conf.ML2MechCiscoConfig() + + # Extract configuration parameters from the configuration file. 
+ self._nexus_switches = conf.ML2MechCiscoConfig.nexus_dict + LOG.debug(_("nexus_switches found = %s"), self._nexus_switches) + + self.driver = nexus_network_driver.CiscoNexusDriver() + + def _valid_network_segment(self, segment): + return (cfg.CONF.ml2_cisco.managed_physical_network is None or + cfg.CONF.ml2_cisco.managed_physical_network == + segment[api.PHYSICAL_NETWORK]) + + def _get_vlanid(self, segment): + if (segment and segment[api.NETWORK_TYPE] == p_const.TYPE_VLAN and + self._valid_network_segment(segment)): + return segment.get(api.SEGMENTATION_ID) + + def _is_deviceowner_compute(self, port): + return port['device_owner'].startswith('compute') + + def _is_status_active(self, port): + return port['status'] == n_const.PORT_STATUS_ACTIVE + + def _get_switch_info(self, host_id): + host_connections = [] + for switch_ip, attr in self._nexus_switches: + if str(attr) == str(host_id): + port_id = self._nexus_switches[switch_ip, attr] + if ':' in port_id: + intf_type, port = port_id.split(':') + else: + intf_type, port = 'ethernet', port_id + host_connections.append((switch_ip, intf_type, port)) + + if host_connections: + return host_connections + else: + raise excep.NexusComputeHostNotConfigured(host=host_id) + + def _configure_nxos_db(self, vlan_id, device_id, host_id): + """Create the nexus database entry. + + Called during update precommit port event. + """ + host_connections = self._get_switch_info(host_id) + for switch_ip, intf_type, nexus_port in host_connections: + port_id = '%s:%s' % (intf_type, nexus_port) + nxos_db.add_nexusport_binding(port_id, str(vlan_id), switch_ip, + device_id) + + def _configure_switch_entry(self, vlan_id, device_id, host_id): + """Create a nexus switch entry. + + if needed, create a VLAN in the appropriate switch/port and + configure the appropriate interfaces for this VLAN. + + Called during update postcommit port event. 
+ """ + vlan_name = cfg.CONF.ml2_cisco.vlan_name_prefix + str(vlan_id) + host_connections = self._get_switch_info(host_id) + + for switch_ip, intf_type, nexus_port in host_connections: + # Check to see if this is the first binding to use this vlan on the + # switch/port. Configure switch accordingly. + bindings = nxos_db.get_nexusvlan_binding(vlan_id, switch_ip) + if len(bindings) == 1: + LOG.debug(_("Nexus: create & trunk vlan %s"), vlan_name) + self.driver.create_and_trunk_vlan( + switch_ip, vlan_id, vlan_name, intf_type, nexus_port) + else: + LOG.debug(_("Nexus: trunk vlan %s"), vlan_name) + self.driver.enable_vlan_on_trunk_int(switch_ip, vlan_id, + intf_type, nexus_port) + + def _delete_nxos_db(self, vlan_id, device_id, host_id): + """Delete the nexus database entry. + + Called during delete precommit port event. + """ + try: + rows = nxos_db.get_nexusvm_bindings(vlan_id, device_id) + for row in rows: + nxos_db.remove_nexusport_binding( + row.port_id, row.vlan_id, row.switch_ip, row.instance_id) + except excep.NexusPortBindingNotFound: + return + + def _delete_switch_entry(self, vlan_id, device_id, host_id): + """Delete the nexus switch entry. + + By accessing the current db entries determine if switch + configuration can be removed. + + Called during update postcommit port event. + """ + host_connections = self._get_switch_info(host_id) + for switch_ip, intf_type, nexus_port in host_connections: + # if there are no remaining db entries using this vlan on this + # nexus switch port then remove vlan from the switchport trunk. + port_id = '%s:%s' % (intf_type, nexus_port) + try: + nxos_db.get_port_vlan_switch_binding(port_id, vlan_id, + switch_ip) + except excep.NexusPortBindingNotFound: + self.driver.disable_vlan_on_trunk_int(switch_ip, vlan_id, + intf_type, nexus_port) + + # if there are no remaining db entries using this vlan on this + # nexus switch then remove the vlan. 
+ try: + nxos_db.get_nexusvlan_binding(vlan_id, switch_ip) + except excep.NexusPortBindingNotFound: + self.driver.delete_vlan(switch_ip, vlan_id) + + def _is_vm_migration(self, context): + if not context.bound_segment and context.original_bound_segment: + return (context.current.get(portbindings.HOST_ID) != + context.original.get(portbindings.HOST_ID)) + + def _port_action(self, port, segment, func): + """Verify configuration and then process event.""" + device_id = port.get('device_id') + host_id = port.get(portbindings.HOST_ID) + vlan_id = self._get_vlanid(segment) + + if vlan_id and device_id and host_id: + func(vlan_id, device_id, host_id) + else: + fields = "vlan_id " if not vlan_id else "" + fields += "device_id " if not device_id else "" + fields += "host_id" if not host_id else "" + raise excep.NexusMissingRequiredFields(fields=fields) + + def update_port_precommit(self, context): + """Update port pre-database transaction commit event.""" + + # if VM migration is occurring then remove previous database entry + # else process update event. + if self._is_vm_migration(context): + self._port_action(context.original, + context.original_bound_segment, + self._delete_nxos_db) + else: + if (self._is_deviceowner_compute(context.current) and + self._is_status_active(context.current)): + self._port_action(context.current, + context.bound_segment, + self._configure_nxos_db) + + def update_port_postcommit(self, context): + """Update port non-database commit event.""" + + # if VM migration is occurring then remove previous nexus switch entry + # else process update event. 
+ if self._is_vm_migration(context): + self._port_action(context.original, + context.original_bound_segment, + self._delete_switch_entry) + else: + if (self._is_deviceowner_compute(context.current) and + self._is_status_active(context.current)): + self._port_action(context.current, + context.bound_segment, + self._configure_switch_entry) + + def delete_port_precommit(self, context): + """Delete port pre-database commit event.""" + if self._is_deviceowner_compute(context.current): + self._port_action(context.current, + context.bound_segment, + self._delete_nxos_db) + + def delete_port_postcommit(self, context): + """Delete port non-database commit event.""" + if self._is_deviceowner_compute(context.current): + self._port_action(context.current, + context.bound_segment, + self._delete_switch_entry) diff --git a/neutron/plugins/ml2/drivers/cisco/nexus/nexus_db_v2.py b/neutron/plugins/ml2/drivers/cisco/nexus/nexus_db_v2.py new file mode 100644 index 000000000..081b0d0a0 --- /dev/null +++ b/neutron/plugins/ml2/drivers/cisco/nexus/nexus_db_v2.py @@ -0,0 +1,143 @@ +# Copyright (c) 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +import sqlalchemy.orm.exc as sa_exc + +import neutron.db.api as db +from neutron.openstack.common import log as logging +from neutron.plugins.ml2.drivers.cisco.nexus import exceptions as c_exc +from neutron.plugins.ml2.drivers.cisco.nexus import nexus_models_v2 + + +LOG = logging.getLogger(__name__) + + +def get_nexusport_binding(port_id, vlan_id, switch_ip, instance_id): + """Lists a nexusport binding.""" + LOG.debug(_("get_nexusport_binding() called")) + return _lookup_all_nexus_bindings(port_id=port_id, + vlan_id=vlan_id, + switch_ip=switch_ip, + instance_id=instance_id) + + +def get_nexusvlan_binding(vlan_id, switch_ip): + """Lists a vlan and switch binding.""" + LOG.debug(_("get_nexusvlan_binding() called")) + return _lookup_all_nexus_bindings(vlan_id=vlan_id, switch_ip=switch_ip) + + +def add_nexusport_binding(port_id, vlan_id, switch_ip, instance_id): + """Adds a nexusport binding.""" + LOG.debug(_("add_nexusport_binding() called")) + session = db.get_session() + binding = nexus_models_v2.NexusPortBinding(port_id=port_id, + vlan_id=vlan_id, + switch_ip=switch_ip, + instance_id=instance_id) + session.add(binding) + session.flush() + return binding + + +def remove_nexusport_binding(port_id, vlan_id, switch_ip, instance_id): + """Removes a nexusport binding.""" + LOG.debug(_("remove_nexusport_binding() called")) + session = db.get_session() + binding = _lookup_all_nexus_bindings(session=session, + vlan_id=vlan_id, + switch_ip=switch_ip, + port_id=port_id, + instance_id=instance_id) + for bind in binding: + session.delete(bind) + session.flush() + return binding + + +def update_nexusport_binding(port_id, new_vlan_id): + """Updates nexusport binding.""" + if not new_vlan_id: + LOG.warning(_("update_nexusport_binding called with no vlan")) + return + LOG.debug(_("update_nexusport_binding called")) + session = db.get_session() + binding = _lookup_one_nexus_binding(session=session, port_id=port_id) + binding.vlan_id = new_vlan_id + session.merge(binding) + 
session.flush() + return binding + + +def get_nexusvm_bindings(vlan_id, instance_id): + """Lists nexusvm bindings.""" + LOG.debug(_("get_nexusvm_bindings() called")) + return _lookup_all_nexus_bindings(instance_id=instance_id, + vlan_id=vlan_id) + + +def get_port_vlan_switch_binding(port_id, vlan_id, switch_ip): + """Lists nexusvm bindings.""" + LOG.debug(_("get_port_vlan_switch_binding() called")) + return _lookup_all_nexus_bindings(port_id=port_id, + switch_ip=switch_ip, + vlan_id=vlan_id) + + +def get_port_switch_bindings(port_id, switch_ip): + """List all vm/vlan bindings on a Nexus switch port.""" + LOG.debug(_("get_port_switch_bindings() called, " + "port:'%(port_id)s', switch:'%(switch_ip)s'"), + {'port_id': port_id, 'switch_ip': switch_ip}) + try: + return _lookup_all_nexus_bindings(port_id=port_id, + switch_ip=switch_ip) + except c_exc.NexusPortBindingNotFound: + pass + + +def _lookup_nexus_bindings(query_type, session=None, **bfilter): + """Look up 'query_type' Nexus bindings matching the filter. + + :param query_type: 'all', 'one' or 'first' + :param session: db session + :param bfilter: filter for bindings query + :return: bindings if query gave a result, else + raise NexusPortBindingNotFound. 
+ """ + if session is None: + session = db.get_session() + query_method = getattr(session.query( + nexus_models_v2.NexusPortBinding).filter_by(**bfilter), query_type) + try: + bindings = query_method() + if bindings: + return bindings + except sa_exc.NoResultFound: + pass + raise c_exc.NexusPortBindingNotFound(**bfilter) + + +def _lookup_all_nexus_bindings(session=None, **bfilter): + return _lookup_nexus_bindings('all', session, **bfilter) + + +def _lookup_one_nexus_binding(session=None, **bfilter): + return _lookup_nexus_bindings('one', session, **bfilter) + + +def _lookup_first_nexus_binding(session=None, **bfilter): + return _lookup_nexus_bindings('first', session, **bfilter) diff --git a/neutron/plugins/ml2/drivers/cisco/nexus/nexus_models_v2.py b/neutron/plugins/ml2/drivers/cisco/nexus/nexus_models_v2.py new file mode 100644 index 000000000..ce7c41663 --- /dev/null +++ b/neutron/plugins/ml2/drivers/cisco/nexus/nexus_models_v2.py @@ -0,0 +1,45 @@ +# Copyright (c) 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ + +import sqlalchemy as sa + +from neutron.db import model_base + + +class NexusPortBinding(model_base.BASEV2): + """Represents a binding of VM's to nexus ports.""" + + __tablename__ = "cisco_ml2_nexusport_bindings" + + binding_id = sa.Column(sa.Integer, primary_key=True, autoincrement=True) + port_id = sa.Column(sa.String(255)) + vlan_id = sa.Column(sa.Integer, nullable=False) + switch_ip = sa.Column(sa.String(255)) + instance_id = sa.Column(sa.String(255)) + + def __repr__(self): + """Just the binding, without the id key.""" + return ("" % + (self.port_id, self.vlan_id, self.switch_ip, self.instance_id)) + + def __eq__(self, other): + """Compare only the binding, without the id key.""" + return ( + self.port_id == other.port_id and + self.vlan_id == other.vlan_id and + self.switch_ip == other.switch_ip and + self.instance_id == other.instance_id + ) diff --git a/neutron/plugins/ml2/drivers/cisco/nexus/nexus_network_driver.py b/neutron/plugins/ml2/drivers/cisco/nexus/nexus_network_driver.py new file mode 100644 index 000000000..983678d11 --- /dev/null +++ b/neutron/plugins/ml2/drivers/cisco/nexus/nexus_network_driver.py @@ -0,0 +1,171 @@ +# Copyright 2013 OpenStack Foundation +# All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +""" +Implements a Nexus-OS NETCONF over SSHv2 API Client +""" + +from neutron.openstack.common import excutils +from neutron.openstack.common import importutils +from neutron.openstack.common import log as logging +from neutron.plugins.ml2.drivers.cisco.nexus import config as conf +from neutron.plugins.ml2.drivers.cisco.nexus import constants as const +from neutron.plugins.ml2.drivers.cisco.nexus import exceptions as cexc +from neutron.plugins.ml2.drivers.cisco.nexus import nexus_db_v2 +from neutron.plugins.ml2.drivers.cisco.nexus import nexus_snippets as snipp + +LOG = logging.getLogger(__name__) + + +class CiscoNexusDriver(object): + """Nexus Driver Main Class.""" + def __init__(self): + self.ncclient = None + self.nexus_switches = conf.ML2MechCiscoConfig.nexus_dict + self.connections = {} + + def _import_ncclient(self): + """Import the NETCONF client (ncclient) module. + + The ncclient module is not installed as part of the normal Neutron + distributions. It is imported dynamically in this module so that + the import can be mocked, allowing unit testing without requiring + the installation of ncclient. + + """ + return importutils.import_module('ncclient.manager') + + def _edit_config(self, nexus_host, target='running', config='', + allowed_exc_strs=None): + """Modify switch config for a target config type. + + :param nexus_host: IP address of switch to configure + :param target: Target config type + :param config: Configuration string in XML format + :param allowed_exc_strs: Exceptions which have any of these strings + as a subset of their exception message + (str(exception)) can be ignored + + :raises: NexusConfigFailed + + """ + if not allowed_exc_strs: + allowed_exc_strs = [] + mgr = self.nxos_connect(nexus_host) + try: + mgr.edit_config(target, config=config) + except Exception as e: + for exc_str in allowed_exc_strs: + if exc_str in str(e): + break + else: + # Raise a Neutron exception. Include a description of + # the original ncclient exception. 
+ raise cexc.NexusConfigFailed(config=config, exc=e) + + def nxos_connect(self, nexus_host): + """Make SSH connection to the Nexus Switch.""" + if getattr(self.connections.get(nexus_host), 'connected', None): + return self.connections[nexus_host] + + if not self.ncclient: + self.ncclient = self._import_ncclient() + nexus_ssh_port = int(self.nexus_switches[nexus_host, 'ssh_port']) + nexus_user = self.nexus_switches[nexus_host, const.USERNAME] + nexus_password = self.nexus_switches[nexus_host, const.PASSWORD] + try: + man = self.ncclient.connect(host=nexus_host, + port=nexus_ssh_port, + username=nexus_user, + password=nexus_password) + self.connections[nexus_host] = man + except Exception as e: + # Raise a Neutron exception. Include a description of + # the original ncclient exception. + raise cexc.NexusConnectFailed(nexus_host=nexus_host, exc=e) + + return self.connections[nexus_host] + + def create_xml_snippet(self, customized_config): + """Create XML snippet. + + Creates the Proper XML structure for the Nexus Switch Configuration. + """ + conf_xml_snippet = snipp.EXEC_CONF_SNIPPET % (customized_config) + return conf_xml_snippet + + def create_vlan(self, nexus_host, vlanid, vlanname): + """Create a VLAN on Nexus Switch given the VLAN ID and Name.""" + confstr = self.create_xml_snippet( + snipp.CMD_VLAN_CONF_SNIPPET % (vlanid, vlanname)) + LOG.debug(_("NexusDriver: %s"), confstr) + self._edit_config(nexus_host, target='running', config=confstr) + + # Enable VLAN active and no-shutdown states. Some versions of + # Nexus switch do not allow state changes for the extended VLAN + # range (1006-4094), but these errors can be ignored (default + # values are appropriate). 
+ for snippet in [snipp.CMD_VLAN_ACTIVE_SNIPPET, + snipp.CMD_VLAN_NO_SHUTDOWN_SNIPPET]: + try: + confstr = self.create_xml_snippet(snippet % vlanid) + self._edit_config( + nexus_host, + target='running', + config=confstr, + allowed_exc_strs=["Can't modify state for extended", + "Command is only allowed on VLAN"]) + except cexc.NexusConfigFailed: + with excutils.save_and_reraise_exception(): + self.delete_vlan(nexus_host, vlanid) + + def delete_vlan(self, nexus_host, vlanid): + """Delete a VLAN on Nexus Switch given the VLAN ID.""" + confstr = snipp.CMD_NO_VLAN_CONF_SNIPPET % vlanid + confstr = self.create_xml_snippet(confstr) + self._edit_config(nexus_host, target='running', config=confstr) + + def enable_vlan_on_trunk_int(self, nexus_host, vlanid, intf_type, + interface): + """Enable a VLAN on a trunk interface.""" + # If more than one VLAN is configured on this interface then + # include the 'add' keyword. + if len(nexus_db_v2.get_port_switch_bindings( + '%s:%s' % (intf_type, interface), nexus_host)) == 1: + snippet = snipp.CMD_INT_VLAN_SNIPPET + else: + snippet = snipp.CMD_INT_VLAN_ADD_SNIPPET + confstr = snippet % (intf_type, interface, vlanid, intf_type) + confstr = self.create_xml_snippet(confstr) + LOG.debug(_("NexusDriver: %s"), confstr) + self._edit_config(nexus_host, target='running', config=confstr) + + def disable_vlan_on_trunk_int(self, nexus_host, vlanid, intf_type, + interface): + """Disable a VLAN on a trunk interface.""" + confstr = (snipp.CMD_NO_VLAN_INT_SNIPPET % + (intf_type, interface, vlanid, intf_type)) + confstr = self.create_xml_snippet(confstr) + LOG.debug(_("NexusDriver: %s"), confstr) + self._edit_config(nexus_host, target='running', config=confstr) + + def create_and_trunk_vlan(self, nexus_host, vlan_id, vlan_name, + intf_type, nexus_port): + """Create VLAN and trunk it on the specified ports.""" + self.create_vlan(nexus_host, vlan_id, vlan_name) + LOG.debug(_("NexusDriver created VLAN: %s"), vlan_id) + if nexus_port: + 
self.enable_vlan_on_trunk_int(nexus_host, vlan_id, intf_type, + nexus_port) diff --git a/neutron/plugins/ml2/drivers/cisco/nexus/nexus_snippets.py b/neutron/plugins/ml2/drivers/cisco/nexus/nexus_snippets.py new file mode 100644 index 000000000..fb38e4199 --- /dev/null +++ b/neutron/plugins/ml2/drivers/cisco/nexus/nexus_snippets.py @@ -0,0 +1,200 @@ +# Copyright 2013 OpenStack Foundation. +# All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +""" +Cisco Nexus-OS XML-based configuration snippets. +""" + +import logging + + +LOG = logging.getLogger(__name__) + + +# The following are standard strings, messages used to communicate with Nexus. 
+EXEC_CONF_SNIPPET = """ + + + <__XML__MODE__exec_configure>%s + + + +""" + +CMD_VLAN_CONF_SNIPPET = """ + + + <__XML__PARAM_value>%s + <__XML__MODE_vlan> + + %s + + + + +""" + +CMD_VLAN_ACTIVE_SNIPPET = """ + + + <__XML__PARAM_value>%s + <__XML__MODE_vlan> + + active + + + + +""" + +CMD_VLAN_NO_SHUTDOWN_SNIPPET = """ + + + <__XML__PARAM_value>%s + <__XML__MODE_vlan> + + + + + + +""" + +CMD_NO_VLAN_CONF_SNIPPET = """ + + + + <__XML__PARAM_value>%s + + + +""" + +CMD_INT_VLAN_HEADER = """ + + <%s> + %s + <__XML__MODE_if-ethernet-switch> + + + + """ + +CMD_VLAN_ID = """ + %s""" + +CMD_VLAN_ADD_ID = """ + %s + """ % CMD_VLAN_ID + +CMD_INT_VLAN_TRAILER = """ + + + + + + + +""" + +CMD_INT_VLAN_SNIPPET = (CMD_INT_VLAN_HEADER + + CMD_VLAN_ID + + CMD_INT_VLAN_TRAILER) + +CMD_INT_VLAN_ADD_SNIPPET = (CMD_INT_VLAN_HEADER + + CMD_VLAN_ADD_ID + + CMD_INT_VLAN_TRAILER) + +CMD_PORT_TRUNK = """ + + <%s> + %s + <__XML__MODE_if-ethernet-switch> + + + + + + + + + + +""" + +CMD_NO_SWITCHPORT = """ + + <%s> + %s + <__XML__MODE_if-ethernet-switch> + + + + + + + +""" + +CMD_NO_VLAN_INT_SNIPPET = """ + + <%s> + %s + <__XML__MODE_if-ethernet-switch> + + + + + + + %s + + + + + + + + +""" + +CMD_VLAN_SVI_SNIPPET = """ + + + %s + <__XML__MODE_vlan> + + + + +
+
%s
+
+
+ +
+
+""" + +CMD_NO_VLAN_SVI_SNIPPET = """ + + + + %s + + + +""" diff --git a/neutron/plugins/ml2/drivers/l2pop/README b/neutron/plugins/ml2/drivers/l2pop/README new file mode 100644 index 000000000..46bb27e54 --- /dev/null +++ b/neutron/plugins/ml2/drivers/l2pop/README @@ -0,0 +1,41 @@ +Neutron ML2 l2 population Mechanism Drivers + +l2 population (l2pop) mechanism drivers implements the ML2 driver to improve +open source plugins overlay implementations (VXLAN with Linux bridge and +GRE/VXLAN with OVS). This mechanism driver is implemented in ML2 to propagate +the forwarding information among agents using a common RPC API. + +More informations could be found on the wiki page [1]. + +VXLAN Linux kernel: +------------------- +The VXLAN Linux kernel module provide all necessary functionalities to populate +the forwarding table and local ARP responder tables. This module appears on +release 3.7 of the vanilla Linux kernel in experimental: +- 3.8: first stable release, no edge replication (multicast necessary), +- 3.9: edge replication only for the broadcasted packets, +- 3.11: edge replication for broadcast, multicast and unknown packets. + +Note: Some distributions (like RHEL) have backported this module on precedent + kernel version. + +OpenvSwitch: +------------ +The OVS OpenFlow tables provide all of the necessary functionality to populate +the forwarding table and local ARP responder tables. +A wiki page describe how the flow tables did evolve on OVS agents: +- [2] without local ARP responder +- [3] with local ARP responder. /!\ This functionality is only available since + the development branch 2.1. It's possible + to disable (enable by default) it through + the flag 'arp_responder'. /!\ + + +Note: A difference persists between the LB and OVS agents when they are used + with the l2-pop mechanism driver (and local ARP responder available). The + LB agent will drop unknown unicast (VXLAN bridge mode), whereas the OVS + agent will flood it. 
+ +[1] https://wiki.openstack.org/wiki/L2population_blueprint +[2] https://wiki.openstack.org/wiki/Ovs-flow-logic#OVS_flows_logic +[3] https://wiki.openstack.org/wiki/Ovs-flow-logic#OVS_flows_logic_with_local_ARP_responder \ No newline at end of file diff --git a/neutron/plugins/ml2/drivers/l2pop/__init__.py b/neutron/plugins/ml2/drivers/l2pop/__init__.py new file mode 100644 index 000000000..b9b2306f9 --- /dev/null +++ b/neutron/plugins/ml2/drivers/l2pop/__init__.py @@ -0,0 +1,18 @@ +# Copyright (c) 2013 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Sylvain Afchain, eNovance SAS +# @author: Francois Eleouet, Orange +# @author: Mathieu Rohon, Orange diff --git a/neutron/plugins/ml2/drivers/l2pop/config.py b/neutron/plugins/ml2/drivers/l2pop/config.py new file mode 100644 index 000000000..1e0701e0b --- /dev/null +++ b/neutron/plugins/ml2/drivers/l2pop/config.py @@ -0,0 +1,29 @@ +# Copyright (c) 2013 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Sylvain Afchain, eNovance SAS +# @author: Francois Eleouet, Orange +# @author: Mathieu Rohon, Orange + +from oslo.config import cfg + + +l2_population_options = [ + cfg.IntOpt('agent_boot_time', default=180, + help=_('Delay within which agent is expected to update ' + 'existing ports when it restarts')), +] + +cfg.CONF.register_opts(l2_population_options, "l2pop") diff --git a/neutron/plugins/ml2/drivers/l2pop/constants.py b/neutron/plugins/ml2/drivers/l2pop/constants.py new file mode 100644 index 000000000..2c9b7f96f --- /dev/null +++ b/neutron/plugins/ml2/drivers/l2pop/constants.py @@ -0,0 +1,23 @@ +# Copyright (c) 2013 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Sylvain Afchain, eNovance SAS +# @author: Francois Eleouet, Orange +# @author: Mathieu Rohon, Orange + +from neutron.common import constants + +SUPPORTED_AGENT_TYPES = [constants.AGENT_TYPE_OVS, + constants.AGENT_TYPE_LINUXBRIDGE] diff --git a/neutron/plugins/ml2/drivers/l2pop/db.py b/neutron/plugins/ml2/drivers/l2pop/db.py new file mode 100644 index 000000000..3c4fc9bce --- /dev/null +++ b/neutron/plugins/ml2/drivers/l2pop/db.py @@ -0,0 +1,83 @@ +# Copyright (c) 2013 OpenStack Foundation. +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Sylvain Afchain, eNovance SAS +# @author: Francois Eleouet, Orange +# @author: Mathieu Rohon, Orange + +from sqlalchemy import sql + +from neutron.common import constants as const +from neutron.db import agents_db +from neutron.db import db_base_plugin_v2 as base_db +from neutron.db import models_v2 +from neutron.openstack.common import jsonutils +from neutron.openstack.common import timeutils +from neutron.plugins.ml2.drivers.l2pop import constants as l2_const +from neutron.plugins.ml2 import models as ml2_models + + +class L2populationDbMixin(base_db.CommonDbMixin): + + def get_agent_ip_by_host(self, session, agent_host): + agent = self.get_agent_by_host(session, agent_host) + if agent: + return self.get_agent_ip(agent) + + def get_agent_ip(self, agent): + configuration = jsonutils.loads(agent.configurations) + return configuration.get('tunneling_ip') + + def get_agent_uptime(self, agent): + return timeutils.delta_seconds(agent.started_at, + agent.heartbeat_timestamp) + + def get_agent_tunnel_types(self, agent): + configuration = jsonutils.loads(agent.configurations) + return configuration.get('tunnel_types') + + def get_agent_by_host(self, session, agent_host): + with session.begin(subtransactions=True): + query = session.query(agents_db.Agent) + query = query.filter(agents_db.Agent.host == agent_host, + agents_db.Agent.agent_type.in_( + l2_const.SUPPORTED_AGENT_TYPES)) + return query.first() + + def 
get_network_ports(self, session, network_id): + with session.begin(subtransactions=True): + query = session.query(ml2_models.PortBinding, + agents_db.Agent) + query = query.join(agents_db.Agent, + agents_db.Agent.host == + ml2_models.PortBinding.host) + query = query.join(models_v2.Port) + query = query.filter(models_v2.Port.network_id == network_id, + models_v2.Port.admin_state_up == sql.true(), + agents_db.Agent.agent_type.in_( + l2_const.SUPPORTED_AGENT_TYPES)) + return query + + def get_agent_network_active_port_count(self, session, agent_host, + network_id): + with session.begin(subtransactions=True): + query = session.query(models_v2.Port) + + query = query.join(ml2_models.PortBinding) + query = query.filter(models_v2.Port.network_id == network_id, + models_v2.Port.status == + const.PORT_STATUS_ACTIVE, + ml2_models.PortBinding.host == agent_host) + return query.count() diff --git a/neutron/plugins/ml2/drivers/l2pop/mech_driver.py b/neutron/plugins/ml2/drivers/l2pop/mech_driver.py new file mode 100644 index 000000000..af4a427fc --- /dev/null +++ b/neutron/plugins/ml2/drivers/l2pop/mech_driver.py @@ -0,0 +1,248 @@ +# Copyright (c) 2013 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
# Original authors: Sylvain Afchain (eNovance SAS), Francois Eleouet (Orange)
# and Mathieu Rohon (Orange).

from oslo.config import cfg

from neutron.common import constants as const
from neutron import context as n_context
from neutron.db import api as db_api
from neutron.openstack.common import log as logging
from neutron.plugins.ml2 import driver_api as api
from neutron.plugins.ml2.drivers.l2pop import config  # noqa
from neutron.plugins.ml2.drivers.l2pop import db as l2pop_db
from neutron.plugins.ml2.drivers.l2pop import rpc as l2pop_rpc

LOG = logging.getLogger(__name__)


class L2populationMechanismDriver(api.MechanismDriver,
                                  l2pop_db.L2populationDbMixin):
    """ML2 mechanism driver implementing the L2 population extension.

    Tracks port status/binding transitions and notifies L2 agents over
    RPC (fanout or host-targeted cast) of forwarding database (FDB)
    entries to add or remove, so agents can pre-populate tunnel
    endpoints instead of relying on flooding.
    """

    def __init__(self):
        super(L2populationMechanismDriver, self).__init__()
        self.L2populationAgentNotify = l2pop_rpc.L2populationAgentNotifyAPI()

    def initialize(self):
        LOG.debug(_("Experimental L2 population driver"))
        self.rpc_ctx = n_context.get_admin_context_without_session()
        # Ports being migrated between hosts, keyed by original port id.
        self.migrated_ports = {}
        # FDB entries computed in delete_port_precommit, consumed (and
        # sent to agents) in delete_port_postcommit.
        self.deleted_ports = {}

    def _get_port_fdb_entries(self, port):
        # One [MAC, IP] pair per fixed IP of the port.
        return [[port['mac_address'],
                 ip['ip_address']] for ip in port['fixed_ips']]

    def delete_port_precommit(self, context):
        # TODO(matrohon): revisit once the original bound segment is
        # available in delete_port_postcommit.  At that point
        # agent_active_ports will be equal to 0 and _update_port_down
        # won't need agent_active_ports_count_for_flooding anymore.
        port_context = context.current
        fdb_entries = self._update_port_down(context, port_context, 1)
        self.deleted_ports[context.current['id']] = fdb_entries

    def delete_port_postcommit(self, context):
        # Notify agents only after the DB transaction has committed.
        fanout_msg = self.deleted_ports.pop(context.current['id'], None)
        if fanout_msg:
            self.L2populationAgentNotify.remove_fdb_entries(
                self.rpc_ctx, fanout_msg)

    def _get_diff_ips(self, orig, port):
        """Return (removed_ips, added_ips) sets, or None if unchanged."""
        orig_ips = set(ip['ip_address'] for ip in orig['fixed_ips'])
        port_ips = set(ip['ip_address'] for ip in port['fixed_ips'])

        # check if an ip has been added or removed
        orig_chg_ips = orig_ips.difference(port_ips)
        port_chg_ips = port_ips.difference(orig_ips)

        if orig_chg_ips or port_chg_ips:
            return orig_chg_ips, port_chg_ips

    def _fixed_ips_changed(self, context, orig, port, diff_ips):
        """Send a 'chg_ip' update describing added/removed fixed IPs."""
        orig_ips, port_ips = diff_ips

        port_infos = self._get_port_infos(context, orig)
        if not port_infos:
            return
        agent, agent_ip, segment, port_fdb_entries = port_infos

        orig_mac_ip = [[port['mac_address'], ip] for ip in orig_ips]
        port_mac_ip = [[port['mac_address'], ip] for ip in port_ips]

        upd_fdb_entries = {port['network_id']: {agent_ip: {}}}

        ports = upd_fdb_entries[port['network_id']][agent_ip]
        if orig_mac_ip:
            ports['before'] = orig_mac_ip

        if port_mac_ip:
            ports['after'] = port_mac_ip

        self.L2populationAgentNotify.update_fdb_entries(
            self.rpc_ctx, {'chg_ip': upd_fdb_entries})

        return True

    def update_port_postcommit(self, context):
        port = context.current
        orig = context.original

        diff_ips = self._get_diff_ips(orig, port)
        if diff_ips:
            self._fixed_ips_changed(context, orig, port, diff_ips)
        if (port['binding:host_id'] != orig['binding:host_id']
            and port['status'] == const.PORT_STATUS_ACTIVE
            and not self.migrated_ports.get(orig['id'])):
            # The port has been migrated.  We have to store the original
            # binding to send appropriate fdb once the port will be set
            # on the destination host
            self.migrated_ports[orig['id']] = orig
        elif port['status'] != orig['status']:
            if port['status'] == const.PORT_STATUS_ACTIVE:
                self._update_port_up(context)
            elif port['status'] == const.PORT_STATUS_DOWN:
                fdb_entries = self._update_port_down(context, port)
                self.L2populationAgentNotify.remove_fdb_entries(
                    self.rpc_ctx, fdb_entries)
            elif port['status'] == const.PORT_STATUS_BUILD:
                orig = self.migrated_ports.pop(port['id'], None)
                if orig:
                    # this port has been migrated: remove its entries
                    # from the fdb
                    fdb_entries = self._update_port_down(context, orig)
                    self.L2populationAgentNotify.remove_fdb_entries(
                        self.rpc_ctx, fdb_entries)

    def _get_port_infos(self, context, port):
        """Return (agent, agent_ip, segment, fdb_entries) for a port.

        Returns None when the port is unbound, the hosting agent or its
        tunneling IP is unknown, or the bound segment's network type is
        not among the agent's tunnel types.
        """
        agent_host = port['binding:host_id']
        if not agent_host:
            return

        session = db_api.get_session()
        agent = self.get_agent_by_host(session, agent_host)
        if not agent:
            return

        agent_ip = self.get_agent_ip(agent)
        if not agent_ip:
            LOG.warning(_("Unable to retrieve the agent ip, check the agent "
                          "configuration."))
            return

        segment = context.bound_segment
        if not segment:
            LOG.warning(_("Port %(port)s updated by agent %(agent)s "
                          "isn't bound to any segment"),
                        {'port': port['id'], 'agent': agent})
            return

        tunnel_types = self.get_agent_tunnel_types(agent)
        if segment['network_type'] not in tunnel_types:
            return

        fdb_entries = self._get_port_fdb_entries(port)

        return agent, agent_ip, segment, fdb_entries

    def _update_port_up(self, context):
        """Notify agents that a port went ACTIVE.

        A freshly (re)booted agent, or the agent activating its first
        port on this network, receives the full FDB for the network;
        all other agents are told to add entries for this port only.
        """
        port_context = context.current
        port_infos = self._get_port_infos(context, port_context)
        if not port_infos:
            return
        agent, agent_ip, segment, port_fdb_entries = port_infos

        agent_host = port_context['binding:host_id']
        network_id = port_context['network_id']

        session = db_api.get_session()
        agent_active_ports = self.get_agent_network_active_port_count(
            session, agent_host, network_id)

        other_fdb_entries = {network_id:
                             {'segment_id': segment['segmentation_id'],
                              'network_type': segment['network_type'],
                              'ports': {agent_ip: []}}}

        if agent_active_ports == 1 or (
                self.get_agent_uptime(agent) < cfg.CONF.l2pop.agent_boot_time):
            # First port activated on current agent in this network,
            # we have to provide it with the whole list of fdb entries
            agent_fdb_entries = {network_id:
                                 {'segment_id': segment['segmentation_id'],
                                  'network_type': segment['network_type'],
                                  'ports': {}}}
            ports = agent_fdb_entries[network_id]['ports']

            network_ports = self.get_network_ports(session, network_id)
            for network_port in network_ports:
                binding, agent = network_port
                if agent.host == agent_host:
                    continue

                ip = self.get_agent_ip(agent)
                if not ip:
                    LOG.debug(_("Unable to retrieve the agent ip, check "
                                "the agent %(agent_host)s configuration."),
                              {'agent_host': agent.host})
                    continue

                agent_ports = ports.get(ip, [const.FLOODING_ENTRY])
                agent_ports += self._get_port_fdb_entries(binding.port)
                ports[ip] = agent_ports

            # And notify other agents to add flooding entry
            other_fdb_entries[network_id]['ports'][agent_ip].append(
                const.FLOODING_ENTRY)

            # Idiomatic truthiness test instead of "if ports.keys():".
            if ports:
                self.L2populationAgentNotify.add_fdb_entries(
                    self.rpc_ctx, agent_fdb_entries, agent_host)

        # Notify other agents to add fdb rule for current port
        other_fdb_entries[network_id]['ports'][agent_ip] += port_fdb_entries

        self.L2populationAgentNotify.add_fdb_entries(self.rpc_ctx,
                                                     other_fdb_entries)

    def _update_port_down(self, context, port_context,
                          agent_active_ports_count_for_flooding=0):
        """Build the FDB entries other agents must remove for this port.

        When the agent's active-port count on the network drops to
        `agent_active_ports_count_for_flooding`, the flooding entry is
        removed as well.  Returns the fdb_entries dict (or None when the
        port's agent/segment info is unavailable).
        """
        port_infos = self._get_port_infos(context, port_context)
        if not port_infos:
            return
        agent, agent_ip, segment, port_fdb_entries = port_infos

        agent_host = port_context['binding:host_id']
        network_id = port_context['network_id']

        session = db_api.get_session()
        agent_active_ports = self.get_agent_network_active_port_count(
            session, agent_host, network_id)

        other_fdb_entries = {network_id:
                             {'segment_id': segment['segmentation_id'],
                              'network_type': segment['network_type'],
                              'ports': {agent_ip: []}}}
        if agent_active_ports == agent_active_ports_count_for_flooding:
            # Agent is removing its last activated port in this network,
            # other agents needs to be notified to delete their flooding entry.
            other_fdb_entries[network_id]['ports'][agent_ip].append(
                const.FLOODING_ENTRY)
        # Notify other agents to remove fdb rules for current port
        other_fdb_entries[network_id]['ports'][agent_ip] += port_fdb_entries

        return other_fdb_entries


# ===== neutron/plugins/ml2/drivers/l2pop/rpc.py =====
# Copyright (c) 2013 OpenStack Foundation.  All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License"); see
# http://www.apache.org/licenses/LICENSE-2.0 for the full text.
# Original authors: Sylvain Afchain (eNovance SAS), Francois Eleouet (Orange)
# and Mathieu Rohon (Orange).

from neutron.common import rpc_compat
from neutron.common import topics
from neutron.openstack.common import log as logging


LOG = logging.getLogger(__name__)


class L2populationAgentNotifyAPI(rpc_compat.RpcProxy):
    """RPC client used to push FDB entry changes to L2 agents."""

    BASE_RPC_API_VERSION = '1.0'

    def __init__(self, topic=topics.AGENT):
        super(L2populationAgentNotifyAPI, self).__init__(
            topic=topic, default_version=self.BASE_RPC_API_VERSION)

        self.topic_l2pop_update = topics.get_topic_name(topic,
                                                        topics.L2POPULATION,
                                                        topics.UPDATE)

    def _notification_fanout(self, context, method, fdb_entries):
        """Fanout-cast `method` with the fdb_entries to every agent."""
        LOG.debug(_('Fanout notify l2population agents at %(topic)s '
                    'the message %(method)s with %(fdb_entries)s'),
                  {'topic': self.topic,
                   'method': method,
                   'fdb_entries': fdb_entries})

        self.fanout_cast(context,
                         self.make_msg(method, fdb_entries=fdb_entries),
                         topic=self.topic_l2pop_update)

    def _notification_host(self, context, method, fdb_entries, host):
        """Cast `method` with the fdb_entries to the agent on `host` only."""
        LOG.debug(_('Notify l2population agent %(host)s at %(topic)s the '
                    'message %(method)s with %(fdb_entries)s'),
                  {'host': host,
                   'topic': self.topic,
                   'method': method,
                   'fdb_entries': fdb_entries})
        self.cast(context,
                  self.make_msg(method, fdb_entries=fdb_entries),
                  topic='%s.%s' % (self.topic_l2pop_update, host))

    def add_fdb_entries(self, context, fdb_entries, host=None):
        # No-op when fdb_entries is empty/None.
        if fdb_entries:
            if host:
                self._notification_host(context, 'add_fdb_entries',
                                        fdb_entries, host)
            else:
                self._notification_fanout(context, 'add_fdb_entries',
                                          fdb_entries)

    def remove_fdb_entries(self, context, fdb_entries, host=None):
        # No-op when fdb_entries is empty/None.
        if fdb_entries:
            if host:
                self._notification_host(context, 'remove_fdb_entries',
                                        fdb_entries, host)
            else:
                self._notification_fanout(context, 'remove_fdb_entries',
                                          fdb_entries)

    def update_fdb_entries(self, context, fdb_entries, host=None):
        # No-op when fdb_entries is empty/None.
        if fdb_entries:
            if host:
                self._notification_host(context, 'update_fdb_entries',
                                        fdb_entries, host)
            else:
                self._notification_fanout(context, 'update_fdb_entries',
                                          fdb_entries)


# ===== neutron/plugins/ml2/drivers/mech_agent.py =====
# Copyright (c) 2013 OpenStack Foundation.  All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License"); see
# http://www.apache.org/licenses/LICENSE-2.0 for the full text.

import abc
import six

from neutron.extensions import portbindings
from neutron.openstack.common import log
from neutron.plugins.ml2 import driver_api as api

LOG = log.getLogger(__name__)


@six.add_metaclass(abc.ABCMeta)
class AgentMechanismDriverBase(api.MechanismDriver):
    """Base class for drivers that attach to networks using an L2 agent.

    The AgentMechanismDriverBase provides common code for mechanism
    drivers that integrate the ml2 plugin with L2 agents. Port binding
    with this driver requires the driver's associated agent to be
    running on the port's host, and that agent to have connectivity to
    at least one segment of the port's network.

    MechanismDrivers using this base class must pass the agent type to
    __init__(), and must implement try_to_bind_segment_for_agent().
    """

    # NOTE: the vnic-type default is an immutable tuple rather than a
    # list to avoid the shared-mutable-default-argument pitfall; it is
    # only ever membership-tested, so this is backward compatible.
    def __init__(self, agent_type,
                 supported_vnic_types=(portbindings.VNIC_NORMAL,)):
        """Initialize base class for specific L2 agent type.

        :param agent_type: Constant identifying agent type in agents_db
        :param supported_vnic_types: The binding:vnic_type values we can bind
        """
        self.agent_type = agent_type
        self.supported_vnic_types = supported_vnic_types

    def initialize(self):
        pass

    def bind_port(self, context):
        """Try to bind the port via the first live agent on its host."""
        LOG.debug(_("Attempting to bind port %(port)s on "
                    "network %(network)s"),
                  {'port': context.current['id'],
                   'network': context.network.current['id']})
        vnic_type = context.current.get(portbindings.VNIC_TYPE,
                                        portbindings.VNIC_NORMAL)
        if vnic_type not in self.supported_vnic_types:
            LOG.debug(_("Refusing to bind due to unsupported vnic_type: %s"),
                      vnic_type)
            return
        for agent in context.host_agents(self.agent_type):
            LOG.debug(_("Checking agent: %s"), agent)
            if agent['alive']:
                for segment in context.network.network_segments:
                    if self.try_to_bind_segment_for_agent(context, segment,
                                                          agent):
                        LOG.debug(_("Bound using segment: %s"), segment)
                        return
            else:
                LOG.warning(_("Attempting to bind with dead agent: %s"),
                            agent)

    @abc.abstractmethod
    def try_to_bind_segment_for_agent(self, context, segment, agent):
        """Try to bind with segment for agent.

        :param context: PortContext instance describing the port
        :param segment: segment dictionary describing segment to bind
        :param agent: agents_db entry describing agent to bind
        :returns: True iff segment has been bound for agent

        Called inside transaction during bind_port() so that derived
        MechanismDrivers can use agent_db data along with built-in
        knowledge of the corresponding agent's capabilities to attempt
        to bind to the specified network segment for the agent.

        If the segment can be bound for the agent, this function must
        call context.set_binding() with appropriate values and then
        return True. Otherwise, it must return False.
        """


@six.add_metaclass(abc.ABCMeta)
class SimpleAgentMechanismDriverBase(AgentMechanismDriverBase):
    """Base class for simple drivers using an L2 agent.

    The SimpleAgentMechanismDriverBase provides common code for
    mechanism drivers that integrate the ml2 plugin with L2 agents,
    where the binding:vif_type and binding:vif_details values are the
    same for all bindings. Port binding with this driver requires the
    driver's associated agent to be running on the port's host, and
    that agent to have connectivity to at least one segment of the
    port's network.

    MechanismDrivers using this base class must pass the agent type
    and the values for binding:vif_type and binding:vif_details to
    __init__(), and must implement check_segment_for_agent().
    """

    # NOTE: tuple default for the same mutable-default reason as the
    # base class.
    def __init__(self, agent_type, vif_type, vif_details,
                 supported_vnic_types=(portbindings.VNIC_NORMAL,)):
        """Initialize base class for specific L2 agent type.

        :param agent_type: Constant identifying agent type in agents_db
        :param vif_type: Value for binding:vif_type when bound
        :param vif_details: Dictionary with details for VIF driver when bound
        :param supported_vnic_types: The binding:vnic_type values we can bind
        """
        super(SimpleAgentMechanismDriverBase, self).__init__(
            agent_type, supported_vnic_types)
        self.vif_type = vif_type
        self.vif_details = vif_details

    def try_to_bind_segment_for_agent(self, context, segment, agent):
        if self.check_segment_for_agent(segment, agent):
            context.set_binding(segment[api.ID],
                                self.vif_type,
                                self.vif_details)
            return True
        else:
            return False

    @abc.abstractmethod
    def check_segment_for_agent(self, segment, agent):
        """Check if segment can be bound for agent.

        :param segment: segment dictionary describing segment to bind
        :param agent: agents_db entry describing agent to bind
        :returns: True iff segment can be bound for agent

        Called inside transaction during bind_port so that derived
        MechanismDrivers can use agent_db data along with built-in
        knowledge of the corresponding agent's capabilities to
        determine whether or not the specified network segment can be
        bound for the agent.
        """


# ===== neutron/plugins/ml2/drivers/mech_arista/README =====
# Arista Neutron ML2 Mechanism Driver
#
# This mechanism driver implements the ML2 Driver API and is used to
# manage the virtual and physical networks using Arista hardware.
#
# Note: the initial version of this driver supports VLANs only.
#
# For more details on use please refer to:
# https://wiki.openstack.org/wiki/Arista-neutron-ml2-driver

# ===== neutron/plugins/ml2/drivers/mech_arista/__init__.py =====
# (Empty package marker; Apache License, Version 2.0 header only.)
# ===== neutron/plugins/ml2/drivers/mech_arista/config.py =====
# Copyright (c) 2013 OpenStack Foundation.  All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License"); see
# http://www.apache.org/licenses/LICENSE-2.0 for the full text.


from oslo.config import cfg

""" Arista ML2 Mechanism driver specific configuration knobs.

Following are user configurable options for Arista ML2 Mechanism
driver. The eapi_username, eapi_password, and eapi_host are
required options. Region Name must be the same that is used by
Keystone service. This option is available to support multiple
OpenStack/Neutron controllers.
"""

ARISTA_DRIVER_OPTS = [
    cfg.StrOpt('eapi_username',
               default='',
               # NOTE: trailing space after 'EOS' keeps the implicitly
               # concatenated help string from rendering as "EOSwill".
               help=_('Username for Arista EOS. This is required field. '
                      'If not set, all communications to Arista EOS '
                      'will fail.')),
    cfg.StrOpt('eapi_password',
               default='',
               secret=True,  # do not expose value in the logs
               help=_('Password for Arista EOS. This is required field. '
                      'If not set, all communications to Arista EOS '
                      'will fail.')),
    cfg.StrOpt('eapi_host',
               default='',
               help=_('Arista EOS IP address. This is required field. '
                      'If not set, all communications to Arista EOS '
                      'will fail.')),
    cfg.BoolOpt('use_fqdn',
                default=True,
                help=_('Defines if hostnames are sent to Arista EOS as FQDNs '
                       '("node1.domain.com") or as short names ("node1"). '
                       'This is optional. If not set, a value of "True" '
                       'is assumed.')),
    cfg.IntOpt('sync_interval',
               default=180,
               help=_('Sync interval in seconds between Neutron plugin and '
                      'EOS. This interval defines how often the '
                      'synchronization is performed. This is an optional '
                      'field. If not set, a value of 180 seconds is '
                      'assumed.')),
    cfg.StrOpt('region_name',
               default='RegionOne',
               help=_('Defines Region Name that is assigned to this OpenStack '
                      'Controller. This is useful when multiple '
                      'OpenStack/Neutron controllers are managing the same '
                      'Arista HW clusters. Note that this name must match '
                      'with the region name registered (or known) to keystone '
                      'service. Authentication with Keystone is performed by '
                      'EOS. This is optional. If not set, a value of '
                      '"RegionOne" is assumed.'))
]

cfg.CONF.register_opts(ARISTA_DRIVER_OPTS, "ml2_arista")

# ===== neutron/plugins/ml2/drivers/mech_arista/db.py =====
# Copyright (c) 2013 OpenStack Foundation.  All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License"); see
# http://www.apache.org/licenses/LICENSE-2.0 for the full text.
import sqlalchemy as sa

from neutron import context as nctx
import neutron.db.api as db
from neutron.db import db_base_plugin_v2
from neutron.db import model_base
from neutron.db import models_v2

VLAN_SEGMENTATION = 'vlan'

UUID_LEN = 36
STR_LEN = 255


class AristaProvisionedNets(model_base.BASEV2, models_v2.HasId,
                            models_v2.HasTenant):
    """Stores networks provisioned on Arista EOS.

    Saves the segmentation ID for each network that is provisioned
    on EOS. This information is used during synchronization between
    Neutron and EOS.
    """
    __tablename__ = 'arista_provisioned_nets'

    network_id = sa.Column(sa.String(UUID_LEN))
    segmentation_id = sa.Column(sa.Integer)

    def eos_network_representation(self, segmentation_type):
        return {u'networkId': self.network_id,
                u'segmentationTypeId': self.segmentation_id,
                u'segmentationType': segmentation_type}


class AristaProvisionedVms(model_base.BASEV2, models_v2.HasId,
                           models_v2.HasTenant):
    """Stores VMs provisioned on Arista EOS.

    All VMs launched on physical hosts connected to Arista
    Switches are remembered
    """
    __tablename__ = 'arista_provisioned_vms'

    vm_id = sa.Column(sa.String(STR_LEN))
    host_id = sa.Column(sa.String(STR_LEN))
    port_id = sa.Column(sa.String(UUID_LEN))
    network_id = sa.Column(sa.String(UUID_LEN))

    def eos_vm_representation(self):
        return {u'vmId': self.vm_id,
                u'host': self.host_id,
                u'ports': {self.port_id: [{u'portId': self.port_id,
                                           u'networkId': self.network_id}]}}

    def eos_port_representation(self):
        return {u'vmId': self.vm_id,
                u'host': self.host_id,
                u'portId': self.port_id,
                u'networkId': self.network_id}


class AristaProvisionedTenants(model_base.BASEV2, models_v2.HasId,
                               models_v2.HasTenant):
    """Stores Tenants provisioned on Arista EOS.

    Tenants list is maintained for sync between Neutron and EOS.
    """
    __tablename__ = 'arista_provisioned_tenants'

    def eos_tenant_representation(self):
        return {u'tenantId': self.tenant_id}


def remember_tenant(tenant_id):
    """Stores a tenant information in repository.

    :param tenant_id: globally unique neutron tenant identifier
    """
    session = db.get_session()
    with session.begin():
        tenant = AristaProvisionedTenants(tenant_id=tenant_id)
        session.add(tenant)


def forget_tenant(tenant_id):
    """Removes a tenant information from repository.

    :param tenant_id: globally unique neutron tenant identifier
    """
    session = db.get_session()
    with session.begin():
        (session.query(AristaProvisionedTenants).
         filter_by(tenant_id=tenant_id).
         delete())


def get_all_tenants():
    """Returns a list of all tenants stored in repository."""
    session = db.get_session()
    with session.begin():
        return session.query(AristaProvisionedTenants).all()


def num_provisioned_tenants():
    """Returns number of tenants stored in repository."""
    session = db.get_session()
    with session.begin():
        return session.query(AristaProvisionedTenants).count()


def remember_vm(vm_id, host_id, port_id, network_id, tenant_id):
    """Stores all relevant information about a VM in repository.

    :param vm_id: globally unique identifier for VM instance
    :param host_id: ID of the host where the VM is placed
    :param port_id: globally unique port ID that connects VM to network
    :param network_id: globally unique neutron network identifier
    :param tenant_id: globally unique neutron tenant identifier
    """
    session = db.get_session()
    with session.begin():
        vm = AristaProvisionedVms(
            vm_id=vm_id,
            host_id=host_id,
            port_id=port_id,
            network_id=network_id,
            tenant_id=tenant_id)
        session.add(vm)


def forget_vm(vm_id, host_id, port_id, network_id, tenant_id):
    """Removes all relevant information about a VM from repository.

    :param vm_id: globally unique identifier for VM instance
    :param host_id: ID of the host where the VM is placed
    :param port_id: globally unique port ID that connects VM to network
    :param network_id: globally unique neutron network identifier
    :param tenant_id: globally unique neutron tenant identifier
    """
    session = db.get_session()
    with session.begin():
        (session.query(AristaProvisionedVms).
         filter_by(vm_id=vm_id, host_id=host_id,
                   port_id=port_id, tenant_id=tenant_id,
                   network_id=network_id).delete())


def remember_network(tenant_id, network_id, segmentation_id):
    """Stores all relevant information about a Network in repository.

    :param tenant_id: globally unique neutron tenant identifier
    :param network_id: globally unique neutron network identifier
    :param segmentation_id: VLAN ID that is assigned to the network
    """
    session = db.get_session()
    with session.begin():
        net = AristaProvisionedNets(
            tenant_id=tenant_id,
            network_id=network_id,
            segmentation_id=segmentation_id)
        session.add(net)


def forget_network(tenant_id, network_id):
    """Deletes all relevant information about a Network from repository.

    :param tenant_id: globally unique neutron tenant identifier
    :param network_id: globally unique neutron network identifier
    """
    session = db.get_session()
    with session.begin():
        (session.query(AristaProvisionedNets).
         filter_by(tenant_id=tenant_id, network_id=network_id).
         delete())


def get_segmentation_id(tenant_id, network_id):
    """Returns Segmentation ID (VLAN) associated with a network.

    :param tenant_id: globally unique neutron tenant identifier
    :param network_id: globally unique neutron network identifier
    """
    session = db.get_session()
    with session.begin():
        net = (session.query(AristaProvisionedNets).
               filter_by(tenant_id=tenant_id,
                         network_id=network_id).first())
        # Conditional expression instead of the old "net and ... or None"
        # idiom, which wrongly returned None for a falsy (0) segmentation id.
        return net.segmentation_id if net else None


def is_vm_provisioned(vm_id, host_id, port_id,
                      network_id, tenant_id):
    """Checks if a VM is already known to EOS

    :returns: True, if yes; False otherwise.
    :param vm_id: globally unique identifier for VM instance
    :param host_id: ID of the host where the VM is placed
    :param port_id: globally unique port ID that connects VM to network
    :param network_id: globally unique neutron network identifier
    :param tenant_id: globally unique neutron tenant identifier
    """
    session = db.get_session()
    with session.begin():
        num_vm = (session.query(AristaProvisionedVms).
                  filter_by(tenant_id=tenant_id,
                            vm_id=vm_id,
                            port_id=port_id,
                            network_id=network_id,
                            host_id=host_id).count())
        return num_vm > 0


def is_network_provisioned(tenant_id, network_id, seg_id=None):
    """Checks if a networks is already known to EOS

    :returns: True, if yes; False otherwise.
    :param tenant_id: globally unique neutron tenant identifier
    :param network_id: globally unique neutron network identifier
    :param seg_id: Optionally matches the segmentation ID (VLAN)
    """
    session = db.get_session()
    with session.begin():
        if not seg_id:
            num_nets = (session.query(AristaProvisionedNets).
                        filter_by(tenant_id=tenant_id,
                                  network_id=network_id).count())
        else:
            num_nets = (session.query(AristaProvisionedNets).
                        filter_by(tenant_id=tenant_id,
                                  network_id=network_id,
                                  segmentation_id=seg_id).count())
        return num_nets > 0


def is_tenant_provisioned(tenant_id):
    """Checks if a tenant is already known to EOS

    :returns: True, if yes; False otherwise.
    :param tenant_id: globally unique neutron tenant identifier
    """
    session = db.get_session()
    with session.begin():
        num_tenants = (session.query(AristaProvisionedTenants).
                       filter_by(tenant_id=tenant_id).count())
        return num_tenants > 0


def num_nets_provisioned(tenant_id):
    """Returns number of networks for a given tenant.

    :param tenant_id: globally unique neutron tenant identifier
    """
    session = db.get_session()
    with session.begin():
        return (session.query(AristaProvisionedNets).
                filter_by(tenant_id=tenant_id).count())


def num_vms_provisioned(tenant_id):
    """Returns number of VMs for a given tenant.

    :param tenant_id: globally unique neutron tenant identifier
    """
    session = db.get_session()
    with session.begin():
        return (session.query(AristaProvisionedVms).
                filter_by(tenant_id=tenant_id).count())


def get_networks(tenant_id):
    """Returns all networks for a given tenant in EOS-compatible format.

    See AristaRPCWrapper.get_network_list() for return value format.
    :param tenant_id: globally unique neutron tenant identifier
    """
    session = db.get_session()
    with session.begin():
        model = AristaProvisionedNets
        # isnot(None) expresses "IS NOT NULL" without tripping pep8 E711
        # (the original used a "none = None" alias for the same purpose).
        all_nets = (session.query(model).
                    filter(model.tenant_id == tenant_id,
                           model.segmentation_id.isnot(None)))
        res = dict(
            (net.network_id, net.eos_network_representation(
                VLAN_SEGMENTATION))
            for net in all_nets
        )
        return res


def get_vms(tenant_id):
    """Returns all VMs for a given tenant in EOS-compatible format.

    :param tenant_id: globally unique neutron tenant identifier
    """
    session = db.get_session()
    with session.begin():
        model = AristaProvisionedVms
        all_vms = (session.query(model).
                   filter(model.tenant_id == tenant_id,
                          model.host_id.isnot(None),
                          model.vm_id.isnot(None),
                          model.network_id.isnot(None),
                          model.port_id.isnot(None)))
        res = dict(
            (vm.vm_id, vm.eos_vm_representation())
            for vm in all_vms
        )
        return res


def get_ports(tenant_id):
    """Returns all ports of VMs in EOS-compatible format.

    :param tenant_id: globally unique neutron tenant identifier
    """
    session = db.get_session()
    with session.begin():
        model = AristaProvisionedVms
        all_ports = (session.query(model).
                     filter(model.tenant_id == tenant_id,
                            model.host_id.isnot(None),
                            model.vm_id.isnot(None),
                            model.network_id.isnot(None),
                            model.port_id.isnot(None)))
        res = dict(
            (port.port_id, port.eos_port_representation())
            for port in all_ports
        )
        return res


def get_tenants():
    """Returns list of all tenants in EOS-compatible format."""
    session = db.get_session()
    with session.begin():
        model = AristaProvisionedTenants
        all_tenants = session.query(model)
        res = dict(
            (tenant.tenant_id, tenant.eos_tenant_representation())
            for tenant in all_tenants
        )
        return res


class NeutronNets(db_base_plugin_v2.NeutronDbPluginV2):
    """Access to Neutron DB.

    Provides access to the Neutron databases for all provisioned
    networks as well as ports. This data is used during the
    synchronization of DB between ML2 Mechanism Driver and Arista EOS.
    Names of the networks and ports are not stored in the Arista
    repository; they are pulled from the Neutron DB.
    """

    def __init__(self):
        self.admin_ctx = nctx.get_admin_context()

    def get_network_name(self, tenant_id, network_id):
        network = self._get_network(tenant_id, network_id)
        network_name = None
        if network:
            network_name = network[0]['name']
        return network_name

    def get_all_networks_for_tenant(self, tenant_id):
        filters = {'tenant_id': [tenant_id]}
        return super(NeutronNets,
                     self).get_networks(self.admin_ctx, filters=filters) or []

    def get_all_ports_for_tenant(self, tenant_id):
        filters = {'tenant_id': [tenant_id]}
        return super(NeutronNets,
                     self).get_ports(self.admin_ctx, filters=filters) or []

    def _get_network(self, tenant_id, network_id):
        filters = {'tenant_id': [tenant_id],
                   'id': [network_id]}
        return super(NeutronNets,
                     self).get_networks(self.admin_ctx, filters=filters) or []


# ===== neutron/plugins/ml2/drivers/mech_arista/exceptions.py =====
# Copyright (c) 2013 OpenStack Foundation.  All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License"); see
# http://www.apache.org/licenses/LICENSE-2.0 for the full text.
+ + +"""Exceptions used by Arista ML2 Mechanism Driver.""" + +from neutron.common import exceptions + + +class AristaRpcError(exceptions.NeutronException): + message = _('%(msg)s') + + +class AristaConfigError(exceptions.NeutronException): + message = _('%(msg)s') diff --git a/neutron/plugins/ml2/drivers/mech_arista/mechanism_arista.py b/neutron/plugins/ml2/drivers/mech_arista/mechanism_arista.py new file mode 100644 index 000000000..d825693e2 --- /dev/null +++ b/neutron/plugins/ml2/drivers/mech_arista/mechanism_arista.py @@ -0,0 +1,1014 @@ +# Copyright (c) 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import threading + +import jsonrpclib +from oslo.config import cfg + +from neutron.common import constants as n_const +from neutron.extensions import portbindings +from neutron.openstack.common import log as logging +from neutron.plugins.ml2.common import exceptions as ml2_exc +from neutron.plugins.ml2 import driver_api +from neutron.plugins.ml2.drivers.mech_arista import config # noqa +from neutron.plugins.ml2.drivers.mech_arista import db +from neutron.plugins.ml2.drivers.mech_arista import exceptions as arista_exc + +LOG = logging.getLogger(__name__) + +EOS_UNREACHABLE_MSG = _('Unable to reach EOS') + + +class AristaRPCWrapper(object): + """Wraps Arista JSON RPC. + + All communications between Neutron and EOS are over JSON RPC. 
+ EOS - operating system used on Arista hardware + Command API - JSON RPC API provided by Arista EOS + """ + def __init__(self): + self._server = jsonrpclib.Server(self._eapi_host_url()) + self.keystone_conf = cfg.CONF.keystone_authtoken + self.region = cfg.CONF.ml2_arista.region_name + self._region_updated_time = None + # The cli_commands dict stores the mapping between the CLI command key + # and the actual CLI command. + self.cli_commands = {} + self.initialize_cli_commands() + + def _get_exit_mode_cmds(self, modes): + """Returns a list of 'exit' commands for the modes. + + :param modes: a list of CLI modes to exit out of. + """ + return ['exit'] * len(modes) + + def initialize_cli_commands(self): + self.cli_commands['timestamp'] = [] + + def check_cli_commands(self): + """Checks whether the CLI commands are vaild. + + This method tries to execute the commands on EOS and if it succeedes + the command is stored. + """ + cmd = ['show openstack config region %s timestamp' % self.region] + try: + self._run_eos_cmds(cmd) + self.cli_commands['timestamp'] = cmd + except arista_exc.AristaRpcError: + self.cli_commands['timestamp'] = [] + msg = _("'timestamp' command '%s' is not available on EOS") % cmd + LOG.warn(msg) + + def _keystone_url(self): + keystone_auth_url = ('%s://%s:%s/v2.0/' % + (self.keystone_conf.auth_protocol, + self.keystone_conf.auth_host, + self.keystone_conf.auth_port)) + return keystone_auth_url + + def get_tenants(self): + """Returns dict of all tenants known by EOS. + + :returns: dictionary containing the networks per tenant + and VMs allocated per tenant + """ + cmds = ['show openstack config region %s' % self.region] + command_output = self._run_eos_cmds(cmds) + tenants = command_output[0]['tenants'] + + return tenants + + def plug_port_into_network(self, vm_id, host_id, port_id, + net_id, tenant_id, port_name, device_owner): + """Genric routine plug a port of a VM instace into network. 
+ + :param vm_id: globally unique identifier for VM instance + :param host: ID of the host where the VM is placed + :param port_id: globally unique port ID that connects VM to network + :param network_id: globally unique neutron network identifier + :param tenant_id: globally unique neutron tenant identifier + :param port_name: Name of the port - for display purposes + :param device_owner: Device owner - e.g. compute or network:dhcp + """ + if device_owner == n_const.DEVICE_OWNER_DHCP: + self.plug_dhcp_port_into_network(vm_id, + host_id, + port_id, + net_id, + tenant_id, + port_name) + elif device_owner.startswith('compute'): + self.plug_host_into_network(vm_id, + host_id, + port_id, + net_id, + tenant_id, + port_name) + + def plug_host_into_network(self, vm_id, host, port_id, + network_id, tenant_id, port_name): + """Creates VLAN between TOR and compute host. + + :param vm_id: globally unique identifier for VM instance + :param host: ID of the host where the VM is placed + :param port_id: globally unique port ID that connects VM to network + :param network_id: globally unique neutron network identifier + :param tenant_id: globally unique neutron tenant identifier + :param port_name: Name of the port - for display purposes + """ + cmds = ['tenant %s' % tenant_id, + 'vm id %s hostid %s' % (vm_id, host)] + if port_name: + cmds.append('port id %s name "%s" network-id %s' % + (port_id, port_name, network_id)) + else: + cmds.append('port id %s network-id %s' % + (port_id, network_id)) + cmds.append('exit') + cmds.append('exit') + self._run_openstack_cmds(cmds) + + def plug_dhcp_port_into_network(self, dhcp_id, host, port_id, + network_id, tenant_id, port_name): + """Creates VLAN between TOR and dhcp host. 
+ + :param dhcp_id: globally unique identifier for dhcp + :param host: ID of the host where the dhcp is hosted + :param port_id: globally unique port ID that connects dhcp to network + :param network_id: globally unique neutron network identifier + :param tenant_id: globally unique neutron tenant identifier + :param port_name: Name of the port - for display purposes + """ + cmds = ['tenant %s' % tenant_id, + 'network id %s' % network_id] + if port_name: + cmds.append('dhcp id %s hostid %s port-id %s name "%s"' % + (dhcp_id, host, port_id, port_name)) + else: + cmds.append('dhcp id %s hostid %s port-id %s' % + (dhcp_id, host, port_id)) + cmds.append('exit') + self._run_openstack_cmds(cmds) + + def unplug_host_from_network(self, vm_id, host, port_id, + network_id, tenant_id): + """Removes previously configured VLAN between TOR and a host. + + :param vm_id: globally unique identifier for VM instance + :param host: ID of the host where the VM is placed + :param port_id: globally unique port ID that connects VM to network + :param network_id: globally unique neutron network identifier + :param tenant_id: globally unique neutron tenant identifier + """ + cmds = ['tenant %s' % tenant_id, + 'vm id %s hostid %s' % (vm_id, host), + 'no port id %s' % port_id, + 'exit', + 'exit'] + self._run_openstack_cmds(cmds) + + def unplug_dhcp_port_from_network(self, dhcp_id, host, port_id, + network_id, tenant_id): + """Removes previously configured VLAN between TOR and a dhcp host. 
+ + :param dhcp_id: globally unique identifier for dhcp + :param host: ID of the host where the dhcp is hosted + :param port_id: globally unique port ID that connects dhcp to network + :param network_id: globally unique neutron network identifier + :param tenant_id: globally unique neutron tenant identifier + """ + cmds = ['tenant %s' % tenant_id, + 'network id %s' % network_id, + 'no dhcp id %s port-id %s' % (dhcp_id, port_id), + 'exit'] + self._run_openstack_cmds(cmds) + + def create_network(self, tenant_id, network): + """Creates a single network on Arista hardware + + :param tenant_id: globally unique neutron tenant identifier + :param network: dict containing network_id, network_name and + segmentation_id + """ + self.create_network_bulk(tenant_id, [network]) + + def create_network_bulk(self, tenant_id, network_list): + """Creates a network on Arista Hardware + + :param tenant_id: globally unique neutron tenant identifier + :param network_list: list of dicts containing network_id, network_name + and segmentation_id + """ + cmds = ['tenant %s' % tenant_id] + # Create a reference to function to avoid name lookups in the loop + append_cmd = cmds.append + for network in network_list: + try: + append_cmd('network id %s name "%s"' % + (network['network_id'], network['network_name'])) + except KeyError: + append_cmd('network id %s' % network['network_id']) + # Enter segment mode without exiting out of network mode + append_cmd('segment 1 type vlan id %d' % + network['segmentation_id']) + cmds.extend(self._get_exit_mode_cmds(['segment', 'network', 'tenant'])) + self._run_openstack_cmds(cmds) + + def create_network_segments(self, tenant_id, network_id, + network_name, segments): + """Creates a network on Arista Hardware + + Note: This method is not used at the moment. create_network() + is used instead. This will be used once the support for + multiple segments is added in Neutron. 
+ + :param tenant_id: globally unique neutron tenant identifier + :param network_id: globally unique neutron network identifier + :param network_name: Network name - for display purposes + :param segments: List of segments in a given network + """ + if segments: + cmds = ['tenant %s' % tenant_id, + 'network id %s name "%s"' % (network_id, network_name)] + seg_num = 1 + for seg in segments: + cmds.append('segment %d type %s id %d' % (seg_num, + seg['network_type'], seg['segmentation_id'])) + seg_num += 1 + cmds.append('exit') # exit for segment mode + cmds.append('exit') # exit for network mode + cmds.append('exit') # exit for tenant mode + + self._run_openstack_cmds(cmds) + + def delete_network(self, tenant_id, network_id): + """Deletes a specified network for a given tenant + + :param tenant_id: globally unique neutron tenant identifier + :param network_id: globally unique neutron network identifier + """ + self.delete_network_bulk(tenant_id, [network_id]) + + def delete_network_bulk(self, tenant_id, network_id_list): + """Deletes the network ids specified for a tenant + + :param tenant_id: globally unique neutron tenant identifier + :param network_id_list: list of globally unique neutron network + identifiers + """ + cmds = ['tenant %s' % tenant_id] + for network_id in network_id_list: + cmds.append('no network id %s' % network_id) + cmds.extend(self._get_exit_mode_cmds(['network', 'tenant'])) + self._run_openstack_cmds(cmds) + + def delete_vm(self, tenant_id, vm_id): + """Deletes a VM from EOS for a given tenant + + :param tenant_id : globally unique neutron tenant identifier + :param vm_id : id of a VM that needs to be deleted. + """ + self.delete_vm_bulk(tenant_id, [vm_id]) + + def delete_vm_bulk(self, tenant_id, vm_id_list): + """Deletes VMs from EOS for a given tenant + + :param tenant_id : globally unique neutron tenant identifier + :param vm_id_list : ids of VMs that needs to be deleted. 
+ """ + cmds = ['tenant %s' % tenant_id] + for vm_id in vm_id_list: + cmds.append('no vm id %s' % vm_id) + cmds.extend(self._get_exit_mode_cmds(['vm', 'tenant'])) + self._run_openstack_cmds(cmds) + + def create_vm_port_bulk(self, tenant_id, vm_port_list, vms): + """Sends a bulk request to create ports. + + :param tenant_id: globaly unique neutron tenant identifier + :param vm_port_list: list of ports that need to be created. + :param vms: list of vms to which the ports will be attached to. + """ + cmds = ['tenant %s' % tenant_id] + # Create a reference to function to avoid name lookups in the loop + append_cmd = cmds.append + for port in vm_port_list: + try: + vm = vms[port['device_id']] + except KeyError: + msg = _("VM id %(vmid)s not found for port %(portid)s") % { + 'vmid': port['device_id'], + 'portid': port['id']} + LOG.warn(msg) + continue + + port_name = '' if 'name' not in port else 'name "%s"' % ( + port['name'] + ) + + if port['device_owner'] == n_const.DEVICE_OWNER_DHCP: + append_cmd('network id %s' % port['network_id']) + append_cmd('dhcp id %s hostid %s port-id %s %s' % + (vm['vmId'], vm['host'], port['id'], port_name)) + elif port['device_owner'].startswith('compute'): + append_cmd('vm id %s hostid %s' % (vm['vmId'], vm['host'])) + append_cmd('port id %s %s network-id %s' % + (port['id'], port_name, port['network_id'])) + else: + msg = _("Unknown device owner: %s") % port['device_owner'] + LOG.warn(msg) + continue + + append_cmd('exit') + self._run_openstack_cmds(cmds) + + def delete_tenant(self, tenant_id): + """Deletes a given tenant and all its networks and VMs from EOS. + + :param tenant_id: globally unique neutron tenant identifier + """ + self.delete_tenant_bulk([tenant_id]) + + def delete_tenant_bulk(self, tenant_list): + """Sends a bulk request to delete the tenants. + + :param tenant_list: list of globaly unique neutron tenant ids which + need to be deleted. 
+ """ + + cmds = [] + for tenant in tenant_list: + cmds.append('no tenant %s' % tenant) + cmds.append('exit') + self._run_openstack_cmds(cmds) + + def delete_this_region(self): + """Deleted the region data from EOS.""" + cmds = ['enable', + 'configure', + 'cvx', + 'service openstack', + 'no region %s' % self.region, + 'exit', + 'exit', + 'exit'] + self._run_eos_cmds(cmds) + + def register_with_eos(self): + """This is the registration request with EOS. + + This the initial handshake between Neutron and EOS. + critical end-point information is registered with EOS. + """ + cmds = ['auth url %s user "%s" password "%s"' % + (self._keystone_url(), + self.keystone_conf.admin_user, + self.keystone_conf.admin_password)] + + log_cmds = ['auth url %s user %s password ******' % + (self._keystone_url(), + self.keystone_conf.admin_user)] + + self._run_openstack_cmds(cmds, commands_to_log=log_cmds) + + def clear_region_updated_time(self): + """Clear the region updated time which forces a resync.""" + + self._region_updated_time = None + + def region_in_sync(self): + """Check whether EOS is in sync with Neutron.""" + + eos_region_updated_times = self.get_region_updated_time() + return (self._region_updated_time and + (self._region_updated_time['regionTimestamp'] == + eos_region_updated_times['regionTimestamp'])) + + def get_region_updated_time(self): + """Return the timestamp of the last update. + + This method returns the time at which any entities in the region + were updated. + """ + timestamp_cmd = self.cli_commands['timestamp'] + if timestamp_cmd: + return self._run_eos_cmds(commands=timestamp_cmd)[0] + return None + + def _run_eos_cmds(self, commands, commands_to_log=None): + """Execute/sends a CAPI (Command API) command to EOS. + + In this method, list of commands is appended with prefix and + postfix commands - to make is understandble by EOS. + + :param commands : List of command to be executed on EOS. 
+ :param commands_to_log : This should be set to the command that is + logged. If it is None, then the commands + param is logged. + """ + + log_cmd = commands + if commands_to_log: + log_cmd = commands_to_log + + LOG.info(_('Executing command on Arista EOS: %s'), log_cmd) + + try: + # this returns array of return values for every command in + # full_command list + ret = self._server.runCmds(version=1, cmds=commands) + except Exception as error: + host = cfg.CONF.ml2_arista.eapi_host + msg = (_('Error %(err)s while trying to execute ' + 'commands %(cmd)s on EOS %(host)s') % + {'err': error, 'cmd': commands_to_log, 'host': host}) + LOG.exception(msg) + raise arista_exc.AristaRpcError(msg=msg) + + return ret + + def _build_command(self, cmds): + """Build full EOS's openstack CLI command. + + Helper method to add commands to enter and exit from openstack + CLI modes. + + :param cmds: The openstack CLI commands that need to be executed + in the openstack config mode. + """ + + full_command = [ + 'enable', + 'configure', + 'cvx', + 'service openstack', + 'region %s' % self.region, + ] + full_command.extend(cmds) + full_command.extend(self._get_exit_mode_cmds(['region', + 'openstack', + 'cvx'])) + full_command.extend(self.cli_commands['timestamp']) + return full_command + + def _run_openstack_cmds(self, commands, commands_to_log=None): + """Execute/sends a CAPI (Command API) command to EOS. + + In this method, list of commands is appended with prefix and + postfix commands - to make is understandble by EOS. + + :param commands : List of command to be executed on EOS. + :param commands_to_logs : This should be set to the command that is + logged. If it is None, then the commands + param is logged. 
+ """ + + full_command = self._build_command(commands) + if commands_to_log: + full_log_command = self._build_command(commands_to_log) + else: + full_log_command = None + ret = self._run_eos_cmds(full_command, full_log_command) + # Remove return values for 'configure terminal', + # 'service openstack' and 'exit' commands + if self.cli_commands['timestamp']: + self._region_updated_time = ret[-1] + + def _eapi_host_url(self): + self._validate_config() + + user = cfg.CONF.ml2_arista.eapi_username + pwd = cfg.CONF.ml2_arista.eapi_password + host = cfg.CONF.ml2_arista.eapi_host + + eapi_server_url = ('https://%s:%s@%s/command-api' % + (user, pwd, host)) + return eapi_server_url + + def _validate_config(self): + if cfg.CONF.ml2_arista.get('eapi_host') == '': + msg = _('Required option eapi_host is not set') + LOG.error(msg) + raise arista_exc.AristaConfigError(msg=msg) + if cfg.CONF.ml2_arista.get('eapi_username') == '': + msg = _('Required option eapi_username is not set') + LOG.error(msg) + raise arista_exc.AristaConfigError(msg=msg) + + +class SyncService(object): + """Synchronizatin of information between Neutron and EOS + + Periodically (through configuration option), this service + ensures that Networks and VMs configured on EOS/Arista HW + are always in sync with Neutron DB. + """ + def __init__(self, rpc_wrapper, neutron_db): + self._rpc = rpc_wrapper + self._ndb = neutron_db + self._force_sync = True + + def synchronize(self): + """Sends data to EOS which differs from neutron DB.""" + + LOG.info(_('Syncing Neutron <-> EOS')) + try: + # Get the time at which entities in the region were updated. + # If the times match, then ML2 is in sync with EOS. Otherwise + # perform a complete sync. 
+ if not self._force_sync and self._rpc.region_in_sync(): + LOG.info(_('OpenStack and EOS are in sync!')) + return + except arista_exc.AristaRpcError: + LOG.warning(EOS_UNREACHABLE_MSG) + self._force_sync = True + return + + try: + #Always register with EOS to ensure that it has correct credentials + self._rpc.register_with_eos() + eos_tenants = self._rpc.get_tenants() + except arista_exc.AristaRpcError: + LOG.warning(EOS_UNREACHABLE_MSG) + self._force_sync = True + return + + db_tenants = db.get_tenants() + + if not db_tenants and eos_tenants: + # No tenants configured in Neutron. Clear all EOS state + try: + self._rpc.delete_this_region() + msg = _('No Tenants configured in Neutron DB. But %d ' + 'tenants disovered in EOS during synchronization.' + 'Enitre EOS region is cleared') % len(eos_tenants) + LOG.info(msg) + # Re-register with EOS so that the timestamp is updated. + self._rpc.register_with_eos() + # Region has been completely cleaned. So there is nothing to + # syncronize + self._force_sync = False + except arista_exc.AristaRpcError: + LOG.warning(EOS_UNREACHABLE_MSG) + self._force_sync = True + return + + # Delete tenants that are in EOS, but not in the database + tenants_to_delete = frozenset(eos_tenants.keys()).difference( + db_tenants.keys()) + + if tenants_to_delete: + try: + self._rpc.delete_tenant_bulk(tenants_to_delete) + except arista_exc.AristaRpcError: + LOG.warning(EOS_UNREACHABLE_MSG) + self._force_sync = True + return + + # None of the commands have failed till now. 
But if subsequent + # operations fail, then force_sync is set to true + self._force_sync = False + + for tenant in db_tenants: + db_nets = db.get_networks(tenant) + db_vms = db.get_vms(tenant) + eos_nets = self._get_eos_networks(eos_tenants, tenant) + eos_vms = self._get_eos_vms(eos_tenants, tenant) + + db_nets_key_set = frozenset(db_nets.keys()) + db_vms_key_set = frozenset(db_vms.keys()) + eos_nets_key_set = frozenset(eos_nets.keys()) + eos_vms_key_set = frozenset(eos_vms.keys()) + + # Find the networks that are present on EOS, but not in Neutron DB + nets_to_delete = eos_nets_key_set.difference(db_nets_key_set) + + # Find the VMs that are present on EOS, but not in Neutron DB + vms_to_delete = eos_vms_key_set.difference(db_vms_key_set) + + # Find the Networks that are present in Neutron DB, but not on EOS + nets_to_update = db_nets_key_set.difference(eos_nets_key_set) + + # Find the VMs that are present in Neutron DB, but not on EOS + vms_to_update = db_vms_key_set.difference(eos_vms_key_set) + + try: + if vms_to_delete: + self._rpc.delete_vm_bulk(tenant, vms_to_delete) + if nets_to_delete: + self._rpc.delete_network_bulk(tenant, nets_to_delete) + if nets_to_update: + # Create a dict of networks keyed by id. + neutron_nets = dict( + (network['id'], network) for network in + self._ndb.get_all_networks_for_tenant(tenant) + ) + + networks = [ + {'network_id': net_id, + 'segmentation_id': + db_nets[net_id]['segmentationTypeId'], + 'network_name': + neutron_nets.get(net_id, {'name': ''})['name'], } + for net_id in nets_to_update + ] + self._rpc.create_network_bulk(tenant, networks) + if vms_to_update: + # Filter the ports to only the vms that we are interested + # in. 
+ vm_ports = [ + port for port in self._ndb.get_all_ports_for_tenant( + tenant) if port['device_id'] in vms_to_update + ] + self._rpc.create_vm_port_bulk(tenant, vm_ports, db_vms) + except arista_exc.AristaRpcError: + LOG.warning(EOS_UNREACHABLE_MSG) + self._force_sync = True + + def _get_eos_networks(self, eos_tenants, tenant): + networks = {} + if eos_tenants and tenant in eos_tenants: + networks = eos_tenants[tenant]['tenantNetworks'] + return networks + + def _get_eos_vms(self, eos_tenants, tenant): + vms = {} + if eos_tenants and tenant in eos_tenants: + vms = eos_tenants[tenant]['tenantVmInstances'] + return vms + + +class AristaDriver(driver_api.MechanismDriver): + """Ml2 Mechanism driver for Arista networking hardware. + + Remebers all networks and VMs that are provisioned on Arista Hardware. + Does not send network provisioning request if the network has already been + provisioned before for the given port. + """ + def __init__(self, rpc=None): + + self.rpc = rpc or AristaRPCWrapper() + self.db_nets = db.AristaProvisionedNets() + self.db_vms = db.AristaProvisionedVms() + self.db_tenants = db.AristaProvisionedTenants() + self.ndb = db.NeutronNets() + + confg = cfg.CONF.ml2_arista + self.segmentation_type = db.VLAN_SEGMENTATION + self.timer = None + self.eos = SyncService(self.rpc, self.ndb) + self.sync_timeout = confg['sync_interval'] + self.eos_sync_lock = threading.Lock() + + def initialize(self): + self.rpc.register_with_eos() + self._cleanup_db() + self.rpc.check_cli_commands() + # Registering with EOS updates self.rpc.region_updated_time. 
Clear it + # to force an initial sync + self.rpc.clear_region_updated_time() + self._synchronization_thread() + + def create_network_precommit(self, context): + """Remember the tenant, and network information.""" + + network = context.current + segments = context.network_segments + network_id = network['id'] + tenant_id = network['tenant_id'] + segmentation_id = segments[0]['segmentation_id'] + with self.eos_sync_lock: + db.remember_tenant(tenant_id) + db.remember_network(tenant_id, + network_id, + segmentation_id) + + def create_network_postcommit(self, context): + """Provision the network on the Arista Hardware.""" + + network = context.current + network_id = network['id'] + network_name = network['name'] + tenant_id = network['tenant_id'] + segments = context.network_segments + vlan_id = segments[0]['segmentation_id'] + with self.eos_sync_lock: + if db.is_network_provisioned(tenant_id, network_id): + try: + network_dict = { + 'network_id': network_id, + 'segmentation_id': vlan_id, + 'network_name': network_name} + self.rpc.create_network(tenant_id, network_dict) + except arista_exc.AristaRpcError: + LOG.info(EOS_UNREACHABLE_MSG) + raise ml2_exc.MechanismDriverError() + else: + msg = _('Network %s is not created as it is not found in' + 'Arista DB') % network_id + LOG.info(msg) + + def update_network_precommit(self, context): + """At the moment we only support network name change + + Any other change in network is not supported at this time. + We do not store the network names, therefore, no DB store + action is performed here. + """ + new_network = context.current + orig_network = context.original + if new_network['name'] != orig_network['name']: + msg = _('Network name changed to %s') % new_network['name'] + LOG.info(msg) + + def update_network_postcommit(self, context): + """At the moment we only support network name change + + If network name is changed, a new network create request is + sent to the Arista Hardware. 
+ """ + new_network = context.current + orig_network = context.original + if new_network['name'] != orig_network['name']: + network_id = new_network['id'] + network_name = new_network['name'] + tenant_id = new_network['tenant_id'] + vlan_id = new_network['provider:segmentation_id'] + with self.eos_sync_lock: + if db.is_network_provisioned(tenant_id, network_id): + try: + network_dict = { + 'network_id': network_id, + 'segmentation_id': vlan_id, + 'network_name': network_name} + self.rpc.create_network(tenant_id, network_dict) + except arista_exc.AristaRpcError: + LOG.info(EOS_UNREACHABLE_MSG) + raise ml2_exc.MechanismDriverError() + else: + msg = _('Network %s is not updated as it is not found in' + 'Arista DB') % network_id + LOG.info(msg) + + def delete_network_precommit(self, context): + """Delete the network infromation from the DB.""" + network = context.current + network_id = network['id'] + tenant_id = network['tenant_id'] + with self.eos_sync_lock: + if db.is_network_provisioned(tenant_id, network_id): + db.forget_network(tenant_id, network_id) + # if necessary, delete tenant as well. + self.delete_tenant(tenant_id) + + def delete_network_postcommit(self, context): + """Send network delete request to Arista HW.""" + network = context.current + network_id = network['id'] + tenant_id = network['tenant_id'] + with self.eos_sync_lock: + + # Succeed deleting network in case EOS is not accessible. + # EOS state will be updated by sync thread once EOS gets + # alive. + try: + self.rpc.delete_network(tenant_id, network_id) + except arista_exc.AristaRpcError: + LOG.info(EOS_UNREACHABLE_MSG) + raise ml2_exc.MechanismDriverError() + + def create_port_precommit(self, context): + """Remember the infromation about a VM and its ports + + A VM information, along with the physical host information + is saved. 
+ """ + port = context.current + device_id = port['device_id'] + device_owner = port['device_owner'] + host = port[portbindings.HOST_ID] + + # device_id and device_owner are set on VM boot + is_vm_boot = device_id and device_owner + if host and is_vm_boot: + port_id = port['id'] + network_id = port['network_id'] + tenant_id = port['tenant_id'] + with self.eos_sync_lock: + db.remember_vm(device_id, host, port_id, + network_id, tenant_id) + + def create_port_postcommit(self, context): + """Plug a physical host into a network. + + Send provisioning request to Arista Hardware to plug a host + into appropriate network. + """ + port = context.current + device_id = port['device_id'] + device_owner = port['device_owner'] + host = port[portbindings.HOST_ID] + + # device_id and device_owner are set on VM boot + is_vm_boot = device_id and device_owner + if host and is_vm_boot: + port_id = port['id'] + port_name = port['name'] + network_id = port['network_id'] + tenant_id = port['tenant_id'] + with self.eos_sync_lock: + hostname = self._host_name(host) + vm_provisioned = db.is_vm_provisioned(device_id, + host, + port_id, + network_id, + tenant_id) + net_provisioned = db.is_network_provisioned(tenant_id, + network_id) + if vm_provisioned and net_provisioned: + try: + self.rpc.plug_port_into_network(device_id, + hostname, + port_id, + network_id, + tenant_id, + port_name, + device_owner) + except arista_exc.AristaRpcError: + LOG.info(EOS_UNREACHABLE_MSG) + raise ml2_exc.MechanismDriverError() + else: + msg = _('VM %s is not created as it is not found in ' + 'Arista DB') % device_id + LOG.info(msg) + + def update_port_precommit(self, context): + """Update the name of a given port. + + At the moment we only support port name change. + Any other change to port is not supported at this time. + We do not store the port names, therefore, no DB store + action is performed here. 
+ """ + new_port = context.current + orig_port = context.original + if new_port['name'] != orig_port['name']: + msg = _('Port name changed to %s') % new_port['name'] + LOG.info(msg) + + def update_port_postcommit(self, context): + """Update the name of a given port in EOS. + + At the moment we only support port name change + Any other change to port is not supported at this time. + """ + port = context.current + orig_port = context.original + if port['name'] == orig_port['name']: + # nothing to do + return + + device_id = port['device_id'] + device_owner = port['device_owner'] + host = port[portbindings.HOST_ID] + is_vm_boot = device_id and device_owner + + if host and is_vm_boot: + port_id = port['id'] + port_name = port['name'] + network_id = port['network_id'] + tenant_id = port['tenant_id'] + with self.eos_sync_lock: + hostname = self._host_name(host) + segmentation_id = db.get_segmentation_id(tenant_id, + network_id) + vm_provisioned = db.is_vm_provisioned(device_id, + host, + port_id, + network_id, + tenant_id) + net_provisioned = db.is_network_provisioned(tenant_id, + network_id, + segmentation_id) + if vm_provisioned and net_provisioned: + try: + self.rpc.plug_port_into_network(device_id, + hostname, + port_id, + network_id, + tenant_id, + port_name, + device_owner) + except arista_exc.AristaRpcError: + LOG.info(EOS_UNREACHABLE_MSG) + raise ml2_exc.MechanismDriverError() + else: + msg = _('VM %s is not updated as it is not found in ' + 'Arista DB') % device_id + LOG.info(msg) + + def delete_port_precommit(self, context): + """Delete information about a VM and host from the DB.""" + port = context.current + + host_id = port[portbindings.HOST_ID] + device_id = port['device_id'] + tenant_id = port['tenant_id'] + network_id = port['network_id'] + port_id = port['id'] + with self.eos_sync_lock: + if db.is_vm_provisioned(device_id, host_id, port_id, + network_id, tenant_id): + db.forget_vm(device_id, host_id, port_id, + network_id, tenant_id) + # if necessary, 
delete tenant as well. + self.delete_tenant(tenant_id) + + def delete_port_postcommit(self, context): + """unPlug a physical host from a network. + + Send provisioning request to Arista Hardware to unplug a host + from appropriate network. + """ + port = context.current + device_id = port['device_id'] + host = port[portbindings.HOST_ID] + port_id = port['id'] + network_id = port['network_id'] + tenant_id = port['tenant_id'] + device_owner = port['device_owner'] + + try: + with self.eos_sync_lock: + hostname = self._host_name(host) + if device_owner == n_const.DEVICE_OWNER_DHCP: + self.rpc.unplug_dhcp_port_from_network(device_id, + hostname, + port_id, + network_id, + tenant_id) + else: + self.rpc.unplug_host_from_network(device_id, + hostname, + port_id, + network_id, + tenant_id) + except arista_exc.AristaRpcError: + LOG.info(EOS_UNREACHABLE_MSG) + raise ml2_exc.MechanismDriverError() + + def delete_tenant(self, tenant_id): + """delete a tenant from DB. + + A tenant is deleted only if there is no network or VM configured + configured for this tenant. 
+ """ + objects_for_tenant = (db.num_nets_provisioned(tenant_id) + + db.num_vms_provisioned(tenant_id)) + if not objects_for_tenant: + db.forget_tenant(tenant_id) + + def _host_name(self, hostname): + fqdns_used = cfg.CONF.ml2_arista['use_fqdn'] + return hostname if fqdns_used else hostname.split('.')[0] + + def _synchronization_thread(self): + with self.eos_sync_lock: + self.eos.synchronize() + + self.timer = threading.Timer(self.sync_timeout, + self._synchronization_thread) + self.timer.start() + + def stop_synchronization_thread(self): + if self.timer: + self.timer.cancel() + self.timer = None + + def _cleanup_db(self): + """Clean up any uncessary entries in our DB.""" + db_tenants = db.get_tenants() + for tenant in db_tenants: + neutron_nets = self.ndb.get_all_networks_for_tenant(tenant) + neutron_nets_id = [] + for net in neutron_nets: + neutron_nets_id.append(net['id']) + db_nets = db.get_networks(tenant) + for net_id in db_nets.keys(): + if net_id not in neutron_nets_id: + db.forget_network(tenant, net_id) diff --git a/neutron/plugins/ml2/drivers/mech_bigswitch/__init__.py b/neutron/plugins/ml2/drivers/mech_bigswitch/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/neutron/plugins/ml2/drivers/mech_bigswitch/driver.py b/neutron/plugins/ml2/drivers/mech_bigswitch/driver.py new file mode 100644 index 000000000..d8fd53dd1 --- /dev/null +++ b/neutron/plugins/ml2/drivers/mech_bigswitch/driver.py @@ -0,0 +1,130 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2014 Big Switch Networks, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Sumit Naiksatam, sumitnaiksatam@gmail.com, Big Switch Networks, Inc. +# @author: Kevin Benton, Big Switch Networks, Inc. +import copy +import httplib + +import eventlet +from oslo.config import cfg + +from neutron import context as ctx +from neutron.extensions import portbindings +from neutron.openstack.common import excutils +from neutron.openstack.common import log +from neutron.plugins.bigswitch import config as pl_config +from neutron.plugins.bigswitch import plugin +from neutron.plugins.bigswitch import servermanager +from neutron.plugins.ml2 import driver_api as api + + +LOG = log.getLogger(__name__) + + +class BigSwitchMechanismDriver(plugin.NeutronRestProxyV2Base, + api.MechanismDriver): + + """Mechanism Driver for Big Switch Networks Controller. + + This driver relays the network create, update, delete + operations to the Big Switch Controller. 
+ """ + + def initialize(self): + LOG.debug(_('Initializing driver')) + + # register plugin config opts + pl_config.register_config() + self.evpool = eventlet.GreenPool(cfg.CONF.RESTPROXY.thread_pool_size) + # backend doesn't support bulk operations yet + self.native_bulk_support = False + + # init network ctrl connections + self.servers = servermanager.ServerPool() + self.servers.get_topo_function = self._get_all_data + self.servers.get_topo_function_args = {'get_ports': True, + 'get_floating_ips': False, + 'get_routers': False} + self.segmentation_types = ', '.join(cfg.CONF.ml2.type_drivers) + LOG.debug(_("Initialization done")) + + def create_network_postcommit(self, context): + # create network on the network controller + self._send_create_network(context.current) + + def update_network_postcommit(self, context): + # update network on the network controller + self._send_update_network(context.current) + + def delete_network_postcommit(self, context): + # delete network on the network controller + self._send_delete_network(context.current) + + def create_port_postcommit(self, context): + # create port on the network controller + port = self._prepare_port_for_controller(context) + if port: + self.async_port_create(port["network"]["tenant_id"], + port["network"]["id"], port) + + def update_port_postcommit(self, context): + # update port on the network controller + port = self._prepare_port_for_controller(context) + if port: + try: + self.servers.rest_update_port(port["network"]["tenant_id"], + port["network"]["id"], port) + except servermanager.RemoteRestError as e: + with excutils.save_and_reraise_exception() as ctxt: + if (cfg.CONF.RESTPROXY.auto_sync_on_failure and + e.status == httplib.NOT_FOUND and + servermanager.NXNETWORK in e.reason): + ctxt.reraise = False + LOG.error(_("Iconsistency with backend controller " + "triggering full synchronization.")) + topoargs = self.servers.get_topo_function_args + self._send_all_data( + send_ports=topoargs['get_ports'], + 
send_floating_ips=topoargs['get_floating_ips'], + send_routers=topoargs['get_routers'], + triggered_by_tenant=port["network"]["tenant_id"] + ) + + def delete_port_postcommit(self, context): + # delete port on the network controller + port = context.current + net = context.network.current + self.servers.rest_delete_port(net["tenant_id"], net["id"], port['id']) + + def _prepare_port_for_controller(self, context): + # make a copy so the context isn't changed for other drivers + port = copy.deepcopy(context.current) + net = context.network.current + port['network'] = net + port['bound_segment'] = context.bound_segment + actx = ctx.get_admin_context() + prepped_port = self._extend_port_dict_binding(actx, port) + prepped_port = self._map_state_and_status(prepped_port) + if (portbindings.HOST_ID not in prepped_port or + prepped_port[portbindings.HOST_ID] == ''): + LOG.warning(_("Ignoring port notification to controller because " + "of missing host ID.")) + # in ML2, controller doesn't care about ports without + # the host_id set + return False + return prepped_port diff --git a/neutron/plugins/ml2/drivers/mech_hyperv.py b/neutron/plugins/ml2/drivers/mech_hyperv.py new file mode 100644 index 000000000..b384d3425 --- /dev/null +++ b/neutron/plugins/ml2/drivers/mech_hyperv.py @@ -0,0 +1,57 @@ +# Copyright (c) 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import re + +from neutron.common import constants +from neutron.extensions import portbindings +from neutron.openstack.common import log +from neutron.plugins.ml2 import driver_api as api +from neutron.plugins.ml2.drivers import mech_agent + +LOG = log.getLogger(__name__) + + +class HypervMechanismDriver(mech_agent.SimpleAgentMechanismDriverBase): + """Attach to networks using hyperv L2 agent. + + The HypervMechanismDriver integrates the ml2 plugin with the + hyperv L2 agent. Port binding with this driver requires the hyperv + agent to be running on the port's host, and that agent to have + connectivity to at least one segment of the port's network. + """ + + def __init__(self): + super(HypervMechanismDriver, self).__init__( + constants.AGENT_TYPE_HYPERV, + portbindings.VIF_TYPE_HYPERV, + {portbindings.CAP_PORT_FILTER: False}) + + def check_segment_for_agent(self, segment, agent): + mappings = agent['configurations'].get('vswitch_mappings', {}) + LOG.debug(_("Checking segment: %(segment)s " + "for mappings: %(mappings)s"), + {'segment': segment, 'mappings': mappings}) + network_type = segment[api.NETWORK_TYPE] + if network_type == 'local': + return True + elif network_type in ['flat', 'vlan']: + for pattern in mappings: + if re.match(pattern, segment[api.PHYSICAL_NETWORK]): + return True + else: + return False + else: + return False diff --git a/neutron/plugins/ml2/drivers/mech_linuxbridge.py b/neutron/plugins/ml2/drivers/mech_linuxbridge.py new file mode 100644 index 000000000..b304ad4ba --- /dev/null +++ b/neutron/plugins/ml2/drivers/mech_linuxbridge.py @@ -0,0 +1,57 @@ +# Copyright (c) 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from neutron.common import constants +from neutron.extensions import portbindings +from neutron.openstack.common import log +from neutron.plugins.ml2 import driver_api as api +from neutron.plugins.ml2.drivers import mech_agent + +LOG = log.getLogger(__name__) + + +class LinuxbridgeMechanismDriver(mech_agent.SimpleAgentMechanismDriverBase): + """Attach to networks using linuxbridge L2 agent. + + The LinuxbridgeMechanismDriver integrates the ml2 plugin with the + linuxbridge L2 agent. Port binding with this driver requires the + linuxbridge agent to be running on the port's host, and that agent + to have connectivity to at least one segment of the port's + network. 
+ """ + + def __init__(self): + super(LinuxbridgeMechanismDriver, self).__init__( + constants.AGENT_TYPE_LINUXBRIDGE, + portbindings.VIF_TYPE_BRIDGE, + {portbindings.CAP_PORT_FILTER: True}) + + def check_segment_for_agent(self, segment, agent): + mappings = agent['configurations'].get('interface_mappings', {}) + tunnel_types = agent['configurations'].get('tunnel_types', []) + LOG.debug(_("Checking segment: %(segment)s " + "for mappings: %(mappings)s " + "with tunnel_types: %(tunnel_types)s"), + {'segment': segment, 'mappings': mappings, + 'tunnel_types': tunnel_types}) + network_type = segment[api.NETWORK_TYPE] + if network_type == 'local': + return True + elif network_type in tunnel_types: + return True + elif network_type in ['flat', 'vlan']: + return segment[api.PHYSICAL_NETWORK] in mappings + else: + return False diff --git a/neutron/plugins/ml2/drivers/mech_ofagent.py b/neutron/plugins/ml2/drivers/mech_ofagent.py new file mode 100644 index 000000000..b593e61d6 --- /dev/null +++ b/neutron/plugins/ml2/drivers/mech_ofagent.py @@ -0,0 +1,61 @@ +# Copyright (C) 2014 VA Linux Systems Japan K.K. +# Based on openvswitch mechanism driver. +# +# Copyright (c) 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# @author: Fumihiko Kakuma, VA Linux Systems Japan K.K. 
+ +from neutron.common import constants +from neutron.extensions import portbindings +from neutron.openstack.common import log +from neutron.plugins.common import constants as p_const +from neutron.plugins.ml2 import driver_api as api +from neutron.plugins.ml2.drivers import mech_agent + +LOG = log.getLogger(__name__) + + +class OfagentMechanismDriver(mech_agent.SimpleAgentMechanismDriverBase): + """Attach to networks using ofagent L2 agent. + + The OfagentMechanismDriver integrates the ml2 plugin with the + ofagent L2 agent. Port binding with this driver requires the + ofagent agent to be running on the port's host, and that agent + to have connectivity to at least one segment of the port's + network. + """ + + def __init__(self): + super(OfagentMechanismDriver, self).__init__( + constants.AGENT_TYPE_OFA, + portbindings.VIF_TYPE_OVS, + {portbindings.CAP_PORT_FILTER: True, + portbindings.OVS_HYBRID_PLUG: True}) + + def check_segment_for_agent(self, segment, agent): + mappings = agent['configurations'].get('bridge_mappings', {}) + tunnel_types = agent['configurations'].get('tunnel_types', []) + LOG.debug(_("Checking segment: %(segment)s " + "for mappings: %(mappings)s " + "with tunnel_types: %(tunnel_types)s"), + {'segment': segment, 'mappings': mappings, + 'tunnel_types': tunnel_types}) + network_type = segment[api.NETWORK_TYPE] + return ( + network_type == p_const.TYPE_LOCAL or + network_type in tunnel_types or + (network_type in [p_const.TYPE_FLAT, p_const.TYPE_VLAN] and + segment[api.PHYSICAL_NETWORK] in mappings) + ) diff --git a/neutron/plugins/ml2/drivers/mech_openvswitch.py b/neutron/plugins/ml2/drivers/mech_openvswitch.py new file mode 100644 index 000000000..0565b9730 --- /dev/null +++ b/neutron/plugins/ml2/drivers/mech_openvswitch.py @@ -0,0 +1,58 @@ +# Copyright (c) 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from neutron.common import constants +from neutron.extensions import portbindings +from neutron.openstack.common import log +from neutron.plugins.ml2 import driver_api as api +from neutron.plugins.ml2.drivers import mech_agent + +LOG = log.getLogger(__name__) + + +class OpenvswitchMechanismDriver(mech_agent.SimpleAgentMechanismDriverBase): + """Attach to networks using openvswitch L2 agent. + + The OpenvswitchMechanismDriver integrates the ml2 plugin with the + openvswitch L2 agent. Port binding with this driver requires the + openvswitch agent to be running on the port's host, and that agent + to have connectivity to at least one segment of the port's + network. 
+ """ + + def __init__(self): + super(OpenvswitchMechanismDriver, self).__init__( + constants.AGENT_TYPE_OVS, + portbindings.VIF_TYPE_OVS, + {portbindings.CAP_PORT_FILTER: True, + portbindings.OVS_HYBRID_PLUG: True}) + + def check_segment_for_agent(self, segment, agent): + mappings = agent['configurations'].get('bridge_mappings', {}) + tunnel_types = agent['configurations'].get('tunnel_types', []) + LOG.debug(_("Checking segment: %(segment)s " + "for mappings: %(mappings)s " + "with tunnel_types: %(tunnel_types)s"), + {'segment': segment, 'mappings': mappings, + 'tunnel_types': tunnel_types}) + network_type = segment[api.NETWORK_TYPE] + if network_type == 'local': + return True + elif network_type in tunnel_types: + return True + elif network_type in ['flat', 'vlan']: + return segment[api.PHYSICAL_NETWORK] in mappings + else: + return False diff --git a/neutron/plugins/ml2/drivers/mechanism_fslsdn.py b/neutron/plugins/ml2/drivers/mechanism_fslsdn.py new file mode 100755 index 000000000..514fd9b86 --- /dev/null +++ b/neutron/plugins/ml2/drivers/mechanism_fslsdn.py @@ -0,0 +1,288 @@ +# Copyright (c) 2014 Freescale Semiconductor +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# @author: Trinath Somanchi, Freescale, Inc + + +from neutronclient.v2_0 import client +from oslo.config import cfg + +from neutron.common import constants as n_const +from neutron.common import log +from neutron.extensions import portbindings +from neutron.openstack.common import log as logging +from neutron.plugins.common import constants +from neutron.plugins.ml2 import driver_api as api + + +LOG = logging.getLogger(__name__) + +# CRD service options required for FSL SDN OS Mech Driver +ml2_fslsdn_opts = [ + cfg.StrOpt('crd_user_name', default='crd', + help=_("CRD service Username")), + cfg.StrOpt('crd_password', default='password', + secret='True', + help=_("CRD Service Password")), + cfg.StrOpt('crd_tenant_name', default='service', + help=_("CRD Tenant Name")), + cfg.StrOpt('crd_auth_url', + default='http://127.0.0.1:5000/v2.0/', + help=_("CRD Auth URL")), + cfg.StrOpt('crd_url', + default='http://127.0.0.1:9797', + help=_("URL for connecting to CRD service")), + cfg.IntOpt('crd_url_timeout', + default=30, + help=_("Timeout value for connecting to " + "CRD service in seconds")), + cfg.StrOpt('crd_region_name', + default='RegionOne', + help=_("Region name for connecting to " + "CRD Service in admin context")), + cfg.BoolOpt('crd_api_insecure', + default=False, + help=_("If set, ignore any SSL validation issues")), + cfg.StrOpt('crd_auth_strategy', + default='keystone', + help=_("Auth strategy for connecting to " + "neutron in admin context")), + cfg.StrOpt('crd_ca_certificates_file', + help=_("Location of ca certificates file to use for " + "CRD client requests.")), +] + +# Register the configuration option for crd service +# required for FSL SDN OS Mechanism driver +cfg.CONF.register_opts(ml2_fslsdn_opts, "ml2_fslsdn") + +# shortcut +FSLCONF = cfg.CONF.ml2_fslsdn + +SERVICE_TYPE = 'crd' + + +class FslsdnMechanismDriver(api.MechanismDriver): + + """Freescale SDN OS Mechanism Driver for ML2 Plugin.""" + + @log.log + def initialize(self): + """Initialize the 
Mechanism driver.""" + + self.vif_type = portbindings.VIF_TYPE_OVS + self.vif_details = {portbindings.CAP_PORT_FILTER: True} + LOG.info(_("Initializing CRD client... ")) + crd_client_params = { + 'username': FSLCONF.crd_user_name, + 'tenant_name': FSLCONF.crd_tenant_name, + 'region_name': FSLCONF.crd_region_name, + 'password': FSLCONF.crd_password, + 'auth_url': FSLCONF.crd_auth_url, + 'auth_strategy': FSLCONF.crd_auth_strategy, + 'endpoint_url': FSLCONF.crd_url, + 'timeout': FSLCONF.crd_url_timeout, + 'insecure': FSLCONF.crd_api_insecure, + 'service_type': SERVICE_TYPE, + 'ca_cert': FSLCONF.crd_ca_certificates_file, + } + self._crdclient = client.Client(**crd_client_params) + + # Network Management + @staticmethod + @log.log + def _prepare_crd_network(network, segments): + """Helper function to create 'network' data.""" + + return {'network': + {'network_id': network['id'], + 'tenant_id': network['tenant_id'], + 'name': network['name'], + 'status': network['status'], + 'admin_state_up': network['admin_state_up'], + 'segments': segments, + }} + + def create_network_postcommit(self, context): + """Send create_network data to CRD service.""" + + network = context.current + segments = context.network_segments + body = self._prepare_crd_network(network, segments) + self._crdclient.create_network(body=body) + LOG.debug("create_network update sent to CRD Server: %s", body) + + def update_network_postcommit(self, context): + """Send update_network data to CRD service.""" + + network = context.current + segments = context.network_segments + body = self._prepare_crd_network(network, segments) + self._crdclient.update_network(network['id'], body=body) + LOG.debug("update_network update sent to CRD Server: %s", body) + + def delete_network_postcommit(self, context): + """Send delete_network data to CRD service.""" + + network = context.current + self._crdclient.delete_network(network['id']) + LOG.debug( + "delete_network update sent to CRD Server: %s", + network['id']) + + # 
Port Management + @staticmethod + def _prepare_crd_port(port): + """Helper function to prepare 'port' data.""" + + crd_subnet_id = '' + crd_ipaddress = '' + crd_sec_grps = '' + # Since CRD accepts one Fixed IP, + # so handle only one fixed IP per port. + if len(port['fixed_ips']) > 1: + LOG.debug("More than one fixed IP exists - using first one.") + # check empty fixed_ips list, move on if one or more exists + if len(port['fixed_ips']) != 0: + crd_subnet_id = port['fixed_ips'][0]['subnet_id'] + crd_ipaddress = port['fixed_ips'][0]['ip_address'] + LOG.debug("Handling fixed IP {subnet_id:%(subnet)s, " + "ip_address:%(ip)s}", + {'subnet': crd_subnet_id, 'ip': crd_ipaddress}) + else: + LOG.debug("No fixed IPs found.") + if 'security_groups' in port: + crd_sec_grps = ','.join(port['security_groups']) + return {'port': + {'port_id': port['id'], + 'tenant_id': port['tenant_id'], + 'name': port['name'], + 'network_id': port['network_id'], + 'subnet_id': crd_subnet_id, + 'mac_address': port['mac_address'], + 'device_id': port['device_id'], + 'ip_address': crd_ipaddress, + 'admin_state_up': port['admin_state_up'], + 'status': port['status'], + 'device_owner': port['device_owner'], + 'security_groups': crd_sec_grps, + }} + + def create_port_postcommit(self, context): + """Send create_port data to CRD service.""" + + port = context.current + body = self._prepare_crd_port(port) + self._crdclient.create_port(body=body) + LOG.debug("create_port update sent to CRD Server: %s", body) + + def delete_port_postcommit(self, context): + """Send delete_port data to CRD service.""" + + port = context.current + self._crdclient.delete_port(port['id']) + LOG.debug("delete_port update sent to CRD Server: %s", port['id']) + + # Subnet Management + @staticmethod + @log.log + def _prepare_crd_subnet(subnet): + """Helper function to prepare 'subnet' data.""" + + crd_allocation_pools = '' + crd_dns_nameservers = '' + crd_host_routes = '' + # Handling Allocation IPs + if 'allocation_pools' in 
subnet: + a_pools = subnet['allocation_pools'] + crd_allocation_pools = ','.join(["%s-%s" % (p['start'], + p['end']) + for p in a_pools]) + # Handling Host Routes + if 'host_routes' in subnet: + crd_host_routes = ','.join(["%s-%s" % (r['destination'], + r['nexthop']) + for r in subnet['host_routes']]) + # Handling DNS Nameservers + if 'dns_nameservers' in subnet: + crd_dns_nameservers = ','.join(subnet['dns_nameservers']) + # return Subnet Data + return {'subnet': + {'subnet_id': subnet['id'], + 'tenant_id': subnet['tenant_id'], + 'name': subnet['name'], + 'network_id': subnet['network_id'], + 'ip_version': subnet['ip_version'], + 'cidr': subnet['cidr'], + 'gateway_ip': subnet['gateway_ip'], + 'dns_nameservers': crd_dns_nameservers, + 'allocation_pools': crd_allocation_pools, + 'host_routes': crd_host_routes, + }} + + def create_subnet_postcommit(self, context): + """Send create_subnet data to CRD service.""" + + subnet = context.current + body = self._prepare_crd_subnet(subnet) + self._crdclient.create_subnet(body=body) + LOG.debug("create_subnet update sent to CRD Server: %s", body) + + def update_subnet_postcommit(self, context): + """Send update_subnet data to CRD service.""" + + subnet = context.current + body = self._prepare_crd_subnet(subnet) + self._crdclient.update_subnet(subnet['id'], body=body) + LOG.debug("update_subnet update sent to CRD Server: %s", body) + + def delete_subnet_postcommit(self, context): + """Send delete_subnet data to CRD service.""" + + subnet = context.current + self._crdclient.delete_subnet(subnet['id']) + LOG.debug("delete_subnet update sent to CRD Server: %s", subnet['id']) + + def bind_port(self, context): + """Set porting binding data for use with nova.""" + + LOG.debug("Attempting to bind port %(port)s on " + "network %(network)s", + {'port': context.current['id'], + 'network': context.network.current['id']}) + # Prepared porting binding data + for segment in context.network.network_segments: + if self.check_segment(segment): 
+ context.set_binding(segment[api.ID], + self.vif_type, + self.vif_details, + status=n_const.PORT_STATUS_ACTIVE) + LOG.debug("Bound using segment: %s", segment) + return + else: + LOG.debug("Refusing to bind port for segment ID %(id)s, " + "segment %(seg)s, phys net %(physnet)s, and " + "network type %(nettype)s", + {'id': segment[api.ID], + 'seg': segment[api.SEGMENTATION_ID], + 'physnet': segment[api.PHYSICAL_NETWORK], + 'nettype': segment[api.NETWORK_TYPE]}) + + @log.log + def check_segment(self, segment): + """Verify a segment is valid for the FSL SDN MechanismDriver.""" + + return segment[api.NETWORK_TYPE] in [constants.TYPE_VLAN, + constants.TYPE_VXLAN] diff --git a/neutron/plugins/ml2/drivers/mechanism_ncs.py b/neutron/plugins/ml2/drivers/mechanism_ncs.py new file mode 100644 index 000000000..833447731 --- /dev/null +++ b/neutron/plugins/ml2/drivers/mechanism_ncs.py @@ -0,0 +1,182 @@ +# Copyright (c) 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import re + +from oslo.config import cfg +import requests + +from neutron.openstack.common import jsonutils +from neutron.openstack.common import log +from neutron.plugins.ml2 import driver_api as api + +LOG = log.getLogger(__name__) + +ncs_opts = [ + cfg.StrOpt('url', + help=_("HTTP URL of Tail-f NCS REST interface.")), + cfg.StrOpt('username', + help=_("HTTP username for authentication")), + cfg.StrOpt('password', secret=True, + help=_("HTTP password for authentication")), + cfg.IntOpt('timeout', default=10, + help=_("HTTP timeout in seconds.")) +] + +cfg.CONF.register_opts(ncs_opts, "ml2_ncs") + + +class NCSMechanismDriver(api.MechanismDriver): + + """Mechanism Driver for Tail-f Network Control System (NCS). + + This driver makes portions of the Neutron database available for + service provisioning in NCS. For example, NCS can use this + information to provision physical switches and routers in response + to OpenStack configuration changes. + + The database is replicated from Neutron to NCS using HTTP and JSON. + + The driver has two states: out-of-sync (initially) and in-sync. + + In the out-of-sync state each driver event triggers an attempt + to synchronize the complete database. On success the driver + transitions to the in-sync state. + + In the in-sync state each driver event triggers synchronization + of one network or port. On success the driver stays in-sync and + on failure it transitions to the out-of-sync state. + """ + out_of_sync = True + + def initialize(self): + self.url = cfg.CONF.ml2_ncs.url + self.timeout = cfg.CONF.ml2_ncs.timeout + self.username = cfg.CONF.ml2_ncs.username + self.password = cfg.CONF.ml2_ncs.password + + # Postcommit hooks are used to trigger synchronization. 
+ + def create_network_postcommit(self, context): + self.synchronize('create', 'network', context) + + def update_network_postcommit(self, context): + self.synchronize('update', 'network', context) + + def delete_network_postcommit(self, context): + self.synchronize('delete', 'network', context) + + def create_subnet_postcommit(self, context): + self.synchronize('create', 'subnet', context) + + def update_subnet_postcommit(self, context): + self.synchronize('update', 'subnet', context) + + def delete_subnet_postcommit(self, context): + self.synchronize('delete', 'subnet', context) + + def create_port_postcommit(self, context): + self.synchronize('create', 'port', context) + + def update_port_postcommit(self, context): + self.synchronize('update', 'port', context) + + def delete_port_postcommit(self, context): + self.synchronize('delete', 'port', context) + + def synchronize(self, operation, object_type, context): + """Synchronize NCS with Neutron following a configuration change.""" + if self.out_of_sync: + self.sync_full(context) + else: + self.sync_object(operation, object_type, context) + + def sync_full(self, context): + """Resync the entire database to NCS. + Transition to the in-sync state on success. + """ + dbcontext = context._plugin_context + networks = context._plugin.get_networks(dbcontext) + subnets = context._plugin.get_subnets(dbcontext) + ports = context._plugin.get_ports(dbcontext) + for port in ports: + self.add_security_groups(context, dbcontext, port) + json = {'openstack': {'network': networks, + 'subnet': subnets, + 'port': ports}} + self.sendjson('put', '', json) + self.out_of_sync = False + + def sync_object(self, operation, object_type, context): + """Synchronize the single modified record to NCS. + Transition to the out-of-sync state on failure. 
+ """ + self.out_of_sync = True + dbcontext = context._plugin_context + id = context.current['id'] + urlpath = object_type + '/' + id + if operation == 'delete': + self.sendjson('delete', urlpath, None) + else: + assert operation == 'create' or operation == 'update' + if object_type == 'network': + network = context._plugin.get_network(dbcontext, id) + self.sendjson('put', urlpath, {'network': network}) + elif object_type == 'subnet': + subnet = context._plugin.get_subnet(dbcontext, id) + self.sendjson('put', urlpath, {'subnet': subnet}) + else: + assert object_type == 'port' + port = context._plugin.get_port(dbcontext, id) + self.add_security_groups(context, dbcontext, port) + self.sendjson('put', urlpath, {'port': port}) + self.out_of_sync = False + + def add_security_groups(self, context, dbcontext, port): + """Populate the 'security_groups' field with entire records.""" + groups = [context._plugin.get_security_group(dbcontext, sg) + for sg in port['security_groups']] + port['security_groups'] = groups + + def sendjson(self, method, urlpath, obj): + obj = self.escape_keys(obj) + headers = {'Content-Type': 'application/vnd.yang.data+json'} + if obj is None: + data = None + else: + data = jsonutils.dumps(obj, indent=2) + auth = None + if self.username and self.password: + auth = (self.username, self.password) + if self.url: + url = '/'.join([self.url, urlpath]) + r = requests.request(method, url=url, + headers=headers, data=data, + auth=auth, timeout=self.timeout) + r.raise_for_status() + + def escape_keys(self, obj): + """Escape JSON keys to be NCS compatible. + NCS does not allow period (.) or colon (:) characters. 
+ """ + if isinstance(obj, dict): + obj = dict((self.escape(k), self.escape_keys(v)) + for k, v in obj.iteritems()) + if isinstance(obj, list): + obj = [self.escape_keys(x) for x in obj] + return obj + + def escape(self, string): + return re.sub('[:._]', '-', string) diff --git a/neutron/plugins/ml2/drivers/mechanism_odl.py b/neutron/plugins/ml2/drivers/mechanism_odl.py new file mode 100644 index 000000000..416e870d1 --- /dev/null +++ b/neutron/plugins/ml2/drivers/mechanism_odl.py @@ -0,0 +1,374 @@ +# Copyright (c) 2013-2014 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# @author: Kyle Mestery, Cisco Systems, Inc. +# @author: Dave Tucker, Hewlett-Packard Development Company L.P. 
+ +import time + +from oslo.config import cfg +import requests + +from neutron.common import constants as n_const +from neutron.common import exceptions as n_exc +from neutron.common import utils +from neutron.extensions import portbindings +from neutron.openstack.common import excutils +from neutron.openstack.common import jsonutils +from neutron.openstack.common import log +from neutron.plugins.common import constants +from neutron.plugins.ml2 import driver_api as api + +LOG = log.getLogger(__name__) + +ODL_NETWORK = 'network' +ODL_NETWORKS = 'networks' +ODL_SUBNET = 'subnet' +ODL_SUBNETS = 'subnets' +ODL_PORT = 'port' +ODL_PORTS = 'ports' + +not_found_exception_map = {ODL_NETWORKS: n_exc.NetworkNotFound, + ODL_SUBNETS: n_exc.SubnetNotFound, + ODL_PORTS: n_exc.PortNotFound} + +odl_opts = [ + cfg.StrOpt('url', + help=_("HTTP URL of OpenDaylight REST interface.")), + cfg.StrOpt('username', + help=_("HTTP username for authentication")), + cfg.StrOpt('password', secret=True, + help=_("HTTP password for authentication")), + cfg.IntOpt('timeout', default=10, + help=_("HTTP timeout in seconds.")), + cfg.IntOpt('session_timeout', default=30, + help=_("Tomcat session timeout in minutes.")), +] + +cfg.CONF.register_opts(odl_opts, "ml2_odl") + + +def try_del(d, keys): + """Ignore key errors when deleting from a dictionary.""" + for key in keys: + try: + del d[key] + except KeyError: + pass + + +class OpendaylightAuthError(n_exc.NeutronException): + message = '%(msg)s' + + +class JsessionId(requests.auth.AuthBase): + + """Attaches the JSESSIONID and JSESSIONIDSSO cookies to an HTTP Request. + + If the cookies are not available or when the session expires, a new + set of cookies are obtained. + """ + + def __init__(self, url, username, password): + """Initialization function for JsessionId.""" + + # NOTE(kmestery) The 'limit' paramater is intended to limit how much + # data is returned from ODL. 
This is not implemented in the Hydrogen + # release of OpenDaylight, but will be implemented in the Helium + # timeframe. Hydrogen will silently ignore this value. + self.url = str(url) + '/' + ODL_NETWORKS + '?limit=1' + self.username = username + self.password = password + self.auth_cookies = None + self.last_request = None + self.expired = None + self.session_timeout = cfg.CONF.ml2_odl.session_timeout * 60 + self.session_deadline = 0 + + def obtain_auth_cookies(self): + """Make a REST call to obtain cookies for ODL authenticiation.""" + + try: + r = requests.get(self.url, auth=(self.username, self.password)) + r.raise_for_status() + except requests.exceptions.HTTPError as e: + raise OpendaylightAuthError(msg=_("Failed to authenticate with " + "OpenDaylight: %s") % e) + except requests.exceptions.Timeout as e: + raise OpendaylightAuthError(msg=_("Authentication Timed" + " Out: %s") % e) + + jsessionid = r.cookies.get('JSESSIONID') + jsessionidsso = r.cookies.get('JSESSIONIDSSO') + if jsessionid and jsessionidsso: + self.auth_cookies = dict(JSESSIONID=jsessionid, + JSESSIONIDSSO=jsessionidsso) + + def __call__(self, r): + """Verify timestamp for Tomcat session timeout.""" + + if time.time() > self.session_deadline: + self.obtain_auth_cookies() + self.session_deadline = time.time() + self.session_timeout + r.prepare_cookies(self.auth_cookies) + return r + + +class OpenDaylightMechanismDriver(api.MechanismDriver): + + """Mechanism Driver for OpenDaylight. + + This driver was a port from the Tail-F NCS MechanismDriver. The API + exposed by ODL is slightly different from the API exposed by NCS, + but the general concepts are the same. 
+ """ + auth = None + out_of_sync = True + + def initialize(self): + self.url = cfg.CONF.ml2_odl.url + self.timeout = cfg.CONF.ml2_odl.timeout + self.username = cfg.CONF.ml2_odl.username + self.password = cfg.CONF.ml2_odl.password + required_opts = ('url', 'username', 'password') + for opt in required_opts: + if not getattr(self, opt): + raise cfg.RequiredOptError(opt, 'ml2_odl') + self.auth = JsessionId(self.url, self.username, self.password) + self.vif_type = portbindings.VIF_TYPE_OVS + self.vif_details = {portbindings.CAP_PORT_FILTER: True} + + # Postcommit hooks are used to trigger synchronization. + + def create_network_postcommit(self, context): + self.synchronize('create', ODL_NETWORKS, context) + + def update_network_postcommit(self, context): + self.synchronize('update', ODL_NETWORKS, context) + + def delete_network_postcommit(self, context): + self.synchronize('delete', ODL_NETWORKS, context) + + def create_subnet_postcommit(self, context): + self.synchronize('create', ODL_SUBNETS, context) + + def update_subnet_postcommit(self, context): + self.synchronize('update', ODL_SUBNETS, context) + + def delete_subnet_postcommit(self, context): + self.synchronize('delete', ODL_SUBNETS, context) + + def create_port_postcommit(self, context): + self.synchronize('create', ODL_PORTS, context) + + def update_port_postcommit(self, context): + self.synchronize('update', ODL_PORTS, context) + + def delete_port_postcommit(self, context): + self.synchronize('delete', ODL_PORTS, context) + + def synchronize(self, operation, object_type, context): + """Synchronize ODL with Neutron following a configuration change.""" + if self.out_of_sync: + self.sync_full(context) + else: + self.sync_object(operation, object_type, context) + + def filter_create_network_attributes(self, network, context, dbcontext): + """Filter out network attributes not required for a create.""" + try_del(network, ['status', 'subnets']) + + def filter_create_subnet_attributes(self, subnet, context, 
dbcontext): + """Filter out subnet attributes not required for a create.""" + pass + + def filter_create_port_attributes(self, port, context, dbcontext): + """Filter out port attributes not required for a create.""" + self.add_security_groups(context, dbcontext, port) + # TODO(kmestery): Converting to uppercase due to ODL bug + # https://bugs.opendaylight.org/show_bug.cgi?id=477 + port['mac_address'] = port['mac_address'].upper() + try_del(port, ['status']) + + def sync_resources(self, resource_name, collection_name, resources, + context, dbcontext, attr_filter): + """Sync objects from Neutron over to OpenDaylight. + + This will handle syncing networks, subnets, and ports from Neutron to + OpenDaylight. It also filters out the requisite items which are not + valid for create API operations. + """ + to_be_synced = [] + for resource in resources: + try: + urlpath = collection_name + '/' + resource['id'] + self.sendjson('get', urlpath, None) + except requests.exceptions.HTTPError as e: + with excutils.save_and_reraise_exception() as ctx: + if e.response.status_code == 404: + attr_filter(resource, context, dbcontext) + to_be_synced.append(resource) + ctx.reraise = False + + key = resource_name if len(to_be_synced) == 1 else collection_name + + # 400 errors are returned if an object exists, which we ignore. + self.sendjson('post', collection_name, {key: to_be_synced}, [400]) + + @utils.synchronized('odl-sync-full') + def sync_full(self, context): + """Resync the entire database to ODL. + + Transition to the in-sync state on success. + Note: we only allow a single thead in here at a time. 
+ """ + if not self.out_of_sync: + return + dbcontext = context._plugin_context + networks = context._plugin.get_networks(dbcontext) + subnets = context._plugin.get_subnets(dbcontext) + ports = context._plugin.get_ports(dbcontext) + + self.sync_resources(ODL_NETWORK, ODL_NETWORKS, networks, + context, dbcontext, + self.filter_create_network_attributes) + self.sync_resources(ODL_SUBNET, ODL_SUBNETS, subnets, + context, dbcontext, + self.filter_create_subnet_attributes) + self.sync_resources(ODL_PORT, ODL_PORTS, ports, + context, dbcontext, + self.filter_create_port_attributes) + self.out_of_sync = False + + def filter_update_network_attributes(self, network, context, dbcontext): + """Filter out network attributes for an update operation.""" + try_del(network, ['id', 'status', 'subnets', 'tenant_id']) + + def filter_update_subnet_attributes(self, subnet, context, dbcontext): + """Filter out subnet attributes for an update operation.""" + try_del(subnet, ['id', 'network_id', 'ip_version', 'cidr', + 'allocation_pools', 'tenant_id']) + + def filter_update_port_attributes(self, port, context, dbcontext): + """Filter out port attributes for an update operation.""" + self.add_security_groups(context, dbcontext, port) + try_del(port, ['network_id', 'id', 'status', 'mac_address', + 'tenant_id', 'fixed_ips']) + + create_object_map = {ODL_NETWORKS: filter_create_network_attributes, + ODL_SUBNETS: filter_create_subnet_attributes, + ODL_PORTS: filter_create_port_attributes} + + update_object_map = {ODL_NETWORKS: filter_update_network_attributes, + ODL_SUBNETS: filter_update_subnet_attributes, + ODL_PORTS: filter_update_port_attributes} + + def sync_single_resource(self, operation, object_type, obj_id, + context, attr_filter_create, attr_filter_update): + """Sync over a single resource from Neutron to OpenDaylight. 
+ + Handle syncing a single operation over to OpenDaylight, and correctly + filter attributes out which are not required for the requisite + operation (create or update) being handled. + """ + dbcontext = context._plugin_context + if operation == 'create': + urlpath = object_type + method = 'post' + else: + urlpath = object_type + '/' + obj_id + method = 'put' + + try: + obj_getter = getattr(context._plugin, 'get_%s' % object_type[:-1]) + resource = obj_getter(dbcontext, obj_id) + except not_found_exception_map[object_type]: + LOG.debug(_('%(object_type)s not found (%(obj_id)s)'), + {'object_type': object_type.capitalize(), + 'obj_id': obj_id}) + else: + if operation == 'create': + attr_filter_create(self, resource, context, dbcontext) + elif operation == 'update': + attr_filter_update(self, resource, context, dbcontext) + try: + # 400 errors are returned if an object exists, which we ignore. + self.sendjson(method, urlpath, {object_type[:-1]: resource}, + [400]) + except Exception: + with excutils.save_and_reraise_exception(): + self.out_of_sync = True + + def sync_object(self, operation, object_type, context): + """Synchronize the single modified record to ODL.""" + obj_id = context.current['id'] + + self.sync_single_resource(operation, object_type, obj_id, context, + self.create_object_map[object_type], + self.update_object_map[object_type]) + + def add_security_groups(self, context, dbcontext, port): + """Populate the 'security_groups' field with entire records.""" + groups = [context._plugin.get_security_group(dbcontext, sg) + for sg in port['security_groups']] + port['security_groups'] = groups + + def sendjson(self, method, urlpath, obj, ignorecodes=[]): + """Send json to the OpenDaylight controller.""" + + headers = {'Content-Type': 'application/json'} + data = jsonutils.dumps(obj, indent=2) if obj else None + url = '/'.join([self.url, urlpath]) + LOG.debug(_('ODL-----> sending URL (%s) <-----ODL') % url) + LOG.debug(_('ODL-----> sending JSON (%s) 
<-----ODL') % obj) + r = requests.request(method, url=url, + headers=headers, data=data, + auth=self.auth, timeout=self.timeout) + + # ignorecodes contains a list of HTTP error codes to ignore. + if r.status_code in ignorecodes: + return + r.raise_for_status() + + def bind_port(self, context): + LOG.debug(_("Attempting to bind port %(port)s on " + "network %(network)s"), + {'port': context.current['id'], + 'network': context.network.current['id']}) + for segment in context.network.network_segments: + if self.check_segment(segment): + context.set_binding(segment[api.ID], + self.vif_type, + self.vif_details, + status=n_const.PORT_STATUS_ACTIVE) + LOG.debug(_("Bound using segment: %s"), segment) + return + else: + LOG.debug(_("Refusing to bind port for segment ID %(id)s, " + "segment %(seg)s, phys net %(physnet)s, and " + "network type %(nettype)s"), + {'id': segment[api.ID], + 'seg': segment[api.SEGMENTATION_ID], + 'physnet': segment[api.PHYSICAL_NETWORK], + 'nettype': segment[api.NETWORK_TYPE]}) + + def check_segment(self, segment): + """Verify a segment is valid for the OpenDaylight MechanismDriver. + + Verify the requested segment is supported by ODL and return True or + False to indicate this to callers. + """ + network_type = segment[api.NETWORK_TYPE] + return network_type in [constants.TYPE_LOCAL, constants.TYPE_GRE, + constants.TYPE_VXLAN, constants.TYPE_VLAN] diff --git a/neutron/plugins/ml2/drivers/mlnx/__init__.py b/neutron/plugins/ml2/drivers/mlnx/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/neutron/plugins/ml2/drivers/mlnx/config.py b/neutron/plugins/ml2/drivers/mlnx/config.py new file mode 100644 index 000000000..c9641d53c --- /dev/null +++ b/neutron/plugins/ml2/drivers/mlnx/config.py @@ -0,0 +1,32 @@ +# Copyright (c) 2014 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from oslo.config import cfg + +from neutron.extensions import portbindings + +eswitch_opts = [ + cfg.StrOpt('vnic_type', + default=portbindings.VIF_TYPE_MLNX_DIRECT, + help=_("Type of VM network interface: mlnx_direct or " + "hostdev")), + cfg.BoolOpt('apply_profile_patch', + default=False, + help=_("Enable server compatibility with old nova")), +] + + +cfg.CONF.register_opts(eswitch_opts, "ESWITCH") diff --git a/neutron/plugins/ml2/drivers/mlnx/mech_mlnx.py b/neutron/plugins/ml2/drivers/mlnx/mech_mlnx.py new file mode 100644 index 000000000..97eb03a4c --- /dev/null +++ b/neutron/plugins/ml2/drivers/mlnx/mech_mlnx.py @@ -0,0 +1,91 @@ +# Copyright (c) 2014 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from oslo.config import cfg + +from neutron.common import constants +from neutron.extensions import portbindings +from neutron.openstack.common import jsonutils +from neutron.openstack.common import log +from neutron.plugins.ml2 import driver_api as api +from neutron.plugins.ml2.drivers import mech_agent +from neutron.plugins.ml2.drivers.mlnx import config # noqa + +LOG = log.getLogger(__name__) + + +class MlnxMechanismDriver(mech_agent.SimpleAgentMechanismDriverBase): + """Attach to networks using Mellanox eSwitch L2 agent. + + The MellanoxMechanismDriver integrates the ml2 plugin with the + Mellanox eswitch L2 agent. Port binding with this driver requires the + Mellanox eswitch agent to be running on the port's host, and that agent + to have connectivity to at least one segment of the port's + network. + """ + + def __init__(self): + # REVISIT(irenab): update supported_vnic_types to contain + # only VNIC_DIRECT and VNIC_MACVTAP once its possible to specify + # vnic_type via nova API/GUI. Currently VNIC_NORMAL is included + # to enable VM creation via GUI. It should be noted, that if + # several MDs are capable to bing bind port on chosen host, the + # first listed MD will bind the port for VNIC_NORMAL. 
+ super(MlnxMechanismDriver, self).__init__( + constants.AGENT_TYPE_MLNX, + cfg.CONF.ESWITCH.vnic_type, + {portbindings.CAP_PORT_FILTER: False}, + portbindings.VNIC_TYPES) + self.update_profile = cfg.CONF.ESWITCH.apply_profile_patch + + def check_segment_for_agent(self, segment, agent): + mappings = agent['configurations'].get('interface_mappings', {}) + LOG.debug(_("Checking segment: %(segment)s " + "for mappings: %(mappings)s "), + {'segment': segment, 'mappings': mappings}) + + network_type = segment[api.NETWORK_TYPE] + if network_type == 'local': + return True + elif network_type in ['flat', 'vlan']: + return segment[api.PHYSICAL_NETWORK] in mappings + else: + return False + + def try_to_bind_segment_for_agent(self, context, segment, agent): + if self.check_segment_for_agent(segment, agent): + vif_type = self._get_vif_type( + context.current[portbindings.VNIC_TYPE]) + if segment[api.NETWORK_TYPE] in ['flat', 'vlan']: + self.vif_details['physical_network'] = segment[ + 'physical_network'] + context.set_binding(segment[api.ID], + vif_type, + self.vif_details) + # REVISIT(irenab): Temporary solution till nova support + # will be merged for physical_network propagation + # via VIF object to VIFDriver (required by mlnx vif plugging). + if self.update_profile: + profile = {'physical_network': + segment['physical_network']} + context._binding.profile = jsonutils.dumps(profile) + + def _get_vif_type(self, requested_vnic_type): + if requested_vnic_type == portbindings.VNIC_MACVTAP: + return portbindings.VIF_TYPE_MLNX_DIRECT + elif requested_vnic_type == portbindings.VNIC_DIRECT: + return portbindings.VIF_TYPE_MLNX_HOSTDEV + return self.vif_type diff --git a/neutron/plugins/ml2/drivers/type_flat.py b/neutron/plugins/ml2/drivers/type_flat.py new file mode 100644 index 000000000..3e736eabc --- /dev/null +++ b/neutron/plugins/ml2/drivers/type_flat.py @@ -0,0 +1,131 @@ +# Copyright (c) 2013 OpenStack Foundation +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo.config import cfg +import sqlalchemy as sa + +from neutron.common import exceptions as exc +from neutron.db import model_base +from neutron.openstack.common import log +from neutron.plugins.common import constants as p_const +from neutron.plugins.ml2 import driver_api as api + +LOG = log.getLogger(__name__) + +flat_opts = [ + cfg.ListOpt('flat_networks', + default=[], + help=_("List of physical_network names with which flat " + "networks can be created. Use * to allow flat " + "networks with arbitrary physical_network names.")) +] + +cfg.CONF.register_opts(flat_opts, "ml2_type_flat") + + +class FlatAllocation(model_base.BASEV2): + """Represent persistent allocation state of a physical network. + + If a record exists for a physical network, then that physical + network has been allocated as a flat network. + """ + + __tablename__ = 'ml2_flat_allocations' + + physical_network = sa.Column(sa.String(64), nullable=False, + primary_key=True) + + +class FlatTypeDriver(api.TypeDriver): + """Manage state for flat networks with ML2. + + The FlatTypeDriver implements the 'flat' network_type. Flat + network segments provide connectivity between VMs and other + devices using any connected IEEE 802.1D conformant + physical_network, without the use of VLAN tags, tunneling, or + other segmentation mechanisms. Therefore at most one flat network + segment can exist on each available physical_network. 
+ """ + + def __init__(self): + self._parse_networks(cfg.CONF.ml2_type_flat.flat_networks) + + def _parse_networks(self, entries): + self.flat_networks = entries + if '*' in self.flat_networks: + LOG.info(_("Arbitrary flat physical_network names allowed")) + self.flat_networks = None + else: + # TODO(rkukura): Validate that each physical_network name + # is neither empty nor too long. + LOG.info(_("Allowable flat physical_network names: %s"), + self.flat_networks) + + def get_type(self): + return p_const.TYPE_FLAT + + def initialize(self): + LOG.info(_("ML2 FlatTypeDriver initialization complete")) + + def validate_provider_segment(self, segment): + physical_network = segment.get(api.PHYSICAL_NETWORK) + if not physical_network: + msg = _("physical_network required for flat provider network") + raise exc.InvalidInput(error_message=msg) + if self.flat_networks and physical_network not in self.flat_networks: + msg = (_("physical_network '%s' unknown for flat provider network") + % physical_network) + raise exc.InvalidInput(error_message=msg) + + for key, value in segment.iteritems(): + if value and key not in [api.NETWORK_TYPE, + api.PHYSICAL_NETWORK]: + msg = _("%s prohibited for flat provider network") % key + raise exc.InvalidInput(error_message=msg) + + def reserve_provider_segment(self, session, segment): + physical_network = segment[api.PHYSICAL_NETWORK] + with session.begin(subtransactions=True): + try: + alloc = (session.query(FlatAllocation). + filter_by(physical_network=physical_network). + with_lockmode('update'). + one()) + raise exc.FlatNetworkInUse( + physical_network=physical_network) + except sa.orm.exc.NoResultFound: + LOG.debug(_("Reserving flat network on physical " + "network %s"), physical_network) + alloc = FlatAllocation(physical_network=physical_network) + session.add(alloc) + + def allocate_tenant_segment(self, session): + # Tenant flat networks are not supported. 
+ return + + def release_segment(self, session, segment): + physical_network = segment[api.PHYSICAL_NETWORK] + with session.begin(subtransactions=True): + try: + alloc = (session.query(FlatAllocation). + filter_by(physical_network=physical_network). + with_lockmode('update'). + one()) + session.delete(alloc) + LOG.debug(_("Releasing flat network on physical " + "network %s"), physical_network) + except sa.orm.exc.NoResultFound: + LOG.warning(_("No flat network found on physical network %s"), + physical_network) diff --git a/neutron/plugins/ml2/drivers/type_gre.py b/neutron/plugins/ml2/drivers/type_gre.py new file mode 100644 index 000000000..abd894bfe --- /dev/null +++ b/neutron/plugins/ml2/drivers/type_gre.py @@ -0,0 +1,190 @@ +# Copyright (c) 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from oslo.config import cfg +from six import moves +import sqlalchemy as sa +from sqlalchemy.orm import exc as sa_exc + +from neutron.common import exceptions as exc +from neutron.db import api as db_api +from neutron.db import model_base +from neutron.openstack.common import log +from neutron.plugins.common import constants as p_const +from neutron.plugins.ml2 import driver_api as api +from neutron.plugins.ml2.drivers import type_tunnel + +LOG = log.getLogger(__name__) + +gre_opts = [ + cfg.ListOpt('tunnel_id_ranges', + default=[], + help=_("Comma-separated list of : tuples " + "enumerating ranges of GRE tunnel IDs that are " + "available for tenant network allocation")) +] + +cfg.CONF.register_opts(gre_opts, "ml2_type_gre") + + +class GreAllocation(model_base.BASEV2): + + __tablename__ = 'ml2_gre_allocations' + + gre_id = sa.Column(sa.Integer, nullable=False, primary_key=True, + autoincrement=False) + allocated = sa.Column(sa.Boolean, nullable=False, default=False) + + +class GreEndpoints(model_base.BASEV2): + """Represents tunnel endpoint in RPC mode.""" + __tablename__ = 'ml2_gre_endpoints' + + ip_address = sa.Column(sa.String(64), primary_key=True) + + def __repr__(self): + return "" % self.ip_address + + +class GreTypeDriver(type_tunnel.TunnelTypeDriver): + + def get_type(self): + return p_const.TYPE_GRE + + def initialize(self): + self.gre_id_ranges = [] + self._parse_tunnel_ranges( + cfg.CONF.ml2_type_gre.tunnel_id_ranges, + self.gre_id_ranges, + p_const.TYPE_GRE + ) + self._sync_gre_allocations() + + def reserve_provider_segment(self, session, segment): + segmentation_id = segment.get(api.SEGMENTATION_ID) + with session.begin(subtransactions=True): + try: + alloc = (session.query(GreAllocation). + filter_by(gre_id=segmentation_id). + with_lockmode('update'). 
+ one()) + if alloc.allocated: + raise exc.TunnelIdInUse(tunnel_id=segmentation_id) + LOG.debug(_("Reserving specific gre tunnel %s from pool"), + segmentation_id) + alloc.allocated = True + except sa_exc.NoResultFound: + LOG.debug(_("Reserving specific gre tunnel %s outside pool"), + segmentation_id) + alloc = GreAllocation(gre_id=segmentation_id) + alloc.allocated = True + session.add(alloc) + + def allocate_tenant_segment(self, session): + with session.begin(subtransactions=True): + alloc = (session.query(GreAllocation). + filter_by(allocated=False). + with_lockmode('update'). + first()) + if alloc: + LOG.debug(_("Allocating gre tunnel id %(gre_id)s"), + {'gre_id': alloc.gre_id}) + alloc.allocated = True + return {api.NETWORK_TYPE: p_const.TYPE_GRE, + api.PHYSICAL_NETWORK: None, + api.SEGMENTATION_ID: alloc.gre_id} + + def release_segment(self, session, segment): + gre_id = segment[api.SEGMENTATION_ID] + with session.begin(subtransactions=True): + try: + alloc = (session.query(GreAllocation). + filter_by(gre_id=gre_id). + with_lockmode('update'). 
+ one()) + alloc.allocated = False + for lo, hi in self.gre_id_ranges: + if lo <= gre_id <= hi: + LOG.debug(_("Releasing gre tunnel %s to pool"), + gre_id) + break + else: + session.delete(alloc) + LOG.debug(_("Releasing gre tunnel %s outside pool"), + gre_id) + except sa_exc.NoResultFound: + LOG.warning(_("gre_id %s not found"), gre_id) + + def _sync_gre_allocations(self): + """Synchronize gre_allocations table with configured tunnel ranges.""" + + # determine current configured allocatable gres + gre_ids = set() + for gre_id_range in self.gre_id_ranges: + tun_min, tun_max = gre_id_range + if tun_max + 1 - tun_min > 1000000: + LOG.error(_("Skipping unreasonable gre ID range " + "%(tun_min)s:%(tun_max)s"), + {'tun_min': tun_min, 'tun_max': tun_max}) + else: + gre_ids |= set(moves.xrange(tun_min, tun_max + 1)) + + session = db_api.get_session() + with session.begin(subtransactions=True): + # remove from table unallocated tunnels not currently allocatable + allocs = (session.query(GreAllocation).all()) + for alloc in allocs: + try: + # see if tunnel is allocatable + gre_ids.remove(alloc.gre_id) + except KeyError: + # it's not allocatable, so check if its allocated + if not alloc.allocated: + # it's not, so remove it from table + LOG.debug(_("Removing tunnel %s from pool"), + alloc.gre_id) + session.delete(alloc) + + # add missing allocatable tunnels to table + for gre_id in sorted(gre_ids): + alloc = GreAllocation(gre_id=gre_id) + session.add(alloc) + + def get_gre_allocation(self, session, gre_id): + return session.query(GreAllocation).filter_by(gre_id=gre_id).first() + + def get_endpoints(self): + """Get every gre endpoints from database.""" + + LOG.debug(_("get_gre_endpoints() called")) + session = db_api.get_session() + + with session.begin(subtransactions=True): + gre_endpoints = session.query(GreEndpoints) + return [{'ip_address': gre_endpoint.ip_address} + for gre_endpoint in gre_endpoints] + + def add_endpoint(self, ip): + LOG.debug(_("add_gre_endpoint() 
called for ip %s"), ip) + session = db_api.get_session() + with session.begin(subtransactions=True): + try: + gre_endpoint = (session.query(GreEndpoints). + filter_by(ip_address=ip).one()) + LOG.warning(_("Gre endpoint with ip %s already exists"), ip) + except sa_exc.NoResultFound: + gre_endpoint = GreEndpoints(ip_address=ip) + session.add(gre_endpoint) + return gre_endpoint diff --git a/neutron/plugins/ml2/drivers/type_local.py b/neutron/plugins/ml2/drivers/type_local.py new file mode 100644 index 000000000..e0281a245 --- /dev/null +++ b/neutron/plugins/ml2/drivers/type_local.py @@ -0,0 +1,59 @@ +# Copyright (c) 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from neutron.common import exceptions as exc +from neutron.openstack.common import log +from neutron.plugins.common import constants as p_const +from neutron.plugins.ml2 import driver_api as api + +LOG = log.getLogger(__name__) + + +class LocalTypeDriver(api.TypeDriver): + """Manage state for local networks with ML2. + + The LocalTypeDriver implements the 'local' network_type. Local + network segments provide connectivity between VMs and other + devices running on the same node, provided that a common local + network bridging technology is available to those devices. Local + network segments do not provide any connectivity between nodes. 
+ """ + + def __init__(self): + LOG.info(_("ML2 LocalTypeDriver initialization complete")) + + def get_type(self): + return p_const.TYPE_LOCAL + + def initialize(self): + pass + + def validate_provider_segment(self, segment): + for key, value in segment.iteritems(): + if value and key not in [api.NETWORK_TYPE]: + msg = _("%s prohibited for local provider network") % key + raise exc.InvalidInput(error_message=msg) + + def reserve_provider_segment(self, session, segment): + # No resources to reserve + pass + + def allocate_tenant_segment(self, session): + # No resources to allocate + return {api.NETWORK_TYPE: p_const.TYPE_LOCAL} + + def release_segment(self, session, segment): + # No resources to release + pass diff --git a/neutron/plugins/ml2/drivers/type_tunnel.py b/neutron/plugins/ml2/drivers/type_tunnel.py new file mode 100644 index 000000000..e209029b9 --- /dev/null +++ b/neutron/plugins/ml2/drivers/type_tunnel.py @@ -0,0 +1,132 @@ +# Copyright (c) 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +import abc +import six + +from neutron.common import exceptions as exc +from neutron.common import topics +from neutron.openstack.common import log +from neutron.plugins.ml2 import driver_api as api + +LOG = log.getLogger(__name__) + +TUNNEL = 'tunnel' + + +@six.add_metaclass(abc.ABCMeta) +class TunnelTypeDriver(api.TypeDriver): + """Define stable abstract interface for ML2 type drivers. 
+ + tunnel type networks rely on tunnel endpoints. This class defines abstract + methods to manage these endpoints. + """ + + @abc.abstractmethod + def add_endpoint(self, ip): + """Register the endpoint in the type_driver database. + + param ip: the ip of the endpoint + """ + pass + + @abc.abstractmethod + def get_endpoints(self): + """Get every endpoint managed by the type_driver + + :returns a list of dict [{id:endpoint_id, ip_address:endpoint_ip},..] + """ + pass + + def _parse_tunnel_ranges(self, tunnel_ranges, current_range, tunnel_type): + for entry in tunnel_ranges: + entry = entry.strip() + try: + tun_min, tun_max = entry.split(':') + tun_min = tun_min.strip() + tun_max = tun_max.strip() + current_range.append((int(tun_min), int(tun_max))) + except ValueError as ex: + LOG.error(_("Invalid tunnel ID range: '%(range)s' - %(e)s. " + "Agent terminated!"), + {'range': tunnel_ranges, 'e': ex}) + LOG.info(_("%(type)s ID ranges: %(range)s"), + {'type': tunnel_type, 'range': current_range}) + + def validate_provider_segment(self, segment): + physical_network = segment.get(api.PHYSICAL_NETWORK) + if physical_network: + msg = _("provider:physical_network specified for %s " + "network") % segment.get(api.NETWORK_TYPE) + raise exc.InvalidInput(error_message=msg) + + segmentation_id = segment.get(api.SEGMENTATION_ID) + if not segmentation_id: + msg = _("segmentation_id required for %s provider " + "network") % segment.get(api.NETWORK_TYPE) + raise exc.InvalidInput(error_message=msg) + + for key, value in segment.items(): + if value and key not in [api.NETWORK_TYPE, + api.SEGMENTATION_ID]: + msg = (_("%(key)s prohibited for %(tunnel)s provider network"), + {'key': key, 'tunnel': segment.get(api.NETWORK_TYPE)}) + raise exc.InvalidInput(error_message=msg) + + +class TunnelRpcCallbackMixin(object): + + def __init__(self, notifier, type_manager): + self.notifier = notifier + self.type_manager = type_manager + + def tunnel_sync(self, rpc_context, **kwargs): + """Update new 
tunnel. + + Updates the database with the tunnel IP. All listening agents will also + be notified about the new tunnel IP. + """ + tunnel_ip = kwargs.get('tunnel_ip') + tunnel_type = kwargs.get('tunnel_type') + if not tunnel_type: + msg = _("Network_type value needed by the ML2 plugin") + raise exc.InvalidInput(error_message=msg) + driver = self.type_manager.drivers.get(tunnel_type) + if driver: + tunnel = driver.obj.add_endpoint(tunnel_ip) + tunnels = driver.obj.get_endpoints() + entry = {'tunnels': tunnels} + # Notify all other listening agents + self.notifier.tunnel_update(rpc_context, tunnel.ip_address, + tunnel_type) + # Return the list of tunnels IP's to the agent + return entry + else: + msg = _("network_type value '%s' not supported") % tunnel_type + raise exc.InvalidInput(error_message=msg) + + +class TunnelAgentRpcApiMixin(object): + + def _get_tunnel_update_topic(self): + return topics.get_topic_name(self.topic, + TUNNEL, + topics.UPDATE) + + def tunnel_update(self, context, tunnel_ip, tunnel_type): + self.fanout_cast(context, + self.make_msg('tunnel_update', + tunnel_ip=tunnel_ip, + tunnel_type=tunnel_type), + topic=self._get_tunnel_update_topic()) diff --git a/neutron/plugins/ml2/drivers/type_vlan.py b/neutron/plugins/ml2/drivers/type_vlan.py new file mode 100644 index 000000000..0159d5713 --- /dev/null +++ b/neutron/plugins/ml2/drivers/type_vlan.py @@ -0,0 +1,267 @@ +# Copyright (c) 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the
# License for the specific language governing permissions and limitations
# under the License.

import sys

from oslo.config import cfg
from six import moves
import sqlalchemy as sa

from neutron.common import constants as q_const
from neutron.common import exceptions as exc
from neutron.common import utils
from neutron.db import api as db_api
from neutron.db import model_base
from neutron.openstack.common import log
from neutron.plugins.common import constants as p_const
from neutron.plugins.common import utils as plugin_utils
from neutron.plugins.ml2 import driver_api as api

LOG = log.getLogger(__name__)

vlan_opts = [
    cfg.ListOpt('network_vlan_ranges',
                default=[],
                # NOTE(review): the angle-bracket placeholders were lost to
                # markup stripping in this patch; restored from upstream.
                help=_("List of <physical_network>:<vlan_min>:<vlan_max> or "
                       "<physical_network> specifying physical_network names "
                       "usable for VLAN provider and tenant networks, as "
                       "well as ranges of VLAN tags on each available for "
                       "allocation to tenant networks."))
]

cfg.CONF.register_opts(vlan_opts, "ml2_type_vlan")


class VlanAllocation(model_base.BASEV2):
    """Represent allocation state of a vlan_id on a physical network.

    If allocated is False, the vlan_id on the physical_network is
    available for allocation to a tenant network. If allocated is
    True, the vlan_id on the physical_network is in use, either as a
    tenant or provider network.

    When an allocation is released, if the vlan_id for the
    physical_network is inside the pool described by
    VlanTypeDriver.network_vlan_ranges, then allocated is set to
    False. If it is outside the pool, the record is deleted.
    """

    __tablename__ = 'ml2_vlan_allocations'

    physical_network = sa.Column(sa.String(64), nullable=False,
                                 primary_key=True)
    vlan_id = sa.Column(sa.Integer, nullable=False, primary_key=True,
                        autoincrement=False)
    allocated = sa.Column(sa.Boolean, nullable=False)


class VlanTypeDriver(api.TypeDriver):
    """Manage state for VLAN networks with ML2.

    The VlanTypeDriver implements the 'vlan' network_type. VLAN
    network segments provide connectivity between VMs and other
    devices using any connected IEEE 802.1Q conformant
    physical_network segmented into virtual networks via IEEE 802.1Q
    headers. Up to 4094 VLAN network segments can exist on each
    available physical_network.
    """

    def __init__(self):
        self._parse_network_vlan_ranges()

    def _parse_network_vlan_ranges(self):
        """Load and validate network_vlan_ranges from configuration.

        Terminates the service (exit code 1) on a malformed option value.
        """
        try:
            self.network_vlan_ranges = plugin_utils.parse_network_vlan_ranges(
                cfg.CONF.ml2_type_vlan.network_vlan_ranges)
            # TODO(rkukura): Validate that each physical_network name
            # is neither empty nor too long.
        except Exception:
            LOG.exception(_("Failed to parse network_vlan_ranges. "
                            "Service terminated!"))
            sys.exit(1)
        LOG.info(_("Network VLAN ranges: %s"), self.network_vlan_ranges)

    def _sync_vlan_allocations(self):
        """Reconcile the ml2_vlan_allocations table with configured ranges.

        Rows for unallocated VLANs that fall outside the configured pools
        are deleted; missing rows for allocatable VLANs are created.
        """
        session = db_api.get_session()
        with session.begin(subtransactions=True):
            # get existing allocations for all physical networks
            allocations = dict()
            allocs = (session.query(VlanAllocation).
                      with_lockmode('update'))
            for alloc in allocs:
                if alloc.physical_network not in allocations:
                    allocations[alloc.physical_network] = set()
                allocations[alloc.physical_network].add(alloc)

            # process vlan ranges for each configured physical network
            for (physical_network,
                 vlan_ranges) in self.network_vlan_ranges.items():
                # determine current configured allocatable vlans for
                # this physical network
                vlan_ids = set()
                for vlan_min, vlan_max in vlan_ranges:
                    vlan_ids |= set(moves.xrange(vlan_min, vlan_max + 1))

                # remove from table unallocated vlans not currently
                # allocatable
                if physical_network in allocations:
                    for alloc in allocations[physical_network]:
                        try:
                            # see if vlan is allocatable
                            vlan_ids.remove(alloc.vlan_id)
                        except KeyError:
                            # it's not allocatable, so check if its allocated
                            if not alloc.allocated:
                                # it's not, so remove it from table
                                LOG.debug(_("Removing vlan %(vlan_id)s on "
                                            "physical network "
                                            "%(physical_network)s from pool"),
                                          {'vlan_id': alloc.vlan_id,
                                           'physical_network':
                                           physical_network})
                                session.delete(alloc)
                    del allocations[physical_network]

                # add missing allocatable vlans to table
                for vlan_id in sorted(vlan_ids):
                    alloc = VlanAllocation(physical_network=physical_network,
                                           vlan_id=vlan_id,
                                           allocated=False)
                    session.add(alloc)

            # remove from table unallocated vlans for any unconfigured
            # physical networks
            # BUG FIX: dict.itervalues() does not exist on Python 3;
            # values() behaves identically here (the file already uses
            # six.moves elsewhere for 2/3 compatibility).
            for allocs in allocations.values():
                for alloc in allocs:
                    if not alloc.allocated:
                        LOG.debug(_("Removing vlan %(vlan_id)s on physical "
                                    "network %(physical_network)s from pool"),
                                  {'vlan_id': alloc.vlan_id,
                                   'physical_network':
                                   alloc.physical_network})
                        session.delete(alloc)

    def get_type(self):
        return p_const.TYPE_VLAN

    def initialize(self):
        self._sync_vlan_allocations()
        LOG.info(_("VlanTypeDriver initialization complete"))

    def validate_provider_segment(self, segment):
        """Validate provider attributes of a VLAN segment.

        :raises: neutron.common.exceptions.InvalidInput on violation
        """
        physical_network = segment.get(api.PHYSICAL_NETWORK)
        if not physical_network:
            msg = _("physical_network required for VLAN provider network")
            raise exc.InvalidInput(error_message=msg)
        if physical_network not in self.network_vlan_ranges:
            msg = (_("physical_network '%s' unknown for VLAN provider network")
                   % physical_network)
            raise exc.InvalidInput(error_message=msg)

        segmentation_id = segment.get(api.SEGMENTATION_ID)
        if segmentation_id is None:
            msg = _("segmentation_id required for VLAN provider network")
            raise exc.InvalidInput(error_message=msg)
        if not utils.is_valid_vlan_tag(segmentation_id):
            msg = (_("segmentation_id out of range (%(min)s through "
                     "%(max)s)") %
                   {'min': q_const.MIN_VLAN_TAG,
                    'max': q_const.MAX_VLAN_TAG})
            raise exc.InvalidInput(error_message=msg)

        for key, value in segment.items():
            if value and key not in [api.NETWORK_TYPE,
                                     api.PHYSICAL_NETWORK,
                                     api.SEGMENTATION_ID]:
                msg = _("%s prohibited for VLAN provider network") % key
                raise exc.InvalidInput(error_message=msg)

    def reserve_provider_segment(self, session, segment):
        """Mark the segment's (physical_network, vlan_id) as allocated.

        Creates the allocation row if the VLAN lies outside the pool.

        :raises: neutron.common.exceptions.VlanIdInUse if already allocated
        """
        physical_network = segment[api.PHYSICAL_NETWORK]
        vlan_id = segment[api.SEGMENTATION_ID]
        with session.begin(subtransactions=True):
            try:
                alloc = (session.query(VlanAllocation).
                         filter_by(physical_network=physical_network,
                                   vlan_id=vlan_id).
                         with_lockmode('update').
                         one())
                if alloc.allocated:
                    raise exc.VlanIdInUse(vlan_id=vlan_id,
                                          physical_network=physical_network)
                LOG.debug(_("Reserving specific vlan %(vlan_id)s on physical "
                            "network %(physical_network)s from pool"),
                          {'vlan_id': vlan_id,
                           'physical_network': physical_network})
                alloc.allocated = True
            except sa.orm.exc.NoResultFound:
                LOG.debug(_("Reserving specific vlan %(vlan_id)s on physical "
                            "network %(physical_network)s outside pool"),
                          {'vlan_id': vlan_id,
                           'physical_network': physical_network})
                alloc = VlanAllocation(physical_network=physical_network,
                                       vlan_id=vlan_id,
                                       allocated=True)
                session.add(alloc)

    def allocate_tenant_segment(self, session):
        """Allocate the first free VLAN, or return None when exhausted."""
        with session.begin(subtransactions=True):
            alloc = (session.query(VlanAllocation).
                     filter_by(allocated=False).
                     with_lockmode('update').
                     first())
            if alloc:
                LOG.debug(_("Allocating vlan %(vlan_id)s on physical network "
                            "%(physical_network)s from pool"),
                          {'vlan_id': alloc.vlan_id,
                           'physical_network': alloc.physical_network})
                alloc.allocated = True
                return {api.NETWORK_TYPE: p_const.TYPE_VLAN,
                        api.PHYSICAL_NETWORK: alloc.physical_network,
                        api.SEGMENTATION_ID: alloc.vlan_id}

    def release_segment(self, session, segment):
        """Free the segment's VLAN: back to the pool, or delete if outside."""
        physical_network = segment[api.PHYSICAL_NETWORK]
        vlan_id = segment[api.SEGMENTATION_ID]
        with session.begin(subtransactions=True):
            try:
                alloc = (session.query(VlanAllocation).
                         filter_by(physical_network=physical_network,
                                   vlan_id=vlan_id).
                         with_lockmode('update').
                         one())
                alloc.allocated = False
                inside = False
                for vlan_min, vlan_max in self.network_vlan_ranges.get(
                        physical_network, []):
                    if vlan_min <= vlan_id <= vlan_max:
                        inside = True
                        break
                if not inside:
                    session.delete(alloc)
                    LOG.debug(_("Releasing vlan %(vlan_id)s on physical "
                                "network %(physical_network)s outside pool"),
                              {'vlan_id': vlan_id,
                               'physical_network': physical_network})
                else:
                    LOG.debug(_("Releasing vlan %(vlan_id)s on physical "
                                "network %(physical_network)s to pool"),
                              {'vlan_id': vlan_id,
                               'physical_network': physical_network})
            except sa.orm.exc.NoResultFound:
                LOG.warning(_("No vlan_id %(vlan_id)s found on physical "
                              "network %(physical_network)s"),
                            {'vlan_id': vlan_id,
                             'physical_network': physical_network})


# ---- neutron/plugins/ml2/drivers/type_vxlan.py (new file in this patch) ----
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# @author: Kyle Mestery, Cisco Systems, Inc.
from oslo.config import cfg
from six import moves
import sqlalchemy as sa
from sqlalchemy.orm import exc as sa_exc

from neutron.common import exceptions as exc
from neutron.db import api as db_api
from neutron.db import model_base
from neutron.openstack.common import log
from neutron.plugins.common import constants as p_const
from neutron.plugins.ml2 import driver_api as api
from neutron.plugins.ml2.drivers import type_tunnel

LOG = log.getLogger(__name__)

# IANA-assigned VXLAN UDP port (RFC 7348).
VXLAN_UDP_PORT = 4789
# VNI is a 24-bit field: 2**24 - 1.
MAX_VXLAN_VNI = 16777215

vxlan_opts = [
    cfg.ListOpt('vni_ranges',
                default=[],
                # NOTE(review): placeholders restored; markup stripping in
                # this patch dropped the "<vni_min>:<vni_max>" text.
                help=_("Comma-separated list of <vni_min>:<vni_max> tuples "
                       "enumerating ranges of VXLAN VNI IDs that are "
                       "available for tenant network allocation")),
    cfg.StrOpt('vxlan_group',
               help=_("Multicast group for VXLAN. If unset, disables VXLAN "
                      "multicast mode.")),
]

cfg.CONF.register_opts(vxlan_opts, "ml2_type_vxlan")


class VxlanAllocation(model_base.BASEV2):
    # Allocation state of a VNI; rows inside the configured ranges persist
    # with allocated=False when free, rows outside are created on demand.

    __tablename__ = 'ml2_vxlan_allocations'

    vxlan_vni = sa.Column(sa.Integer, nullable=False, primary_key=True,
                          autoincrement=False)
    allocated = sa.Column(sa.Boolean, nullable=False, default=False)


class VxlanEndpoints(model_base.BASEV2):
    """Represents tunnel endpoint in RPC mode."""
    __tablename__ = 'ml2_vxlan_endpoints'

    ip_address = sa.Column(sa.String(64), primary_key=True)
    udp_port = sa.Column(sa.Integer, primary_key=True, nullable=False,
                         autoincrement=False)

    def __repr__(self):
        # BUG FIX: the patch text returned "" % self.ip_address, which
        # raises TypeError ("not all arguments converted"); the repr text
        # was stripped by markup mangling and is restored from upstream.
        return "<VxlanTunnelEndpoint(%s)>" % self.ip_address


class VxlanTypeDriver(type_tunnel.TunnelTypeDriver):
    """ML2 type driver implementing the 'vxlan' network_type."""

    def get_type(self):
        return p_const.TYPE_VXLAN

    def initialize(self):
        self.vxlan_vni_ranges = []
        self._parse_tunnel_ranges(
            cfg.CONF.ml2_type_vxlan.vni_ranges,
            self.vxlan_vni_ranges,
            p_const.TYPE_VXLAN
        )
        self._sync_vxlan_allocations()

    def reserve_provider_segment(self, session, segment):
        """Mark the segment's VNI as allocated, creating the row if needed.

        :raises: neutron.common.exceptions.TunnelIdInUse if already allocated
        """
        segmentation_id = segment.get(api.SEGMENTATION_ID)
        with session.begin(subtransactions=True):
            try:
                alloc = (session.query(VxlanAllocation).
                         filter_by(vxlan_vni=segmentation_id).
                         with_lockmode('update').
                         one())
                if alloc.allocated:
                    raise exc.TunnelIdInUse(tunnel_id=segmentation_id)
                LOG.debug(_("Reserving specific vxlan tunnel %s from pool"),
                          segmentation_id)
                alloc.allocated = True
            except sa_exc.NoResultFound:
                LOG.debug(_("Reserving specific vxlan tunnel %s outside pool"),
                          segmentation_id)
                alloc = VxlanAllocation(vxlan_vni=segmentation_id)
                alloc.allocated = True
                session.add(alloc)

    def allocate_tenant_segment(self, session):
        """Allocate the first free VNI, or return None when exhausted."""
        with session.begin(subtransactions=True):
            alloc = (session.query(VxlanAllocation).
                     filter_by(allocated=False).
                     with_lockmode('update').
                     first())
            if alloc:
                LOG.debug(_("Allocating vxlan tunnel vni %(vxlan_vni)s"),
                          {'vxlan_vni': alloc.vxlan_vni})
                alloc.allocated = True
                return {api.NETWORK_TYPE: p_const.TYPE_VXLAN,
                        api.PHYSICAL_NETWORK: None,
                        api.SEGMENTATION_ID: alloc.vxlan_vni}

    def release_segment(self, session, segment):
        """Free the segment's VNI: back to the pool, or delete if outside."""
        vxlan_vni = segment[api.SEGMENTATION_ID]
        with session.begin(subtransactions=True):
            try:
                alloc = (session.query(VxlanAllocation).
                         filter_by(vxlan_vni=vxlan_vni).
                         with_lockmode('update').
                         one())
                alloc.allocated = False
                for low, high in self.vxlan_vni_ranges:
                    if low <= vxlan_vni <= high:
                        LOG.debug(_("Releasing vxlan tunnel %s to pool"),
                                  vxlan_vni)
                        break
                else:
                    session.delete(alloc)
                    LOG.debug(_("Releasing vxlan tunnel %s outside pool"),
                              vxlan_vni)
            except sa_exc.NoResultFound:
                LOG.warning(_("vxlan_vni %s not found"), vxlan_vni)

    def _sync_vxlan_allocations(self):
        """Synchronize vxlan_allocations table with configured tunnel ranges.
        """

        # determine current configured allocatable vnis
        vxlan_vnis = set()
        for tun_min, tun_max in self.vxlan_vni_ranges:
            if tun_max + 1 - tun_min > MAX_VXLAN_VNI:
                LOG.error(_("Skipping unreasonable VXLAN VNI range "
                            "%(tun_min)s:%(tun_max)s"),
                          {'tun_min': tun_min, 'tun_max': tun_max})
            else:
                # BUG FIX: bare xrange() is Python-2-only; use six.moves
                # as the sibling type_vlan driver already does.
                vxlan_vnis |= set(moves.xrange(tun_min, tun_max + 1))

        session = db_api.get_session()
        with session.begin(subtransactions=True):
            # remove from table unallocated tunnels not currently allocatable
            allocs = session.query(VxlanAllocation).with_lockmode("update")
            for alloc in allocs:
                try:
                    # see if tunnel is allocatable
                    vxlan_vnis.remove(alloc.vxlan_vni)
                except KeyError:
                    # it's not allocatable, so check if its allocated
                    if not alloc.allocated:
                        # it's not, so remove it from table
                        LOG.debug(_("Removing tunnel %s from pool"),
                                  alloc.vxlan_vni)
                        session.delete(alloc)

            # add missing allocatable tunnels to table
            for vxlan_vni in sorted(vxlan_vnis):
                alloc = VxlanAllocation(vxlan_vni=vxlan_vni)
                session.add(alloc)

    def get_vxlan_allocation(self, session, vxlan_vni):
        """Return the allocation row for vxlan_vni, or None."""
        with session.begin(subtransactions=True):
            return session.query(VxlanAllocation).filter_by(
                vxlan_vni=vxlan_vni).first()

    def get_endpoints(self):
        """Get every vxlan endpoints from database."""

        LOG.debug(_("get_vxlan_endpoints() called"))
        session = db_api.get_session()

        with session.begin(subtransactions=True):
            vxlan_endpoints = session.query(VxlanEndpoints)
            return [{'ip_address': vxlan_endpoint.ip_address,
                     'udp_port': vxlan_endpoint.udp_port}
                    for vxlan_endpoint in vxlan_endpoints]

    def add_endpoint(self, ip, udp_port=VXLAN_UDP_PORT):
        """Register (or fetch) the tunnel endpoint row for the given IP."""
        LOG.debug(_("add_vxlan_endpoint() called for ip %s"), ip)
        session = db_api.get_session()
        with session.begin(subtransactions=True):
            try:
                vxlan_endpoint = (session.query(VxlanEndpoints).
                                  filter_by(ip_address=ip).
                                  with_lockmode('update').one())
            except sa_exc.NoResultFound:
                vxlan_endpoint = VxlanEndpoints(ip_address=ip,
                                                udp_port=udp_port)
                session.add(vxlan_endpoint)
            return vxlan_endpoint


# ---- neutron/plugins/ml2/managers.py (new file in this patch) ----
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
import stevedore

from neutron.common import exceptions as exc
from neutron.extensions import portbindings
from neutron.openstack.common import log
from neutron.plugins.ml2.common import exceptions as ml2_exc
from neutron.plugins.ml2 import driver_api as api


LOG = log.getLogger(__name__)


class TypeManager(stevedore.named.NamedExtensionManager):
    """Manage network segment types using drivers."""

    def __init__(self):
        # Mapping from type name to DriverManager
        self.drivers = {}

        LOG.info(_("Configured type driver names: %s"),
                 cfg.CONF.ml2.type_drivers)
        super(TypeManager, self).__init__('neutron.ml2.type_drivers',
                                          cfg.CONF.ml2.type_drivers,
                                          invoke_on_load=True)
        LOG.info(_("Loaded type driver names: %s"), self.names())
        self._register_types()
        self._check_tenant_network_types(cfg.CONF.ml2.tenant_network_types)

    def _register_types(self):
        # Index loaded extensions by network type; the first driver
        # registered for a type wins, duplicates are logged and ignored.
        for ext in self:
            network_type = ext.obj.get_type()
            if network_type in self.drivers:
                LOG.error(_("Type driver '%(new_driver)s' ignored because type"
                            " driver '%(old_driver)s' is already registered"
                            " for type '%(type)s'"),
                          {'new_driver': ext.name,
                           'old_driver': self.drivers[network_type].name,
                           'type': network_type})
            else:
                self.drivers[network_type] = ext
        LOG.info(_("Registered types: %s"), self.drivers.keys())

    def _check_tenant_network_types(self, types):
        # Terminate the service if a configured tenant network type has
        # no corresponding loaded type driver.
        self.tenant_network_types = []
        for network_type in types:
            if network_type in self.drivers:
                self.tenant_network_types.append(network_type)
            else:
                msg = _("No type driver for tenant network_type: %s. "
                        "Service terminated!") % network_type
                LOG.error(msg)
                raise SystemExit(1)
        LOG.info(_("Tenant network_types: %s"), self.tenant_network_types)

    def initialize(self):
        # BUG FIX: dict.iteritems() does not exist on Python 3; items()
        # behaves identically for this use.
        for network_type, driver in self.drivers.items():
            LOG.info(_("Initializing driver for type '%s'"), network_type)
            driver.obj.initialize()

    def validate_provider_segment(self, segment):
        network_type = segment[api.NETWORK_TYPE]
        driver = self.drivers.get(network_type)
        if driver:
            driver.obj.validate_provider_segment(segment)
        else:
            msg = _("network_type value '%s' not supported") % network_type
            raise exc.InvalidInput(error_message=msg)

    def reserve_provider_segment(self, session, segment):
        network_type = segment.get(api.NETWORK_TYPE)
        driver = self.drivers.get(network_type)
        # NOTE(review): assumes validate_provider_segment() ran first so
        # driver is never None here -- confirm against plugin callers.
        driver.obj.reserve_provider_segment(session, segment)

    def allocate_tenant_segment(self, session):
        # Try each configured tenant network type in order until one of
        # its drivers can provide a segment.
        for network_type in self.tenant_network_types:
            driver = self.drivers.get(network_type)
            segment = driver.obj.allocate_tenant_segment(session)
            if segment:
                return segment
        raise exc.NoNetworkAvailable()

    def release_segment(self, session, segment):
        network_type = segment.get(api.NETWORK_TYPE)
        driver = self.drivers.get(network_type)
        # ML2 may have been reconfigured since the segment was created,
        # so a driver may no longer exist for this network_type.
        # REVISIT: network_type-specific db entries may become orphaned
        # if a network is deleted and the driver isn't available to release
        # the segment. This may be fixed with explicit foreign-key references
        # or consistency checks on driver initialization.
        if not driver:
            LOG.error(_("Failed to release segment '%s' because "
                        "network type is not supported."), segment)
            return
        driver.obj.release_segment(session, segment)


class MechanismManager(stevedore.named.NamedExtensionManager):
    """Manage networking mechanisms using drivers."""

    def __init__(self):
        # Registered mechanism drivers, keyed by name.
        self.mech_drivers = {}
        # Ordered list of mechanism drivers, defining
        # the order in which the drivers are called.
        self.ordered_mech_drivers = []

        LOG.info(_("Configured mechanism driver names: %s"),
                 cfg.CONF.ml2.mechanism_drivers)
        super(MechanismManager, self).__init__('neutron.ml2.mechanism_drivers',
                                               cfg.CONF.ml2.mechanism_drivers,
                                               invoke_on_load=True,
                                               name_order=True)
        LOG.info(_("Loaded mechanism driver names: %s"), self.names())
        self._register_mechanisms()

    def _register_mechanisms(self):
        """Register all mechanism drivers.

        This method should only be called once in the MechanismManager
        constructor.
        """
        for ext in self:
            self.mech_drivers[ext.name] = ext
            self.ordered_mech_drivers.append(ext)
        LOG.info(_("Registered mechanism drivers: %s"),
                 [driver.name for driver in self.ordered_mech_drivers])

    def initialize(self):
        # For ML2 to support bulk operations, each driver must support them
        self.native_bulk_support = True
        for driver in self.ordered_mech_drivers:
            LOG.info(_("Initializing mechanism driver '%s'"), driver.name)
            driver.obj.initialize()
            self.native_bulk_support &= getattr(driver.obj,
                                                'native_bulk_support', True)

    def _call_on_drivers(self, method_name, context,
                         continue_on_failure=False):
        """Helper method for calling a method across all mechanism drivers.

        :param method_name: name of the method to call
        :param context: context parameter to pass to each method call
        :param continue_on_failure: whether or not to continue to call
        all mechanism drivers once one has raised an exception
        :raises: neutron.plugins.ml2.common.MechanismDriverError
        if any mechanism driver call fails.
        """
        error = False
        for driver in self.ordered_mech_drivers:
            try:
                getattr(driver.obj, method_name)(context)
            except Exception:
                LOG.exception(
                    _("Mechanism driver '%(name)s' failed in %(method)s"),
                    {'name': driver.name, 'method': method_name}
                )
                error = True
                if not continue_on_failure:
                    break
        if error:
            raise ml2_exc.MechanismDriverError(
                method=method_name
            )

    def create_network_precommit(self, context):
        """Notify all mechanism drivers during network creation.

        :raises: neutron.plugins.ml2.common.MechanismDriverError
        if any mechanism driver create_network_precommit call fails.

        Called within the database transaction. If a mechanism driver
        raises an exception, then a MechanismDriverError is propagated
        to the caller, triggering a rollback. There is no guarantee
        that all mechanism drivers are called in this case.
        """
        self._call_on_drivers("create_network_precommit", context)

    def create_network_postcommit(self, context):
        """Notify all mechanism drivers after network creation.

        :raises: neutron.plugins.ml2.common.MechanismDriverError
        if any mechanism driver create_network_postcommit call fails.

        Called after the database transaction. If a mechanism driver
        raises an exception, then a MechanismDriverError is propagated
        to the caller, where the network will be deleted, triggering
        any required cleanup. There is no guarantee that all mechanism
        drivers are called in this case.
        """
        self._call_on_drivers("create_network_postcommit", context)

    def update_network_precommit(self, context):
        """Notify all mechanism drivers during network update.

        :raises: neutron.plugins.ml2.common.MechanismDriverError
        if any mechanism driver update_network_precommit call fails.

        Called within the database transaction. If a mechanism driver
        raises an exception, then a MechanismDriverError is propagated
        to the caller, triggering a rollback. There is no guarantee
        that all mechanism drivers are called in this case.
        """
        self._call_on_drivers("update_network_precommit", context)

    def update_network_postcommit(self, context):
        """Notify all mechanism drivers after network update.

        :raises: neutron.plugins.ml2.common.MechanismDriverError
        if any mechanism driver update_network_postcommit call fails.

        Called after the database transaction. If any mechanism driver
        raises an error, then the error is logged but we continue to
        call every other mechanism driver. A MechanismDriverError is
        then reraised at the end to notify the caller of a failure.
        """
        self._call_on_drivers("update_network_postcommit", context,
                              continue_on_failure=True)

    def delete_network_precommit(self, context):
        """Notify all mechanism drivers during network deletion.

        :raises: neutron.plugins.ml2.common.MechanismDriverError
        if any mechanism driver delete_network_precommit call fails.

        Called within the database transaction. If a mechanism driver
        raises an exception, then a MechanismDriverError is propagated
        to the caller, triggering a rollback. There is no guarantee
        that all mechanism drivers are called in this case.
        """
        self._call_on_drivers("delete_network_precommit", context)

    def delete_network_postcommit(self, context):
        """Notify all mechanism drivers after network deletion.

        :raises: neutron.plugins.ml2.common.MechanismDriverError
        if any mechanism driver delete_network_postcommit call fails.

        Called after the database transaction. If any mechanism driver
        raises an error, then the error is logged but we continue to
        call every other mechanism driver. A MechanismDriverError is
        then reraised at the end to notify the caller of a failure. In
        general we expect the caller to ignore the error, as the
        network resource has already been deleted from the database
        and it doesn't make sense to undo the action by recreating the
        network.
        """
        self._call_on_drivers("delete_network_postcommit", context,
                              continue_on_failure=True)

    def create_subnet_precommit(self, context):
        """Notify all mechanism drivers during subnet creation.

        :raises: neutron.plugins.ml2.common.MechanismDriverError
        if any mechanism driver create_subnet_precommit call fails.

        Called within the database transaction. If a mechanism driver
        raises an exception, then a MechanismDriverError is propagated
        to the caller, triggering a rollback. There is no guarantee
        that all mechanism drivers are called in this case.
        """
        self._call_on_drivers("create_subnet_precommit", context)

    def create_subnet_postcommit(self, context):
        """Notify all mechanism drivers after subnet creation.

        :raises: neutron.plugins.ml2.common.MechanismDriverError
        if any mechanism driver create_subnet_postcommit call fails.

        Called after the database transaction. If a mechanism driver
        raises an exception, then a MechanismDriverError is propagated
        to the caller, where the subnet will be deleted, triggering
        any required cleanup. There is no guarantee that all mechanism
        drivers are called in this case.
        """
        self._call_on_drivers("create_subnet_postcommit", context)

    def update_subnet_precommit(self, context):
        """Notify all mechanism drivers during subnet update.

        :raises: neutron.plugins.ml2.common.MechanismDriverError
        if any mechanism driver update_subnet_precommit call fails.

        Called within the database transaction. If a mechanism driver
        raises an exception, then a MechanismDriverError is propagated
        to the caller, triggering a rollback. There is no guarantee
        that all mechanism drivers are called in this case.
        """
        self._call_on_drivers("update_subnet_precommit", context)

    def update_subnet_postcommit(self, context):
        """Notify all mechanism drivers after subnet update.

        :raises: neutron.plugins.ml2.common.MechanismDriverError
        if any mechanism driver update_subnet_postcommit call fails.

        Called after the database transaction. If any mechanism driver
        raises an error, then the error is logged but we continue to
        call every other mechanism driver. A MechanismDriverError is
        then reraised at the end to notify the caller of a failure.
        """
        self._call_on_drivers("update_subnet_postcommit", context,
                              continue_on_failure=True)

    def delete_subnet_precommit(self, context):
        """Notify all mechanism drivers during subnet deletion.

        :raises: neutron.plugins.ml2.common.MechanismDriverError
        if any mechanism driver delete_subnet_precommit call fails.

        Called within the database transaction. If a mechanism driver
        raises an exception, then a MechanismDriverError is propagated
        to the caller, triggering a rollback. There is no guarantee
        that all mechanism drivers are called in this case.
        """
        self._call_on_drivers("delete_subnet_precommit", context)

    def delete_subnet_postcommit(self, context):
        """Notify all mechanism drivers after subnet deletion.

        :raises: neutron.plugins.ml2.common.MechanismDriverError
        if any mechanism driver delete_subnet_postcommit call fails.

        Called after the database transaction. If any mechanism driver
        raises an error, then the error is logged but we continue to
        call every other mechanism driver. A MechanismDriverError is
        then reraised at the end to notify the caller of a failure. In
        general we expect the caller to ignore the error, as the
        subnet resource has already been deleted from the database
        and it doesn't make sense to undo the action by recreating the
        subnet.
        """
        self._call_on_drivers("delete_subnet_postcommit", context,
                              continue_on_failure=True)

    def create_port_precommit(self, context):
        """Notify all mechanism drivers during port creation.

        :raises: neutron.plugins.ml2.common.MechanismDriverError
        if any mechanism driver create_port_precommit call fails.

        Called within the database transaction. If a mechanism driver
        raises an exception, then a MechanismDriverError is propagated
        to the caller, triggering a rollback. There is no guarantee
        that all mechanism drivers are called in this case.
        """
        self._call_on_drivers("create_port_precommit", context)

    def create_port_postcommit(self, context):
        """Notify all mechanism drivers of port creation.

        :raises: neutron.plugins.ml2.common.MechanismDriverError
        if any mechanism driver create_port_postcommit call fails.

        Called after the database transaction. Errors raised by
        mechanism drivers are left to propagate to the caller, where
        the port will be deleted, triggering any required
        cleanup. There is no guarantee that all mechanism drivers are
        called in this case.
        """
        self._call_on_drivers("create_port_postcommit", context)

    def update_port_precommit(self, context):
        """Notify all mechanism drivers during port update.

        :raises: neutron.plugins.ml2.common.MechanismDriverError
        if any mechanism driver update_port_precommit call fails.

        Called within the database transaction. If a mechanism driver
        raises an exception, then a MechanismDriverError is propagated
        to the caller, triggering a rollback. There is no guarantee
        that all mechanism drivers are called in this case.
        """
        self._call_on_drivers("update_port_precommit", context)

    def update_port_postcommit(self, context):
        """Notify all mechanism drivers after port update.

        :raises: neutron.plugins.ml2.common.MechanismDriverError
        if any mechanism driver update_port_postcommit call fails.

        Called after the database transaction. If any mechanism driver
        raises an error, then the error is logged but we continue to
        call every other mechanism driver. A MechanismDriverError is
        then reraised at the end to notify the caller of a failure.
        """
        self._call_on_drivers("update_port_postcommit", context,
                              continue_on_failure=True)

    def delete_port_precommit(self, context):
        """Notify all mechanism drivers during port deletion.

        :raises: neutron.plugins.ml2.common.MechanismDriverError
        if any mechanism driver delete_port_precommit call fails.

        Called within the database transaction. If a mechanism driver
        raises an exception, then a MechanismDriverError is propagated
        to the caller, triggering a rollback. There is no guarantee
        that all mechanism drivers are called in this case.
        """
        self._call_on_drivers("delete_port_precommit", context)

    def delete_port_postcommit(self, context):
        """Notify all mechanism drivers after port deletion.

        :raises: neutron.plugins.ml2.common.MechanismDriverError
        if any mechanism driver delete_port_postcommit call fails.

        Called after the database transaction. If any mechanism driver
        raises an error, then the error is logged but we continue to
        call every other mechanism driver. A MechanismDriverError is
        then reraised at the end to notify the caller of a failure. In
        general we expect the caller to ignore the error, as the
        port resource has already been deleted from the database
        and it doesn't make sense to undo the action by recreating the
        port.
        """
        self._call_on_drivers("delete_port_postcommit", context,
                              continue_on_failure=True)

    def bind_port(self, context):
        """Attempt to bind a port using registered mechanism drivers.

        :param context: PortContext instance describing the port

        Called inside transaction context on session, prior to
        create_port_precommit or update_port_precommit, to
        attempt to establish a port binding.
        """
        binding = context._binding
        LOG.debug(_("Attempting to bind port %(port)s on host %(host)s "
                    "for vnic_type %(vnic_type)s with profile %(profile)s"),
                  {'port': context._port['id'],
                   'host': binding.host,
                   'vnic_type': binding.vnic_type,
                   'profile': binding.profile})
        for driver in self.ordered_mech_drivers:
            try:
                driver.obj.bind_port(context)
                if binding.segment:
                    binding.driver = driver.name
                    # BUG FIX: the original adjacent string literals ran
                    # "%(profile)s" straight into "driver:", producing
                    # "...profile)sdriver:" in the log line; separator added.
                    LOG.debug(_("Bound port: %(port)s, host: %(host)s, "
                                "vnic_type: %(vnic_type)s, "
                                "profile: %(profile)s, "
                                "driver: %(driver)s, vif_type: %(vif_type)s, "
                                "vif_details: %(vif_details)s, "
                                "segment: %(segment)s"),
                              {'port': context._port['id'],
                               'host': binding.host,
                               'vnic_type': binding.vnic_type,
                               'profile': binding.profile,
                               'driver': binding.driver,
                               'vif_type': binding.vif_type,
                               'vif_details': binding.vif_details,
                               'segment': binding.segment})
                    return
            except Exception:
                LOG.exception(_("Mechanism driver %s failed in "
                                "bind_port"),
                              driver.name)
        binding.vif_type = portbindings.VIF_TYPE_BINDING_FAILED
        LOG.warning(_("Failed to bind port %(port)s on host %(host)s"),
                    {'port': context._port['id'],
                     'host': binding.host})


# ---- neutron/plugins/ml2/models.py (new file in this patch) ----
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
+ +import sqlalchemy as sa +from sqlalchemy import orm + +from neutron.db import model_base +from neutron.db import models_v2 +from neutron.extensions import portbindings + +BINDING_PROFILE_LEN = 4095 + + +class NetworkSegment(model_base.BASEV2, models_v2.HasId): + """Represent persistent state of a network segment. + + A network segment is a portion of a neutron network with a + specific physical realization. A neutron network can consist of + one or more segments. + """ + + __tablename__ = 'ml2_network_segments' + + network_id = sa.Column(sa.String(36), + sa.ForeignKey('networks.id', ondelete="CASCADE"), + nullable=False) + network_type = sa.Column(sa.String(32), nullable=False) + physical_network = sa.Column(sa.String(64)) + segmentation_id = sa.Column(sa.Integer) + + +class PortBinding(model_base.BASEV2): + """Represent binding-related state of a port. + + A port binding stores the port attributes required for the + portbindings extension, as well as internal ml2 state such as + which MechanismDriver and which segment are used by the port + binding. 
+ """ + + __tablename__ = 'ml2_port_bindings' + + port_id = sa.Column(sa.String(36), + sa.ForeignKey('ports.id', ondelete="CASCADE"), + primary_key=True) + host = sa.Column(sa.String(255), nullable=False, default='') + vnic_type = sa.Column(sa.String(64), nullable=False, + default=portbindings.VNIC_NORMAL) + profile = sa.Column(sa.String(BINDING_PROFILE_LEN), nullable=False, + default='') + vif_type = sa.Column(sa.String(64), nullable=False) + vif_details = sa.Column(sa.String(4095), nullable=False, default='') + driver = sa.Column(sa.String(64)) + segment = sa.Column(sa.String(36), + sa.ForeignKey('ml2_network_segments.id', + ondelete="SET NULL")) + + # Add a relationship to the Port model in order to instruct SQLAlchemy to + # eagerly load port bindings + port = orm.relationship( + models_v2.Port, + backref=orm.backref("port_binding", + lazy='joined', uselist=False, + cascade='delete')) diff --git a/neutron/plugins/ml2/plugin.py b/neutron/plugins/ml2/plugin.py new file mode 100644 index 000000000..a324637c5 --- /dev/null +++ b/neutron/plugins/ml2/plugin.py @@ -0,0 +1,791 @@ +# Copyright (c) 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+import contextlib + +from oslo.config import cfg +from sqlalchemy import exc as sql_exc +from sqlalchemy.orm import exc as sa_exc + +from neutron.agent import securitygroups_rpc as sg_rpc +from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api +from neutron.api.v2 import attributes +from neutron.common import constants as const +from neutron.common import exceptions as exc +from neutron.common import rpc_compat +from neutron.common import topics +from neutron.db import agents_db +from neutron.db import agentschedulers_db +from neutron.db import allowedaddresspairs_db as addr_pair_db +from neutron.db import db_base_plugin_v2 +from neutron.db import external_net_db +from neutron.db import extradhcpopt_db +from neutron.db import models_v2 +from neutron.db import quota_db # noqa +from neutron.db import securitygroups_rpc_base as sg_db_rpc +from neutron.extensions import allowedaddresspairs as addr_pair +from neutron.extensions import extra_dhcp_opt as edo_ext +from neutron.extensions import multiprovidernet as mpnet +from neutron.extensions import portbindings +from neutron.extensions import providernet as provider +from neutron import manager +from neutron.openstack.common import db as os_db +from neutron.openstack.common import excutils +from neutron.openstack.common import importutils +from neutron.openstack.common import jsonutils +from neutron.openstack.common import lockutils +from neutron.openstack.common import log +from neutron.plugins.common import constants as service_constants +from neutron.plugins.ml2.common import exceptions as ml2_exc +from neutron.plugins.ml2 import config # noqa +from neutron.plugins.ml2 import db +from neutron.plugins.ml2 import driver_api as api +from neutron.plugins.ml2 import driver_context +from neutron.plugins.ml2 import managers +from neutron.plugins.ml2 import models +from neutron.plugins.ml2 import rpc + +LOG = log.getLogger(__name__) + +# REVISIT(rkukura): Move this and other network_type constants to +# 
providernet.py? +TYPE_MULTI_SEGMENT = 'multi-segment' + + +class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2, + external_net_db.External_net_db_mixin, + sg_db_rpc.SecurityGroupServerRpcMixin, + agentschedulers_db.DhcpAgentSchedulerDbMixin, + addr_pair_db.AllowedAddressPairsMixin, + extradhcpopt_db.ExtraDhcpOptMixin): + + """Implement the Neutron L2 abstractions using modules. + + Ml2Plugin is a Neutron plugin based on separately extensible sets + of network types and mechanisms for connecting to networks of + those types. The network types and mechanisms are implemented as + drivers loaded via Python entry points. Networks can be made up of + multiple segments (not yet fully implemented). + """ + + # This attribute specifies whether the plugin supports or not + # bulk/pagination/sorting operations. Name mangling is used in + # order to ensure it is qualified by class + __native_bulk_support = True + __native_pagination_support = True + __native_sorting_support = True + + # List of supported extensions + _supported_extension_aliases = ["provider", "external-net", "binding", + "quotas", "security-group", "agent", + "dhcp_agent_scheduler", + "multi-provider", "allowed-address-pairs", + "extra_dhcp_opt"] + + @property + def supported_extension_aliases(self): + if not hasattr(self, '_aliases'): + aliases = self._supported_extension_aliases[:] + sg_rpc.disable_security_group_extension_by_config(aliases) + self._aliases = aliases + return self._aliases + + def __init__(self): + # First load drivers, then initialize DB, then initialize drivers + self.type_manager = managers.TypeManager() + self.mechanism_manager = managers.MechanismManager() + super(Ml2Plugin, self).__init__() + self.type_manager.initialize() + self.mechanism_manager.initialize() + # bulk support depends on the underlying drivers + self.__native_bulk_support = self.mechanism_manager.native_bulk_support + + self._setup_rpc() + + # REVISIT(rkukura): Use stevedore for these? 
+ self.network_scheduler = importutils.import_object( + cfg.CONF.network_scheduler_driver + ) + + LOG.info(_("Modular L2 Plugin initialization complete")) + + def _setup_rpc(self): + self.notifier = rpc.AgentNotifierApi(topics.AGENT) + self.agent_notifiers[const.AGENT_TYPE_DHCP] = ( + dhcp_rpc_agent_api.DhcpAgentNotifyAPI() + ) + + def start_rpc_listeners(self): + self.endpoints = [rpc.RpcCallbacks(self.notifier, self.type_manager), + agents_db.AgentExtRpcCallback()] + self.topic = topics.PLUGIN + self.conn = rpc_compat.create_connection(new=True) + self.conn.create_consumer(self.topic, self.endpoints, + fanout=False) + return self.conn.consume_in_threads() + + def _process_provider_segment(self, segment): + network_type = self._get_attribute(segment, provider.NETWORK_TYPE) + physical_network = self._get_attribute(segment, + provider.PHYSICAL_NETWORK) + segmentation_id = self._get_attribute(segment, + provider.SEGMENTATION_ID) + + if attributes.is_attr_set(network_type): + segment = {api.NETWORK_TYPE: network_type, + api.PHYSICAL_NETWORK: physical_network, + api.SEGMENTATION_ID: segmentation_id} + self.type_manager.validate_provider_segment(segment) + return segment + + msg = _("network_type required") + raise exc.InvalidInput(error_message=msg) + + def _process_provider_create(self, network): + segments = [] + + if any(attributes.is_attr_set(network.get(f)) + for f in (provider.NETWORK_TYPE, provider.PHYSICAL_NETWORK, + provider.SEGMENTATION_ID)): + # Verify that multiprovider and provider attributes are not set + # at the same time. 
+ if attributes.is_attr_set(network.get(mpnet.SEGMENTS)): + raise mpnet.SegmentsSetInConjunctionWithProviders() + + network_type = self._get_attribute(network, provider.NETWORK_TYPE) + physical_network = self._get_attribute(network, + provider.PHYSICAL_NETWORK) + segmentation_id = self._get_attribute(network, + provider.SEGMENTATION_ID) + segments = [{provider.NETWORK_TYPE: network_type, + provider.PHYSICAL_NETWORK: physical_network, + provider.SEGMENTATION_ID: segmentation_id}] + elif attributes.is_attr_set(network.get(mpnet.SEGMENTS)): + segments = network[mpnet.SEGMENTS] + else: + return + + return [self._process_provider_segment(s) for s in segments] + + def _get_attribute(self, attrs, key): + value = attrs.get(key) + if value is attributes.ATTR_NOT_SPECIFIED: + value = None + return value + + def _extend_network_dict_provider(self, context, network): + id = network['id'] + segments = db.get_network_segments(context.session, id) + if not segments: + LOG.error(_("Network %s has no segments"), id) + network[provider.NETWORK_TYPE] = None + network[provider.PHYSICAL_NETWORK] = None + network[provider.SEGMENTATION_ID] = None + elif len(segments) > 1: + network[mpnet.SEGMENTS] = [ + {provider.NETWORK_TYPE: segment[api.NETWORK_TYPE], + provider.PHYSICAL_NETWORK: segment[api.PHYSICAL_NETWORK], + provider.SEGMENTATION_ID: segment[api.SEGMENTATION_ID]} + for segment in segments] + else: + segment = segments[0] + network[provider.NETWORK_TYPE] = segment[api.NETWORK_TYPE] + network[provider.PHYSICAL_NETWORK] = segment[api.PHYSICAL_NETWORK] + network[provider.SEGMENTATION_ID] = segment[api.SEGMENTATION_ID] + + def _filter_nets_provider(self, context, nets, filters): + # TODO(rkukura): Implement filtering. 
+ return nets + + def _process_port_binding(self, mech_context, attrs): + binding = mech_context._binding + port = mech_context.current + self._update_port_dict_binding(port, binding) + + host = attrs and attrs.get(portbindings.HOST_ID) + host_set = attributes.is_attr_set(host) + + vnic_type = attrs and attrs.get(portbindings.VNIC_TYPE) + vnic_type_set = attributes.is_attr_set(vnic_type) + + # CLI can't send {}, so treat None as {} + profile = attrs and attrs.get(portbindings.PROFILE) + profile_set = profile is not attributes.ATTR_NOT_SPECIFIED + if profile_set and not profile: + profile = {} + + if binding.vif_type != portbindings.VIF_TYPE_UNBOUND: + if (not host_set and not vnic_type_set and not profile_set and + binding.segment): + return False + self._delete_port_binding(mech_context) + + # Return True only if an agent notification is needed. + # This will happen if a new host, vnic_type, or profile was specified + # that differs from the current one. Note that host_set is True + # even if the host is an empty string + ret_value = ((host_set and binding.get('host') != host) or + (vnic_type_set and + binding.get('vnic_type') != vnic_type) or + (profile_set and self._get_profile(binding) != profile)) + + if host_set: + binding.host = host + port[portbindings.HOST_ID] = host + + if vnic_type_set: + binding.vnic_type = vnic_type + port[portbindings.VNIC_TYPE] = vnic_type + + if profile_set: + binding.profile = jsonutils.dumps(profile) + if len(binding.profile) > models.BINDING_PROFILE_LEN: + msg = _("binding:profile value too large") + raise exc.InvalidInput(error_message=msg) + port[portbindings.PROFILE] = profile + + # To try to [re]bind if host is non-empty. + if binding.host: + self.mechanism_manager.bind_port(mech_context) + self._update_port_dict_binding(port, binding) + + # Update the port status if requested by the bound driver. 
+ if binding.segment and mech_context._new_port_status: + # REVISIT(rkukura): This function is currently called + # inside a transaction with the port either newly + # created or locked for update. After the fix for bug + # 1276391 is merged, this will no longer be true, and + # the port status update will need to be handled in + # the transaction that commits the new binding. + port_db = db.get_port(mech_context._plugin_context.session, + port['id']) + port_db.status = mech_context._new_port_status + port['status'] = mech_context._new_port_status + + return ret_value + + def _update_port_dict_binding(self, port, binding): + port[portbindings.HOST_ID] = binding.host + port[portbindings.VNIC_TYPE] = binding.vnic_type + port[portbindings.PROFILE] = self._get_profile(binding) + port[portbindings.VIF_TYPE] = binding.vif_type + port[portbindings.VIF_DETAILS] = self._get_vif_details(binding) + + def _get_vif_details(self, binding): + if binding.vif_details: + try: + return jsonutils.loads(binding.vif_details) + except Exception: + LOG.error(_("Serialized vif_details DB value '%(value)s' " + "for port %(port)s is invalid"), + {'value': binding.vif_details, + 'port': binding.port_id}) + return {} + + def _get_profile(self, binding): + if binding.profile: + try: + return jsonutils.loads(binding.profile) + except Exception: + LOG.error(_("Serialized profile DB value '%(value)s' for " + "port %(port)s is invalid"), + {'value': binding.profile, + 'port': binding.port_id}) + return {} + + def _delete_port_binding(self, mech_context): + binding = mech_context._binding + binding.vif_type = portbindings.VIF_TYPE_UNBOUND + binding.vif_details = '' + binding.driver = None + binding.segment = None + port = mech_context.current + self._update_port_dict_binding(port, binding) + + def _ml2_extend_port_dict_binding(self, port_res, port_db): + # None when called during unit tests for other plugins. 
+ if port_db.port_binding: + self._update_port_dict_binding(port_res, port_db.port_binding) + + db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs( + attributes.PORTS, ['_ml2_extend_port_dict_binding']) + + # Note - The following hook methods have "ml2" in their names so + # that they are not called twice during unit tests due to global + # registration of hooks in portbindings_db.py used by other + # plugins. + + def _ml2_port_model_hook(self, context, original_model, query): + query = query.outerjoin(models.PortBinding, + (original_model.id == + models.PortBinding.port_id)) + return query + + def _ml2_port_result_filter_hook(self, query, filters): + values = filters and filters.get(portbindings.HOST_ID, []) + if not values: + return query + return query.filter(models.PortBinding.host.in_(values)) + + db_base_plugin_v2.NeutronDbPluginV2.register_model_query_hook( + models_v2.Port, + "ml2_port_bindings", + '_ml2_port_model_hook', + None, + '_ml2_port_result_filter_hook') + + def _notify_port_updated(self, mech_context): + port = mech_context._port + segment = mech_context.bound_segment + if not segment: + # REVISIT(rkukura): This should notify agent to unplug port + network = mech_context.network.current + LOG.warning(_("In _notify_port_updated(), no bound segment for " + "port %(port_id)s on network %(network_id)s"), + {'port_id': port['id'], + 'network_id': network['id']}) + return + self.notifier.port_update(mech_context._plugin_context, port, + segment[api.NETWORK_TYPE], + segment[api.SEGMENTATION_ID], + segment[api.PHYSICAL_NETWORK]) + + # TODO(apech): Need to override bulk operations + + def create_network(self, context, network): + net_data = network['network'] + segments = self._process_provider_create(net_data) + tenant_id = self._get_tenant_id_for_create(context, net_data) + + session = context.session + with session.begin(subtransactions=True): + self._ensure_default_security_group(context, tenant_id) + result = super(Ml2Plugin, 
self).create_network(context, network) + network_id = result['id'] + self._process_l3_create(context, result, net_data) + # REVISIT(rkukura): Consider moving all segment management + # to TypeManager. + if segments: + for segment in segments: + self.type_manager.reserve_provider_segment(session, + segment) + db.add_network_segment(session, network_id, segment) + else: + segment = self.type_manager.allocate_tenant_segment(session) + db.add_network_segment(session, network_id, segment) + self._extend_network_dict_provider(context, result) + mech_context = driver_context.NetworkContext(self, context, + result) + self.mechanism_manager.create_network_precommit(mech_context) + + try: + self.mechanism_manager.create_network_postcommit(mech_context) + except ml2_exc.MechanismDriverError: + with excutils.save_and_reraise_exception(): + LOG.error(_("mechanism_manager.create_network_postcommit " + "failed, deleting network '%s'"), result['id']) + self.delete_network(context, result['id']) + return result + + def update_network(self, context, id, network): + provider._raise_if_updates_provider_attributes(network['network']) + + session = context.session + with session.begin(subtransactions=True): + original_network = super(Ml2Plugin, self).get_network(context, id) + updated_network = super(Ml2Plugin, self).update_network(context, + id, + network) + self._process_l3_update(context, updated_network, + network['network']) + self._extend_network_dict_provider(context, updated_network) + mech_context = driver_context.NetworkContext( + self, context, updated_network, + original_network=original_network) + self.mechanism_manager.update_network_precommit(mech_context) + + # TODO(apech) - handle errors raised by update_network, potentially + # by re-calling update_network with the previous attributes. For + # now the error is propogated to the caller, which is expected to + # either undo/retry the operation or delete the resource. 
+ self.mechanism_manager.update_network_postcommit(mech_context) + return updated_network + + def get_network(self, context, id, fields=None): + session = context.session + with session.begin(subtransactions=True): + result = super(Ml2Plugin, self).get_network(context, id, None) + self._extend_network_dict_provider(context, result) + + return self._fields(result, fields) + + def get_networks(self, context, filters=None, fields=None, + sorts=None, limit=None, marker=None, page_reverse=False): + session = context.session + with session.begin(subtransactions=True): + nets = super(Ml2Plugin, + self).get_networks(context, filters, None, sorts, + limit, marker, page_reverse) + for net in nets: + self._extend_network_dict_provider(context, net) + + nets = self._filter_nets_provider(context, nets, filters) + nets = self._filter_nets_l3(context, nets, filters) + + return [self._fields(net, fields) for net in nets] + + def delete_network(self, context, id): + # REVISIT(rkukura) The super(Ml2Plugin, self).delete_network() + # function is not used because it auto-deletes ports and + # subnets from the DB without invoking the derived class's + # delete_port() or delete_subnet(), preventing mechanism + # drivers from being called. This approach should be revisited + # when the API layer is reworked during icehouse. + + LOG.debug(_("Deleting network %s"), id) + session = context.session + while True: + try: + with session.begin(subtransactions=True): + self._process_l3_delete(context, id) + + # Get ports to auto-delete. + ports = (session.query(models_v2.Port). + enable_eagerloads(False). + filter_by(network_id=id). + with_lockmode('update').all()) + LOG.debug(_("Ports to auto-delete: %s"), ports) + only_auto_del = all(p.device_owner + in db_base_plugin_v2. + AUTO_DELETE_PORT_OWNERS + for p in ports) + if not only_auto_del: + LOG.debug(_("Tenant-owned ports exist")) + raise exc.NetworkInUse(net_id=id) + + # Get subnets to auto-delete. + subnets = (session.query(models_v2.Subnet). 
+ enable_eagerloads(False). + filter_by(network_id=id). + with_lockmode('update').all()) + LOG.debug(_("Subnets to auto-delete: %s"), subnets) + + if not (ports or subnets): + network = self.get_network(context, id) + mech_context = driver_context.NetworkContext(self, + context, + network) + self.mechanism_manager.delete_network_precommit( + mech_context) + + record = self._get_network(context, id) + LOG.debug(_("Deleting network record %s"), record) + session.delete(record) + + for segment in mech_context.network_segments: + self.type_manager.release_segment(session, segment) + + # The segment records are deleted via cascade from the + # network record, so explicit removal is not necessary. + LOG.debug(_("Committing transaction")) + break + except os_db.exception.DBError as e: + with excutils.save_and_reraise_exception() as ctxt: + if isinstance(e.inner_exception, sql_exc.IntegrityError): + ctxt.reraise = False + msg = _("A concurrent port creation has occurred") + LOG.warning(msg) + continue + + for port in ports: + try: + self.delete_port(context, port.id) + except Exception: + with excutils.save_and_reraise_exception(): + LOG.exception(_("Exception auto-deleting port %s"), + port.id) + + for subnet in subnets: + try: + self.delete_subnet(context, subnet.id) + except Exception: + with excutils.save_and_reraise_exception(): + LOG.exception(_("Exception auto-deleting subnet %s"), + subnet.id) + + try: + self.mechanism_manager.delete_network_postcommit(mech_context) + except ml2_exc.MechanismDriverError: + # TODO(apech) - One or more mechanism driver failed to + # delete the network. Ideally we'd notify the caller of + # the fact that an error occurred. 
+ LOG.error(_("mechanism_manager.delete_network_postcommit failed")) + self.notifier.network_delete(context, id) + + def create_subnet(self, context, subnet): + session = context.session + with session.begin(subtransactions=True): + result = super(Ml2Plugin, self).create_subnet(context, subnet) + mech_context = driver_context.SubnetContext(self, context, result) + self.mechanism_manager.create_subnet_precommit(mech_context) + + try: + self.mechanism_manager.create_subnet_postcommit(mech_context) + except ml2_exc.MechanismDriverError: + with excutils.save_and_reraise_exception(): + LOG.error(_("mechanism_manager.create_subnet_postcommit " + "failed, deleting subnet '%s'"), result['id']) + self.delete_subnet(context, result['id']) + return result + + def update_subnet(self, context, id, subnet): + session = context.session + with session.begin(subtransactions=True): + original_subnet = super(Ml2Plugin, self).get_subnet(context, id) + updated_subnet = super(Ml2Plugin, self).update_subnet( + context, id, subnet) + mech_context = driver_context.SubnetContext( + self, context, updated_subnet, original_subnet=original_subnet) + self.mechanism_manager.update_subnet_precommit(mech_context) + + # TODO(apech) - handle errors raised by update_subnet, potentially + # by re-calling update_subnet with the previous attributes. For + # now the error is propogated to the caller, which is expected to + # either undo/retry the operation or delete the resource. + self.mechanism_manager.update_subnet_postcommit(mech_context) + return updated_subnet + + def delete_subnet(self, context, id): + # REVISIT(rkukura) The super(Ml2Plugin, self).delete_subnet() + # function is not used because it deallocates the subnet's addresses + # from ports in the DB without invoking the derived class's + # update_port(), preventing mechanism drivers from being called. + # This approach should be revisited when the API layer is reworked + # during icehouse. 
+ + LOG.debug(_("Deleting subnet %s"), id) + session = context.session + while True: + with session.begin(subtransactions=True): + subnet = self.get_subnet(context, id) + # Get ports to auto-deallocate + allocated = (session.query(models_v2.IPAllocation). + filter_by(subnet_id=id). + join(models_v2.Port). + filter_by(network_id=subnet['network_id']). + with_lockmode('update').all()) + LOG.debug(_("Ports to auto-deallocate: %s"), allocated) + only_auto_del = all(not a.port_id or + a.ports.device_owner in db_base_plugin_v2. + AUTO_DELETE_PORT_OWNERS + for a in allocated) + if not only_auto_del: + LOG.debug(_("Tenant-owned ports exist")) + raise exc.SubnetInUse(subnet_id=id) + + if not allocated: + mech_context = driver_context.SubnetContext(self, context, + subnet) + self.mechanism_manager.delete_subnet_precommit( + mech_context) + + LOG.debug(_("Deleting subnet record")) + record = self._get_subnet(context, id) + session.delete(record) + + LOG.debug(_("Committing transaction")) + break + + for a in allocated: + if a.port_id: + # calling update_port() for each allocation to remove the + # IP from the port and call the MechanismDrivers + data = {'port': + {'fixed_ips': [{'subnet_id': ip.subnet_id, + 'ip_address': ip.ip_address} + for ip in a.ports.fixed_ips + if ip.subnet_id != id]}} + try: + self.update_port(context, a.port_id, data) + except Exception: + with excutils.save_and_reraise_exception(): + LOG.exception(_("Exception deleting fixed_ip from " + "port %s"), a.port_id) + session.delete(a) + + try: + self.mechanism_manager.delete_subnet_postcommit(mech_context) + except ml2_exc.MechanismDriverError: + # TODO(apech) - One or more mechanism driver failed to + # delete the subnet. Ideally we'd notify the caller of + # the fact that an error occurred. 
+ LOG.error(_("mechanism_manager.delete_subnet_postcommit failed")) + + def create_port(self, context, port): + attrs = port['port'] + attrs['status'] = const.PORT_STATUS_DOWN + + session = context.session + with session.begin(subtransactions=True): + self._ensure_default_security_group_on_port(context, port) + sgids = self._get_security_groups_on_port(context, port) + dhcp_opts = port['port'].get(edo_ext.EXTRADHCPOPTS, []) + result = super(Ml2Plugin, self).create_port(context, port) + self._process_port_create_security_group(context, result, sgids) + network = self.get_network(context, result['network_id']) + mech_context = driver_context.PortContext(self, context, result, + network) + self._process_port_binding(mech_context, attrs) + result[addr_pair.ADDRESS_PAIRS] = ( + self._process_create_allowed_address_pairs( + context, result, + attrs.get(addr_pair.ADDRESS_PAIRS))) + self._process_port_create_extra_dhcp_opts(context, result, + dhcp_opts) + self.mechanism_manager.create_port_precommit(mech_context) + + try: + self.mechanism_manager.create_port_postcommit(mech_context) + except ml2_exc.MechanismDriverError: + with excutils.save_and_reraise_exception(): + LOG.error(_("mechanism_manager.create_port_postcommit " + "failed, deleting port '%s'"), result['id']) + self.delete_port(context, result['id']) + self.notify_security_groups_member_updated(context, result) + return result + + def update_port(self, context, id, port): + attrs = port['port'] + need_port_update_notify = False + + session = context.session + with session.begin(subtransactions=True): + try: + port_db = (session.query(models_v2.Port). + enable_eagerloads(False). 
+ filter_by(id=id).with_lockmode('update').one()) + except sa_exc.NoResultFound: + raise exc.PortNotFound(port_id=id) + original_port = self._make_port_dict(port_db) + updated_port = super(Ml2Plugin, self).update_port(context, id, + port) + if addr_pair.ADDRESS_PAIRS in port['port']: + need_port_update_notify |= ( + self.update_address_pairs_on_port(context, id, port, + original_port, + updated_port)) + need_port_update_notify |= self.update_security_group_on_port( + context, id, port, original_port, updated_port) + network = self.get_network(context, original_port['network_id']) + need_port_update_notify |= self._update_extra_dhcp_opts_on_port( + context, id, port, updated_port) + mech_context = driver_context.PortContext( + self, context, updated_port, network, + original_port=original_port) + need_port_update_notify |= self._process_port_binding( + mech_context, attrs) + self.mechanism_manager.update_port_precommit(mech_context) + + # TODO(apech) - handle errors raised by update_port, potentially + # by re-calling update_port with the previous attributes. For + # now the error is propogated to the caller, which is expected to + # either undo/retry the operation or delete the resource. 
+ self.mechanism_manager.update_port_postcommit(mech_context) + + need_port_update_notify |= self.is_security_group_member_updated( + context, original_port, updated_port) + + if original_port['admin_state_up'] != updated_port['admin_state_up']: + need_port_update_notify = True + + if need_port_update_notify: + self._notify_port_updated(mech_context) + + return updated_port + + def delete_port(self, context, id, l3_port_check=True): + LOG.debug(_("Deleting port %s"), id) + l3plugin = manager.NeutronManager.get_service_plugins().get( + service_constants.L3_ROUTER_NAT) + if l3plugin and l3_port_check: + l3plugin.prevent_l3_port_deletion(context, id) + + session = context.session + # REVISIT: Serialize this operation with a semaphore to prevent + # undesired eventlet yields leading to 'lock wait timeout' errors + with contextlib.nested(lockutils.lock('db-access'), + session.begin(subtransactions=True)): + try: + port_db = (session.query(models_v2.Port). + enable_eagerloads(False). + filter_by(id=id).with_lockmode('update').one()) + except sa_exc.NoResultFound: + # the port existed when l3plugin.prevent_l3_port_deletion + # was called but now is already gone + LOG.debug(_("The port '%s' was deleted"), id) + return + port = self._make_port_dict(port_db) + + network = self.get_network(context, port['network_id']) + mech_context = driver_context.PortContext(self, context, port, + network) + self.mechanism_manager.delete_port_precommit(mech_context) + self._delete_port_security_group_bindings(context, id) + LOG.debug(_("Calling base delete_port")) + if l3plugin: + l3plugin.disassociate_floatingips(context, id) + + super(Ml2Plugin, self).delete_port(context, id) + + try: + self.mechanism_manager.delete_port_postcommit(mech_context) + except ml2_exc.MechanismDriverError: + # TODO(apech) - One or more mechanism driver failed to + # delete the port. Ideally we'd notify the caller of the + # fact that an error occurred. 
+ LOG.error(_("mechanism_manager.delete_port_postcommit failed")) + self.notify_security_groups_member_updated(context, port) + + def update_port_status(self, context, port_id, status): + updated = False + session = context.session + # REVISIT: Serialize this operation with a semaphore to prevent + # undesired eventlet yields leading to 'lock wait timeout' errors + with contextlib.nested(lockutils.lock('db-access'), + session.begin(subtransactions=True)): + port = db.get_port(session, port_id) + if not port: + LOG.warning(_("Port %(port)s updated up by agent not found"), + {'port': port_id}) + return False + if port.status != status: + original_port = self._make_port_dict(port) + port.status = status + updated_port = self._make_port_dict(port) + network = self.get_network(context, + original_port['network_id']) + mech_context = driver_context.PortContext( + self, context, updated_port, network, + original_port=original_port) + self.mechanism_manager.update_port_precommit(mech_context) + updated = True + + if updated: + self.mechanism_manager.update_port_postcommit(mech_context) + + return True + + def port_bound_to_host(self, port_id, host): + port_host = db.get_port_binding_host(port_id) + return (port_host == host) diff --git a/neutron/plugins/ml2/rpc.py b/neutron/plugins/ml2/rpc.py new file mode 100644 index 000000000..c744147c6 --- /dev/null +++ b/neutron/plugins/ml2/rpc.py @@ -0,0 +1,239 @@ +# Copyright (c) 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo import messaging + +from neutron.agent import securitygroups_rpc as sg_rpc +from neutron.common import constants as q_const +from neutron.common import rpc_compat +from neutron.common import topics +from neutron.db import api as db_api +from neutron.db import dhcp_rpc_base +from neutron.db import securitygroups_rpc_base as sg_db_rpc +from neutron import manager +from neutron.openstack.common import log +from neutron.openstack.common import uuidutils +from neutron.plugins.ml2 import db +from neutron.plugins.ml2 import driver_api as api +from neutron.plugins.ml2.drivers import type_tunnel +# REVISIT(kmestery): Allow the type and mechanism drivers to supply the +# mixins and eventually remove the direct dependencies on type_tunnel. + +LOG = log.getLogger(__name__) + +TAP_DEVICE_PREFIX = 'tap' +TAP_DEVICE_PREFIX_LENGTH = 3 + + +class RpcCallbacks(dhcp_rpc_base.DhcpRpcCallbackMixin, + sg_db_rpc.SecurityGroupServerRpcCallbackMixin, + type_tunnel.TunnelRpcCallbackMixin): + + RPC_API_VERSION = '1.1' + # history + # 1.0 Initial version (from openvswitch/linuxbridge) + # 1.1 Support Security Group RPC + + # FIXME(ihrachys): we can't use rpc_compat.RpcCallback here due to + # inheritance problems + target = messaging.Target(version=RPC_API_VERSION) + + def __init__(self, notifier, type_manager): + # REVISIT(kmestery): This depends on the first three super classes + # not having their own __init__ functions. If an __init__() is added + # to one, this could break. Fix this and add a unit test to cover this + # test in H3. + super(RpcCallbacks, self).__init__(notifier, type_manager) + + @classmethod + def _device_to_port_id(cls, device): + # REVISIT(rkukura): Consider calling into MechanismDrivers to + # process device names, or having MechanismDrivers supply list + # of device prefixes to strip. 
+ if device.startswith(TAP_DEVICE_PREFIX): + return device[TAP_DEVICE_PREFIX_LENGTH:] + else: + # REVISIT(irenab): Consider calling into bound MD to + # handle the get_device_details RPC, then remove the 'else' clause + if not uuidutils.is_uuid_like(device): + port = db.get_port_from_device_mac(device) + if port: + return port.id + return device + + @classmethod + def get_port_from_device(cls, device): + port_id = cls._device_to_port_id(device) + port = db.get_port_and_sgs(port_id) + if port: + port['device'] = device + return port + + def get_device_details(self, rpc_context, **kwargs): + """Agent requests device details.""" + agent_id = kwargs.get('agent_id') + device = kwargs.get('device') + LOG.debug(_("Device %(device)s details requested by agent " + "%(agent_id)s"), + {'device': device, 'agent_id': agent_id}) + port_id = self._device_to_port_id(device) + + session = db_api.get_session() + with session.begin(subtransactions=True): + port = db.get_port(session, port_id) + if not port: + LOG.warning(_("Device %(device)s requested by agent " + "%(agent_id)s not found in database"), + {'device': device, 'agent_id': agent_id}) + return {'device': device} + + segments = db.get_network_segments(session, port.network_id) + if not segments: + LOG.warning(_("Device %(device)s requested by agent " + "%(agent_id)s has network %(network_id)s with " + "no segments"), + {'device': device, + 'agent_id': agent_id, + 'network_id': port.network_id}) + return {'device': device} + + binding = db.ensure_port_binding(session, port.id) + if not binding.segment: + LOG.warning(_("Device %(device)s requested by agent " + "%(agent_id)s on network %(network_id)s not " + "bound, vif_type: %(vif_type)s"), + {'device': device, + 'agent_id': agent_id, + 'network_id': port.network_id, + 'vif_type': binding.vif_type}) + return {'device': device} + + segment = self._find_segment(segments, binding.segment) + if not segment: + LOG.warning(_("Device %(device)s requested by agent " + "%(agent_id)s 
on network %(network_id)s " + "invalid segment, vif_type: %(vif_type)s"), + {'device': device, + 'agent_id': agent_id, + 'network_id': port.network_id, + 'vif_type': binding.vif_type}) + return {'device': device} + + new_status = (q_const.PORT_STATUS_BUILD if port.admin_state_up + else q_const.PORT_STATUS_DOWN) + if port.status != new_status: + plugin = manager.NeutronManager.get_plugin() + plugin.update_port_status(rpc_context, + port_id, + new_status) + port.status = new_status + entry = {'device': device, + 'network_id': port.network_id, + 'port_id': port.id, + 'admin_state_up': port.admin_state_up, + 'network_type': segment[api.NETWORK_TYPE], + 'segmentation_id': segment[api.SEGMENTATION_ID], + 'physical_network': segment[api.PHYSICAL_NETWORK]} + LOG.debug(_("Returning: %s"), entry) + return entry + + def _find_segment(self, segments, segment_id): + for segment in segments: + if segment[api.ID] == segment_id: + return segment + + def update_device_down(self, rpc_context, **kwargs): + """Device no longer exists on agent.""" + # TODO(garyk) - live migration and port status + agent_id = kwargs.get('agent_id') + device = kwargs.get('device') + host = kwargs.get('host') + LOG.debug(_("Device %(device)s no longer exists at agent " + "%(agent_id)s"), + {'device': device, 'agent_id': agent_id}) + plugin = manager.NeutronManager.get_plugin() + port_id = self._device_to_port_id(device) + port_exists = True + if (host and not plugin.port_bound_to_host(port_id, host)): + LOG.debug(_("Device %(device)s not bound to the" + " agent host %(host)s"), + {'device': device, 'host': host}) + return {'device': device, + 'exists': port_exists} + + port_exists = plugin.update_port_status(rpc_context, port_id, + q_const.PORT_STATUS_DOWN) + + return {'device': device, + 'exists': port_exists} + + def update_device_up(self, rpc_context, **kwargs): + """Device is up on agent.""" + agent_id = kwargs.get('agent_id') + device = kwargs.get('device') + host = kwargs.get('host') + 
LOG.debug(_("Device %(device)s up at agent %(agent_id)s"), + {'device': device, 'agent_id': agent_id}) + plugin = manager.NeutronManager.get_plugin() + port_id = self._device_to_port_id(device) + if (host and not plugin.port_bound_to_host(port_id, host)): + LOG.debug(_("Device %(device)s not bound to the" + " agent host %(host)s"), + {'device': device, 'host': host}) + return + + plugin.update_port_status(rpc_context, port_id, + q_const.PORT_STATUS_ACTIVE) + + +class AgentNotifierApi(rpc_compat.RpcProxy, + sg_rpc.SecurityGroupAgentRpcApiMixin, + type_tunnel.TunnelAgentRpcApiMixin): + """Agent side of the openvswitch rpc API. + + API version history: + 1.0 - Initial version. + 1.1 - Added get_active_networks_info, create_dhcp_port, + update_dhcp_port, and removed get_dhcp_port methods. + + """ + + BASE_RPC_API_VERSION = '1.1' + + def __init__(self, topic): + super(AgentNotifierApi, self).__init__( + topic=topic, default_version=self.BASE_RPC_API_VERSION) + self.topic_network_delete = topics.get_topic_name(topic, + topics.NETWORK, + topics.DELETE) + self.topic_port_update = topics.get_topic_name(topic, + topics.PORT, + topics.UPDATE) + + def network_delete(self, context, network_id): + self.fanout_cast(context, + self.make_msg('network_delete', + network_id=network_id), + topic=self.topic_network_delete) + + def port_update(self, context, port, network_type, segmentation_id, + physical_network): + self.fanout_cast(context, + self.make_msg('port_update', + port=port, + network_type=network_type, + segmentation_id=segmentation_id, + physical_network=physical_network), + topic=self.topic_port_update) diff --git a/neutron/plugins/mlnx/README b/neutron/plugins/mlnx/README new file mode 100644 index 000000000..97c24ce0b --- /dev/null +++ b/neutron/plugins/mlnx/README @@ -0,0 +1,8 @@ +Mellanox Neutron Plugin + +This plugin implements Neutron v2 APIs with support for +Mellanox embedded switch functionality as part of the +VPI (Ethernet/InfiniBand) HCA. 
+ +For more details on the plugin, please refer to the following link: +https://wiki.openstack.org/wiki/Mellanox-Quantum diff --git a/neutron/plugins/mlnx/__init__.py b/neutron/plugins/mlnx/__init__.py new file mode 100644 index 000000000..c818bfe31 --- /dev/null +++ b/neutron/plugins/mlnx/__init__.py @@ -0,0 +1,16 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013 Mellanox Technologies, Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/neutron/plugins/mlnx/agent/__init__.py b/neutron/plugins/mlnx/agent/__init__.py new file mode 100644 index 000000000..c818bfe31 --- /dev/null +++ b/neutron/plugins/mlnx/agent/__init__.py @@ -0,0 +1,16 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013 Mellanox Technologies, Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
diff --git a/neutron/plugins/mlnx/agent/eswitch_neutron_agent.py b/neutron/plugins/mlnx/agent/eswitch_neutron_agent.py new file mode 100644 index 000000000..f60f02bb7 --- /dev/null +++ b/neutron/plugins/mlnx/agent/eswitch_neutron_agent.py @@ -0,0 +1,438 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013 Mellanox Technologies, Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import socket +import sys +import time + +import eventlet +eventlet.monkey_patch() + +from oslo.config import cfg + +from neutron.agent import rpc as agent_rpc +from neutron.agent import securitygroups_rpc as sg_rpc +from neutron.common import config as common_config +from neutron.common import constants as q_constants +from neutron.common import rpc_compat +from neutron.common import topics +from neutron.common import utils as q_utils +from neutron import context +from neutron.openstack.common import log as logging +from neutron.openstack.common import loopingcall +from neutron.plugins.common import constants as p_const +from neutron.plugins.mlnx.agent import utils +from neutron.plugins.mlnx.common import config # noqa +from neutron.plugins.mlnx.common import exceptions + +LOG = logging.getLogger(__name__) + + +class EswitchManager(object): + def __init__(self, interface_mappings, endpoint, timeout): + self.utils = utils.EswitchUtils(endpoint, timeout) + self.interface_mappings = interface_mappings + self.network_map = {} + 
self.utils.define_fabric_mappings(interface_mappings) + + def get_port_id_by_mac(self, port_mac): + for network_id, data in self.network_map.iteritems(): + for port in data['ports']: + if port['port_mac'] == port_mac: + return port['port_id'] + err_msg = _("Agent cache inconsistency - port id " + "is not stored for %s") % port_mac + LOG.error(err_msg) + raise exceptions.MlnxException(err_msg=err_msg) + + def get_vnics_mac(self): + return set(self.utils.get_attached_vnics().keys()) + + def vnic_port_exists(self, port_mac): + return port_mac in self.utils.get_attached_vnics() + + def remove_network(self, network_id): + if network_id in self.network_map: + del self.network_map[network_id] + else: + LOG.debug(_("Network %s not defined on Agent."), network_id) + + def port_down(self, network_id, physical_network, port_mac): + """Sets port to down. + + Check internal network map for port data. + If port exists set port to Down + """ + for network_id, data in self.network_map.iteritems(): + for port in data['ports']: + if port['port_mac'] == port_mac: + self.utils.port_down(physical_network, port_mac) + return + LOG.info(_('Network %s is not available on this agent'), network_id) + + def port_up(self, network_id, network_type, + physical_network, seg_id, port_id, port_mac): + """Sets port to up. + + Update internal network map with port data. 
+ - Check if vnic defined + - configure eswitch vport + - set port to Up + """ + LOG.debug(_("Connecting port %s"), port_id) + + if network_id not in self.network_map: + self.provision_network(port_id, port_mac, + network_id, network_type, + physical_network, seg_id) + net_map = self.network_map[network_id] + net_map['ports'].append({'port_id': port_id, 'port_mac': port_mac}) + + if network_type == p_const.TYPE_VLAN: + LOG.info(_('Binding Segmentation ID %(seg_id)s' + 'to eSwitch for vNIC mac_address %(mac)s'), + {'seg_id': seg_id, + 'mac': port_mac}) + self.utils.set_port_vlan_id(physical_network, + seg_id, + port_mac) + self.utils.port_up(physical_network, port_mac) + else: + LOG.error(_('Unsupported network type %s'), network_type) + + def port_release(self, port_mac): + """Clear port configuration from eSwitch.""" + for network_id, net_data in self.network_map.iteritems(): + for port in net_data['ports']: + if port['port_mac'] == port_mac: + self.utils.port_release(net_data['physical_network'], + port['port_mac']) + return + LOG.info(_('Port_mac %s is not available on this agent'), port_mac) + + def provision_network(self, port_id, port_mac, + network_id, network_type, + physical_network, segmentation_id): + LOG.info(_("Provisioning network %s"), network_id) + if network_type == p_const.TYPE_VLAN: + LOG.debug(_("Creating VLAN Network")) + else: + LOG.error(_("Unknown network type %(network_type)s " + "for network %(network_id)s"), + {'network_type': network_type, + 'network_id': network_id}) + return + data = { + 'physical_network': physical_network, + 'network_type': network_type, + 'ports': [], + 'vlan_id': segmentation_id} + self.network_map[network_id] = data + + +class MlnxEswitchRpcCallbacks(rpc_compat.RpcCallback, + sg_rpc.SecurityGroupAgentRpcCallbackMixin): + + # Set RPC API version to 1.0 by default. 
+ # history + # 1.1 Support Security Group RPC + RPC_API_VERSION = '1.1' + + def __init__(self, context, agent): + super(MlnxEswitchRpcCallbacks, self).__init__() + self.context = context + self.agent = agent + self.eswitch = agent.eswitch + self.sg_agent = agent + + def network_delete(self, context, **kwargs): + LOG.debug(_("network_delete received")) + network_id = kwargs.get('network_id') + if not network_id: + LOG.warning(_("Invalid Network ID, cannot remove Network")) + else: + LOG.debug(_("Delete network %s"), network_id) + self.eswitch.remove_network(network_id) + + def port_update(self, context, **kwargs): + LOG.debug(_("port_update received")) + port = kwargs.get('port') + net_type = kwargs.get('network_type') + segmentation_id = kwargs.get('segmentation_id') + if not segmentation_id: + # compatibility with pre-Havana RPC vlan_id encoding + segmentation_id = kwargs.get('vlan_id') + physical_network = kwargs.get('physical_network') + net_id = port['network_id'] + if self.eswitch.vnic_port_exists(port['mac_address']): + if 'security_groups' in port: + self.sg_agent.refresh_firewall() + try: + if port['admin_state_up']: + self.eswitch.port_up(net_id, + net_type, + physical_network, + segmentation_id, + port['id'], + port['mac_address']) + # update plugin about port status + self.agent.plugin_rpc.update_device_up(self.context, + port['mac_address'], + self.agent.agent_id, + cfg.CONF.host) + else: + self.eswitch.port_down(net_id, + physical_network, + port['mac_address']) + # update plugin about port status + self.agent.plugin_rpc.update_device_down( + self.context, + port['mac_address'], + self.agent.agent_id, + cfg.CONF.host) + except rpc_compat.MessagingTimeout: + LOG.error(_("RPC timeout while updating port %s"), port['id']) + else: + LOG.debug(_("No port %s defined on agent."), port['id']) + + +class MlnxEswitchPluginApi(agent_rpc.PluginApi, + sg_rpc.SecurityGroupServerRpcApiMixin): + pass + + +class 
MlnxEswitchNeutronAgent(sg_rpc.SecurityGroupAgentRpcMixin): + # Set RPC API version to 1.0 by default. + #RPC_API_VERSION = '1.0' + + def __init__(self, interface_mapping): + self._polling_interval = cfg.CONF.AGENT.polling_interval + self._setup_eswitches(interface_mapping) + configurations = {'interface_mappings': interface_mapping} + self.agent_state = { + 'binary': 'neutron-mlnx-agent', + 'host': cfg.CONF.host, + 'topic': q_constants.L2_AGENT_TOPIC, + 'configurations': configurations, + 'agent_type': q_constants.AGENT_TYPE_MLNX, + 'start_flag': True} + self._setup_rpc() + self.init_firewall() + + def _setup_eswitches(self, interface_mapping): + daemon = cfg.CONF.ESWITCH.daemon_endpoint + timeout = cfg.CONF.ESWITCH.request_timeout + self.eswitch = EswitchManager(interface_mapping, daemon, timeout) + + def _report_state(self): + try: + devices = len(self.eswitch.get_vnics_mac()) + self.agent_state.get('configurations')['devices'] = devices + self.state_rpc.report_state(self.context, + self.agent_state) + self.agent_state.pop('start_flag', None) + except Exception: + LOG.exception(_("Failed reporting state!")) + + def _setup_rpc(self): + self.agent_id = 'mlnx-agent.%s' % socket.gethostname() + LOG.info(_("RPC agent_id: %s"), self.agent_id) + + self.topic = topics.AGENT + self.plugin_rpc = MlnxEswitchPluginApi(topics.PLUGIN) + self.state_rpc = agent_rpc.PluginReportStateAPI(topics.PLUGIN) + # RPC network init + self.context = context.get_admin_context_without_session() + # Handle updates from service + self.endpoints = [MlnxEswitchRpcCallbacks(self.context, self)] + # Define the listening consumers for the agent + consumers = [[topics.PORT, topics.UPDATE], + [topics.NETWORK, topics.DELETE], + [topics.SECURITY_GROUP, topics.UPDATE]] + self.connection = agent_rpc.create_consumers(self.endpoints, + self.topic, + consumers) + + report_interval = cfg.CONF.AGENT.report_interval + if report_interval: + heartbeat = loopingcall.FixedIntervalLoopingCall( + self._report_state) 
+ heartbeat.start(interval=report_interval) + + def update_ports(self, registered_ports): + ports = self.eswitch.get_vnics_mac() + if ports == registered_ports: + return + added = ports - registered_ports + removed = registered_ports - ports + return {'current': ports, + 'added': added, + 'removed': removed} + + def process_network_ports(self, port_info): + resync_a = False + resync_b = False + if port_info.get('added'): + LOG.debug(_("Ports added!")) + resync_a = self.treat_devices_added(port_info['added']) + if port_info.get('removed'): + LOG.debug(_("Ports removed!")) + resync_b = self.treat_devices_removed(port_info['removed']) + # If one of the above opertaions fails => resync with plugin + return (resync_a | resync_b) + + def treat_vif_port(self, port_id, port_mac, + network_id, network_type, + physical_network, segmentation_id, + admin_state_up): + if self.eswitch.vnic_port_exists(port_mac): + if admin_state_up: + self.eswitch.port_up(network_id, + network_type, + physical_network, + segmentation_id, + port_id, + port_mac) + else: + self.eswitch.port_down(network_id, physical_network, port_mac) + else: + LOG.debug(_("No port %s defined on agent."), port_id) + + def treat_devices_added(self, devices): + resync = False + for device in devices: + LOG.info(_("Adding port with mac %s"), device) + try: + dev_details = self.plugin_rpc.get_device_details( + self.context, + device, + self.agent_id) + except Exception as e: + LOG.debug(_("Unable to get device dev_details for device " + "with mac_address %(device)s: due to %(exc)s"), + {'device': device, 'exc': e}) + resync = True + continue + if 'port_id' in dev_details: + LOG.info(_("Port %s updated"), device) + LOG.debug(_("Device details %s"), str(dev_details)) + self.treat_vif_port(dev_details['port_id'], + dev_details['device'], + dev_details['network_id'], + dev_details['network_type'], + dev_details['physical_network'], + dev_details['segmentation_id'], + dev_details['admin_state_up']) + if 
dev_details.get('admin_state_up'): + self.plugin_rpc.update_device_up(self.context, + device, + self.agent_id) + else: + LOG.debug(_("Device with mac_address %s not defined " + "on Neutron Plugin"), device) + return resync + + def treat_devices_removed(self, devices): + resync = False + for device in devices: + LOG.info(_("Removing device with mac_address %s"), device) + try: + port_id = self.eswitch.get_port_id_by_mac(device) + dev_details = self.plugin_rpc.update_device_down(self.context, + port_id, + self.agent_id, + cfg.CONF.host) + except Exception as e: + LOG.debug(_("Removing port failed for device %(device)s " + "due to %(exc)s"), {'device': device, 'exc': e}) + resync = True + continue + if dev_details['exists']: + LOG.info(_("Port %s updated."), device) + else: + LOG.debug(_("Device %s not defined on plugin"), device) + self.eswitch.port_release(device) + return resync + + def daemon_loop(self): + sync = True + ports = set() + + LOG.info(_("eSwitch Agent Started!")) + + while True: + try: + start = time.time() + if sync: + LOG.info(_("Agent out of sync with plugin!")) + ports.clear() + sync = False + + port_info = self.update_ports(ports) + # notify plugin about port deltas + if port_info: + LOG.debug(_("Agent loop process devices!")) + # If treat devices fails - must resync with plugin + sync = self.process_network_ports(port_info) + ports = port_info['current'] + except exceptions.RequestTimeout: + LOG.exception(_("Request timeout in agent event loop " + "eSwitchD is not responding - exiting...")) + raise SystemExit(1) + except Exception: + LOG.exception(_("Error in agent event loop")) + sync = True + # sleep till end of polling interval + elapsed = (time.time() - start) + if (elapsed < self._polling_interval): + time.sleep(self._polling_interval - elapsed) + else: + LOG.debug(_("Loop iteration exceeded interval " + "(%(polling_interval)s vs. 
%(elapsed)s)"), + {'polling_interval': self._polling_interval, + 'elapsed': elapsed}) + + +def main(): + common_config.init(sys.argv[1:]) + common_config.setup_logging(cfg.CONF) + + try: + interface_mappings = q_utils.parse_mappings( + cfg.CONF.ESWITCH.physical_interface_mappings) + except ValueError as e: + LOG.error(_("Parsing physical_interface_mappings failed: %s." + " Agent terminated!"), e) + sys.exit(1) + LOG.info(_("Interface mappings: %s"), interface_mappings) + + try: + agent = MlnxEswitchNeutronAgent(interface_mappings) + except Exception as e: + LOG.error(_("Failed on Agent initialisation : %s." + " Agent terminated!"), e) + sys.exit(1) + + # Start everything. + LOG.info(_("Agent initialised successfully, now running... ")) + agent.daemon_loop() + sys.exit(0) + + +if __name__ == '__main__': + main() diff --git a/neutron/plugins/mlnx/agent/utils.py b/neutron/plugins/mlnx/agent/utils.py new file mode 100644 index 000000000..924be790f --- /dev/null +++ b/neutron/plugins/mlnx/agent/utils.py @@ -0,0 +1,144 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013 Mellanox Technologies, Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from neutron.openstack.common import importutils +from neutron.openstack.common import jsonutils +from neutron.openstack.common import log as logging +from neutron.plugins.mlnx.common import comm_utils +from neutron.plugins.mlnx.common import exceptions + +zmq = importutils.try_import('eventlet.green.zmq') + +LOG = logging.getLogger(__name__) + + +class EswitchUtils(object): + def __init__(self, daemon_endpoint, timeout): + if not zmq: + msg = _("Failed to import eventlet.green.zmq. " + "Won't connect to eSwitchD - exiting...") + LOG.error(msg) + raise SystemExit(1) + self.__conn = None + self.daemon = daemon_endpoint + self.timeout = timeout + + @property + def _conn(self): + if self.__conn is None: + context = zmq.Context() + socket = context.socket(zmq.REQ) + socket.setsockopt(zmq.LINGER, 0) + socket.connect(self.daemon) + self.__conn = socket + self.poller = zmq.Poller() + self.poller.register(self._conn, zmq.POLLIN) + return self.__conn + + @comm_utils.RetryDecorator(exceptions.RequestTimeout) + def send_msg(self, msg): + self._conn.send(msg) + + socks = dict(self.poller.poll(self.timeout)) + if socks.get(self._conn) == zmq.POLLIN: + recv_msg = self._conn.recv() + response = self.parse_response_msg(recv_msg) + return response + else: + self._conn.setsockopt(zmq.LINGER, 0) + self._conn.close() + self.poller.unregister(self._conn) + self.__conn = None + raise exceptions.RequestTimeout() + + def parse_response_msg(self, recv_msg): + msg = jsonutils.loads(recv_msg) + if msg['status'] == 'OK': + if 'response' in msg: + return msg.get('response') + return + elif msg['status'] == 'FAIL': + msg_dict = dict(action=msg['action'], reason=msg['reason']) + error_msg = _("Action %(action)s failed: %(reason)s") % msg_dict + else: + error_msg = _("Unknown operation status %s") % msg['status'] + LOG.error(error_msg) + raise exceptions.OperationFailed(err_msg=error_msg) + + def get_attached_vnics(self): + LOG.debug(_("get_attached_vnics")) + msg = jsonutils.dumps({'action': 
'get_vnics', 'fabric': '*'}) + vnics = self.send_msg(msg) + return vnics + + def set_port_vlan_id(self, physical_network, + segmentation_id, port_mac): + LOG.debug(_("Set Vlan %(segmentation_id)s on Port %(port_mac)s " + "on Fabric %(physical_network)s"), + {'port_mac': port_mac, + 'segmentation_id': segmentation_id, + 'physical_network': physical_network}) + msg = jsonutils.dumps({'action': 'set_vlan', + 'fabric': physical_network, + 'port_mac': port_mac, + 'vlan': segmentation_id}) + self.send_msg(msg) + + def define_fabric_mappings(self, interface_mapping): + for fabric, phy_interface in interface_mapping.iteritems(): + LOG.debug(_("Define Fabric %(fabric)s on interface %(ifc)s"), + {'fabric': fabric, + 'ifc': phy_interface}) + msg = jsonutils.dumps({'action': 'define_fabric_mapping', + 'fabric': fabric, + 'interface': phy_interface}) + self.send_msg(msg) + + def port_up(self, fabric, port_mac): + LOG.debug(_("Port Up for %(port_mac)s on fabric %(fabric)s"), + {'port_mac': port_mac, 'fabric': fabric}) + msg = jsonutils.dumps({'action': 'port_up', + 'fabric': fabric, + 'ref_by': 'mac_address', + 'mac': 'port_mac'}) + self.send_msg(msg) + + def port_down(self, fabric, port_mac): + LOG.debug(_("Port Down for %(port_mac)s on fabric %(fabric)s"), + {'port_mac': port_mac, 'fabric': fabric}) + msg = jsonutils.dumps({'action': 'port_down', + 'fabric': fabric, + 'ref_by': 'mac_address', + 'mac': port_mac}) + self.send_msg(msg) + + def port_release(self, fabric, port_mac): + LOG.debug(_("Port Release for %(port_mac)s on fabric %(fabric)s"), + {'port_mac': port_mac, 'fabric': fabric}) + msg = jsonutils.dumps({'action': 'port_release', + 'fabric': fabric, + 'ref_by': 'mac_address', + 'mac': port_mac}) + self.send_msg(msg) + + def get_eswitch_ports(self, fabric): + # TODO(irena) - to implement for next phase + return {} + + def get_eswitch_id(self, fabric): + # TODO(irena) - to implement for next phase + return "" diff --git a/neutron/plugins/mlnx/agent_notify_api.py 
b/neutron/plugins/mlnx/agent_notify_api.py new file mode 100644 index 000000000..1874da826 --- /dev/null +++ b/neutron/plugins/mlnx/agent_notify_api.py @@ -0,0 +1,67 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013 Mellanox Technologies, Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from oslo.config import cfg + +from neutron.agent import securitygroups_rpc as sg_rpc +from neutron.common import rpc_compat +from neutron.common import topics +from neutron.openstack.common import log as logging + +LOG = logging.getLogger(__name__) + + +class AgentNotifierApi(rpc_compat.RpcProxy, + sg_rpc.SecurityGroupAgentRpcApiMixin): + """Agent side of the Embedded Switch RPC API. + + API version history: + 1.0 - Initial version. + 1.1 - Added get_active_networks_info, create_dhcp_port, + and update_dhcp_port methods. 
+ """ + BASE_RPC_API_VERSION = '1.1' + + def __init__(self, topic): + super(AgentNotifierApi, self).__init__( + topic=topic, default_version=self.BASE_RPC_API_VERSION) + self.topic = topic + self.topic_network_delete = topics.get_topic_name(topic, + topics.NETWORK, + topics.DELETE) + self.topic_port_update = topics.get_topic_name(topic, + topics.PORT, + topics.UPDATE) + + def network_delete(self, context, network_id): + LOG.debug(_("Sending delete network message")) + self.fanout_cast(context, + self.make_msg('network_delete', + network_id=network_id), + topic=self.topic_network_delete) + + def port_update(self, context, port, physical_network, + network_type, vlan_id): + LOG.debug(_("Sending update port message")) + kwargs = {'port': port, + 'network_type': network_type, + 'physical_network': physical_network, + 'segmentation_id': vlan_id} + if cfg.CONF.AGENT.rpc_support_old_agents: + kwargs['vlan_id'] = vlan_id + msg = self.make_msg('port_update', **kwargs) + self.fanout_cast(context, msg, + topic=self.topic_port_update) diff --git a/neutron/plugins/mlnx/common/__init__.py b/neutron/plugins/mlnx/common/__init__.py new file mode 100644 index 000000000..c818bfe31 --- /dev/null +++ b/neutron/plugins/mlnx/common/__init__.py @@ -0,0 +1,16 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013 Mellanox Technologies, Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
class RetryDecorator(object):
    """Retry decorator that reruns a method if an exception occurs.

    If the decorated method raises `exceptionToCheck`, it is retried
    `retries` times, sleeping between attempts with an exponentially
    increasing back-off period (`interval`, multiplied by `backoff_rate`
    after each failed attempt).  When all retries are exhausted, one
    final call is made and its exception is allowed to propagate.

    :param exceptionToCheck: the exception type that triggers a retry
    :param interval: initial delay between retries in seconds
                     (default: ESWITCH.request_timeout / 1000)
    :param retries: number of retries before the final attempt
                    (default: ESWITCH.retries)
    :param backoff_rate: multiplier applied to the sleep interval after
                         each failed attempt (default: ESWITCH.backoff_rate)
    :raises: exceptionToCheck
    """

    # Hook point so tests can stub out the real sleep.
    sleep_fn = time.sleep

    def __init__(self, exceptionToCheck,
                 interval=None, retries=None, backoff_rate=None):
        # Resolve the configured defaults lazily (at decoration time)
        # rather than in the argument defaults: evaluating cfg.CONF at
        # class-definition time freezes the option defaults at import,
        # before the configuration files have been parsed, so any
        # operator-set values would be silently ignored.
        if interval is None:
            interval = cfg.CONF.ESWITCH.request_timeout / 1000
        if retries is None:
            retries = cfg.CONF.ESWITCH.retries
        if backoff_rate is None:
            backoff_rate = cfg.CONF.ESWITCH.backoff_rate
        self.exc = exceptionToCheck
        self.interval = interval
        self.retries = retries
        self.backoff_rate = backoff_rate

    def __call__(self, original_func):
        def decorated(*args, **kwargs):
            sleep_interval = self.interval
            num_of_iter = self.retries
            while num_of_iter > 0:
                try:
                    return original_func(*args, **kwargs)
                except self.exc:
                    LOG.debug(_("Request timeout - call again after "
                                "%s seconds"), sleep_interval)
                    RetryDecorator.sleep_fn(sleep_interval)
                    num_of_iter -= 1
                    sleep_interval *= self.backoff_rate

            # Final attempt: let the exception propagate to the caller.
            return original_func(*args, **kwargs)
        return decorated
"""Configuration options for the Mellanox (mlnx) plugin and eSwitch agent."""

from oslo.config import cfg

from neutron.agent.common import config
from neutron.plugins.mlnx.common import constants

DEFAULT_VLAN_RANGES = ['default:1:1000']
DEFAULT_INTERFACE_MAPPINGS = []

# [MLNX] section: tenant network type and provider VLAN configuration.
# NOTE(review): the "<...>" placeholder tokens in the help strings had
# been stripped (presumably as markup); restored to match the documented
# option syntax.
vlan_opts = [
    cfg.StrOpt('tenant_network_type', default='vlan',
               help=_("Network type for tenant networks "
                      "(local, vlan, or none)")),
    cfg.ListOpt('network_vlan_ranges',
                default=DEFAULT_VLAN_RANGES,
                help=_("List of <physical_network>:<vlan_min>:<vlan_max> "
                       "or <physical_network>")),
    cfg.ListOpt('physical_network_type_mappings',
                default=[],
                help=_("List of <physical_network>:<physical_network_type> "
                       "with physical_network_type is either eth or ib")),
    cfg.StrOpt('physical_network_type', default='eth',
               help=_("Physical network type for provider network "
                      "(eth or ib)"))
]


# [ESWITCH] section: how the agent reaches and retries the eSwitch daemon.
eswitch_opts = [
    cfg.ListOpt('physical_interface_mappings',
                default=DEFAULT_INTERFACE_MAPPINGS,
                help=_("List of <physical_network>:<physical_interface>")),
    cfg.StrOpt('vnic_type',
               default=constants.VIF_TYPE_DIRECT,
               help=_("Type of VM network interface: mlnx_direct or "
                      "hostdev")),
    cfg.StrOpt('daemon_endpoint',
               default='tcp://127.0.0.1:60001',
               help=_('eswitch daemon end point')),
    cfg.IntOpt('request_timeout', default=3000,
               help=_("The number of milliseconds the agent will wait for "
                      "response on request to daemon.")),
    cfg.IntOpt('retries', default=3,
               help=_("The number of retries the agent will send request "
                      "to daemon before giving up")),
    cfg.IntOpt('backoff_rate', default=2,
               help=_("backoff rate multiplier for waiting period between "
                      "retries for request to daemon, i.e. value of 2 will "
                      "double the request timeout each retry")),
]

# [AGENT] section: polling cadence and RPC backward compatibility.
agent_opts = [
    cfg.IntOpt('polling_interval', default=2,
               help=_("The number of seconds the agent will wait between "
                      "polling for local device changes.")),
    cfg.BoolOpt('rpc_support_old_agents', default=False,
                help=_("Enable server RPC compatibility with old agents")),
]


cfg.CONF.register_opts(vlan_opts, "MLNX")
cfg.CONF.register_opts(eswitch_opts, "ESWITCH")
cfg.CONF.register_opts(agent_opts, "AGENT")
config.register_agent_state_opts_helper(cfg.CONF)
config.register_root_helper(cfg.CONF)
# -- neutron/plugins/mlnx/common/constants.py --

# Sentinel segmentation ids stored in the network-binding table for
# networks that carry no real VLAN tag.
LOCAL_VLAN_ID = -2
FLAT_VLAN_ID = -1

# Values for physical network_type
TYPE_IB = 'ib'
TYPE_ETH = 'eth'

# VIF types exposed through the portbindings extension.
VIF_TYPE_DIRECT = 'mlnx_direct'
VIF_TYPE_HOSTDEV = 'hostdev'

# Key looked up in the port binding profile to choose the VIF type.
VNIC_TYPE = 'vnic_type'


# -- neutron/plugins/mlnx/common/exceptions.py --

from neutron.common import exceptions as qexc


class MlnxException(qexc.NeutronException):
    """Generic Mellanox plugin error; %(err_msg)s carries the details."""
    message = _("Mlnx Exception: %(err_msg)s")


class RequestTimeout(qexc.NeutronException):
    """Raised when the eSwitch daemon does not answer a request in time."""
    message = _("Request Timeout: no response from eSwitchD")


class OperationFailed(qexc.NeutronException):
    """Raised when eSwitchD reports a failure; %(err_msg)s has details."""
    message = _("Operation Failed: %(err_msg)s")
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/neutron/plugins/mlnx/db/mlnx_db_v2.py b/neutron/plugins/mlnx/db/mlnx_db_v2.py new file mode 100644 index 000000000..507934b0d --- /dev/null +++ b/neutron/plugins/mlnx/db/mlnx_db_v2.py @@ -0,0 +1,257 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013 Mellanox Technologies, Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
"""DB access layer for the Mellanox (mlnx) plugin.

Manages the VLAN segmentation-id allocation pool, network/port binding
rows (declared in mlnx_models_v2), and port lookups used by the agent
RPC handlers.  Helpers that take a session nest in the caller's
transaction via session.begin(subtransactions=True).
"""

from six import moves
from sqlalchemy.orm import exc

from neutron.common import exceptions as n_exc
import neutron.db.api as db
from neutron.db import models_v2
from neutron.db import securitygroups_db as sg_db
from neutron import manager
from neutron.openstack.common import log as logging
from neutron.plugins.mlnx.common import config  # noqa
from neutron.plugins.mlnx.db import mlnx_models_v2

LOG = logging.getLogger(__name__)


def _remove_non_allocatable_vlans(session, allocations,
                                  physical_network, vlan_ids):
    # Reconcile existing DB rows for one configured physical network:
    # vlans still configured are dropped from vlan_ids (so the caller only
    # inserts the missing ones); rows no longer configured and not
    # allocated are deleted.  The network is then removed from
    # `allocations` so _remove_unconfigured_vlans only sees networks that
    # are absent from the configuration.
    if physical_network in allocations:
        for entry in allocations[physical_network]:
            try:
                # see if vlan is allocatable
                vlan_ids.remove(entry.segmentation_id)
            except KeyError:
                # it's not allocatable, so check if its allocated
                if not entry.allocated:
                    # it's not, so remove it from table
                    LOG.debug(_(
                        "Removing vlan %(seg_id)s on "
                        "physical network "
                        "%(net)s from pool"),
                        {'seg_id': entry.segmentation_id,
                         'net': physical_network})
                    session.delete(entry)
        del allocations[physical_network]


def _add_missing_allocatable_vlans(session, physical_network, vlan_ids):
    # Insert pool rows for configured vlans that had no DB entry yet.
    for vlan_id in sorted(vlan_ids):
        entry = mlnx_models_v2.SegmentationIdAllocation(physical_network,
                                                        vlan_id)
        session.add(entry)


def _remove_unconfigured_vlans(session, allocations):
    # Delete unallocated rows belonging to physical networks that are no
    # longer present in the configuration at all.
    for entries in allocations.itervalues():
        for entry in entries:
            if not entry.allocated:
                LOG.debug(_("Removing vlan %(seg_id)s on physical "
                            "network %(net)s from pool"),
                          {'seg_id': entry.segmentation_id,
                           'net': entry.physical_network})
                session.delete(entry)


def sync_network_states(network_vlan_ranges):
    """Synchronize network_states table with current configured VLAN ranges."""

    session = db.get_session()
    with session.begin():
        # get existing allocations for all physical networks
        allocations = dict()
        entries = (session.query(mlnx_models_v2.SegmentationIdAllocation).
                   all())
        for entry in entries:
            allocations.setdefault(entry.physical_network, set()).add(entry)

        # process vlan ranges for each configured physical network
        for physical_network, vlan_ranges in network_vlan_ranges.iteritems():
            # determine current configured allocatable vlans for this
            # physical network
            vlan_ids = set()
            for vlan_range in vlan_ranges:
                vlan_ids |= set(moves.xrange(vlan_range[0],
                                             vlan_range[1] + 1))

            # remove from table unallocated vlans not currently allocatable
            _remove_non_allocatable_vlans(session, allocations,
                                          physical_network, vlan_ids)

            # add missing allocatable vlans to table
            _add_missing_allocatable_vlans(session, physical_network,
                                           vlan_ids)

        # remove from table unallocated vlans for any unconfigured physical
        # networks
        _remove_unconfigured_vlans(session, allocations)


def get_network_state(physical_network, segmentation_id):
    """Get entry of specified network, or None if not present."""
    session = db.get_session()
    qry = session.query(mlnx_models_v2.SegmentationIdAllocation)
    qry = qry.filter_by(physical_network=physical_network,
                        segmentation_id=segmentation_id)
    return qry.first()


def reserve_network(session):
    """Allocate any free vlan from the pool.

    Returns a (physical_network, segmentation_id) tuple; raises
    NoNetworkAvailable when the pool is exhausted.  The row is locked
    FOR UPDATE so concurrent reservations cannot pick the same vlan.
    """
    with session.begin(subtransactions=True):
        entry = (session.query(mlnx_models_v2.SegmentationIdAllocation).
                 filter_by(allocated=False).
                 with_lockmode('update').
                 first())
        if not entry:
            raise n_exc.NoNetworkAvailable()
        LOG.debug(_("Reserving vlan %(seg_id)s on physical network "
                    "%(net)s from pool"),
                  {'seg_id': entry.segmentation_id,
                   'net': entry.physical_network})
        entry.allocated = True
        return (entry.physical_network, entry.segmentation_id)


def reserve_specific_network(session, physical_network, segmentation_id):
    """Allocate one specific vlan; raises VlanIdInUse if already taken.

    A vlan outside the configured pool gets a fresh allocated row.
    """
    with session.begin(subtransactions=True):
        log_args = {'seg_id': segmentation_id, 'phy_net': physical_network}
        try:
            entry = (session.query(mlnx_models_v2.SegmentationIdAllocation).
                     filter_by(physical_network=physical_network,
                               segmentation_id=segmentation_id).
                     with_lockmode('update').one())
            if entry.allocated:
                raise n_exc.VlanIdInUse(vlan_id=segmentation_id,
                                        physical_network=physical_network)
            LOG.debug(_("Reserving specific vlan %(seg_id)s "
                        "on physical network %(phy_net)s from pool"),
                      log_args)
            entry.allocated = True
        except exc.NoResultFound:
            LOG.debug(_("Reserving specific vlan %(seg_id)s on "
                        "physical network %(phy_net)s outside pool"),
                      log_args)
            entry = mlnx_models_v2.SegmentationIdAllocation(physical_network,
                                                            segmentation_id)
            entry.allocated = True
            session.add(entry)


def release_network(session, physical_network,
                    segmentation_id, network_vlan_ranges):
    """Free a vlan: back to the pool if configured, else delete the row."""
    with session.begin(subtransactions=True):
        log_args = {'seg_id': segmentation_id, 'phy_net': physical_network}
        try:
            state = (session.query(mlnx_models_v2.SegmentationIdAllocation).
                     filter_by(physical_network=physical_network,
                               segmentation_id=segmentation_id).
                     with_lockmode('update').
                     one())
            state.allocated = False
            inside = False
            for vlan_range in network_vlan_ranges.get(physical_network, []):
                if (segmentation_id >= vlan_range[0] and
                        segmentation_id <= vlan_range[1]):
                    inside = True
                    break
            if inside:
                LOG.debug(_("Releasing vlan %(seg_id)s "
                            "on physical network "
                            "%(phy_net)s to pool"),
                          log_args)
            else:
                LOG.debug(_("Releasing vlan %(seg_id)s "
                            "on physical network "
                            "%(phy_net)s outside pool"),
                          log_args)
                session.delete(state)
        except exc.NoResultFound:
            LOG.warning(_("vlan_id %(seg_id)s on physical network "
                          "%(phy_net)s not found"),
                        log_args)


def add_network_binding(session, network_id, network_type,
                        physical_network, vlan_id):
    """Record the (type, physical net, vlan) binding of a network."""
    with session.begin(subtransactions=True):
        binding = mlnx_models_v2.NetworkBinding(network_id, network_type,
                                                physical_network, vlan_id)
        session.add(binding)


def get_network_binding(session, network_id):
    # Returns the binding row, or None if the network has no binding.
    return (session.query(mlnx_models_v2.NetworkBinding).
            filter_by(network_id=network_id).first())


def add_port_profile_binding(session, port_id, vnic_type):
    """Record the vnic_type chosen for a port."""
    with session.begin(subtransactions=True):
        binding = mlnx_models_v2.PortProfileBinding(port_id, vnic_type)
        session.add(binding)


def get_port_profile_binding(session, port_id):
    # Returns the port-profile row, or None if the port has no binding.
    return (session.query(mlnx_models_v2.PortProfileBinding).
            filter_by(port_id=port_id).first())


def get_port_from_device(device):
    """Get port from database.

    `device` is matched as a prefix of the port id; the returned dict also
    carries the port's security group ids and fixed IP addresses.
    Returns None when no port matches.
    """
    LOG.debug(_("get_port_from_device() called"))
    session = db.get_session()
    sg_binding_port = sg_db.SecurityGroupPortBinding.port_id

    query = session.query(models_v2.Port,
                          sg_db.SecurityGroupPortBinding.security_group_id)
    query = query.outerjoin(sg_db.SecurityGroupPortBinding,
                            models_v2.Port.id == sg_binding_port)
    query = query.filter(models_v2.Port.id.startswith(device))
    port_and_sgs = query.all()
    if not port_and_sgs:
        return
    port = port_and_sgs[0][0]
    plugin = manager.NeutronManager.get_plugin()
    port_dict = plugin._make_port_dict(port)
    port_dict['security_groups'] = [
        sg_id for port_in_db, sg_id in port_and_sgs if sg_id
    ]
    port_dict['security_group_rules'] = []
    port_dict['security_group_source_groups'] = []
    port_dict['fixed_ips'] = [ip['ip_address']
                              for ip in port['fixed_ips']]
    return port_dict


def get_port_from_device_mac(device_mac):
    """Get port from database by exact MAC address; None if absent."""
    LOG.debug(_("Get_port_from_device_mac() called"))
    session = db.get_session()
    qry = session.query(models_v2.Port).filter_by(mac_address=device_mac)
    return qry.first()


def set_port_status(port_id, status):
    """Set the port status; raises PortNotFound for an unknown port."""
    LOG.debug(_("Set_port_status as %s called"), status)
    session = db.get_session()
    try:
        port = session.query(models_v2.Port).filter_by(id=port_id).one()
        port['status'] = status
        session.merge(port)
        session.flush()
    except exc.NoResultFound:
        raise n_exc.PortNotFound(port_id=port_id)
"""SQLAlchemy models backing the Mellanox (mlnx) plugin DB layer."""

import sqlalchemy as sa

from neutron.db import model_base


class SegmentationIdAllocation(model_base.BASEV2):
    """Represents allocation state of segmentation_id on physical network."""
    __tablename__ = 'segmentation_id_allocation'

    physical_network = sa.Column(sa.String(64), nullable=False,
                                 primary_key=True)
    segmentation_id = sa.Column(sa.Integer, nullable=False, primary_key=True,
                                autoincrement=False)
    allocated = sa.Column(sa.Boolean, nullable=False, default=False)

    def __init__(self, physical_network, segmentation_id):
        self.physical_network = physical_network
        self.segmentation_id = segmentation_id
        self.allocated = False

    def __repr__(self):
        # The format string was empty (""), so "" % (3 args) raised
        # TypeError on every repr; restored a descriptive format.
        return "<SegmentationIdAllocation(%s,%d,%s)>" % (
            self.physical_network,
            self.segmentation_id,
            self.allocated)


class NetworkBinding(model_base.BASEV2):
    """Represents binding of virtual network.

    Binds network to physical_network and segmentation_id
    """
    __tablename__ = 'mlnx_network_bindings'

    network_id = sa.Column(sa.String(36),
                           sa.ForeignKey('networks.id', ondelete="CASCADE"),
                           primary_key=True)
    network_type = sa.Column(sa.String(32), nullable=False)
    physical_network = sa.Column(sa.String(64))
    segmentation_id = sa.Column(sa.Integer, nullable=False)

    def __init__(self, network_id, network_type, physical_network, vlan_id):
        self.network_id = network_id
        self.network_type = network_type
        self.physical_network = physical_network
        self.segmentation_id = vlan_id

    def __repr__(self):
        # Restored format string (was ""), see SegmentationIdAllocation.
        return "<NetworkBinding(%s,%s,%s,%d)>" % (self.network_id,
                                                  self.network_type,
                                                  self.physical_network,
                                                  self.segmentation_id)


class PortProfileBinding(model_base.BASEV2):
    """Represents port profile binding to the port on virtual network."""
    __tablename__ = 'port_profile'

    port_id = sa.Column(sa.String(36),
                        sa.ForeignKey('ports.id', ondelete="CASCADE"),
                        primary_key=True)
    vnic_type = sa.Column(sa.String(32), nullable=False)

    def __init__(self, port_id, vnic_type):
        self.port_id = port_id
        self.vnic_type = vnic_type

    def __repr__(self):
        # Restored format string (was ""), see SegmentationIdAllocation.
        return "<PortProfileBinding(%s,%s)>" % (self.port_id,
                                                self.vnic_type)
+# See the License for the specific language governing permissions and +# limitations under the License. + +import sys + +from oslo.config import cfg + +from neutron.agent import securitygroups_rpc as sg_rpc +from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api +from neutron.api.rpc.agentnotifiers import l3_rpc_agent_api +from neutron.api.v2 import attributes +from neutron.common import constants as q_const +from neutron.common import exceptions as n_exc +from neutron.common import rpc_compat +from neutron.common import topics +from neutron.common import utils +from neutron.db import agents_db +from neutron.db import agentschedulers_db +from neutron.db import db_base_plugin_v2 +from neutron.db import external_net_db +from neutron.db import extraroute_db +from neutron.db import l3_agentschedulers_db +from neutron.db import l3_gwmode_db +from neutron.db import portbindings_db +from neutron.db import quota_db # noqa +from neutron.db import securitygroups_rpc_base as sg_db_rpc +from neutron.extensions import portbindings +from neutron.extensions import providernet as provider +from neutron.openstack.common import importutils +from neutron.openstack.common import log as logging +from neutron.plugins.common import constants as svc_constants +from neutron.plugins.common import utils as plugin_utils +from neutron.plugins.mlnx import agent_notify_api +from neutron.plugins.mlnx.common import constants +from neutron.plugins.mlnx.db import mlnx_db_v2 as db +from neutron.plugins.mlnx import rpc_callbacks + +LOG = logging.getLogger(__name__) + + +class MellanoxEswitchPlugin(db_base_plugin_v2.NeutronDbPluginV2, + external_net_db.External_net_db_mixin, + extraroute_db.ExtraRoute_db_mixin, + l3_gwmode_db.L3_NAT_db_mixin, + sg_db_rpc.SecurityGroupServerRpcMixin, + l3_agentschedulers_db.L3AgentSchedulerDbMixin, + agentschedulers_db.DhcpAgentSchedulerDbMixin, + portbindings_db.PortBindingMixin): + """Realization of Neutron API on Mellanox HCA embedded switch technology. 
+ + Current plugin provides embedded HCA Switch connectivity. + Code is based on the Linux Bridge plugin content to + support consistency with L3 & DHCP Agents. + + A new VLAN is created for each network. An agent is relied upon + to perform the actual HCA configuration on each host. + + The provider extension is also supported. + + The port binding extension enables an external application relay + information to and from the plugin. + """ + + # This attribute specifies whether the plugin supports or not + # bulk operations. Name mangling is used in order to ensure it + # is qualified by class + __native_bulk_support = True + + _supported_extension_aliases = ["provider", "external-net", "router", + "ext-gw-mode", "binding", "quotas", + "security-group", "agent", "extraroute", + "l3_agent_scheduler", + "dhcp_agent_scheduler"] + + @property + def supported_extension_aliases(self): + if not hasattr(self, '_aliases'): + aliases = self._supported_extension_aliases[:] + sg_rpc.disable_security_group_extension_by_config(aliases) + self._aliases = aliases + return self._aliases + + def __init__(self): + """Start Mellanox Neutron Plugin.""" + super(MellanoxEswitchPlugin, self).__init__() + self._parse_network_config() + db.sync_network_states(self.network_vlan_ranges) + self._set_tenant_network_type() + self.vnic_type = cfg.CONF.ESWITCH.vnic_type + self.base_binding_dict = { + portbindings.VIF_TYPE: self.vnic_type, + portbindings.VIF_DETAILS: { + # TODO(rkukura): Replace with new VIF security details + portbindings.CAP_PORT_FILTER: + 'security-group' in self.supported_extension_aliases}} + self._setup_rpc() + self.network_scheduler = importutils.import_object( + cfg.CONF.network_scheduler_driver + ) + self.router_scheduler = importutils.import_object( + cfg.CONF.router_scheduler_driver + ) + LOG.debug(_("Mellanox Embedded Switch Plugin initialisation complete")) + + def _setup_rpc(self): + # RPC support + self.service_topics = {svc_constants.CORE: topics.PLUGIN, + 
svc_constants.L3_ROUTER_NAT: topics.L3PLUGIN} + self.conn = rpc_compat.create_connection(new=True) + self.endpoints = [rpc_callbacks.MlnxRpcCallbacks(), + agents_db.AgentExtRpcCallback()] + for svc_topic in self.service_topics.values(): + self.conn.create_consumer(svc_topic, self.endpoints, fanout=False) + # Consume from all consumers in threads + self.conn.consume_in_threads() + self.notifier = agent_notify_api.AgentNotifierApi(topics.AGENT) + self.agent_notifiers[q_const.AGENT_TYPE_DHCP] = ( + dhcp_rpc_agent_api.DhcpAgentNotifyAPI() + ) + self.agent_notifiers[q_const.AGENT_TYPE_L3] = ( + l3_rpc_agent_api.L3AgentNotifyAPI() + ) + + def _parse_network_config(self): + self._parse_physical_network_types() + self._parse_network_vlan_ranges() + for network in self.network_vlan_ranges.keys(): + if not self.phys_network_type_maps.get(network): + self.phys_network_type_maps[network] = self.physical_net_type + + def _parse_physical_network_types(self): + """Parse physical network types configuration. + + Verify default physical network type is valid. + Parse physical network mappings. + """ + self.physical_net_type = cfg.CONF.MLNX.physical_network_type + if self.physical_net_type not in (constants.TYPE_ETH, + constants.TYPE_IB): + LOG.error(_("Invalid physical network type %(type)s." + "Server terminated!"), {'type': self.physical_net_type}) + raise SystemExit(1) + try: + self.phys_network_type_maps = utils.parse_mappings( + cfg.CONF.MLNX.physical_network_type_mappings) + except ValueError as e: + LOG.error(_("Parsing physical_network_type failed: %s." + " Server terminated!"), e) + raise SystemExit(1) + for network, type in self.phys_network_type_maps.iteritems(): + if type not in (constants.TYPE_ETH, constants.TYPE_IB): + LOG.error(_("Invalid physical network type %(type)s " + " for network %(net)s. 
Server terminated!"), + {'net': network, 'type': type}) + raise SystemExit(1) + LOG.info(_("Physical Network type mappings: %s"), + self.phys_network_type_maps) + + def _parse_network_vlan_ranges(self): + try: + self.network_vlan_ranges = plugin_utils.parse_network_vlan_ranges( + cfg.CONF.MLNX.network_vlan_ranges) + except Exception as ex: + LOG.error(_("%s. Server terminated!"), ex) + sys.exit(1) + LOG.info(_("Network VLAN ranges: %s"), self.network_vlan_ranges) + + def _extend_network_dict_provider(self, context, network): + binding = db.get_network_binding(context.session, network['id']) + network[provider.NETWORK_TYPE] = binding.network_type + if binding.network_type == svc_constants.TYPE_FLAT: + network[provider.PHYSICAL_NETWORK] = binding.physical_network + network[provider.SEGMENTATION_ID] = None + elif binding.network_type == svc_constants.TYPE_LOCAL: + network[provider.PHYSICAL_NETWORK] = None + network[provider.SEGMENTATION_ID] = None + else: + network[provider.PHYSICAL_NETWORK] = binding.physical_network + network[provider.SEGMENTATION_ID] = binding.segmentation_id + + def _set_tenant_network_type(self): + self.tenant_network_type = cfg.CONF.MLNX.tenant_network_type + if self.tenant_network_type not in [svc_constants.TYPE_VLAN, + svc_constants.TYPE_LOCAL, + svc_constants.TYPE_NONE]: + LOG.error(_("Invalid tenant_network_type: %s. 
" + "Service terminated!"), + self.tenant_network_type) + sys.exit(1) + + def _process_provider_create(self, context, attrs): + network_type = attrs.get(provider.NETWORK_TYPE) + physical_network = attrs.get(provider.PHYSICAL_NETWORK) + segmentation_id = attrs.get(provider.SEGMENTATION_ID) + + network_type_set = attributes.is_attr_set(network_type) + physical_network_set = attributes.is_attr_set(physical_network) + segmentation_id_set = attributes.is_attr_set(segmentation_id) + + if not (network_type_set or physical_network_set or + segmentation_id_set): + return (None, None, None) + + if not network_type_set: + msg = _("provider:network_type required") + raise n_exc.InvalidInput(error_message=msg) + elif network_type == svc_constants.TYPE_FLAT: + self._process_flat_net(segmentation_id_set) + segmentation_id = constants.FLAT_VLAN_ID + + elif network_type == svc_constants.TYPE_VLAN: + self._process_vlan_net(segmentation_id, segmentation_id_set) + + elif network_type == svc_constants.TYPE_LOCAL: + self._process_local_net(physical_network_set, + segmentation_id_set) + segmentation_id = constants.LOCAL_VLAN_ID + physical_network = None + + else: + msg = _("provider:network_type %s not supported") % network_type + raise n_exc.InvalidInput(error_message=msg) + physical_network = self._process_net_type(network_type, + physical_network, + physical_network_set) + return (network_type, physical_network, segmentation_id) + + def _process_flat_net(self, segmentation_id_set): + if segmentation_id_set: + msg = _("provider:segmentation_id specified for flat network") + raise n_exc.InvalidInput(error_message=msg) + + def _process_vlan_net(self, segmentation_id, segmentation_id_set): + if not segmentation_id_set: + msg = _("provider:segmentation_id required") + raise n_exc.InvalidInput(error_message=msg) + if not utils.is_valid_vlan_tag(segmentation_id): + msg = (_("provider:segmentation_id out of range " + "(%(min_id)s through %(max_id)s)") % + {'min_id': q_const.MIN_VLAN_TAG, + 
'max_id': q_const.MAX_VLAN_TAG}) + raise n_exc.InvalidInput(error_message=msg) + + def _process_local_net(self, physical_network_set, segmentation_id_set): + if physical_network_set: + msg = _("provider:physical_network specified for local " + "network") + raise n_exc.InvalidInput(error_message=msg) + if segmentation_id_set: + msg = _("provider:segmentation_id specified for local " + "network") + raise n_exc.InvalidInput(error_message=msg) + + def _process_net_type(self, network_type, + physical_network, + physical_network_set): + if network_type in [svc_constants.TYPE_VLAN, + svc_constants.TYPE_FLAT]: + if physical_network_set: + if physical_network not in self.network_vlan_ranges: + msg = _("Unknown provider:physical_network " + "%s") % physical_network + raise n_exc.InvalidInput(error_message=msg) + elif 'default' in self.network_vlan_ranges: + physical_network = 'default' + else: + msg = _("provider:physical_network required") + raise n_exc.InvalidInput(error_message=msg) + return physical_network + + def _check_port_binding_for_net_type(self, vnic_type, net_type): + """ + VIF_TYPE_DIRECT is valid only for Ethernet fabric + """ + if net_type == constants.TYPE_ETH: + return vnic_type in (constants.VIF_TYPE_DIRECT, + constants.VIF_TYPE_HOSTDEV) + elif net_type == constants.TYPE_IB: + return vnic_type == constants.VIF_TYPE_HOSTDEV + return False + + def _process_port_binding_create(self, context, attrs): + binding_profile = attrs.get(portbindings.PROFILE) + binding_profile_set = attributes.is_attr_set(binding_profile) + + net_binding = db.get_network_binding(context.session, + attrs.get('network_id')) + phy_net = net_binding.physical_network + + if not binding_profile_set: + return self.vnic_type + if constants.VNIC_TYPE in binding_profile: + vnic_type = binding_profile[constants.VNIC_TYPE] + phy_net_type = self.phys_network_type_maps[phy_net] + if vnic_type in (constants.VIF_TYPE_DIRECT, + constants.VIF_TYPE_HOSTDEV): + if 
self._check_port_binding_for_net_type(vnic_type, + phy_net_type): + self.base_binding_dict[portbindings.VIF_TYPE] = vnic_type + return vnic_type + else: + msg = (_("Unsupported vnic type %(vnic_type)s " + "for physical network type %(net_type)s") % + {'vnic_type': vnic_type, 'net_type': phy_net_type}) + else: + msg = _("Invalid vnic_type on port_create") + else: + msg = _("vnic_type is not defined in port profile") + raise n_exc.InvalidInput(error_message=msg) + + def create_network(self, context, network): + (network_type, physical_network, + vlan_id) = self._process_provider_create(context, + network['network']) + session = context.session + with session.begin(subtransactions=True): + #set up default security groups + tenant_id = self._get_tenant_id_for_create( + context, network['network']) + self._ensure_default_security_group(context, tenant_id) + + if not network_type: + # tenant network + network_type = self.tenant_network_type + if network_type == svc_constants.TYPE_NONE: + raise n_exc.TenantNetworksDisabled() + elif network_type == svc_constants.TYPE_VLAN: + physical_network, vlan_id = db.reserve_network(session) + else: # TYPE_LOCAL + vlan_id = constants.LOCAL_VLAN_ID + else: + # provider network + if network_type in [svc_constants.TYPE_VLAN, + svc_constants.TYPE_FLAT]: + db.reserve_specific_network(session, + physical_network, + vlan_id) + net = super(MellanoxEswitchPlugin, self).create_network(context, + network) + db.add_network_binding(session, net['id'], + network_type, + physical_network, + vlan_id) + + self._process_l3_create(context, net, network['network']) + self._extend_network_dict_provider(context, net) + # note - exception will rollback entire transaction + LOG.debug(_("Created network: %s"), net['id']) + return net + + def update_network(self, context, net_id, network): + LOG.debug(_("Update network")) + provider._raise_if_updates_provider_attributes(network['network']) + + session = context.session + with 
session.begin(subtransactions=True): + net = super(MellanoxEswitchPlugin, self).update_network(context, + net_id, + network) + self._process_l3_update(context, net, network['network']) + self._extend_network_dict_provider(context, net) + return net + + def delete_network(self, context, net_id): + LOG.debug(_("Delete network")) + session = context.session + with session.begin(subtransactions=True): + binding = db.get_network_binding(session, net_id) + self._process_l3_delete(context, net_id) + super(MellanoxEswitchPlugin, self).delete_network(context, + net_id) + if binding.segmentation_id != constants.LOCAL_VLAN_ID: + db.release_network(session, binding.physical_network, + binding.segmentation_id, + self.network_vlan_ranges) + # the network_binding record is deleted via cascade from + # the network record, so explicit removal is not necessary + self.notifier.network_delete(context, net_id) + + def get_network(self, context, net_id, fields=None): + session = context.session + with session.begin(subtransactions=True): + net = super(MellanoxEswitchPlugin, self).get_network(context, + net_id, + None) + self._extend_network_dict_provider(context, net) + return self._fields(net, fields) + + def get_networks(self, context, filters=None, fields=None, + sorts=None, limit=None, marker=None, page_reverse=False): + session = context.session + with session.begin(subtransactions=True): + nets = super(MellanoxEswitchPlugin, + self).get_networks(context, filters, None, sorts, + limit, marker, page_reverse) + for net in nets: + self._extend_network_dict_provider(context, net) + + return [self._fields(net, fields) for net in nets] + + def _extend_port_dict_binding(self, context, port): + port_binding = db.get_port_profile_binding(context.session, + port['id']) + if port_binding: + port[portbindings.VIF_TYPE] = port_binding.vnic_type + binding = db.get_network_binding(context.session, + port['network_id']) + fabric = binding.physical_network + port[portbindings.PROFILE] = 
{'physical_network': fabric} + return port + + def create_port(self, context, port): + LOG.debug(_("create_port with %s"), port) + session = context.session + port_data = port['port'] + with session.begin(subtransactions=True): + self._ensure_default_security_group_on_port(context, port) + sgids = self._get_security_groups_on_port(context, port) + # Set port status as 'DOWN'. This will be updated by agent + port['port']['status'] = q_const.PORT_STATUS_DOWN + + vnic_type = self._process_port_binding_create(context, + port['port']) + + port = super(MellanoxEswitchPlugin, + self).create_port(context, port) + + self._process_portbindings_create_and_update(context, + port_data, + port) + db.add_port_profile_binding(context.session, port['id'], vnic_type) + + self._process_port_create_security_group( + context, port, sgids) + self.notify_security_groups_member_updated(context, port) + return self._extend_port_dict_binding(context, port) + + def get_port(self, context, id, fields=None): + port = super(MellanoxEswitchPlugin, self).get_port(context, + id, + fields) + self._extend_port_dict_binding(context, port) + return self._fields(port, fields) + + def get_ports(self, context, filters=None, fields=None, + sorts=None, limit=None, marker=None, page_reverse=False): + res_ports = [] + ports = super(MellanoxEswitchPlugin, + self).get_ports(context, filters, fields, sorts, + limit, marker, page_reverse) + for port in ports: + port = self._extend_port_dict_binding(context, port) + res_ports.append(self._fields(port, fields)) + return res_ports + + def update_port(self, context, port_id, port): + original_port = self.get_port(context, port_id) + session = context.session + need_port_update_notify = False + + with session.begin(subtransactions=True): + updated_port = super(MellanoxEswitchPlugin, self).update_port( + context, port_id, port) + self._process_portbindings_create_and_update(context, + port['port'], + updated_port) + need_port_update_notify = 
self.update_security_group_on_port( + context, port_id, port, original_port, updated_port) + + need_port_update_notify |= self.is_security_group_member_updated( + context, original_port, updated_port) + + if original_port['admin_state_up'] != updated_port['admin_state_up']: + need_port_update_notify = True + + if need_port_update_notify: + binding = db.get_network_binding(context.session, + updated_port['network_id']) + self.notifier.port_update(context, updated_port, + binding.physical_network, + binding.network_type, + binding.segmentation_id) + return self._extend_port_dict_binding(context, updated_port) + + def delete_port(self, context, port_id, l3_port_check=True): + # if needed, check to see if this is a port owned by + # and l3-router. If so, we should prevent deletion. + if l3_port_check: + self.prevent_l3_port_deletion(context, port_id) + + session = context.session + with session.begin(subtransactions=True): + self.disassociate_floatingips(context, port_id) + port = self.get_port(context, port_id) + self._delete_port_security_group_bindings(context, port_id) + super(MellanoxEswitchPlugin, self).delete_port(context, port_id) + + self.notify_security_groups_member_updated(context, port) diff --git a/neutron/plugins/mlnx/rpc_callbacks.py b/neutron/plugins/mlnx/rpc_callbacks.py new file mode 100644 index 000000000..346d35822 --- /dev/null +++ b/neutron/plugins/mlnx/rpc_callbacks.py @@ -0,0 +1,119 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013 Mellanox Technologies, Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +from oslo.config import cfg + +from neutron.common import constants as q_const +from neutron.common import rpc_compat +from neutron.db import api as db_api +from neutron.db import dhcp_rpc_base +from neutron.db import l3_rpc_base +from neutron.db import securitygroups_rpc_base as sg_db_rpc +from neutron.openstack.common import log as logging +from neutron.plugins.mlnx.db import mlnx_db_v2 as db + +LOG = logging.getLogger(__name__) + + +class MlnxRpcCallbacks(rpc_compat.RpcCallback, + dhcp_rpc_base.DhcpRpcCallbackMixin, + l3_rpc_base.L3RpcCallbackMixin, + sg_db_rpc.SecurityGroupServerRpcCallbackMixin): + # History + # 1.1 Support Security Group RPC + RPC_API_VERSION = '1.1' + + #to be compatible with Linux Bridge Agent on Network Node + TAP_PREFIX_LEN = 3 + + @classmethod + def get_port_from_device(cls, device): + """Get port according to device. + + To maintain compatibility with Linux Bridge L2 Agent for DHCP/L3 + services get device either by linux bridge plugin + device name convention or by mac address + """ + port = db.get_port_from_device(device[cls.TAP_PREFIX_LEN:]) + if port: + port['device'] = device + else: + port = db.get_port_from_device_mac(device) + if port: + port['device'] = device + return port + + def get_device_details(self, rpc_context, **kwargs): + """Agent requests device details.""" + agent_id = kwargs.get('agent_id') + device = kwargs.get('device') + LOG.debug(_("Device %(device)s details requested from %(agent_id)s"), + {'device': device, 'agent_id': agent_id}) + port = self.get_port_from_device(device) + if port: + binding = db.get_network_binding(db_api.get_session(), + port['network_id']) + entry = {'device': device, + 'physical_network': binding.physical_network, + 'network_type': binding.network_type, + 'segmentation_id': binding.segmentation_id, + 'network_id': port['network_id'], + 'port_mac': port['mac_address'], + 'port_id': 
port['id'], + 'admin_state_up': port['admin_state_up']} + if cfg.CONF.AGENT.rpc_support_old_agents: + entry['vlan_id'] = binding.segmentation_id + new_status = (q_const.PORT_STATUS_ACTIVE if port['admin_state_up'] + else q_const.PORT_STATUS_DOWN) + if port['status'] != new_status: + db.set_port_status(port['id'], new_status) + else: + entry = {'device': device} + LOG.debug(_("%s can not be found in database"), device) + return entry + + def update_device_down(self, rpc_context, **kwargs): + """Device no longer exists on agent.""" + agent_id = kwargs.get('agent_id') + device = kwargs.get('device') + LOG.debug(_("Device %(device)s no longer exists on %(agent_id)s"), + {'device': device, 'agent_id': agent_id}) + port = self.get_port_from_device(device) + if port: + entry = {'device': device, + 'exists': True} + if port['status'] != q_const.PORT_STATUS_DOWN: + # Set port status to DOWN + db.set_port_status(port['id'], q_const.PORT_STATUS_DOWN) + else: + entry = {'device': device, + 'exists': False} + LOG.debug(_("%s can not be found in database"), device) + return entry + + def update_device_up(self, rpc_context, **kwargs): + """Device is up on agent.""" + agent_id = kwargs.get('agent_id') + device = kwargs.get('device') + LOG.debug(_("Device %(device)s up %(agent_id)s"), + {'device': device, 'agent_id': agent_id}) + port = self.get_port_from_device(device) + if port: + if port['status'] != q_const.PORT_STATUS_ACTIVE: + # Set port status to ACTIVE + db.set_port_status(port['id'], q_const.PORT_STATUS_ACTIVE) + else: + LOG.debug(_("%s can not be found in database"), device) diff --git a/neutron/plugins/nec/README b/neutron/plugins/nec/README new file mode 100644 index 000000000..694b80e99 --- /dev/null +++ b/neutron/plugins/nec/README @@ -0,0 +1,13 @@ +Quantum NEC OpenFlow Plugin + + +# -- What's this? + +https://wiki.openstack.org/wiki/Neutron/NEC_OpenFlow_Plugin + + +# -- Installation + +Use QuickStart Script for this plugin. 
This provides you auto installation and +configuration of Nova, Neutron and Trema. +https://github.com/nec-openstack/quantum-openflow-plugin/tree/folsom diff --git a/neutron/plugins/nec/__init__.py b/neutron/plugins/nec/__init__.py new file mode 100644 index 000000000..362a36068 --- /dev/null +++ b/neutron/plugins/nec/__init__.py @@ -0,0 +1,15 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2012 NEC Corporation. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/neutron/plugins/nec/agent/__init__.py b/neutron/plugins/nec/agent/__init__.py new file mode 100644 index 000000000..362a36068 --- /dev/null +++ b/neutron/plugins/nec/agent/__init__.py @@ -0,0 +1,15 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2012 NEC Corporation. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
diff --git a/neutron/plugins/nec/agent/nec_neutron_agent.py b/neutron/plugins/nec/agent/nec_neutron_agent.py new file mode 100755 index 000000000..6ab5f82b4 --- /dev/null +++ b/neutron/plugins/nec/agent/nec_neutron_agent.py @@ -0,0 +1,252 @@ +#!/usr/bin/env python +# Copyright 2012 NEC Corporation. +# Based on ryu/openvswitch agents. +# +# Copyright 2012 Isaku Yamahata +# Copyright 2011 VMware, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# @author: Ryota MIBU +# @author: Akihiro MOTOKI + +import socket +import sys +import time + +import eventlet +eventlet.monkey_patch() + +from neutron.agent.linux import ovs_lib +from neutron.agent import rpc as agent_rpc +from neutron.agent import securitygroups_rpc as sg_rpc +from neutron.common import config as common_config +from neutron.common import constants as q_const +from neutron.common import rpc_compat +from neutron.common import topics +from neutron import context as q_context +from neutron.extensions import securitygroup as ext_sg +from neutron.openstack.common import log as logging +from neutron.openstack.common import loopingcall +from neutron.plugins.nec.common import config + + +LOG = logging.getLogger(__name__) + + +class NECPluginApi(agent_rpc.PluginApi): + BASE_RPC_API_VERSION = '1.0' + + def update_ports(self, context, agent_id, datapath_id, + port_added, port_removed): + """RPC to update information of ports on Neutron Server.""" + LOG.info(_("Update ports: added=%(added)s, " + 
"removed=%(removed)s"), + {'added': port_added, 'removed': port_removed}) + self.call(context, + self.make_msg('update_ports', + topic=topics.AGENT, + agent_id=agent_id, + datapath_id=datapath_id, + port_added=port_added, + port_removed=port_removed)) + + +class NECAgentRpcCallback(rpc_compat.RpcCallback): + + RPC_API_VERSION = '1.0' + + def __init__(self, context, agent, sg_agent): + super(NECAgentRpcCallback, self).__init__() + self.context = context + self.agent = agent + self.sg_agent = sg_agent + + def port_update(self, context, **kwargs): + LOG.debug(_("port_update received: %s"), kwargs) + port = kwargs.get('port') + # Validate that port is on OVS + vif_port = self.agent.int_br.get_vif_port_by_id(port['id']) + if not vif_port: + return + + if ext_sg.SECURITYGROUPS in port: + self.sg_agent.refresh_firewall() + + +class SecurityGroupServerRpcApi(rpc_compat.RpcProxy, + sg_rpc.SecurityGroupServerRpcApiMixin): + + def __init__(self, topic): + super(SecurityGroupServerRpcApi, self).__init__( + topic=topic, default_version=sg_rpc.SG_RPC_VERSION) + + +class SecurityGroupAgentRpcCallback( + rpc_compat.RpcCallback, + sg_rpc.SecurityGroupAgentRpcCallbackMixin): + + RPC_API_VERSION = sg_rpc.SG_RPC_VERSION + + def __init__(self, context, sg_agent): + super(SecurityGroupAgentRpcCallback, self).__init__() + self.context = context + self.sg_agent = sg_agent + + +class SecurityGroupAgentRpc(sg_rpc.SecurityGroupAgentRpcMixin): + + def __init__(self, context): + self.context = context + self.plugin_rpc = SecurityGroupServerRpcApi(topics.PLUGIN) + self.init_firewall() + + +class NECNeutronAgent(object): + + def __init__(self, integ_br, root_helper, polling_interval): + '''Constructor. + + :param integ_br: name of the integration bridge. + :param root_helper: utility to use when running shell cmds. + :param polling_interval: interval (secs) to check the bridge. 
+ ''' + self.int_br = ovs_lib.OVSBridge(integ_br, root_helper) + self.polling_interval = polling_interval + self.cur_ports = [] + self.need_sync = True + + self.datapath_id = "0x%s" % self.int_br.get_datapath_id() + + self.agent_state = { + 'binary': 'neutron-nec-agent', + 'host': config.CONF.host, + 'topic': q_const.L2_AGENT_TOPIC, + 'configurations': {}, + 'agent_type': q_const.AGENT_TYPE_NEC, + 'start_flag': True} + + self.setup_rpc() + + def setup_rpc(self): + self.host = socket.gethostname() + self.agent_id = 'nec-q-agent.%s' % self.host + LOG.info(_("RPC agent_id: %s"), self.agent_id) + + self.topic = topics.AGENT + self.context = q_context.get_admin_context_without_session() + + self.plugin_rpc = NECPluginApi(topics.PLUGIN) + self.state_rpc = agent_rpc.PluginReportStateAPI(topics.PLUGIN) + self.sg_agent = SecurityGroupAgentRpc(self.context) + + # RPC network init + # Handle updates from service + self.callback_nec = NECAgentRpcCallback(self.context, + self, self.sg_agent) + self.callback_sg = SecurityGroupAgentRpcCallback(self.context, + self.sg_agent) + self.endpoints = [self.callback_nec, self.callback_sg] + # Define the listening consumer for the agent + consumers = [[topics.PORT, topics.UPDATE], + [topics.SECURITY_GROUP, topics.UPDATE]] + self.connection = agent_rpc.create_consumers(self.endpoints, + self.topic, + consumers) + + report_interval = config.CONF.AGENT.report_interval + if report_interval: + heartbeat = loopingcall.FixedIntervalLoopingCall( + self._report_state) + heartbeat.start(interval=report_interval) + + def _report_state(self): + try: + # How many devices are likely used by a VM + num_devices = len(self.cur_ports) + self.agent_state['configurations']['devices'] = num_devices + self.state_rpc.report_state(self.context, + self.agent_state) + self.agent_state.pop('start_flag', None) + except Exception: + LOG.exception(_("Failed reporting state!")) + + def _vif_port_to_port_info(self, vif_port): + return dict(id=vif_port.vif_id, 
port_no=vif_port.ofport, + mac=vif_port.vif_mac) + + def _process_security_group(self, port_added, port_removed): + if port_added: + devices_added = [p['id'] for p in port_added] + self.sg_agent.prepare_devices_filter(devices_added) + if port_removed: + self.sg_agent.remove_devices_filter(port_removed) + + def loop_handler(self): + try: + # self.cur_ports will be kept until loop_handler succeeds. + cur_ports = [] if self.need_sync else self.cur_ports + new_ports = [] + + port_added = [] + for vif_port in self.int_br.get_vif_ports(): + port_id = vif_port.vif_id + new_ports.append(port_id) + if port_id not in cur_ports: + port_info = self._vif_port_to_port_info(vif_port) + port_added.append(port_info) + + port_removed = [] + for port_id in cur_ports: + if port_id not in new_ports: + port_removed.append(port_id) + + if port_added or port_removed: + self.plugin_rpc.update_ports(self.context, + self.agent_id, self.datapath_id, + port_added, port_removed) + self._process_security_group(port_added, port_removed) + else: + LOG.debug(_("No port changed.")) + + self.cur_ports = new_ports + self.need_sync = False + except Exception: + LOG.exception(_("Error in agent event loop")) + self.need_sync = True + + def daemon_loop(self): + """Main processing loop for NEC Plugin Agent.""" + while True: + self.loop_handler() + time.sleep(self.polling_interval) + + +def main(): + common_config.init(sys.argv[1:]) + + common_config.setup_logging(config.CONF) + + # Determine which agent type to use. + integ_br = config.OVS.integration_bridge + root_helper = config.AGENT.root_helper + polling_interval = config.AGENT.polling_interval + + agent = NECNeutronAgent(integ_br, root_helper, polling_interval) + + # Start everything. 
+ agent.daemon_loop() + + +if __name__ == "__main__": + main() diff --git a/neutron/plugins/nec/common/__init__.py b/neutron/plugins/nec/common/__init__.py new file mode 100644 index 000000000..362a36068 --- /dev/null +++ b/neutron/plugins/nec/common/__init__.py @@ -0,0 +1,15 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2012 NEC Corporation. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/neutron/plugins/nec/common/config.py b/neutron/plugins/nec/common/config.py new file mode 100644 index 000000000..70f4a1a63 --- /dev/null +++ b/neutron/plugins/nec/common/config.py @@ -0,0 +1,84 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2012 NEC Corporation. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# @author: Ryota MIBU + +from oslo.config import cfg + +from neutron.agent.common import config +from neutron.plugins.nec.common import constants as nconst + + +ovs_opts = [ + cfg.StrOpt('integration_bridge', default='br-int', + help=_("Integration bridge to use")), +] + +agent_opts = [ + cfg.IntOpt('polling_interval', default=2, + help=_("The number of seconds the agent will wait between " + "polling for local device changes.")), +] + +ofc_opts = [ + cfg.StrOpt('host', default='127.0.0.1', + help=_("Host to connect to")), + cfg.StrOpt('path_prefix', default='', + help=_("Base URL of OFC REST API. " + "It is prepended to each API request.")), + cfg.StrOpt('port', default='8888', + help=_("Port to connect to")), + cfg.StrOpt('driver', default='trema', + help=_("Driver to use")), + cfg.BoolOpt('enable_packet_filter', default=True, + help=_("Enable packet filter")), + cfg.BoolOpt('use_ssl', default=False, + help=_("Use SSL to connect")), + cfg.StrOpt('key_file', + help=_("Key file")), + cfg.StrOpt('cert_file', + help=_("Certificate file")), + cfg.BoolOpt('insecure_ssl', default=False, + help=_("Disable SSL certificate verification")), + cfg.IntOpt('api_max_attempts', default=3, + help=_("Maximum attempts per OFC API request." + "NEC plugin retries API request to OFC " + "when OFC returns ServiceUnavailable (503)." 
+ "The value must be greater than 0.")), +] + +provider_opts = [ + cfg.StrOpt('default_router_provider', + default=nconst.DEFAULT_ROUTER_PROVIDER, + help=_('Default router provider to use.')), + cfg.ListOpt('router_providers', + default=nconst.DEFAULT_ROUTER_PROVIDERS, + help=_('List of enabled router providers.')) +] + + +cfg.CONF.register_opts(ovs_opts, "OVS") +cfg.CONF.register_opts(agent_opts, "AGENT") +cfg.CONF.register_opts(ofc_opts, "OFC") +cfg.CONF.register_opts(provider_opts, "PROVIDER") +config.register_agent_state_opts_helper(cfg.CONF) +config.register_root_helper(cfg.CONF) + +# shortcuts +CONF = cfg.CONF +OVS = cfg.CONF.OVS +AGENT = cfg.CONF.AGENT +OFC = cfg.CONF.OFC +PROVIDER = cfg.CONF.PROVIDER diff --git a/neutron/plugins/nec/common/constants.py b/neutron/plugins/nec/common/constants.py new file mode 100644 index 000000000..b1bc7e5b3 --- /dev/null +++ b/neutron/plugins/nec/common/constants.py @@ -0,0 +1,24 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013 NEC Corporation. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +ROUTER_PROVIDER_L3AGENT = 'l3-agent' +ROUTER_PROVIDER_OPENFLOW = 'openflow' + +DEFAULT_ROUTER_PROVIDERS = [ROUTER_PROVIDER_L3AGENT, ROUTER_PROVIDER_OPENFLOW] +DEFAULT_ROUTER_PROVIDER = ROUTER_PROVIDER_L3AGENT + +ROUTER_STATUS_ACTIVE = 'ACTIVE' +ROUTER_STATUS_ERROR = 'ERROR' diff --git a/neutron/plugins/nec/common/exceptions.py b/neutron/plugins/nec/common/exceptions.py new file mode 100644 index 000000000..375135586 --- /dev/null +++ b/neutron/plugins/nec/common/exceptions.py @@ -0,0 +1,85 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2012 NEC Corporation. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# @author: Ryota MIBU + +from neutron.common import exceptions as qexc + + +class OFCException(qexc.NeutronException): + message = _("An OFC exception has occurred: %(reason)s") + + def __init__(self, **kwargs): + super(OFCException, self).__init__(**kwargs) + self.status = kwargs.get('status') + self.err_msg = kwargs.get('err_msg') + self.err_code = kwargs.get('err_code') + + +class OFCResourceNotFound(qexc.NotFound): + message = _("The specified OFC resource (%(resource)s) is not found.") + + +class NECDBException(qexc.NeutronException): + message = _("An exception occurred in NECPluginV2 DB: %(reason)s") + + +class OFCMappingNotFound(qexc.NotFound): + message = _("Neutron-OFC resource mapping for " + "%(resource)s %(neutron_id)s is not found. 
" + "It may be deleted during processing.") + + +class OFCServiceUnavailable(OFCException): + message = _("OFC returns Server Unavailable (503) " + "(Retry-After=%(retry_after)s)") + + def __init__(self, **kwargs): + super(OFCServiceUnavailable, self).__init__(**kwargs) + self.retry_after = kwargs.get('retry_after') + + +class PortInfoNotFound(qexc.NotFound): + message = _("PortInfo %(id)s could not be found") + + +class ProfilePortInfoInvalidDataPathId(qexc.InvalidInput): + message = _('Invalid input for operation: ' + 'datapath_id should be a hex string ' + 'with at most 8 bytes') + + +class ProfilePortInfoInvalidPortNo(qexc.InvalidInput): + message = _('Invalid input for operation: ' + 'port_no should be [0:65535]') + + +class RouterExternalGatewayNotSupported(qexc.BadRequest): + message = _("Router (provider=%(provider)s) does not support " + "an external network") + + +class ProviderNotFound(qexc.NotFound): + message = _("Provider %(provider)s could not be found") + + +class RouterOverLimit(qexc.Conflict): + message = _("Cannot create more routers with provider=%(provider)s") + + +class RouterProviderMismatch(qexc.Conflict): + message = _("Provider of Router %(router_id)s is %(provider)s. " + "This operation is supported only for router provider " + "%(expected_provider)s.") diff --git a/neutron/plugins/nec/common/ofc_client.py b/neutron/plugins/nec/common/ofc_client.py new file mode 100644 index 000000000..21fb5f74b --- /dev/null +++ b/neutron/plugins/nec/common/ofc_client.py @@ -0,0 +1,158 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2012 NEC Corporation. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# @author: Ryota MIBU + +import time + +import requests + +from neutron.openstack.common import excutils +from neutron.openstack.common import jsonutils as json +from neutron.openstack.common import log as logging +from neutron.plugins.nec.common import config +from neutron.plugins.nec.common import exceptions as nexc + + +LOG = logging.getLogger(__name__) + + +class OFCClient(object): + """A HTTP/HTTPS client for OFC Drivers.""" + + def __init__(self, host="127.0.0.1", port=8888, use_ssl=False, + key_file=None, cert_file=None, insecure_ssl=False): + """Creates a new client to some OFC. 
+ + :param host: The host where service resides + :param port: The port where service resides + :param use_ssl: True to use SSL, False to use HTTP + :param key_file: The SSL key file to use if use_ssl is true + :param cert_file: The SSL cert file to use if use_ssl is true + :param insecure_ssl: Don't verify SSL certificate + """ + self.host = host + self.port = port + self.use_ssl = use_ssl + self.key_file = key_file + self.cert_file = cert_file + self.insecure_ssl = insecure_ssl + self.connection = None + + def _format_error_message(self, status, detail): + detail = ' ' + detail if detail else '' + return (_("Operation on OFC failed: %(status)s%(msg)s") % + {'status': status, 'msg': detail}) + + def _get_response(self, method, action, body=None): + headers = {"Content-Type": "application/json"} + protocol = "http" + certs = {'key_file': self.key_file, 'cert_file': self.cert_file} + certs = dict((x, certs[x]) for x in certs if certs[x] is not None) + verify = True + + if self.use_ssl: + protocol = "https" + if self.insecure_ssl: + verify = False + + url = "%s://%s:%d%s" % (protocol, self.host, int(self.port), + action) + + res = requests.request(method, url, data=body, headers=headers, + cert=certs, verify=verify) + return res + + def do_single_request(self, method, action, body=None): + action = config.OFC.path_prefix + action + LOG.debug(_("Client request: %(host)s:%(port)s " + "%(method)s %(action)s [%(body)s]"), + {'host': self.host, 'port': self.port, + 'method': method, 'action': action, 'body': body}) + if type(body) is dict: + body = json.dumps(body) + try: + res = self._get_response(method, action, body) + data = res.text + LOG.debug(_("OFC returns [%(status)s:%(data)s]"), + {'status': res.status_code, + 'data': data}) + + # Try to decode JSON data if possible. 
+ try: + data = json.loads(data) + except (ValueError, TypeError): + pass + + if res.status_code in (requests.codes.OK, + requests.codes.CREATED, + requests.codes.ACCEPTED, + requests.codes.NO_CONTENT): + return data + elif res.status_code == requests.codes.SERVICE_UNAVAILABLE: + retry_after = res.headers.get('retry-after') + LOG.warning(_("OFC returns ServiceUnavailable " + "(retry-after=%s)"), retry_after) + raise nexc.OFCServiceUnavailable(retry_after=retry_after) + elif res.status_code == requests.codes.NOT_FOUND: + LOG.info(_("Specified resource %s does not exist on OFC "), + action) + raise nexc.OFCResourceNotFound(resource=action) + else: + LOG.warning(_("Operation on OFC failed: " + "status=%(status)s, detail=%(detail)s"), + {'status': res.status_code, 'detail': data}) + params = {'reason': _("Operation on OFC failed"), + 'status': res.status_code} + if isinstance(data, dict): + params['err_code'] = data.get('err_code') + params['err_msg'] = data.get('err_msg') + else: + params['err_msg'] = data + raise nexc.OFCException(**params) + except requests.exceptions.RequestException as e: + reason = _("Failed to connect OFC : %s") % e + LOG.error(reason) + raise nexc.OFCException(reason=reason) + + def do_request(self, method, action, body=None): + max_attempts = config.OFC.api_max_attempts + for i in range(max_attempts, 0, -1): + try: + return self.do_single_request(method, action, body) + except nexc.OFCServiceUnavailable as e: + with excutils.save_and_reraise_exception() as ctxt: + try: + wait_time = int(e.retry_after) + except (ValueError, TypeError): + wait_time = None + if i > 1 and wait_time: + LOG.info(_("Waiting for %s seconds due to " + "OFC Service_Unavailable."), wait_time) + time.sleep(wait_time) + ctxt.reraise = False + continue + + def get(self, action): + return self.do_request("GET", action) + + def post(self, action, body=None): + return self.do_request("POST", action, body=body) + + def put(self, action, body=None): + return 
self.do_request("PUT", action, body=body) + + def delete(self, action): + return self.do_request("DELETE", action) diff --git a/neutron/plugins/nec/common/utils.py b/neutron/plugins/nec/common/utils.py new file mode 100644 index 000000000..a628d8ef0 --- /dev/null +++ b/neutron/plugins/nec/common/utils.py @@ -0,0 +1,24 @@ +# Copyright 2014 NEC Corporation. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +def cmp_dpid(dpid_a, dpid_b): + """Compare two datapath IDs as hexadecimal int. + + It returns True if equal, otherwise False. + """ + try: + return (int(dpid_a, 16) == int(dpid_b, 16)) + except Exception: + return False diff --git a/neutron/plugins/nec/db/__init__.py b/neutron/plugins/nec/db/__init__.py new file mode 100644 index 000000000..362a36068 --- /dev/null +++ b/neutron/plugins/nec/db/__init__.py @@ -0,0 +1,15 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2012 NEC Corporation. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
def _get_resource_model(resource):
    """Look up the mapping model class registered for *resource*.

    :param resource: mapping kind key, e.g. 'ofc_tenant' or 'ofc_port'
    :returns: the SQLAlchemy model class from ``resource_map``
    :raises KeyError: if *resource* is not a registered mapping kind
    """
    model = resource_map[resource]
    return model
def add_ofc_item(session, resource, neutron_id, ofc_id):
    """Record a Neutron-ID <-> OFC-ID mapping for the given resource.

    :param session: database session
    :param resource: mapping kind key (e.g. 'ofc_tenant', 'ofc_port')
    :param neutron_id: Neutron-side resource ID
    :param ofc_id: OFC-side resource ID
    :returns: the persisted mapping row
    :raises nexc.NECDBException: if the insert fails (the original
        exception is logged and wrapped)
    """
    try:
        model = _get_resource_model(resource)
        item = model(neutron_id=neutron_id, ofc_id=ofc_id)
        with session.begin(subtransactions=True):
            session.add(item)
            session.flush()
    except Exception as exc:
        LOG.exception(exc)
        # ``exc.message`` only exists on Python 2 (BaseException.message
        # was removed in Python 3); str(exc) is portable and equivalent.
        raise nexc.NECDBException(reason=str(exc))
    return item
def add_portinfo(session, id, datapath_id='', port_no=0,
                 vlan_id=OFP_VLAN_NONE, mac=''):
    """Store switch attachment details (datapath, port, VLAN, MAC) for a port.

    :param session: database session
    :param id: Neutron port ID (also the PortInfo primary key)
    :param datapath_id: OpenFlow datapath (switch) identifier
    :param port_no: port number on that datapath
    :param vlan_id: VLAN tag; defaults to OFP_VLAN_NONE
    :param mac: interface MAC address
    :returns: the persisted PortInfo row
    :raises nexc.NECDBException: if the insert fails (the original
        exception is logged and wrapped)
    """
    try:
        portinfo = nmodels.PortInfo(id=id, datapath_id=datapath_id,
                                    port_no=port_no, vlan_id=vlan_id, mac=mac)
        with session.begin(subtransactions=True):
            session.add(portinfo)
    except Exception as exc:
        LOG.exception(exc)
        # ``exc.message`` only exists on Python 2 (BaseException.message
        # was removed in Python 3); str(exc) is portable and equivalent.
        raise nexc.NECDBException(reason=str(exc))
    return portinfo
port_, sg_id in port_and_sgs if sg_id] + port_dict['security_group_rules'] = [] + port_dict['security_group_source_groups'] = [] + port_dict['fixed_ips'] = [ip['ip_address'] + for ip in port['fixed_ips']] + return port_dict diff --git a/neutron/plugins/nec/db/models.py b/neutron/plugins/nec/db/models.py new file mode 100644 index 000000000..302cb610d --- /dev/null +++ b/neutron/plugins/nec/db/models.py @@ -0,0 +1,71 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2012 NEC Corporation. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
class PortInfo(model_base.BASEV2):
    """Represents a Virtual Interface.

    Records where a Neutron port is physically attached on the OpenFlow
    network: which switch (datapath_id), which switch port (port_no),
    which VLAN tag and which MAC address.
    """
    # Shares its primary key with the Neutron port; deleting the port
    # cascades to this row.
    id = sa.Column(sa.String(36),
                   sa.ForeignKey('ports.id', ondelete="CASCADE"),
                   primary_key=True)
    # OpenFlow datapath (switch) identifier the interface is attached to.
    datapath_id = sa.Column(sa.String(36), nullable=False)
    # Port number on that datapath.
    port_no = sa.Column(sa.Integer, nullable=False)
    # VLAN tag; callers elsewhere default this to 0xffff (OFP_VLAN_NONE),
    # presumably meaning "no VLAN tag" -- confirm against api.py.
    vlan_id = sa.Column(sa.Integer, nullable=False)
    # Interface MAC address.
    mac = sa.Column(sa.String(32), nullable=False)
    # One-to-one backref: port.portinfo is eagerly loaded and removed
    # together with the port.
    port = orm.relationship(
        models_v2.Port,
        backref=orm.backref("portinfo",
                            lazy='joined', uselist=False,
                            cascade='delete'))
class PacketFilter(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant):
    """Represents a packet filter.

    A filter belongs to a network, carries a priority and an action, and
    matches traffic on an optional set of L2/L3/L4 conditions.  Match
    columns are stored as empty string / 0 when unspecified (see the
    DB-mixin code that normalizes unset attributes).
    """
    name = sa.Column(sa.String(255))
    # Owning network; removed together with the network.
    network_id = sa.Column(sa.String(36),
                           sa.ForeignKey('networks.id', ondelete="CASCADE"),
                           nullable=False)
    priority = sa.Column(sa.Integer, nullable=False)
    # Filter action, e.g. allow/drop (exact vocabulary defined by the
    # packetfilter extension).
    action = sa.Column(sa.String(16), nullable=False)
    # condition
    # Optional ingress port restriction; NULL means "whole network".
    in_port = sa.Column(sa.String(36),
                        sa.ForeignKey('ports.id', ondelete="CASCADE"),
                        nullable=True)
    src_mac = sa.Column(sa.String(32), nullable=False)
    dst_mac = sa.Column(sa.String(32), nullable=False)
    eth_type = sa.Column(sa.Integer, nullable=False)
    src_cidr = sa.Column(sa.String(64), nullable=False)
    dst_cidr = sa.Column(sa.String(64), nullable=False)
    protocol = sa.Column(sa.String(16), nullable=False)
    src_port = sa.Column(sa.Integer, nullable=False)
    dst_port = sa.Column(sa.Integer, nullable=False)
    # status
    admin_state_up = sa.Column(sa.Boolean(), nullable=False)
    status = sa.Column(sa.String(16), nullable=False)

    # Eager-loaded relationships; both cascade deletion from the parent.
    network = orm.relationship(
        models_v2.Network,
        backref=orm.backref('packetfilters', lazy='joined', cascade='delete'),
        uselist=False)
    in_port_ref = orm.relationship(
        models_v2.Port,
        backref=orm.backref('packetfilters', lazy='joined', cascade='delete'),
        primaryjoin="Port.id==PacketFilter.in_port",
        uselist=False)
attributes.is_attr_set(params[key]): + if key == 'in_port': + params[key] = None + elif key in INT_FIELDS: + # Integer field + params[key] = 0 + else: + params[key] = '' + + def _get_eth_type_for_protocol(self, protocol): + if protocol.upper() in ("ICMP", "TCP", "UDP"): + return 0x800 + elif protocol.upper() == "ARP": + return 0x806 + + def _set_eth_type_from_protocol(self, filter_dict): + if filter_dict.get('protocol'): + eth_type = self._get_eth_type_for_protocol(filter_dict['protocol']) + if eth_type: + filter_dict['eth_type'] = eth_type + + def _check_eth_type_and_protocol(self, new_filter, current_filter): + if 'protocol' in new_filter or 'eth_type' not in new_filter: + return + eth_type = self._get_eth_type_for_protocol(current_filter['protocol']) + if not eth_type: + return + if eth_type != new_filter['eth_type']: + raise ext_pf.PacketFilterEtherTypeProtocolMismatch( + eth_type=hex(new_filter['eth_type']), + protocol=current_filter['protocol']) + + def create_packet_filter(self, context, packet_filter): + pf_dict = packet_filter['packet_filter'] + tenant_id = self._get_tenant_id_for_create(context, pf_dict) + + if pf_dict['in_port'] == attributes.ATTR_NOT_SPECIFIED: + # validate network ownership + self.get_network(context, pf_dict['network_id']) + else: + # validate port ownership + self.get_port(context, pf_dict['in_port']) + + params = {'tenant_id': tenant_id, + 'id': pf_dict.get('id') or uuidutils.generate_uuid(), + 'name': pf_dict['name'], + 'network_id': pf_dict['network_id'], + 'priority': pf_dict['priority'], + 'action': pf_dict['action'], + 'admin_state_up': pf_dict.get('admin_state_up', True), + 'status': PF_STATUS_DOWN, + 'in_port': pf_dict['in_port'], + 'src_mac': pf_dict['src_mac'], + 'dst_mac': pf_dict['dst_mac'], + 'eth_type': pf_dict['eth_type'], + 'src_cidr': pf_dict['src_cidr'], + 'dst_cidr': pf_dict['dst_cidr'], + 'src_port': pf_dict['src_port'], + 'dst_port': pf_dict['dst_port'], + 'protocol': pf_dict['protocol']} + for key in params: + 
self._replace_unspecified_field(params, key) + self._set_eth_type_from_protocol(params) + + with context.session.begin(subtransactions=True): + pf_entry = PacketFilter(**params) + context.session.add(pf_entry) + + return self._make_packet_filter_dict(pf_entry) + + def update_packet_filter(self, context, id, packet_filter): + params = packet_filter['packet_filter'] + for key in params: + self._replace_unspecified_field(params, key) + self._set_eth_type_from_protocol(params) + with context.session.begin(subtransactions=True): + pf_entry = self._get_packet_filter(context, id) + self._check_eth_type_and_protocol(params, pf_entry) + pf_entry.update(params) + return self._make_packet_filter_dict(pf_entry) + + def delete_packet_filter(self, context, id): + with context.session.begin(subtransactions=True): + pf_entry = self._get_packet_filter(context, id) + context.session.delete(pf_entry) + + def get_packet_filters_for_port(self, context, port): + """Retrieve packet filters on OFC on a given port. + + It returns a list of tuple (neutron filter_id, OFC id). + """ + query = (context.session.query(nmodels.OFCFilterMapping) + .join(PacketFilter, + nmodels.OFCFilterMapping.neutron_id == PacketFilter.id) + .filter(PacketFilter.admin_state_up == sql.true())) + + network_id = port['network_id'] + net_pf_query = (query.filter(PacketFilter.network_id == network_id) + .filter(PacketFilter.in_port == sql.null())) + net_filters = [(pf['neutron_id'], pf['ofc_id']) for pf in net_pf_query] + + port_pf_query = query.filter(PacketFilter.in_port == port['id']) + port_filters = [(pf['neutron_id'], pf['ofc_id']) + for pf in port_pf_query] + + return net_filters + port_filters diff --git a/neutron/plugins/nec/db/router.py b/neutron/plugins/nec/db/router.py new file mode 100644 index 000000000..9659cd7fd --- /dev/null +++ b/neutron/plugins/nec/db/router.py @@ -0,0 +1,92 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 NEC Corporation. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import sqlalchemy as sa +from sqlalchemy import orm +from sqlalchemy.orm import exc as sa_exc + +from neutron.db import l3_db +from neutron.db import models_v2 +from neutron.openstack.common import log as logging + +LOG = logging.getLogger(__name__) + + +class RouterProvider(models_v2.model_base.BASEV2): + """Represents a binding of router_id to provider.""" + provider = sa.Column(sa.String(255)) + router_id = sa.Column(sa.String(36), + sa.ForeignKey('routers.id', ondelete="CASCADE"), + primary_key=True) + + router = orm.relationship(l3_db.Router, uselist=False, + backref=orm.backref('provider', uselist=False, + lazy='joined', + cascade='delete')) + + +def _get_router_providers_query(query, provider=None, router_ids=None): + if provider: + query = query.filter_by(provider=provider) + if router_ids: + column = RouterProvider.router_id + query = query.filter(column.in_(router_ids)) + return query + + +def get_router_providers(session, provider=None, router_ids=None): + """Retrieve a list of a pair of router ID and its provider.""" + query = session.query(RouterProvider) + query = _get_router_providers_query(query, provider, router_ids) + return [{'provider': router.provider, 'router_id': router.router_id} + for router in query] + + +def get_routers_by_provider(session, provider, router_ids=None): + """Retrieve a list of router IDs with the given provider.""" + query = session.query(RouterProvider.router_id) + query = 
def get_provider_by_router(session, router_id):
    """Return the provider name bound to *router_id*, or None if unbound."""
    # Building the query cannot raise NoResultFound; only .one() can,
    # so keep the try body minimal.
    query = session.query(RouterProvider).filter_by(router_id=router_id)
    try:
        binding = query.one()
    except sa_exc.NoResultFound:
        return None
    return binding.provider
def get_driver(driver_name):
    """Resolve an OFC driver alias to its class path and import it.

    :param driver_name: a short alias from DRIVER_LIST (e.g. 'pfc',
        'trema') or a fully-qualified class path used verbatim
    :returns: the imported driver class
    """
    LOG.info(_("Loading OFC driver: %s"), driver_name)
    driver_klass = DRIVER_LIST.get(driver_name, driver_name)
    return importutils.import_class(driver_klass)
+# @author: Ryota MIBU +# @author: Akihiro MOTOKI + +import re +import uuid + +import netaddr + +from neutron.api.v2 import attributes +from neutron.common import constants +from neutron.common import exceptions as qexc +from neutron.common import log as call_log +from neutron import manager +from neutron.plugins.nec.common import ofc_client +from neutron.plugins.nec.extensions import packetfilter as ext_pf +from neutron.plugins.nec import ofc_driver_base + + +class InvalidOFCIdFormat(qexc.NeutronException): + message = _("OFC %(resource)s ID has an invalid format: %(ofc_id)s") + + +class PFCDriverBase(ofc_driver_base.OFCDriverBase): + """Base Class for PDC Drivers. + + PFCDriverBase provides methods to handle PFC resources through REST API. + This uses ofc resource path instead of ofc resource ID. + + The class implements the API for PFC V4.0 or later. + """ + + router_supported = False + + match_ofc_network_id = re.compile( + "^/tenants/(?P[^/]+)/networks/(?P[^/]+)$") + match_ofc_port_id = re.compile( + "^/tenants/(?P[^/]+)/networks/(?P[^/]+)" + "/ports/(?P[^/]+)$") + + def __init__(self, conf_ofc): + self.client = ofc_client.OFCClient(host=conf_ofc.host, + port=conf_ofc.port, + use_ssl=conf_ofc.use_ssl, + key_file=conf_ofc.key_file, + cert_file=conf_ofc.cert_file, + insecure_ssl=conf_ofc.insecure_ssl) + + @classmethod + def filter_supported(cls): + return False + + def _generate_pfc_str(self, raw_str): + """Generate PFC acceptable String.""" + return re.sub(r'[^0-9a-zA-Z]', '_', raw_str) + + def _generate_pfc_id(self, id_str): + """Generate ID on PFC. + + Currently, PFC ID must be less than 32. 
+ Shorten UUID string length from 36 to 31 by follows: + * delete UUID Version and hyphen (see RFC4122) + * ensure str length + """ + try: + # openstack.common.uuidutils.is_uuid_like() returns + # False for KeyStone tenant_id, so uuid.UUID is used + # directly here to accept tenant_id as UUID string + uuid_str = str(uuid.UUID(id_str)).replace('-', '') + uuid_no_version = uuid_str[:12] + uuid_str[13:] + return uuid_no_version[:31] + except Exception: + return self._generate_pfc_str(id_str)[:31] + + def _generate_pfc_description(self, desc): + """Generate Description on PFC. + + Currently, PFC Description must be less than 128. + """ + return self._generate_pfc_str(desc)[:127] + + def _extract_ofc_network_id(self, ofc_network_id): + match = self.match_ofc_network_id.match(ofc_network_id) + if match: + return match.group('network_id') + raise InvalidOFCIdFormat(resource='network', ofc_id=ofc_network_id) + + def _extract_ofc_port_id(self, ofc_port_id): + match = self.match_ofc_port_id.match(ofc_port_id) + if match: + return {'tenant': match.group('tenant_id'), + 'network': match.group('network_id'), + 'port': match.group('port_id')} + raise InvalidOFCIdFormat(resource='port', ofc_id=ofc_port_id) + + def create_tenant(self, description, tenant_id=None): + ofc_tenant_id = self._generate_pfc_id(tenant_id) + body = {'id': ofc_tenant_id} + self.client.post('/tenants', body=body) + return '/tenants/' + ofc_tenant_id + + def delete_tenant(self, ofc_tenant_id): + return self.client.delete(ofc_tenant_id) + + def create_network(self, ofc_tenant_id, description, network_id=None): + path = "%s/networks" % ofc_tenant_id + pfc_desc = self._generate_pfc_description(description) + body = {'description': pfc_desc} + res = self.client.post(path, body=body) + ofc_network_id = res['id'] + return path + '/' + ofc_network_id + + def delete_network(self, ofc_network_id): + return self.client.delete(ofc_network_id) + + def create_port(self, ofc_network_id, portinfo, + port_id=None, 
filters=None): + path = "%s/ports" % ofc_network_id + body = {'datapath_id': portinfo.datapath_id, + 'port': str(portinfo.port_no), + 'vid': str(portinfo.vlan_id)} + if self.filter_supported() and filters: + body['filters'] = [self._extract_ofc_filter_id(pf[1]) + for pf in filters] + res = self.client.post(path, body=body) + ofc_port_id = res['id'] + return path + '/' + ofc_port_id + + def delete_port(self, ofc_port_id): + return self.client.delete(ofc_port_id) + + +class PFCFilterDriverMixin(object): + """PFC PacketFilter Driver Mixin.""" + filters_path = "/filters" + filter_path = "/filters/%s" + + # PFC specific constants + MIN_PRIORITY = 1 + MAX_PRIORITY = 32766 + CREATE_ONLY_FIELDS = ['action', 'priority'] + PFC_ALLOW_ACTION = "pass" + PFC_DROP_ACTION = "drop" + + match_ofc_filter_id = re.compile("^/filters/(?P[^/]+)$") + + @classmethod + def filter_supported(cls): + return True + + def _set_param(self, filter_dict, body, key, create, convert_to=None): + if key in filter_dict: + if filter_dict[key]: + if convert_to: + body[key] = convert_to(filter_dict[key]) + else: + body[key] = filter_dict[key] + elif not create: + body[key] = "" + + def _generate_body(self, filter_dict, apply_ports=None, create=True): + body = {} + + if create: + # action : pass, drop (mandatory) + if filter_dict['action'].lower() in ext_pf.ALLOW_ACTIONS: + body['action'] = self.PFC_ALLOW_ACTION + else: + body['action'] = self.PFC_DROP_ACTION + # priority : mandatory + body['priority'] = filter_dict['priority'] + + for key in ['src_mac', 'dst_mac', 'src_port', 'dst_port']: + self._set_param(filter_dict, body, key, create) + + for key in ['src_cidr', 'dst_cidr']: + # CIDR must contain netmask even if it is an address. + convert_to = lambda x: str(netaddr.IPNetwork(x)) + self._set_param(filter_dict, body, key, create, convert_to) + + # protocol : decimal (0-255) + if 'protocol' in filter_dict: + if (not filter_dict['protocol'] or + # In the case of ARP, ip_proto should be set to wildcard. 
+ # eth_type is set during adding an entry to DB layer. + filter_dict['protocol'].lower() == ext_pf.PROTO_NAME_ARP): + if not create: + body['protocol'] = "" + elif filter_dict['protocol'].lower() == constants.PROTO_NAME_ICMP: + body['protocol'] = constants.PROTO_NUM_ICMP + elif filter_dict['protocol'].lower() == constants.PROTO_NAME_TCP: + body['protocol'] = constants.PROTO_NUM_TCP + elif filter_dict['protocol'].lower() == constants.PROTO_NAME_UDP: + body['protocol'] = constants.PROTO_NUM_UDP + else: + body['protocol'] = int(filter_dict['protocol'], 0) + + # eth_type : hex (0x0-0xFFFF) + self._set_param(filter_dict, body, 'eth_type', create, hex) + + # apply_ports + if apply_ports: + # each element of apply_ports is a tuple of (neutron_id, ofc_id), + body['apply_ports'] = [] + for p in apply_ports: + try: + body['apply_ports'].append(self._extract_ofc_port_id(p[1])) + except InvalidOFCIdFormat: + pass + + return body + + def _validate_filter_common(self, filter_dict): + # Currently PFC support only IPv4 CIDR. 
+ for field in ['src_cidr', 'dst_cidr']: + if (not filter_dict.get(field) or + filter_dict[field] == attributes.ATTR_NOT_SPECIFIED): + continue + net = netaddr.IPNetwork(filter_dict[field]) + if net.version != 4: + raise ext_pf.PacketFilterIpVersionNonSupported( + version=net.version, field=field, value=filter_dict[field]) + if ('priority' in filter_dict and + not (self.MIN_PRIORITY <= filter_dict['priority'] + <= self.MAX_PRIORITY)): + raise ext_pf.PacketFilterInvalidPriority( + min=self.MIN_PRIORITY, max=self.MAX_PRIORITY) + + def _validate_duplicate_priority(self, context, filter_dict): + plugin = manager.NeutronManager.get_plugin() + filters = {'network_id': [filter_dict['network_id']], + 'priority': [filter_dict['priority']]} + ret = plugin.get_packet_filters(context, filters=filters, + fields=['id']) + if ret: + raise ext_pf.PacketFilterDuplicatedPriority( + priority=filter_dict['priority']) + + def validate_filter_create(self, context, filter_dict): + self._validate_filter_common(filter_dict) + self._validate_duplicate_priority(context, filter_dict) + + def validate_filter_update(self, context, filter_dict): + for field in self.CREATE_ONLY_FIELDS: + if field in filter_dict: + raise ext_pf.PacketFilterUpdateNotSupported(field=field) + self._validate_filter_common(filter_dict) + + @call_log.log + def create_filter(self, ofc_network_id, filter_dict, + portinfo=None, filter_id=None, apply_ports=None): + body = self._generate_body(filter_dict, apply_ports, create=True) + res = self.client.post(self.filters_path, body=body) + # filter_id passed from a caller is not used. + # ofc_filter_id is generated by PFC because the prefix of + # filter_id has special meaning and it is internally used. 
+ ofc_filter_id = res['id'] + return self.filter_path % ofc_filter_id + + @call_log.log + def update_filter(self, ofc_filter_id, filter_dict): + body = self._generate_body(filter_dict, create=False) + self.client.put(ofc_filter_id, body) + + @call_log.log + def delete_filter(self, ofc_filter_id): + return self.client.delete(ofc_filter_id) + + def _extract_ofc_filter_id(self, ofc_filter_id): + match = self.match_ofc_filter_id.match(ofc_filter_id) + if match: + return match.group('filter_id') + raise InvalidOFCIdFormat(resource='filter', ofc_id=ofc_filter_id) + + def convert_ofc_filter_id(self, context, ofc_filter_id): + # PFC Packet Filter is supported after the format of mapping tables + # are changed, so it is enough just to return ofc_filter_id + return ofc_filter_id + + +class PFCRouterDriverMixin(object): + + router_supported = True + router_nat_supported = False + + def create_router(self, ofc_tenant_id, router_id, description): + path = '%s/routers' % ofc_tenant_id + res = self.client.post(path, body=None) + ofc_router_id = res['id'] + return path + '/' + ofc_router_id + + def delete_router(self, ofc_router_id): + return self.client.delete(ofc_router_id) + + def add_router_interface(self, ofc_router_id, ofc_net_id, + ip_address=None, mac_address=None): + # ip_address : / (e.g., 10.0.0.0/24) + path = '%s/interfaces' % ofc_router_id + body = {'net_id': self._extract_ofc_network_id(ofc_net_id)} + if ip_address: + body['ip_address'] = ip_address + if mac_address: + body['mac_address'] = mac_address + res = self.client.post(path, body=body) + return path + '/' + res['id'] + + def update_router_interface(self, ofc_router_inf_id, + ip_address=None, mac_address=None): + # ip_address : / (e.g., 10.0.0.0/24) + if not ip_address and not mac_address: + return + body = {} + if ip_address: + body['ip_address'] = ip_address + if mac_address: + body['mac_address'] = mac_address + return self.client.put(ofc_router_inf_id, body=body) + + def delete_router_interface(self, 
ofc_router_inf_id): + return self.client.delete(ofc_router_inf_id) + + def list_router_routes(self, ofc_router_id): + path = '%s/routes' % ofc_router_id + ret = self.client.get(path) + # Prepend ofc_router_id to route_id + for r in ret['routes']: + r['id'] = ofc_router_id + '/routes/' + r['id'] + return ret['routes'] + + def add_router_route(self, ofc_router_id, destination, nexthop): + path = '%s/routes' % ofc_router_id + body = {'destination': destination, + 'nexthop': nexthop} + ret = self.client.post(path, body=body) + return path + '/' + ret['id'] + + def delete_router_route(self, ofc_router_route_id): + return self.client.delete(ofc_router_route_id) + + +class PFCV3Driver(PFCDriverBase): + + def create_tenant(self, description, tenant_id): + ofc_tenant_id = self._generate_pfc_id(tenant_id) + return "/tenants/" + ofc_tenant_id + + def delete_tenant(self, ofc_tenant_id): + pass + + +class PFCV4Driver(PFCDriverBase): + pass + + +class PFCV5Driver(PFCRouterDriverMixin, PFCDriverBase): + pass + + +class PFCV51Driver(PFCFilterDriverMixin, PFCV5Driver): + pass diff --git a/neutron/plugins/nec/drivers/trema.py b/neutron/plugins/nec/drivers/trema.py new file mode 100644 index 000000000..875a55d34 --- /dev/null +++ b/neutron/plugins/nec/drivers/trema.py @@ -0,0 +1,250 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 NEC Corporation. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
# @author: Ryota MIBU
# @author: Akihiro MOTOKI

from neutron.openstack.common import uuidutils
from neutron.plugins.nec.common import ofc_client
from neutron.plugins.nec import ofc_driver_base


class TremaDriverBase(ofc_driver_base.OFCDriverBase):
    """Common base class for Trema (Sliceable Switch) drivers.

    Maps Neutron tenants/networks onto the Sliceable Switch REST API.
    Trema has no tenant concept, so the tenant operations are no-ops
    that only keep the identifier format consistent with the PFC driver.
    """
    networks_path = "/networks"
    network_path = "/networks/%s"

    router_supported = False

    def __init__(self, conf_ofc):
        # Trema sliceable REST API does not support HTTPS
        self.client = ofc_client.OFCClient(host=conf_ofc.host,
                                           port=conf_ofc.port)

    def _get_network_id(self, ofc_network_id):
        # ofc_network_id has the form "/networks/{network-id}";
        # return the trailing {network-id} component.
        return ofc_network_id.split('/')[2]

    def _get_tenant_id(self, tenant_id):
        # Trema does not use tenant_id, but a "/tenants/{tenant-id}"
        # string is returned to keep consistency with the PFC driver.
        return '/tenants/' + tenant_id

    def create_tenant(self, description, tenant_id=None):
        """Return a pseudo tenant path; nothing is created on OFC."""
        return self._get_tenant_id(tenant_id or uuidutils.generate_uuid())

    def update_tenant(self, ofc_tenant_id, description):
        pass

    def delete_tenant(self, ofc_tenant_id):
        pass

    def create_network(self, ofc_tenant_id, description, network_id=None):
        """Create a network (slice) on OFC and return its REST path."""
        ofc_network_id = network_id or uuidutils.generate_uuid()
        body = {'id': ofc_network_id, 'description': description}
        self.client.post(self.networks_path, body=body)
        return self.network_path % ofc_network_id

    def delete_network(self, ofc_network_id):
        """Delete the network identified by its OFC REST path."""
        return self.client.delete(ofc_network_id)


class TremaFilterDriverMixin(object):
    """Trema (Sliceable Switch) PacketFilter Driver Mixin."""
    filters_path = "/filters"
    filter_path = "/filters/%s"

    @classmethod
    def filter_supported(cls):
        return True

    def create_filter(self, ofc_network_id, filter_dict,
                      portinfo=None, filter_id=None, apply_ports=None):
        """Create a packet filter on OFC and return its REST path.

        Match fields not specified in filter_dict are accumulated into
        ofp_wildcards so the switch ignores them.

        :raises ValueError: if filter_dict['action'] is not a supported
            allow/drop action (normally rejected earlier by API-layer
            validation).
        """
        action = filter_dict['action'].upper()
        if action in ("ACCEPT", "ALLOW"):
            ofc_action = "ALLOW"
        elif action in ("DROP", "DENY"):
            ofc_action = "DENY"
        else:
            # BUG FIX: the original left ofc_action unbound here, so an
            # unexpected action later raised an opaque UnboundLocalError.
            # Fail with an explicit message instead.
            raise ValueError("unsupported packet filter action: %s" %
                             filter_dict['action'])

        body = {'priority': filter_dict['priority'],
                'slice': self._get_network_id(ofc_network_id),
                'action': ofc_action}
        # VLAN and ToS matching are never requested by this driver.
        ofp_wildcards = ["dl_vlan", "dl_vlan_pcp", "nw_tos"]

        if portinfo:
            body['in_datapath_id'] = portinfo.datapath_id
            body['in_port'] = portinfo.port_no
        else:
            body['wildcards'] = "in_datapath_id"
            ofp_wildcards.append("in_port")

        if filter_dict['src_mac']:
            body['dl_src'] = filter_dict['src_mac']
        else:
            ofp_wildcards.append("dl_src")

        if filter_dict['dst_mac']:
            body['dl_dst'] = filter_dict['dst_mac']
        else:
            ofp_wildcards.append("dl_dst")

        if filter_dict['src_cidr']:
            body['nw_src'] = filter_dict['src_cidr']
        else:
            ofp_wildcards.append("nw_src:32")

        if filter_dict['dst_cidr']:
            body['nw_dst'] = filter_dict['dst_cidr']
        else:
            ofp_wildcards.append("nw_dst:32")

        if filter_dict['protocol']:
            protocol = filter_dict['protocol'].upper()
            if protocol == "ICMP":
                body['dl_type'] = "0x800"
                body['nw_proto'] = hex(1)
            elif protocol == "TCP":
                body['dl_type'] = "0x800"
                body['nw_proto'] = hex(6)
            elif protocol == "UDP":
                body['dl_type'] = "0x800"
                body['nw_proto'] = hex(17)
            elif protocol == "ARP":
                body['dl_type'] = "0x806"
                ofp_wildcards.append("nw_proto")
            else:
                # Unknown name: pass the raw value through to OFC.
                body['nw_proto'] = filter_dict['protocol']
        else:
            ofp_wildcards.append("nw_proto")

        if 'dl_type' in body:
            # dl_type was already fixed by the protocol handling above.
            pass
        elif filter_dict['eth_type']:
            body['dl_type'] = filter_dict['eth_type']
        else:
            ofp_wildcards.append("dl_type")

        if filter_dict['src_port']:
            body['tp_src'] = hex(filter_dict['src_port'])
        else:
            ofp_wildcards.append("tp_src")

        if filter_dict['dst_port']:
            body['tp_dst'] = hex(filter_dict['dst_port'])
        else:
            ofp_wildcards.append("tp_dst")

        ofc_filter_id = filter_id or uuidutils.generate_uuid()
        body['id'] = ofc_filter_id

        body['ofp_wildcards'] = ','.join(ofp_wildcards)

        self.client.post(self.filters_path, body=body)
        return self.filter_path % ofc_filter_id
def delete_filter(self, ofc_filter_id): + return self.client.delete(ofc_filter_id) + + +class TremaPortBaseDriver(TremaDriverBase, TremaFilterDriverMixin): + """Trema (Sliceable Switch) Driver for port base binding. + + TremaPortBaseDriver uses port base binding. + Ports are identified by datapath_id, port_no and vlan_id. + """ + ports_path = "%(network)s/ports" + port_path = "%(network)s/ports/%(port)s" + + def create_port(self, ofc_network_id, portinfo, + port_id=None, filters=None): + ofc_port_id = port_id or uuidutils.generate_uuid() + path = self.ports_path % {'network': ofc_network_id} + body = {'id': ofc_port_id, + 'datapath_id': portinfo.datapath_id, + 'port': str(portinfo.port_no), + 'vid': str(portinfo.vlan_id)} + self.client.post(path, body=body) + return self.port_path % {'network': ofc_network_id, + 'port': ofc_port_id} + + def delete_port(self, ofc_port_id): + return self.client.delete(ofc_port_id) + + +class TremaPortMACBaseDriver(TremaDriverBase, TremaFilterDriverMixin): + """Trema (Sliceable Switch) Driver for port-mac base binding. + + TremaPortBaseDriver uses port-mac base binding. + Ports are identified by datapath_id, port_no, vlan_id and mac. + """ + ports_path = "%(network)s/ports" + port_path = "%(network)s/ports/%(port)s" + attachments_path = "%(network)s/ports/%(port)s/attachments" + attachment_path = "%(network)s/ports/%(port)s/attachments/%(attachment)s" + + def create_port(self, ofc_network_id, portinfo, port_id=None, + filters=None): + #NOTE: This Driver create slices with Port-MAC Based bindings on Trema + # Sliceable. It's REST API requires Port Based binding before you + # define Port-MAC Based binding. 
+ ofc_port_id = port_id or uuidutils.generate_uuid() + dummy_port_id = "dummy-%s" % ofc_port_id + + path = self.ports_path % {'network': ofc_network_id} + body = {'id': dummy_port_id, + 'datapath_id': portinfo.datapath_id, + 'port': str(portinfo.port_no), + 'vid': str(portinfo.vlan_id)} + self.client.post(path, body=body) + + path = self.attachments_path % {'network': ofc_network_id, + 'port': dummy_port_id} + body = {'id': ofc_port_id, 'mac': portinfo.mac} + self.client.post(path, body=body) + + path = self.port_path % {'network': ofc_network_id, + 'port': dummy_port_id} + self.client.delete(path) + + return self.attachment_path % {'network': ofc_network_id, + 'port': dummy_port_id, + 'attachment': ofc_port_id} + + def delete_port(self, ofc_port_id): + return self.client.delete(ofc_port_id) + + +class TremaMACBaseDriver(TremaDriverBase): + """Trema (Sliceable Switch) Driver for mac base binding. + + TremaPortBaseDriver uses mac base binding. + Ports are identified by mac. + """ + attachments_path = "%(network)s/attachments" + attachment_path = "%(network)s/attachments/%(attachment)s" + + @classmethod + def filter_supported(cls): + return False + + def create_port(self, ofc_network_id, portinfo, port_id=None, + filters=None): + ofc_port_id = port_id or uuidutils.generate_uuid() + path = self.attachments_path % {'network': ofc_network_id} + body = {'id': ofc_port_id, 'mac': portinfo.mac} + self.client.post(path, body=body) + return self.attachment_path % {'network': ofc_network_id, + 'attachment': ofc_port_id} + + def delete_port(self, ofc_port_id): + return self.client.delete(ofc_port_id) diff --git a/neutron/plugins/nec/extensions/__init__.py b/neutron/plugins/nec/extensions/__init__.py new file mode 100644 index 000000000..362a36068 --- /dev/null +++ b/neutron/plugins/nec/extensions/__init__.py @@ -0,0 +1,15 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2012 NEC Corporation. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/neutron/plugins/nec/extensions/packetfilter.py b/neutron/plugins/nec/extensions/packetfilter.py new file mode 100644 index 000000000..2dddfd41a --- /dev/null +++ b/neutron/plugins/nec/extensions/packetfilter.py @@ -0,0 +1,208 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012-2013 NEC Corporation. +# All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# +# @author: Ryota MIBU +# + +from oslo.config import cfg + +from neutron.api import extensions +from neutron.api.v2 import attributes +from neutron.api.v2 import base +from neutron.common import constants +from neutron.common import exceptions +from neutron import manager +from neutron import quota + + +quota_packet_filter_opts = [ + cfg.IntOpt('quota_packet_filter', + default=100, + help=_("Number of packet_filters allowed per tenant, " + "-1 for unlimited")) +] +cfg.CONF.register_opts(quota_packet_filter_opts, 'QUOTAS') + + +class PacketFilterNotFound(exceptions.NotFound): + message = _("PacketFilter %(id)s could not be found") + + +class PacketFilterIpVersionNonSupported(exceptions.BadRequest): + message = _("IP version %(version)s is not supported for %(field)s " + "(%(value)s is specified)") + + +class PacketFilterInvalidPriority(exceptions.BadRequest): + message = _("Packet Filter priority should be %(min)s-%(max)s (included)") + + +class PacketFilterUpdateNotSupported(exceptions.BadRequest): + message = _("%(field)s field cannot be updated") + + +class PacketFilterDuplicatedPriority(exceptions.BadRequest): + message = _("The backend does not support duplicated priority. " + "Priority %(priority)s is in use") + + +class PacketFilterEtherTypeProtocolMismatch(exceptions.Conflict): + message = _("Ether Type '%(eth_type)s' conflicts with protocol " + "'%(protocol)s'. 
Update or clear protocol before " + "changing ether type.") + + +def convert_to_int_dec_and_hex(data): + try: + return int(data, 0) + except (ValueError, TypeError): + pass + try: + return int(data) + except (ValueError, TypeError): + msg = _("'%s' is not a integer") % data + raise exceptions.InvalidInput(error_message=msg) + + +def convert_to_int_or_none(data): + if data is None: + return + return convert_to_int_dec_and_hex(data) + + +PROTO_NAME_ARP = 'arp' +SUPPORTED_PROTOCOLS = [constants.PROTO_NAME_ICMP, + constants.PROTO_NAME_TCP, + constants.PROTO_NAME_UDP, + PROTO_NAME_ARP] +ALLOW_ACTIONS = ['allow', 'accept'] +DROP_ACTIONS = ['drop', 'deny'] +SUPPORTED_ACTIONS = ALLOW_ACTIONS + DROP_ACTIONS + +ALIAS = 'packet-filter' +RESOURCE = 'packet_filter' +COLLECTION = 'packet_filters' +PACKET_FILTER_ACTION_REGEX = '(?i)^(%s)$' % '|'.join(SUPPORTED_ACTIONS) +PACKET_FILTER_PROTOCOL_REGEX = ('(?i)^(%s|0x[0-9a-fA-F]+|[0-9]+|)$' % + '|'.join(SUPPORTED_PROTOCOLS)) +PACKET_FILTER_ATTR_PARAMS = { + 'id': {'allow_post': False, 'allow_put': False, + 'validate': {'type:uuid': None}, + 'is_visible': True}, + 'name': {'allow_post': True, 'allow_put': True, 'default': '', + 'validate': {'type:string': None}, + 'is_visible': True}, + 'tenant_id': {'allow_post': True, 'allow_put': False, + 'validate': {'type:string': None}, + 'required_by_policy': True, + 'is_visible': True}, + 'network_id': {'allow_post': True, 'allow_put': False, + 'validate': {'type:uuid': None}, + 'is_visible': True}, + 'admin_state_up': {'allow_post': True, 'allow_put': True, + 'default': True, + 'convert_to': attributes.convert_to_boolean, + 'is_visible': True}, + 'status': {'allow_post': False, 'allow_put': False, + 'is_visible': True}, + 'action': {'allow_post': True, 'allow_put': True, + 'validate': {'type:regex': PACKET_FILTER_ACTION_REGEX}, + 'is_visible': True}, + 'priority': {'allow_post': True, 'allow_put': True, + 'convert_to': convert_to_int_dec_and_hex, + 'is_visible': True}, + 'in_port': 
{'allow_post': True, 'allow_put': False, + 'default': attributes.ATTR_NOT_SPECIFIED, + 'validate': {'type:uuid': None}, + 'is_visible': True}, + 'src_mac': {'allow_post': True, 'allow_put': True, + 'default': attributes.ATTR_NOT_SPECIFIED, + 'validate': {'type:mac_address_or_none': None}, + 'is_visible': True}, + 'dst_mac': {'allow_post': True, 'allow_put': True, + 'default': attributes.ATTR_NOT_SPECIFIED, + 'validate': {'type:mac_address_or_none': None}, + 'is_visible': True}, + 'eth_type': {'allow_post': True, 'allow_put': True, + 'default': attributes.ATTR_NOT_SPECIFIED, + 'convert_to': convert_to_int_or_none, + 'is_visible': True}, + 'src_cidr': {'allow_post': True, 'allow_put': True, + 'default': attributes.ATTR_NOT_SPECIFIED, + 'validate': {'type:subnet_or_none': None}, + 'is_visible': True}, + 'dst_cidr': {'allow_post': True, 'allow_put': True, + 'default': attributes.ATTR_NOT_SPECIFIED, + 'validate': {'type:subnet_or_none': None}, + 'is_visible': True}, + 'protocol': {'allow_post': True, 'allow_put': True, + 'default': attributes.ATTR_NOT_SPECIFIED, + 'validate': {'type:regex_or_none': + PACKET_FILTER_PROTOCOL_REGEX}, + 'is_visible': True}, + 'src_port': {'allow_post': True, 'allow_put': True, + 'default': attributes.ATTR_NOT_SPECIFIED, + 'convert_to': convert_to_int_or_none, + 'is_visible': True}, + 'dst_port': {'allow_post': True, 'allow_put': True, + 'default': attributes.ATTR_NOT_SPECIFIED, + 'convert_to': convert_to_int_or_none, + 'is_visible': True}, +} +PACKET_FILTER_ATTR_MAP = {COLLECTION: PACKET_FILTER_ATTR_PARAMS} + + +class Packetfilter(extensions.ExtensionDescriptor): + @classmethod + def get_name(cls): + return ALIAS + + @classmethod + def get_alias(cls): + return ALIAS + + @classmethod + def get_description(cls): + return "PacketFilters on OFC" + + @classmethod + def get_namespace(cls): + return "http://www.nec.co.jp/api/ext/packet_filter/v2.0" + + @classmethod + def get_updated(cls): + return "2013-07-16T00:00:00+09:00" + + @classmethod + def 
get_resources(cls): + qresource = quota.CountableResource(RESOURCE, + quota._count_resource, + 'quota_%s' % RESOURCE) + quota.QUOTAS.register_resource(qresource) + + resource = base.create_resource(COLLECTION, RESOURCE, + manager.NeutronManager.get_plugin(), + PACKET_FILTER_ATTR_PARAMS) + pf_ext = extensions.ResourceExtension( + COLLECTION, resource, attr_map=PACKET_FILTER_ATTR_PARAMS) + return [pf_ext] + + def get_extended_resources(self, version): + if version == "2.0": + return PACKET_FILTER_ATTR_MAP + else: + return {} diff --git a/neutron/plugins/nec/extensions/router_provider.py b/neutron/plugins/nec/extensions/router_provider.py new file mode 100644 index 000000000..102e23218 --- /dev/null +++ b/neutron/plugins/nec/extensions/router_provider.py @@ -0,0 +1,60 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013 NEC Corporation. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from neutron.api.v2 import attributes +from neutron.openstack.common import log as logging + + +LOG = logging.getLogger(__name__) + +ROUTER_PROVIDER = 'provider' + +ROUTER_PROVIDER_ATTRIBUTE = { + 'routers': {ROUTER_PROVIDER: + {'allow_post': True, + 'allow_put': False, + 'is_visible': True, + 'default': attributes.ATTR_NOT_SPECIFIED} + } +} + + +class Router_provider(object): + @classmethod + def get_name(cls): + return "Router Provider" + + @classmethod + def get_alias(cls): + return "router_provider" + + @classmethod + def get_description(cls): + return "Router Provider Support" + + @classmethod + def get_namespace(cls): + return "http://docs.openstack.org/ext/router_provider/api/v1.0" + + @classmethod + def get_updated(cls): + return "2013-08-20T10:00:00-00:00" + + def get_extended_resources(self, version): + if version == "2.0": + return ROUTER_PROVIDER_ATTRIBUTE + else: + return {} diff --git a/neutron/plugins/nec/nec_plugin.py b/neutron/plugins/nec/nec_plugin.py new file mode 100644 index 000000000..f2225e733 --- /dev/null +++ b/neutron/plugins/nec/nec_plugin.py @@ -0,0 +1,781 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2012-2013 NEC Corporation. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# @author: Ryota MIBU +# @author: Akihiro MOTOKI + +from neutron.agent import securitygroups_rpc as sg_rpc +from neutron.api import extensions as neutron_extensions +from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api +from neutron.api.v2 import attributes as attrs +from neutron.common import constants as const +from neutron.common import exceptions as n_exc +from neutron.common import rpc_compat +from neutron.common import topics +from neutron.db import agents_db +from neutron.db import agentschedulers_db +from neutron.db import allowedaddresspairs_db as addr_pair_db +from neutron.db import db_base_plugin_v2 +from neutron.db import dhcp_rpc_base +from neutron.db import external_net_db +from neutron.db import l3_rpc_base +from neutron.db import portbindings_base +from neutron.db import portbindings_db +from neutron.db import quota_db # noqa +from neutron.db import securitygroups_rpc_base as sg_db_rpc +from neutron.extensions import allowedaddresspairs as addr_pair +from neutron.extensions import portbindings +from neutron.openstack.common import excutils +from neutron.openstack.common import importutils +from neutron.openstack.common import log as logging +from neutron.openstack.common import uuidutils +from neutron.plugins.common import constants as svc_constants +from neutron.plugins.nec.common import config +from neutron.plugins.nec.common import exceptions as nexc +from neutron.plugins.nec.common import utils as necutils +from neutron.plugins.nec.db import api as ndb +from neutron.plugins.nec.db import router as rdb +from neutron.plugins.nec import extensions +from neutron.plugins.nec import nec_router +from neutron.plugins.nec import ofc_manager +from neutron.plugins.nec import packet_filter + +LOG = logging.getLogger(__name__) + + +class NECPluginV2(db_base_plugin_v2.NeutronDbPluginV2, + external_net_db.External_net_db_mixin, + nec_router.RouterMixin, + sg_db_rpc.SecurityGroupServerRpcMixin, + agentschedulers_db.DhcpAgentSchedulerDbMixin, + 
nec_router.L3AgentSchedulerDbMixin, + packet_filter.PacketFilterMixin, + portbindings_db.PortBindingMixin, + addr_pair_db.AllowedAddressPairsMixin): + """NECPluginV2 controls an OpenFlow Controller. + + The Neutron NECPluginV2 maps L2 logical networks to L2 virtualized networks + on an OpenFlow enabled network. An OpenFlow Controller (OFC) provides + L2 network isolation without VLAN and this plugin controls the OFC. + + NOTE: This is for Neutron API V2. Codes for V1.0 and V1.1 are available + at https://github.com/nec-openstack/neutron-openflow-plugin . + + The port binding extension enables an external application relay + information to and from the plugin. + """ + _supported_extension_aliases = ["agent", + "allowed-address-pairs", + "binding", + "dhcp_agent_scheduler", + "external-net", + "ext-gw-mode", + "extraroute", + "l3_agent_scheduler", + "packet-filter", + "quotas", + "router", + "router_provider", + "security-group", + ] + + @property + def supported_extension_aliases(self): + if not hasattr(self, '_aliases'): + aliases = self._supported_extension_aliases[:] + sg_rpc.disable_security_group_extension_by_config(aliases) + self.remove_packet_filter_extension_if_disabled(aliases) + self._aliases = aliases + return self._aliases + + def __init__(self): + super(NECPluginV2, self).__init__() + self.ofc = ofc_manager.OFCManager(self.safe_reference) + self.base_binding_dict = self._get_base_binding_dict() + portbindings_base.register_port_dict_function() + + neutron_extensions.append_api_extensions_path(extensions.__path__) + + self.setup_rpc() + self.l3_rpc_notifier = nec_router.L3AgentNotifyAPI() + + self.network_scheduler = importutils.import_object( + config.CONF.network_scheduler_driver + ) + self.router_scheduler = importutils.import_object( + config.CONF.router_scheduler_driver + ) + + nec_router.load_driver(self.safe_reference, self.ofc) + self.port_handlers = { + 'create': { + const.DEVICE_OWNER_ROUTER_GW: self.create_router_port, + 
const.DEVICE_OWNER_ROUTER_INTF: self.create_router_port, + 'default': self.activate_port_if_ready, + }, + 'delete': { + const.DEVICE_OWNER_ROUTER_GW: self.delete_router_port, + const.DEVICE_OWNER_ROUTER_INTF: self.delete_router_port, + 'default': self.deactivate_port, + } + } + + def setup_rpc(self): + self.service_topics = {svc_constants.CORE: topics.PLUGIN, + svc_constants.L3_ROUTER_NAT: topics.L3PLUGIN} + self.conn = rpc_compat.create_connection(new=True) + self.notifier = NECPluginV2AgentNotifierApi(topics.AGENT) + self.agent_notifiers[const.AGENT_TYPE_DHCP] = ( + dhcp_rpc_agent_api.DhcpAgentNotifyAPI() + ) + self.agent_notifiers[const.AGENT_TYPE_L3] = ( + nec_router.L3AgentNotifyAPI() + ) + + # NOTE: callback_sg is referred to from the sg unit test. + self.callback_sg = SecurityGroupServerRpcCallback() + self.endpoints = [ + NECPluginV2RPCCallbacks(self.safe_reference), + DhcpRpcCallback(), + L3RpcCallback(), + self.callback_sg, + agents_db.AgentExtRpcCallback()] + for svc_topic in self.service_topics.values(): + self.conn.create_consumer(svc_topic, self.endpoints, fanout=False) + # Consume from all consumers in threads + self.conn.consume_in_threads() + + def _update_resource_status(self, context, resource, id, status): + """Update status of specified resource.""" + request = {'status': status} + obj_getter = getattr(self, '_get_%s' % resource) + with context.session.begin(subtransactions=True): + obj_db = obj_getter(context, id) + obj_db.update(request) + + def _update_resource_status_if_changed(self, context, resource_type, + resource_dict, new_status): + if resource_dict['status'] != new_status: + self._update_resource_status(context, resource_type, + resource_dict['id'], + new_status) + resource_dict['status'] = new_status + + def _check_ofc_tenant_in_use(self, context, tenant_id): + """Check if the specified tenant is used.""" + # All networks are created on OFC + filters = {'tenant_id': [tenant_id]} + if self.get_networks_count(context, 
filters=filters): + return True + if rdb.get_router_count_by_provider(context.session, + nec_router.PROVIDER_OPENFLOW, + tenant_id): + return True + return False + + def _cleanup_ofc_tenant(self, context, tenant_id): + if not self._check_ofc_tenant_in_use(context, tenant_id): + try: + if self.ofc.exists_ofc_tenant(context, tenant_id): + self.ofc.delete_ofc_tenant(context, tenant_id) + else: + LOG.debug(_('_cleanup_ofc_tenant: No OFC tenant for %s'), + tenant_id) + except (nexc.OFCException, nexc.OFCMappingNotFound) as exc: + reason = _("delete_ofc_tenant() failed due to %s") % exc + LOG.warn(reason) + + def activate_port_if_ready(self, context, port, network=None): + """Activate port by creating port on OFC if ready. + + Conditions to activate port on OFC are: + * port admin_state is UP + * network admin_state is UP + * portinfo are available (to identify port on OFC) + """ + if not network: + network = super(NECPluginV2, self).get_network(context, + port['network_id']) + + if not port['admin_state_up']: + LOG.debug(_("activate_port_if_ready(): skip, " + "port.admin_state_up is False.")) + return port + elif not network['admin_state_up']: + LOG.debug(_("activate_port_if_ready(): skip, " + "network.admin_state_up is False.")) + return port + elif not ndb.get_portinfo(context.session, port['id']): + LOG.debug(_("activate_port_if_ready(): skip, " + "no portinfo for this port.")) + return port + elif self.ofc.exists_ofc_port(context, port['id']): + LOG.debug(_("activate_port_if_ready(): skip, " + "ofc_port already exists.")) + return port + + try: + self.ofc.create_ofc_port(context, port['id'], port) + port_status = const.PORT_STATUS_ACTIVE + except (nexc.OFCException, nexc.OFCMappingNotFound) as exc: + LOG.error(_("create_ofc_port() failed due to %s"), exc) + port_status = const.PORT_STATUS_ERROR + + if port_status != port['status']: + self._update_resource_status(context, "port", port['id'], + port_status) + port['status'] = port_status + + return port + + def 
deactivate_port(self, context, port, raise_exc=True): + """Deactivate port by deleting port from OFC if exists.""" + if not self.ofc.exists_ofc_port(context, port['id']): + LOG.debug(_("deactivate_port(): skip, ofc_port for port=%s " + "does not exist."), port['id']) + return port + + try: + self.ofc.delete_ofc_port(context, port['id'], port) + self._update_resource_status_if_changed( + context, "port", port, const.PORT_STATUS_DOWN) + return port + except (nexc.OFCResourceNotFound, nexc.OFCMappingNotFound): + # There is a case where multiple delete_port operation are + # running concurrently. For example, delete_port from + # release_dhcp_port and deletion of network owned ports in + # delete_network. In such cases delete_ofc_port may receive + # 404 error from OFC. + # Also there is a case where neutron port is deleted + # between exists_ofc_port and get_ofc_id in delete_ofc_port. + # In this case OFCMappingNotFound is raised. + # These two cases are valid situations. + LOG.info(_("deactivate_port(): OFC port for port=%s is " + "already removed."), port['id']) + # The port is already removed, so there is no need + # to update status in the database. + port['status'] = const.PORT_STATUS_DOWN + return port + except nexc.OFCException as exc: + with excutils.save_and_reraise_exception() as ctxt: + LOG.error(_("Failed to delete port=%(port)s from OFC: " + "%(exc)s"), {'port': port['id'], 'exc': exc}) + self._update_resource_status_if_changed( + context, "port", port, const.PORT_STATUS_ERROR) + if not raise_exc: + ctxt.reraise = False + return port + + def _net_status(self, network): + # NOTE: NEC Plugin accept admin_state_up. When it's False, this plugin + # deactivate all ports on the network to drop all packet and show + # status='DOWN' to users. But the network is kept defined on OFC. 
+ if network['network']['admin_state_up']: + return const.NET_STATUS_ACTIVE + else: + return const.NET_STATUS_DOWN + + def create_network(self, context, network): + """Create a new network entry on DB, and create it on OFC.""" + LOG.debug(_("NECPluginV2.create_network() called, " + "network=%s ."), network) + tenant_id = self._get_tenant_id_for_create(context, network['network']) + net_name = network['network']['name'] + net_id = uuidutils.generate_uuid() + + #set up default security groups + self._ensure_default_security_group(context, tenant_id) + + network['network']['id'] = net_id + network['network']['status'] = self._net_status(network) + + try: + if not self.ofc.exists_ofc_tenant(context, tenant_id): + self.ofc.create_ofc_tenant(context, tenant_id) + self.ofc.create_ofc_network(context, tenant_id, net_id, net_name) + except (nexc.OFCException, nexc.OFCMappingNotFound) as exc: + LOG.error(_("Failed to create network id=%(id)s on " + "OFC: %(exc)s"), {'id': net_id, 'exc': exc}) + network['network']['status'] = const.NET_STATUS_ERROR + + with context.session.begin(subtransactions=True): + new_net = super(NECPluginV2, self).create_network(context, network) + self._process_l3_create(context, new_net, network['network']) + + return new_net + + def update_network(self, context, id, network): + """Update network and handle resources associated with the network. + + Update network entry on DB. If 'admin_state_up' was changed, activate + or deactivate ports and packetfilters associated with the network. 
+ """ + LOG.debug(_("NECPluginV2.update_network() called, " + "id=%(id)s network=%(network)s ."), + {'id': id, 'network': network}) + + if 'admin_state_up' in network['network']: + network['network']['status'] = self._net_status(network) + + session = context.session + with session.begin(subtransactions=True): + old_net = super(NECPluginV2, self).get_network(context, id) + new_net = super(NECPluginV2, self).update_network(context, id, + network) + self._process_l3_update(context, new_net, network['network']) + + changed = (old_net['admin_state_up'] != new_net['admin_state_up']) + if changed and not new_net['admin_state_up']: + # disable all active ports of the network + filters = dict(network_id=[id], status=[const.PORT_STATUS_ACTIVE]) + ports = super(NECPluginV2, self).get_ports(context, + filters=filters) + for port in ports: + # If some error occurs, status of errored port is set to ERROR. + # This is avoids too many rollback. + # TODO(amotoki): Raise an exception after all port operations + # are finished to inform the caller of API of the failure. + self.deactivate_port(context, port, raise_exc=False) + elif changed and new_net['admin_state_up']: + # enable ports of the network + filters = dict(network_id=[id], status=[const.PORT_STATUS_DOWN], + admin_state_up=[True]) + ports = super(NECPluginV2, self).get_ports(context, + filters=filters) + for port in ports: + self.activate_port_if_ready(context, port, new_net) + + return new_net + + def delete_network(self, context, id): + """Delete network and packet_filters associated with the network. + + Delete network entry from DB and OFC. Then delete packet_filters + associated with the network. If the network is the last resource + of the tenant, delete unnessary ofc_tenant. 
+ """ + LOG.debug(_("NECPluginV2.delete_network() called, id=%s ."), id) + net_db = self._get_network(context, id) + tenant_id = net_db['tenant_id'] + ports = self.get_ports(context, filters={'network_id': [id]}) + + # check if there are any tenant owned ports in-use; + # consider ports owned by floating ips as auto_delete as if there are + # no other tenant owned ports, those floating ips are disassociated + # and will be auto deleted with self._process_l3_delete() + only_auto_del = all(p['device_owner'] in + db_base_plugin_v2.AUTO_DELETE_PORT_OWNERS or + p['device_owner'] == const.DEVICE_OWNER_FLOATINGIP + for p in ports) + if not only_auto_del: + raise n_exc.NetworkInUse(net_id=id) + + self._process_l3_delete(context, id) + + # Make sure auto-delete ports on OFC are deleted. + # If an error occurs during port deletion, + # delete_network will be aborted. + for port in [p for p in ports if p['device_owner'] + in db_base_plugin_v2.AUTO_DELETE_PORT_OWNERS]: + port = self.deactivate_port(context, port) + + # delete all packet_filters of the network from the controller + for pf in net_db.packetfilters: + self.delete_packet_filter(context, pf['id']) + + if self.ofc.exists_ofc_network(context, id): + try: + self.ofc.delete_ofc_network(context, id, net_db) + except (nexc.OFCException, nexc.OFCMappingNotFound) as exc: + with excutils.save_and_reraise_exception(): + reason = _("delete_network() failed due to %s") % exc + LOG.error(reason) + self._update_resource_status( + context, "network", net_db['id'], + const.NET_STATUS_ERROR) + + super(NECPluginV2, self).delete_network(context, id) + + self._cleanup_ofc_tenant(context, tenant_id) + + def _get_base_binding_dict(self): + binding = { + portbindings.VIF_TYPE: portbindings.VIF_TYPE_OVS, + portbindings.VIF_DETAILS: { + # TODO(rkukura): Replace with new VIF security details + portbindings.CAP_PORT_FILTER: + 'security-group' in self.supported_extension_aliases, + portbindings.OVS_HYBRID_PLUG: True + } + } + return binding + 
+ def _extend_port_dict_binding_portinfo(self, port_res, portinfo): + if portinfo: + port_res[portbindings.PROFILE] = { + 'datapath_id': portinfo['datapath_id'], + 'port_no': portinfo['port_no'], + } + elif portbindings.PROFILE in port_res: + del port_res[portbindings.PROFILE] + + def _validate_portinfo(self, profile): + key_specs = { + 'datapath_id': {'type:string': None, 'required': True}, + 'port_no': {'type:non_negative': None, 'required': True, + 'convert_to': attrs.convert_to_int} + } + msg = attrs._validate_dict_or_empty(profile, key_specs=key_specs) + if msg: + raise n_exc.InvalidInput(error_message=msg) + + datapath_id = profile.get('datapath_id') + port_no = profile.get('port_no') + try: + dpid = int(datapath_id, 16) + except ValueError: + raise nexc.ProfilePortInfoInvalidDataPathId() + if dpid > 0xffffffffffffffffL: + raise nexc.ProfilePortInfoInvalidDataPathId() + # Make sure dpid is a hex string beginning with 0x. + dpid = hex(dpid) + + if int(port_no) > 65535: + raise nexc.ProfilePortInfoInvalidPortNo() + + return {'datapath_id': dpid, 'port_no': port_no} + + def _process_portbindings_portinfo_create(self, context, port_data, port): + """Add portinfo according to bindings:profile in create_port(). + + :param context: neutron api request context + :param port_data: port attributes passed in PUT request + :param port: port attributes to be returned + """ + profile = port_data.get(portbindings.PROFILE) + # If portbindings.PROFILE is None, unspecified or an empty dict + # it is regarded that portbinding.PROFILE is not set. 
+ profile_set = attrs.is_attr_set(profile) and profile + if profile_set: + portinfo = self._validate_portinfo(profile) + portinfo['mac'] = port['mac_address'] + ndb.add_portinfo(context.session, port['id'], **portinfo) + else: + portinfo = None + self._extend_port_dict_binding_portinfo(port, portinfo) + + def _process_portbindings_portinfo_update(self, context, port_data, port): + """Update portinfo according to bindings:profile in update_port(). + + :param context: neutron api request context + :param port_data: port attributes passed in PUT request + :param port: port attributes to be returned + :returns: 'ADD', 'MOD', 'DEL' or None + """ + if portbindings.PROFILE not in port_data: + return + profile = port_data.get(portbindings.PROFILE) + # If binding:profile is None or an empty dict, + # it means binding:.profile needs to be cleared. + # TODO(amotoki): Allow Make None in binding:profile in + # the API layer. See LP bug #1220011. + profile_set = attrs.is_attr_set(profile) and profile + cur_portinfo = ndb.get_portinfo(context.session, port['id']) + if profile_set: + portinfo = self._validate_portinfo(profile) + portinfo_changed = 'ADD' + if cur_portinfo: + if (necutils.cmp_dpid(portinfo['datapath_id'], + cur_portinfo.datapath_id) and + portinfo['port_no'] == cur_portinfo.port_no): + return + ndb.del_portinfo(context.session, port['id']) + portinfo_changed = 'MOD' + portinfo['mac'] = port['mac_address'] + ndb.add_portinfo(context.session, port['id'], **portinfo) + elif cur_portinfo: + portinfo_changed = 'DEL' + portinfo = None + ndb.del_portinfo(context.session, port['id']) + else: + portinfo = None + portinfo_changed = None + self._extend_port_dict_binding_portinfo(port, portinfo) + return portinfo_changed + + def extend_port_dict_binding(self, port_res, port_db): + super(NECPluginV2, self).extend_port_dict_binding(port_res, port_db) + self._extend_port_dict_binding_portinfo(port_res, port_db.portinfo) + + def _process_portbindings_create(self, context, 
port_data, port): + super(NECPluginV2, self)._process_portbindings_create_and_update( + context, port_data, port) + self._process_portbindings_portinfo_create(context, port_data, port) + + def _process_portbindings_update(self, context, port_data, port): + super(NECPluginV2, self)._process_portbindings_create_and_update( + context, port_data, port) + portinfo_changed = self._process_portbindings_portinfo_update( + context, port_data, port) + return portinfo_changed + + def _get_port_handler(self, operation, device_owner): + handlers = self.port_handlers[operation] + handler = handlers.get(device_owner) + if handler: + return handler + else: + return handlers['default'] + + def create_port(self, context, port): + """Create a new port entry on DB, then try to activate it.""" + LOG.debug(_("NECPluginV2.create_port() called, port=%s ."), port) + + port['port']['status'] = const.PORT_STATUS_DOWN + + port_data = port['port'] + with context.session.begin(subtransactions=True): + self._ensure_default_security_group_on_port(context, port) + sgids = self._get_security_groups_on_port(context, port) + port = super(NECPluginV2, self).create_port(context, port) + self._process_portbindings_create(context, port_data, port) + self._process_port_create_security_group( + context, port, sgids) + port[addr_pair.ADDRESS_PAIRS] = ( + self._process_create_allowed_address_pairs( + context, port, + port_data.get(addr_pair.ADDRESS_PAIRS))) + self.notify_security_groups_member_updated(context, port) + + handler = self._get_port_handler('create', port['device_owner']) + return handler(context, port) + + def _update_ofc_port_if_required(self, context, old_port, new_port, + portinfo_changed): + def get_ofport_exist(port): + return (port['admin_state_up'] and + bool(port.get(portbindings.PROFILE))) + + # Determine it is required to update OFC port + need_add = False + need_del = False + need_packet_filter_update = False + + old_ofport_exist = get_ofport_exist(old_port) + new_ofport_exist = 
get_ofport_exist(new_port) + + if old_port['admin_state_up'] != new_port['admin_state_up']: + if new_port['admin_state_up']: + need_add |= new_ofport_exist + else: + need_del |= old_ofport_exist + + if portinfo_changed: + if portinfo_changed in ['DEL', 'MOD']: + need_del |= old_ofport_exist + if portinfo_changed in ['ADD', 'MOD']: + need_add |= new_ofport_exist + need_packet_filter_update |= True + + # Update OFC port if required + if need_del: + self.deactivate_port(context, new_port) + if need_packet_filter_update: + self.deactivate_packet_filters_by_port(context, id) + if need_add: + if need_packet_filter_update: + self.activate_packet_filters_by_port(context, id) + self.activate_port_if_ready(context, new_port) + + def update_port(self, context, id, port): + """Update port, and handle packetfilters associated with the port. + + Update network entry on DB. If admin_state_up was changed, activate + or deactivate the port and packetfilters associated with it. + """ + LOG.debug(_("NECPluginV2.update_port() called, " + "id=%(id)s port=%(port)s ."), + {'id': id, 'port': port}) + need_port_update_notify = False + with context.session.begin(subtransactions=True): + old_port = super(NECPluginV2, self).get_port(context, id) + new_port = super(NECPluginV2, self).update_port(context, id, port) + portinfo_changed = self._process_portbindings_update( + context, port['port'], new_port) + if addr_pair.ADDRESS_PAIRS in port['port']: + need_port_update_notify |= ( + self.update_address_pairs_on_port(context, id, port, + old_port, + new_port)) + need_port_update_notify |= self.update_security_group_on_port( + context, id, port, old_port, new_port) + + need_port_update_notify |= self.is_security_group_member_updated( + context, old_port, new_port) + if need_port_update_notify: + self.notifier.port_update(context, new_port) + + self._update_ofc_port_if_required(context, old_port, new_port, + portinfo_changed) + return new_port + + def delete_port(self, context, id, 
l3_port_check=True): + """Delete port and packet_filters associated with the port.""" + LOG.debug(_("NECPluginV2.delete_port() called, id=%s ."), id) + # ext_sg.SECURITYGROUPS attribute for the port is required + # since notifier.security_groups_member_updated() need the attribute. + # Thus we need to call self.get_port() instead of super().get_port() + port_db = self._get_port(context, id) + port = self._make_port_dict(port_db) + + handler = self._get_port_handler('delete', port['device_owner']) + # handler() raises an exception if an error occurs during processing. + port = handler(context, port) + + # delete all packet_filters of the port from the controller + for pf in port_db.packetfilters: + self.delete_packet_filter(context, pf['id']) + + # if needed, check to see if this is a port owned by + # and l3-router. If so, we should prevent deletion. + if l3_port_check: + self.prevent_l3_port_deletion(context, id) + with context.session.begin(subtransactions=True): + self.disassociate_floatingips(context, id) + self._delete_port_security_group_bindings(context, id) + super(NECPluginV2, self).delete_port(context, id) + self.notify_security_groups_member_updated(context, port) + + +class NECPluginV2AgentNotifierApi(rpc_compat.RpcProxy, + sg_rpc.SecurityGroupAgentRpcApiMixin): + '''RPC API for NEC plugin agent.''' + + BASE_RPC_API_VERSION = '1.0' + + def __init__(self, topic): + super(NECPluginV2AgentNotifierApi, self).__init__( + topic=topic, default_version=self.BASE_RPC_API_VERSION) + self.topic_port_update = topics.get_topic_name( + topic, topics.PORT, topics.UPDATE) + + def port_update(self, context, port): + self.fanout_cast(context, + self.make_msg('port_update', + port=port), + topic=self.topic_port_update) + + +class DhcpRpcCallback(rpc_compat.RpcCallback, + dhcp_rpc_base.DhcpRpcCallbackMixin): + # DhcpPluginApi BASE_RPC_API_VERSION + RPC_API_VERSION = '1.1' + + +class L3RpcCallback(rpc_compat.RpcCallback, l3_rpc_base.L3RpcCallbackMixin): + # 1.0 L3PluginApi 
BASE_RPC_API_VERSION + # 1.1 Support update_floatingip_statuses + RPC_API_VERSION = '1.1' + + +class SecurityGroupServerRpcCallback( + rpc_compat.RpcCallback, + sg_db_rpc.SecurityGroupServerRpcCallbackMixin): + + RPC_API_VERSION = sg_rpc.SG_RPC_VERSION + + @staticmethod + def get_port_from_device(device): + port = ndb.get_port_from_device(device) + if port: + port['device'] = device + LOG.debug(_("NECPluginV2RPCCallbacks.get_port_from_device() called, " + "device=%(device)s => %(ret)s."), + {'device': device, 'ret': port}) + return port + + +class NECPluginV2RPCCallbacks(rpc_compat.RpcCallback): + + RPC_API_VERSION = '1.0' + + def __init__(self, plugin): + super(NECPluginV2RPCCallbacks, self).__init__() + self.plugin = plugin + + def update_ports(self, rpc_context, **kwargs): + """Update ports' information and activate/deavtivate them. + + Expected input format is: + {'topic': 'q-agent-notifier', + 'agent_id': 'nec-q-agent.' + , + 'datapath_id': , + 'port_added': [,...], + 'port_removed': [,...]} + """ + LOG.debug(_("NECPluginV2RPCCallbacks.update_ports() called, " + "kwargs=%s ."), kwargs) + datapath_id = kwargs['datapath_id'] + session = rpc_context.session + for p in kwargs.get('port_added', []): + id = p['id'] + portinfo = ndb.get_portinfo(session, id) + if portinfo: + if (necutils.cmp_dpid(portinfo.datapath_id, datapath_id) and + portinfo.port_no == p['port_no']): + LOG.debug(_("update_ports(): ignore unchanged portinfo in " + "port_added message (port_id=%s)."), id) + continue + ndb.del_portinfo(session, id) + port = self._get_port(rpc_context, id) + if port: + ndb.add_portinfo(session, id, datapath_id, p['port_no'], + mac=p.get('mac', '')) + # NOTE: Make sure that packet filters on this port exist while + # the port is active to avoid unexpected packet transfer. 
+ if portinfo: + self.plugin.deactivate_port(rpc_context, port, + raise_exc=False) + self.plugin.deactivate_packet_filters_by_port( + rpc_context, id, raise_exc=False) + self.plugin.activate_packet_filters_by_port(rpc_context, id) + self.plugin.activate_port_if_ready(rpc_context, port) + for id in kwargs.get('port_removed', []): + portinfo = ndb.get_portinfo(session, id) + if not portinfo: + LOG.debug(_("update_ports(): ignore port_removed message " + "due to portinfo for port_id=%s was not " + "registered"), id) + continue + if not necutils.cmp_dpid(portinfo.datapath_id, datapath_id): + LOG.debug(_("update_ports(): ignore port_removed message " + "received from different host " + "(registered_datapath_id=%(registered)s, " + "received_datapath_id=%(received)s)."), + {'registered': portinfo.datapath_id, + 'received': datapath_id}) + continue + ndb.del_portinfo(session, id) + port = self._get_port(rpc_context, id) + if port: + self.plugin.deactivate_port(rpc_context, port, raise_exc=False) + self.plugin.deactivate_packet_filters_by_port( + rpc_context, id, raise_exc=False) + + def _get_port(self, context, port_id): + try: + return self.plugin.get_port(context, port_id) + except n_exc.PortNotFound: + return None diff --git a/neutron/plugins/nec/nec_router.py b/neutron/plugins/nec/nec_router.py new file mode 100644 index 000000000..e1a6ef6c9 --- /dev/null +++ b/neutron/plugins/nec/nec_router.py @@ -0,0 +1,358 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013 NEC Corporation. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Akihiro Motoki + +from neutron.api.rpc.agentnotifiers import l3_rpc_agent_api +from neutron.api.v2 import attributes as attr +from neutron.common import exceptions as n_exc +from neutron.db import db_base_plugin_v2 +from neutron.db import extraroute_db +from neutron.db import l3_agentschedulers_db +from neutron.db import l3_db +from neutron.db import l3_gwmode_db +from neutron.db import models_v2 +from neutron.extensions import l3 +from neutron.openstack.common import excutils +from neutron.openstack.common import importutils +from neutron.openstack.common import log as logging +from neutron.plugins.nec.common import config +from neutron.plugins.nec.common import constants as nconst +from neutron.plugins.nec.common import exceptions as nexc +from neutron.plugins.nec.db import router as rdb +from neutron.plugins.nec.extensions import router_provider as ext_provider + +LOG = logging.getLogger(__name__) + +PROVIDER_L3AGENT = nconst.ROUTER_PROVIDER_L3AGENT +PROVIDER_OPENFLOW = nconst.ROUTER_PROVIDER_OPENFLOW + +ROUTER_DRIVER_PATH = 'neutron.plugins.nec.router_drivers.' 
+ROUTER_DRIVER_MAP = { + PROVIDER_L3AGENT: ROUTER_DRIVER_PATH + 'RouterL3AgentDriver', + PROVIDER_OPENFLOW: ROUTER_DRIVER_PATH + 'RouterOpenFlowDriver' +} + +ROUTER_DRIVERS = {} + +STATUS_ACTIVE = nconst.ROUTER_STATUS_ACTIVE +STATUS_ERROR = nconst.ROUTER_STATUS_ERROR + + +class RouterMixin(extraroute_db.ExtraRoute_db_mixin, + l3_gwmode_db.L3_NAT_db_mixin): + + def create_router(self, context, router): + """Create a new router entry on DB, and create it on OFC.""" + LOG.debug(_("RouterMixin.create_router() called, " + "router=%s ."), router) + tenant_id = self._get_tenant_id_for_create(context, router['router']) + + provider = get_provider_with_default( + router['router'].get(ext_provider.ROUTER_PROVIDER)) + driver = get_driver_by_provider(provider) + + with context.session.begin(subtransactions=True): + new_router = super(RouterMixin, self).create_router(context, + router) + new_router['gw_port'] = self._get_gw_port_detail( + context, driver, new_router['gw_port_id']) + rdb.add_router_provider_binding(context.session, + provider, str(new_router['id'])) + self._extend_router_dict_provider(new_router, provider) + + # create router on the network controller + try: + return driver.create_router(context, tenant_id, new_router) + except nexc.RouterOverLimit: + with excutils.save_and_reraise_exception(): + super(RouterMixin, self).delete_router(context, + new_router['id']) + + def update_router(self, context, router_id, router): + LOG.debug(_("RouterMixin.update_router() called, " + "id=%(id)s, router=%(router)s ."), + {'id': router_id, 'router': router}) + + with context.session.begin(subtransactions=True): + old_rtr = super(RouterMixin, self).get_router(context, router_id) + provider = old_rtr[ext_provider.ROUTER_PROVIDER] + driver = get_driver_by_provider(provider) + old_rtr['gw_port'] = self._get_gw_port_detail( + context, driver, old_rtr['gw_port_id']) + new_rtr = super(RouterMixin, self).update_router( + context, router_id, router) + new_rtr['gw_port'] = 
self._get_gw_port_detail( + context, driver, new_rtr['gw_port_id']) + driver.update_router(context, router_id, old_rtr, new_rtr) + return new_rtr + + def delete_router(self, context, router_id): + LOG.debug(_("RouterMixin.delete_router() called, id=%s."), router_id) + + router = super(RouterMixin, self).get_router(context, router_id) + tenant_id = router['tenant_id'] + # Since l3_db.delete_router() has no interaction with the plugin layer, + # we need to check if the router can be deleted first. + self._check_router_in_use(context, router_id) + driver = self._get_router_driver_by_id(context, router_id) + # If gw_port exists, remove it. + gw_port = self._get_gw_port(context, router_id) + if gw_port: + driver.delete_interface(context, router_id, gw_port) + driver.delete_router(context, router_id, router) + + super(RouterMixin, self).delete_router(context, router_id) + + self._cleanup_ofc_tenant(context, tenant_id) + + def add_router_interface(self, context, router_id, interface_info): + LOG.debug(_("RouterMixin.add_router_interface() called, " + "id=%(id)s, interface=%(interface)s."), + {'id': router_id, 'interface': interface_info}) + return super(RouterMixin, self).add_router_interface( + context, router_id, interface_info) + + def remove_router_interface(self, context, router_id, interface_info): + LOG.debug(_("RouterMixin.remove_router_interface() called, " + "id=%(id)s, interface=%(interface)s."), + {'id': router_id, 'interface': interface_info}) + return super(RouterMixin, self).remove_router_interface( + context, router_id, interface_info) + + def create_router_port(self, context, port): + # This method is called from plugin.create_port() + router_id = port['device_id'] + driver = self._get_router_driver_by_id(context, router_id) + port = driver.add_interface(context, router_id, port) + return port + + def delete_router_port(self, context, port): + # This method is called from plugin.delete_port() + router_id = port['device_id'] + driver = 
self._get_router_driver_by_id(context, router_id) + return driver.delete_interface(context, router_id, port) + + def _get_gw_port_detail(self, context, driver, gw_port_id): + if not gw_port_id or not driver.need_gw_info: + return + ctx_elevated = context.elevated() + gw_port = self._get_port(ctx_elevated, gw_port_id) + # At this moment gw_port has been created, so it is guaranteed + # that fixed_ip is assigned for the gw_port. + ext_subnet_id = gw_port['fixed_ips'][0]['subnet_id'] + ext_subnet = self._get_subnet(ctx_elevated, ext_subnet_id) + gw_info = {'network_id': gw_port['network_id'], + 'ip_address': gw_port['fixed_ips'][0]['ip_address'], + 'mac_address': gw_port['mac_address'], + 'cidr': ext_subnet['cidr'], + 'gateway_ip': ext_subnet['gateway_ip']} + return gw_info + + def _get_gw_port(self, context, router_id): + device_filter = {'device_id': [router_id], + 'device_owner': [l3_db.DEVICE_OWNER_ROUTER_GW]} + ports = self.get_ports(context.elevated(), filters=device_filter) + if ports: + return ports[0] + + def _check_router_in_use(self, context, router_id): + with context.session.begin(subtransactions=True): + # Ensure that the router is not used + router_filter = {'router_id': [router_id]} + fips = self.get_floatingips_count(context.elevated(), + filters=router_filter) + if fips: + raise l3.RouterInUse(router_id=router_id) + + device_filter = {'device_id': [router_id], + 'device_owner': [l3_db.DEVICE_OWNER_ROUTER_INTF]} + ports = self.get_ports_count(context.elevated(), + filters=device_filter) + if ports: + raise l3.RouterInUse(router_id=router_id) + + def _get_router_for_floatingip(self, context, internal_port, + internal_subnet_id, + external_network_id): + """Get a router for a requested floating IP. + + OpenFlow vrouter does not support NAT, so we need to exclude them + from candidate routers for floating IP association. + This method is called in l3_db.get_assoc_data(). 
+ """ + subnet_db = self._get_subnet(context, internal_subnet_id) + if not subnet_db['gateway_ip']: + msg = (_('Cannot add floating IP to port on subnet %s ' + 'which has no gateway_ip') % internal_subnet_id) + raise n_exc.BadRequest(resource='floatingip', msg=msg) + + # find router interface ports on this network + router_intf_qry = context.session.query(models_v2.Port) + router_intf_ports = router_intf_qry.filter_by( + network_id=internal_port['network_id'], + device_owner=l3_db.DEVICE_OWNER_ROUTER_INTF) + + for intf_p in router_intf_ports: + if intf_p['fixed_ips'][0]['subnet_id'] == internal_subnet_id: + router_id = intf_p['device_id'] + router_gw_qry = context.session.query(models_v2.Port) + has_gw_port = router_gw_qry.filter_by( + network_id=external_network_id, + device_id=router_id, + device_owner=l3_db.DEVICE_OWNER_ROUTER_GW).count() + driver = self._get_router_driver_by_id(context, router_id) + if (has_gw_port and driver.floating_ip_support()): + return router_id + + raise l3.ExternalGatewayForFloatingIPNotFound( + subnet_id=internal_subnet_id, + external_network_id=external_network_id, + port_id=internal_port['id']) + + def _get_sync_routers(self, context, router_ids=None, active=None): + """Query routers and their gw ports for l3 agent. + + The difference from the superclass in l3_db is that this method + only lists routers hosted on l3-agents. 
+ """ + router_list = super(RouterMixin, self)._get_sync_routers( + context, router_ids, active) + if router_list: + _router_ids = [r['id'] for r in router_list] + agent_routers = rdb.get_routers_by_provider( + context.session, 'l3-agent', + router_ids=_router_ids) + router_list = [r for r in router_list + if r['id'] in agent_routers] + return router_list + + def _get_router_driver_by_id(self, context, router_id): + provider = self._get_provider_by_router_id(context, router_id) + return get_driver_by_provider(provider) + + def _get_provider_by_router_id(self, context, router_id): + return rdb.get_provider_by_router(context.session, router_id) + + def _extend_router_dict_provider(self, router_res, provider): + router_res[ext_provider.ROUTER_PROVIDER] = provider + + def extend_router_dict_provider(self, router_res, router_db): + # NOTE: router_db.provider is None just after creating a router, + # so we need to skip setting router_provider here. + if not router_db.provider: + return + self._extend_router_dict_provider(router_res, + router_db.provider['provider']) + + db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs( + l3.ROUTERS, [extend_router_dict_provider]) + + +class L3AgentSchedulerDbMixin(l3_agentschedulers_db.L3AgentSchedulerDbMixin): + + def auto_schedule_routers(self, context, host, router_ids): + router_ids = rdb.get_routers_by_provider( + context.session, nconst.ROUTER_PROVIDER_L3AGENT, router_ids) + # If no l3-agent hosted router, there is no need to schedule. 
+ if not router_ids: + return + return super(L3AgentSchedulerDbMixin, self).auto_schedule_routers( + context, host, router_ids) + + def schedule_router(self, context, router): + if (self._get_provider_by_router_id(context, router) == + nconst.ROUTER_PROVIDER_L3AGENT): + return super(L3AgentSchedulerDbMixin, self).schedule_router( + context, router) + + def add_router_to_l3_agent(self, context, id, router_id): + provider = self._get_provider_by_router_id(context, router_id) + if provider != nconst.ROUTER_PROVIDER_L3AGENT: + raise nexc.RouterProviderMismatch( + router_id=router_id, provider=provider, + expected_provider=nconst.ROUTER_PROVIDER_L3AGENT) + return super(L3AgentSchedulerDbMixin, self).add_router_to_l3_agent( + context, id, router_id) + + +class L3AgentNotifyAPI(l3_rpc_agent_api.L3AgentNotifyAPI): + + def _notification(self, context, method, router_ids, operation, data): + """Notify all the agents that are hosting the routers. + + _notification() is called in L3 db plugin for all routers regardless + the routers are hosted on l3 agents or not. When the routers are + not hosted on l3 agents, there is no need to notify. + This method filters routers not hosted by l3 agents. + """ + router_ids = rdb.get_routers_by_provider( + context.session, nconst.ROUTER_PROVIDER_L3AGENT, router_ids) + super(L3AgentNotifyAPI, self)._notification( + context, method, router_ids, operation, data) + + +def load_driver(plugin, ofc_manager): + + if (PROVIDER_OPENFLOW in ROUTER_DRIVER_MAP and + not ofc_manager.driver.router_supported): + LOG.warning( + _('OFC does not support router with provider=%(provider)s, ' + 'so removed it from supported provider ' + '(new router driver map=%(driver_map)s)'), + {'provider': PROVIDER_OPENFLOW, + 'driver_map': ROUTER_DRIVER_MAP}) + del ROUTER_DRIVER_MAP[PROVIDER_OPENFLOW] + + if config.PROVIDER.default_router_provider not in ROUTER_DRIVER_MAP: + LOG.error(_('default_router_provider %(default)s is supported! 
' + 'Please specify one of %(supported)s'), + {'default': config.PROVIDER.default_router_provider, + 'supported': ROUTER_DRIVER_MAP.keys()}) + raise SystemExit(1) + + enabled_providers = (set(config.PROVIDER.router_providers + + [config.PROVIDER.default_router_provider]) & + set(ROUTER_DRIVER_MAP.keys())) + + for driver in enabled_providers: + driver_klass = importutils.import_class(ROUTER_DRIVER_MAP[driver]) + ROUTER_DRIVERS[driver] = driver_klass(plugin, ofc_manager) + + LOG.info(_('Enabled router drivers: %s'), ROUTER_DRIVERS.keys()) + + if not ROUTER_DRIVERS: + LOG.error(_('No router provider is enabled. neutron-server terminated!' + ' (supported=%(supported)s, configured=%(config)s)'), + {'supported': ROUTER_DRIVER_MAP.keys(), + 'config': config.PROVIDER.router_providers}) + raise SystemExit(1) + + +def get_provider_with_default(provider): + if not attr.is_attr_set(provider): + provider = config.PROVIDER.default_router_provider + elif provider not in ROUTER_DRIVERS: + raise nexc.ProviderNotFound(provider=provider) + return provider + + +def get_driver_by_provider(provider): + if provider is None: + provider = config.PROVIDER.default_router_provider + elif provider not in ROUTER_DRIVERS: + raise nexc.ProviderNotFound(provider=provider) + return ROUTER_DRIVERS[provider] diff --git a/neutron/plugins/nec/ofc_driver_base.py b/neutron/plugins/nec/ofc_driver_base.py new file mode 100644 index 000000000..cde69c36d --- /dev/null +++ b/neutron/plugins/nec/ofc_driver_base.py @@ -0,0 +1,105 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2012 NEC Corporation. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# @author: Ryota MIBU +# @author: Akihiro MOTOKI + +import abc +import six + + +@six.add_metaclass(abc.ABCMeta) +class OFCDriverBase(object): + """OpenFlow Controller (OFC) Driver Specification. + + OFCDriverBase defines the minimum set of methods required by this plugin. + It would be better that other methods like update_* are implemented. + """ + + @abc.abstractmethod + def create_tenant(self, description, tenant_id=None): + """Create a new tenant at OpenFlow Controller. + + :param description: A description of this tenant. + :param tenant_id: A hint of OFC tenant ID. + A driver could use this id as a OFC id or ignore it. + :returns: ID of the tenant created at OpenFlow Controller. + :raises: neutron.plugin.nec.common.exceptions.OFCException + """ + pass + + @abc.abstractmethod + def delete_tenant(self, ofc_tenant_id): + """Delete a tenant at OpenFlow Controller. + + :raises: neutron.plugin.nec.common.exceptions.OFCException + """ + pass + + @abc.abstractmethod + def create_network(self, ofc_tenant_id, description, network_id=None): + """Create a new network on specified OFC tenant at OpenFlow Controller. + + :param ofc_tenant_id: a OFC tenant ID in which a new network belongs. + :param description: A description of this network. + :param network_id: A hint of an ID of OFC network. + :returns: ID of the network created at OpenFlow Controller. + ID returned must be unique in the OpenFlow Controller. + If a network is identified in conjunction with other information + such as a tenant ID, such information should be included in the ID. 
+ :raises: neutron.plugin.nec.common.exceptions.OFCException + """ + pass + + @abc.abstractmethod + def delete_network(self, ofc_network_id): + """Delete a netwrok at OpenFlow Controller. + + :raises: neutron.plugin.nec.common.exceptions.OFCException + """ + pass + + @abc.abstractmethod + def create_port(self, ofc_network_id, portinfo, + port_id=None, filters=None): + """Create a new port on specified network at OFC. + + :param ofc_network_id: a OFC tenant ID in which a new port belongs. + :param portinfo: An OpenFlow information of this port. + {'datapath_id': Switch ID that a port connected. + 'port_no': Port Number that a port connected on a Swtich. + 'vlan_id': VLAN ID that a port tagging. + 'mac': Mac address. + } + :param port_id: A hint of an ID of OFC port. + ID returned must be unique in the OpenFlow Controller. + + If a port is identified in combination with a network or + a tenant, such information should be included in the ID. + :param filters: A list of packet filter associated with the port. + Each element is a tuple (neutron ID, OFC ID) + + :returns: ID of the port created at OpenFlow Controller. + :raises: neutron.plugin.nec.common.exceptions.OFCException + """ + pass + + @abc.abstractmethod + def delete_port(self, ofc_port_id): + """Delete a port at OpenFlow Controller. + + :raises: neutron.plugin.nec.common.exceptions.OFCException + """ + pass diff --git a/neutron/plugins/nec/ofc_manager.py b/neutron/plugins/nec/ofc_manager.py new file mode 100644 index 000000000..a448a5445 --- /dev/null +++ b/neutron/plugins/nec/ofc_manager.py @@ -0,0 +1,201 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2012 NEC Corporation. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# @author: Ryota MIBU +# @author: Akihiro MOTOKI + +import netaddr + +from neutron.common import utils +from neutron.openstack.common import log as logging +from neutron.plugins.nec.common import config +from neutron.plugins.nec.common import exceptions as nexc +from neutron.plugins.nec.db import api as ndb +from neutron.plugins.nec import drivers + + +LOG = logging.getLogger(__name__) + + +class OFCManager(object): + """This class manages an OpenFlow Controller and map resources. + + This class manage an OpenFlow Controller (OFC) with a driver specified in + a configuration of this plugin. This keeps mappings between IDs on Neutron + and OFC for various entities such as Tenant, Network and Filter. A Port on + OFC is identified by a switch ID 'datapath_id' and a port number 'port_no' + of the switch. An ID named as 'ofc_*' is used to identify resource on OFC. 
+ """ + + def __init__(self, plugin): + self.driver = drivers.get_driver(config.OFC.driver)(config.OFC) + self.plugin = plugin + + def _get_ofc_id(self, context, resource, neutron_id): + return ndb.get_ofc_id(context.session, resource, neutron_id) + + def _exists_ofc_item(self, context, resource, neutron_id): + return ndb.exists_ofc_item(context.session, resource, neutron_id) + + def _add_ofc_item(self, context, resource, neutron_id, ofc_id): + # Ensure a new item is added to the new mapping table + ndb.add_ofc_item(context.session, resource, neutron_id, ofc_id) + + def _del_ofc_item(self, context, resource, neutron_id): + ndb.del_ofc_item(context.session, resource, neutron_id) + + def ensure_ofc_tenant(self, context, tenant_id): + if not self.exists_ofc_tenant(context, tenant_id): + self.create_ofc_tenant(context, tenant_id) + + def create_ofc_tenant(self, context, tenant_id): + desc = "ID=%s at OpenStack." % tenant_id + ofc_tenant_id = self.driver.create_tenant(desc, tenant_id) + self._add_ofc_item(context, "ofc_tenant", tenant_id, ofc_tenant_id) + + def exists_ofc_tenant(self, context, tenant_id): + return self._exists_ofc_item(context, "ofc_tenant", tenant_id) + + def delete_ofc_tenant(self, context, tenant_id): + ofc_tenant_id = self._get_ofc_id(context, "ofc_tenant", tenant_id) + self.driver.delete_tenant(ofc_tenant_id) + self._del_ofc_item(context, "ofc_tenant", tenant_id) + + def create_ofc_network(self, context, tenant_id, network_id, + network_name=None): + ofc_tenant_id = self._get_ofc_id(context, "ofc_tenant", tenant_id) + desc = "ID=%s Name=%s at Neutron." 
% (network_id, network_name) + ofc_net_id = self.driver.create_network(ofc_tenant_id, desc, + network_id) + self._add_ofc_item(context, "ofc_network", network_id, ofc_net_id) + + def exists_ofc_network(self, context, network_id): + return self._exists_ofc_item(context, "ofc_network", network_id) + + def delete_ofc_network(self, context, network_id, network): + ofc_net_id = self._get_ofc_id(context, "ofc_network", network_id) + self.driver.delete_network(ofc_net_id) + self._del_ofc_item(context, "ofc_network", network_id) + + def create_ofc_port(self, context, port_id, port): + ofc_net_id = self._get_ofc_id(context, "ofc_network", + port['network_id']) + portinfo = ndb.get_portinfo(context.session, port_id) + if not portinfo: + raise nexc.PortInfoNotFound(id=port_id) + + # Associate packet filters + filters = self.plugin.get_packet_filters_for_port(context, port) + if filters is not None: + params = {'filters': filters} + else: + params = {} + + ofc_port_id = self.driver.create_port(ofc_net_id, portinfo, port_id, + **params) + self._add_ofc_item(context, "ofc_port", port_id, ofc_port_id) + + def exists_ofc_port(self, context, port_id): + return self._exists_ofc_item(context, "ofc_port", port_id) + + def delete_ofc_port(self, context, port_id, port): + ofc_port_id = self._get_ofc_id(context, "ofc_port", port_id) + self.driver.delete_port(ofc_port_id) + self._del_ofc_item(context, "ofc_port", port_id) + + def create_ofc_packet_filter(self, context, filter_id, filter_dict): + ofc_net_id = self._get_ofc_id(context, "ofc_network", + filter_dict['network_id']) + in_port_id = filter_dict.get('in_port') + portinfo = None + if in_port_id: + portinfo = ndb.get_portinfo(context.session, in_port_id) + if not portinfo: + raise nexc.PortInfoNotFound(id=in_port_id) + + # Collect ports to be associated with the filter + apply_ports = ndb.get_active_ports_on_ofc( + context, filter_dict['network_id'], in_port_id) + ofc_pf_id = self.driver.create_filter(ofc_net_id, + filter_dict, 
portinfo, filter_id, + apply_ports) + self._add_ofc_item(context, "ofc_packet_filter", filter_id, ofc_pf_id) + + def update_ofc_packet_filter(self, context, filter_id, filter_dict): + ofc_pf_id = self._get_ofc_id(context, "ofc_packet_filter", filter_id) + ofc_pf_id = self.driver.convert_ofc_filter_id(context, ofc_pf_id) + self.driver.update_filter(ofc_pf_id, filter_dict) + + def exists_ofc_packet_filter(self, context, filter_id): + return self._exists_ofc_item(context, "ofc_packet_filter", filter_id) + + def delete_ofc_packet_filter(self, context, filter_id): + ofc_pf_id = self._get_ofc_id(context, "ofc_packet_filter", filter_id) + self.driver.delete_filter(ofc_pf_id) + self._del_ofc_item(context, "ofc_packet_filter", filter_id) + + def create_ofc_router(self, context, tenant_id, router_id, name=None): + ofc_tenant_id = self._get_ofc_id(context, "ofc_tenant", tenant_id) + desc = "ID=%s Name=%s at Neutron." % (router_id, name) + ofc_router_id = self.driver.create_router(ofc_tenant_id, router_id, + desc) + self._add_ofc_item(context, "ofc_router", router_id, ofc_router_id) + + def exists_ofc_router(self, context, router_id): + return self._exists_ofc_item(context, "ofc_router", router_id) + + def delete_ofc_router(self, context, router_id, router): + ofc_router_id = self._get_ofc_id(context, "ofc_router", router_id) + self.driver.delete_router(ofc_router_id) + self._del_ofc_item(context, "ofc_router", router_id) + + def add_ofc_router_interface(self, context, router_id, port_id, port): + # port must have the following fields: + # network_id, cidr, ip_address, mac_address + ofc_router_id = self._get_ofc_id(context, "ofc_router", router_id) + ofc_net_id = self._get_ofc_id(context, "ofc_network", + port['network_id']) + ip_address = '%s/%s' % (port['ip_address'], + netaddr.IPNetwork(port['cidr']).prefixlen) + mac_address = port['mac_address'] + ofc_inf_id = self.driver.add_router_interface( + ofc_router_id, ofc_net_id, ip_address, mac_address) + # Use port mapping table 
to maintain an interface of OFC router + self._add_ofc_item(context, "ofc_port", port_id, ofc_inf_id) + + def delete_ofc_router_interface(self, context, router_id, port_id): + # Use port mapping table to maintain an interface of OFC router + ofc_inf_id = self._get_ofc_id(context, "ofc_port", port_id) + self.driver.delete_router_interface(ofc_inf_id) + self._del_ofc_item(context, "ofc_port", port_id) + + def update_ofc_router_route(self, context, router_id, new_routes): + ofc_router_id = self._get_ofc_id(context, "ofc_router", router_id) + ofc_routes = self.driver.list_router_routes(ofc_router_id) + route_dict = {} + cur_routes = [] + for r in ofc_routes: + key = ','.join((r['destination'], r['nexthop'])) + route_dict[key] = r['id'] + del r['id'] + cur_routes.append(r) + added, removed = utils.diff_list_of_dict(cur_routes, new_routes) + for r in removed: + key = ','.join((r['destination'], r['nexthop'])) + route_id = route_dict[key] + self.driver.delete_router_route(route_id) + for r in added: + self.driver.add_router_route(ofc_router_id, r['destination'], + r['nexthop']) diff --git a/neutron/plugins/nec/packet_filter.py b/neutron/plugins/nec/packet_filter.py new file mode 100644 index 000000000..df48ebff8 --- /dev/null +++ b/neutron/plugins/nec/packet_filter.py @@ -0,0 +1,258 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012-2013 NEC Corporation. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# @author: Ryota MIBU + +from neutron.openstack.common import excutils +from neutron.openstack.common import log as logging +from neutron.plugins.nec.common import config +from neutron.plugins.nec.common import exceptions as nexc +from neutron.plugins.nec.db import api as ndb +from neutron.plugins.nec.db import packetfilter as pf_db + + +LOG = logging.getLogger(__name__) + + +class PacketFilterMixin(pf_db.PacketFilterDbMixin): + """Mixin class to add packet filter to NECPluginV2.""" + + @property + def packet_filter_enabled(self): + if not hasattr(self, '_packet_filter_enabled'): + self._packet_filter_enabled = ( + config.OFC.enable_packet_filter and + self.ofc.driver.filter_supported()) + return self._packet_filter_enabled + + def remove_packet_filter_extension_if_disabled(self, aliases): + if not self.packet_filter_enabled: + LOG.debug(_('Disabled packet-filter extension.')) + aliases.remove('packet-filter') + + def create_packet_filter(self, context, packet_filter): + """Create a new packet_filter entry on DB, then try to activate it.""" + LOG.debug(_("create_packet_filter() called, packet_filter=%s ."), + packet_filter) + + if hasattr(self.ofc.driver, 'validate_filter_create'): + pf = packet_filter['packet_filter'] + self.ofc.driver.validate_filter_create(context, pf) + pf = super(PacketFilterMixin, self).create_packet_filter( + context, packet_filter) + + return self.activate_packet_filter_if_ready(context, pf) + + def update_packet_filter(self, context, id, packet_filter): + """Update packet_filter entry on DB, and recreate it if changed. + + If any rule of the packet_filter was changed, recreate it on OFC. 
+ """ + LOG.debug(_("update_packet_filter() called, " + "id=%(id)s packet_filter=%(packet_filter)s ."), + {'id': id, 'packet_filter': packet_filter}) + + pf_data = packet_filter['packet_filter'] + if hasattr(self.ofc.driver, 'validate_filter_update'): + self.ofc.driver.validate_filter_update(context, pf_data) + + # validate ownership + pf_old = self.get_packet_filter(context, id) + + pf = super(PacketFilterMixin, self).update_packet_filter( + context, id, packet_filter) + + def _packet_filter_changed(old_pf, new_pf): + LOG.debug('old_pf=%(old_pf)s, new_pf=%(new_pf)s', + {'old_pf': old_pf, 'new_pf': new_pf}) + # When the status is ERROR, force sync to OFC. + if old_pf['status'] == pf_db.PF_STATUS_ERROR: + LOG.debug('update_packet_filter: Force filter update ' + 'because the previous status is ERROR.') + return True + for key in new_pf: + if key in ('id', 'name', 'tenant_id', 'network_id', + 'in_port', 'status'): + continue + if old_pf[key] != new_pf[key]: + return True + return False + + if _packet_filter_changed(pf_old, pf): + if hasattr(self.ofc.driver, 'update_filter'): + # admin_state is changed + if pf_old['admin_state_up'] != pf['admin_state_up']: + LOG.debug('update_packet_filter: admin_state ' + 'is changed to %s', pf['admin_state_up']) + if pf['admin_state_up']: + self.activate_packet_filter_if_ready(context, pf) + else: + self.deactivate_packet_filter(context, pf) + elif pf['admin_state_up']: + LOG.debug('update_packet_filter: admin_state is ' + 'unchanged (True)') + if self.ofc.exists_ofc_packet_filter(context, id): + pf = self._update_packet_filter(context, pf, pf_data) + else: + pf = self.activate_packet_filter_if_ready(context, pf) + else: + LOG.debug('update_packet_filter: admin_state is unchanged ' + '(False). 
No need to update OFC filter.') + else: + pf = self.deactivate_packet_filter(context, pf) + pf = self.activate_packet_filter_if_ready(context, pf) + + return pf + + def _update_packet_filter(self, context, new_pf, pf_data): + pf_id = new_pf['id'] + prev_status = new_pf['status'] + try: + # If previous status is ERROR, try to sync all attributes. + pf = new_pf if prev_status == pf_db.PF_STATUS_ERROR else pf_data + self.ofc.update_ofc_packet_filter(context, pf_id, pf) + new_status = pf_db.PF_STATUS_ACTIVE + if new_status != prev_status: + self._update_resource_status(context, "packet_filter", + pf_id, new_status) + new_pf['status'] = new_status + return new_pf + except Exception as exc: + with excutils.save_and_reraise_exception(): + if (isinstance(exc, nexc.OFCException) or + isinstance(exc, nexc.OFCConsistencyBroken)): + LOG.error(_("Failed to create packet_filter id=%(id)s on " + "OFC: %(exc)s"), + {'id': pf_id, 'exc': exc}) + new_status = pf_db.PF_STATUS_ERROR + if new_status != prev_status: + self._update_resource_status(context, "packet_filter", + pf_id, new_status) + + def delete_packet_filter(self, context, id): + """Deactivate and delete packet_filter.""" + LOG.debug(_("delete_packet_filter() called, id=%s ."), id) + + # validate ownership + pf = self.get_packet_filter(context, id) + + # deactivate_packet_filter() raises an exception + # if an error occurs during processing. + pf = self.deactivate_packet_filter(context, pf) + + super(PacketFilterMixin, self).delete_packet_filter(context, id) + + def activate_packet_filter_if_ready(self, context, packet_filter): + """Activate packet_filter by creating filter on OFC if ready. 
+ + Conditions to create packet_filter on OFC are: + * packet_filter admin_state is UP + * (if 'in_port' is specified) portinfo is available + """ + LOG.debug(_("activate_packet_filter_if_ready() called, " + "packet_filter=%s."), packet_filter) + + pf_id = packet_filter['id'] + in_port_id = packet_filter.get('in_port') + current = packet_filter['status'] + + pf_status = current + if not packet_filter['admin_state_up']: + LOG.debug(_("activate_packet_filter_if_ready(): skip pf_id=%s, " + "packet_filter.admin_state_up is False."), pf_id) + elif in_port_id and not ndb.get_portinfo(context.session, in_port_id): + LOG.debug(_("activate_packet_filter_if_ready(): skip " + "pf_id=%s, no portinfo for the in_port."), pf_id) + elif self.ofc.exists_ofc_packet_filter(context, packet_filter['id']): + LOG.debug(_("_activate_packet_filter_if_ready(): skip, " + "ofc_packet_filter already exists.")) + else: + LOG.debug(_("activate_packet_filter_if_ready(): create " + "packet_filter id=%s on OFC."), pf_id) + try: + self.ofc.create_ofc_packet_filter(context, pf_id, + packet_filter) + pf_status = pf_db.PF_STATUS_ACTIVE + except (nexc.OFCException, nexc.OFCMappingNotFound) as exc: + LOG.error(_("Failed to create packet_filter id=%(id)s on " + "OFC: %(exc)s"), {'id': pf_id, 'exc': exc}) + pf_status = pf_db.PF_STATUS_ERROR + + if pf_status != current: + self._update_resource_status(context, "packet_filter", pf_id, + pf_status) + packet_filter.update({'status': pf_status}) + + return packet_filter + + def deactivate_packet_filter(self, context, packet_filter): + """Deactivate packet_filter by deleting filter from OFC if exixts.""" + LOG.debug(_("deactivate_packet_filter_if_ready() called, " + "packet_filter=%s."), packet_filter) + pf_id = packet_filter['id'] + + if not self.ofc.exists_ofc_packet_filter(context, pf_id): + LOG.debug(_("deactivate_packet_filter(): skip, " + "Not found OFC Mapping for packet_filter id=%s."), + pf_id) + return packet_filter + + 
LOG.debug(_("deactivate_packet_filter(): " + "deleting packet_filter id=%s from OFC."), pf_id) + try: + self.ofc.delete_ofc_packet_filter(context, pf_id) + self._update_resource_status_if_changed( + context, "packet_filter", packet_filter, pf_db.PF_STATUS_DOWN) + return packet_filter + except (nexc.OFCException, nexc.OFCMappingNotFound) as exc: + with excutils.save_and_reraise_exception(): + LOG.error(_("Failed to delete packet_filter id=%(id)s " + "from OFC: %(exc)s"), + {'id': pf_id, 'exc': str(exc)}) + self._update_resource_status_if_changed( + context, "packet_filter", packet_filter, + pf_db.PF_STATUS_ERROR) + + def activate_packet_filters_by_port(self, context, port_id): + if not self.packet_filter_enabled: + return + + filters = {'in_port': [port_id], 'admin_state_up': [True], + 'status': [pf_db.PF_STATUS_DOWN]} + pfs = self.get_packet_filters(context, filters=filters) + for pf in pfs: + self.activate_packet_filter_if_ready(context, pf) + + def deactivate_packet_filters_by_port(self, context, port_id, + raise_exc=True): + if not self.packet_filter_enabled: + return + + filters = {'in_port': [port_id], 'status': [pf_db.PF_STATUS_ACTIVE]} + pfs = self.get_packet_filters(context, filters=filters) + error = False + for pf in pfs: + try: + self.deactivate_packet_filter(context, pf) + except (nexc.OFCException, nexc.OFCMappingNotFound): + error = True + if raise_exc and error: + raise nexc.OFCException(_('Error occurred while disabling packet ' + 'filter(s) for port %s'), port_id) + + def get_packet_filters_for_port(self, context, port): + if self.packet_filter_enabled: + return super(PacketFilterMixin, + self).get_packet_filters_for_port(context, port) diff --git a/neutron/plugins/nec/router_drivers.py b/neutron/plugins/nec/router_drivers.py new file mode 100644 index 000000000..407ea5365 --- /dev/null +++ b/neutron/plugins/nec/router_drivers.py @@ -0,0 +1,224 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013 NEC Corporation. 
All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Akihiro Motoki + +import abc +import httplib + +import six + +from neutron.common import log as call_log +from neutron.common import utils +from neutron.openstack.common import excutils +from neutron.openstack.common import log as logging +from neutron.plugins.nec.common import constants as nconst +from neutron.plugins.nec.common import exceptions as nexc + +LOG = logging.getLogger(__name__) + +PROVIDER_OPENFLOW = nconst.ROUTER_PROVIDER_OPENFLOW + + +@six.add_metaclass(abc.ABCMeta) +class RouterDriverBase(object): + + def __init__(self, plugin, ofc_manager): + self.plugin = plugin + self.ofc = ofc_manager + + def floating_ip_support(self): + return True + + @abc.abstractmethod + def create_router(self, context, tenant_id, router): + pass + + @abc.abstractmethod + def update_router(self, context, router_id, old_router, new_router): + pass + + @abc.abstractmethod + def delete_router(self, context, router_id, router): + pass + + @abc.abstractmethod + def add_interface(self, context, router_id, port): + pass + + @abc.abstractmethod + def delete_interface(self, context, router_id, port): + pass + + +class RouterL3AgentDriver(RouterDriverBase): + + need_gw_info = False + + @call_log.log + def create_router(self, context, tenant_id, router): + return router + + @call_log.log + def update_router(self, context, router_id, old_router, new_router): + return new_router + + @call_log.log + def 
delete_router(self, context, router_id, router): + pass + + @call_log.log + def add_interface(self, context, router_id, port): + return self.plugin.activate_port_if_ready(context, port) + + @call_log.log + def delete_interface(self, context, router_id, port): + return self.plugin.deactivate_port(context, port) + + +class RouterOpenFlowDriver(RouterDriverBase): + + need_gw_info = True + + def floating_ip_support(self): + return self.ofc.driver.router_nat_supported + + def _process_gw_port(self, gw_info, routes): + if gw_info and gw_info['gateway_ip']: + routes.append({'destination': '0.0.0.0/0', + 'nexthop': gw_info['gateway_ip']}) + + @call_log.log + def create_router(self, context, tenant_id, router): + try: + router_id = router['id'] + added_routes = [] + self.ofc.ensure_ofc_tenant(context, tenant_id) + self.ofc.create_ofc_router(context, tenant_id, router_id, + router['name']) + self._process_gw_port(router['gw_port'], added_routes) + if added_routes: + self.ofc.update_ofc_router_route(context, router_id, + added_routes, []) + new_status = nconst.ROUTER_STATUS_ACTIVE + self.plugin._update_resource_status(context, "router", + router['id'], + new_status) + router['status'] = new_status + return router + except (nexc.OFCException, nexc.OFCMappingNotFound) as exc: + with excutils.save_and_reraise_exception(): + if (isinstance(exc, nexc.OFCException) and + exc.status == httplib.CONFLICT): + raise nexc.RouterOverLimit(provider=PROVIDER_OPENFLOW) + reason = _("create_router() failed due to %s") % exc + LOG.error(reason) + new_status = nconst.ROUTER_STATUS_ERROR + self._update_resource_status(context, "router", + router['id'], + new_status) + + @call_log.log + def update_router(self, context, router_id, old_router, new_router): + old_routes = old_router['routes'][:] + new_routes = new_router['routes'][:] + self._process_gw_port(old_router['gw_port'], old_routes) + self._process_gw_port(new_router['gw_port'], new_routes) + added, removed = 
utils.diff_list_of_dict(old_routes, new_routes) + if added or removed: + try: + # NOTE(amotoki): PFC supports one-by-one route update at now. + # It means there may be a case where some route is updated but + # some not. To allow the next call of failures to sync routes + # with Neutron side, we pass the whole new routes here. + # PFC should support atomic route update in the future. + self.ofc.update_ofc_router_route(context, router_id, + new_routes) + new_status = nconst.ROUTER_STATUS_ACTIVE + self.plugin._update_resource_status( + context, "router", router_id, new_status) + new_router['status'] = new_status + except (nexc.OFCException, nexc.OFCMappingNotFound) as exc: + with excutils.save_and_reraise_exception(): + reason = _("_update_ofc_routes() failed due to %s") % exc + LOG.error(reason) + new_status = nconst.ROUTER_STATUS_ERROR + self.plugin._update_resource_status( + context, "router", router_id, new_status) + return new_router + + @call_log.log + def delete_router(self, context, router_id, router): + if not self.ofc.exists_ofc_router(context, router_id): + return + try: + self.ofc.delete_ofc_router(context, router_id, router) + except (nexc.OFCException, nexc.OFCMappingNotFound) as exc: + with excutils.save_and_reraise_exception(): + LOG.error(_("delete_router() failed due to %s"), exc) + self.plugin._update_resource_status( + context, "router", router_id, nconst.ROUTER_STATUS_ERROR) + + @call_log.log + def add_interface(self, context, router_id, port): + port_id = port['id'] + # port['fixed_ips'] may be empty if ext_net has no subnet. + # Such port is invalid for a router port and we don't create a port + # on OFC. The port is removed in l3_db._create_router_gw_port. + if not port['fixed_ips']: + msg = _('RouterOpenFlowDriver.add_interface(): the requested port ' + 'has no subnet. add_interface() is skipped. 
' + 'router_id=%(id)s, port=%(port)s)') + LOG.warning(msg, {'id': router_id, 'port': port}) + return port + fixed_ip = port['fixed_ips'][0] + subnet = self.plugin._get_subnet(context, fixed_ip['subnet_id']) + port_info = {'network_id': port['network_id'], + 'ip_address': fixed_ip['ip_address'], + 'cidr': subnet['cidr'], + 'mac_address': port['mac_address']} + try: + self.ofc.add_ofc_router_interface(context, router_id, + port_id, port_info) + new_status = nconst.ROUTER_STATUS_ACTIVE + self.plugin._update_resource_status( + context, "port", port_id, new_status) + return port + except (nexc.OFCException, nexc.OFCMappingNotFound) as exc: + with excutils.save_and_reraise_exception(): + reason = _("add_router_interface() failed due to %s") % exc + LOG.error(reason) + new_status = nconst.ROUTER_STATUS_ERROR + self.plugin._update_resource_status( + context, "port", port_id, new_status) + + @call_log.log + def delete_interface(self, context, router_id, port): + port_id = port['id'] + try: + self.ofc.delete_ofc_router_interface(context, router_id, port_id) + new_status = nconst.ROUTER_STATUS_ACTIVE + self.plugin._update_resource_status(context, "port", port_id, + new_status) + port['status'] = new_status + return port + except (nexc.OFCException, nexc.OFCMappingNotFound) as exc: + with excutils.save_and_reraise_exception(): + reason = _("delete_router_interface() failed due to %s") % exc + LOG.error(reason) + new_status = nconst.ROUTER_STATUS_ERROR + self.plugin._update_resource_status(context, "port", port_id, + new_status) diff --git a/neutron/plugins/nuage/__init__.py b/neutron/plugins/nuage/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/neutron/plugins/nuage/common/__init__.py b/neutron/plugins/nuage/common/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/neutron/plugins/nuage/common/config.py b/neutron/plugins/nuage/common/config.py new file mode 100644 index 000000000..cd5a8a80a --- /dev/null +++ 
b/neutron/plugins/nuage/common/config.py @@ -0,0 +1,47 @@ +# Copyright 2014 Alcatel-Lucent USA Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Ronak Shah, Nuage Networks, Alcatel-Lucent USA Inc. + +from oslo.config import cfg + + +restproxy_opts = [ + cfg.StrOpt('server', default='localhost:8800', + help=_("IP Address and Port of Nuage's VSD server")), + cfg.StrOpt('serverauth', default='username:password', + secret=True, + help=_("Username and password for authentication")), + cfg.BoolOpt('serverssl', default=False, + help=_("Boolean for SSL connection with VSD server")), + cfg.StrOpt('base_uri', default='/', + help=_("Nuage provided base uri to reach out to VSD")), + cfg.StrOpt('organization', default='system', + help=_("Organization name in which VSD will orchestrate " + "network resources using openstack")), + cfg.StrOpt('auth_resource', default='', + help=_("Nuage provided uri for initial authorization to " + "access VSD")), + cfg.StrOpt('default_net_partition_name', + default='OpenStackDefaultNetPartition', + help=_("Default Network partition in which VSD will " + "orchestrate network resources using openstack")), + cfg.IntOpt('default_floatingip_quota', + default=254, + help=_("Per Net Partition quota of floating ips")), +] + + +def nuage_register_cfg_opts(): + cfg.CONF.register_opts(restproxy_opts, "RESTPROXY") diff --git a/neutron/plugins/nuage/common/constants.py b/neutron/plugins/nuage/common/constants.py new file mode 100644 index 
000000000..ff2680bf7 --- /dev/null +++ b/neutron/plugins/nuage/common/constants.py @@ -0,0 +1,28 @@ +# Copyright 2014 Alcatel-Lucent USA Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Ronak Shah, Nuage Networks, Alcatel-Lucent USA Inc. + +from neutron.common import constants + +AUTO_CREATE_PORT_OWNERS = [ + constants.DEVICE_OWNER_DHCP, + constants.DEVICE_OWNER_ROUTER_INTF, + constants.DEVICE_OWNER_ROUTER_GW, + constants.DEVICE_OWNER_FLOATINGIP +] + +NOVA_PORT_OWNER_PREF = 'compute:' + +SR_TYPE_FLOATING = "FLOATING" diff --git a/neutron/plugins/nuage/common/exceptions.py b/neutron/plugins/nuage/common/exceptions.py new file mode 100644 index 000000000..2e1158896 --- /dev/null +++ b/neutron/plugins/nuage/common/exceptions.py @@ -0,0 +1,24 @@ +# Copyright 2014 Alcatel-Lucent USA Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Ronak Shah, Nuage Networks, Alcatel-Lucent USA Inc. 
+ + +''' Nuage specific exceptions ''' + +from neutron.common import exceptions as n_exc + + +class OperationNotSupported(n_exc.InvalidConfigurationOption): + message = _("Nuage Plugin does not support this operation: %(msg)s") diff --git a/neutron/plugins/nuage/extensions/__init__.py b/neutron/plugins/nuage/extensions/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/neutron/plugins/nuage/extensions/netpartition.py b/neutron/plugins/nuage/extensions/netpartition.py new file mode 100644 index 000000000..c731e1ded --- /dev/null +++ b/neutron/plugins/nuage/extensions/netpartition.py @@ -0,0 +1,107 @@ +# Copyright 2014 Alcatel-Lucent USA Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Ronak Shah, Nuage Networks, Alcatel-Lucent USA Inc. 
# ---- neutron/plugins/nuage/extensions/netpartition.py ----
# (Apache 2.0 license header as in original; @author: Ronak Shah,
#  Nuage Networks, Alcatel-Lucent USA Inc.)

import abc

from neutron.api import extensions
from neutron.api.v2 import base
from neutron import manager
from neutron import quota


# Attribute Map
# REST attribute schema for the 'net_partitions' collection; consumed by
# the generic Neutron API controller built in get_resources() below.
RESOURCE_ATTRIBUTE_MAP = {
    'net_partitions': {
        'id': {'allow_post': False, 'allow_put': False,
               'validate': {'type:uuid': None},
               'is_visible': True},
        'name': {'allow_post': True, 'allow_put': False,
                 'is_visible': True, 'default': '',
                 'validate': {'type:name_not_default': None}},
        'description': {'allow_post': True, 'allow_put': False,
                        'is_visible': True, 'default': '',
                        'validate': {'type:string_or_none': None}},
        'tenant_id': {'allow_post': True, 'allow_put': False,
                      'required_by_policy': True,
                      'is_visible': True},
    },
}


class Netpartition(object):
    """Extension class supporting net_partition.

    Exposes the 'net-partition' API extension: metadata classmethods plus
    a resource controller wired to the loaded core plugin.
    """

    @classmethod
    def get_name(cls):
        return "NetPartition"

    @classmethod
    def get_alias(cls):
        return "net-partition"

    @classmethod
    def get_description(cls):
        return "NetPartition"

    @classmethod
    def get_namespace(cls):
        return "http://nuagenetworks.net/ext/net_partition/api/v1.0"

    @classmethod
    def get_updated(cls):
        return "2014-01-01T10:00:00-00:00"

    @classmethod
    def get_resources(cls):
        """Returns Ext Resources."""
        exts = []
        plugin = manager.NeutronManager.get_plugin()
        resource_name = 'net_partition'
        # Collection is dash-separated in URLs: 'net-partitions'.
        collection_name = resource_name.replace('_', '-') + "s"
        params = RESOURCE_ATTRIBUTE_MAP.get(resource_name + "s", dict())
        # Register with the quota engine so per-tenant limits apply.
        quota.QUOTAS.register_resource_by_name(resource_name)
        controller = base.create_resource(collection_name,
                                          resource_name,
                                          plugin, params, allow_bulk=True)
        ex = extensions.ResourceExtension(collection_name,
                                          controller)
        exts.append(ex)

        return exts


# NOTE(review): without a metaclass of abc.ABCMeta (e.g. via
# six.add_metaclass), @abc.abstractmethod is not enforced on this base
# class — subclasses can be instantiated without overriding; confirm
# whether enforcement was intended.
class NetPartitionPluginBase(object):

    @abc.abstractmethod
    def create_net_partition(self, context, router):
        pass

    @abc.abstractmethod
    def update_net_partition(self, context, id, router):
        pass

    @abc.abstractmethod
    def get_net_partition(self, context, id, fields=None):
        pass

    @abc.abstractmethod
    def delete_net_partition(self, context, id):
        pass

    @abc.abstractmethod
    def get_net_partitions(self, context, filters=None, fields=None):
        pass


# ---- neutron/plugins/nuage/extensions/nuage_router.py ----
# (Apache 2.0 license header as in original; @author: Ronak Shah,
#  Nuage Networks, Alcatel-Lucent USA Inc.)


# Extra attributes grafted onto the core 'routers' resource:
# Nuage net_partition name/id plus route-distinguisher (rd) and
# route-target (rt) strings.
EXTENDED_ATTRIBUTES_2_0 = {
    'routers': {
        'net_partition': {
            'allow_post': True,
            'allow_put': True,
            'is_visible': True,
            'default': None,
            'validate': {'type:string_or_none': None}
        },
        'rd': {
            'allow_post': True,
            'allow_put': True,
            'is_visible': True,
            'default': None,
            'validate': {'type:string_or_none': None}
        },
        'rt': {
            'allow_post': True,
            'allow_put': True,
            'is_visible': True,
            'default': None,
            'validate': {'type:string_or_none': None}
        },
    },
}


class Nuage_router(object):
    """Extension class supporting nuage router.
    """

    @classmethod
    def get_name(cls):
        return "Nuage router"

    @classmethod
    def get_alias(cls):
        return "nuage-router"

    @classmethod
    def get_description(cls):
        return "Nuage Router"

    @classmethod
    def get_namespace(cls):
        return "http://nuagenetworks.net/ext/routers/api/v1.0"

    @classmethod
    def get_updated(cls):
        return "2014-01-01T10:00:00-00:00"

    def get_extended_resources(self, version):
        # Only API v2.0 carries the extended router attributes.
        if version == "2.0":
            return EXTENDED_ATTRIBUTES_2_0
        else:
            return {}


# ---- neutron/plugins/nuage/extensions/nuage_subnet.py ----
# (Apache 2.0 license header as in original; @author: Ronak Shah,
#  Nuage Networks, Alcatel-Lucent USA Inc.)


# Extra attribute grafted onto the core 'subnets' resource: the Nuage
# net_partition the subnet belongs to.
EXTENDED_ATTRIBUTES_2_0 = {
    'subnets': {
        'net_partition': {
            'allow_post': True,
            'allow_put': True,
            'is_visible': True,
            'default': None,
            'validate': {'type:string_or_none': None}
        },
    },
}


class Nuage_subnet(object):
    """Extension class supporting Nuage subnet.
    """

    @classmethod
    def get_name(cls):
        return "Nuage subnet"

    @classmethod
    def get_alias(cls):
        return "nuage-subnet"

    @classmethod
    def get_description(cls):
        return "Nuage subnet"

    @classmethod
    def get_namespace(cls):
        return "http://nuagenetworks.net/ext/subnets/api/v1.0"

    @classmethod
    def get_updated(cls):
        return "2014-01-01T10:00:00-00:00"

    def get_extended_resources(self, version):
        # Only API v2.0 carries the extended subnet attribute.
        if version == "2.0":
            return EXTENDED_ATTRIBUTES_2_0
        else:
            return {}
# ---- neutron/plugins/nuage/nuage_models.py ----
# (Apache 2.0 license header as in original; @author: Ronak Shah,
#  Nuage Networks, Alcatel-Lucent USA Inc.)

from sqlalchemy import Boolean, Column, ForeignKey, String

from neutron.db import model_base
from neutron.db import models_v2


class NetPartition(model_base.BASEV2, models_v2.HasId):
    """A Nuage net-partition with its L2/L3 domain template ids."""
    __tablename__ = 'nuage_net_partitions'
    name = Column(String(64))
    l3dom_tmplt_id = Column(String(36))
    l2dom_tmplt_id = Column(String(36))


class NetPartitionRouter(model_base.BASEV2):
    """Maps a Neutron router to its net-partition and VSD router id."""
    __tablename__ = "nuage_net_partition_router_mapping"
    net_partition_id = Column(String(36),
                              ForeignKey('nuage_net_partitions.id',
                                         ondelete="CASCADE"),
                              primary_key=True)
    router_id = Column(String(36),
                       ForeignKey('routers.id', ondelete="CASCADE"),
                       primary_key=True)
    nuage_router_id = Column(String(36))


class RouterZone(model_base.BASEV2):
    """Maps a Neutron router to its VSD zone plus user/group ids."""
    __tablename__ = "nuage_router_zone_mapping"
    router_id = Column(String(36),
                       ForeignKey('routers.id', ondelete="CASCADE"),
                       primary_key=True)
    nuage_zone_id = Column(String(36))
    nuage_user_id = Column(String(36))
    nuage_group_id = Column(String(36))


class SubnetL2Domain(model_base.BASEV2):
    """Maps a Neutron subnet to its VSD L2 domain/subnet artifacts."""
    __tablename__ = 'nuage_subnet_l2dom_mapping'
    subnet_id = Column(String(36),
                       ForeignKey('subnets.id', ondelete="CASCADE"),
                       primary_key=True)
    net_partition_id = Column(String(36),
                              ForeignKey('nuage_net_partitions.id',
                                         ondelete="CASCADE"))
    nuage_subnet_id = Column(String(36))
    nuage_l2dom_tmplt_id = Column(String(36))
    nuage_user_id = Column(String(36))
    nuage_group_id = Column(String(36))


class PortVPortMapping(model_base.BASEV2):
    """Maps a Neutron port to its VSD vport/vif; static_ip records
    whether the port was created with a caller-chosen fixed IP."""
    __tablename__ = 'nuage_port_mapping'
    port_id = Column(String(36),
                     ForeignKey('ports.id', ondelete="CASCADE"),
                     primary_key=True)
    nuage_vport_id = Column(String(36))
    nuage_vif_id = Column(String(36))
    static_ip = Column(Boolean())


class RouterRoutesMapping(model_base.BASEV2, models_v2.Route):
    """Static route (destination/nexthop via models_v2.Route) of a router,
    tied to the corresponding VSD static-route object."""
    __tablename__ = 'nuage_routerroutes_mapping'
    router_id = Column(String(36),
                       ForeignKey('routers.id',
                                  ondelete="CASCADE"),
                       primary_key=True,
                       nullable=False)
    nuage_route_id = Column(String(36))


class FloatingIPPoolMapping(model_base.BASEV2):
    """Maps a VSD floating-IP pool to the external network (and router)."""
    __tablename__ = "nuage_floatingip_pool_mapping"
    fip_pool_id = Column(String(36), primary_key=True)
    net_id = Column(String(36),
                    ForeignKey('networks.id', ondelete="CASCADE"))
    router_id = Column(String(36))


class FloatingIPMapping(model_base.BASEV2):
    """Maps a Neutron floating IP to its router and VSD floating IP."""
    __tablename__ = 'nuage_floatingip_mapping'
    fip_id = Column(String(36),
                    ForeignKey('floatingips.id',
                               ondelete="CASCADE"),
                    primary_key=True)
    router_id = Column(String(36))
    nuage_fip_id = Column(String(36))
# ---- neutron/plugins/nuage/nuagedb.py ----
# (Apache 2.0 license header as in original; @author: Ronak Shah,
#  Nuage Networks, Alcatel-Lucent USA Inc.)
#
# Thin DB-access helpers over the nuage_models mapping tables.  All
# writers only session.add()/delete(); commit/flush is the caller's
# responsibility (callers wrap these in session.begin(subtransactions=True)).

from neutron.db import db_base_plugin_v2
from neutron.plugins.nuage import nuage_models


def add_entrouter_mapping(session, np_id,
                          router_id,
                          n_l3id):
    """Record the net-partition <-> router <-> VSD router association."""
    ent_rtr_mapping = nuage_models.NetPartitionRouter(net_partition_id=np_id,
                                                      router_id=router_id,
                                                      nuage_router_id=n_l3id)
    session.add(ent_rtr_mapping)


def add_rtrzone_mapping(session, neutron_router_id,
                        nuage_zone_id,
                        nuage_user_id=None,
                        nuage_group_id=None):
    """Record the router's VSD zone (and optional user/group) mapping."""
    rtr_zone_mapping = nuage_models.RouterZone(router_id=neutron_router_id,
                                               nuage_zone_id=nuage_zone_id,
                                               nuage_user_id=nuage_user_id,
                                               nuage_group_id=nuage_group_id)
    session.add(rtr_zone_mapping)


def add_subnetl2dom_mapping(session, neutron_subnet_id,
                            nuage_sub_id,
                            np_id,
                            l2dom_id=None,
                            nuage_user_id=None,
                            nuage_group_id=None):
    """Record the subnet's VSD L2-domain mapping row."""
    subnet_l2dom = nuage_models.SubnetL2Domain(subnet_id=neutron_subnet_id,
                                               nuage_subnet_id=nuage_sub_id,
                                               net_partition_id=np_id,
                                               nuage_l2dom_tmplt_id=l2dom_id,
                                               nuage_user_id=nuage_user_id,
                                               nuage_group_id=nuage_group_id)
    session.add(subnet_l2dom)


def update_subnetl2dom_mapping(subnet_l2dom,
                               new_dict):
    """In-place update of an existing SubnetL2Domain row."""
    subnet_l2dom.update(new_dict)


def delete_subnetl2dom_mapping(session, subnet_l2dom):
    session.delete(subnet_l2dom)


def add_port_vport_mapping(session, port_id, nuage_vport_id,
                           nuage_vif_id, static_ip):
    """Record the port's VSD vport/vif mapping; returns the new row."""
    port_mapping = nuage_models.PortVPortMapping(port_id=port_id,
                                                 nuage_vport_id=nuage_vport_id,
                                                 nuage_vif_id=nuage_vif_id,
                                                 static_ip=static_ip)
    session.add(port_mapping)
    return port_mapping


def update_port_vport_mapping(port_mapping,
                              new_dict):
    """In-place update of an existing PortVPortMapping row."""
    port_mapping.update(new_dict)


def get_port_mapping_by_id(session, id):
    """Return the PortVPortMapping for a port id, or None."""
    query = session.query(nuage_models.PortVPortMapping)
    return query.filter_by(port_id=id).first()


def get_ent_rtr_mapping_by_rtrid(session, rtrid):
    """Return the NetPartitionRouter row for a router id, or None."""
    query = session.query(nuage_models.NetPartitionRouter)
    return query.filter_by(router_id=rtrid).first()


def get_rtr_zone_mapping(session, router_id):
    """Return the RouterZone row for a router id, or None."""
    query = session.query(nuage_models.RouterZone)
    return query.filter_by(router_id=router_id).first()


def get_subnet_l2dom_by_id(session, id):
    """Return the SubnetL2Domain row for a subnet id, or None."""
    query = session.query(nuage_models.SubnetL2Domain)
    return query.filter_by(subnet_id=id).first()


def add_net_partition(session, netpart_id,
                      l3dom_id, l2dom_id,
                      ent_name):
    """Create a NetPartition row (id supplied by caller); returns it."""
    net_partitioninst = nuage_models.NetPartition(id=netpart_id,
                                                  name=ent_name,
                                                  l3dom_tmplt_id=l3dom_id,
                                                  l2dom_tmplt_id=l2dom_id)
    session.add(net_partitioninst)
    return net_partitioninst


def delete_net_partition(session, net_partition):
    session.delete(net_partition)


def get_ent_rtr_mapping_by_entid(session,
                                 entid):
    """Return all router mappings for a net-partition id (list)."""
    query = session.query(nuage_models.NetPartitionRouter)
    return query.filter_by(net_partition_id=entid).all()


def get_net_partition_by_name(session, name):
    query = session.query(nuage_models.NetPartition)
    return query.filter_by(name=name).first()


def get_net_partition_by_id(session, id):
    query = session.query(nuage_models.NetPartition)
    return query.filter_by(id=id).first()


def get_net_partitions(session, filters=None, fields=None):
    """Return a NetPartition query with API filters applied.

    NOTE(review): the ``fields`` argument is accepted but never applied
    here — presumably field selection happens in the caller; confirm.
    """
    query = session.query(nuage_models.NetPartition)
    common_db = db_base_plugin_v2.CommonDbMixin()
    query = common_db._apply_filters_to_query(query,
                                              nuage_models.NetPartition,
                                              filters)
    return query


def delete_static_route(session, static_route):
    session.delete(static_route)


def get_router_route_mapping(session, id, route):
    """Return the exact route mapping row for (router, dest, nexthop).

    NOTE(review): uses .one(), so this raises NoResultFound /
    MultipleResultsFound instead of returning None like the other getters.
    """
    qry = session.query(nuage_models.RouterRoutesMapping)
    return qry.filter_by(router_id=id,
                         destination=route['destination'],
                         nexthop=route['nexthop']).one()


def add_static_route(session, router_id, nuage_rtr_id,
                     destination, nexthop):
    """Record a static route and its VSD route id; returns the row."""
    staticrt = nuage_models.RouterRoutesMapping(router_id=router_id,
                                                nuage_route_id=nuage_rtr_id,
                                                destination=destination,
                                                nexthop=nexthop)
    session.add(staticrt)
    return staticrt


def add_fip_mapping(session, neutron_fip_id, router_id, nuage_fip_id):
    """Record floating-IP <-> router <-> VSD FIP mapping; returns it."""
    fip = nuage_models.FloatingIPMapping(fip_id=neutron_fip_id,
                                         router_id=router_id,
                                         nuage_fip_id=nuage_fip_id)
    session.add(fip)
    return fip


def delete_fip_mapping(session, fip_mapping):
    session.delete(fip_mapping)


def add_fip_pool_mapping(session, fip_pool_id, net_id, router_id=None):
    """Record a VSD FIP-pool <-> external network mapping; returns it."""
    fip_pool_mapping = nuage_models.FloatingIPPoolMapping(
        fip_pool_id=fip_pool_id,
        net_id=net_id,
        router_id=router_id)
    session.add(fip_pool_mapping)
    return fip_pool_mapping


def delete_fip_pool_mapping(session, fip_pool_mapping):
    session.delete(fip_pool_mapping)


def get_fip_pool_by_id(session, id):
    query = session.query(nuage_models.FloatingIPPoolMapping)
    return query.filter_by(fip_pool_id=id).first()


def get_fip_pool_from_netid(session, net_id):
    query = session.query(nuage_models.FloatingIPPoolMapping)
    return query.filter_by(net_id=net_id).first()


def get_fip_mapping_by_id(session, id):
    qry = session.query(nuage_models.FloatingIPMapping)
    return qry.filter_by(fip_id=id).first()


def update_fip_pool_mapping(fip_pool_mapping, new_dict):
    """In-place update of an existing FloatingIPPoolMapping row."""
    fip_pool_mapping.update(new_dict)
# ---- neutron/plugins/nuage/plugin.py ----
# (Apache 2.0 license header as in original; @author: Ronak Shah,
#  Nuage Networks, Alcatel-Lucent USA Inc.)


import re

import netaddr
from oslo.config import cfg
from sqlalchemy.orm import exc

from neutron.api import extensions as neutron_extensions
from neutron.api.v2 import attributes
from neutron.common import constants as os_constants
from neutron.common import exceptions as n_exc
from neutron.common import utils
from neutron.db import api as db
from neutron.db import db_base_plugin_v2
from neutron.db import external_net_db
from neutron.db import extraroute_db
from neutron.db import l3_db
from neutron.db import models_v2
from neutron.db import quota_db  # noqa
from neutron.extensions import external_net
from neutron.extensions import l3
from neutron.extensions import portbindings
from neutron.openstack.common import excutils
from neutron.openstack.common import importutils
from neutron.plugins.nuage.common import config
from neutron.plugins.nuage.common import constants
from neutron.plugins.nuage.common import exceptions as nuage_exc
from neutron.plugins.nuage import extensions
from neutron.plugins.nuage.extensions import netpartition
from neutron.plugins.nuage import nuagedb
from neutron import policy


class NuagePlugin(db_base_plugin_v2.NeutronDbPluginV2,
                  external_net_db.External_net_db_mixin,
                  extraroute_db.ExtraRoute_db_mixin,
                  l3_db.L3_NAT_db_mixin,
                  netpartition.NetPartitionPluginBase):
    """Class that implements Nuage Networks' plugin functionality.

    Core plugin that mirrors Neutron networks/subnets/ports/routers onto
    the Nuage VSD via the out-of-tree ``nuagenetlib`` REST client, and
    persists Neutron<->VSD id mappings through the ``nuagedb`` helpers.
    """
    supported_extension_aliases = ["router", "binding", "external-net",
                                   "net-partition", "nuage-router",
                                   "nuage-subnet", "quotas", "extraroute"]

    # Policy action checked before exposing port-binding attributes.
    binding_view = "extension:port_binding:view"

    def __init__(self):
        super(NuagePlugin, self).__init__()
        neutron_extensions.append_api_extensions_path(extensions.__path__)
        config.nuage_register_cfg_opts()
        self.nuageclient_init()
        # Ensure the configured default net-partition exists at startup.
        net_partition = cfg.CONF.RESTPROXY.default_net_partition_name
        self._create_default_net_partition(net_partition)

    def nuageclient_init(self):
        """Build the VSD REST client from the RESTPROXY config section.

        ``nuagenetlib`` is imported at runtime because it ships outside
        the Neutron tree.
        """
        server = cfg.CONF.RESTPROXY.server
        serverauth = cfg.CONF.RESTPROXY.serverauth
        serverssl = cfg.CONF.RESTPROXY.serverssl
        base_uri = cfg.CONF.RESTPROXY.base_uri
        auth_resource = cfg.CONF.RESTPROXY.auth_resource
        organization = cfg.CONF.RESTPROXY.organization
        nuageclient = importutils.import_module('nuagenetlib.nuageclient')
        self.nuageclient = nuageclient.NuageClient(server, base_uri,
                                                   serverssl, serverauth,
                                                   auth_resource,
                                                   organization)

    def _resource_finder(self, context, for_resource, resource, user_req):
        """Resolve ``user_req[resource]`` to a resource dict.

        Accepts either a UUID (direct get) or a name (list + require
        exactly one match); raises BadRequest on miss or ambiguity.
        """
        match = re.match(attributes.UUID_PATTERN, user_req[resource])
        if match:
            obj_lister = getattr(self, "get_%s" % resource)
            found_resource = obj_lister(context, user_req[resource])
            if not found_resource:
                msg = (_("%(resource)s with id %(resource_id)s does not "
                         "exist") % {'resource': resource,
                                     'resource_id': user_req[resource]})
                raise n_exc.BadRequest(resource=for_resource, msg=msg)
        else:
            filter = {'name': [user_req[resource]]}
            obj_lister = getattr(self, "get_%ss" % resource)
            found_resource = obj_lister(context, filters=filter)
            if not found_resource:
                msg = (_("Either %(resource)s %(req_resource)s not found "
                         "or you dont have credential to access it")
                       % {'resource': resource,
                          'req_resource': user_req[resource]})
                raise n_exc.BadRequest(resource=for_resource, msg=msg)
            if len(found_resource) > 1:
                msg = (_("More than one entry found for %(resource)s "
                         "%(req_resource)s. Use id instead")
                       % {'resource': resource,
                          'req_resource': user_req[resource]})
                raise n_exc.BadRequest(resource=for_resource, msg=msg)
            found_resource = found_resource[0]
        return found_resource

    def _update_port_ip(self, context, port, new_ip):
        """Replace the port's first fixed IP with ``new_ip`` in the DB."""
        subid = port['fixed_ips'][0]['subnet_id']
        new_fixed_ips = {}
        new_fixed_ips['subnet_id'] = subid
        new_fixed_ips['ip_address'] = new_ip
        ips, prev_ips = self._update_ips_for_port(context,
                                                  port["network_id"],
                                                  port['id'],
                                                  port["fixed_ips"],
                                                  [new_fixed_ips])

        # Update ips if necessary
        for ip in ips:
            allocated = models_v2.IPAllocation(
                network_id=port['network_id'], port_id=port['id'],
                ip_address=ip['ip_address'], subnet_id=ip['subnet_id'])
            context.session.add(allocated)

    def _create_update_port(self, context, port,
                            port_mapping, subnet_mapping):
        """Create the VM (vport/vif) on the VSD for a Nova port.

        If the VSD assigns a different IP than Neutron did, the Neutron
        port IP is rewritten to match; the returned vport/vif ids are
        stored on ``port_mapping``.
        """
        filters = {'device_id': [port['device_id']]}
        ports = self.get_ports(context, filters)
        netpart_id = subnet_mapping['net_partition_id']
        net_partition = nuagedb.get_net_partition_by_id(context.session,
                                                        netpart_id)
        params = {
            'id': port['device_id'],
            'mac': port['mac_address'],
            'parent_id': subnet_mapping['nuage_subnet_id'],
            'net_partition': net_partition,
            'ip': None,
            'no_of_ports': len(ports),
            'tenant': port['tenant_id']
        }
        # Only pass the IP through when the caller asked for a static one.
        if port_mapping['static_ip']:
            params['ip'] = port['fixed_ips'][0]['ip_address']

        nuage_vm = self.nuageclient.create_vms(params)
        if nuage_vm:
            if port['fixed_ips'][0]['ip_address'] != str(nuage_vm['ip']):
                self._update_port_ip(context, port, nuage_vm['ip'])
            port_dict = {
                'nuage_vport_id': nuage_vm['vport_id'],
                'nuage_vif_id': nuage_vm['vif_id']
            }
            nuagedb.update_port_vport_mapping(port_mapping,
                                              port_dict)

    def create_port(self, context, port):
        """Create the Neutron port; for tenant ports on VSD-mapped
        subnets also record a vport mapping, and for Nova-owned ports
        create the VM on the VSD (rolling back the port on failure)."""
        session = context.session
        with session.begin(subtransactions=True):
            p = port['port']
            port = super(NuagePlugin, self).create_port(context, port)
            device_owner = port.get('device_owner', None)
            # Implicitly-created ports (DHCP, router, FIP) are not
            # mirrored to the VSD.
            if (device_owner and
                    device_owner not in constants.AUTO_CREATE_PORT_OWNERS):
                if 'fixed_ips' not in port or len(port['fixed_ips']) == 0:
                    return self._extend_port_dict_binding(context, port)
                subnet_id = port['fixed_ips'][0]['subnet_id']
                subnet_mapping = nuagedb.get_subnet_l2dom_by_id(session,
                                                                subnet_id)
                if subnet_mapping:
                    # static_ip: the request (not the allocation) carried
                    # an explicit ip_address.
                    static_ip = False
                    if (attributes.is_attr_set(p['fixed_ips']) and
                            'ip_address' in p['fixed_ips'][0]):
                        static_ip = True
                    nuage_vport_id = None
                    nuage_vif_id = None
                    port_mapping = nuagedb.add_port_vport_mapping(
                        session,
                        port['id'],
                        nuage_vport_id,
                        nuage_vif_id,
                        static_ip)
                    port_prefix = constants.NOVA_PORT_OWNER_PREF
                    if port['device_owner'].startswith(port_prefix):
                        #This request is coming from nova
                        try:
                            self._create_update_port(context, port,
                                                     port_mapping,
                                                     subnet_mapping)
                        except Exception:
                            with excutils.save_and_reraise_exception():
                                super(NuagePlugin, self).delete_port(
                                    context,
                                    port['id'])
            return self._extend_port_dict_binding(context, port)

    def update_port(self, context, id, port):
        """Update a port; for Nova-owned ports ensure the VSD-side VM
        exists (created lazily here if the mapping has no vport yet)."""
        p = port['port']
        if p.get('device_owner', '').startswith(
                constants.NOVA_PORT_OWNER_PREF):
            session = context.session
            with session.begin(subtransactions=True):
                port = self._get_port(context, id)
                port.update(p)
                if 'fixed_ips' not in port or len(port['fixed_ips']) == 0:
                    return self._make_port_dict(port)
                subnet_id = port['fixed_ips'][0]['subnet_id']
                subnet_mapping = nuagedb.get_subnet_l2dom_by_id(session,
                                                                subnet_id)
                if not subnet_mapping:
                    msg = (_("Subnet %s not found on VSD") % subnet_id)
                    raise n_exc.BadRequest(resource='port', msg=msg)
                port_mapping = nuagedb.get_port_mapping_by_id(session,
                                                              id)
                if not port_mapping:
                    msg = (_("Port-Mapping for port %s not "
                             " found on VSD") % id)
                    raise n_exc.BadRequest(resource='port', msg=msg)
                if not port_mapping['nuage_vport_id']:
                    self._create_update_port(context, port,
                                             port_mapping, subnet_mapping)
                updated_port = self._make_port_dict(port)
        else:
            updated_port = super(NuagePlugin, self).update_port(context, id,
                                                                port)
        return updated_port

    def delete_port(self, context, id, l3_port_check=True):
        """Delete a port, tearing down the corresponding VSD VM first
        when the port belongs to a Nova instance."""
        if l3_port_check:
            self.prevent_l3_port_deletion(context, id)
        port = self._get_port(context, id)
        port_mapping = nuagedb.get_port_mapping_by_id(context.session,
                                                      id)
        # This is required for to pass ut test_floatingip_port_delete
        self.disassociate_floatingips(context, id)
        if not port['fixed_ips']:
            return super(NuagePlugin, self).delete_port(context, id)

        sub_id = port['fixed_ips'][0]['subnet_id']
        subnet_mapping = nuagedb.get_subnet_l2dom_by_id(context.session,
                                                        sub_id)
        if not subnet_mapping:
            return super(NuagePlugin, self).delete_port(context, id)

        netpart_id = subnet_mapping['net_partition_id']
        net_partition = nuagedb.get_net_partition_by_id(context.session,
                                                        netpart_id)
        # Need to call this explicitly to delete vport_vporttag_mapping
        if constants.NOVA_PORT_OWNER_PREF in port['device_owner']:
            # This was a VM Port
            # NOTE(review): port_mapping is dereferenced below without a
            # None check — presumably every Nova-owned port on a mapped
            # subnet has a mapping row; confirm.
            filters = {'device_id': [port['device_id']]}
            ports = self.get_ports(context, filters)
            params = {
                'no_of_ports': len(ports),
                'net_partition': net_partition,
                'tenant': port['tenant_id'],
                'mac': port['mac_address'],
                'nuage_vif_id': port_mapping['nuage_vif_id'],
                'id': port['device_id']
            }
            self.nuageclient.delete_vms(params)
        super(NuagePlugin, self).delete_port(context, id)

    def _check_view_auth(self, context, resource, action):
        """Policy check gating visibility of extension attributes."""
        return policy.check(context, action, resource)

    def _extend_port_dict_binding(self, context, port):
        """Add OVS port-binding attributes when policy allows viewing."""
        if self._check_view_auth(context, port, self.binding_view):
            port[portbindings.VIF_TYPE] = portbindings.VIF_TYPE_OVS
            port[portbindings.VIF_DETAILS] = {
                portbindings.CAP_PORT_FILTER: False
            }
        return port

    def get_port(self, context, id, fields=None):
        port = super(NuagePlugin, self).get_port(context, id, fields)
        return self._fields(self._extend_port_dict_binding(context, port),
                            fields)

    def get_ports(self, context, filters=None, fields=None):
        ports = super(NuagePlugin, self).get_ports(context, filters, fields)
        return [self._fields(self._extend_port_dict_binding(context, port),
                             fields) for port in ports]

    def _check_router_subnet_for_tenant(self, context):
        """Return True if the tenant still owns any router or subnet."""
        # Search router and subnet tables.
        # If no entry left delete user and group from VSD
        filters = {'tenant_id': [context.tenant]}
        routers = self.get_routers(context, filters=filters)
        subnets = self.get_subnets(context, filters=filters)
        return bool(routers or subnets)

    def create_network(self, context, network):
        """Create the network and process the external-net attribute."""
        net = network['network']
        with context.session.begin(subtransactions=True):
            net = super(NuagePlugin, self).create_network(context,
                                                          network)
            self._process_l3_create(context, net, network['network'])
        return net

    def _validate_update_network(self, context, id, network):
        """Validate a router:external flip on update.

        Returns (is_external_set, subnet-list); (None, None) when the
        external flag is absent or unchanged.  Rejects turning an
        external network with subnets back into a non-external one.
        """
        req_data = network['network']
        is_external_set = req_data.get(external_net.EXTERNAL)
        if not attributes.is_attr_set(is_external_set):
            return (None, None)
        neutron_net = self.get_network(context, id)
        if neutron_net.get(external_net.EXTERNAL) == is_external_set:
            return (None, None)
        subnet = self._validate_nuage_sharedresource(context, 'network', id)
        if subnet and not is_external_set:
            msg = _('External network with subnets can not be '
                    'changed to non-external network')
            raise nuage_exc.OperationNotSupported(msg=msg)
        return (is_external_set, subnet)

    def update_network(self, context, id, network):
        """Update a network; when it becomes external, migrate its
        existing subnet from a VSD L2 domain to a shared FIP resource."""
        with context.session.begin(subtransactions=True):
            is_external_set, subnet = self._validate_update_network(context,
                                                                    id,
                                                                    network)
            net = super(NuagePlugin, self).update_network(context, id,
                                                          network)
            self._process_l3_update(context, net, network['network'])
            if subnet and is_external_set:
                subn = subnet[0]
                subnet_l2dom = nuagedb.get_subnet_l2dom_by_id(context.session,
                                                              subn['id'])
                if subnet_l2dom:
                    # Tear down the L2-domain artifacts before recreating
                    # the subnet as a shared (floating-IP) resource.
                    nuage_subnet_id = subnet_l2dom['nuage_subnet_id']
                    nuage_l2dom_tid = subnet_l2dom['nuage_l2dom_tmplt_id']
                    user_id = subnet_l2dom['nuage_user_id']
                    group_id = subnet_l2dom['nuage_group_id']
                    self.nuageclient.delete_subnet(nuage_subnet_id,
                                                   nuage_l2dom_tid)
                    self.nuageclient.delete_user(user_id)
                    self.nuageclient.delete_group(group_id)
                    nuagedb.delete_subnetl2dom_mapping(context.session,
                                                       subnet_l2dom)
                self._add_nuage_sharedresource(context,
                                               subnet[0],
                                               id,
                                               constants.SR_TYPE_FLOATING)
        return net

    def delete_network(self, context, id):
        """Delete a network after deleting each of its subnets (which
        handles the VSD-side cleanup per subnet)."""
        with context.session.begin(subtransactions=True):
            self._process_l3_delete(context, id)
            filter = {'network_id': [id]}
            subnets = self.get_subnets(context, filters=filter)
            for subnet in subnets:
                self.delete_subnet(context, subnet['id'])
            super(NuagePlugin, self).delete_network(context, id)

    def _get_net_partition_for_subnet(self, context, subnet):
        """Resolve the net-partition for a subnet request, falling back
        to the configured default; raises BadRequest when none exists."""
        subn = subnet['subnet']
        ent = subn.get('net_partition', None)
        if not ent:
            def_net_part = cfg.CONF.RESTPROXY.default_net_partition_name
            net_partition = nuagedb.get_net_partition_by_name(context.session,
                                                              def_net_part)
        else:
            net_partition = self._resource_finder(context, 'subnet',
                                                  'net_partition', subn)
        if not net_partition:
            msg = _('Either net_partition is not provided with subnet OR '
                    'default net_partition is not created at the start')
            raise n_exc.BadRequest(resource='subnet', msg=msg)
        return net_partition

    def _validate_create_subnet(self, subnet):
        """Reject subnet options the Nuage backend cannot represent:
        host_routes and no-gateway."""
        if ('host_routes' in subnet and
                attributes.is_attr_set(subnet['host_routes'])):
            msg = 'host_routes extensions not supported for subnets'
            raise nuage_exc.OperationNotSupported(msg=msg)
        if subnet['gateway_ip'] is None:
            msg = "no-gateway option not supported with subnets"
            raise nuage_exc.OperationNotSupported(msg=msg)

    def _delete_nuage_sharedresource(self, context, net_id):
        """Delete the VSD shared resource for an external network and
        its local FIP-pool mapping row, if any."""
        sharedresource_id = self.nuageclient.delete_nuage_sharedresource(
            net_id)
        if sharedresource_id:
            fip_pool_mapping = nuagedb.get_fip_pool_by_id(context.session,
                                                          sharedresource_id)
            if fip_pool_mapping:
                with context.session.begin(subtransactions=True):
                    nuagedb.delete_fip_pool_mapping(context.session,
                                                    fip_pool_mapping)

    def _validate_nuage_sharedresource(self, context, resource, net_id):
        """External networks may carry at most one subnet; returns the
        (possibly empty) list of existing subnets."""
        filter = {'network_id': [net_id]}
        existing_subn = self.get_subnets(context, filters=filter)
        if len(existing_subn) > 1:
            msg = _('Only one subnet is allowed per '
                    'external network %s') % net_id
            raise nuage_exc.OperationNotSupported(msg=msg)
        return existing_subn

    def _add_nuage_sharedresource(self, context, subnet, net_id, type):
        """Create the VSD shared resource for an external subnet and
        record the returned FIP pool id."""
        net = netaddr.IPNetwork(subnet['cidr'])
        params = {
            'neutron_subnet': subnet,
            'net': net,
            'type': type
        }
        fip_pool_id = self.nuageclient.create_nuage_sharedresource(params)
        nuagedb.add_fip_pool_mapping(context.session, fip_pool_id, net_id)

    def _create_nuage_sharedresource(self, context, subnet, type):
        """Create an external-network subnet plus its VSD shared
        resource; returns the Neutron subnet dict."""
        subn = subnet['subnet']
        net_id = subn['network_id']
        self._validate_nuage_sharedresource(context, 'subnet', net_id)
        with context.session.begin(subtransactions=True):
            subn = super(NuagePlugin, self).create_subnet(context, subnet)
            self._add_nuage_sharedresource(context, subn, net_id, type)
            return subn

    def _create_nuage_subnet(self, context, neutron_subnet, net_partition):
        """Create the VSD L2 domain for a tenant subnet and persist the
        mapping; deletes the Neutron subnet again if the VSD call fails."""
        net = netaddr.IPNetwork(neutron_subnet['cidr'])
        params = {
            'net_partition': net_partition,
            'tenant_id': neutron_subnet['tenant_id'],
            'net': net
        }
        try:
            nuage_subnet = self.nuageclient.create_subnet(neutron_subnet,
                                                          params)
        except Exception:
            with excutils.save_and_reraise_exception():
                super(NuagePlugin, self).delete_subnet(context,
                                                       neutron_subnet['id'])

        if nuage_subnet:
            l2dom_id = str(nuage_subnet['nuage_l2template_id'])
            user_id = nuage_subnet['nuage_userid']
            group_id = nuage_subnet['nuage_groupid']
            id = nuage_subnet['nuage_l2domain_id']
            with context.session.begin(subtransactions=True):
                nuagedb.add_subnetl2dom_mapping(context.session,
                                                neutron_subnet['id'],
                                                id,
                                                net_partition['id'],
                                                l2dom_id=l2dom_id,
                                                nuage_user_id=user_id,
                                                nuage_group_id=group_id)

    def create_subnet(self, context, subnet):
        """Create a subnet: external networks become VSD shared FIP
        resources; tenant networks become VSD L2 domains."""
        subn = subnet['subnet']
        net_id = subn['network_id']

        if self._network_is_external(context, net_id):
            return self._create_nuage_sharedresource(
                context, subnet, constants.SR_TYPE_FLOATING)

        self._validate_create_subnet(subn)

        net_partition = self._get_net_partition_for_subnet(context, subnet)
        neutron_subnet = super(NuagePlugin, self).create_subnet(context,
                                                                subnet)
        self._create_nuage_subnet(context, neutron_subnet, net_partition)
        return neutron_subnet

    def delete_subnet(self, context, id):
        """Delete a subnet and its VSD counterpart; when the tenant has
        no routers/subnets left, also remove its VSD user and group."""
        subnet = self.get_subnet(context, id)
        if self._network_is_external(context, subnet['network_id']):
            super(NuagePlugin, self).delete_subnet(context, id)
            return self._delete_nuage_sharedresource(context, id)

        subnet_l2dom = nuagedb.get_subnet_l2dom_by_id(context.session, id)
        if subnet_l2dom:
            template_id = subnet_l2dom['nuage_l2dom_tmplt_id']
            try:
                self.nuageclient.delete_subnet(subnet_l2dom['nuage_subnet_id'],
                                               template_id)
            except Exception:
                msg = (_('Unable to complete operation on subnet %s.'
                         'One or more ports have an IP allocation '
                         'from this subnet.') % id)
                raise n_exc.BadRequest(resource='subnet', msg=msg)
        super(NuagePlugin, self).delete_subnet(context, id)
        if subnet_l2dom and not self._check_router_subnet_for_tenant(context):
            self.nuageclient.delete_user(subnet_l2dom['nuage_user_id'])
            self.nuageclient.delete_group(subnet_l2dom['nuage_group_id'])

    def add_router_interface(self, context, router_id, interface_info):
        """Attach a subnet to a router: validates the VSD mappings, then
        migrates the subnet's VSD L2 domain into a domain-subnet under
        the router's zone; every failure path detaches the interface."""
        session = context.session
        with session.begin(subtransactions=True):
            rtr_if_info = super(NuagePlugin,
                                self).add_router_interface(context,
                                                           router_id,
                                                           interface_info)
            subnet_id = rtr_if_info['subnet_id']
            subn = self.get_subnet(context, subnet_id)

            rtr_zone_mapping = nuagedb.get_rtr_zone_mapping(session,
                                                            router_id)
            ent_rtr_mapping = nuagedb.get_ent_rtr_mapping_by_rtrid(session,
                                                                   router_id)
            subnet_l2dom = nuagedb.get_subnet_l2dom_by_id(session,
                                                          subnet_id)
            if not rtr_zone_mapping or not ent_rtr_mapping:
                super(NuagePlugin,
                      self).remove_router_interface(context,
                                                    router_id,
                                                    interface_info)
                msg = (_("Router %s does not hold default zone OR "
                         "net_partition mapping. Router-IF add failed")
                       % router_id)
                raise n_exc.BadRequest(resource='router', msg=msg)

            if not subnet_l2dom:
                super(NuagePlugin,
                      self).remove_router_interface(context,
                                                    router_id,
                                                    interface_info)
                msg = (_("Subnet %s does not hold Nuage VSD reference. "
                         "Router-IF add failed") % subnet_id)
                raise n_exc.BadRequest(resource='subnet', msg=msg)

            if (subnet_l2dom['net_partition_id'] !=
                    ent_rtr_mapping['net_partition_id']):
                super(NuagePlugin,
                      self).remove_router_interface(context,
                                                    router_id,
                                                    interface_info)
                msg = (_("Subnet %(subnet)s and Router %(router)s belong to "
                         "different net_partition Router-IF add "
                         "not permitted") % {'subnet': subnet_id,
                                             'router': router_id})
                raise n_exc.BadRequest(resource='subnet', msg=msg)
            nuage_subnet_id = subnet_l2dom['nuage_subnet_id']
            nuage_l2dom_tmplt_id = subnet_l2dom['nuage_l2dom_tmplt_id']
            if self.nuageclient.vms_on_l2domain(nuage_subnet_id):
                super(NuagePlugin,
                      self).remove_router_interface(context,
                                                    router_id,
                                                    interface_info)
                msg = (_("Subnet %s has one or more active VMs "
                         "Router-IF add not permitted") % subnet_id)
                raise n_exc.BadRequest(resource='subnet', msg=msg)
            # Replace the standalone L2 domain with a domain-subnet in
            # the router's zone.
            self.nuageclient.delete_subnet(nuage_subnet_id,
                                           nuage_l2dom_tmplt_id)
            net = netaddr.IPNetwork(subn['cidr'])
            params = {
                'net': net,
                'zone_id': rtr_zone_mapping['nuage_zone_id']
            }
            if not attributes.is_attr_set(subn['gateway_ip']):
                subn['gateway_ip'] = str(netaddr.IPAddress(net.first + 1))
            try:
                nuage_subnet = self.nuageclient.create_domain_subnet(subn,
                                                                     params)
            except Exception:
                with excutils.save_and_reraise_exception():
                    super(NuagePlugin,
                          self).remove_router_interface(context,
                                                        router_id,
                                                        interface_info)
            if nuage_subnet:
                ns_dict = {}
                ns_dict['nuage_subnet_id'] = nuage_subnet['nuage_subnetid']
                # The L2-domain template no longer applies after migration.
                ns_dict['nuage_l2dom_tmplt_id'] = None
                nuagedb.update_subnetl2dom_mapping(subnet_l2dom,
                                                   ns_dict)
        return rtr_if_info

    # NOTE(review): this method continues beyond the visible diff chunk;
    # the body below is the truncated prefix exactly as in the patch.
    def remove_router_interface(self, context, router_id, interface_info):
        if 'subnet_id' in interface_info:
            subnet_id = interface_info['subnet_id']
            subnet = self.get_subnet(context, subnet_id)
            found = False
            try:
                filters = {'device_id': [router_id],
                           'device_owner':
                           [os_constants.DEVICE_OWNER_ROUTER_INTF],
'network_id': [subnet['network_id']]} + ports = self.get_ports(context, filters) + + for p in ports: + if p['fixed_ips'][0]['subnet_id'] == subnet_id: + found = True + break + except exc.NoResultFound: + msg = (_("No router interface found for Router %s. " + "Router-IF delete failed") % router_id) + raise n_exc.BadRequest(resource='router', msg=msg) + + if not found: + msg = (_("No router interface found for Router %s. " + "Router-IF delete failed") % router_id) + raise n_exc.BadRequest(resource='router', msg=msg) + elif 'port_id' in interface_info: + port_db = self._get_port(context, interface_info['port_id']) + if not port_db: + msg = (_("No router interface found for Router %s. " + "Router-IF delete failed") % router_id) + raise n_exc.BadRequest(resource='router', msg=msg) + subnet_id = port_db['fixed_ips'][0]['subnet_id'] + + session = context.session + with session.begin(subtransactions=True): + subnet_l2dom = nuagedb.get_subnet_l2dom_by_id(session, + subnet_id) + if not subnet_l2dom: + return super(NuagePlugin, + self).remove_router_interface(context, + router_id, + interface_info) + nuage_subn_id = subnet_l2dom['nuage_subnet_id'] + if self.nuageclient.vms_on_l2domain(nuage_subn_id): + msg = (_("Subnet %s has one or more active VMs " + "Router-IF delete not permitted") % subnet_id) + raise n_exc.BadRequest(resource='subnet', msg=msg) + + neutron_subnet = self.get_subnet(context, subnet_id) + ent_rtr_mapping = nuagedb.get_ent_rtr_mapping_by_rtrid( + context.session, + router_id) + if not ent_rtr_mapping: + msg = (_("Router %s does not hold net_partition " + "assoc on Nuage VSD. 
Router-IF delete failed") + % router_id) + raise n_exc.BadRequest(resource='router', msg=msg) + net = netaddr.IPNetwork(neutron_subnet['cidr']) + net_part_id = ent_rtr_mapping['net_partition_id'] + net_partition = self.get_net_partition(context, + net_part_id) + params = { + 'net_partition': net_partition, + 'tenant_id': neutron_subnet['tenant_id'], + 'net': net + } + nuage_subnet = self.nuageclient.create_subnet(neutron_subnet, + params) + self.nuageclient.delete_domain_subnet(nuage_subn_id) + info = super(NuagePlugin, + self).remove_router_interface(context, router_id, + interface_info) + if nuage_subnet: + tmplt_id = str(nuage_subnet['nuage_l2template_id']) + ns_dict = {} + ns_dict['nuage_subnet_id'] = nuage_subnet['nuage_l2domain_id'] + ns_dict['nuage_l2dom_tmplt_id'] = tmplt_id + nuagedb.update_subnetl2dom_mapping(subnet_l2dom, + ns_dict) + return info + + def _get_net_partition_for_router(self, context, router): + rtr = router['router'] + ent = rtr.get('net_partition', None) + if not ent: + def_net_part = cfg.CONF.RESTPROXY.default_net_partition_name + net_partition = nuagedb.get_net_partition_by_name(context.session, + def_net_part) + else: + net_partition = self._resource_finder(context, 'router', + 'net_partition', rtr) + if not net_partition: + msg = _("Either net_partition is not provided with router OR " + "default net_partition is not created at the start") + raise n_exc.BadRequest(resource='router', msg=msg) + return net_partition + + def create_router(self, context, router): + net_partition = self._get_net_partition_for_router(context, router) + neutron_router = super(NuagePlugin, self).create_router(context, + router) + params = { + 'net_partition': net_partition, + 'tenant_id': neutron_router['tenant_id'] + } + try: + nuage_router = self.nuageclient.create_router(neutron_router, + router['router'], + params) + except Exception: + with excutils.save_and_reraise_exception(): + super(NuagePlugin, self).delete_router(context, + neutron_router['id']) + 
if nuage_router: + user_id = nuage_router['nuage_userid'] + group_id = nuage_router['nuage_groupid'] + with context.session.begin(subtransactions=True): + nuagedb.add_entrouter_mapping(context.session, + net_partition['id'], + neutron_router['id'], + nuage_router['nuage_domain_id']) + nuagedb.add_rtrzone_mapping(context.session, + neutron_router['id'], + nuage_router['nuage_def_zone_id'], + nuage_user_id=user_id, + nuage_group_id=group_id) + return neutron_router + + def _validate_nuage_staticroutes(self, old_routes, added, removed): + cidrs = [] + for old in old_routes: + if old not in removed: + ip = netaddr.IPNetwork(old['destination']) + cidrs.append(ip) + for route in added: + ip = netaddr.IPNetwork(route['destination']) + matching = netaddr.all_matching_cidrs(ip.ip, cidrs) + if matching: + msg = _('for same subnet, multiple static routes not allowed') + raise n_exc.BadRequest(resource='router', msg=msg) + cidrs.append(ip) + + def update_router(self, context, id, router): + r = router['router'] + with context.session.begin(subtransactions=True): + if 'routes' in r: + old_routes = self._get_extra_routes_by_router_id(context, + id) + added, removed = utils.diff_list_of_dict(old_routes, + r['routes']) + self._validate_nuage_staticroutes(old_routes, added, removed) + ent_rtr_mapping = nuagedb.get_ent_rtr_mapping_by_rtrid( + context.session, id) + if not ent_rtr_mapping: + msg = (_("Router %s does not hold net-partition " + "assoc on VSD. extra-route failed") % id) + raise n_exc.BadRequest(resource='router', msg=msg) + # Let it do internal checks first and verify it. 
+ router_updated = super(NuagePlugin, + self).update_router(context, + id, + router) + for route in removed: + rtr_rt_mapping = nuagedb.get_router_route_mapping( + context.session, id, route) + if rtr_rt_mapping: + self.nuageclient.delete_nuage_staticroute( + rtr_rt_mapping['nuage_route_id']) + nuagedb.delete_static_route(context.session, + rtr_rt_mapping) + for route in added: + params = { + 'parent_id': ent_rtr_mapping['nuage_router_id'], + 'net': netaddr.IPNetwork(route['destination']), + 'nexthop': route['nexthop'] + } + nuage_rt_id = self.nuageclient.create_nuage_staticroute( + params) + nuagedb.add_static_route(context.session, + id, nuage_rt_id, + route['destination'], + route['nexthop']) + else: + router_updated = super(NuagePlugin, self).update_router( + context, id, router) + return router_updated + + def delete_router(self, context, id): + session = context.session + ent_rtr_mapping = nuagedb.get_ent_rtr_mapping_by_rtrid(session, + id) + if ent_rtr_mapping: + filters = { + 'device_id': [id], + 'device_owner': [os_constants.DEVICE_OWNER_ROUTER_INTF] + } + ports = self.get_ports(context, filters) + if ports: + raise l3.RouterInUse(router_id=id) + nuage_router_id = ent_rtr_mapping['nuage_router_id'] + self.nuageclient.delete_router(nuage_router_id) + router_zone = nuagedb.get_rtr_zone_mapping(session, id) + super(NuagePlugin, self).delete_router(context, id) + if router_zone and not self._check_router_subnet_for_tenant(context): + self.nuageclient.delete_user(router_zone['nuage_user_id']) + self.nuageclient.delete_group(router_zone['nuage_group_id']) + + def _make_net_partition_dict(self, net_partition, fields=None): + res = { + 'id': net_partition['id'], + 'name': net_partition['name'], + 'l3dom_tmplt_id': net_partition['l3dom_tmplt_id'], + 'l2dom_tmplt_id': net_partition['l2dom_tmplt_id'], + } + return self._fields(res, fields) + + def _create_net_partition(self, session, net_part_name): + fip_quota = cfg.CONF.RESTPROXY.default_floatingip_quota + params = 
{ + "name": net_part_name, + "fp_quota": str(fip_quota) + } + nuage_net_partition = self.nuageclient.create_net_partition(params) + net_partitioninst = None + if nuage_net_partition: + nuage_entid = nuage_net_partition['nuage_entid'] + l3dom_id = nuage_net_partition['l3dom_id'] + l2dom_id = nuage_net_partition['l2dom_id'] + with session.begin(): + net_partitioninst = nuagedb.add_net_partition(session, + nuage_entid, + l3dom_id, + l2dom_id, + net_part_name) + if not net_partitioninst: + return {} + return self._make_net_partition_dict(net_partitioninst) + + def _create_default_net_partition(self, default_net_part): + def_netpart = self.nuageclient.get_def_netpartition_data( + default_net_part) + session = db.get_session() + if def_netpart: + net_partition = nuagedb.get_net_partition_by_name( + session, default_net_part) + with session.begin(subtransactions=True): + if net_partition: + nuagedb.delete_net_partition(session, net_partition) + net_part = nuagedb.add_net_partition(session, + def_netpart['np_id'], + def_netpart['l3dom_tid'], + def_netpart['l2dom_tid'], + default_net_part) + return self._make_net_partition_dict(net_part) + else: + return self._create_net_partition(session, default_net_part) + + def create_net_partition(self, context, net_partition): + ent = net_partition['net_partition'] + session = context.session + return self._create_net_partition(session, ent["name"]) + + def delete_net_partition(self, context, id): + ent_rtr_mapping = nuagedb.get_ent_rtr_mapping_by_entid( + context.session, + id) + if ent_rtr_mapping: + msg = (_("One or more router still attached to " + "net_partition %s.") % id) + raise n_exc.BadRequest(resource='net_partition', msg=msg) + net_partition = nuagedb.get_net_partition_by_id(context.session, id) + if not net_partition: + msg = (_("NetPartition with %s does not exist") % id) + raise n_exc.BadRequest(resource='net_partition', msg=msg) + l3dom_tmplt_id = net_partition['l3dom_tmplt_id'] + l2dom_tmplt_id = 
net_partition['l2dom_tmplt_id'] + self.nuageclient.delete_net_partition(net_partition['id'], + l3dom_id=l3dom_tmplt_id, + l2dom_id=l2dom_tmplt_id) + with context.session.begin(subtransactions=True): + nuagedb.delete_net_partition(context.session, + net_partition) + + def get_net_partition(self, context, id, fields=None): + net_partition = nuagedb.get_net_partition_by_id(context.session, + id) + return self._make_net_partition_dict(net_partition) + + def get_net_partitions(self, context, filters=None, fields=None): + net_partitions = nuagedb.get_net_partitions(context.session, + filters=filters, + fields=fields) + return [self._make_net_partition_dict(net_partition, fields) + for net_partition in net_partitions] + + def _check_floatingip_update(self, context, port): + filter = {'fixed_port_id': [port['id']]} + local_fip = self.get_floatingips(context, + filters=filter) + if local_fip: + fip = local_fip[0] + self._create_update_floatingip(context, + fip, port['id']) + + def _create_update_floatingip(self, context, + neutron_fip, port_id): + rtr_id = neutron_fip['router_id'] + net_id = neutron_fip['floating_network_id'] + + fip_pool_mapping = nuagedb.get_fip_pool_from_netid(context.session, + net_id) + fip_mapping = nuagedb.get_fip_mapping_by_id(context.session, + neutron_fip['id']) + + if not fip_mapping: + ent_rtr_mapping = nuagedb.get_ent_rtr_mapping_by_rtrid( + context.session, rtr_id) + if not ent_rtr_mapping: + msg = _('router %s is not associated with ' + 'any net-partition') % rtr_id + raise n_exc.BadRequest(resource='floatingip', + msg=msg) + params = { + 'nuage_rtr_id': ent_rtr_mapping['nuage_router_id'], + 'nuage_fippool_id': fip_pool_mapping['fip_pool_id'], + 'neutron_fip_ip': neutron_fip['floating_ip_address'] + } + nuage_fip_id = self.nuageclient.create_nuage_floatingip(params) + nuagedb.add_fip_mapping(context.session, + neutron_fip['id'], + rtr_id, nuage_fip_id) + else: + if rtr_id != fip_mapping['router_id']: + msg = _('Floating IP can not be 
associated to VM in ' + 'different router context') + raise nuage_exc.OperationNotSupported(msg=msg) + nuage_fip_id = fip_mapping['nuage_fip_id'] + + fip_pool_dict = {'router_id': neutron_fip['router_id']} + nuagedb.update_fip_pool_mapping(fip_pool_mapping, + fip_pool_dict) + + # Update VM if required + port_mapping = nuagedb.get_port_mapping_by_id(context.session, + port_id) + if port_mapping: + params = { + 'nuage_vport_id': port_mapping['nuage_vport_id'], + 'nuage_fip_id': nuage_fip_id + } + self.nuageclient.update_nuage_vm_vport(params) + + def create_floatingip(self, context, floatingip): + fip = floatingip['floatingip'] + with context.session.begin(subtransactions=True): + neutron_fip = super(NuagePlugin, self).create_floatingip( + context, floatingip) + if not neutron_fip['router_id']: + return neutron_fip + try: + self._create_update_floatingip(context, neutron_fip, + fip['port_id']) + except (nuage_exc.OperationNotSupported, n_exc.BadRequest): + with excutils.save_and_reraise_exception(): + super(NuagePlugin, self).delete_floatingip( + context, neutron_fip['id']) + return neutron_fip + + def disassociate_floatingips(self, context, port_id): + super(NuagePlugin, self).disassociate_floatingips(context, port_id) + port_mapping = nuagedb.get_port_mapping_by_id(context.session, + port_id) + if port_mapping: + params = { + 'nuage_vport_id': port_mapping['nuage_vport_id'], + 'nuage_fip_id': None + } + self.nuageclient.update_nuage_vm_vport(params) + + def update_floatingip(self, context, id, floatingip): + fip = floatingip['floatingip'] + orig_fip = self._get_floatingip(context, id) + port_id = orig_fip['fixed_port_id'] + with context.session.begin(subtransactions=True): + neutron_fip = super(NuagePlugin, self).update_floatingip( + context, id, floatingip) + if fip['port_id'] is not None: + if not neutron_fip['router_id']: + ret_msg = 'floating-ip is not associated yet' + raise n_exc.BadRequest(resource='floatingip', + msg=ret_msg) + + try: + 
self._create_update_floatingip(context, + neutron_fip, + fip['port_id']) + except nuage_exc.OperationNotSupported: + with excutils.save_and_reraise_exception(): + super(NuagePlugin, + self).disassociate_floatingips(context, + fip['port_id']) + except n_exc.BadRequest: + with excutils.save_and_reraise_exception(): + super(NuagePlugin, self).delete_floatingip(context, + id) + else: + port_mapping = nuagedb.get_port_mapping_by_id(context.session, + port_id) + if port_mapping: + params = { + 'nuage_vport_id': port_mapping['nuage_vport_id'], + 'nuage_fip_id': None + } + self.nuageclient.update_nuage_vm_vport(params) + return neutron_fip + + def delete_floatingip(self, context, id): + fip = self._get_floatingip(context, id) + port_id = fip['fixed_port_id'] + with context.session.begin(subtransactions=True): + if port_id: + port_mapping = nuagedb.get_port_mapping_by_id(context.session, + port_id) + if (port_mapping and + port_mapping['nuage_vport_id'] is not None): + params = { + 'nuage_vport_id': port_mapping['nuage_vport_id'], + 'nuage_fip_id': None + } + self.nuageclient.update_nuage_vm_vport(params) + fip_mapping = nuagedb.get_fip_mapping_by_id(context.session, + id) + if fip_mapping: + self.nuageclient.delete_nuage_floatingip( + fip_mapping['nuage_fip_id']) + nuagedb.delete_fip_mapping(context.session, fip_mapping) + super(NuagePlugin, self).delete_floatingip(context, id) diff --git a/neutron/plugins/ofagent/README b/neutron/plugins/ofagent/README new file mode 100644 index 000000000..a43b0dd07 --- /dev/null +++ b/neutron/plugins/ofagent/README @@ -0,0 +1,21 @@ +This directory includes agent for OpenFlow Agent mechanism driver. 
+ +# -- Installation + +For how to install/set up the ML2 mechanism driver for OpenFlow Agent, please refer to +https://github.com/osrg/ryu/wiki/OpenStack + +# -- Ryu General + +For general Ryu stuff, please refer to +http://www.osrg.net/ryu/ + +Ryu is available at github +git://github.com/osrg/ryu.git +https://github.com/osrg/ryu + +The mailing list is at +ryu-devel@lists.sourceforge.net +https://lists.sourceforge.net/lists/listinfo/ryu-devel + +Enjoy! diff --git a/neutron/plugins/ofagent/__init__.py b/neutron/plugins/ofagent/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/neutron/plugins/ofagent/agent/__init__.py b/neutron/plugins/ofagent/agent/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/neutron/plugins/ofagent/agent/ofa_neutron_agent.py b/neutron/plugins/ofagent/agent/ofa_neutron_agent.py new file mode 100644 index 000000000..6e6cd84d7 --- /dev/null +++ b/neutron/plugins/ofagent/agent/ofa_neutron_agent.py @@ -0,0 +1,1418 @@ +# Copyright (C) 2014 VA Linux Systems Japan K.K. +# Based on openvswitch agent. +# +# Copyright 2011 VMware, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# @author: Fumihiko Kakuma, VA Linux Systems Japan K.K. 
+ +import time + +import netaddr +from oslo.config import cfg +from ryu.app.ofctl import api as ryu_api +from ryu.base import app_manager +from ryu.lib import hub +from ryu.ofproto import ofproto_v1_3 as ryu_ofp13 + +from neutron.agent.linux import ip_lib +from neutron.agent.linux import ovs_lib +from neutron.agent.linux import polling +from neutron.agent.linux import utils +from neutron.agent import rpc as agent_rpc +from neutron.agent import securitygroups_rpc as sg_rpc +from neutron.common import constants as n_const +from neutron.common import rpc_compat +from neutron.common import topics +from neutron.common import utils as n_utils +from neutron import context +from neutron.openstack.common import log as logging +from neutron.openstack.common import loopingcall +from neutron.plugins.common import constants as p_const +from neutron.plugins.ofagent.common import config # noqa +from neutron.plugins.openvswitch.common import constants + + +LOG = logging.getLogger(__name__) + +# A placeholder for dead vlans. +DEAD_VLAN_TAG = str(n_const.MAX_VLAN_TAG + 1) + + +# A class to represent a VIF (i.e., a port that has 'iface-id' and 'vif-mac' +# attributes set). 
+class LocalVLANMapping: + def __init__(self, vlan, network_type, physical_network, segmentation_id, + vif_ports=None): + if vif_ports is None: + vif_ports = {} + self.vlan = vlan + self.network_type = network_type + self.physical_network = physical_network + self.segmentation_id = segmentation_id + self.vif_ports = vif_ports + # set of tunnel ports on which packets should be flooded + self.tun_ofports = set() + + def __str__(self): + return ("lv-id = %s type = %s phys-net = %s phys-id = %s" % + (self.vlan, self.network_type, self.physical_network, + self.segmentation_id)) + + +class OVSBridge(ovs_lib.OVSBridge): + def __init__(self, br_name, root_helper, ryuapp): + super(OVSBridge, self).__init__(br_name, root_helper) + self.datapath_id = None + self.datapath = None + self.ofparser = None + self.ryuapp = ryuapp + + def find_datapath_id(self): + self.datapath_id = self.get_datapath_id() + + def get_datapath(self, retry_max=cfg.CONF.AGENT.get_datapath_retry_times): + retry = 0 + while self.datapath is None: + self.datapath = ryu_api.get_datapath(self.ryuapp, + int(self.datapath_id, 16)) + retry += 1 + if retry >= retry_max: + LOG.error(_('Agent terminated!: Failed to get a datapath.')) + raise SystemExit(1) + time.sleep(1) + self.ofparser = self.datapath.ofproto_parser + + def setup_ofp(self, controller_names=None, + protocols='OpenFlow13', + retry_max=cfg.CONF.AGENT.get_datapath_retry_times): + if not controller_names: + host = cfg.CONF.ofp_listen_host + if not host: + # 127.0.0.1 is a default for agent style of controller + host = '127.0.0.1' + controller_names = ["tcp:%s:%d" % (host, + cfg.CONF.ofp_tcp_listen_port)] + try: + self.set_protocols(protocols) + self.set_controller(controller_names) + except RuntimeError: + LOG.exception(_("Agent terminated")) + raise SystemExit(1) + self.find_datapath_id() + self.get_datapath(retry_max) + + +class OFAPluginApi(agent_rpc.PluginApi, + sg_rpc.SecurityGroupServerRpcApiMixin): + pass + + +class 
OFASecurityGroupAgent(sg_rpc.SecurityGroupAgentRpcMixin): + def __init__(self, context, plugin_rpc, root_helper): + self.context = context + self.plugin_rpc = plugin_rpc + self.root_helper = root_helper + self.init_firewall(defer_refresh_firewall=True) + + +class OFANeutronAgentRyuApp(app_manager.RyuApp): + OFP_VERSIONS = [ryu_ofp13.OFP_VERSION] + + def start(self): + + super(OFANeutronAgentRyuApp, self).start() + return hub.spawn(self._agent_main, self) + + def _agent_main(self, ryuapp): + cfg.CONF.register_opts(ip_lib.OPTS) + n_utils.log_opt_values(LOG) + + try: + agent_config = create_agent_config_map(cfg.CONF) + except ValueError: + LOG.exception(_("Agent failed to create agent config map")) + raise SystemExit(1) + + is_xen_compute_host = ('rootwrap-xen-dom0' in + agent_config['root_helper']) + if is_xen_compute_host: + # Force ip_lib to always use the root helper to ensure that ip + # commands target xen dom0 rather than domU. + cfg.CONF.set_default('ip_lib_force_root', True) + + agent = OFANeutronAgent(ryuapp, **agent_config) + + # Start everything. + LOG.info(_("Agent initialized successfully, now running... ")) + agent.daemon_loop() + + +class OFANeutronAgent(rpc_compat.RpcCallback, + sg_rpc.SecurityGroupAgentRpcCallbackMixin): + """A agent for OpenFlow Agent ML2 mechanism driver. + + OFANeutronAgent is a OpenFlow Agent agent for a ML2 plugin. + This is as a ryu application thread. + - An agent acts as an OpenFlow controller on each compute nodes. + - OpenFlow 1.3 (vendor agnostic unlike OVS extensions). + """ + + # history + # 1.0 Initial version + # 1.1 Support Security Group RPC + RPC_API_VERSION = '1.1' + + def __init__(self, ryuapp, integ_br, tun_br, local_ip, + bridge_mappings, root_helper, + polling_interval, tunnel_types=None, + veth_mtu=None, l2_population=False, + minimize_polling=False, + ovsdb_monitor_respawn_interval=( + constants.DEFAULT_OVSDBMON_RESPAWN)): + """Constructor. + + :param ryuapp: object of the ryu app. 
+ :param integ_br: name of the integration bridge. + :param tun_br: name of the tunnel bridge. + :param local_ip: local IP address of this hypervisor. + :param bridge_mappings: mappings from physical network name to bridge. + :param root_helper: utility to use when running shell cmds. + :param polling_interval: interval (secs) to poll DB. + :param tunnel_types: A list of tunnel types to enable support for in + the agent. If set, will automatically set enable_tunneling to + True. + :param veth_mtu: MTU size for veth interfaces. + :param minimize_polling: Optional, whether to minimize polling by + monitoring ovsdb for interface changes. + :param ovsdb_monitor_respawn_interval: Optional, when using polling + minimization, the number of seconds to wait before respawning + the ovsdb monitor. + """ + super(OFANeutronAgent, self).__init__() + self.ryuapp = ryuapp + self.veth_mtu = veth_mtu + self.root_helper = root_helper + self.available_local_vlans = set(xrange(n_const.MIN_VLAN_TAG, + n_const.MAX_VLAN_TAG)) + self.tunnel_types = tunnel_types or [] + self.l2_pop = l2_population + self.agent_state = { + 'binary': 'neutron-ofa-agent', + 'host': cfg.CONF.host, + 'topic': n_const.L2_AGENT_TOPIC, + 'configurations': {'bridge_mappings': bridge_mappings, + 'tunnel_types': self.tunnel_types, + 'tunneling_ip': local_ip, + 'l2_population': self.l2_pop}, + 'agent_type': n_const.AGENT_TYPE_OFA, + 'start_flag': True} + + # Keep track of int_br's device count for use by _report_state() + self.int_br_device_count = 0 + + self.int_br = OVSBridge(integ_br, self.root_helper, self.ryuapp) + # Stores port update notifications for processing in main loop + self.updated_ports = set() + self.setup_rpc() + self.setup_integration_br() + self.setup_physical_bridges(bridge_mappings) + self.local_vlan_map = {} + self.tun_br_ofports = {p_const.TYPE_GRE: {}, + p_const.TYPE_VXLAN: {}} + + self.polling_interval = polling_interval + self.minimize_polling = minimize_polling + 
self.ovsdb_monitor_respawn_interval = ovsdb_monitor_respawn_interval + + self.enable_tunneling = bool(self.tunnel_types) + self.local_ip = local_ip + self.tunnel_count = 0 + self.vxlan_udp_port = cfg.CONF.AGENT.vxlan_udp_port + self.dont_fragment = cfg.CONF.AGENT.dont_fragment + if self.enable_tunneling: + self.setup_tunnel_br(tun_br) + # Collect additional bridges to monitor + self.ancillary_brs = self.setup_ancillary_bridges(integ_br, tun_br) + + # Security group agent support + self.sg_agent = OFASecurityGroupAgent(self.context, + self.plugin_rpc, + self.root_helper) + # Initialize iteration counter + self.iter_num = 0 + + def _report_state(self): + # How many devices are likely used by a VM + self.agent_state.get('configurations')['devices'] = ( + self.int_br_device_count) + try: + self.state_rpc.report_state(self.context, + self.agent_state) + self.agent_state.pop('start_flag', None) + except Exception: + LOG.exception(_("Failed reporting state!")) + + def _create_tunnel_port_name(self, tunnel_type, ip_address): + try: + ip_hex = '%08x' % netaddr.IPAddress(ip_address, version=4) + return '%s-%s' % (tunnel_type, ip_hex) + except Exception: + LOG.warn(_("Unable to create tunnel port. 
Invalid remote IP: %s"), + ip_address) + + def ryu_send_msg(self, msg): + result = ryu_api.send_msg(self.ryuapp, msg) + LOG.info(_("ryu send_msg() result: %s"), result) + + def setup_rpc(self): + mac = self.int_br.get_local_port_mac() + self.agent_id = '%s%s' % ('ovs', (mac.replace(":", ""))) + self.topic = topics.AGENT + self.plugin_rpc = OFAPluginApi(topics.PLUGIN) + self.state_rpc = agent_rpc.PluginReportStateAPI(topics.PLUGIN) + + # RPC network init + self.context = context.get_admin_context_without_session() + # Handle updates from service + self.endpoints = [self] + # Define the listening consumers for the agent + consumers = [[topics.PORT, topics.UPDATE], + [topics.NETWORK, topics.DELETE], + [constants.TUNNEL, topics.UPDATE], + [topics.SECURITY_GROUP, topics.UPDATE]] + self.connection = agent_rpc.create_consumers(self.endpoints, + self.topic, + consumers) + report_interval = cfg.CONF.AGENT.report_interval + if report_interval: + heartbeat = loopingcall.FixedIntervalLoopingCall( + self._report_state) + heartbeat.start(interval=report_interval) + + def get_net_uuid(self, vif_id): + for network_id, vlan_mapping in self.local_vlan_map.iteritems(): + if vif_id in vlan_mapping.vif_ports: + return network_id + + def network_delete(self, context, **kwargs): + network_id = kwargs.get('network_id') + LOG.debug(_("network_delete received network %s"), network_id) + # The network may not be defined on this agent + lvm = self.local_vlan_map.get(network_id) + if lvm: + self.reclaim_local_vlan(network_id) + else: + LOG.debug(_("Network %s not used on agent."), network_id) + + def port_update(self, context, **kwargs): + port = kwargs.get('port') + # Put the port identifier in the updated_ports set. 
+ # Even if full port details might be provided to this call, + # they are not used since there is no guarantee the notifications + # are processed in the same order as the relevant API requests + self.updated_ports.add(port['id']) + LOG.debug(_("port_update received port %s"), port['id']) + + def tunnel_update(self, context, **kwargs): + LOG.debug(_("tunnel_update received")) + if not self.enable_tunneling: + return + tunnel_ip = kwargs.get('tunnel_ip') + tunnel_type = kwargs.get('tunnel_type') + if not tunnel_type: + LOG.error(_("No tunnel_type specified, cannot create tunnels")) + return + if tunnel_type not in self.tunnel_types: + LOG.error(_("tunnel_type %s not supported by agent"), tunnel_type) + return + if tunnel_ip == self.local_ip: + return + tun_name = self._create_tunnel_port_name(tunnel_type, tunnel_ip) + if not tun_name: + return + self.setup_tunnel_port(tun_name, tunnel_ip, tunnel_type) + + def _provision_local_vlan_outbound_for_tunnel(self, lvid, + segmentation_id, ofports): + br = self.tun_br + match = br.ofparser.OFPMatch( + vlan_vid=int(lvid) | ryu_ofp13.OFPVID_PRESENT) + actions = [br.ofparser.OFPActionPopVlan(), + br.ofparser.OFPActionSetField( + tunnel_id=int(segmentation_id))] + for ofport in ofports: + actions.append(br.ofparser.OFPActionOutput(ofport, 0)) + instructions = [br.ofparser.OFPInstructionActions( + ryu_ofp13.OFPIT_APPLY_ACTIONS, actions)] + msg = br.ofparser.OFPFlowMod( + br.datapath, + table_id=constants.FLOOD_TO_TUN, + priority=1, + match=match, instructions=instructions) + self.ryu_send_msg(msg) + + def _provision_local_vlan_inbound_for_tunnel(self, lvid, network_type, + segmentation_id): + br = self.tun_br + match = br.ofparser.OFPMatch( + tunnel_id=int(segmentation_id)) + actions = [ + br.ofparser.OFPActionPushVlan(), + br.ofparser.OFPActionSetField( + vlan_vid=int(lvid) | ryu_ofp13.OFPVID_PRESENT)] + instructions = [ + br.ofparser.OFPInstructionActions( + ryu_ofp13.OFPIT_APPLY_ACTIONS, actions), + 
br.ofparser.OFPInstructionGotoTable( + table_id=constants.LEARN_FROM_TUN)] + msg = br.ofparser.OFPFlowMod( + br.datapath, + table_id=constants.TUN_TABLE[network_type], + priority=1, + match=match, + instructions=instructions) + self.ryu_send_msg(msg) + + def _local_vlan_for_tunnel(self, lvid, network_type, segmentation_id): + ofports = [int(ofport) for ofport in + self.tun_br_ofports[network_type].values()] + if ofports: + self._provision_local_vlan_outbound_for_tunnel( + lvid, segmentation_id, ofports) + self._provision_local_vlan_inbound_for_tunnel(lvid, network_type, + segmentation_id) + + def _provision_local_vlan_outbound(self, lvid, vlan_vid, physical_network): + br = self.phys_brs[physical_network] + datapath = br.datapath + ofp = datapath.ofproto + ofpp = datapath.ofproto_parser + match = ofpp.OFPMatch(in_port=int(self.phys_ofports[physical_network]), + vlan_vid=int(lvid) | ofp.OFPVID_PRESENT) + if vlan_vid == ofp.OFPVID_NONE: + actions = [ofpp.OFPActionPopVlan()] + else: + actions = [ofpp.OFPActionSetField(vlan_vid=vlan_vid)] + actions += [ofpp.OFPActionOutput(ofp.OFPP_NORMAL, 0)] + instructions = [ + ofpp.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions), + ] + msg = ofpp.OFPFlowMod(datapath, priority=4, match=match, + instructions=instructions) + self.ryu_send_msg(msg) + + def _provision_local_vlan_inbound(self, lvid, vlan_vid, physical_network): + datapath = self.int_br.datapath + ofp = datapath.ofproto + ofpp = datapath.ofproto_parser + match = ofpp.OFPMatch(in_port=int(self.int_ofports[physical_network]), + vlan_vid=vlan_vid) + if vlan_vid == ofp.OFPVID_NONE: + actions = [ofpp.OFPActionPushVlan()] + else: + actions = [] + actions += [ + ofpp.OFPActionSetField(vlan_vid=int(lvid) | ofp.OFPVID_PRESENT), + ofpp.OFPActionOutput(ofp.OFPP_NORMAL, 0), + ] + instructions = [ + ofpp.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions), + ] + msg = ofpp.OFPFlowMod(datapath, priority=3, match=match, + instructions=instructions) + self.ryu_send_msg(msg) + 
+ def _local_vlan_for_flat(self, lvid, physical_network): + vlan_vid = ryu_ofp13.OFPVID_NONE + self._provision_local_vlan_outbound(lvid, vlan_vid, physical_network) + self._provision_local_vlan_inbound(lvid, vlan_vid, physical_network) + + def _local_vlan_for_vlan(self, lvid, physical_network, segmentation_id): + vlan_vid = int(segmentation_id) | ryu_ofp13.OFPVID_PRESENT + self._provision_local_vlan_outbound(lvid, vlan_vid, physical_network) + self._provision_local_vlan_inbound(lvid, vlan_vid, physical_network) + + def provision_local_vlan(self, net_uuid, network_type, physical_network, + segmentation_id): + """Provisions a local VLAN. + + :param net_uuid: the uuid of the network associated with this vlan. + :param network_type: the network type ('gre', 'vxlan', 'vlan', 'flat', + 'local') + :param physical_network: the physical network for 'vlan' or 'flat' + :param segmentation_id: the VID for 'vlan' or tunnel ID for 'tunnel' + """ + + if not self.available_local_vlans: + LOG.error(_("No local VLAN available for net-id=%s"), net_uuid) + return + lvid = self.available_local_vlans.pop() + LOG.info(_("Assigning %(vlan_id)s as local vlan for " + "net-id=%(net_uuid)s"), + {'vlan_id': lvid, 'net_uuid': net_uuid}) + self.local_vlan_map[net_uuid] = LocalVLANMapping(lvid, network_type, + physical_network, + segmentation_id) + + if network_type in constants.TUNNEL_NETWORK_TYPES: + if self.enable_tunneling: + self._local_vlan_for_tunnel(lvid, network_type, + segmentation_id) + else: + LOG.error(_("Cannot provision %(network_type)s network for " + "net-id=%(net_uuid)s - tunneling disabled"), + {'network_type': network_type, + 'net_uuid': net_uuid}) + elif network_type == p_const.TYPE_FLAT: + if physical_network in self.phys_brs: + self._local_vlan_for_flat(lvid, physical_network) + else: + LOG.error(_("Cannot provision flat network for " + "net-id=%(net_uuid)s - no bridge for " + "physical_network %(physical_network)s"), + {'net_uuid': net_uuid, + 'physical_network': 
physical_network}) + elif network_type == p_const.TYPE_VLAN: + if physical_network in self.phys_brs: + self._local_vlan_for_vlan(lvid, physical_network, + segmentation_id) + else: + LOG.error(_("Cannot provision VLAN network for " + "net-id=%(net_uuid)s - no bridge for " + "physical_network %(physical_network)s"), + {'net_uuid': net_uuid, + 'physical_network': physical_network}) + elif network_type == p_const.TYPE_LOCAL: + # no flows needed for local networks + pass + else: + LOG.error(_("Cannot provision unknown network type " + "%(network_type)s for net-id=%(net_uuid)s"), + {'network_type': network_type, + 'net_uuid': net_uuid}) + + def _reclaim_local_vlan_outbound(self, lvm): + br = self.phys_brs[lvm.physical_network] + datapath = br.datapath + ofp = datapath.ofproto + ofpp = datapath.ofproto_parser + match = ofpp.OFPMatch( + in_port=int(self.phys_ofports[lvm.physical_network]), + vlan_vid=int(lvm.vlan) | ofp.OFPVID_PRESENT) + msg = ofpp.OFPFlowMod(datapath, table_id=ofp.OFPTT_ALL, + command=ofp.OFPFC_DELETE, out_group=ofp.OFPG_ANY, + out_port=ofp.OFPP_ANY, match=match) + self.ryu_send_msg(msg) + + def _reclaim_local_vlan_inbound(self, lvm): + datapath = self.int_br.datapath + ofp = datapath.ofproto + ofpp = datapath.ofproto_parser + if lvm.network_type == p_const.TYPE_FLAT: + vid = ofp.OFPVID_NONE + else: # p_const.TYPE_VLAN + vid = lvm.segmentation_id | ofp.OFPVID_PRESENT + match = ofpp.OFPMatch( + in_port=int(self.int_ofports[lvm.physical_network]), + vlan_vid=vid) + msg = ofpp.OFPFlowMod(datapath, table_id=ofp.OFPTT_ALL, + command=ofp.OFPFC_DELETE, out_group=ofp.OFPG_ANY, + out_port=ofp.OFPP_ANY, match=match) + self.ryu_send_msg(msg) + + def reclaim_local_vlan(self, net_uuid): + """Reclaim a local VLAN. + + :param net_uuid: the network uuid associated with this vlan. + :param lvm: a LocalVLANMapping object that tracks (vlan, lsw_id, + vif_ids) mapping. 
+ """ + lvm = self.local_vlan_map.pop(net_uuid, None) + if lvm is None: + LOG.debug(_("Network %s not used on agent."), net_uuid) + return + + LOG.info(_("Reclaiming vlan = %(vlan_id)s from net-id = %(net_uuid)s"), + {'vlan_id': lvm.vlan, + 'net_uuid': net_uuid}) + + if lvm.network_type in constants.TUNNEL_NETWORK_TYPES: + if self.enable_tunneling: + match = self.tun_br.ofparser.OFPMatch( + tunnel_id=int(lvm.segmentation_id)) + msg = self.tun_br.ofparser.OFPFlowMod( + self.tun_br.datapath, + table_id=constants.TUN_TABLE[lvm.network_type], + command=ryu_ofp13.OFPFC_DELETE, + out_group=ryu_ofp13.OFPG_ANY, + out_port=ryu_ofp13.OFPP_ANY, + match=match) + self.ryu_send_msg(msg) + match = self.tun_br.ofparser.OFPMatch( + vlan_vid=int(lvm.vlan) | ryu_ofp13.OFPVID_PRESENT) + msg = self.tun_br.ofparser.OFPFlowMod( + self.tun_br.datapath, + table_id=ryu_ofp13.OFPTT_ALL, + command=ryu_ofp13.OFPFC_DELETE, + out_group=ryu_ofp13.OFPG_ANY, + out_port=ryu_ofp13.OFPP_ANY, + match=match) + self.ryu_send_msg(msg) + elif lvm.network_type in (p_const.TYPE_FLAT, p_const.TYPE_VLAN): + if lvm.physical_network in self.phys_brs: + self._reclaim_local_vlan_outbound(lvm) + self._reclaim_local_vlan_inbound(lvm) + elif lvm.network_type == p_const.TYPE_LOCAL: + # no flows needed for local networks + pass + else: + LOG.error(_("Cannot reclaim unknown network type " + "%(network_type)s for net-id=%(net_uuid)s"), + {'network_type': lvm.network_type, + 'net_uuid': net_uuid}) + + self.available_local_vlans.add(lvm.vlan) + + def port_bound(self, port, net_uuid, + network_type, physical_network, segmentation_id): + """Bind port to net_uuid/lsw_id and install flow for inbound traffic + to vm. + + :param port: a ovs_lib.VifPort object. + :param net_uuid: the net_uuid this port is to be associated with. 
+ :param network_type: the network type ('gre', 'vlan', 'flat', 'local') + :param physical_network: the physical network for 'vlan' or 'flat' + :param segmentation_id: the VID for 'vlan' or tunnel ID for 'tunnel' + """ + if net_uuid not in self.local_vlan_map: + self.provision_local_vlan(net_uuid, network_type, + physical_network, segmentation_id) + lvm = self.local_vlan_map[net_uuid] + lvm.vif_ports[port.vif_id] = port + # Do not bind a port if it's already bound + cur_tag = self.int_br.db_get_val("Port", port.port_name, "tag") + if cur_tag != str(lvm.vlan): + self.int_br.set_db_attribute("Port", port.port_name, "tag", + str(lvm.vlan)) + if port.ofport != -1: + match = self.int_br.ofparser.OFPMatch(in_port=port.ofport) + msg = self.int_br.ofparser.OFPFlowMod( + self.int_br.datapath, + table_id=ryu_ofp13.OFPTT_ALL, + command=ryu_ofp13.OFPFC_DELETE, + out_group=ryu_ofp13.OFPG_ANY, + out_port=ryu_ofp13.OFPP_ANY, + match=match) + self.ryu_send_msg(msg) + + def port_unbound(self, vif_id, net_uuid=None): + """Unbind port. + + Removes corresponding local vlan mapping object if this is its last + VIF. + + :param vif_id: the id of the vif + :param net_uuid: the net_uuid this port is associated with. + """ + net_uuid = net_uuid or self.get_net_uuid(vif_id) + + if not self.local_vlan_map.get(net_uuid): + LOG.info(_('port_unbound() net_uuid %s not in local_vlan_map'), + net_uuid) + return + + lvm = self.local_vlan_map[net_uuid] + lvm.vif_ports.pop(vif_id, None) + + if not lvm.vif_ports: + self.reclaim_local_vlan(net_uuid) + + def port_dead(self, port): + """Once a port has no binding, put it on the "dead vlan". + + :param port: a ovs_lib.VifPort object. 
+ """ + # Don't kill a port if it's already dead + cur_tag = self.int_br.db_get_val("Port", port.port_name, "tag") + if cur_tag != DEAD_VLAN_TAG: + self.int_br.set_db_attribute("Port", port.port_name, "tag", + DEAD_VLAN_TAG) + match = self.int_br.ofparser.OFPMatch(in_port=port.ofport) + msg = self.int_br.ofparser.OFPFlowMod(self.int_br.datapath, + priority=2, match=match) + self.ryu_send_msg(msg) + + def setup_integration_br(self): + """Setup the integration bridge. + + Create patch ports and remove all existing flows. + + :param bridge_name: the name of the integration bridge. + :returns: the integration bridge + """ + self.int_br.setup_ofp() + self.int_br.delete_port(cfg.CONF.OVS.int_peer_patch_port) + msg = self.int_br.ofparser.OFPFlowMod(self.int_br.datapath, + table_id=ryu_ofp13.OFPTT_ALL, + command=ryu_ofp13.OFPFC_DELETE, + out_group=ryu_ofp13.OFPG_ANY, + out_port=ryu_ofp13.OFPP_ANY) + self.ryu_send_msg(msg) + # switch all traffic using L2 learning + actions = [self.int_br.ofparser.OFPActionOutput( + ryu_ofp13.OFPP_NORMAL, 0)] + instructions = [self.int_br.ofparser.OFPInstructionActions( + ryu_ofp13.OFPIT_APPLY_ACTIONS, + actions)] + msg = self.int_br.ofparser.OFPFlowMod(self.int_br.datapath, + priority=1, + instructions=instructions) + self.ryu_send_msg(msg) + + def setup_ancillary_bridges(self, integ_br, tun_br): + """Setup ancillary bridges - for example br-ex.""" + ovs_bridges = set(ovs_lib.get_bridges(self.root_helper)) + # Remove all known bridges + ovs_bridges.remove(integ_br) + if self.enable_tunneling: + ovs_bridges.remove(tun_br) + br_names = [self.phys_brs[physical_network].br_name for + physical_network in self.phys_brs] + ovs_bridges.difference_update(br_names) + # Filter list of bridges to those that have external + # bridge-id's configured + br_names = [ + bridge for bridge in ovs_bridges + if bridge != ovs_lib.get_bridge_external_bridge_id( + self.root_helper, bridge) + ] + ovs_bridges.difference_update(br_names) + ancillary_bridges = [] + for 
bridge in ovs_bridges: + br = OVSBridge(bridge, self.root_helper, self.ryuapp) + ancillary_bridges.append(br) + LOG.info(_('ancillary bridge list: %s.'), ancillary_bridges) + return ancillary_bridges + + def _tun_br_sort_incoming_traffic_depend_in_port(self, br): + match = br.ofparser.OFPMatch( + in_port=int(self.patch_int_ofport)) + instructions = [br.ofparser.OFPInstructionGotoTable( + table_id=constants.PATCH_LV_TO_TUN)] + msg = br.ofparser.OFPFlowMod(br.datapath, + priority=1, + match=match, + instructions=instructions) + self.ryu_send_msg(msg) + msg = br.ofparser.OFPFlowMod(br.datapath, priority=0) + self.ryu_send_msg(msg) + + def _tun_br_goto_table_ucast_unicast(self, br): + match = br.ofparser.OFPMatch(eth_dst=('00:00:00:00:00:00', + '01:00:00:00:00:00')) + instructions = [br.ofparser.OFPInstructionGotoTable( + table_id=constants.UCAST_TO_TUN)] + msg = br.ofparser.OFPFlowMod(br.datapath, + table_id=constants.PATCH_LV_TO_TUN, + match=match, + instructions=instructions) + self.ryu_send_msg(msg) + + def _tun_br_goto_table_flood_broad_multi_cast(self, br): + match = br.ofparser.OFPMatch(eth_dst=('01:00:00:00:00:00', + '01:00:00:00:00:00')) + instructions = [br.ofparser.OFPInstructionGotoTable( + table_id=constants.FLOOD_TO_TUN)] + msg = br.ofparser.OFPFlowMod(br.datapath, + table_id=constants.PATCH_LV_TO_TUN, + match=match, + instructions=instructions) + self.ryu_send_msg(msg) + + def _tun_br_set_table_tun_by_tunnel_type(self, br): + for tunnel_type in constants.TUNNEL_NETWORK_TYPES: + msg = br.ofparser.OFPFlowMod( + br.datapath, + table_id=constants.TUN_TABLE[tunnel_type], + priority=0) + self.ryu_send_msg(msg) + + def _tun_br_output_patch_int(self, br): + actions = [br.ofparser.OFPActionOutput( + int(self.patch_int_ofport), 0)] + instructions = [br.ofparser.OFPInstructionActions( + ryu_ofp13.OFPIT_APPLY_ACTIONS, + actions)] + msg = br.ofparser.OFPFlowMod(br.datapath, + table_id=constants.LEARN_FROM_TUN, + priority=1, + instructions=instructions) + 
self.ryu_send_msg(msg) + + def _tun_br_goto_table_flood_unknown_unicast(self, br): + instructions = [br.ofparser.OFPInstructionGotoTable( + table_id=constants.FLOOD_TO_TUN)] + msg = br.ofparser.OFPFlowMod(br.datapath, + table_id=constants.UCAST_TO_TUN, + priority=0, + instructions=instructions) + self.ryu_send_msg(msg) + + def _tun_br_default_drop(self, br): + msg = br.ofparser.OFPFlowMod( + br.datapath, + table_id=constants.FLOOD_TO_TUN, + priority=0) + self.ryu_send_msg(msg) + + def setup_tunnel_br(self, tun_br): + """Setup the tunnel bridge. + + Creates tunnel bridge, and links it to the integration bridge + using a patch port. + + :param tun_br: the name of the tunnel bridge. + """ + self.tun_br = OVSBridge(tun_br, self.root_helper, self.ryuapp) + self.tun_br.reset_bridge() + self.tun_br.setup_ofp() + self.patch_tun_ofport = self.int_br.add_patch_port( + cfg.CONF.OVS.int_peer_patch_port, cfg.CONF.OVS.tun_peer_patch_port) + self.patch_int_ofport = self.tun_br.add_patch_port( + cfg.CONF.OVS.tun_peer_patch_port, cfg.CONF.OVS.int_peer_patch_port) + if int(self.patch_tun_ofport) < 0 or int(self.patch_int_ofport) < 0: + LOG.error(_("Failed to create OVS patch port. Cannot have " + "tunneling enabled on this agent, since this version " + "of OVS does not support tunnels or patch ports. 
" + "Agent terminated!")) + raise SystemExit(1) + msg = self.tun_br.ofparser.OFPFlowMod(self.tun_br.datapath, + table_id=ryu_ofp13.OFPTT_ALL, + command=ryu_ofp13.OFPFC_DELETE, + out_group=ryu_ofp13.OFPG_ANY, + out_port=ryu_ofp13.OFPP_ANY) + self.ryu_send_msg(msg) + + self._tun_br_sort_incoming_traffic_depend_in_port(self.tun_br) + self._tun_br_goto_table_ucast_unicast(self.tun_br) + self._tun_br_goto_table_flood_broad_multi_cast(self.tun_br) + self._tun_br_set_table_tun_by_tunnel_type(self.tun_br) + self._tun_br_output_patch_int(self.tun_br) + self._tun_br_goto_table_flood_unknown_unicast(self.tun_br) + self._tun_br_default_drop(self.tun_br) + + def _phys_br_prepare_create_veth(self, br, int_veth_name, phys_veth_name): + self.int_br.delete_port(int_veth_name) + br.delete_port(phys_veth_name) + if ip_lib.device_exists(int_veth_name, self.root_helper): + ip_lib.IPDevice(int_veth_name, self.root_helper).link.delete() + # Give udev a chance to process its rules here, to avoid + # race conditions between commands launched by udev rules + # and the subsequent call to ip_wrapper.add_veth + utils.execute(['/sbin/udevadm', 'settle', '--timeout=10']) + + def _phys_br_create_veth(self, br, int_veth_name, + phys_veth_name, physical_network, ip_wrapper): + int_veth, phys_veth = ip_wrapper.add_veth(int_veth_name, + phys_veth_name) + self.int_ofports[physical_network] = self.int_br.add_port(int_veth) + self.phys_ofports[physical_network] = br.add_port(phys_veth) + return (int_veth, phys_veth) + + def _phys_br_block_untranslated_traffic(self, br, physical_network): + match = self.int_br.ofparser.OFPMatch(in_port=int( + self.int_ofports[physical_network])) + msg = self.int_br.ofparser.OFPFlowMod(self.int_br.datapath, + priority=2, match=match) + self.ryu_send_msg(msg) + match = br.ofparser.OFPMatch(in_port=int( + self.phys_ofports[physical_network])) + msg = br.ofparser.OFPFlowMod(br.datapath, priority=2, match=match) + self.ryu_send_msg(msg) + + def 
_phys_br_enable_veth_to_pass_traffic(self, int_veth, phys_veth): + # enable veth to pass traffic + int_veth.link.set_up() + phys_veth.link.set_up() + + if self.veth_mtu: + # set up mtu size for veth interfaces + int_veth.link.set_mtu(self.veth_mtu) + phys_veth.link.set_mtu(self.veth_mtu) + + def _phys_br_patch_physical_bridge_with_integration_bridge( + self, br, physical_network, bridge, ip_wrapper): + int_veth_name = constants.VETH_INTEGRATION_PREFIX + bridge + phys_veth_name = constants.VETH_PHYSICAL_PREFIX + bridge + self._phys_br_prepare_create_veth(br, int_veth_name, phys_veth_name) + int_veth, phys_veth = self._phys_br_create_veth(br, int_veth_name, + phys_veth_name, + physical_network, + ip_wrapper) + self._phys_br_block_untranslated_traffic(br, physical_network) + self._phys_br_enable_veth_to_pass_traffic(int_veth, phys_veth) + + def setup_physical_bridges(self, bridge_mappings): + """Setup the physical network bridges. + + Creates physical network bridges and links them to the + integration bridge using veths. + + :param bridge_mappings: map physical network names to bridge names. + """ + self.phys_brs = {} + self.int_ofports = {} + self.phys_ofports = {} + ip_wrapper = ip_lib.IPWrapper(self.root_helper) + for physical_network, bridge in bridge_mappings.iteritems(): + LOG.info(_("Mapping physical network %(physical_network)s to " + "bridge %(bridge)s"), + {'physical_network': physical_network, + 'bridge': bridge}) + # setup physical bridge + if not ip_lib.device_exists(bridge, self.root_helper): + LOG.error(_("Bridge %(bridge)s for physical network " + "%(physical_network)s does not exist. 
Agent " + "terminated!"), + {'physical_network': physical_network, + 'bridge': bridge}) + raise SystemExit(1) + br = OVSBridge(bridge, self.root_helper, self.ryuapp) + br.setup_ofp() + msg = br.ofparser.OFPFlowMod(br.datapath, + table_id=ryu_ofp13.OFPTT_ALL, + command=ryu_ofp13.OFPFC_DELETE, + out_group=ryu_ofp13.OFPG_ANY, + out_port=ryu_ofp13.OFPP_ANY) + self.ryu_send_msg(msg) + actions = [br.ofparser.OFPActionOutput(ryu_ofp13.OFPP_NORMAL, 0)] + instructions = [br.ofparser.OFPInstructionActions( + ryu_ofp13.OFPIT_APPLY_ACTIONS, + actions)] + msg = br.ofparser.OFPFlowMod(br.datapath, + priority=1, + instructions=instructions) + self.ryu_send_msg(msg) + self.phys_brs[physical_network] = br + + self._phys_br_patch_physical_bridge_with_integration_bridge( + br, physical_network, bridge, ip_wrapper) + + def scan_ports(self, registered_ports, updated_ports=None): + cur_ports = self.int_br.get_vif_port_set() + self.int_br_device_count = len(cur_ports) + port_info = {'current': cur_ports} + if updated_ports is None: + updated_ports = set() + updated_ports.update(self._find_lost_vlan_port(registered_ports)) + if updated_ports: + # Some updated ports might have been removed in the + # meanwhile, and therefore should not be processed. + # In this case the updated port won't be found among + # current ports. + updated_ports &= cur_ports + if updated_ports: + port_info['updated'] = updated_ports + + if cur_ports == registered_ports: + # No added or removed ports to set, just return here + return port_info + + port_info['added'] = cur_ports - registered_ports + # Remove all the known ports not found on the integration bridge + port_info['removed'] = registered_ports - cur_ports + return port_info + + def _find_lost_vlan_port(self, registered_ports): + """Return ports which have lost their vlan tag. + + The returned value is a set of port ids of the ports concerned by a + vlan tag loss. 
+ """ + port_tags = self.int_br.get_port_tag_dict() + changed_ports = set() + for lvm in self.local_vlan_map.values(): + for port in registered_ports: + if ( + port in lvm.vif_ports + and lvm.vif_ports[port].port_name in port_tags + and port_tags[lvm.vif_ports[port].port_name] != lvm.vlan + ): + LOG.info( + _("Port '%(port_name)s' has lost " + "its vlan tag '%(vlan_tag)d'!"), + {'port_name': lvm.vif_ports[port].port_name, + 'vlan_tag': lvm.vlan} + ) + changed_ports.add(port) + return changed_ports + + def update_ancillary_ports(self, registered_ports): + ports = set() + for bridge in self.ancillary_brs: + ports |= bridge.get_vif_port_set() + + if ports == registered_ports: + return + added = ports - registered_ports + removed = registered_ports - ports + return {'current': ports, + 'added': added, + 'removed': removed} + + def treat_vif_port(self, vif_port, port_id, network_id, network_type, + physical_network, segmentation_id, admin_state_up): + if vif_port: + # When this function is called for a port, the port should have + # an OVS ofport configured, as only these ports were considered + # for being treated. 
If that does not happen, it is a potential + # error condition of which operators should be aware + if not vif_port.ofport: + LOG.warn(_("VIF port: %s has no ofport configured, and might " + "not be able to transmit"), vif_port.vif_id) + if admin_state_up: + self.port_bound(vif_port, network_id, network_type, + physical_network, segmentation_id) + else: + self.port_dead(vif_port) + else: + LOG.debug(_("No VIF port for port %s defined on agent."), port_id) + + def setup_tunnel_port(self, port_name, remote_ip, tunnel_type): + ofport = self.tun_br.add_tunnel_port(port_name, + remote_ip, + self.local_ip, + tunnel_type, + self.vxlan_udp_port, + self.dont_fragment) + ofport_int = -1 + try: + ofport_int = int(ofport) + except (TypeError, ValueError): + LOG.exception(_("ofport should have a value that can be " + "interpreted as an integer")) + if ofport_int < 0: + LOG.error(_("Failed to set-up %(type)s tunnel port to %(ip)s"), + {'type': tunnel_type, 'ip': remote_ip}) + return 0 + + self.tun_br_ofports[tunnel_type][remote_ip] = ofport + # Add flow in default table to resubmit to the right + # tunelling table (lvid will be set in the latter) + match = self.tun_br.ofparser.OFPMatch(in_port=int(ofport)) + instructions = [self.tun_br.ofparser.OFPInstructionGotoTable( + table_id=constants.TUN_TABLE[tunnel_type])] + msg = self.tun_br.ofparser.OFPFlowMod(self.tun_br.datapath, + priority=1, + match=match, + instructions=instructions) + self.ryu_send_msg(msg) + + ofports = [int(p) for p in self.tun_br_ofports[tunnel_type].values()] + if ofports: + # Update flooding flows to include the new tunnel + for network_id, vlan_mapping in self.local_vlan_map.iteritems(): + if vlan_mapping.network_type == tunnel_type: + match = self.tun_br.ofparser.OFPMatch( + vlan_vid=int(vlan_mapping.vlan) | + ryu_ofp13.OFPVID_PRESENT) + actions = [ + self.tun_br.ofparser.OFPActionPopVlan(), + self.tun_br.ofparser.OFPActionSetField( + tunnel_id=int(vlan_mapping.segmentation_id))] + actions.extend( + 
self.tun_br.ofparser.OFPActionOutput(p, 0) + for p in ofports + ) + instructions = [ + self.tun_br.ofparser.OFPInstructionActions( + ryu_ofp13.OFPIT_APPLY_ACTIONS, + actions)] + msg = self.tun_br.ofparser.OFPFlowMod( + self.tun_br.datapath, + table_id=constants.FLOOD_TO_TUN, + priority=1, + match=match, + instructions=instructions) + self.ryu_send_msg(msg) + return ofport + + def treat_devices_added_or_updated(self, devices): + resync = False + for device in devices: + LOG.debug(_("Processing port %s"), device) + port = self.int_br.get_vif_port_by_id(device) + if not port: + # The port has disappeared and should not be processed + # There is no need to put the port DOWN in the plugin as + # it never went up in the first place + LOG.info(_("Port %s was not found on the integration bridge " + "and will therefore not be processed"), device) + continue + try: + details = self.plugin_rpc.get_device_details(self.context, + device, + self.agent_id) + except Exception as e: + LOG.debug(_("Unable to get port details for " + "%(device)s: %(e)s"), + {'device': device, 'e': e}) + resync = True + continue + if 'port_id' in details: + LOG.info(_("Port %(device)s updated. 
Details: %(details)s"), + {'device': device, 'details': details}) + self.treat_vif_port(port, details['port_id'], + details['network_id'], + details['network_type'], + details['physical_network'], + details['segmentation_id'], + details['admin_state_up']) + + # update plugin about port status + if details.get('admin_state_up'): + LOG.debug(_("Setting status for %s to UP"), device) + self.plugin_rpc.update_device_up( + self.context, device, self.agent_id, cfg.CONF.host) + else: + LOG.debug(_("Setting status for %s to DOWN"), device) + self.plugin_rpc.update_device_down( + self.context, device, self.agent_id, cfg.CONF.host) + LOG.info(_("Configuration for device %s completed."), device) + else: + LOG.warn(_("Device %s not defined on plugin"), device) + if (port and port.ofport != -1): + self.port_dead(port) + return resync + + def treat_ancillary_devices_added(self, devices): + resync = False + for device in devices: + LOG.info(_("Ancillary Port %s added"), device) + try: + self.plugin_rpc.get_device_details(self.context, device, + self.agent_id) + except Exception as e: + LOG.debug(_("Unable to get port details for " + "%(device)s: %(e)s"), + {'device': device, 'e': e}) + resync = True + continue + + # update plugin about port status + self.plugin_rpc.update_device_up(self.context, + device, + self.agent_id, + cfg.CONF.host) + return resync + + def treat_devices_removed(self, devices): + resync = False + self.sg_agent.remove_devices_filter(devices) + for device in devices: + LOG.info(_("Attachment %s removed"), device) + try: + self.plugin_rpc.update_device_down(self.context, + device, + self.agent_id, + cfg.CONF.host) + except Exception as e: + LOG.debug(_("port_removed failed for %(device)s: %(e)s"), + {'device': device, 'e': e}) + resync = True + continue + self.port_unbound(device) + return resync + + def treat_ancillary_devices_removed(self, devices): + resync = False + for device in devices: + LOG.info(_("Attachment %s removed"), device) + try: + details = 
self.plugin_rpc.update_device_down(self.context, + device, + self.agent_id, + cfg.CONF.host) + except Exception as e: + LOG.debug(_("port_removed failed for %(device)s: %(e)s"), + {'device': device, 'e': e}) + resync = True + continue + if details['exists']: + LOG.info(_("Port %s updated."), device) + # Nothing to do regarding local networking + else: + LOG.debug(_("Device %s not defined on plugin"), device) + return resync + + def process_network_ports(self, port_info): + resync_add = False + resync_removed = False + # If there is an exception while processing security groups ports + # will not be wired anyway, and a resync will be triggered + self.sg_agent.setup_port_filters(port_info.get('added', set()), + port_info.get('updated', set())) + # VIF wiring needs to be performed always for 'new' devices. + # For updated ports, re-wiring is not needed in most cases, but needs + # to be performed anyway when the admin state of a device is changed. + # A device might be both in the 'added' and 'updated' + # list at the same time; avoid processing it twice. 
+ devices_added_updated = (port_info.get('added', set()) | + port_info.get('updated', set())) + if devices_added_updated: + start = time.time() + resync_add = self.treat_devices_added_or_updated( + devices_added_updated) + LOG.debug(_("process_network_ports - iteration:%(iter_num)d - " + "treat_devices_added_or_updated completed " + "in %(elapsed).3f"), + {'iter_num': self.iter_num, + 'elapsed': time.time() - start}) + if 'removed' in port_info: + start = time.time() + resync_removed = self.treat_devices_removed(port_info['removed']) + LOG.debug(_("process_network_ports - iteration:%(iter_num)d - " + "treat_devices_removed completed in %(elapsed).3f"), + {'iter_num': self.iter_num, + 'elapsed': time.time() - start}) + # If one of the above opertaions fails => resync with plugin + return (resync_add | resync_removed) + + def process_ancillary_network_ports(self, port_info): + resync_add = False + resync_removed = False + if 'added' in port_info: + start = time.time() + resync_add = self.treat_ancillary_devices_added(port_info['added']) + LOG.debug(_("process_ancillary_network_ports - iteration: " + "%(iter_num)d - treat_ancillary_devices_added " + "completed in %(elapsed).3f"), + {'iter_num': self.iter_num, + 'elapsed': time.time() - start}) + if 'removed' in port_info: + start = time.time() + resync_removed = self.treat_ancillary_devices_removed( + port_info['removed']) + LOG.debug(_("process_ancillary_network_ports - iteration: " + "%(iter_num)d - treat_ancillary_devices_removed " + "completed in %(elapsed).3f"), + {'iter_num': self.iter_num, + 'elapsed': time.time() - start}) + + # If one of the above opertaions fails => resync with plugin + return (resync_add | resync_removed) + + def tunnel_sync(self): + resync = False + try: + for tunnel_type in self.tunnel_types: + details = self.plugin_rpc.tunnel_sync(self.context, + self.local_ip, + tunnel_type) + tunnels = details['tunnels'] + for tunnel in tunnels: + if self.local_ip != tunnel['ip_address']: + tun_name = 
self._create_tunnel_port_name( + tunnel_type, tunnel['ip_address']) + if not tun_name: + continue + self.setup_tunnel_port(tun_name, + tunnel['ip_address'], + tunnel_type) + except Exception as e: + LOG.debug(_("Unable to sync tunnel IP %(local_ip)s: %(e)s"), + {'local_ip': self.local_ip, 'e': e}) + resync = True + return resync + + def _agent_has_updates(self, polling_manager): + return (polling_manager.is_polling_required or + self.updated_ports or + self.sg_agent.firewall_refresh_needed()) + + def _port_info_has_changes(self, port_info): + return (port_info.get('added') or + port_info.get('removed') or + port_info.get('updated')) + + def ovsdb_monitor_loop(self, polling_manager=None): + if not polling_manager: + polling_manager = polling.AlwaysPoll() + + sync = True + ports = set() + updated_ports_copy = set() + ancillary_ports = set() + tunnel_sync = True + while True: + start = time.time() + port_stats = {'regular': {'added': 0, 'updated': 0, 'removed': 0}, + 'ancillary': {'added': 0, 'removed': 0}} + LOG.debug(_("Agent ovsdb_monitor_loop - " + "iteration:%d started"), + self.iter_num) + if sync: + LOG.info(_("Agent out of sync with plugin!")) + ports.clear() + ancillary_ports.clear() + sync = False + polling_manager.force_polling() + # Notify the plugin of tunnel IP + if self.enable_tunneling and tunnel_sync: + LOG.info(_("Agent tunnel out of sync with plugin!")) + try: + tunnel_sync = self.tunnel_sync() + except Exception: + LOG.exception(_("Error while synchronizing tunnels")) + tunnel_sync = True + if self._agent_has_updates(polling_manager): + try: + LOG.debug(_("Agent ovsdb_monitor_loop - " + "iteration:%(iter_num)d - " + "starting polling. Elapsed:%(elapsed).3f"), + {'iter_num': self.iter_num, + 'elapsed': time.time() - start}) + # Save updated ports dict to perform rollback in + # case resync would be needed, and then clear + # self.updated_ports. 
As the greenthread should not yield + # between these two statements, this will be thread-safe + updated_ports_copy = self.updated_ports + self.updated_ports = set() + port_info = self.scan_ports(ports, updated_ports_copy) + ports = port_info['current'] + LOG.debug(_("Agent ovsdb_monitor_loop - " + "iteration:%(iter_num)d - " + "port information retrieved. " + "Elapsed:%(elapsed).3f"), + {'iter_num': self.iter_num, + 'elapsed': time.time() - start}) + # Secure and wire/unwire VIFs and update their status + # on Neutron server + if (self._port_info_has_changes(port_info) or + self.sg_agent.firewall_refresh_needed()): + LOG.debug(_("Starting to process devices in:%s"), + port_info) + # If treat devices fails - must resync with plugin + sync = self.process_network_ports(port_info) + LOG.debug(_("Agent ovsdb_monitor_loop - " + "iteration:%(iter_num)d - " + "ports processed. Elapsed:%(elapsed).3f"), + {'iter_num': self.iter_num, + 'elapsed': time.time() - start}) + port_stats['regular']['added'] = ( + len(port_info.get('added', []))) + port_stats['regular']['updated'] = ( + len(port_info.get('updated', []))) + port_stats['regular']['removed'] = ( + len(port_info.get('removed', []))) + # Treat ancillary devices if they exist + if self.ancillary_brs: + port_info = self.update_ancillary_ports( + ancillary_ports) + LOG.debug(_("Agent ovsdb_monitor_loop - " + "iteration:%(iter_num)d - " + "ancillary port info retrieved. " + "Elapsed:%(elapsed).3f"), + {'iter_num': self.iter_num, + 'elapsed': time.time() - start}) + + if port_info: + rc = self.process_ancillary_network_ports( + port_info) + LOG.debug(_("Agent ovsdb_monitor_loop - " + "iteration:" + "%(iter_num)d - ancillary ports " + "processed. 
Elapsed:%(elapsed).3f"), + {'iter_num': self.iter_num, + 'elapsed': time.time() - start}) + ancillary_ports = port_info['current'] + port_stats['ancillary']['added'] = ( + len(port_info.get('added', []))) + port_stats['ancillary']['removed'] = ( + len(port_info.get('removed', []))) + sync = sync | rc + + polling_manager.polling_completed() + except Exception: + LOG.exception(_("Error while processing VIF ports")) + # Put the ports back in self.updated_port + self.updated_ports |= updated_ports_copy + sync = True + + # sleep till end of polling interval + elapsed = (time.time() - start) + LOG.debug(_("Agent ovsdb_monitor_loop - iteration:%(iter_num)d " + "completed. Processed ports statistics:" + "%(port_stats)s. Elapsed:%(elapsed).3f"), + {'iter_num': self.iter_num, + 'port_stats': port_stats, + 'elapsed': elapsed}) + if (elapsed < self.polling_interval): + time.sleep(self.polling_interval - elapsed) + else: + LOG.debug(_("Loop iteration exceeded interval " + "(%(polling_interval)s vs. %(elapsed)s)!"), + {'polling_interval': self.polling_interval, + 'elapsed': elapsed}) + self.iter_num = self.iter_num + 1 + + def daemon_loop(self): + with polling.get_polling_manager( + self.minimize_polling, + self.root_helper, + self.ovsdb_monitor_respawn_interval) as pm: + + self.ovsdb_monitor_loop(polling_manager=pm) + + +def create_agent_config_map(config): + """Create a map of agent config parameters. 
+ + :param config: an instance of cfg.CONF + :returns: a map of agent configuration parameters + """ + try: + bridge_mappings = n_utils.parse_mappings(config.OVS.bridge_mappings) + except ValueError as e: + raise ValueError(_("Parsing bridge_mappings failed: %s.") % e) + + kwargs = dict( + integ_br=config.OVS.integration_bridge, + tun_br=config.OVS.tunnel_bridge, + local_ip=config.OVS.local_ip, + bridge_mappings=bridge_mappings, + root_helper=config.AGENT.root_helper, + polling_interval=config.AGENT.polling_interval, + minimize_polling=config.AGENT.minimize_polling, + tunnel_types=config.AGENT.tunnel_types, + veth_mtu=config.AGENT.veth_mtu, + l2_population=False, + ovsdb_monitor_respawn_interval=constants.DEFAULT_OVSDBMON_RESPAWN, + ) + + # If enable_tunneling is TRUE, set tunnel_type to default to GRE + if config.OVS.enable_tunneling and not kwargs['tunnel_types']: + kwargs['tunnel_types'] = [p_const.TYPE_GRE] + + # Verify the tunnel_types specified are valid + for tun in kwargs['tunnel_types']: + if tun not in constants.TUNNEL_NETWORK_TYPES: + msg = _('Invalid tunnel type specificed: %s'), tun + raise ValueError(msg) + if not kwargs['local_ip']: + msg = _('Tunneling cannot be enabled without a valid local_ip.') + raise ValueError(msg) + + return kwargs diff --git a/neutron/plugins/ofagent/common/__init__.py b/neutron/plugins/ofagent/common/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/neutron/plugins/ofagent/common/config.py b/neutron/plugins/ofagent/common/config.py new file mode 100644 index 000000000..759d3df1d --- /dev/null +++ b/neutron/plugins/ofagent/common/config.py @@ -0,0 +1,33 @@ +# Copyright (C) 2014 VA Linux Systems Japan K.K. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# @author: Fumihiko Kakuma, VA Linux Systems Japan K.K. + +from oslo.config import cfg + +from neutron.agent.common import config +from neutron.plugins.openvswitch.common import config as ovs_config + + +agent_opts = [ + cfg.IntOpt('get_datapath_retry_times', default=60, + help=_("Number of seconds to retry acquiring " + "an Open vSwitch datapath")), +] + + +cfg.CONF.register_opts(ovs_config.ovs_opts, 'OVS') +cfg.CONF.register_opts(ovs_config.agent_opts, 'AGENT') +cfg.CONF.register_opts(agent_opts, 'AGENT') +config.register_agent_state_opts_helper(cfg.CONF) +config.register_root_helper(cfg.CONF) diff --git a/neutron/plugins/oneconvergence/README b/neutron/plugins/oneconvergence/README new file mode 100644 index 000000000..11b26545a --- /dev/null +++ b/neutron/plugins/oneconvergence/README @@ -0,0 +1,32 @@ +One Convergence Neutron Plugin to implement the Neutron v2.0 API. The plugin +works with One Convergence NVSD controller to provide network virtualization +functionality. + +The plugin is enabled with the following configuration line in neutron.conf: + +core_plugin = neutron.plugins.oneconvergence.plugin.OneConvergencePluginV2 + +The configuration parameters required for the plugin are specified in the file +etc/neutron/plugins/oneconvergence/nvsdplugin.ini. The configuration file contains +description of the different parameters. 
+ +To enable One Convergence Neutron Plugin with devstack and configure the required +parameters, use the following lines in localrc: + +Q_PLUGIN=oneconvergence + +disable_service n-net +enable_service q-agt +enable_service q-dhcp +enable_service q-svc +enable_service q-l3 +enable_service q-meta +enable_service neutron + +NVSD_IP= +NVSD_PORT= +NVSD_USER= +NVSD_PASSWD= + +The NVSD controller configuration should be specified in nvsdplugin.ini before +invoking stack.sh. diff --git a/neutron/plugins/oneconvergence/__init__.py b/neutron/plugins/oneconvergence/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/neutron/plugins/oneconvergence/agent/__init__.py b/neutron/plugins/oneconvergence/agent/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/neutron/plugins/oneconvergence/agent/nvsd_neutron_agent.py b/neutron/plugins/oneconvergence/agent/nvsd_neutron_agent.py new file mode 100644 index 000000000..377cdda1e --- /dev/null +++ b/neutron/plugins/oneconvergence/agent/nvsd_neutron_agent.py @@ -0,0 +1,176 @@ +# Copyright 2014 OneConvergence, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Kedar Kulkarni, One Convergence, Inc. 
+ +"""NVSD agent code for security group events.""" + +import socket +import sys +import time + +import eventlet +eventlet.monkey_patch() + +from neutron.agent.linux import ovs_lib +from neutron.agent import rpc as agent_rpc +from neutron.agent import securitygroups_rpc as sg_rpc +from neutron.common import config as common_config +from neutron.common import rpc_compat +from neutron.common import topics +from neutron import context as n_context +from neutron.extensions import securitygroup as ext_sg +from neutron.openstack.common import log as logging +from neutron.plugins.oneconvergence.lib import config + +LOG = logging.getLogger(__name__) + + +class NVSDAgentRpcCallback(rpc_compat.RpcCallback): + + RPC_API_VERSION = '1.0' + + def __init__(self, context, agent, sg_agent): + super(NVSDAgentRpcCallback, self).__init__() + self.context = context + self.agent = agent + self.sg_agent = sg_agent + + def port_update(self, context, **kwargs): + LOG.debug(_("port_update received: %s"), kwargs) + port = kwargs.get('port') + # Validate that port is on OVS + vif_port = self.agent.int_br.get_vif_port_by_id(port['id']) + if not vif_port: + return + + if ext_sg.SECURITYGROUPS in port: + self.sg_agent.refresh_firewall() + + +class SecurityGroupServerRpcApi(rpc_compat.RpcProxy, + sg_rpc.SecurityGroupServerRpcApiMixin): + def __init__(self, topic): + super(SecurityGroupServerRpcApi, self).__init__( + topic=topic, default_version=sg_rpc.SG_RPC_VERSION) + + +class SecurityGroupAgentRpcCallback( + rpc_compat.RpcCallback, + sg_rpc.SecurityGroupAgentRpcCallbackMixin): + + RPC_API_VERSION = sg_rpc.SG_RPC_VERSION + + def __init__(self, context, sg_agent): + super(SecurityGroupAgentRpcCallback, self).__init__() + self.context = context + self.sg_agent = sg_agent + + +class SecurityGroupAgentRpc(sg_rpc.SecurityGroupAgentRpcMixin): + + def __init__(self, context, root_helper): + self.context = context + + self.plugin_rpc = SecurityGroupServerRpcApi(topics.PLUGIN) + self.root_helper = 
root_helper + self.init_firewall() + + +class NVSDNeutronAgent(rpc_compat.RpcCallback): + # history + # 1.0 Initial version + # 1.1 Support Security Group RPC + RPC_API_VERSION = '1.1' + + def __init__(self, integ_br, root_helper, polling_interval): + super(NVSDNeutronAgent, self).__init__() + self.int_br = ovs_lib.OVSBridge(integ_br, root_helper) + self.polling_interval = polling_interval + self.root_helper = root_helper + self.setup_rpc() + self.ports = set() + + def setup_rpc(self): + + self.host = socket.gethostname() + self.agent_id = 'nvsd-q-agent.%s' % self.host + LOG.info(_("RPC agent_id: %s"), self.agent_id) + + self.topic = topics.AGENT + self.context = n_context.get_admin_context_without_session() + self.sg_agent = SecurityGroupAgentRpc(self.context, + self.root_helper) + + # RPC network init + # Handle updates from service + self.callback_oc = NVSDAgentRpcCallback(self.context, + self, self.sg_agent) + self.callback_sg = SecurityGroupAgentRpcCallback(self.context, + self.sg_agent) + self.endpoints = [self.callback_oc, self.callback_sg] + # Define the listening consumer for the agent + consumers = [[topics.PORT, topics.UPDATE], + [topics.SECURITY_GROUP, topics.UPDATE]] + self.connection = agent_rpc.create_consumers(self.endpoints, + self.topic, + consumers) + + def _update_ports(self, registered_ports): + ports = self.int_br.get_vif_port_set() + if ports == registered_ports: + return + added = ports - registered_ports + removed = registered_ports - ports + return {'current': ports, + 'added': added, + 'removed': removed} + + def _process_devices_filter(self, port_info): + if 'added' in port_info: + self.sg_agent.prepare_devices_filter(port_info['added']) + if 'removed' in port_info: + self.sg_agent.remove_devices_filter(port_info['removed']) + + def daemon_loop(self): + """Main processing loop for OC Plugin Agent.""" + + ports = set() + while True: + try: + port_info = self._update_ports(ports) + if port_info: + LOG.debug(_("Port list is updated")) + 
self._process_devices_filter(port_info) + ports = port_info['current'] + self.ports = ports + except Exception: + LOG.exception(_("Error in agent event loop")) + + LOG.debug(_("AGENT looping.....")) + time.sleep(self.polling_interval) + + +def main(): + common_config.init(sys.argv[1:]) + common_config.setup_logging(config.CONF) + + integ_br = config.AGENT.integration_bridge + root_helper = config.AGENT.root_helper + polling_interval = config.AGENT.polling_interval + agent = NVSDNeutronAgent(integ_br, root_helper, polling_interval) + LOG.info(_("NVSD Agent initialized successfully, now running... ")) + + # Start everything. + agent.daemon_loop() diff --git a/neutron/plugins/oneconvergence/lib/__init__.py b/neutron/plugins/oneconvergence/lib/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/neutron/plugins/oneconvergence/lib/config.py b/neutron/plugins/oneconvergence/lib/config.py new file mode 100644 index 000000000..2bbf086a3 --- /dev/null +++ b/neutron/plugins/oneconvergence/lib/config.py @@ -0,0 +1,57 @@ +# Copyright 2014 OneConvergence, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +""" Register the configuration options""" + +from oslo.config import cfg + +from neutron.agent.common import config + + +NVSD_OPT = [ + cfg.StrOpt('nvsd_ip', + default='127.0.0.1', + help=_("NVSD Controller IP address")), + cfg.IntOpt('nvsd_port', + default=8082, + help=_("NVSD Controller Port number")), + cfg.StrOpt('nvsd_user', + default='ocplugin', + help=_("NVSD Controller username")), + cfg.StrOpt('nvsd_passwd', + default='oc123', secret=True, + help=_("NVSD Controller password")), + cfg.IntOpt('request_timeout', + default=30, + help=_("NVSD controller REST API request timeout in seconds")), + cfg.IntOpt('nvsd_retries', default=0, + help=_("Number of login retries to NVSD controller")) +] + +agent_opts = [ + cfg.StrOpt('integration_bridge', default='br-int', + help=_("integration bridge")), + cfg.IntOpt('polling_interval', default=2, + help=_("The number of seconds the agent will wait between " + "polling for local device changes.")), +] + +cfg.CONF.register_opts(NVSD_OPT, "nvsd") +cfg.CONF.register_opts(agent_opts, "AGENT") + +config.register_root_helper(cfg.CONF) + +CONF = cfg.CONF +AGENT = cfg.CONF.AGENT diff --git a/neutron/plugins/oneconvergence/lib/exception.py b/neutron/plugins/oneconvergence/lib/exception.py new file mode 100644 index 000000000..b6864b13f --- /dev/null +++ b/neutron/plugins/oneconvergence/lib/exception.py @@ -0,0 +1,55 @@ +# Copyright 2014 OneConvergence, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +"""NVSD Exception Definitions.""" + +from neutron.common import exceptions as n_exc + + +class NVSDAPIException(n_exc.NeutronException): + '''Base NVSDplugin Exception.''' + message = _("An unknown nvsd plugin exception occurred: %(reason)s") + + +class RequestTimeout(NVSDAPIException): + message = _("The request has timed out.") + + +class UnAuthorizedException(NVSDAPIException): + message = _("Invalid access credentials to the Server.") + + +class NotFoundException(NVSDAPIException): + message = _("A resource is not found: %(reason)s") + + +class BadRequestException(NVSDAPIException): + message = _("Request sent to server is invalid: %(reason)s") + + +class ServerException(NVSDAPIException): + message = _("Internal Server Error: %(reason)s") + + +class ConnectionClosedException(NVSDAPIException): + message = _("Connection is closed by the server.") + + +class ForbiddenException(NVSDAPIException): + message = _("The request is forbidden access to the resource: %(reason)s") + + +class InternalServerError(NVSDAPIException): + message = _("Internal Server Error from NVSD controller: %(reason)s") diff --git a/neutron/plugins/oneconvergence/lib/nvsd_db.py b/neutron/plugins/oneconvergence/lib/nvsd_db.py new file mode 100644 index 000000000..00a623a85 --- /dev/null +++ b/neutron/plugins/oneconvergence/lib/nvsd_db.py @@ -0,0 +1,45 @@ +# Copyright 2014 OneConvergence, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# +# @author: Kedar Kulkarni, One Convergence, Inc. + +from neutron.db import api as db +from neutron.db import models_v2 +from neutron.db import securitygroups_db as sg_db +from neutron.extensions import securitygroup as ext_sg +from neutron import manager + + +def get_port_from_device(port_id): + session = db.get_session() + sg_binding_port = sg_db.SecurityGroupPortBinding.port_id + + query = session.query(models_v2.Port, + sg_db.SecurityGroupPortBinding.security_group_id) + query = query.outerjoin(sg_db.SecurityGroupPortBinding, + models_v2.Port.id == sg_binding_port) + query = query.filter(models_v2.Port.id == port_id) + port_and_sgs = query.all() + if not port_and_sgs: + return None + port = port_and_sgs[0][0] + plugin = manager.NeutronManager.get_plugin() + port_dict = plugin._make_port_dict(port) + port_dict[ext_sg.SECURITYGROUPS] = [ + sg_id for tport, sg_id in port_and_sgs if sg_id] + port_dict['security_group_rules'] = [] + port_dict['security_group_source_groups'] = [] + port_dict['fixed_ips'] = [ip['ip_address'] + for ip in port['fixed_ips']] + return port_dict diff --git a/neutron/plugins/oneconvergence/lib/nvsdlib.py b/neutron/plugins/oneconvergence/lib/nvsdlib.py new file mode 100644 index 000000000..09e8a5b16 --- /dev/null +++ b/neutron/plugins/oneconvergence/lib/nvsdlib.py @@ -0,0 +1,352 @@ +# Copyright 2014 OneConvergence, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Kedar Kulkarni, One Convergence, Inc. 
+ +"""Intermidiate NVSD Library.""" + +from neutron.openstack.common import excutils +from neutron.openstack.common import jsonutils as json +from neutron.openstack.common import log as logging +import neutron.plugins.oneconvergence.lib.exception as nvsdexception +from neutron.plugins.oneconvergence.lib import plugin_helper + +LOG = logging.getLogger(__name__) + +NETWORKS_URI = "/pluginhandler/ocplugin/tenant/%s/lnetwork/" +NETWORK_URI = NETWORKS_URI + "%s" +GET_ALL_NETWORKS = "/pluginhandler/ocplugin/tenant/getallnetworks" + +SUBNETS_URI = NETWORK_URI + "/lsubnet/" +SUBNET_URI = SUBNETS_URI + "%s" +GET_ALL_SUBNETS = "/pluginhandler/ocplugin/tenant/getallsubnets" + +PORTS_URI = NETWORK_URI + "/lport/" +PORT_URI = PORTS_URI + "%s" + +EXT_URI = "/pluginhandler/ocplugin/ext/tenant/%s" +FLOATING_IPS_URI = EXT_URI + "/floatingip/" +FLOATING_IP_URI = FLOATING_IPS_URI + "%s" + +ROUTERS_URI = EXT_URI + "/lrouter/" +ROUTER_URI = ROUTERS_URI + "%s" + +METHODS = {"POST": "create", + "PUT": "update", + "DELETE": "delete", + "GET": "get"} + + +class NVSDApi(object): + + def build_error_msg(self, method, resource, tenant_id, resource_id): + if method == "POST": + msg = _("Could not create a %(resource)s under tenant " + "%(tenant_id)s") % {'resource': resource, + 'tenant_id': tenant_id} + elif resource_id: + msg = _("Failed to %(method)s %(resource)s " + "id=%(resource_id)s") % {'method': METHODS[method], + 'resource': resource, + 'resource_id': resource_id + } + else: + msg = _("Failed to %(method)s %(resource)s") % { + 'method': METHODS[method], 'resource': resource} + return msg + + def set_connection(self): + self.nvsdcontroller = plugin_helper.initialize_plugin_helper() + self.nvsdcontroller.login() + + def send_request(self, method, uri, body=None, resource=None, + tenant_id='', resource_id=None): + """Issue a request to NVSD controller.""" + + try: + result = self.nvsdcontroller.request(method, uri, body=body) + except nvsdexception.NVSDAPIException as e: + with 
excutils.save_and_reraise_exception() as ctxt: + msg = self.build_error_msg(method, resource, tenant_id, + resource_id) + LOG.error(msg) + # Modifying the reason message without disturbing the exception + # info + ctxt.value = type(e)(reason=msg) + return result + + def create_network(self, network): + + tenant_id = network['tenant_id'] + router_external = network['router:external'] is True + + network_obj = { + "name": network['name'], + "tenant_id": tenant_id, + "shared": network['shared'], + "admin_state_up": network['admin_state_up'], + "router:external": router_external + } + + uri = NETWORKS_URI % tenant_id + + response = self.send_request("POST", uri, body=json.dumps(network_obj), + resource='network', tenant_id=tenant_id) + + nvsd_net = response.json() + + LOG.debug(_("Network %(id)s created under tenant %(tenant_id)s"), + {'id': nvsd_net['id'], 'tenant_id': tenant_id}) + + return nvsd_net + + def update_network(self, network, network_update): + + tenant_id = network['tenant_id'] + network_id = network['id'] + + uri = NETWORK_URI % (tenant_id, network_id) + + self.send_request("PUT", uri, + body=json.dumps(network_update), + resource='network', tenant_id=tenant_id, + resource_id=network_id) + + LOG.debug(_("Network %(id)s updated under tenant %(tenant_id)s"), + {'id': network_id, 'tenant_id': tenant_id}) + + def delete_network(self, network, subnets=[]): + + tenant_id = network['tenant_id'] + network_id = network['id'] + + ports = self._get_ports(tenant_id, network_id) + + for port in ports: + self.delete_port(port['id'], port) + + for subnet in subnets: + self.delete_subnet(subnet) + + path = NETWORK_URI % (tenant_id, network_id) + + self.send_request("DELETE", path, resource='network', + tenant_id=tenant_id, resource_id=network_id) + + LOG.debug(_("Network %(id)s deleted under tenant %(tenant_id)s"), + {'id': network_id, 'tenant_id': tenant_id}) + + def create_subnet(self, subnet): + + tenant_id = subnet['tenant_id'] + network_id = subnet['network_id'] + 
+ uri = SUBNETS_URI % (tenant_id, network_id) + + self.send_request("POST", uri, body=json.dumps(subnet), + resource='subnet', tenant_id=tenant_id) + + LOG.debug(_("Subnet %(id)s created under tenant %(tenant_id)s"), + {'id': subnet['id'], 'tenant_id': tenant_id}) + + def delete_subnet(self, subnet): + + tenant_id = subnet['tenant_id'] + network_id = subnet['network_id'] + subnet_id = subnet['id'] + + uri = SUBNET_URI % (tenant_id, network_id, subnet_id) + + self.send_request("DELETE", uri, resource='subnet', + tenant_id=tenant_id, resource_id=subnet_id) + + LOG.debug(_("Subnet %(id)s deleted under tenant %(tenant_id)s"), + {'id': subnet_id, 'tenant_id': tenant_id}) + + def update_subnet(self, subnet, subnet_update): + + tenant_id = subnet['tenant_id'] + network_id = subnet['network_id'] + subnet_id = subnet['id'] + + uri = SUBNET_URI % (tenant_id, network_id, subnet_id) + + self.send_request("PUT", uri, + body=json.dumps(subnet_update), + resource='subnet', tenant_id=tenant_id, + resource_id=subnet_id) + + LOG.debug(_("Subnet %(id)s updated under tenant %(tenant_id)s"), + {'id': subnet_id, 'tenant_id': tenant_id}) + + def create_port(self, tenant_id, port): + + network_id = port["network_id"] + fixed_ips = port.get("fixed_ips") + ip_address = None + subnet_id = None + + if fixed_ips: + ip_address = fixed_ips[0].get("ip_address") + subnet_id = fixed_ips[0].get("subnet_id") + + lport = { + "id": port["id"], + "name": port["name"], + "device_id": port["device_id"], + "device_owner": port["device_owner"], + "mac_address": port["mac_address"], + "ip_address": ip_address, + "subnet_id": subnet_id, + "admin_state_up": port["admin_state_up"], + "network_id": network_id, + "status": port["status"] + } + + path = PORTS_URI % (tenant_id, network_id) + + self.send_request("POST", path, body=json.dumps(lport), + resource='port', tenant_id=tenant_id) + + LOG.debug(_("Port %(id)s created under tenant %(tenant_id)s"), + {'id': port['id'], 'tenant_id': tenant_id}) + + def 
update_port(self, tenant_id, port, port_update): + + network_id = port['network_id'] + port_id = port['id'] + + lport = {} + for k in ('admin_state_up', 'name', 'device_id', 'device_owner'): + if k in port_update: + lport[k] = port_update[k] + + fixed_ips = port_update.get('fixed_ips', None) + if fixed_ips: + lport["ip_address"] = fixed_ips[0].get("ip_address") + lport["subnet_id"] = fixed_ips[0].get("subnet_id") + + uri = PORT_URI % (tenant_id, network_id, port_id) + + self.send_request("PUT", uri, body=json.dumps(lport), + resource='port', tenant_id=tenant_id, + resource_id=port_id) + + LOG.debug(_("Port %(id)s updated under tenant %(tenant_id)s"), + {'id': port_id, 'tenant_id': tenant_id}) + + def delete_port(self, port_id, port): + + tenant_id = port['tenant_id'] + network_id = port['network_id'] + + uri = PORT_URI % (tenant_id, network_id, port_id) + + self.send_request("DELETE", uri, resource='port', tenant_id=tenant_id, + resource_id=port_id) + + LOG.debug(_("Port %(id)s deleted under tenant %(tenant_id)s"), + {'id': port_id, 'tenant_id': tenant_id}) + + def _get_ports(self, tenant_id, network_id): + + uri = PORTS_URI % (tenant_id, network_id) + + response = self.send_request("GET", uri, resource='ports', + tenant_id=tenant_id) + + return response.json() + + def create_floatingip(self, floating_ip): + + tenant_id = floating_ip['tenant_id'] + + uri = FLOATING_IPS_URI % tenant_id + + self.send_request("POST", uri, body=json.dumps(floating_ip), + resource='floating_ip', + tenant_id=tenant_id) + + LOG.debug(_("Flatingip %(id)s created under tenant %(tenant_id)s"), + {'id': floating_ip['id'], 'tenant_id': tenant_id}) + + def update_floatingip(self, floating_ip, floating_ip_update): + + tenant_id = floating_ip['tenant_id'] + + floating_ip_id = floating_ip['id'] + + uri = FLOATING_IP_URI % (tenant_id, floating_ip_id) + + self.send_request("PUT", uri, + body=json.dumps(floating_ip_update['floatingip']), + resource='floating_ip', + tenant_id=tenant_id, + 
resource_id=floating_ip_id) + + LOG.debug(_("Flatingip %(id)s updated under tenant %(tenant_id)s"), + {'id': floating_ip_id, 'tenant_id': tenant_id}) + + def delete_floatingip(self, floating_ip): + + tenant_id = floating_ip['tenant_id'] + + floating_ip_id = floating_ip['id'] + + uri = FLOATING_IP_URI % (tenant_id, floating_ip_id) + + self.send_request("DELETE", uri, resource='floating_ip', + tenant_id=tenant_id, resource_id=floating_ip_id) + + LOG.debug(_("Flatingip %(id)s deleted under tenant %(tenant_id)s"), + {'id': floating_ip_id, 'tenant_id': tenant_id}) + + def create_router(self, router): + + tenant_id = router['tenant_id'] + + uri = ROUTERS_URI % tenant_id + + self.send_request("POST", uri, body=json.dumps(router), + resource='router', + tenant_id=tenant_id) + + LOG.debug(_("Router %(id)s created under tenant %(tenant_id)s"), + {'id': router['id'], 'tenant_id': tenant_id}) + + def update_router(self, router): + + tenant_id = router['tenant_id'] + + router_id = router['id'] + + uri = ROUTER_URI % (tenant_id, router_id) + + self.send_request("PUT", uri, + body=json.dumps(router), + resource='router', tenant_id=tenant_id, + resource_id=router_id) + + LOG.debug(_("Router %(id)s updated under tenant %(tenant_id)s"), + {'id': router_id, 'tenant_id': tenant_id}) + + def delete_router(self, tenant_id, router_id): + + uri = ROUTER_URI % (tenant_id, router_id) + + self.send_request("DELETE", uri, resource='router', + tenant_id=tenant_id, resource_id=router_id) + + LOG.debug(_("Router %(id)s deleted under tenant %(tenant_id)s"), + {'id': router_id, 'tenant_id': tenant_id}) diff --git a/neutron/plugins/oneconvergence/lib/plugin_helper.py b/neutron/plugins/oneconvergence/lib/plugin_helper.py new file mode 100644 index 000000000..4158257fd --- /dev/null +++ b/neutron/plugins/oneconvergence/lib/plugin_helper.py @@ -0,0 +1,186 @@ +# Copyright 2014 OneConvergence, Inc. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Kedar Kulkarni, One Convergence, Inc. + +"""Library to talk to NVSD controller.""" + +import httplib +import time + +from oslo.config import cfg +import requests +from six.moves.urllib import parse + +from neutron.openstack.common import jsonutils as json +from neutron.openstack.common import log as logging +import neutron.plugins.oneconvergence.lib.exception as exception + +LOG = logging.getLogger(__name__) + + +def initialize_plugin_helper(): + nvsdcontroller = NVSDController() + return nvsdcontroller + + +class NVSDController(object): + + """Encapsulates the NVSD Controller details.""" + + def __init__(self): + + self._host = cfg.CONF.nvsd.nvsd_ip + self._port = cfg.CONF.nvsd.nvsd_port + self._user = cfg.CONF.nvsd.nvsd_user + self._password = cfg.CONF.nvsd.nvsd_passwd + self._retries = cfg.CONF.nvsd.nvsd_retries + self._request_timeout = float(cfg.CONF.nvsd.request_timeout) + self.api_url = 'http://' + self._host + ':' + str(self._port) + + self.pool = requests.Session() + + self.auth_token = None + + def do_request(self, method, url=None, headers=None, data=None, + timeout=10): + response = self.pool.request(method, url=url, + headers=headers, data=data, + timeout=self._request_timeout) + return response + + def login(self): + """Login to NVSD Controller.""" + + headers = {"Content-Type": "application/json"} + + login_url = parse.urljoin(self.api_url, + "/pluginhandler/ocplugin/authmgmt/login") + + data 
= json.dumps({"user_name": self._user, "passwd": self._password}) + + attempts = 0 + + while True: + if attempts < self._retries: + attempts += 1 + elif self._retries == 0: + attempts = 0 + else: + msg = _("Unable to connect to NVSD controller. Exiting after " + "%(retries)s attempts") % {'retries': self._retries} + LOG.error(msg) + raise exception.ServerException(reason=msg) + try: + response = self.do_request("POST", url=login_url, + headers=headers, data=data, + timeout=self._request_timeout) + break + except Exception as e: + LOG.error(_("Login Failed: %s"), e) + LOG.error(_("Unable to establish connection" + " with Controller %s"), self.api_url) + LOG.error(_("Retrying after 1 second...")) + time.sleep(1) + + if response.status_code == requests.codes.ok: + LOG.debug(_("Login Successful %(uri)s " + "%(status)s"), {'uri': self.api_url, + 'status': response.status_code}) + self.auth_token = json.loads(response.content)["session_uuid"] + LOG.debug(_("AuthToken = %s"), self.auth_token) + else: + LOG.error(_("login failed")) + + return + + def request(self, method, url, body="", content_type="application/json"): + """Issue a request to NVSD controller.""" + + if self.auth_token is None: + LOG.warning(_("No Token, Re-login")) + self.login() + + headers = {"Content-Type": content_type} + + uri = parse.urljoin(url, "?authToken=%s" % self.auth_token) + + url = parse.urljoin(self.api_url, uri) + + request_ok = False + response = None + + try: + response = self.do_request(method, url=url, + headers=headers, data=body, + timeout=self._request_timeout) + + LOG.debug(_("request: %(method)s %(uri)s successful"), + {'method': method, 'uri': self.api_url + uri}) + request_ok = True + except httplib.IncompleteRead as e: + response = e.partial + request_ok = True + except Exception as e: + LOG.error(_("request: Request failed from " + "Controller side :%s"), e) + + if response is None: + # Timeout. 
+ LOG.error(_("Response is Null, Request timed out: %(method)s to " + "%(uri)s"), {'method': method, 'uri': uri}) + self.auth_token = None + raise exception.RequestTimeout() + + status = response.status_code + if status == requests.codes.unauthorized: + self.auth_token = None + # Raise an exception to inform that the request failed. + raise exception.UnAuthorizedException() + + if status in self.error_codes: + LOG.error(_("Request %(method)s %(uri)s body = %(body)s failed " + "with status %(status)s"), {'method': method, + 'uri': uri, 'body': body, + 'status': status}) + LOG.error(_("%s"), response.reason) + raise self.error_codes[status]() + elif status not in (requests.codes.ok, requests.codes.created, + requests.codes.no_content): + LOG.error(_("%(method)s to %(url)s, unexpected response code: " + "%(status)d"), {'method': method, 'url': url, + 'status': status}) + return + + if not request_ok: + LOG.error(_("Request failed from Controller side with " + "Status=%s"), status) + raise exception.ServerException() + else: + LOG.debug(_("Success: %(method)s %(url)s status=%(status)s"), + {'method': method, 'url': self.api_url + uri, + 'status': status}) + response.body = response.content + return response + + error_codes = { + 404: exception.NotFoundException, + 409: exception.BadRequestException, + 500: exception.InternalServerError, + 503: exception.ServerException, + 403: exception.ForbiddenException, + 301: exception.NVSDAPIException, + 307: exception.NVSDAPIException, + 400: exception.NVSDAPIException, + } diff --git a/neutron/plugins/oneconvergence/plugin.py b/neutron/plugins/oneconvergence/plugin.py new file mode 100644 index 000000000..257ab5494 --- /dev/null +++ b/neutron/plugins/oneconvergence/plugin.py @@ -0,0 +1,440 @@ +# Copyright 2014 OneConvergence, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Kedar Kulkarni, One Convergence, Inc. + +"""Implementation of OneConvergence Neutron Plugin.""" + +from oslo.config import cfg + +from neutron.agent import securitygroups_rpc as sg_rpc +from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api +from neutron.api.rpc.agentnotifiers import l3_rpc_agent_api +from neutron.common import constants as q_const +from neutron.common import exceptions as nexception +from neutron.common import rpc_compat +from neutron.common import topics +from neutron.db import agents_db +from neutron.db import agentschedulers_db +from neutron.db import db_base_plugin_v2 +from neutron.db import dhcp_rpc_base +from neutron.db import external_net_db +from neutron.db import extraroute_db +from neutron.db import l3_agentschedulers_db +from neutron.db import l3_gwmode_db +from neutron.db import l3_rpc_base +from neutron.db import portbindings_base +from neutron.db import quota_db # noqa +from neutron.db import securitygroups_rpc_base as sg_db_rpc +from neutron.extensions import portbindings +from neutron.openstack.common import excutils +from neutron.openstack.common import importutils +from neutron.openstack.common import log as logging +from neutron.plugins.common import constants as svc_constants +import neutron.plugins.oneconvergence.lib.config # noqa +import neutron.plugins.oneconvergence.lib.exception as nvsdexception +import neutron.plugins.oneconvergence.lib.nvsd_db as nvsd_db +from neutron.plugins.oneconvergence.lib import nvsdlib as nvsd_lib + +LOG = logging.getLogger(__name__) +IPv6 = 6 + + +class 
NVSDPluginRpcCallbacks(rpc_compat.RpcCallback, + dhcp_rpc_base.DhcpRpcCallbackMixin, + l3_rpc_base.L3RpcCallbackMixin, + sg_db_rpc.SecurityGroupServerRpcCallbackMixin): + + RPC_API_VERSION = '1.1' + + @staticmethod + def get_port_from_device(device): + port = nvsd_db.get_port_from_device(device) + if port: + port['device'] = device + return port + + +class NVSDPluginV2AgentNotifierApi(rpc_compat.RpcProxy, + sg_rpc.SecurityGroupAgentRpcApiMixin): + + BASE_RPC_API_VERSION = '1.0' + + def __init__(self, topic): + super(NVSDPluginV2AgentNotifierApi, self).__init__( + topic=topic, default_version=self.BASE_RPC_API_VERSION) + self.topic_port_update = topics.get_topic_name(topic, topics.PORT, + topics.UPDATE) + + def port_update(self, context, port): + self.fanout_cast(context, + self.make_msg('port_update', port=port), + topic=self.topic_port_update) + + +class OneConvergencePluginV2(db_base_plugin_v2.NeutronDbPluginV2, + extraroute_db.ExtraRoute_db_mixin, + l3_agentschedulers_db.L3AgentSchedulerDbMixin, + agentschedulers_db.DhcpAgentSchedulerDbMixin, + external_net_db.External_net_db_mixin, + l3_gwmode_db.L3_NAT_db_mixin, + portbindings_base.PortBindingBaseMixin, + sg_db_rpc.SecurityGroupServerRpcMixin): + + """L2 Virtual Network Plugin. + + OneConvergencePluginV2 is a Neutron plugin that provides L2 Virtual Network + functionality. 
+ """ + + __native_bulk_support = True + __native_pagination_support = True + __native_sorting_support = True + + _supported_extension_aliases = ['agent', + 'binding', + 'dhcp_agent_scheduler', + 'ext-gw-mode', + 'external-net', + 'extraroute', + 'l3_agent_scheduler', + 'quotas', + 'router', + 'security-group' + ] + + @property + def supported_extension_aliases(self): + if not hasattr(self, '_aliases'): + aliases = self._supported_extension_aliases[:] + sg_rpc.disable_security_group_extension_by_config(aliases) + self._aliases = aliases + return self._aliases + + def __init__(self): + + super(OneConvergencePluginV2, self).__init__() + + self.oneconvergence_init() + + self.base_binding_dict = { + portbindings.VIF_TYPE: portbindings.VIF_TYPE_OVS, + portbindings.VIF_DETAILS: { + portbindings.CAP_PORT_FILTER: + 'security-group' in self.supported_extension_aliases}} + + portbindings_base.register_port_dict_function() + + self.setup_rpc() + + self.network_scheduler = importutils.import_object( + cfg.CONF.network_scheduler_driver) + self.router_scheduler = importutils.import_object( + cfg.CONF.router_scheduler_driver) + + def oneconvergence_init(self): + """Initialize the connections and set the log levels for the plugin.""" + + self.nvsdlib = nvsd_lib.NVSDApi() + self.nvsdlib.set_connection() + + def setup_rpc(self): + # RPC support + self.service_topics = {svc_constants.CORE: topics.PLUGIN, + svc_constants.L3_ROUTER_NAT: topics.L3PLUGIN} + self.conn = rpc_compat.create_connection(new=True) + self.notifier = NVSDPluginV2AgentNotifierApi(topics.AGENT) + self.agent_notifiers[q_const.AGENT_TYPE_DHCP] = ( + dhcp_rpc_agent_api.DhcpAgentNotifyAPI() + ) + self.agent_notifiers[q_const.AGENT_TYPE_L3] = ( + l3_rpc_agent_api.L3AgentNotifyAPI() + ) + self.endpoints = [NVSDPluginRpcCallbacks(), + agents_db.AgentExtRpcCallback()] + for svc_topic in self.service_topics.values(): + self.conn.create_consumer(svc_topic, self.endpoints, fanout=False) + + # Consume from all consumers in 
threads + self.conn.consume_in_threads() + + def create_network(self, context, network): + + tenant_id = self._get_tenant_id_for_create( + context, network['network']) + self._ensure_default_security_group(context, tenant_id) + + net = self.nvsdlib.create_network(network['network']) + + network['network']['id'] = net['id'] + + with context.session.begin(subtransactions=True): + try: + neutron_net = super(OneConvergencePluginV2, + self).create_network(context, network) + + #following call checks whether the network is external or not + #and if it is external then adds this network to + #externalnetworks table of neutron db + self._process_l3_create(context, neutron_net, + network['network']) + except nvsdexception.NVSDAPIException: + with excutils.save_and_reraise_exception(): + self.nvsdlib.delete_network(net) + + return neutron_net + + def update_network(self, context, net_id, network): + + with context.session.begin(subtransactions=True): + + neutron_net = super(OneConvergencePluginV2, + self).update_network(context, net_id, network) + + self.nvsdlib.update_network(neutron_net, network['network']) + # updates neutron database e.g. externalnetworks table. 
+ self._process_l3_update(context, neutron_net, network['network']) + + return neutron_net + + def delete_network(self, context, net_id): + + with context.session.begin(subtransactions=True): + network = self._get_network(context, net_id) + #get all the subnets under the network to delete them + subnets = self._get_subnets_by_network(context, net_id) + + self._process_l3_delete(context, net_id) + super(OneConvergencePluginV2, self).delete_network(context, + net_id) + + self.nvsdlib.delete_network(network, subnets) + + def create_subnet(self, context, subnet): + + if subnet['subnet']['ip_version'] == IPv6: + raise nexception.InvalidInput( + error_message="NVSDPlugin doesn't support IPv6.") + + neutron_subnet = super(OneConvergencePluginV2, + self).create_subnet(context, subnet) + + try: + self.nvsdlib.create_subnet(neutron_subnet) + except nvsdexception.NVSDAPIException: + with excutils.save_and_reraise_exception(): + #Log the message and delete the subnet from the neutron + super(OneConvergencePluginV2, + self).delete_subnet(context, neutron_subnet['id']) + LOG.error(_("Failed to create subnet, " + "deleting it from neutron")) + + return neutron_subnet + + def delete_subnet(self, context, subnet_id): + + neutron_subnet = self._get_subnet(context, subnet_id) + + with context.session.begin(subtransactions=True): + + super(OneConvergencePluginV2, self).delete_subnet(context, + subnet_id) + + self.nvsdlib.delete_subnet(neutron_subnet) + + def update_subnet(self, context, subnet_id, subnet): + + with context.session.begin(subtransactions=True): + + neutron_subnet = super(OneConvergencePluginV2, + self).update_subnet(context, subnet_id, + subnet) + + self.nvsdlib.update_subnet(neutron_subnet, subnet) + return neutron_subnet + + def create_port(self, context, port): + + self._ensure_default_security_group_on_port(context, port) + + sgids = self._get_security_groups_on_port(context, port) + + network = {} + + network_id = port['port']['network_id'] + + with 
context.session.begin(subtransactions=True): + + # Invoke the Neutron API for creating port + neutron_port = super(OneConvergencePluginV2, + self).create_port(context, port) + + self._process_portbindings_create_and_update(context, + port['port'], + neutron_port) + + self._process_port_create_security_group(context, neutron_port, + sgids) + if port['port']['device_owner'] in ('network:router_gateway', + 'network:floatingip'): + # for l3 requests, tenant_id will be None/'' + network = self._get_network(context, network_id) + + tenant_id = network['tenant_id'] + else: + tenant_id = port['port']['tenant_id'] + + port_id = neutron_port['id'] + + try: + self.nvsdlib.create_port(tenant_id, neutron_port) + except nvsdexception.NVSDAPIException: + with excutils.save_and_reraise_exception(): + LOG.error(_("Deleting newly created " + "neutron port %s"), port_id) + super(OneConvergencePluginV2, self).delete_port(context, + port_id) + + self.notify_security_groups_member_updated(context, neutron_port) + + return neutron_port + + def update_port(self, context, port_id, port): + + with context.session.begin(subtransactions=True): + + old_port = super(OneConvergencePluginV2, self).get_port(context, + port_id) + + neutron_port = super(OneConvergencePluginV2, + self).update_port(context, port_id, port) + + if neutron_port['tenant_id'] == '': + network = self._get_network(context, + neutron_port['network_id']) + tenant_id = network['tenant_id'] + else: + tenant_id = neutron_port['tenant_id'] + + self.nvsdlib.update_port(tenant_id, neutron_port, port['port']) + + self._process_portbindings_create_and_update(context, + port['port'], + neutron_port) + need_port_update_notify = self.update_security_group_on_port( + context, port_id, port, old_port, neutron_port) + + if need_port_update_notify: + self.notifier.port_update(context, neutron_port) + + return neutron_port + + def delete_port(self, context, port_id, l3_port_check=True): + + if l3_port_check: + 
self.prevent_l3_port_deletion(context, port_id) + + with context.session.begin(subtransactions=True): + neutron_port = super(OneConvergencePluginV2, + self).get_port(context, port_id) + + self._delete_port_security_group_bindings(context, port_id) + + self.disassociate_floatingips(context, port_id) + + super(OneConvergencePluginV2, self).delete_port(context, port_id) + + network = self._get_network(context, neutron_port['network_id']) + neutron_port['tenant_id'] = network['tenant_id'] + + self.nvsdlib.delete_port(port_id, neutron_port) + + self.notify_security_groups_member_updated(context, neutron_port) + + def create_floatingip(self, context, floatingip): + + neutron_floatingip = super(OneConvergencePluginV2, + self).create_floatingip(context, + floatingip) + try: + self.nvsdlib.create_floatingip(neutron_floatingip) + except nvsdexception.NVSDAPIException: + with excutils.save_and_reraise_exception(): + LOG.error(_("Failed to create floatingip")) + super(OneConvergencePluginV2, + self).delete_floatingip(context, + neutron_floatingip['id']) + + return neutron_floatingip + + def update_floatingip(self, context, fip_id, floatingip): + + with context.session.begin(subtransactions=True): + + neutron_floatingip = super(OneConvergencePluginV2, + self).update_floatingip(context, + fip_id, + floatingip) + + self.nvsdlib.update_floatingip(neutron_floatingip, floatingip) + + return neutron_floatingip + + def delete_floatingip(self, context, floating_ip_id): + + with context.session.begin(subtransactions=True): + + floating_ip = self._get_floatingip(context, floating_ip_id) + + super(OneConvergencePluginV2, + self).delete_floatingip(context, floating_ip_id) + + self.nvsdlib.delete_floatingip(floating_ip) + + def create_router(self, context, router): + + neutron_router = super(OneConvergencePluginV2, + self).create_router(context, router) + try: + self.nvsdlib.create_router(neutron_router) + except nvsdexception.NVSDAPIException: + with excutils.save_and_reraise_exception(): 
+ LOG.error(_("Failed to create router")) + super(OneConvergencePluginV2, + self).delete_router(context, neutron_router['id']) + + return neutron_router + + def update_router(self, context, router_id, router): + + with context.session.begin(subtransactions=True): + + neutron_router = super(OneConvergencePluginV2, + self).update_router(context, router_id, + router) + + self.nvsdlib.update_router(neutron_router) + + return neutron_router + + def delete_router(self, context, router_id): + + tenant_id = self._get_router(context, router_id)['tenant_id'] + + with context.session.begin(subtransactions=True): + + super(OneConvergencePluginV2, self).delete_router(context, + router_id) + + self.nvsdlib.delete_router(tenant_id, router_id) diff --git a/neutron/plugins/openvswitch/README b/neutron/plugins/openvswitch/README new file mode 100644 index 000000000..b8991ad0a --- /dev/null +++ b/neutron/plugins/openvswitch/README @@ -0,0 +1,6 @@ +The Open vSwitch (OVS) Neutron plugin is a simple plugin to manage OVS +features using a local agent running on each hypervisor. + +For details on how to configure and use the plugin, see: + +http://openvswitch.org/openstack/documentation/ diff --git a/neutron/plugins/openvswitch/__init__.py b/neutron/plugins/openvswitch/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/neutron/plugins/openvswitch/agent/__init__.py b/neutron/plugins/openvswitch/agent/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/neutron/plugins/openvswitch/agent/ovs_neutron_agent.py b/neutron/plugins/openvswitch/agent/ovs_neutron_agent.py new file mode 100644 index 000000000..c5b136b06 --- /dev/null +++ b/neutron/plugins/openvswitch/agent/ovs_neutron_agent.py @@ -0,0 +1,1517 @@ +#!/usr/bin/env python +# Copyright 2011 VMware, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import hashlib +import signal +import sys +import time + +import eventlet +eventlet.monkey_patch() + +import netaddr +from oslo.config import cfg +from six import moves + +from neutron.agent import l2population_rpc +from neutron.agent.linux import ip_lib +from neutron.agent.linux import ovs_lib +from neutron.agent.linux import polling +from neutron.agent.linux import utils +from neutron.agent import rpc as agent_rpc +from neutron.agent import securitygroups_rpc as sg_rpc +from neutron.common import config as common_config +from neutron.common import constants as q_const +from neutron.common import rpc_compat +from neutron.common import topics +from neutron.common import utils as q_utils +from neutron import context +from neutron.openstack.common import log as logging +from neutron.openstack.common import loopingcall +from neutron.plugins.common import constants as p_const +from neutron.plugins.openvswitch.common import config # noqa +from neutron.plugins.openvswitch.common import constants + + +LOG = logging.getLogger(__name__) + +# A placeholder for dead vlans. +DEAD_VLAN_TAG = str(q_const.MAX_VLAN_TAG + 1) + + +# A class to represent a VIF (i.e., a port that has 'iface-id' and 'vif-mac' +# attributes set). 
+class LocalVLANMapping: + def __init__(self, vlan, network_type, physical_network, segmentation_id, + vif_ports=None): + if vif_ports is None: + vif_ports = {} + self.vlan = vlan + self.network_type = network_type + self.physical_network = physical_network + self.segmentation_id = segmentation_id + self.vif_ports = vif_ports + # set of tunnel ports on which packets should be flooded + self.tun_ofports = set() + + def __str__(self): + return ("lv-id = %s type = %s phys-net = %s phys-id = %s" % + (self.vlan, self.network_type, self.physical_network, + self.segmentation_id)) + + +class OVSPluginApi(agent_rpc.PluginApi, + sg_rpc.SecurityGroupServerRpcApiMixin): + pass + + +class OVSSecurityGroupAgent(sg_rpc.SecurityGroupAgentRpcMixin): + def __init__(self, context, plugin_rpc, root_helper): + self.context = context + self.plugin_rpc = plugin_rpc + self.root_helper = root_helper + self.init_firewall(defer_refresh_firewall=True) + + +class OVSNeutronAgent(rpc_compat.RpcCallback, + sg_rpc.SecurityGroupAgentRpcCallbackMixin, + l2population_rpc.L2populationRpcCallBackMixin): + '''Implements OVS-based tunneling, VLANs and flat networks. + + Two local bridges are created: an integration bridge (defaults to + 'br-int') and a tunneling bridge (defaults to 'br-tun'). An + additional bridge is created for each physical network interface + used for VLANs and/or flat networks. + + All VM VIFs are plugged into the integration bridge. VM VIFs on a + given virtual network share a common "local" VLAN (i.e. not + propagated externally). The VLAN id of this local VLAN is mapped + to the physical networking details realizing that virtual network. + + For virtual networks realized as GRE tunnels, a Logical Switch + (LS) identifier is used to differentiate tenant traffic on + inter-HV tunnels. A mesh of tunnels is created to other + Hypervisors in the cloud. These tunnels originate and terminate on + the tunneling bridge of each hypervisor. 
Port patching is done to + connect local VLANs on the integration bridge to inter-hypervisor + tunnels on the tunnel bridge. + + For each virtual network realized as a VLAN or flat network, a + veth is used to connect the local VLAN on the integration bridge + with the physical network bridge, with flow rules adding, + modifying, or stripping VLAN tags as necessary. + ''' + + # history + # 1.0 Initial version + # 1.1 Support Security Group RPC + RPC_API_VERSION = '1.1' + + def __init__(self, integ_br, tun_br, local_ip, + bridge_mappings, root_helper, + polling_interval, tunnel_types=None, + veth_mtu=None, l2_population=False, + minimize_polling=False, + ovsdb_monitor_respawn_interval=( + constants.DEFAULT_OVSDBMON_RESPAWN), + arp_responder=False): + '''Constructor. + + :param integ_br: name of the integration bridge. + :param tun_br: name of the tunnel bridge. + :param local_ip: local IP address of this hypervisor. + :param bridge_mappings: mappings from physical network name to bridge. + :param root_helper: utility to use when running shell cmds. + :param polling_interval: interval (secs) to poll DB. + :param tunnel_types: A list of tunnel types to enable support for in + the agent. If set, will automatically set enable_tunneling to + True. + :param veth_mtu: MTU size for veth interfaces. + :param l2_population: Optional, whether L2 population is turned on + :param minimize_polling: Optional, whether to minimize polling by + monitoring ovsdb for interface changes. + :param ovsdb_monitor_respawn_interval: Optional, when using polling + minimization, the number of seconds to wait before respawning + the ovsdb monitor. + :param arp_responder: Optional, enable local ARP responder if it is + supported. 
+ ''' + super(OVSNeutronAgent, self).__init__() + self.veth_mtu = veth_mtu + self.root_helper = root_helper + self.available_local_vlans = set(moves.xrange(q_const.MIN_VLAN_TAG, + q_const.MAX_VLAN_TAG)) + self.tunnel_types = tunnel_types or [] + self.l2_pop = l2_population + # TODO(ethuleau): Initially, local ARP responder is be dependent to the + # ML2 l2 population mechanism driver. + self.arp_responder_enabled = (arp_responder and + self._check_arp_responder_support() and + self.l2_pop) + self.agent_state = { + 'binary': 'neutron-openvswitch-agent', + 'host': cfg.CONF.host, + 'topic': q_const.L2_AGENT_TOPIC, + 'configurations': {'bridge_mappings': bridge_mappings, + 'tunnel_types': self.tunnel_types, + 'tunneling_ip': local_ip, + 'l2_population': self.l2_pop, + 'arp_responder_enabled': + self.arp_responder_enabled}, + 'agent_type': q_const.AGENT_TYPE_OVS, + 'start_flag': True} + + # Keep track of int_br's device count for use by _report_state() + self.int_br_device_count = 0 + + self.int_br = ovs_lib.OVSBridge(integ_br, self.root_helper) + self.int_br.set_secure_mode() + # Stores port update notifications for processing in main rpc loop + self.updated_ports = set() + self.setup_rpc() + self.setup_integration_br() + self.bridge_mappings = bridge_mappings + self.setup_physical_bridges(self.bridge_mappings) + self.local_vlan_map = {} + self.tun_br_ofports = {p_const.TYPE_GRE: {}, + p_const.TYPE_VXLAN: {}} + + self.polling_interval = polling_interval + self.minimize_polling = minimize_polling + self.ovsdb_monitor_respawn_interval = ovsdb_monitor_respawn_interval + + if tunnel_types: + self.enable_tunneling = True + else: + self.enable_tunneling = False + self.local_ip = local_ip + self.tunnel_count = 0 + self.vxlan_udp_port = cfg.CONF.AGENT.vxlan_udp_port + self.dont_fragment = cfg.CONF.AGENT.dont_fragment + self.tun_br = None + if self.enable_tunneling: + self.setup_tunnel_br(tun_br) + # Collect additional bridges to monitor + self.ancillary_brs = 
self.setup_ancillary_bridges(integ_br, tun_br) + + # Security group agent support + self.sg_agent = OVSSecurityGroupAgent(self.context, + self.plugin_rpc, + root_helper) + # Initialize iteration counter + self.iter_num = 0 + + def _check_arp_responder_support(self): + '''Check if OVS supports to modify ARP headers. + + This functionality is only available since the development branch 2.1. + ''' + args = ['arp,action=load:0x2->NXM_OF_ARP_OP[],' + 'move:NXM_NX_ARP_SHA[]->NXM_NX_ARP_THA[],' + 'move:NXM_OF_ARP_SPA[]->NXM_OF_ARP_TPA[]'] + supported = ovs_lib.ofctl_arg_supported(self.root_helper, 'add-flow', + args) + if not supported: + LOG.warning(_('OVS version can not support ARP responder.')) + return supported + + def _report_state(self): + # How many devices are likely used by a VM + self.agent_state.get('configurations')['devices'] = ( + self.int_br_device_count) + try: + self.state_rpc.report_state(self.context, + self.agent_state) + self.agent_state.pop('start_flag', None) + except Exception: + LOG.exception(_("Failed reporting state!")) + + def setup_rpc(self): + mac = self.int_br.get_local_port_mac() + self.agent_id = '%s%s' % ('ovs', (mac.replace(":", ""))) + self.topic = topics.AGENT + self.plugin_rpc = OVSPluginApi(topics.PLUGIN) + self.state_rpc = agent_rpc.PluginReportStateAPI(topics.PLUGIN) + + # RPC network init + self.context = context.get_admin_context_without_session() + # Handle updates from service + self.endpoints = [self] + # Define the listening consumers for the agent + consumers = [[topics.PORT, topics.UPDATE], + [topics.NETWORK, topics.DELETE], + [constants.TUNNEL, topics.UPDATE], + [topics.SECURITY_GROUP, topics.UPDATE]] + if self.l2_pop: + consumers.append([topics.L2POPULATION, + topics.UPDATE, cfg.CONF.host]) + self.connection = agent_rpc.create_consumers(self.endpoints, + self.topic, + consumers) + report_interval = cfg.CONF.AGENT.report_interval + if report_interval: + heartbeat = loopingcall.FixedIntervalLoopingCall( + 
self._report_state) + heartbeat.start(interval=report_interval) + + def get_net_uuid(self, vif_id): + for network_id, vlan_mapping in self.local_vlan_map.iteritems(): + if vif_id in vlan_mapping.vif_ports: + return network_id + + def network_delete(self, context, **kwargs): + LOG.debug(_("network_delete received")) + network_id = kwargs.get('network_id') + LOG.debug(_("Delete %s"), network_id) + # The network may not be defined on this agent + lvm = self.local_vlan_map.get(network_id) + if lvm: + self.reclaim_local_vlan(network_id) + else: + LOG.debug(_("Network %s not used on agent."), network_id) + + def port_update(self, context, **kwargs): + port = kwargs.get('port') + # Put the port identifier in the updated_ports set. + # Even if full port details might be provided to this call, + # they are not used since there is no guarantee the notifications + # are processed in the same order as the relevant API requests + self.updated_ports.add(port['id']) + LOG.debug(_("port_update message processed for port %s"), port['id']) + + def tunnel_update(self, context, **kwargs): + LOG.debug(_("tunnel_update received")) + if not self.enable_tunneling: + return + tunnel_ip = kwargs.get('tunnel_ip') + tunnel_id = kwargs.get('tunnel_id', self.get_ip_in_hex(tunnel_ip)) + if not tunnel_id: + return + tunnel_type = kwargs.get('tunnel_type') + if not tunnel_type: + LOG.error(_("No tunnel_type specified, cannot create tunnels")) + return + if tunnel_type not in self.tunnel_types: + LOG.error(_("tunnel_type %s not supported by agent"), tunnel_type) + return + if tunnel_ip == self.local_ip: + return + tun_name = '%s-%s' % (tunnel_type, tunnel_id) + if not self.l2_pop: + self.setup_tunnel_port(tun_name, tunnel_ip, tunnel_type) + + def fdb_add(self, context, fdb_entries): + LOG.debug(_("fdb_add received")) + for network_id, values in fdb_entries.items(): + lvm = self.local_vlan_map.get(network_id) + if not lvm: + # Agent doesn't manage any port in this network + continue + agent_ports = 
values.get('ports') + agent_ports.pop(self.local_ip, None) + if len(agent_ports): + self.tun_br.defer_apply_on() + for agent_ip, ports in agent_ports.items(): + # Ensure we have a tunnel port with this remote agent + ofport = self.tun_br_ofports[ + lvm.network_type].get(agent_ip) + if not ofport: + remote_ip_hex = self.get_ip_in_hex(agent_ip) + if not remote_ip_hex: + continue + port_name = '%s-%s' % (lvm.network_type, remote_ip_hex) + ofport = self.setup_tunnel_port(port_name, agent_ip, + lvm.network_type) + if ofport == 0: + continue + for port in ports: + self._add_fdb_flow(port, lvm, ofport) + self.tun_br.defer_apply_off() + + def fdb_remove(self, context, fdb_entries): + LOG.debug(_("fdb_remove received")) + for network_id, values in fdb_entries.items(): + lvm = self.local_vlan_map.get(network_id) + if not lvm: + # Agent doesn't manage any more ports in this network + continue + agent_ports = values.get('ports') + agent_ports.pop(self.local_ip, None) + if len(agent_ports): + self.tun_br.defer_apply_on() + for agent_ip, ports in agent_ports.items(): + ofport = self.tun_br_ofports[ + lvm.network_type].get(agent_ip) + if not ofport: + continue + for port in ports: + self._del_fdb_flow(port, lvm, ofport) + self.tun_br.defer_apply_off() + + def _add_fdb_flow(self, port_info, lvm, ofport): + if port_info == q_const.FLOODING_ENTRY: + lvm.tun_ofports.add(ofport) + ofports = ','.join(lvm.tun_ofports) + self.tun_br.mod_flow(table=constants.FLOOD_TO_TUN, + dl_vlan=lvm.vlan, + actions="strip_vlan,set_tunnel:%s," + "output:%s" % (lvm.segmentation_id, ofports)) + else: + self._set_arp_responder('add', lvm.vlan, port_info[0], + port_info[1]) + self.tun_br.add_flow(table=constants.UCAST_TO_TUN, + priority=2, + dl_vlan=lvm.vlan, + dl_dst=port_info[0], + actions="strip_vlan,set_tunnel:%s,output:%s" % + (lvm.segmentation_id, ofport)) + + def _del_fdb_flow(self, port_info, lvm, ofport): + if port_info == q_const.FLOODING_ENTRY: + lvm.tun_ofports.remove(ofport) + if 
len(lvm.tun_ofports) > 0: + ofports = ','.join(lvm.tun_ofports) + self.tun_br.mod_flow(table=constants.FLOOD_TO_TUN, + dl_vlan=lvm.vlan, + actions="strip_vlan," + "set_tunnel:%s,output:%s" % + (lvm.segmentation_id, ofports)) + else: + # This local vlan doesn't require any more tunnelling + self.tun_br.delete_flows(table=constants.FLOOD_TO_TUN, + dl_vlan=lvm.vlan) + # Check if this tunnel port is still used + self.cleanup_tunnel_port(ofport, lvm.network_type) + else: + self._set_arp_responder('remove', lvm.vlan, port_info[0], + port_info[1]) + self.tun_br.delete_flows(table=constants.UCAST_TO_TUN, + dl_vlan=lvm.vlan, + dl_dst=port_info[0]) + + def _fdb_chg_ip(self, context, fdb_entries): + '''fdb update when an IP of a port is updated. + + The ML2 l2-pop mechanism driver send an fdb update rpc message when an + IP of a port is updated. + + :param context: RPC context. + :param fdb_entries: fdb dicts that contain all mac/IP informations per + agent and network. + {'net1': + {'agent_ip': + {'before': [[mac, ip]], + 'after': [[mac, ip]] + } + } + 'net2': + ... + } + ''' + LOG.debug(_("update chg_ip received")) + + # TODO(ethuleau): Use OVS defer apply flows for all rules will be an + # interesting improvement here. But actually, OVS lib defer apply flows + # methods doesn't ensure the add flows will be applied before delete. 
+ for network_id, agent_ports in fdb_entries.items(): + lvm = self.local_vlan_map.get(network_id) + if not lvm: + continue + + for agent_ip, state in agent_ports.items(): + if agent_ip == self.local_ip: + continue + + after = state.get('after') + for mac, ip in after: + self._set_arp_responder('add', lvm.vlan, mac, ip) + + before = state.get('before') + for mac, ip in before: + self._set_arp_responder('remove', lvm.vlan, mac, ip) + + def fdb_update(self, context, fdb_entries): + LOG.debug(_("fdb_update received")) + for action, values in fdb_entries.items(): + method = '_fdb_' + action + if not hasattr(self, method): + raise NotImplementedError() + + getattr(self, method)(context, values) + + def _set_arp_responder(self, action, lvid, mac_str, ip_str): + '''Set the ARP respond entry. + + When the l2 population mechanism driver and OVS supports to edit ARP + fields, a table (ARP_RESPONDER) to resolve ARP locally is added to the + tunnel bridge. + + :param action: add or remove ARP entry. + :param lvid: local VLAN map of network's ARP entry. + :param mac_str: MAC string value. + :param ip_str: IP string value. 
+ ''' + if not self.arp_responder_enabled: + return + + mac = netaddr.EUI(mac_str, dialect=netaddr.mac_unix) + ip = netaddr.IPAddress(ip_str) + + if action == 'add': + actions = ('move:NXM_OF_ETH_SRC[]->NXM_OF_ETH_DST[],' + 'mod_dl_src:%(mac)s,' + 'load:0x2->NXM_OF_ARP_OP[],' + 'move:NXM_NX_ARP_SHA[]->NXM_NX_ARP_THA[],' + 'move:NXM_OF_ARP_SPA[]->NXM_OF_ARP_TPA[],' + 'load:%(mac)#x->NXM_NX_ARP_SHA[],' + 'load:%(ip)#x->NXM_OF_ARP_SPA[],' + 'in_port' % {'mac': mac, 'ip': ip}) + self.tun_br.add_flow(table=constants.ARP_RESPONDER, + priority=1, + proto='arp', + dl_vlan=lvid, + nw_dst='%s' % ip, + actions=actions) + elif action == 'remove': + self.tun_br.delete_flows(table=constants.ARP_RESPONDER, + proto='arp', + dl_vlan=lvid, + nw_dst='%s' % ip) + else: + LOG.warning(_('Action %s not supported'), action) + + def provision_local_vlan(self, net_uuid, network_type, physical_network, + segmentation_id): + '''Provisions a local VLAN. + + :param net_uuid: the uuid of the network associated with this vlan. + :param network_type: the network type ('gre', 'vxlan', 'vlan', 'flat', + 'local') + :param physical_network: the physical network for 'vlan' or 'flat' + :param segmentation_id: the VID for 'vlan' or tunnel ID for 'tunnel' + ''' + + # On a restart or crash of OVS, the network associated with this VLAN + # will already be assigned, so check for that here before assigning a + # new one. 
+ lvm = self.local_vlan_map.get(net_uuid) + if lvm: + lvid = lvm.vlan + else: + if not self.available_local_vlans: + LOG.error(_("No local VLAN available for net-id=%s"), net_uuid) + return + lvid = self.available_local_vlans.pop() + self.local_vlan_map[net_uuid] = LocalVLANMapping(lvid, + network_type, + physical_network, + segmentation_id) + + LOG.info(_("Assigning %(vlan_id)s as local vlan for " + "net-id=%(net_uuid)s"), + {'vlan_id': lvid, 'net_uuid': net_uuid}) + + if network_type in constants.TUNNEL_NETWORK_TYPES: + if self.enable_tunneling: + # outbound broadcast/multicast + ofports = ','.join(self.tun_br_ofports[network_type].values()) + if ofports: + self.tun_br.mod_flow(table=constants.FLOOD_TO_TUN, + dl_vlan=lvid, + actions="strip_vlan," + "set_tunnel:%s,output:%s" % + (segmentation_id, ofports)) + # inbound from tunnels: set lvid in the right table + # and resubmit to Table LEARN_FROM_TUN for mac learning + self.tun_br.add_flow(table=constants.TUN_TABLE[network_type], + priority=1, + tun_id=segmentation_id, + actions="mod_vlan_vid:%s,resubmit(,%s)" % + (lvid, constants.LEARN_FROM_TUN)) + else: + LOG.error(_("Cannot provision %(network_type)s network for " + "net-id=%(net_uuid)s - tunneling disabled"), + {'network_type': network_type, + 'net_uuid': net_uuid}) + elif network_type == p_const.TYPE_FLAT: + if physical_network in self.phys_brs: + # outbound + br = self.phys_brs[physical_network] + br.add_flow(priority=4, + in_port=self.phys_ofports[physical_network], + dl_vlan=lvid, + actions="strip_vlan,normal") + # inbound + self.int_br.add_flow( + priority=3, + in_port=self.int_ofports[physical_network], + dl_vlan=0xffff, + actions="mod_vlan_vid:%s,normal" % lvid) + else: + LOG.error(_("Cannot provision flat network for " + "net-id=%(net_uuid)s - no bridge for " + "physical_network %(physical_network)s"), + {'net_uuid': net_uuid, + 'physical_network': physical_network}) + elif network_type == p_const.TYPE_VLAN: + if physical_network in self.phys_brs: + # 
outbound + br = self.phys_brs[physical_network] + br.add_flow(priority=4, + in_port=self.phys_ofports[physical_network], + dl_vlan=lvid, + actions="mod_vlan_vid:%s,normal" % segmentation_id) + # inbound + self.int_br.add_flow(priority=3, + in_port=self. + int_ofports[physical_network], + dl_vlan=segmentation_id, + actions="mod_vlan_vid:%s,normal" % lvid) + else: + LOG.error(_("Cannot provision VLAN network for " + "net-id=%(net_uuid)s - no bridge for " + "physical_network %(physical_network)s"), + {'net_uuid': net_uuid, + 'physical_network': physical_network}) + elif network_type == p_const.TYPE_LOCAL: + # no flows needed for local networks + pass + else: + LOG.error(_("Cannot provision unknown network type " + "%(network_type)s for net-id=%(net_uuid)s"), + {'network_type': network_type, + 'net_uuid': net_uuid}) + + def reclaim_local_vlan(self, net_uuid): + '''Reclaim a local VLAN. + + :param net_uuid: the network uuid associated with this vlan. + :param lvm: a LocalVLANMapping object that tracks (vlan, lsw_id, + vif_ids) mapping. + ''' + lvm = self.local_vlan_map.pop(net_uuid, None) + if lvm is None: + LOG.debug(_("Network %s not used on agent."), net_uuid) + return + + LOG.info(_("Reclaiming vlan = %(vlan_id)s from net-id = %(net_uuid)s"), + {'vlan_id': lvm.vlan, + 'net_uuid': net_uuid}) + + if lvm.network_type in constants.TUNNEL_NETWORK_TYPES: + if self.enable_tunneling: + self.tun_br.delete_flows( + table=constants.TUN_TABLE[lvm.network_type], + tun_id=lvm.segmentation_id) + self.tun_br.delete_flows(dl_vlan=lvm.vlan) + if self.l2_pop: + # Try to remove tunnel ports if not used by other networks + for ofport in lvm.tun_ofports: + self.cleanup_tunnel_port(ofport, lvm.network_type) + elif lvm.network_type == p_const.TYPE_FLAT: + if lvm.physical_network in self.phys_brs: + # outbound + br = self.phys_brs[lvm.physical_network] + br.delete_flows(in_port=self.phys_ofports[lvm. 
+ physical_network], + dl_vlan=lvm.vlan) + # inbound + br = self.int_br + br.delete_flows(in_port=self.int_ofports[lvm.physical_network], + dl_vlan=0xffff) + elif lvm.network_type == p_const.TYPE_VLAN: + if lvm.physical_network in self.phys_brs: + # outbound + br = self.phys_brs[lvm.physical_network] + br.delete_flows(in_port=self.phys_ofports[lvm. + physical_network], + dl_vlan=lvm.vlan) + # inbound + br = self.int_br + br.delete_flows(in_port=self.int_ofports[lvm.physical_network], + dl_vlan=lvm.segmentation_id) + elif lvm.network_type == p_const.TYPE_LOCAL: + # no flows needed for local networks + pass + else: + LOG.error(_("Cannot reclaim unknown network type " + "%(network_type)s for net-id=%(net_uuid)s"), + {'network_type': lvm.network_type, + 'net_uuid': net_uuid}) + + self.available_local_vlans.add(lvm.vlan) + + def port_bound(self, port, net_uuid, + network_type, physical_network, segmentation_id, + ovs_restarted): + '''Bind port to net_uuid/lsw_id and install flow for inbound traffic + to vm. + + :param port: a ovslib.VifPort object. + :param net_uuid: the net_uuid this port is to be associated with. + :param network_type: the network type ('gre', 'vlan', 'flat', 'local') + :param physical_network: the physical network for 'vlan' or 'flat' + :param segmentation_id: the VID for 'vlan' or tunnel ID for 'tunnel' + :param ovs_restarted: indicates if this is called for an OVS restart. 
+ ''' + if net_uuid not in self.local_vlan_map or ovs_restarted: + self.provision_local_vlan(net_uuid, network_type, + physical_network, segmentation_id) + lvm = self.local_vlan_map[net_uuid] + lvm.vif_ports[port.vif_id] = port + # Do not bind a port if it's already bound + cur_tag = self.int_br.db_get_val("Port", port.port_name, "tag") + if cur_tag != str(lvm.vlan): + self.int_br.set_db_attribute("Port", port.port_name, "tag", + str(lvm.vlan)) + if port.ofport != -1: + self.int_br.delete_flows(in_port=port.ofport) + + def port_unbound(self, vif_id, net_uuid=None): + '''Unbind port. + + Removes corresponding local vlan mapping object if this is its last + VIF. + + :param vif_id: the id of the vif + :param net_uuid: the net_uuid this port is associated with. + ''' + if net_uuid is None: + net_uuid = self.get_net_uuid(vif_id) + + if not self.local_vlan_map.get(net_uuid): + LOG.info(_('port_unbound() net_uuid %s not in local_vlan_map'), + net_uuid) + return + + lvm = self.local_vlan_map[net_uuid] + lvm.vif_ports.pop(vif_id, None) + + if not lvm.vif_ports: + self.reclaim_local_vlan(net_uuid) + + def port_dead(self, port): + '''Once a port has no binding, put it on the "dead vlan". + + :param port: a ovs_lib.VifPort object. + ''' + # Don't kill a port if it's already dead + cur_tag = self.int_br.db_get_val("Port", port.port_name, "tag") + if cur_tag != DEAD_VLAN_TAG: + self.int_br.set_db_attribute("Port", port.port_name, "tag", + DEAD_VLAN_TAG) + self.int_br.add_flow(priority=2, in_port=port.ofport, + actions="drop") + + def setup_integration_br(self): + '''Setup the integration bridge. + + Create patch ports and remove all existing flows. + + :param bridge_name: the name of the integration bridge. 
+ :returns: the integration bridge + ''' + self.int_br.delete_port(cfg.CONF.OVS.int_peer_patch_port) + self.int_br.remove_all_flows() + # switch all traffic using L2 learning + self.int_br.add_flow(priority=1, actions="normal") + # Add a canary flow to int_br to track OVS restarts + self.int_br.add_flow(table=constants.CANARY_TABLE, priority=0, + actions="drop") + + def setup_ancillary_bridges(self, integ_br, tun_br): + '''Setup ancillary bridges - for example br-ex.''' + ovs_bridges = set(ovs_lib.get_bridges(self.root_helper)) + # Remove all known bridges + ovs_bridges.remove(integ_br) + if self.enable_tunneling: + ovs_bridges.remove(tun_br) + br_names = [self.phys_brs[physical_network].br_name for + physical_network in self.phys_brs] + ovs_bridges.difference_update(br_names) + # Filter list of bridges to those that have external + # bridge-id's configured + br_names = [] + for bridge in ovs_bridges: + id = ovs_lib.get_bridge_external_bridge_id(self.root_helper, + bridge) + if id != bridge: + br_names.append(bridge) + ovs_bridges.difference_update(br_names) + ancillary_bridges = [] + for bridge in ovs_bridges: + br = ovs_lib.OVSBridge(bridge, self.root_helper) + LOG.info(_('Adding %s to list of bridges.'), bridge) + ancillary_bridges.append(br) + return ancillary_bridges + + def setup_tunnel_br(self, tun_br=None): + '''Setup the tunnel bridge. + + Creates tunnel bridge, and links it to the integration bridge + using a patch port. + + :param tun_br: the name of the tunnel bridge. + ''' + if not self.tun_br: + self.tun_br = ovs_lib.OVSBridge(tun_br, self.root_helper) + + self.tun_br.reset_bridge() + self.patch_tun_ofport = self.int_br.add_patch_port( + cfg.CONF.OVS.int_peer_patch_port, cfg.CONF.OVS.tun_peer_patch_port) + self.patch_int_ofport = self.tun_br.add_patch_port( + cfg.CONF.OVS.tun_peer_patch_port, cfg.CONF.OVS.int_peer_patch_port) + if int(self.patch_tun_ofport) < 0 or int(self.patch_int_ofport) < 0: + LOG.error(_("Failed to create OVS patch port. 
Cannot have " + "tunneling enabled on this agent, since this version " + "of OVS does not support tunnels or patch ports. " + "Agent terminated!")) + exit(1) + self.tun_br.remove_all_flows() + + # Table 0 (default) will sort incoming traffic depending on in_port + self.tun_br.add_flow(priority=1, + in_port=self.patch_int_ofport, + actions="resubmit(,%s)" % + constants.PATCH_LV_TO_TUN) + self.tun_br.add_flow(priority=0, actions="drop") + if self.arp_responder_enabled: + # ARP broadcast-ed request go to the local ARP_RESPONDER table to + # be locally resolved + self.tun_br.add_flow(table=constants.PATCH_LV_TO_TUN, + priority=1, + proto='arp', + dl_dst="ff:ff:ff:ff:ff:ff", + actions=("resubmit(,%s)" % + constants.ARP_RESPONDER)) + # PATCH_LV_TO_TUN table will handle packets coming from patch_int + # unicasts go to table UCAST_TO_TUN where remote addresses are learnt + self.tun_br.add_flow(table=constants.PATCH_LV_TO_TUN, + priority=0, + dl_dst="00:00:00:00:00:00/01:00:00:00:00:00", + actions="resubmit(,%s)" % constants.UCAST_TO_TUN) + # Broadcasts/multicasts go to table FLOOD_TO_TUN that handles flooding + self.tun_br.add_flow(table=constants.PATCH_LV_TO_TUN, + priority=0, + dl_dst="01:00:00:00:00:00/01:00:00:00:00:00", + actions="resubmit(,%s)" % constants.FLOOD_TO_TUN) + # Tables [tunnel_type]_TUN_TO_LV will set lvid depending on tun_id + # for each tunnel type, and resubmit to table LEARN_FROM_TUN where + # remote mac addresses will be learnt + for tunnel_type in constants.TUNNEL_NETWORK_TYPES: + self.tun_br.add_flow(table=constants.TUN_TABLE[tunnel_type], + priority=0, + actions="drop") + # LEARN_FROM_TUN table will have a single flow using a learn action to + # dynamically set-up flows in UCAST_TO_TUN corresponding to remote mac + # addresses (assumes that lvid has already been set by a previous flow) + learned_flow = ("table=%s," + "priority=1," + "hard_timeout=300," + "NXM_OF_VLAN_TCI[0..11]," + "NXM_OF_ETH_DST[]=NXM_OF_ETH_SRC[]," + 
"load:0->NXM_OF_VLAN_TCI[]," + "load:NXM_NX_TUN_ID[]->NXM_NX_TUN_ID[]," + "output:NXM_OF_IN_PORT[]" % + constants.UCAST_TO_TUN) + # Once remote mac addresses are learnt, output packet to patch_int + self.tun_br.add_flow(table=constants.LEARN_FROM_TUN, + priority=1, + actions="learn(%s),output:%s" % + (learned_flow, self.patch_int_ofport)) + # Egress unicast will be handled in table UCAST_TO_TUN, where remote + # mac addresses will be learned. For now, just add a default flow that + # will resubmit unknown unicasts to table FLOOD_TO_TUN to treat them + # as broadcasts/multicasts + self.tun_br.add_flow(table=constants.UCAST_TO_TUN, + priority=0, + actions="resubmit(,%s)" % + constants.FLOOD_TO_TUN) + if self.arp_responder_enabled: + # If none of the ARP entries correspond to the requested IP, the + # broadcast-ed packet is resubmitted to the flooding table + self.tun_br.add_flow(table=constants.ARP_RESPONDER, + priority=0, + actions="resubmit(,%s)" % + constants.FLOOD_TO_TUN) + # FLOOD_TO_TUN will handle flooding in tunnels based on lvid, + # for now, add a default drop action + self.tun_br.add_flow(table=constants.FLOOD_TO_TUN, + priority=0, + actions="drop") + + def get_veth_name(self, prefix, name): + """Construct a veth name based on the prefix and name that does not + exceed the maximum length allowed for a linux device. Longer names + are hashed to help ensure uniqueness. + """ + if len(prefix + name) <= ip_lib.VETH_MAX_NAME_LENGTH: + return prefix + name + # We can't just truncate because bridges may be distinguished + # by an ident at the end. A hash over the name should be unique. 
+ # Leave part of the bridge name on for easier identification + hashlen = 6 + namelen = ip_lib.VETH_MAX_NAME_LENGTH - len(prefix) - hashlen + new_name = ('%(prefix)s%(truncated)s%(hash)s' % + {'prefix': prefix, 'truncated': name[0:namelen], + 'hash': hashlib.sha1(name).hexdigest()[0:hashlen]}) + LOG.warning(_("Creating an interface named %(name)s exceeds the " + "%(limit)d character limitation. It was shortened to " + "%(new_name)s to fit."), + {'name': name, 'limit': ip_lib.VETH_MAX_NAME_LENGTH, + 'new_name': new_name}) + return new_name + + def setup_physical_bridges(self, bridge_mappings): + '''Setup the physical network bridges. + + Creates physical network bridges and links them to the + integration bridge using veths. + + :param bridge_mappings: map physical network names to bridge names. + ''' + self.phys_brs = {} + self.int_ofports = {} + self.phys_ofports = {} + ip_wrapper = ip_lib.IPWrapper(self.root_helper) + for physical_network, bridge in bridge_mappings.iteritems(): + LOG.info(_("Mapping physical network %(physical_network)s to " + "bridge %(bridge)s"), + {'physical_network': physical_network, + 'bridge': bridge}) + # setup physical bridge + if not ip_lib.device_exists(bridge, self.root_helper): + LOG.error(_("Bridge %(bridge)s for physical network " + "%(physical_network)s does not exist. 
Agent " + "terminated!"), + {'physical_network': physical_network, + 'bridge': bridge}) + sys.exit(1) + br = ovs_lib.OVSBridge(bridge, self.root_helper) + br.remove_all_flows() + br.add_flow(priority=1, actions="normal") + self.phys_brs[physical_network] = br + + # create veth to patch physical bridge with integration bridge + int_veth_name = self.get_veth_name( + constants.VETH_INTEGRATION_PREFIX, bridge) + self.int_br.delete_port(int_veth_name) + phys_veth_name = self.get_veth_name( + constants.VETH_PHYSICAL_PREFIX, bridge) + br.delete_port(phys_veth_name) + if ip_lib.device_exists(int_veth_name, self.root_helper): + ip_lib.IPDevice(int_veth_name, self.root_helper).link.delete() + # Give udev a chance to process its rules here, to avoid + # race conditions between commands launched by udev rules + # and the subsequent call to ip_wrapper.add_veth + utils.execute(['/sbin/udevadm', 'settle', '--timeout=10']) + int_veth, phys_veth = ip_wrapper.add_veth(int_veth_name, + phys_veth_name) + self.int_ofports[physical_network] = self.int_br.add_port(int_veth) + self.phys_ofports[physical_network] = br.add_port(phys_veth) + + # block all untranslated traffic over veth between bridges + self.int_br.add_flow(priority=2, + in_port=self.int_ofports[physical_network], + actions="drop") + br.add_flow(priority=2, + in_port=self.phys_ofports[physical_network], + actions="drop") + + # enable veth to pass traffic + int_veth.link.set_up() + phys_veth.link.set_up() + + if self.veth_mtu: + # set up mtu size for veth interfaces + int_veth.link.set_mtu(self.veth_mtu) + phys_veth.link.set_mtu(self.veth_mtu) + + def scan_ports(self, registered_ports, updated_ports=None): + cur_ports = self.int_br.get_vif_port_set() + self.int_br_device_count = len(cur_ports) + port_info = {'current': cur_ports} + if updated_ports is None: + updated_ports = set() + updated_ports.update(self.check_changed_vlans(registered_ports)) + if updated_ports: + # Some updated ports might have been removed in the + # 
meanwhile, and therefore should not be processed. + # In this case the updated port won't be found among + # current ports. + updated_ports &= cur_ports + if updated_ports: + port_info['updated'] = updated_ports + + # FIXME(salv-orlando): It's not really necessary to return early + # if nothing has changed. + if cur_ports == registered_ports: + # No added or removed ports to set, just return here + return port_info + + port_info['added'] = cur_ports - registered_ports + # Remove all the known ports not found on the integration bridge + port_info['removed'] = registered_ports - cur_ports + return port_info + + def check_changed_vlans(self, registered_ports): + """Return ports which have lost their vlan tag. + + The returned value is a set of port ids of the ports concerned by a + vlan tag loss. + """ + port_tags = self.int_br.get_port_tag_dict() + changed_ports = set() + for lvm in self.local_vlan_map.values(): + for port in registered_ports: + if ( + port in lvm.vif_ports + and lvm.vif_ports[port].port_name in port_tags + and port_tags[lvm.vif_ports[port].port_name] != lvm.vlan + ): + LOG.info( + _("Port '%(port_name)s' has lost " + "its vlan tag '%(vlan_tag)d'!"), + {'port_name': lvm.vif_ports[port].port_name, + 'vlan_tag': lvm.vlan} + ) + changed_ports.add(port) + return changed_ports + + def update_ancillary_ports(self, registered_ports): + ports = set() + for bridge in self.ancillary_brs: + ports |= bridge.get_vif_port_set() + + if ports == registered_ports: + return + added = ports - registered_ports + removed = registered_ports - ports + return {'current': ports, + 'added': added, + 'removed': removed} + + def treat_vif_port(self, vif_port, port_id, network_id, network_type, + physical_network, segmentation_id, admin_state_up, + ovs_restarted): + # When this function is called for a port, the port should have + # an OVS ofport configured, as only these ports were considered + # for being treated. 
If that does not happen, it is a potential + # error condition of which operators should be aware + if not vif_port.ofport: + LOG.warn(_("VIF port: %s has no ofport configured, and might not " + "be able to transmit"), vif_port.vif_id) + if vif_port: + if admin_state_up: + self.port_bound(vif_port, network_id, network_type, + physical_network, segmentation_id, + ovs_restarted) + else: + self.port_dead(vif_port) + else: + LOG.debug(_("No VIF port for port %s defined on agent."), port_id) + + def setup_tunnel_port(self, port_name, remote_ip, tunnel_type): + ofport = self.tun_br.add_tunnel_port(port_name, + remote_ip, + self.local_ip, + tunnel_type, + self.vxlan_udp_port, + self.dont_fragment) + ofport_int = -1 + try: + ofport_int = int(ofport) + except (TypeError, ValueError): + LOG.exception(_("ofport should have a value that can be " + "interpreted as an integer")) + if ofport_int < 0: + LOG.error(_("Failed to set-up %(type)s tunnel port to %(ip)s"), + {'type': tunnel_type, 'ip': remote_ip}) + return 0 + + self.tun_br_ofports[tunnel_type][remote_ip] = ofport + # Add flow in default table to resubmit to the right + # tunnelling table (lvid will be set in the latter) + self.tun_br.add_flow(priority=1, + in_port=ofport, + actions="resubmit(,%s)" % + constants.TUN_TABLE[tunnel_type]) + + ofports = ','.join(self.tun_br_ofports[tunnel_type].values()) + if ofports and not self.l2_pop: + # Update flooding flows to include the new tunnel + for network_id, vlan_mapping in self.local_vlan_map.iteritems(): + if vlan_mapping.network_type == tunnel_type: + self.tun_br.mod_flow(table=constants.FLOOD_TO_TUN, + dl_vlan=vlan_mapping.vlan, + actions="strip_vlan," + "set_tunnel:%s,output:%s" % + (vlan_mapping.segmentation_id, + ofports)) + return ofport + + def cleanup_tunnel_port(self, tun_ofport, tunnel_type): + # Check if this tunnel port is still used + for lvm in self.local_vlan_map.values(): + if tun_ofport in lvm.tun_ofports: + break + # If not, remove it + else: + for 
remote_ip, ofport in self.tun_br_ofports[tunnel_type].items(): + if ofport == tun_ofport: + port_name = '%s-%s' % (tunnel_type, + self.get_ip_in_hex(remote_ip)) + self.tun_br.delete_port(port_name) + self.tun_br.delete_flows(in_port=ofport) + self.tun_br_ofports[tunnel_type].pop(remote_ip, None) + + def treat_devices_added_or_updated(self, devices, ovs_restarted): + resync = False + for device in devices: + LOG.debug(_("Processing port %s"), device) + port = self.int_br.get_vif_port_by_id(device) + if not port: + # The port has disappeared and should not be processed + # There is no need to put the port DOWN in the plugin as + # it never went up in the first place + LOG.info(_("Port %s was not found on the integration bridge " + "and will therefore not be processed"), device) + continue + try: + # TODO(salv-orlando): Provide bulk API for retrieving + # details for all devices in one call + details = self.plugin_rpc.get_device_details(self.context, + device, + self.agent_id) + except Exception as e: + LOG.debug(_("Unable to get port details for " + "%(device)s: %(e)s"), + {'device': device, 'e': e}) + resync = True + continue + if 'port_id' in details: + LOG.info(_("Port %(device)s updated. 
Details: %(details)s"), + {'device': device, 'details': details}) + self.treat_vif_port(port, details['port_id'], + details['network_id'], + details['network_type'], + details['physical_network'], + details['segmentation_id'], + details['admin_state_up'], + ovs_restarted) + # update plugin about port status + if details.get('admin_state_up'): + LOG.debug(_("Setting status for %s to UP"), device) + self.plugin_rpc.update_device_up( + self.context, device, self.agent_id, cfg.CONF.host) + else: + LOG.debug(_("Setting status for %s to DOWN"), device) + self.plugin_rpc.update_device_down( + self.context, device, self.agent_id, cfg.CONF.host) + LOG.info(_("Configuration for device %s completed."), device) + else: + LOG.warn(_("Device %s not defined on plugin"), device) + if (port and port.ofport != -1): + self.port_dead(port) + return resync + + def treat_ancillary_devices_added(self, devices): + resync = False + for device in devices: + LOG.info(_("Ancillary Port %s added"), device) + try: + self.plugin_rpc.get_device_details(self.context, device, + self.agent_id) + except Exception as e: + LOG.debug(_("Unable to get port details for " + "%(device)s: %(e)s"), + {'device': device, 'e': e}) + resync = True + continue + + # update plugin about port status + self.plugin_rpc.update_device_up(self.context, + device, + self.agent_id, + cfg.CONF.host) + return resync + + def treat_devices_removed(self, devices): + resync = False + self.sg_agent.remove_devices_filter(devices) + for device in devices: + LOG.info(_("Attachment %s removed"), device) + try: + self.plugin_rpc.update_device_down(self.context, + device, + self.agent_id, + cfg.CONF.host) + except Exception as e: + LOG.debug(_("port_removed failed for %(device)s: %(e)s"), + {'device': device, 'e': e}) + resync = True + continue + self.port_unbound(device) + return resync + + def treat_ancillary_devices_removed(self, devices): + resync = False + for device in devices: + LOG.info(_("Attachment %s removed"), device) + try: 
+ details = self.plugin_rpc.update_device_down(self.context, + device, + self.agent_id, + cfg.CONF.host) + except Exception as e: + LOG.debug(_("port_removed failed for %(device)s: %(e)s"), + {'device': device, 'e': e}) + resync = True + continue + if details['exists']: + LOG.info(_("Port %s updated."), device) + # Nothing to do regarding local networking + else: + LOG.debug(_("Device %s not defined on plugin"), device) + return resync + + def process_network_ports(self, port_info, ovs_restarted): + resync_a = False + resync_b = False + # TODO(salv-orlando): consider a solution for ensuring notifications + # are processed exactly in the same order in which they were + # received. This is tricky because there are two notification + # sources: the neutron server, and the ovs db monitor process + # If there is an exception while processing security groups ports + # will not be wired anyway, and a resync will be triggered + # TODO(salv-orlando): Optimize avoiding applying filters unnecessarily + # (eg: when there are no IP address changes) + self.sg_agent.setup_port_filters(port_info.get('added', set()), + port_info.get('updated', set())) + # VIF wiring needs to be performed always for 'new' devices. + # For updated ports, re-wiring is not needed in most cases, but needs + # to be performed anyway when the admin state of a device is changed. + # A device might be both in the 'added' and 'updated' + # list at the same time; avoid processing it twice. 
+ devices_added_updated = (port_info.get('added', set()) | + port_info.get('updated', set())) + if devices_added_updated: + start = time.time() + resync_a = self.treat_devices_added_or_updated( + devices_added_updated, ovs_restarted) + LOG.debug(_("process_network_ports - iteration:%(iter_num)d -" + "treat_devices_added_or_updated completed " + "in %(elapsed).3f"), + {'iter_num': self.iter_num, + 'elapsed': time.time() - start}) + if 'removed' in port_info: + start = time.time() + resync_b = self.treat_devices_removed(port_info['removed']) + LOG.debug(_("process_network_ports - iteration:%(iter_num)d -" + "treat_devices_removed completed in %(elapsed).3f"), + {'iter_num': self.iter_num, + 'elapsed': time.time() - start}) + # If one of the above operations fails => resync with plugin + return (resync_a | resync_b) + + def process_ancillary_network_ports(self, port_info): + resync_a = False + resync_b = False + if 'added' in port_info: + start = time.time() + resync_a = self.treat_ancillary_devices_added(port_info['added']) + LOG.debug(_("process_ancillary_network_ports - iteration: " + "%(iter_num)d - treat_ancillary_devices_added " + "completed in %(elapsed).3f"), + {'iter_num': self.iter_num, + 'elapsed': time.time() - start}) + if 'removed' in port_info: + start = time.time() + resync_b = self.treat_ancillary_devices_removed( + port_info['removed']) + LOG.debug(_("process_ancillary_network_ports - iteration: " + "%(iter_num)d - treat_ancillary_devices_removed " + "completed in %(elapsed).3f"), + {'iter_num': self.iter_num, + 'elapsed': time.time() - start}) + + # If one of the above operations fails => resync with plugin + return (resync_a | resync_b) + + def get_ip_in_hex(self, ip_address): + try: + return '%08x' % netaddr.IPAddress(ip_address, version=4) + except Exception: + LOG.warn(_("Unable to create tunnel port. 
Invalid remote IP: %s"), + ip_address) + return + + def tunnel_sync(self): + resync = False + try: + for tunnel_type in self.tunnel_types: + details = self.plugin_rpc.tunnel_sync(self.context, + self.local_ip, + tunnel_type) + if not self.l2_pop: + tunnels = details['tunnels'] + for tunnel in tunnels: + if self.local_ip != tunnel['ip_address']: + tunnel_id = tunnel.get('id') + # Unlike the OVS plugin, ML2 doesn't return an id + # key. So use ip_address to form port name instead. + # Port name must be <=15 chars, so use shorter hex. + remote_ip = tunnel['ip_address'] + remote_ip_hex = self.get_ip_in_hex(remote_ip) + if not tunnel_id and not remote_ip_hex: + continue + tun_name = '%s-%s' % (tunnel_type, + tunnel_id or remote_ip_hex) + self.setup_tunnel_port(tun_name, + tunnel['ip_address'], + tunnel_type) + except Exception as e: + LOG.debug(_("Unable to sync tunnel IP %(local_ip)s: %(e)s"), + {'local_ip': self.local_ip, 'e': e}) + resync = True + return resync + + def _agent_has_updates(self, polling_manager): + return (polling_manager.is_polling_required or + self.updated_ports or + self.sg_agent.firewall_refresh_needed()) + + def _port_info_has_changes(self, port_info): + return (port_info.get('added') or + port_info.get('removed') or + port_info.get('updated')) + + def check_ovs_restart(self): + # Check for the canary flow + canary_flow = self.int_br.dump_flows_for_table(constants.CANARY_TABLE) + return not canary_flow + + def rpc_loop(self, polling_manager=None): + if not polling_manager: + polling_manager = polling.AlwaysPoll() + + sync = True + ports = set() + updated_ports_copy = set() + ancillary_ports = set() + tunnel_sync = True + ovs_restarted = False + while True: + start = time.time() + port_stats = {'regular': {'added': 0, + 'updated': 0, + 'removed': 0}, + 'ancillary': {'added': 0, + 'removed': 0}} + LOG.debug(_("Agent rpc_loop - iteration:%d started"), + self.iter_num) + if sync: + LOG.info(_("Agent out of sync with plugin!")) + ports.clear() + 
ancillary_ports.clear() + sync = False + polling_manager.force_polling() + # Notify the plugin of tunnel IP + if self.enable_tunneling and tunnel_sync: + LOG.info(_("Agent tunnel out of sync with plugin!")) + try: + tunnel_sync = self.tunnel_sync() + except Exception: + LOG.exception(_("Error while synchronizing tunnels")) + tunnel_sync = True + ovs_restarted = self.check_ovs_restart() + if ovs_restarted: + self.setup_integration_br() + self.setup_physical_bridges(self.bridge_mappings) + if self.enable_tunneling: + self.setup_tunnel_br() + if self._agent_has_updates(polling_manager) or ovs_restarted: + try: + LOG.debug(_("Agent rpc_loop - iteration:%(iter_num)d - " + "starting polling. Elapsed:%(elapsed).3f"), + {'iter_num': self.iter_num, + 'elapsed': time.time() - start}) + # Save updated ports dict to perform rollback in + # case resync would be needed, and then clear + # self.updated_ports. As the greenthread should not yield + # between these two statements, this will be thread-safe + updated_ports_copy = self.updated_ports + self.updated_ports = set() + reg_ports = (set() if ovs_restarted else ports) + port_info = self.scan_ports(reg_ports, updated_ports_copy) + ports = port_info['current'] + LOG.debug(_("Agent rpc_loop - iteration:%(iter_num)d - " + "port information retrieved. " + "Elapsed:%(elapsed).3f"), + {'iter_num': self.iter_num, + 'elapsed': time.time() - start}) + # Secure and wire/unwire VIFs and update their status + # on Neutron server + if (self._port_info_has_changes(port_info) or + self.sg_agent.firewall_refresh_needed() or + ovs_restarted): + LOG.debug(_("Starting to process devices in:%s"), + port_info) + # If treat devices fails - must resync with plugin + sync = self.process_network_ports(port_info, + ovs_restarted) + LOG.debug(_("Agent rpc_loop - iteration:%(iter_num)d -" + "ports processed. 
Elapsed:%(elapsed).3f"), + {'iter_num': self.iter_num, + 'elapsed': time.time() - start}) + port_stats['regular']['added'] = ( + len(port_info.get('added', []))) + port_stats['regular']['updated'] = ( + len(port_info.get('updated', []))) + port_stats['regular']['removed'] = ( + len(port_info.get('removed', []))) + # Treat ancillary devices if they exist + if self.ancillary_brs: + port_info = self.update_ancillary_ports( + ancillary_ports) + LOG.debug(_("Agent rpc_loop - iteration:%(iter_num)d -" + "ancillary port info retrieved. " + "Elapsed:%(elapsed).3f"), + {'iter_num': self.iter_num, + 'elapsed': time.time() - start}) + + if port_info: + rc = self.process_ancillary_network_ports( + port_info) + LOG.debug(_("Agent rpc_loop - iteration:" + "%(iter_num)d - ancillary ports " + "processed. Elapsed:%(elapsed).3f"), + {'iter_num': self.iter_num, + 'elapsed': time.time() - start}) + ancillary_ports = port_info['current'] + port_stats['ancillary']['added'] = ( + len(port_info.get('added', []))) + port_stats['ancillary']['removed'] = ( + len(port_info.get('removed', []))) + sync = sync | rc + + polling_manager.polling_completed() + except Exception: + LOG.exception(_("Error while processing VIF ports")) + # Put the ports back in self.updated_port + self.updated_ports |= updated_ports_copy + sync = True + + # sleep till end of polling interval + elapsed = (time.time() - start) + LOG.debug(_("Agent rpc_loop - iteration:%(iter_num)d " + "completed. Processed ports statistics: " + "%(port_stats)s. Elapsed:%(elapsed).3f"), + {'iter_num': self.iter_num, + 'port_stats': port_stats, + 'elapsed': elapsed}) + if (elapsed < self.polling_interval): + time.sleep(self.polling_interval - elapsed) + else: + LOG.debug(_("Loop iteration exceeded interval " + "(%(polling_interval)s vs. 
%(elapsed)s)!"), + {'polling_interval': self.polling_interval, + 'elapsed': elapsed}) + self.iter_num = self.iter_num + 1 + + def daemon_loop(self): + with polling.get_polling_manager( + self.minimize_polling, + self.root_helper, + self.ovsdb_monitor_respawn_interval) as pm: + + self.rpc_loop(polling_manager=pm) + + +def handle_sigterm(signum, frame): + sys.exit(1) + + +def create_agent_config_map(config): + """Create a map of agent config parameters. + + :param config: an instance of cfg.CONF + :returns: a map of agent configuration parameters + """ + try: + bridge_mappings = q_utils.parse_mappings(config.OVS.bridge_mappings) + except ValueError as e: + raise ValueError(_("Parsing bridge_mappings failed: %s.") % e) + + kwargs = dict( + integ_br=config.OVS.integration_bridge, + tun_br=config.OVS.tunnel_bridge, + local_ip=config.OVS.local_ip, + bridge_mappings=bridge_mappings, + root_helper=config.AGENT.root_helper, + polling_interval=config.AGENT.polling_interval, + minimize_polling=config.AGENT.minimize_polling, + tunnel_types=config.AGENT.tunnel_types, + veth_mtu=config.AGENT.veth_mtu, + l2_population=config.AGENT.l2_population, + arp_responder=config.AGENT.arp_responder, + ) + + # If enable_tunneling is TRUE, set tunnel_type to default to GRE + if config.OVS.enable_tunneling and not kwargs['tunnel_types']: + kwargs['tunnel_types'] = [p_const.TYPE_GRE] + + # Verify the tunnel_types specified are valid + for tun in kwargs['tunnel_types']: + if tun not in constants.TUNNEL_NETWORK_TYPES: + msg = _('Invalid tunnel type specified: %s'), tun + raise ValueError(msg) + if not kwargs['local_ip']: + msg = _('Tunneling cannot be enabled without a valid local_ip.') + raise ValueError(msg) + + return kwargs + + +def main(): + cfg.CONF.register_opts(ip_lib.OPTS) + common_config.init(sys.argv[1:]) + common_config.setup_logging(cfg.CONF) + q_utils.log_opt_values(LOG) + + try: + agent_config = create_agent_config_map(cfg.CONF) + except ValueError as e: + LOG.error(_('%s Agent 
terminated!'), e) + sys.exit(1) + + is_xen_compute_host = 'rootwrap-xen-dom0' in agent_config['root_helper'] + if is_xen_compute_host: + # Force ip_lib to always use the root helper to ensure that ip + # commands target xen dom0 rather than domU. + cfg.CONF.set_default('ip_lib_force_root', True) + + agent = OVSNeutronAgent(**agent_config) + signal.signal(signal.SIGTERM, handle_sigterm) + + # Start everything. + LOG.info(_("Agent initialized successfully, now running... ")) + agent.daemon_loop() + sys.exit(0) + + +if __name__ == "__main__": + main() diff --git a/neutron/plugins/openvswitch/agent/xenapi/README b/neutron/plugins/openvswitch/agent/xenapi/README new file mode 100644 index 000000000..0a02c99e1 --- /dev/null +++ b/neutron/plugins/openvswitch/agent/xenapi/README @@ -0,0 +1,16 @@ +This directory contains files that are required for the XenAPI support. +They should be installed in the XenServer / Xen Cloud Platform dom0. + +If you install them manually, you will need to ensure that the newly +added files are executable. 
You can do this by running the following +command (from dom0): + + chmod a+x /etc/xapi.d/plugins/* + +Otherwise, you can build an rpm by running the following command: + + ./contrib/build-rpm.sh + +and install the rpm by running the following command (from dom0): + + rpm -i openstack-neutron-xen-plugins.rpm diff --git a/neutron/plugins/openvswitch/agent/xenapi/contrib/build-rpm.sh b/neutron/plugins/openvswitch/agent/xenapi/contrib/build-rpm.sh new file mode 100755 index 000000000..81b5f3b31 --- /dev/null +++ b/neutron/plugins/openvswitch/agent/xenapi/contrib/build-rpm.sh @@ -0,0 +1,34 @@ +#!/bin/bash + +set -eux + +thisdir=$(dirname $(readlink -f "$0")) +export NEUTRON_ROOT="$thisdir/../../../../../../" +export PYTHONPATH=$NEUTRON_ROOT + +cd $NEUTRON_ROOT +VERSION=$(sh -c "(cat $NEUTRON_ROOT/neutron/version.py; \ + echo 'print version_info.release_string()') | \ + python") +cd - + +PACKAGE=openstack-neutron-xen-plugins +RPMBUILD_DIR=$PWD/rpmbuild +if [ ! -d $RPMBUILD_DIR ]; then + echo $RPMBUILD_DIR is missing + exit 1 +fi + +for dir in BUILD BUILDROOT SRPMS RPMS SOURCES; do + rm -rf $RPMBUILD_DIR/$dir + mkdir -p $RPMBUILD_DIR/$dir +done + +rm -rf /tmp/$PACKAGE +mkdir /tmp/$PACKAGE +cp -r ../etc/xapi.d /tmp/$PACKAGE +tar czf $RPMBUILD_DIR/SOURCES/$PACKAGE.tar.gz -C /tmp $PACKAGE + +rpmbuild -ba --nodeps --define "_topdir $RPMBUILD_DIR" \ + --define "version $VERSION" \ + $RPMBUILD_DIR/SPECS/$PACKAGE.spec diff --git a/neutron/plugins/openvswitch/agent/xenapi/contrib/rpmbuild/SPECS/openstack-quantum-xen-plugins.spec b/neutron/plugins/openvswitch/agent/xenapi/contrib/rpmbuild/SPECS/openstack-quantum-xen-plugins.spec new file mode 100644 index 000000000..8ba03eaf1 --- /dev/null +++ b/neutron/plugins/openvswitch/agent/xenapi/contrib/rpmbuild/SPECS/openstack-quantum-xen-plugins.spec @@ -0,0 +1,30 @@ +Name: openstack-neutron-xen-plugins +Version: %{version} +Release: 1 +Summary: Files for XenAPI support. 
+License: ASL 2.0 +Group: Applications/Utilities +Source0: openstack-neutron-xen-plugins.tar.gz +BuildArch: noarch +BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n) + +%define debug_package %{nil} + +%description +This package contains files that are required for XenAPI support for Neutron. + +%prep +%setup -q -n openstack-neutron-xen-plugins + +%install +rm -rf $RPM_BUILD_ROOT +mkdir -p $RPM_BUILD_ROOT/etc +cp -r xapi.d $RPM_BUILD_ROOT/etc +chmod a+x $RPM_BUILD_ROOT/etc/xapi.d/plugins/* + +%clean +rm -rf $RPM_BUILD_ROOT + +%files +%defattr(-,root,root,-) +/etc/xapi.d/plugins/* diff --git a/neutron/plugins/openvswitch/agent/xenapi/etc/xapi.d/plugins/netwrap b/neutron/plugins/openvswitch/agent/xenapi/etc/xapi.d/plugins/netwrap new file mode 100644 index 000000000..21909e846 --- /dev/null +++ b/neutron/plugins/openvswitch/agent/xenapi/etc/xapi.d/plugins/netwrap @@ -0,0 +1,72 @@ +#!/usr/bin/env python + +# Copyright 2012 OpenStack Foundation +# Copyright 2012 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +# +# XenAPI plugin for executing network commands (ovs, iptables, etc) on dom0 +# + +import gettext +gettext.install('neutron', unicode=1) +try: + import json +except ImportError: + import simplejson as json +import subprocess + +import XenAPIPlugin + + +ALLOWED_CMDS = [ + 'ip', + 'ovs-ofctl', + 'ovs-vsctl', + ] + + +class PluginError(Exception): + """Base Exception class for all plugin errors.""" + def __init__(self, *args): + Exception.__init__(self, *args) + +def _run_command(cmd, cmd_input): + """Abstracts out the basics of issuing system commands. If the command + returns anything in stderr, a PluginError is raised with that information. + Otherwise, the output from stdout is returned. + """ + pipe = subprocess.PIPE + proc = subprocess.Popen(cmd, shell=False, stdin=pipe, stdout=pipe, + stderr=pipe, close_fds=True) + (out, err) = proc.communicate(cmd_input) + + if err: + raise PluginError(err) + return out + + +def run_command(session, args): + cmd = json.loads(args.get('cmd')) + if cmd and cmd[0] not in ALLOWED_CMDS: + msg = _("Dom0 execution of '%s' is not permitted") % cmd[0] + raise PluginError(msg) + result = _run_command(cmd, json.loads(args.get('cmd_input', 'null'))) + return json.dumps(result) + + +if __name__ == "__main__": + XenAPIPlugin.dispatch({"run_command": run_command}) diff --git a/neutron/plugins/openvswitch/common/__init__.py b/neutron/plugins/openvswitch/common/__init__.py new file mode 100644 index 000000000..e5f41adfe --- /dev/null +++ b/neutron/plugins/openvswitch/common/__init__.py @@ -0,0 +1,15 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/neutron/plugins/openvswitch/common/config.py b/neutron/plugins/openvswitch/common/config.py new file mode 100644 index 000000000..07ba94168 --- /dev/null +++ b/neutron/plugins/openvswitch/common/config.py @@ -0,0 +1,94 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from oslo.config import cfg + +from neutron.agent.common import config +from neutron.plugins.openvswitch.common import constants + + +DEFAULT_BRIDGE_MAPPINGS = [] +DEFAULT_VLAN_RANGES = [] +DEFAULT_TUNNEL_RANGES = [] +DEFAULT_TUNNEL_TYPES = [] + +ovs_opts = [ + cfg.StrOpt('integration_bridge', default='br-int', + help=_("Integration bridge to use")), + cfg.BoolOpt('enable_tunneling', default=False, + help=_("Enable tunneling support")), + cfg.StrOpt('tunnel_bridge', default='br-tun', + help=_("Tunnel bridge to use")), + cfg.StrOpt('int_peer_patch_port', default='patch-tun', + help=_("Peer patch port in integration bridge for tunnel " + "bridge")), + cfg.StrOpt('tun_peer_patch_port', default='patch-int', + help=_("Peer patch port in tunnel bridge for integration " + "bridge")), + cfg.StrOpt('local_ip', default='', + help=_("Local IP address of GRE tunnel endpoints.")), + cfg.ListOpt('bridge_mappings', + default=DEFAULT_BRIDGE_MAPPINGS, + help=_("List of :")), + cfg.StrOpt('tenant_network_type', default='local', + help=_("Network type for tenant networks " + "(local, vlan, gre, vxlan, or none)")), + cfg.ListOpt('network_vlan_ranges', + default=DEFAULT_VLAN_RANGES, + help=_("List of :: " + "or ")), + cfg.ListOpt('tunnel_id_ranges', + default=DEFAULT_TUNNEL_RANGES, + help=_("List of :")), + cfg.StrOpt('tunnel_type', default='', + help=_("The type of tunnels to use when utilizing tunnels, " + "either 'gre' or 'vxlan'")), +] + +agent_opts = [ + cfg.IntOpt('polling_interval', default=2, + help=_("The number of seconds the agent will wait between " + "polling for local device changes.")), + cfg.BoolOpt('minimize_polling', + default=True, + help=_("Minimize polling by monitoring ovsdb for interface " + "changes.")), + cfg.IntOpt('ovsdb_monitor_respawn_interval', + default=constants.DEFAULT_OVSDBMON_RESPAWN, + help=_("The number of seconds to wait before respawning the " + "ovsdb monitor after losing communication with it")), + cfg.ListOpt('tunnel_types', 
default=DEFAULT_TUNNEL_TYPES, + help=_("Network types supported by the agent " + "(gre and/or vxlan)")), + cfg.IntOpt('vxlan_udp_port', default=constants.VXLAN_UDP_PORT, + help=_("The UDP port to use for VXLAN tunnels.")), + cfg.IntOpt('veth_mtu', + help=_("MTU size of veth interfaces")), + cfg.BoolOpt('l2_population', default=False, + help=_("Use ml2 l2population mechanism driver to learn " + "remote mac and IPs and improve tunnel scalability")), + cfg.BoolOpt('arp_responder', default=False, + help=_("Enable local ARP responder if it is supported")), + cfg.BoolOpt('dont_fragment', default=True, + help=_("Set or un-set the don't fragment (DF) bit on " + "outgoing IP packet carrying GRE/VXLAN tunnel")), +] + + +cfg.CONF.register_opts(ovs_opts, "OVS") +cfg.CONF.register_opts(agent_opts, "AGENT") +config.register_agent_state_opts_helper(cfg.CONF) +config.register_root_helper(cfg.CONF) diff --git a/neutron/plugins/openvswitch/common/constants.py b/neutron/plugins/openvswitch/common/constants.py new file mode 100644 index 000000000..57f086325 --- /dev/null +++ b/neutron/plugins/openvswitch/common/constants.py @@ -0,0 +1,54 @@ +# Copyright (c) 2012 OpenStack Foundation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from neutron.plugins.common import constants as p_const + + +# Special vlan_id value in ovs_vlan_allocations table indicating flat network +FLAT_VLAN_ID = -1 + +# Topic for tunnel notifications between the plugin and agent +TUNNEL = 'tunnel' + +# Values for network_type +VXLAN_UDP_PORT = 4789 + +# Name prefixes for veth device pair linking the integration bridge +# with the physical bridge for a physical network +VETH_INTEGRATION_PREFIX = 'int-' +VETH_PHYSICAL_PREFIX = 'phy-' + +# The different types of tunnels +TUNNEL_NETWORK_TYPES = [p_const.TYPE_GRE, p_const.TYPE_VXLAN] + +# Various tables for tunneling flows +PATCH_LV_TO_TUN = 1 +GRE_TUN_TO_LV = 2 +VXLAN_TUN_TO_LV = 3 +LEARN_FROM_TUN = 10 +UCAST_TO_TUN = 20 +ARP_RESPONDER = 21 +FLOOD_TO_TUN = 22 +CANARY_TABLE = 23 + +# Map tunnel types to tables number +TUN_TABLE = {p_const.TYPE_GRE: GRE_TUN_TO_LV, + p_const.TYPE_VXLAN: VXLAN_TUN_TO_LV} + +# The default respawn interval for the ovsdb monitor +DEFAULT_OVSDBMON_RESPAWN = 30 + +# Special return value for an invalid OVS ofport +INVALID_OFPORT = '-1' diff --git a/neutron/plugins/openvswitch/ovs_db_v2.py b/neutron/plugins/openvswitch/ovs_db_v2.py new file mode 100644 index 000000000..75d0ec70d --- /dev/null +++ b/neutron/plugins/openvswitch/ovs_db_v2.py @@ -0,0 +1,396 @@ +# Copyright 2011 VMware, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from six import moves +from sqlalchemy import func +from sqlalchemy.orm import exc + +from neutron.common import exceptions as n_exc +import neutron.db.api as db +from neutron.db import models_v2 +from neutron.db import securitygroups_db as sg_db +from neutron.extensions import securitygroup as ext_sg +from neutron import manager +from neutron.openstack.common.db import exception as db_exc +from neutron.openstack.common import log as logging +from neutron.plugins.openvswitch.common import constants +from neutron.plugins.openvswitch import ovs_models_v2 + +LOG = logging.getLogger(__name__) + + +def get_network_binding(session, network_id): + session = session or db.get_session() + try: + binding = (session.query(ovs_models_v2.NetworkBinding). + filter_by(network_id=network_id). + one()) + return binding + except exc.NoResultFound: + return + + +def add_network_binding(session, network_id, network_type, + physical_network, segmentation_id): + with session.begin(subtransactions=True): + binding = ovs_models_v2.NetworkBinding(network_id, network_type, + physical_network, + segmentation_id) + session.add(binding) + return binding + + +def sync_vlan_allocations(network_vlan_ranges): + """Synchronize vlan_allocations table with configured VLAN ranges.""" + + session = db.get_session() + with session.begin(): + # get existing allocations for all physical networks + allocations = dict() + allocs = (session.query(ovs_models_v2.VlanAllocation). 
+ all()) + for alloc in allocs: + if alloc.physical_network not in allocations: + allocations[alloc.physical_network] = set() + allocations[alloc.physical_network].add(alloc) + + # process vlan ranges for each configured physical network + for physical_network, vlan_ranges in network_vlan_ranges.iteritems(): + # determine current configured allocatable vlans for this + # physical network + vlan_ids = set() + for vlan_range in vlan_ranges: + vlan_ids |= set(moves.xrange(vlan_range[0], vlan_range[1] + 1)) + + # remove from table unallocated vlans not currently allocatable + if physical_network in allocations: + for alloc in allocations[physical_network]: + try: + # see if vlan is allocatable + vlan_ids.remove(alloc.vlan_id) + except KeyError: + # it's not allocatable, so check if its allocated + if not alloc.allocated: + # it's not, so remove it from table + LOG.debug(_("Removing vlan %(vlan_id)s on " + "physical network " + "%(physical_network)s from pool"), + {'vlan_id': alloc.vlan_id, + 'physical_network': physical_network}) + session.delete(alloc) + del allocations[physical_network] + + # add missing allocatable vlans to table + for vlan_id in sorted(vlan_ids): + alloc = ovs_models_v2.VlanAllocation(physical_network, vlan_id) + session.add(alloc) + + # remove from table unallocated vlans for any unconfigured physical + # networks + for allocs in allocations.itervalues(): + for alloc in allocs: + if not alloc.allocated: + LOG.debug(_("Removing vlan %(vlan_id)s on physical " + "network %(physical_network)s from pool"), + {'vlan_id': alloc.vlan_id, + 'physical_network': alloc.physical_network}) + session.delete(alloc) + + +def get_vlan_allocation(physical_network, vlan_id): + session = db.get_session() + try: + alloc = (session.query(ovs_models_v2.VlanAllocation). + filter_by(physical_network=physical_network, + vlan_id=vlan_id). 
+ one()) + return alloc + except exc.NoResultFound: + return + + +def reserve_vlan(session): + with session.begin(subtransactions=True): + alloc = (session.query(ovs_models_v2.VlanAllocation). + filter_by(allocated=False). + with_lockmode('update'). + first()) + if alloc: + LOG.debug(_("Reserving vlan %(vlan_id)s on physical network " + "%(physical_network)s from pool"), + {'vlan_id': alloc.vlan_id, + 'physical_network': alloc.physical_network}) + alloc.allocated = True + return (alloc.physical_network, alloc.vlan_id) + raise n_exc.NoNetworkAvailable() + + +def reserve_specific_vlan(session, physical_network, vlan_id): + with session.begin(subtransactions=True): + try: + alloc = (session.query(ovs_models_v2.VlanAllocation). + filter_by(physical_network=physical_network, + vlan_id=vlan_id). + with_lockmode('update'). + one()) + if alloc.allocated: + if vlan_id == constants.FLAT_VLAN_ID: + raise n_exc.FlatNetworkInUse( + physical_network=physical_network) + else: + raise n_exc.VlanIdInUse(vlan_id=vlan_id, + physical_network=physical_network) + LOG.debug(_("Reserving specific vlan %(vlan_id)s on physical " + "network %(physical_network)s from pool"), + {'vlan_id': vlan_id, + 'physical_network': physical_network}) + alloc.allocated = True + except exc.NoResultFound: + LOG.debug(_("Reserving specific vlan %(vlan_id)s on physical " + "network %(physical_network)s outside pool"), + {'vlan_id': vlan_id, + 'physical_network': physical_network}) + alloc = ovs_models_v2.VlanAllocation(physical_network, vlan_id) + alloc.allocated = True + session.add(alloc) + + +def release_vlan(session, physical_network, vlan_id, network_vlan_ranges): + with session.begin(subtransactions=True): + try: + alloc = (session.query(ovs_models_v2.VlanAllocation). + filter_by(physical_network=physical_network, + vlan_id=vlan_id). + with_lockmode('update'). 
+ one()) + alloc.allocated = False + inside = False + for vlan_range in network_vlan_ranges.get(physical_network, []): + if vlan_id >= vlan_range[0] and vlan_id <= vlan_range[1]: + inside = True + break + if not inside: + session.delete(alloc) + LOG.debug(_("Releasing vlan %(vlan_id)s on physical network " + "%(physical_network)s outside pool"), + {'vlan_id': vlan_id, + 'physical_network': physical_network}) + else: + LOG.debug(_("Releasing vlan %(vlan_id)s on physical network " + "%(physical_network)s to pool"), + {'vlan_id': vlan_id, + 'physical_network': physical_network}) + except exc.NoResultFound: + LOG.warning(_("vlan_id %(vlan_id)s on physical network " + "%(physical_network)s not found"), + {'vlan_id': vlan_id, + 'physical_network': physical_network}) + + +def sync_tunnel_allocations(tunnel_id_ranges): + """Synchronize tunnel_allocations table with configured tunnel ranges.""" + + # determine current configured allocatable tunnels + tunnel_ids = set() + for tunnel_id_range in tunnel_id_ranges: + tun_min, tun_max = tunnel_id_range + if tun_max + 1 - tun_min > 1000000: + LOG.error(_("Skipping unreasonable tunnel ID range " + "%(tun_min)s:%(tun_max)s"), + {'tun_min': tun_min, 'tun_max': tun_max}) + else: + tunnel_ids |= set(moves.xrange(tun_min, tun_max + 1)) + + session = db.get_session() + with session.begin(): + # remove from table unallocated tunnels not currently allocatable + allocs = (session.query(ovs_models_v2.TunnelAllocation). 
+ all()) + for alloc in allocs: + try: + # see if tunnel is allocatable + tunnel_ids.remove(alloc.tunnel_id) + except KeyError: + # it's not allocatable, so check if its allocated + if not alloc.allocated: + # it's not, so remove it from table + LOG.debug(_("Removing tunnel %s from pool"), + alloc.tunnel_id) + session.delete(alloc) + + # add missing allocatable tunnels to table + for tunnel_id in sorted(tunnel_ids): + alloc = ovs_models_v2.TunnelAllocation(tunnel_id) + session.add(alloc) + + +def get_tunnel_allocation(tunnel_id): + session = db.get_session() + try: + alloc = (session.query(ovs_models_v2.TunnelAllocation). + filter_by(tunnel_id=tunnel_id). + with_lockmode('update'). + one()) + return alloc + except exc.NoResultFound: + return + + +def reserve_tunnel(session): + with session.begin(subtransactions=True): + alloc = (session.query(ovs_models_v2.TunnelAllocation). + filter_by(allocated=False). + with_lockmode('update'). + first()) + if alloc: + LOG.debug(_("Reserving tunnel %s from pool"), alloc.tunnel_id) + alloc.allocated = True + return alloc.tunnel_id + raise n_exc.NoNetworkAvailable() + + +def reserve_specific_tunnel(session, tunnel_id): + with session.begin(subtransactions=True): + try: + alloc = (session.query(ovs_models_v2.TunnelAllocation). + filter_by(tunnel_id=tunnel_id). + with_lockmode('update'). + one()) + if alloc.allocated: + raise n_exc.TunnelIdInUse(tunnel_id=tunnel_id) + LOG.debug(_("Reserving specific tunnel %s from pool"), tunnel_id) + alloc.allocated = True + except exc.NoResultFound: + LOG.debug(_("Reserving specific tunnel %s outside pool"), + tunnel_id) + alloc = ovs_models_v2.TunnelAllocation(tunnel_id) + alloc.allocated = True + session.add(alloc) + + +def release_tunnel(session, tunnel_id, tunnel_id_ranges): + with session.begin(subtransactions=True): + try: + alloc = (session.query(ovs_models_v2.TunnelAllocation). + filter_by(tunnel_id=tunnel_id). + with_lockmode('update'). 
+ one()) + alloc.allocated = False + inside = False + for tunnel_id_range in tunnel_id_ranges: + if (tunnel_id >= tunnel_id_range[0] + and tunnel_id <= tunnel_id_range[1]): + inside = True + break + if not inside: + session.delete(alloc) + LOG.debug(_("Releasing tunnel %s outside pool"), tunnel_id) + else: + LOG.debug(_("Releasing tunnel %s to pool"), tunnel_id) + except exc.NoResultFound: + LOG.warning(_("tunnel_id %s not found"), tunnel_id) + + +def get_port(port_id): + session = db.get_session() + try: + port = session.query(models_v2.Port).filter_by(id=port_id).one() + except exc.NoResultFound: + port = None + return port + + +def get_port_from_device(port_id): + """Get port from database.""" + LOG.debug(_("get_port_with_securitygroups() called:port_id=%s"), port_id) + session = db.get_session() + sg_binding_port = sg_db.SecurityGroupPortBinding.port_id + + query = session.query(models_v2.Port, + sg_db.SecurityGroupPortBinding.security_group_id) + query = query.outerjoin(sg_db.SecurityGroupPortBinding, + models_v2.Port.id == sg_binding_port) + query = query.filter(models_v2.Port.id == port_id) + port_and_sgs = query.all() + if not port_and_sgs: + return None + port = port_and_sgs[0][0] + plugin = manager.NeutronManager.get_plugin() + port_dict = plugin._make_port_dict(port) + port_dict[ext_sg.SECURITYGROUPS] = [ + sg_id for port_, sg_id in port_and_sgs if sg_id] + port_dict['security_group_rules'] = [] + port_dict['security_group_source_groups'] = [] + port_dict['fixed_ips'] = [ip['ip_address'] + for ip in port['fixed_ips']] + return port_dict + + +def set_port_status(port_id, status): + session = db.get_session() + try: + port = session.query(models_v2.Port).filter_by(id=port_id).one() + port['status'] = status + session.merge(port) + session.flush() + except exc.NoResultFound: + raise n_exc.PortNotFound(port_id=port_id) + + +def get_tunnel_endpoints(): + session = db.get_session() + + tunnels = session.query(ovs_models_v2.TunnelEndpoint) + return [{'id': 
tunnel.id, + 'ip_address': tunnel.ip_address} for tunnel in tunnels] + + +def _generate_tunnel_id(session): + max_tunnel_id = session.query( + func.max(ovs_models_v2.TunnelEndpoint.id)).scalar() or 0 + return max_tunnel_id + 1 + + +def add_tunnel_endpoint(ip, max_retries=10): + """Return the endpoint of the given IP address or generate a new one.""" + + # NOTE(rpodolyaka): generation of a new tunnel endpoint must be put into a + # repeatedly executed transactional block to ensure it + # doesn't conflict with any other concurrently executed + # DB transactions in spite of the specified transactions + # isolation level value + for i in moves.xrange(max_retries): + LOG.debug(_('Adding a tunnel endpoint for %s'), ip) + try: + session = db.get_session() + with session.begin(subtransactions=True): + tunnel = (session.query(ovs_models_v2.TunnelEndpoint). + filter_by(ip_address=ip).with_lockmode('update'). + first()) + + if tunnel is None: + tunnel_id = _generate_tunnel_id(session) + tunnel = ovs_models_v2.TunnelEndpoint(ip, tunnel_id) + session.add(tunnel) + + return tunnel + except db_exc.DBDuplicateEntry: + # a concurrent transaction has been committed, try again + LOG.debug(_('Adding a tunnel endpoint failed due to a concurrent' + 'transaction had been committed (%s attempts left)'), + max_retries - (i + 1)) + + raise n_exc.NeutronException( + message=_('Unable to generate a new tunnel id')) diff --git a/neutron/plugins/openvswitch/ovs_models_v2.py b/neutron/plugins/openvswitch/ovs_models_v2.py new file mode 100644 index 000000000..59b2c14a9 --- /dev/null +++ b/neutron/plugins/openvswitch/ovs_models_v2.py @@ -0,0 +1,107 @@ +# Copyright 2011 VMware, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +from sqlalchemy import Boolean, Column, ForeignKey, Integer, String +from sqlalchemy.schema import UniqueConstraint + +from neutron.db import model_base +from neutron.db import models_v2 +from sqlalchemy import orm + + +class VlanAllocation(model_base.BASEV2): + """Represents allocation state of vlan_id on physical network.""" + __tablename__ = 'ovs_vlan_allocations' + + physical_network = Column(String(64), nullable=False, primary_key=True) + vlan_id = Column(Integer, nullable=False, primary_key=True, + autoincrement=False) + allocated = Column(Boolean, nullable=False) + + def __init__(self, physical_network, vlan_id): + self.physical_network = physical_network + self.vlan_id = vlan_id + self.allocated = False + + def __repr__(self): + return "" % (self.physical_network, + self.vlan_id, self.allocated) + + +class TunnelAllocation(model_base.BASEV2): + """Represents allocation state of tunnel_id.""" + __tablename__ = 'ovs_tunnel_allocations' + + tunnel_id = Column(Integer, nullable=False, primary_key=True, + autoincrement=False) + allocated = Column(Boolean, nullable=False) + + def __init__(self, tunnel_id): + self.tunnel_id = tunnel_id + self.allocated = False + + def __repr__(self): + return "" % (self.tunnel_id, self.allocated) + + +class NetworkBinding(model_base.BASEV2): + """Represents binding of virtual network to physical realization.""" + __tablename__ = 'ovs_network_bindings' + + network_id = Column(String(36), + ForeignKey('networks.id', ondelete="CASCADE"), + primary_key=True) + # 'gre', 'vlan', 'flat', 'local' + network_type = 
Column(String(32), nullable=False) + physical_network = Column(String(64)) + segmentation_id = Column(Integer) # tunnel_id or vlan_id + + network = orm.relationship( + models_v2.Network, + backref=orm.backref("binding", lazy='joined', + uselist=False, cascade='delete')) + + def __init__(self, network_id, network_type, physical_network, + segmentation_id): + self.network_id = network_id + self.network_type = network_type + self.physical_network = physical_network + self.segmentation_id = segmentation_id + + def __repr__(self): + return "" % (self.network_id, + self.network_type, + self.physical_network, + self.segmentation_id) + + +class TunnelEndpoint(model_base.BASEV2): + """Represents tunnel endpoint in RPC mode.""" + __tablename__ = 'ovs_tunnel_endpoints' + __table_args__ = ( + UniqueConstraint('id', name='uniq_ovs_tunnel_endpoints0id'), + model_base.BASEV2.__table_args__, + ) + + ip_address = Column(String(64), primary_key=True) + id = Column(Integer, nullable=False) + + def __init__(self, ip_address, id): + self.ip_address = ip_address + self.id = id + + def __repr__(self): + return "" % (self.ip_address, self.id) diff --git a/neutron/plugins/openvswitch/ovs_neutron_plugin.py b/neutron/plugins/openvswitch/ovs_neutron_plugin.py new file mode 100644 index 000000000..31698a3df --- /dev/null +++ b/neutron/plugins/openvswitch/ovs_neutron_plugin.py @@ -0,0 +1,623 @@ +# Copyright 2011 VMware, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import sys + +from oslo.config import cfg + +from neutron.agent import securitygroups_rpc as sg_rpc +from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api +from neutron.api.rpc.agentnotifiers import l3_rpc_agent_api +from neutron.api.v2 import attributes +from neutron.common import constants as q_const +from neutron.common import exceptions as n_exc +from neutron.common import rpc_compat +from neutron.common import topics +from neutron.common import utils +from neutron.db import agents_db +from neutron.db import agentschedulers_db +from neutron.db import allowedaddresspairs_db as addr_pair_db +from neutron.db import db_base_plugin_v2 +from neutron.db import dhcp_rpc_base +from neutron.db import external_net_db +from neutron.db import extradhcpopt_db +from neutron.db import extraroute_db +from neutron.db import l3_agentschedulers_db +from neutron.db import l3_gwmode_db +from neutron.db import l3_rpc_base +from neutron.db import portbindings_db +from neutron.db import quota_db # noqa +from neutron.db import securitygroups_rpc_base as sg_db_rpc +from neutron.extensions import allowedaddresspairs as addr_pair +from neutron.extensions import extra_dhcp_opt as edo_ext +from neutron.extensions import portbindings +from neutron.extensions import providernet as provider +from neutron import manager +from neutron.openstack.common import importutils +from neutron.openstack.common import log as logging +from neutron.plugins.common import constants as svc_constants +from neutron.plugins.common import utils as plugin_utils +from neutron.plugins.openvswitch.common import config # noqa +from neutron.plugins.openvswitch.common import constants +from neutron.plugins.openvswitch import ovs_db_v2 + + +LOG = logging.getLogger(__name__) + + +class OVSRpcCallbacks(rpc_compat.RpcCallback, + dhcp_rpc_base.DhcpRpcCallbackMixin, + l3_rpc_base.L3RpcCallbackMixin, + sg_db_rpc.SecurityGroupServerRpcCallbackMixin): + + # history + # 1.0 Initial version + # 1.1 Support Security Group 
RPC + + RPC_API_VERSION = '1.1' + + def __init__(self, notifier, tunnel_type): + super(OVSRpcCallbacks, self).__init__() + self.notifier = notifier + self.tunnel_type = tunnel_type + + @classmethod + def get_port_from_device(cls, device): + port = ovs_db_v2.get_port_from_device(device) + if port: + port['device'] = device + return port + + def get_device_details(self, rpc_context, **kwargs): + """Agent requests device details.""" + agent_id = kwargs.get('agent_id') + device = kwargs.get('device') + LOG.debug(_("Device %(device)s details requested from %(agent_id)s"), + {'device': device, 'agent_id': agent_id}) + port = ovs_db_v2.get_port(device) + if port: + binding = ovs_db_v2.get_network_binding(None, port['network_id']) + entry = {'device': device, + 'network_id': port['network_id'], + 'port_id': port['id'], + 'admin_state_up': port['admin_state_up'], + 'network_type': binding.network_type, + 'segmentation_id': binding.segmentation_id, + 'physical_network': binding.physical_network} + new_status = (q_const.PORT_STATUS_ACTIVE if port['admin_state_up'] + else q_const.PORT_STATUS_DOWN) + if port['status'] != new_status: + ovs_db_v2.set_port_status(port['id'], new_status) + else: + entry = {'device': device} + LOG.debug(_("%s can not be found in database"), device) + return entry + + def update_device_down(self, rpc_context, **kwargs): + """Device no longer exists on agent.""" + agent_id = kwargs.get('agent_id') + device = kwargs.get('device') + host = kwargs.get('host') + port = ovs_db_v2.get_port(device) + LOG.debug(_("Device %(device)s no longer exists on %(agent_id)s"), + {'device': device, 'agent_id': agent_id}) + if port: + entry = {'device': device, + 'exists': True} + plugin = manager.NeutronManager.get_plugin() + if (host and + not plugin.get_port_host(rpc_context, port['id']) == host): + LOG.debug(_("Device %(device)s not bound to the" + " agent host %(host)s"), + {'device': device, 'host': host}) + elif port['status'] != q_const.PORT_STATUS_DOWN: + # Set 
port status to DOWN + ovs_db_v2.set_port_status(port['id'], + q_const.PORT_STATUS_DOWN) + else: + entry = {'device': device, + 'exists': False} + LOG.debug(_("%s can not be found in database"), device) + return entry + + def update_device_up(self, rpc_context, **kwargs): + """Device is up on agent.""" + agent_id = kwargs.get('agent_id') + device = kwargs.get('device') + host = kwargs.get('host') + port = ovs_db_v2.get_port(device) + LOG.debug(_("Device %(device)s up on %(agent_id)s"), + {'device': device, 'agent_id': agent_id}) + plugin = manager.NeutronManager.get_plugin() + if port: + if (host and + not plugin.get_port_host(rpc_context, port['id']) == host): + LOG.debug(_("Device %(device)s not bound to the" + " agent host %(host)s"), + {'device': device, 'host': host}) + return + elif port['status'] != q_const.PORT_STATUS_ACTIVE: + ovs_db_v2.set_port_status(port['id'], + q_const.PORT_STATUS_ACTIVE) + else: + LOG.debug(_("%s can not be found in database"), device) + + def tunnel_sync(self, rpc_context, **kwargs): + """Update new tunnel. + + Updates the datbase with the tunnel IP. All listening agents will also + be notified about the new tunnel IP. + """ + tunnel_ip = kwargs.get('tunnel_ip') + # Update the database with the IP + tunnel = ovs_db_v2.add_tunnel_endpoint(tunnel_ip) + tunnels = ovs_db_v2.get_tunnel_endpoints() + entry = dict() + entry['tunnels'] = tunnels + # Notify all other listening agents + self.notifier.tunnel_update(rpc_context, tunnel.ip_address, + tunnel.id, self.tunnel_type) + # Return the list of tunnels IP's to the agent + return entry + + +class AgentNotifierApi(rpc_compat.RpcProxy, + sg_rpc.SecurityGroupAgentRpcApiMixin): + '''Agent side of the openvswitch rpc API. + + API version history: + 1.0 - Initial version. 
class AgentNotifierApi(rpc_compat.RpcProxy,
                       sg_rpc.SecurityGroupAgentRpcApiMixin):
    '''Agent side of the openvswitch rpc API.

    API version history:
        1.0 - Initial version.

    '''

    BASE_RPC_API_VERSION = '1.0'

    def __init__(self, topic):
        super(AgentNotifierApi, self).__init__(
            topic=topic, default_version=self.BASE_RPC_API_VERSION)
        # Pre-compute the fanout topics this notifier casts to.
        topic_name = topics.get_topic_name
        self.topic_network_delete = topic_name(topic, topics.NETWORK,
                                               topics.DELETE)
        self.topic_port_update = topic_name(topic, topics.PORT,
                                            topics.UPDATE)
        self.topic_tunnel_update = topic_name(topic, constants.TUNNEL,
                                              topics.UPDATE)

    def network_delete(self, context, network_id):
        """Tell every agent that a network was removed."""
        msg = self.make_msg('network_delete', network_id=network_id)
        self.fanout_cast(context, msg, topic=self.topic_network_delete)

    def port_update(self, context, port, network_type, segmentation_id,
                    physical_network):
        """Broadcast new port state together with its network binding."""
        msg = self.make_msg('port_update',
                            port=port,
                            network_type=network_type,
                            segmentation_id=segmentation_id,
                            physical_network=physical_network)
        self.fanout_cast(context, msg, topic=self.topic_port_update)

    def tunnel_update(self, context, tunnel_ip, tunnel_id, tunnel_type):
        """Broadcast a newly registered tunnel endpoint to all agents."""
        msg = self.make_msg('tunnel_update',
                            tunnel_ip=tunnel_ip,
                            tunnel_id=tunnel_id,
                            tunnel_type=tunnel_type)
        self.fanout_cast(context, msg, topic=self.topic_tunnel_update)
As discussed in + https://bugs.launchpad.net/neutron/+bug/1023156, this class could + be simplified, and filtering on extended attributes could be + handled, by adding support for extended attributes to the + NeutronDbPluginV2 base class. When that occurs, this class should + be updated to take advantage of it. + + The port binding extension enables an external application relay + information to and from the plugin. + """ + + # This attribute specifies whether the plugin supports or not + # bulk/pagination/sorting operations. Name mangling is used in + # order to ensure it is qualified by class + __native_bulk_support = True + __native_pagination_support = True + __native_sorting_support = True + + _supported_extension_aliases = ["provider", "external-net", "router", + "ext-gw-mode", "binding", "quotas", + "security-group", "agent", "extraroute", + "l3_agent_scheduler", + "dhcp_agent_scheduler", + "extra_dhcp_opt", + "allowed-address-pairs"] + + @property + def supported_extension_aliases(self): + if not hasattr(self, '_aliases'): + aliases = self._supported_extension_aliases[:] + sg_rpc.disable_security_group_extension_by_config(aliases) + self._aliases = aliases + return self._aliases + + db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs( + attributes.NETWORKS, ['_extend_network_dict_provider_ovs']) + + def __init__(self, configfile=None): + super(OVSNeutronPluginV2, self).__init__() + self.base_binding_dict = { + portbindings.VIF_TYPE: portbindings.VIF_TYPE_OVS, + portbindings.VIF_DETAILS: { + # TODO(rkukura): Replace with new VIF security details + portbindings.CAP_PORT_FILTER: + 'security-group' in self.supported_extension_aliases, + portbindings.OVS_HYBRID_PLUG: True}} + self._parse_network_vlan_ranges() + ovs_db_v2.sync_vlan_allocations(self.network_vlan_ranges) + self.tenant_network_type = cfg.CONF.OVS.tenant_network_type + if self.tenant_network_type not in [svc_constants.TYPE_LOCAL, + svc_constants.TYPE_VLAN, + svc_constants.TYPE_GRE, + 
svc_constants.TYPE_VXLAN, + svc_constants.TYPE_NONE]: + LOG.error(_("Invalid tenant_network_type: %s. " + "Server terminated!"), + self.tenant_network_type) + sys.exit(1) + self.enable_tunneling = cfg.CONF.OVS.enable_tunneling + self.tunnel_type = None + if self.enable_tunneling: + self.tunnel_type = (cfg.CONF.OVS.tunnel_type or + svc_constants.TYPE_GRE) + elif cfg.CONF.OVS.tunnel_type: + self.tunnel_type = cfg.CONF.OVS.tunnel_type + self.enable_tunneling = True + self.tunnel_id_ranges = [] + if self.enable_tunneling: + self._parse_tunnel_id_ranges() + ovs_db_v2.sync_tunnel_allocations(self.tunnel_id_ranges) + elif self.tenant_network_type in constants.TUNNEL_NETWORK_TYPES: + LOG.error(_("Tunneling disabled but tenant_network_type is '%s'. " + "Server terminated!"), self.tenant_network_type) + sys.exit(1) + self.setup_rpc() + self.network_scheduler = importutils.import_object( + cfg.CONF.network_scheduler_driver + ) + self.router_scheduler = importutils.import_object( + cfg.CONF.router_scheduler_driver + ) + + def setup_rpc(self): + # RPC support + self.service_topics = {svc_constants.CORE: topics.PLUGIN, + svc_constants.L3_ROUTER_NAT: topics.L3PLUGIN} + self.conn = rpc_compat.create_connection(new=True) + self.notifier = AgentNotifierApi(topics.AGENT) + self.agent_notifiers[q_const.AGENT_TYPE_DHCP] = ( + dhcp_rpc_agent_api.DhcpAgentNotifyAPI() + ) + self.agent_notifiers[q_const.AGENT_TYPE_L3] = ( + l3_rpc_agent_api.L3AgentNotifyAPI() + ) + self.endpoints = [OVSRpcCallbacks(self.notifier, self.tunnel_type), + agents_db.AgentExtRpcCallback()] + for svc_topic in self.service_topics.values(): + self.conn.create_consumer(svc_topic, self.endpoints, fanout=False) + # Consume from all consumers in threads + self.conn.consume_in_threads() + + def _parse_network_vlan_ranges(self): + try: + self.network_vlan_ranges = plugin_utils.parse_network_vlan_ranges( + cfg.CONF.OVS.network_vlan_ranges) + except Exception as ex: + LOG.error(_("%s. 
Server terminated!"), ex) + sys.exit(1) + LOG.info(_("Network VLAN ranges: %s"), self.network_vlan_ranges) + + def _parse_tunnel_id_ranges(self): + for entry in cfg.CONF.OVS.tunnel_id_ranges: + entry = entry.strip() + try: + tun_min, tun_max = entry.split(':') + self.tunnel_id_ranges.append((int(tun_min), int(tun_max))) + except ValueError as ex: + LOG.error(_("Invalid tunnel ID range: " + "'%(range)s' - %(e)s. Server terminated!"), + {'range': entry, 'e': ex}) + sys.exit(1) + LOG.info(_("Tunnel ID ranges: %s"), self.tunnel_id_ranges) + + def _extend_network_dict_provider_ovs(self, network, net_db, + net_binding=None): + # this method used in two cases: when binding is provided explicitly + # and when it is a part of db model object + binding = net_db.binding if net_db else net_binding + network[provider.NETWORK_TYPE] = binding.network_type + if binding.network_type in constants.TUNNEL_NETWORK_TYPES: + network[provider.PHYSICAL_NETWORK] = None + network[provider.SEGMENTATION_ID] = binding.segmentation_id + elif binding.network_type == svc_constants.TYPE_FLAT: + network[provider.PHYSICAL_NETWORK] = binding.physical_network + network[provider.SEGMENTATION_ID] = None + elif binding.network_type == svc_constants.TYPE_VLAN: + network[provider.PHYSICAL_NETWORK] = binding.physical_network + network[provider.SEGMENTATION_ID] = binding.segmentation_id + elif binding.network_type == svc_constants.TYPE_LOCAL: + network[provider.PHYSICAL_NETWORK] = None + network[provider.SEGMENTATION_ID] = None + + def _process_provider_create(self, context, attrs): + network_type = attrs.get(provider.NETWORK_TYPE) + physical_network = attrs.get(provider.PHYSICAL_NETWORK) + segmentation_id = attrs.get(provider.SEGMENTATION_ID) + + network_type_set = attributes.is_attr_set(network_type) + physical_network_set = attributes.is_attr_set(physical_network) + segmentation_id_set = attributes.is_attr_set(segmentation_id) + + if not (network_type_set or physical_network_set or + segmentation_id_set): 
+ return (None, None, None) + + if not network_type_set: + msg = _("provider:network_type required") + raise n_exc.InvalidInput(error_message=msg) + elif network_type == svc_constants.TYPE_FLAT: + if segmentation_id_set: + msg = _("provider:segmentation_id specified for flat network") + raise n_exc.InvalidInput(error_message=msg) + else: + segmentation_id = constants.FLAT_VLAN_ID + elif network_type == svc_constants.TYPE_VLAN: + if not segmentation_id_set: + msg = _("provider:segmentation_id required") + raise n_exc.InvalidInput(error_message=msg) + if not utils.is_valid_vlan_tag(segmentation_id): + msg = (_("provider:segmentation_id out of range " + "(%(min_id)s through %(max_id)s)") % + {'min_id': q_const.MIN_VLAN_TAG, + 'max_id': q_const.MAX_VLAN_TAG}) + raise n_exc.InvalidInput(error_message=msg) + elif network_type in constants.TUNNEL_NETWORK_TYPES: + if not self.enable_tunneling: + msg = _("%s networks are not enabled") % network_type + raise n_exc.InvalidInput(error_message=msg) + if physical_network_set: + msg = _("provider:physical_network specified for %s " + "network") % network_type + raise n_exc.InvalidInput(error_message=msg) + else: + physical_network = None + if not segmentation_id_set: + msg = _("provider:segmentation_id required") + raise n_exc.InvalidInput(error_message=msg) + elif network_type == svc_constants.TYPE_LOCAL: + if physical_network_set: + msg = _("provider:physical_network specified for local " + "network") + raise n_exc.InvalidInput(error_message=msg) + else: + physical_network = None + if segmentation_id_set: + msg = _("provider:segmentation_id specified for local " + "network") + raise n_exc.InvalidInput(error_message=msg) + else: + segmentation_id = None + else: + msg = _("provider:network_type %s not supported") % network_type + raise n_exc.InvalidInput(error_message=msg) + + if network_type in [svc_constants.TYPE_VLAN, svc_constants.TYPE_FLAT]: + if physical_network_set: + if physical_network not in self.network_vlan_ranges: + 
msg = _("Unknown provider:physical_network " + "%s") % physical_network + raise n_exc.InvalidInput(error_message=msg) + elif 'default' in self.network_vlan_ranges: + physical_network = 'default' + else: + msg = _("provider:physical_network required") + raise n_exc.InvalidInput(error_message=msg) + + return (network_type, physical_network, segmentation_id) + + def create_network(self, context, network): + (network_type, physical_network, + segmentation_id) = self._process_provider_create(context, + network['network']) + + session = context.session + #set up default security groups + tenant_id = self._get_tenant_id_for_create( + context, network['network']) + self._ensure_default_security_group(context, tenant_id) + + with session.begin(subtransactions=True): + if not network_type: + # tenant network + network_type = self.tenant_network_type + if network_type == svc_constants.TYPE_NONE: + raise n_exc.TenantNetworksDisabled() + elif network_type == svc_constants.TYPE_VLAN: + (physical_network, + segmentation_id) = ovs_db_v2.reserve_vlan(session) + elif network_type in constants.TUNNEL_NETWORK_TYPES: + segmentation_id = ovs_db_v2.reserve_tunnel(session) + # no reservation needed for TYPE_LOCAL + else: + # provider network + if network_type in [svc_constants.TYPE_VLAN, + svc_constants.TYPE_FLAT]: + ovs_db_v2.reserve_specific_vlan(session, physical_network, + segmentation_id) + elif network_type in constants.TUNNEL_NETWORK_TYPES: + ovs_db_v2.reserve_specific_tunnel(session, segmentation_id) + # no reservation needed for TYPE_LOCAL + net = super(OVSNeutronPluginV2, self).create_network(context, + network) + binding = ovs_db_v2.add_network_binding(session, net['id'], + network_type, + physical_network, + segmentation_id) + + self._process_l3_create(context, net, network['network']) + # passing None as db model to use binding object + self._extend_network_dict_provider_ovs(net, None, binding) + # note - exception will rollback entire transaction + LOG.debug(_("Created 
network: %s"), net['id']) + return net + + def update_network(self, context, id, network): + provider._raise_if_updates_provider_attributes(network['network']) + + session = context.session + with session.begin(subtransactions=True): + net = super(OVSNeutronPluginV2, self).update_network(context, id, + network) + self._process_l3_update(context, net, network['network']) + return net + + def delete_network(self, context, id): + session = context.session + with session.begin(subtransactions=True): + binding = ovs_db_v2.get_network_binding(session, id) + self._process_l3_delete(context, id) + super(OVSNeutronPluginV2, self).delete_network(context, id) + if binding.network_type in constants.TUNNEL_NETWORK_TYPES: + ovs_db_v2.release_tunnel(session, binding.segmentation_id, + self.tunnel_id_ranges) + elif binding.network_type in [svc_constants.TYPE_VLAN, + svc_constants.TYPE_FLAT]: + ovs_db_v2.release_vlan(session, binding.physical_network, + binding.segmentation_id, + self.network_vlan_ranges) + # the network_binding record is deleted via cascade from + # the network record, so explicit removal is not necessary + self.notifier.network_delete(context, id) + + def get_network(self, context, id, fields=None): + session = context.session + with session.begin(subtransactions=True): + net = super(OVSNeutronPluginV2, self).get_network(context, + id, None) + return self._fields(net, fields) + + def get_networks(self, context, filters=None, fields=None, + sorts=None, + limit=None, marker=None, page_reverse=False): + session = context.session + with session.begin(subtransactions=True): + nets = super(OVSNeutronPluginV2, + self).get_networks(context, filters, None, sorts, + limit, marker, page_reverse) + + return [self._fields(net, fields) for net in nets] + + def create_port(self, context, port): + # Set port status as 'DOWN'. 
This will be updated by agent + port['port']['status'] = q_const.PORT_STATUS_DOWN + port_data = port['port'] + session = context.session + with session.begin(subtransactions=True): + self._ensure_default_security_group_on_port(context, port) + sgids = self._get_security_groups_on_port(context, port) + dhcp_opts = port['port'].get(edo_ext.EXTRADHCPOPTS, []) + port = super(OVSNeutronPluginV2, self).create_port(context, port) + self._process_portbindings_create_and_update(context, + port_data, port) + self._process_port_create_security_group(context, port, sgids) + self._process_port_create_extra_dhcp_opts(context, port, + dhcp_opts) + port[addr_pair.ADDRESS_PAIRS] = ( + self._process_create_allowed_address_pairs( + context, port, + port_data.get(addr_pair.ADDRESS_PAIRS))) + self.notify_security_groups_member_updated(context, port) + return port + + def update_port(self, context, id, port): + session = context.session + need_port_update_notify = False + with session.begin(subtransactions=True): + original_port = super(OVSNeutronPluginV2, self).get_port( + context, id) + updated_port = super(OVSNeutronPluginV2, self).update_port( + context, id, port) + if addr_pair.ADDRESS_PAIRS in port['port']: + need_port_update_notify |= ( + self.update_address_pairs_on_port(context, id, port, + original_port, + updated_port)) + need_port_update_notify |= self.update_security_group_on_port( + context, id, port, original_port, updated_port) + self._process_portbindings_create_and_update(context, + port['port'], + updated_port) + need_port_update_notify |= self._update_extra_dhcp_opts_on_port( + context, id, port, updated_port) + + need_port_update_notify |= self.is_security_group_member_updated( + context, original_port, updated_port) + if original_port['admin_state_up'] != updated_port['admin_state_up']: + need_port_update_notify = True + + if need_port_update_notify: + binding = ovs_db_v2.get_network_binding(None, + updated_port['network_id']) + self.notifier.port_update(context, 
updated_port, + binding.network_type, + binding.segmentation_id, + binding.physical_network) + return updated_port + + def delete_port(self, context, id, l3_port_check=True): + + # if needed, check to see if this is a port owned by + # and l3-router. If so, we should prevent deletion. + if l3_port_check: + self.prevent_l3_port_deletion(context, id) + + session = context.session + with session.begin(subtransactions=True): + self.disassociate_floatingips(context, id) + port = self.get_port(context, id) + self._delete_port_security_group_bindings(context, id) + super(OVSNeutronPluginV2, self).delete_port(context, id) + + self.notify_security_groups_member_updated(context, port) diff --git a/neutron/plugins/plumgrid/README b/neutron/plugins/plumgrid/README new file mode 100644 index 000000000..e7118307d --- /dev/null +++ b/neutron/plugins/plumgrid/README @@ -0,0 +1,8 @@ +PLUMgrid Neutron Plugin for Virtual Network Infrastructure (VNI) + +This plugin implements Neutron v2 APIs and helps configure +L2/L3 virtual networks consisting of PLUMgrid Platform. +Implements External Networks and Port Binding Extension + +For more details on use please refer to: +http://wiki.openstack.org/PLUMgrid-Neutron diff --git a/neutron/plugins/plumgrid/__init__.py b/neutron/plugins/plumgrid/__init__.py new file mode 100644 index 000000000..39e9b8d13 --- /dev/null +++ b/neutron/plugins/plumgrid/__init__.py @@ -0,0 +1,17 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013 PLUMgrid, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Edgar Magana, emagana@plumgrid.com, PLUMgrid, Inc. diff --git a/neutron/plugins/plumgrid/common/__init__.py b/neutron/plugins/plumgrid/common/__init__.py new file mode 100644 index 000000000..39e9b8d13 --- /dev/null +++ b/neutron/plugins/plumgrid/common/__init__.py @@ -0,0 +1,17 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013 PLUMgrid, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Edgar Magana, emagana@plumgrid.com, PLUMgrid, Inc. diff --git a/neutron/plugins/plumgrid/common/exceptions.py b/neutron/plugins/plumgrid/common/exceptions.py new file mode 100644 index 000000000..b2862f9cc --- /dev/null +++ b/neutron/plugins/plumgrid/common/exceptions.py @@ -0,0 +1,30 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013 PLUMgrid, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
"""Neutron PLUMgrid Plugin exceptions"""

from neutron.common import exceptions as base_exec


class PLUMgridException(base_exec.NeutronException):
    """Base error raised by the PLUMgrid plugin.

    ``err_msg`` is interpolated into the message by NeutronException.
    """

    message = _("PLUMgrid Plugin Error: %(err_msg)s")


class PLUMgridConnectionFailed(PLUMgridException):
    """Raised when the plugin cannot reach the PLUMgrid Director."""

    message = _("Connection failed with PLUMgrid Director: %(err_msg)s")
class Plumlib():
    """Fake PLUMgrid library used by the Neutron unit tests.

    Mirrors the interface of the real library but performs no I/O: most
    calls are no-ops, and only the methods whose return value the plugin
    consumes compute anything.
    """

    def __init__(self):
        LOG.info(_('Python PLUMgrid Fake Library Started '))

    def director_conn(self, director_plumgrid, director_port, timeout,
                      director_admin, director_password):
        LOG.info(_('Fake Director: %s'),
                 director_plumgrid + ':' + director_port)

    def create_network(self, tenant_id, net_db, network):
        # Echo the provider attributes back, as the real backend would.
        keys = (provider.NETWORK_TYPE,
                provider.PHYSICAL_NETWORK,
                provider.SEGMENTATION_ID)
        net_db["network"] = dict((key, network["network"][key])
                                 for key in keys)
        return net_db

    def update_network(self, tenant_id, net_id):
        pass

    def delete_network(self, net_db, net_id):
        pass

    def create_subnet(self, sub_db, net_db, ipnet):
        pass

    def update_subnet(self, orig_sub_db, new_sub_db, ipnet):
        pass

    def delete_subnet(self, tenant_id, net_db, net_id):
        pass

    def create_port(self, port_db, router_db):
        pass

    def update_port(self, port_db, router_db):
        pass

    def delete_port(self, port_db, router_db):
        pass

    def create_router(self, tenant_id, router_db):
        pass

    def update_router(self, router_db, router_id):
        pass

    def delete_router(self, tenant_id, router_id):
        pass

    def add_router_interface(self, tenant_id, router_id, port_db, ipnet):
        pass

    def remove_router_interface(self, tenant_id, net_id, router_id):
        pass

    def create_floatingip(self, floating_ip):
        pass

    def update_floatingip(self, floating_ip_orig, floating_ip, id):
        pass

    def delete_floatingip(self, floating_ip_orig, id):
        pass

    def disassociate_floatingips(self, fip, port_id):
        # The plugin only needs these three attributes back.
        wanted = ("id", "floating_network_id", "floating_ip_address")
        return dict((key, fip[key]) for key in wanted)
+ """ + + def __init__(self): + LOG.info(_('Python PLUMgrid Library Started ')) + + def director_conn(self, director_plumgrid, director_port, timeout, + director_admin, director_password): + self.plumlib = plumlib.Plumlib(director_plumgrid, + director_port, + timeout, + director_admin, + director_password) + + def create_network(self, tenant_id, net_db, network): + self.plumlib.create_network(tenant_id, net_db, network) + + def update_network(self, tenant_id, net_id): + self.plumlib.update_network(tenant_id, net_id) + + def delete_network(self, net_db, net_id): + self.plumlib.delete_network(net_db, net_id) + + def create_subnet(self, sub_db, net_db, ipnet): + self.plumlib.create_subnet(sub_db, net_db, ipnet) + + def update_subnet(self, orig_sub_db, new_sub_db, ipnet): + self.plumlib.update_subnet(orig_sub_db, new_sub_db, ipnet) + + def delete_subnet(self, tenant_id, net_db, net_id): + self.plumlib.delete_subnet(tenant_id, net_db, net_id) + + def create_port(self, port_db, router_db): + self.plumlib.create_port(port_db, router_db) + + def update_port(self, port_db, router_db): + self.plumlib.update_port(port_db, router_db) + + def delete_port(self, port_db, router_db): + self.plumlib.delete_port(port_db, router_db) + + def create_router(self, tenant_id, router_db): + self.plumlib.create_router(tenant_id, router_db) + + def update_router(self, router_db, router_id): + self.plumlib.update_router(router_db, router_id) + + def delete_router(self, tenant_id, router_id): + self.plumlib.delete_router(tenant_id, router_id) + + def add_router_interface(self, tenant_id, router_id, port_db, ipnet): + self.plumlib.add_router_interface(tenant_id, router_id, port_db, ipnet) + + def remove_router_interface(self, tenant_id, net_id, router_id): + self.plumlib.remove_router_interface(tenant_id, net_id, router_id) + + def create_floatingip(self, floating_ip): + self.plumlib.create_floatingip(floating_ip) + + def update_floatingip(self, floating_ip_orig, floating_ip, id): + 
self.plumlib.update_floatingip(floating_ip_orig, floating_ip, id) + + def delete_floatingip(self, floating_ip_orig, id): + self.plumlib.delete_floatingip(floating_ip_orig, id) + + def disassociate_floatingips(self, floating_ip, port_id): + self.plumlib.disassociate_floatingips(floating_ip, port_id) diff --git a/neutron/plugins/plumgrid/plumgrid_plugin/__init__.py b/neutron/plugins/plumgrid/plumgrid_plugin/__init__.py new file mode 100644 index 000000000..39e9b8d13 --- /dev/null +++ b/neutron/plugins/plumgrid/plumgrid_plugin/__init__.py @@ -0,0 +1,17 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013 PLUMgrid, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Edgar Magana, emagana@plumgrid.com, PLUMgrid, Inc. diff --git a/neutron/plugins/plumgrid/plumgrid_plugin/plugin_ver.py b/neutron/plugins/plumgrid/plumgrid_plugin/plugin_ver.py new file mode 100644 index 000000000..5a47438c1 --- /dev/null +++ b/neutron/plugins/plumgrid/plumgrid_plugin/plugin_ver.py @@ -0,0 +1,19 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013 PLUMgrid, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Edgar Magana, emagana@plumgrid.com, PLUMgrid, Inc. + +VERSION = "0.2" diff --git a/neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py b/neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py new file mode 100644 index 000000000..d28b90ae0 --- /dev/null +++ b/neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py @@ -0,0 +1,604 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# Copyright 2013 PLUMgrid, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Edgar Magana, emagana@plumgrid.com, PLUMgrid, Inc. 
+ +""" +Neutron Plug-in for PLUMgrid Virtual Networking Infrastructure (VNI) +This plugin will forward authenticated REST API calls +to the PLUMgrid Network Management System called Director +""" + +import netaddr +from oslo.config import cfg +from sqlalchemy.orm import exc as sa_exc + +from neutron.api.v2 import attributes +from neutron.common import constants +from neutron.db import db_base_plugin_v2 +from neutron.db import external_net_db +from neutron.db import l3_db +from neutron.db import portbindings_db +from neutron.db import quota_db # noqa +from neutron.extensions import portbindings +from neutron.openstack.common import importutils +from neutron.openstack.common import log as logging +from neutron.plugins.plumgrid.common import exceptions as plum_excep +from neutron.plugins.plumgrid.plumgrid_plugin import plugin_ver + +LOG = logging.getLogger(__name__) + +director_server_opts = [ + cfg.StrOpt('director_server', default='localhost', + help=_("PLUMgrid Director server to connect to")), + cfg.StrOpt('director_server_port', default='8080', + help=_("PLUMgrid Director server port to connect to")), + cfg.StrOpt('username', default='username', + help=_("PLUMgrid Director admin username")), + cfg.StrOpt('password', default='password', secret=True, + help=_("PLUMgrid Director admin password")), + cfg.IntOpt('servertimeout', default=5, + help=_("PLUMgrid Director server timeout")), + cfg.StrOpt('driver', + default="neutron.plugins.plumgrid.drivers.plumlib.Plumlib", + help=_("PLUMgrid Driver")), ] + +cfg.CONF.register_opts(director_server_opts, "plumgriddirector") + + +class NeutronPluginPLUMgridV2(db_base_plugin_v2.NeutronDbPluginV2, + portbindings_db.PortBindingMixin, + external_net_db.External_net_db_mixin, + l3_db.L3_NAT_db_mixin): + + supported_extension_aliases = ["external-net", "router", "binding", + "quotas", "provider"] + + binding_view = "extension:port_binding:view" + binding_set = "extension:port_binding:set" + + def __init__(self): + 
LOG.info(_('Neutron PLUMgrid Director: Starting Plugin')) + + super(NeutronPluginPLUMgridV2, self).__init__() + self.plumgrid_init() + + LOG.debug(_('Neutron PLUMgrid Director: Neutron server with ' + 'PLUMgrid Plugin has started')) + + def plumgrid_init(self): + """PLUMgrid initialization.""" + director_plumgrid = cfg.CONF.plumgriddirector.director_server + director_port = cfg.CONF.plumgriddirector.director_server_port + director_admin = cfg.CONF.plumgriddirector.username + director_password = cfg.CONF.plumgriddirector.password + timeout = cfg.CONF.plumgriddirector.servertimeout + plum_driver = cfg.CONF.plumgriddirector.driver + + # PLUMgrid Director info validation + LOG.info(_('Neutron PLUMgrid Director: %s'), director_plumgrid) + self._plumlib = importutils.import_object(plum_driver) + self._plumlib.director_conn(director_plumgrid, director_port, timeout, + director_admin, director_password) + + def create_network(self, context, network): + """Create Neutron network. + + Creates a PLUMgrid-based bridge. + """ + + LOG.debug(_('Neutron PLUMgrid Director: create_network() called')) + + # Plugin DB - Network Create and validation + tenant_id = self._get_tenant_id_for_create(context, + network["network"]) + self._network_admin_state(network) + + with context.session.begin(subtransactions=True): + net_db = super(NeutronPluginPLUMgridV2, + self).create_network(context, network) + # Propagate all L3 data into DB + self._process_l3_create(context, net_db, network['network']) + + try: + LOG.debug(_('PLUMgrid Library: create_network() called')) + self._plumlib.create_network(tenant_id, net_db, network) + + except Exception as err_message: + raise plum_excep.PLUMgridException(err_msg=err_message) + + # Return created network + return net_db + + def update_network(self, context, net_id, network): + """Update Neutron network. + + Updates a PLUMgrid-based bridge. 
+ """ + + LOG.debug(_("Neutron PLUMgrid Director: update_network() called")) + self._network_admin_state(network) + tenant_id = self._get_tenant_id_for_create(context, network["network"]) + + with context.session.begin(subtransactions=True): + # Plugin DB - Network Update + net_db = super( + NeutronPluginPLUMgridV2, self).update_network(context, + net_id, network) + self._process_l3_update(context, net_db, network['network']) + + try: + LOG.debug(_("PLUMgrid Library: update_network() called")) + self._plumlib.update_network(tenant_id, net_id) + + except Exception as err_message: + raise plum_excep.PLUMgridException(err_msg=err_message) + + # Return updated network + return net_db + + def delete_network(self, context, net_id): + """Delete Neutron network. + + Deletes a PLUMgrid-based bridge. + """ + + LOG.debug(_("Neutron PLUMgrid Director: delete_network() called")) + net_db = super(NeutronPluginPLUMgridV2, + self).get_network(context, net_id) + + with context.session.begin(subtransactions=True): + self._process_l3_delete(context, net_id) + # Plugin DB - Network Delete + super(NeutronPluginPLUMgridV2, self).delete_network(context, + net_id) + + try: + LOG.debug(_("PLUMgrid Library: update_network() called")) + self._plumlib.delete_network(net_db, net_id) + + except Exception as err_message: + raise plum_excep.PLUMgridException(err_msg=err_message) + + def create_port(self, context, port): + """Create Neutron port. + + Creates a PLUMgrid-based port on the specific Virtual Network + Function (VNF). + """ + LOG.debug(_("Neutron PLUMgrid Director: create_port() called")) + + # Port operations on PLUMgrid Director is an automatic operation + # from the VIF driver operations in Nova. 
+ # It requires admin_state_up to be True + + port["port"]["admin_state_up"] = True + + with context.session.begin(subtransactions=True): + # Plugin DB - Port Create and Return port + port_db = super(NeutronPluginPLUMgridV2, self).create_port(context, + port) + device_id = port_db["device_id"] + if port_db["device_owner"] == constants.DEVICE_OWNER_ROUTER_GW: + router_db = self._get_router(context, device_id) + else: + router_db = None + + try: + LOG.debug(_("PLUMgrid Library: create_port() called")) + self._plumlib.create_port(port_db, router_db) + + except Exception as err_message: + raise plum_excep.PLUMgridException(err_msg=err_message) + + # Plugin DB - Port Create and Return port + return self._port_viftype_binding(context, port_db) + + def update_port(self, context, port_id, port): + """Update Neutron port. + + Updates a PLUMgrid-based port on the specific Virtual Network + Function (VNF). + """ + LOG.debug(_("Neutron PLUMgrid Director: update_port() called")) + + with context.session.begin(subtransactions=True): + # Plugin DB - Port Create and Return port + port_db = super(NeutronPluginPLUMgridV2, self).update_port( + context, port_id, port) + device_id = port_db["device_id"] + if port_db["device_owner"] == constants.DEVICE_OWNER_ROUTER_GW: + router_db = self._get_router(context, device_id) + else: + router_db = None + try: + LOG.debug(_("PLUMgrid Library: create_port() called")) + self._plumlib.update_port(port_db, router_db) + + except Exception as err_message: + raise plum_excep.PLUMgridException(err_msg=err_message) + + # Plugin DB - Port Update + return self._port_viftype_binding(context, port_db) + + def delete_port(self, context, port_id, l3_port_check=True): + """Delete Neutron port. + + Deletes a PLUMgrid-based port on the specific Virtual Network + Function (VNF). 
+ """ + + LOG.debug(_("Neutron PLUMgrid Director: delete_port() called")) + + with context.session.begin(subtransactions=True): + # Plugin DB - Port Create and Return port + port_db = super(NeutronPluginPLUMgridV2, + self).get_port(context, port_id) + self.disassociate_floatingips(context, port_id) + super(NeutronPluginPLUMgridV2, self).delete_port(context, port_id) + + if port_db["device_owner"] == constants.DEVICE_OWNER_ROUTER_GW: + device_id = port_db["device_id"] + router_db = self._get_router(context, device_id) + else: + router_db = None + try: + LOG.debug(_("PLUMgrid Library: delete_port() called")) + self._plumlib.delete_port(port_db, router_db) + + except Exception as err_message: + raise plum_excep.PLUMgridException(err_msg=err_message) + + def get_port(self, context, id, fields=None): + with context.session.begin(subtransactions=True): + port_db = super(NeutronPluginPLUMgridV2, + self).get_port(context, id, fields) + + self._port_viftype_binding(context, port_db) + return self._fields(port_db, fields) + + def get_ports(self, context, filters=None, fields=None): + with context.session.begin(subtransactions=True): + ports_db = super(NeutronPluginPLUMgridV2, + self).get_ports(context, filters, fields) + for port_db in ports_db: + self._port_viftype_binding(context, port_db) + return [self._fields(port, fields) for port in ports_db] + + def create_subnet(self, context, subnet): + """Create Neutron subnet. + + Creates a PLUMgrid-based DHCP and NAT Virtual Network + Functions (VNFs). 
+ """ + + LOG.debug(_("Neutron PLUMgrid Director: create_subnet() called")) + + with context.session.begin(subtransactions=True): + # Plugin DB - Subnet Create + net_db = super(NeutronPluginPLUMgridV2, self).get_network( + context, subnet['subnet']['network_id'], fields=None) + s = subnet['subnet'] + ipnet = netaddr.IPNetwork(s['cidr']) + + # PLUMgrid Director reserves the last IP address for GW + # when is not defined + if s['gateway_ip'] is attributes.ATTR_NOT_SPECIFIED: + gw_ip = str(netaddr.IPAddress(ipnet.last - 1)) + subnet['subnet']['gateway_ip'] = gw_ip + + # PLUMgrid reserves the first IP + if s['allocation_pools'] == attributes.ATTR_NOT_SPECIFIED: + allocation_pool = self._allocate_pools_for_subnet(context, s) + subnet['subnet']['allocation_pools'] = allocation_pool + + sub_db = super(NeutronPluginPLUMgridV2, self).create_subnet( + context, subnet) + + try: + LOG.debug(_("PLUMgrid Library: create_subnet() called")) + self._plumlib.create_subnet(sub_db, net_db, ipnet) + except Exception as err_message: + raise plum_excep.PLUMgridException(err_msg=err_message) + + return sub_db + + def delete_subnet(self, context, subnet_id): + """Delete subnet core Neutron API.""" + + LOG.debug(_("Neutron PLUMgrid Director: delete_subnet() called")) + # Collecting subnet info + sub_db = self._get_subnet(context, subnet_id) + tenant_id = self._get_tenant_id_for_create(context, subnet_id) + net_id = sub_db["network_id"] + net_db = self.get_network(context, net_id) + + with context.session.begin(subtransactions=True): + # Plugin DB - Subnet Delete + super(NeutronPluginPLUMgridV2, self).delete_subnet( + context, subnet_id) + try: + LOG.debug(_("PLUMgrid Library: delete_subnet() called")) + self._plumlib.delete_subnet(tenant_id, net_db, net_id) + except Exception as err_message: + raise plum_excep.PLUMgridException(err_msg=err_message) + + def update_subnet(self, context, subnet_id, subnet): + """Update subnet core Neutron API.""" + + LOG.debug(_("update_subnet() called")) + # 
Collecting subnet info + orig_sub_db = self._get_subnet(context, subnet_id) + + with context.session.begin(subtransactions=True): + # Plugin DB - Subnet Update + new_sub_db = super(NeutronPluginPLUMgridV2, + self).update_subnet(context, subnet_id, subnet) + ipnet = netaddr.IPNetwork(new_sub_db['cidr']) + + try: + # PLUMgrid Server does not support updating resources yet + LOG.debug(_("PLUMgrid Library: update_network() called")) + self._plumlib.update_subnet(orig_sub_db, new_sub_db, ipnet) + + except Exception as err_message: + raise plum_excep.PLUMgridException(err_msg=err_message) + + return new_sub_db + + def create_router(self, context, router): + """ + Create router extension Neutron API + """ + LOG.debug(_("Neutron PLUMgrid Director: create_router() called")) + + tenant_id = self._get_tenant_id_for_create(context, router["router"]) + + with context.session.begin(subtransactions=True): + + # Create router in DB + router_db = super(NeutronPluginPLUMgridV2, + self).create_router(context, router) + # Create router on the network controller + try: + # Add Router to VND + LOG.debug(_("PLUMgrid Library: create_router() called")) + self._plumlib.create_router(tenant_id, router_db) + except Exception as err_message: + raise plum_excep.PLUMgridException(err_msg=err_message) + + # Return created router + return router_db + + def update_router(self, context, router_id, router): + + LOG.debug(_("Neutron PLUMgrid Director: update_router() called")) + + with context.session.begin(subtransactions=True): + router_db = super(NeutronPluginPLUMgridV2, + self).update_router(context, router_id, router) + try: + LOG.debug(_("PLUMgrid Library: update_router() called")) + self._plumlib.update_router(router_db, router_id) + except Exception as err_message: + raise plum_excep.PLUMgridException(err_msg=err_message) + + # Return updated router + return router_db + + def delete_router(self, context, router_id): + LOG.debug(_("Neutron PLUMgrid Director: delete_router() called")) + + with 
context.session.begin(subtransactions=True): + orig_router = self._get_router(context, router_id) + tenant_id = orig_router["tenant_id"] + + super(NeutronPluginPLUMgridV2, self).delete_router(context, + router_id) + + try: + LOG.debug(_("PLUMgrid Library: delete_router() called")) + self._plumlib.delete_router(tenant_id, router_id) + + except Exception as err_message: + raise plum_excep.PLUMgridException(err_msg=err_message) + + def add_router_interface(self, context, router_id, interface_info): + + LOG.debug(_("Neutron PLUMgrid Director: " + "add_router_interface() called")) + with context.session.begin(subtransactions=True): + # Validate args + router_db = self._get_router(context, router_id) + tenant_id = router_db['tenant_id'] + + # Create interface in DB + int_router = super(NeutronPluginPLUMgridV2, + self).add_router_interface(context, + router_id, + interface_info) + port_db = self._get_port(context, int_router['port_id']) + subnet_id = port_db["fixed_ips"][0]["subnet_id"] + subnet_db = super(NeutronPluginPLUMgridV2, + self)._get_subnet(context, subnet_id) + ipnet = netaddr.IPNetwork(subnet_db['cidr']) + + # Create interface on the network controller + try: + LOG.debug(_("PLUMgrid Library: add_router_interface() called")) + self._plumlib.add_router_interface(tenant_id, router_id, + port_db, ipnet) + + except Exception as err_message: + raise plum_excep.PLUMgridException(err_msg=err_message) + + return int_router + + def remove_router_interface(self, context, router_id, int_info): + + LOG.debug(_("Neutron PLUMgrid Director: " + "remove_router_interface() called")) + with context.session.begin(subtransactions=True): + # Validate args + router_db = self._get_router(context, router_id) + tenant_id = router_db['tenant_id'] + if 'port_id' in int_info: + port = self._get_port(context, int_info['port_id']) + net_id = port['network_id'] + + elif 'subnet_id' in int_info: + subnet_id = int_info['subnet_id'] + subnet = self._get_subnet(context, subnet_id) + net_id = 
subnet['network_id'] + + # Remove router in DB + del_int_router = super(NeutronPluginPLUMgridV2, + self).remove_router_interface(context, + router_id, + int_info) + + try: + LOG.debug(_("PLUMgrid Library: " + "remove_router_interface() called")) + self._plumlib.remove_router_interface(tenant_id, + net_id, router_id) + + except Exception as err_message: + raise plum_excep.PLUMgridException(err_msg=err_message) + + return del_int_router + + def create_floatingip(self, context, floatingip): + LOG.debug(_("Neutron PLUMgrid Director: create_floatingip() called")) + + with context.session.begin(subtransactions=True): + + floating_ip = super(NeutronPluginPLUMgridV2, + self).create_floatingip(context, floatingip) + try: + LOG.debug(_("PLUMgrid Library: create_floatingip() called")) + self._plumlib.create_floatingip(floating_ip) + + except Exception as err_message: + raise plum_excep.PLUMgridException(err_msg=err_message) + + return floating_ip + + def update_floatingip(self, context, id, floatingip): + LOG.debug(_("Neutron PLUMgrid Director: update_floatingip() called")) + + with context.session.begin(subtransactions=True): + floating_ip_orig = super(NeutronPluginPLUMgridV2, + self).get_floatingip(context, id) + floating_ip = super(NeutronPluginPLUMgridV2, + self).update_floatingip(context, id, + floatingip) + try: + LOG.debug(_("PLUMgrid Library: update_floatingip() called")) + self._plumlib.update_floatingip(floating_ip_orig, floating_ip, + id) + + except Exception as err_message: + raise plum_excep.PLUMgridException(err_msg=err_message) + + return floating_ip + + def delete_floatingip(self, context, id): + LOG.debug(_("Neutron PLUMgrid Director: delete_floatingip() called")) + + with context.session.begin(subtransactions=True): + + floating_ip_orig = super(NeutronPluginPLUMgridV2, + self).get_floatingip(context, id) + + super(NeutronPluginPLUMgridV2, self).delete_floatingip(context, id) + + try: + LOG.debug(_("PLUMgrid Library: delete_floatingip() called")) + 
self._plumlib.delete_floatingip(floating_ip_orig, id) + + except Exception as err_message: + raise plum_excep.PLUMgridException(err_msg=err_message) + + def disassociate_floatingips(self, context, port_id): + LOG.debug(_("Neutron PLUMgrid Director: disassociate_floatingips() " + "called")) + + try: + fip_qry = context.session.query(l3_db.FloatingIP) + floating_ip = fip_qry.filter_by(fixed_port_id=port_id).one() + + LOG.debug(_("PLUMgrid Library: disassociate_floatingips()" + " called")) + self._plumlib.disassociate_floatingips(floating_ip, port_id) + + except sa_exc.NoResultFound: + pass + + except Exception as err_message: + raise plum_excep.PLUMgridException(err_msg=err_message) + + super(NeutronPluginPLUMgridV2, + self).disassociate_floatingips(context, port_id) + + """ + Internal PLUMgrid Fuctions + """ + + def _get_plugin_version(self): + return plugin_ver.VERSION + + def _port_viftype_binding(self, context, port): + port[portbindings.VIF_TYPE] = portbindings.VIF_TYPE_IOVISOR + port[portbindings.VIF_DETAILS] = { + # TODO(rkukura): Replace with new VIF security details + portbindings.CAP_PORT_FILTER: + 'security-group' in self.supported_extension_aliases} + return port + + def _network_admin_state(self, network): + if network["network"].get("admin_state_up") is False: + LOG.warning(_("Networks with admin_state_up=False are not " + "supported by PLUMgrid plugin yet.")) + return network + + def _allocate_pools_for_subnet(self, context, subnet): + """Create IP allocation pools for a given subnet + + Pools are defined by the 'allocation_pools' attribute, + a list of dict objects with 'start' and 'end' keys for + defining the pool range. 
+ Modified from Neutron DB based class + + """ + + pools = [] + # Auto allocate the pool around gateway_ip + net = netaddr.IPNetwork(subnet['cidr']) + first_ip = net.first + 2 + last_ip = net.last - 1 + gw_ip = int(netaddr.IPAddress(subnet['gateway_ip'] or net.last)) + # Use the gw_ip to find a point for splitting allocation pools + # for this subnet + split_ip = min(max(gw_ip, net.first), net.last) + if split_ip > first_ip: + pools.append({'start': str(netaddr.IPAddress(first_ip)), + 'end': str(netaddr.IPAddress(split_ip - 1))}) + if split_ip < last_ip: + pools.append({'start': str(netaddr.IPAddress(split_ip + 1)), + 'end': str(netaddr.IPAddress(last_ip))}) + # return auto-generated pools + # no need to check for their validity + return pools diff --git a/neutron/plugins/ryu/README b/neutron/plugins/ryu/README new file mode 100644 index 000000000..054c69a86 --- /dev/null +++ b/neutron/plugins/ryu/README @@ -0,0 +1,22 @@ +Neutron plugin for Ryu Network Operating System +This directory includes neutron plugin for Ryu Network Operating System. + +# -- Installation + +For how to install/set up this plugin with Ryu and OpenStack, please refer to +https://github.com/osrg/ryu/wiki/OpenStack + +# -- Ryu General + +For general Ryu stuff, please refer to +http://www.osrg.net/ryu/ + +Ryu is available at github +git://github.com/osrg/ryu.git +https://github.com/osrg/ryu + +The mailing is at +ryu-devel@lists.sourceforge.net +https://lists.sourceforge.net/lists/listinfo/ryu-devel + +Enjoy! 
diff --git a/neutron/plugins/ryu/__init__.py b/neutron/plugins/ryu/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/neutron/plugins/ryu/agent/__init__.py b/neutron/plugins/ryu/agent/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/neutron/plugins/ryu/agent/ryu_neutron_agent.py b/neutron/plugins/ryu/agent/ryu_neutron_agent.py new file mode 100755 index 000000000..d1fac3185 --- /dev/null +++ b/neutron/plugins/ryu/agent/ryu_neutron_agent.py @@ -0,0 +1,314 @@ +#!/usr/bin/env python +# Copyright 2012 Isaku Yamahata +# Based on openvswitch agent. +# +# Copyright 2011 VMware, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# @author: Isaku Yamahata + +import httplib +import socket +import sys +import time + +import eventlet +eventlet.monkey_patch() + +from oslo.config import cfg +from ryu.app import client +from ryu.app import conf_switch_key +from ryu.app import rest_nw_id + +from neutron.agent.linux import ip_lib +from neutron.agent.linux import ovs_lib +from neutron.agent import rpc as agent_rpc +from neutron.agent import securitygroups_rpc as sg_rpc +from neutron.common import config as common_config +from neutron.common import exceptions as n_exc +from neutron.common import rpc_compat +from neutron.common import topics +from neutron import context as q_context +from neutron.extensions import securitygroup as ext_sg +from neutron.openstack.common import log +from neutron.plugins.ryu.common import config # noqa + + +LOG = log.getLogger(__name__) + + +# This is copied of nova.flags._get_my_ip() +# Agent shouldn't depend on nova module +def _get_my_ip(): + """Return the actual ip of the local machine. + + This code figures out what source address would be used if some traffic + were to be sent out to some well known address on the Internet. In this + case, a Google DNS server is used, but the specific address does not + matter much. No traffic is actually sent. 
+ """ + csock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) + csock.connect(('8.8.8.8', 80)) + (addr, _port) = csock.getsockname() + csock.close() + return addr + + +def _get_ip_from_nic(nic): + ip_wrapper = ip_lib.IPWrapper() + dev = ip_wrapper.device(nic) + addrs = dev.addr.list(scope='global') + for addr in addrs: + if addr['ip_version'] == 4: + return addr['cidr'].split('/')[0] + + +def _get_ip(cfg_ip_str, cfg_interface_str): + ip = None + try: + ip = getattr(cfg.CONF.OVS, cfg_ip_str) + except (cfg.NoSuchOptError, cfg.NoSuchGroupError): + pass + if ip: + return ip + + iface = None + try: + iface = getattr(cfg.CONF.OVS, cfg_interface_str) + except (cfg.NoSuchOptError, cfg.NoSuchGroupError): + pass + if iface: + ip = _get_ip_from_nic(iface) + if ip: + return ip + LOG.warning(_('Could not get IPv4 address from %(nic)s: %(cfg)s'), + {'nic': iface, 'cfg': cfg_interface_str}) + + return _get_my_ip() + + +def _get_tunnel_ip(): + return _get_ip('tunnel_ip', 'tunnel_interface') + + +def _get_ovsdb_ip(): + return _get_ip('ovsdb_ip', 'ovsdb_interface') + + +class OVSBridge(ovs_lib.OVSBridge): + def __init__(self, br_name, root_helper): + ovs_lib.OVSBridge.__init__(self, br_name, root_helper) + self.datapath_id = None + + def find_datapath_id(self): + self.datapath_id = self.get_datapath_id() + + def set_manager(self, target): + self.run_vsctl(["set-manager", target]) + + def get_ofport(self, name): + return self.db_get_val("Interface", name, "ofport") + + def _get_ports(self, get_port): + ports = [] + port_names = self.get_port_name_list() + for name in port_names: + if self.get_ofport(name) < 0: + continue + port = get_port(name) + if port: + ports.append(port) + + return ports + + def _get_external_port(self, name): + # exclude vif ports + external_ids = self.db_get_map("Interface", name, "external_ids") + if external_ids: + return + + # exclude tunnel ports + options = self.db_get_map("Interface", name, "options") + if "remote_ip" in options: + return + + ofport = 
self.get_ofport(name) + return ovs_lib.VifPort(name, ofport, None, None, self) + + def get_external_ports(self): + return self._get_ports(self._get_external_port) + + +class VifPortSet(object): + def __init__(self, int_br, ryu_rest_client): + super(VifPortSet, self).__init__() + self.int_br = int_br + self.api = ryu_rest_client + + def setup(self): + for port in self.int_br.get_external_ports(): + LOG.debug(_('External port %s'), port) + self.api.update_port(rest_nw_id.NW_ID_EXTERNAL, + port.switch.datapath_id, port.ofport) + + +class RyuPluginApi(agent_rpc.PluginApi, + sg_rpc.SecurityGroupServerRpcApiMixin): + def get_ofp_rest_api_addr(self, context): + LOG.debug(_("Get Ryu rest API address")) + return self.call(context, + self.make_msg('get_ofp_rest_api'), + topic=self.topic) + + +class RyuSecurityGroupAgent(sg_rpc.SecurityGroupAgentRpcMixin): + def __init__(self, context, plugin_rpc, root_helper): + self.context = context + self.plugin_rpc = plugin_rpc + self.root_helper = root_helper + self.init_firewall() + + +class OVSNeutronOFPRyuAgent(rpc_compat.RpcCallback, + sg_rpc.SecurityGroupAgentRpcCallbackMixin): + + RPC_API_VERSION = '1.1' + + def __init__(self, integ_br, tunnel_ip, ovsdb_ip, ovsdb_port, + polling_interval, root_helper): + super(OVSNeutronOFPRyuAgent, self).__init__() + self.polling_interval = polling_interval + self._setup_rpc() + self.sg_agent = RyuSecurityGroupAgent(self.context, + self.plugin_rpc, + root_helper) + self._setup_integration_br(root_helper, integ_br, tunnel_ip, + ovsdb_port, ovsdb_ip) + + def _setup_rpc(self): + self.topic = topics.AGENT + self.plugin_rpc = RyuPluginApi(topics.PLUGIN) + self.context = q_context.get_admin_context_without_session() + self.endpoints = [self] + consumers = [[topics.PORT, topics.UPDATE], + [topics.SECURITY_GROUP, topics.UPDATE]] + self.connection = agent_rpc.create_consumers(self.endpoints, + self.topic, + consumers) + + def _setup_integration_br(self, root_helper, integ_br, + tunnel_ip, ovsdb_port, 
ovsdb_ip): + self.int_br = OVSBridge(integ_br, root_helper) + self.int_br.find_datapath_id() + + rest_api_addr = self.plugin_rpc.get_ofp_rest_api_addr(self.context) + if not rest_api_addr: + raise n_exc.Invalid(_("Ryu rest API port isn't specified")) + LOG.debug(_("Going to ofp controller mode %s"), rest_api_addr) + + ryu_rest_client = client.OFPClient(rest_api_addr) + + self.vif_ports = VifPortSet(self.int_br, ryu_rest_client) + self.vif_ports.setup() + + sc_client = client.SwitchConfClient(rest_api_addr) + sc_client.set_key(self.int_br.datapath_id, + conf_switch_key.OVS_TUNNEL_ADDR, tunnel_ip) + + # Currently Ryu supports only tcp methods. (ssl isn't supported yet) + self.int_br.set_manager('ptcp:%d' % ovsdb_port) + sc_client.set_key(self.int_br.datapath_id, conf_switch_key.OVSDB_ADDR, + 'tcp:%s:%d' % (ovsdb_ip, ovsdb_port)) + + def port_update(self, context, **kwargs): + LOG.debug(_("Port update received")) + port = kwargs.get('port') + vif_port = self.int_br.get_vif_port_by_id(port['id']) + if not vif_port: + return + + if ext_sg.SECURITYGROUPS in port: + self.sg_agent.refresh_firewall() + + def _update_ports(self, registered_ports): + ports = self.int_br.get_vif_port_set() + if ports == registered_ports: + return + added = ports - registered_ports + removed = registered_ports - ports + return {'current': ports, + 'added': added, + 'removed': removed} + + def _process_devices_filter(self, port_info): + if 'added' in port_info: + self.sg_agent.prepare_devices_filter(port_info['added']) + if 'removed' in port_info: + self.sg_agent.remove_devices_filter(port_info['removed']) + + def daemon_loop(self): + ports = set() + + while True: + start = time.time() + try: + port_info = self._update_ports(ports) + if port_info: + LOG.debug(_("Agent loop has new device")) + self._process_devices_filter(port_info) + ports = port_info['current'] + except Exception: + LOG.exception(_("Error in agent event loop")) + + elapsed = max(time.time() - start, 0) + if (elapsed < 
self.polling_interval): + time.sleep(self.polling_interval - elapsed) + else: + LOG.debug(_("Loop iteration exceeded interval " + "(%(polling_interval)s vs. %(elapsed)s)!"), + {'polling_interval': self.polling_interval, + 'elapsed': elapsed}) + + +def main(): + common_config.init(sys.argv[1:]) + + common_config.setup_logging(cfg.CONF) + + integ_br = cfg.CONF.OVS.integration_bridge + polling_interval = cfg.CONF.AGENT.polling_interval + root_helper = cfg.CONF.AGENT.root_helper + + tunnel_ip = _get_tunnel_ip() + LOG.debug(_('tunnel_ip %s'), tunnel_ip) + ovsdb_port = cfg.CONF.OVS.ovsdb_port + LOG.debug(_('ovsdb_port %s'), ovsdb_port) + ovsdb_ip = _get_ovsdb_ip() + LOG.debug(_('ovsdb_ip %s'), ovsdb_ip) + try: + agent = OVSNeutronOFPRyuAgent(integ_br, tunnel_ip, ovsdb_ip, + ovsdb_port, polling_interval, + root_helper) + except httplib.HTTPException as e: + LOG.error(_("Initialization failed: %s"), e) + sys.exit(1) + + LOG.info(_("Ryu initialization on the node is done. " + "Agent initialized successfully, now running...")) + agent.daemon_loop() + sys.exit(0) + + +if __name__ == "__main__": + main() diff --git a/neutron/plugins/ryu/common/__init__.py b/neutron/plugins/ryu/common/__init__.py new file mode 100644 index 000000000..e5f41adfe --- /dev/null +++ b/neutron/plugins/ryu/common/__init__.py @@ -0,0 +1,15 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
diff --git a/neutron/plugins/ryu/common/config.py b/neutron/plugins/ryu/common/config.py new file mode 100644 index 000000000..504166d58 --- /dev/null +++ b/neutron/plugins/ryu/common/config.py @@ -0,0 +1,52 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo.config import cfg + +from neutron.agent.common import config +from neutron.agent.linux import ovs_lib # noqa + +ovs_opts = [ + cfg.StrOpt('integration_bridge', default='br-int', + help=_("Integration bridge to use")), + cfg.StrOpt('openflow_rest_api', default='127.0.0.1:8080', + help=_("OpenFlow REST API location")), + cfg.IntOpt('tunnel_key_min', default=1, + help=_("Minimum tunnel ID to use")), + cfg.IntOpt('tunnel_key_max', default=0xffffff, + help=_("Maximum tunnel ID to use")), + cfg.StrOpt('tunnel_ip', + help=_("Tunnel IP to use")), + cfg.StrOpt('tunnel_interface', + help=_("Tunnel interface to use")), + cfg.IntOpt('ovsdb_port', default=6634, + help=_("OVSDB port to connect to")), + cfg.StrOpt('ovsdb_ip', + help=_("OVSDB IP to connect to")), + cfg.StrOpt('ovsdb_interface', + help=_("OVSDB interface to connect to")), +] + +agent_opts = [ + cfg.IntOpt('polling_interval', default=2, + help=_("The number of seconds the agent will wait between " + "polling for local device changes.")), +] + + +cfg.CONF.register_opts(ovs_opts, "OVS") +cfg.CONF.register_opts(agent_opts, "AGENT") +config.register_root_helper(cfg.CONF) diff 
--git a/neutron/plugins/ryu/db/__init__.py b/neutron/plugins/ryu/db/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/neutron/plugins/ryu/db/api_v2.py b/neutron/plugins/ryu/db/api_v2.py new file mode 100644 index 000000000..df4c904b5 --- /dev/null +++ b/neutron/plugins/ryu/db/api_v2.py @@ -0,0 +1,215 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# Copyright 2012 Isaku Yamahata +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import exc as sa_exc +from sqlalchemy import func +from sqlalchemy.orm import exc as orm_exc + +from neutron.common import exceptions as n_exc +import neutron.db.api as db +from neutron.db import models_v2 +from neutron.db import securitygroups_db as sg_db +from neutron.extensions import securitygroup as ext_sg +from neutron import manager +from neutron.openstack.common import log as logging +from neutron.plugins.ryu.db import models_v2 as ryu_models_v2 + + +LOG = logging.getLogger(__name__) + + +def network_all_tenant_list(): + session = db.get_session() + return session.query(models_v2.Network).all() + + +def get_port_from_device(port_id): + LOG.debug(_("get_port_from_device() called:port_id=%s"), port_id) + session = db.get_session() + sg_binding_port = sg_db.SecurityGroupPortBinding.port_id + + query = session.query(models_v2.Port, + sg_db.SecurityGroupPortBinding.security_group_id) + query = query.outerjoin(sg_db.SecurityGroupPortBinding, + models_v2.Port.id == 
sg_binding_port) + query = query.filter(models_v2.Port.id == port_id) + port_and_sgs = query.all() + if not port_and_sgs: + return None + port = port_and_sgs[0][0] + plugin = manager.NeutronManager.get_plugin() + port_dict = plugin._make_port_dict(port) + port_dict[ext_sg.SECURITYGROUPS] = [ + sg_id for port_, sg_id in port_and_sgs if sg_id] + port_dict['security_group_rules'] = [] + port_dict['security_group_source_groups'] = [] + port_dict['fixed_ips'] = [ip['ip_address'] for ip in port['fixed_ips']] + return port_dict + + +class TunnelKey(object): + # VLAN: 12 bits + # GRE, VXLAN: 24bits + # TODO(yamahata): STT: 64bits + _KEY_MIN_HARD = 1 + _KEY_MAX_HARD = 0xffffffff + + def __init__(self, key_min=_KEY_MIN_HARD, key_max=_KEY_MAX_HARD): + self.key_min = key_min + self.key_max = key_max + + if (key_min < self._KEY_MIN_HARD or key_max > self._KEY_MAX_HARD or + key_min > key_max): + raise ValueError(_('Invalid tunnel key options ' + 'tunnel_key_min: %(key_min)d ' + 'tunnel_key_max: %(key_max)d. ' + 'Using default value') % {'key_min': key_min, + 'key_max': key_max}) + + def _last_key(self, session): + try: + return session.query(ryu_models_v2.TunnelKeyLast).one() + except orm_exc.MultipleResultsFound: + max_key = session.query( + func.max(ryu_models_v2.TunnelKeyLast.last_key)) + if max_key > self.key_max: + max_key = self.key_min + + session.query(ryu_models_v2.TunnelKeyLast).delete() + last_key = ryu_models_v2.TunnelKeyLast(last_key=max_key) + except orm_exc.NoResultFound: + last_key = ryu_models_v2.TunnelKeyLast(last_key=self.key_min) + + session.add(last_key) + session.flush() + return session.query(ryu_models_v2.TunnelKeyLast).one() + + def _find_key(self, session, last_key): + """Try to find unused tunnel key. + + Trying to find unused tunnel key in TunnelKey table starting + from last_key + 1. + When all keys are used, raise sqlalchemy.orm.exc.NoResultFound + """ + # key 0 is used for special meanings. So don't allocate 0. 
+ + # sqlite doesn't support + # '(select order by limit) union all (select order by limit) ' + # 'order by limit' + # So do it manually + # new_key = session.query("new_key").from_statement( + # # If last_key + 1 isn't used, it's the result + # 'SELECT new_key ' + # 'FROM (SELECT :last_key + 1 AS new_key) q1 ' + # 'WHERE NOT EXISTS ' + # '(SELECT 1 FROM tunnelkeys WHERE tunnel_key = :last_key + 1) ' + # + # 'UNION ALL ' + # + # # if last_key + 1 used, + # # find the least unused key from last_key + 1 + # '(SELECT t.tunnel_key + 1 AS new_key ' + # 'FROM tunnelkeys t ' + # 'WHERE NOT EXISTS ' + # '(SELECT 1 FROM tunnelkeys ti ' + # ' WHERE ti.tunnel_key = t.tunnel_key + 1) ' + # 'AND t.tunnel_key >= :last_key ' + # 'ORDER BY new_key LIMIT 1) ' + # + # 'ORDER BY new_key LIMIT 1' + # ).params(last_key=last_key).one() + try: + new_key = session.query("new_key").from_statement( + # If last_key + 1 isn't used, it's the result + 'SELECT new_key ' + 'FROM (SELECT :last_key + 1 AS new_key) q1 ' + 'WHERE NOT EXISTS ' + '(SELECT 1 FROM tunnelkeys WHERE tunnel_key = :last_key + 1) ' + ).params(last_key=last_key).one() + except orm_exc.NoResultFound: + new_key = session.query("new_key").from_statement( + # if last_key + 1 used, + # find the least unused key from last_key + 1 + '(SELECT t.tunnel_key + 1 AS new_key ' + 'FROM tunnelkeys t ' + 'WHERE NOT EXISTS ' + '(SELECT 1 FROM tunnelkeys ti ' + ' WHERE ti.tunnel_key = t.tunnel_key + 1) ' + 'AND t.tunnel_key >= :last_key ' + 'ORDER BY new_key LIMIT 1) ' + ).params(last_key=last_key).one() + + new_key = new_key[0] # the result is tuple. 
+ LOG.debug(_("last_key %(last_key)s new_key %(new_key)s"), + {'last_key': last_key, 'new_key': new_key}) + if new_key > self.key_max: + LOG.debug(_("No key found")) + raise orm_exc.NoResultFound() + return new_key + + def _allocate(self, session, network_id): + last_key = self._last_key(session) + try: + new_key = self._find_key(session, last_key.last_key) + except orm_exc.NoResultFound: + new_key = self._find_key(session, self.key_min) + + tunnel_key = ryu_models_v2.TunnelKey(network_id=network_id, + tunnel_key=new_key) + last_key.last_key = new_key + session.add(tunnel_key) + return new_key + + _TRANSACTION_RETRY_MAX = 16 + + def allocate(self, session, network_id): + count = 0 + while True: + session.begin(subtransactions=True) + try: + new_key = self._allocate(session, network_id) + session.commit() + break + except sa_exc.SQLAlchemyError: + session.rollback() + + count += 1 + if count > self._TRANSACTION_RETRY_MAX: + # if this happens too often, increase _TRANSACTION_RETRY_MAX + LOG.warn(_("Transaction retry exhausted (%d). " + "Abandoned tunnel key allocation."), count) + raise n_exc.ResourceExhausted() + + return new_key + + def delete(self, session, network_id): + session.query(ryu_models_v2.TunnelKey).filter_by( + network_id=network_id).delete() + session.flush() + + def all_list(self): + session = db.get_session() + return session.query(ryu_models_v2.TunnelKey).all() + + +def set_port_status(session, port_id, status): + try: + port = session.query(models_v2.Port).filter_by(id=port_id).one() + port['status'] = status + session.merge(port) + session.flush() + except orm_exc.NoResultFound: + raise n_exc.PortNotFound(port_id=port_id) diff --git a/neutron/plugins/ryu/db/models_v2.py b/neutron/plugins/ryu/db/models_v2.py new file mode 100644 index 000000000..cf10e1732 --- /dev/null +++ b/neutron/plugins/ryu/db/models_v2.py @@ -0,0 +1,41 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# Copyright 2012 Isaku Yamahata +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import sqlalchemy as sa + +from neutron.db import model_base + + +class TunnelKeyLast(model_base.BASEV2): + """Last allocated Tunnel key. + + The next key allocation will be started from this value + 1 + """ + last_key = sa.Column(sa.Integer, primary_key=True) + + def __repr__(self): + return "" % self.last_key + + +class TunnelKey(model_base.BASEV2): + """Netowrk ID <-> tunnel key mapping.""" + network_id = sa.Column(sa.String(36), sa.ForeignKey("networks.id"), + nullable=False) + tunnel_key = sa.Column(sa.Integer, primary_key=True, + nullable=False, autoincrement=False) + + def __repr__(self): + return "" % (self.network_id, self.tunnel_key) diff --git a/neutron/plugins/ryu/ryu_neutron_plugin.py b/neutron/plugins/ryu/ryu_neutron_plugin.py new file mode 100644 index 000000000..9fd6bf989 --- /dev/null +++ b/neutron/plugins/ryu/ryu_neutron_plugin.py @@ -0,0 +1,269 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# Copyright 2012 Isaku Yamahata +# +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. +# @author: Isaku Yamahata + +from oslo.config import cfg +from ryu.app import client +from ryu.app import rest_nw_id + +from neutron.agent import securitygroups_rpc as sg_rpc +from neutron.common import constants as q_const +from neutron.common import exceptions as n_exc +from neutron.common import rpc_compat +from neutron.common import topics +from neutron.db import api as db +from neutron.db import db_base_plugin_v2 +from neutron.db import dhcp_rpc_base +from neutron.db import external_net_db +from neutron.db import extraroute_db +from neutron.db import l3_gwmode_db +from neutron.db import l3_rpc_base +from neutron.db import models_v2 +from neutron.db import portbindings_base +from neutron.db import securitygroups_rpc_base as sg_db_rpc +from neutron.extensions import portbindings +from neutron.openstack.common import excutils +from neutron.openstack.common import log as logging +from neutron.plugins.common import constants as svc_constants +from neutron.plugins.ryu.common import config # noqa +from neutron.plugins.ryu.db import api_v2 as db_api_v2 + + +LOG = logging.getLogger(__name__) + + +class RyuRpcCallbacks(rpc_compat.RpcCallback, + dhcp_rpc_base.DhcpRpcCallbackMixin, + l3_rpc_base.L3RpcCallbackMixin, + sg_db_rpc.SecurityGroupServerRpcCallbackMixin): + + RPC_API_VERSION = '1.1' + + def __init__(self, ofp_rest_api_addr): + super(RyuRpcCallbacks, self).__init__() + self.ofp_rest_api_addr = ofp_rest_api_addr + + def get_ofp_rest_api(self, context, **kwargs): + LOG.debug(_("get_ofp_rest_api: %s"), self.ofp_rest_api_addr) + return self.ofp_rest_api_addr + + @classmethod + def get_port_from_device(cls, device): + port = db_api_v2.get_port_from_device(device) + if port: + port['device'] = device + return port + + +class AgentNotifierApi(rpc_compat.RpcProxy, + sg_rpc.SecurityGroupAgentRpcApiMixin): + + BASE_RPC_API_VERSION = '1.0' + + def __init__(self, topic): 
+ super(AgentNotifierApi, self).__init__( + topic=topic, default_version=self.BASE_RPC_API_VERSION) + self.topic_port_update = topics.get_topic_name(topic, + topics.PORT, + topics.UPDATE) + + def port_update(self, context, port): + self.fanout_cast(context, + self.make_msg('port_update', port=port), + topic=self.topic_port_update) + + +class RyuNeutronPluginV2(db_base_plugin_v2.NeutronDbPluginV2, + external_net_db.External_net_db_mixin, + extraroute_db.ExtraRoute_db_mixin, + l3_gwmode_db.L3_NAT_db_mixin, + sg_db_rpc.SecurityGroupServerRpcMixin, + portbindings_base.PortBindingBaseMixin): + + _supported_extension_aliases = ["external-net", "router", "ext-gw-mode", + "extraroute", "security-group", + "binding", "quotas"] + + @property + def supported_extension_aliases(self): + if not hasattr(self, '_aliases'): + aliases = self._supported_extension_aliases[:] + sg_rpc.disable_security_group_extension_by_config(aliases) + self._aliases = aliases + return self._aliases + + def __init__(self, configfile=None): + super(RyuNeutronPluginV2, self).__init__() + self.base_binding_dict = { + portbindings.VIF_TYPE: portbindings.VIF_TYPE_OVS, + portbindings.VIF_DETAILS: { + # TODO(rkukura): Replace with new VIF security details + portbindings.CAP_PORT_FILTER: + 'security-group' in self.supported_extension_aliases, + portbindings.OVS_HYBRID_PLUG: True + } + } + portbindings_base.register_port_dict_function() + self.tunnel_key = db_api_v2.TunnelKey( + cfg.CONF.OVS.tunnel_key_min, cfg.CONF.OVS.tunnel_key_max) + self.ofp_api_host = cfg.CONF.OVS.openflow_rest_api + if not self.ofp_api_host: + raise n_exc.Invalid(_('Invalid configuration. 
check ryu.ini')) + + self.client = client.OFPClient(self.ofp_api_host) + self.tun_client = client.TunnelClient(self.ofp_api_host) + self.iface_client = client.NeutronIfaceClient(self.ofp_api_host) + for nw_id in rest_nw_id.RESERVED_NETWORK_IDS: + if nw_id != rest_nw_id.NW_ID_UNKNOWN: + self.client.update_network(nw_id) + self._setup_rpc() + + # register known all network list on startup + self._create_all_tenant_network() + + def _setup_rpc(self): + self.service_topics = {svc_constants.CORE: topics.PLUGIN, + svc_constants.L3_ROUTER_NAT: topics.L3PLUGIN} + self.conn = rpc_compat.create_connection(new=True) + self.notifier = AgentNotifierApi(topics.AGENT) + self.endpoints = [RyuRpcCallbacks(self.ofp_api_host)] + for svc_topic in self.service_topics.values(): + self.conn.create_consumer(svc_topic, self.endpoints, fanout=False) + self.conn.consume_in_threads() + + def _create_all_tenant_network(self): + for net in db_api_v2.network_all_tenant_list(): + self.client.update_network(net.id) + for tun in self.tunnel_key.all_list(): + self.tun_client.update_tunnel_key(tun.network_id, tun.tunnel_key) + session = db.get_session() + for port in session.query(models_v2.Port): + self.iface_client.update_network_id(port.id, port.network_id) + + def _client_create_network(self, net_id, tunnel_key): + self.client.create_network(net_id) + self.tun_client.create_tunnel_key(net_id, tunnel_key) + + def _client_delete_network(self, net_id): + RyuNeutronPluginV2._safe_client_delete_network(self.safe_reference, + net_id) + + @staticmethod + def _safe_client_delete_network(safe_reference, net_id): + # Avoid handing naked plugin references to the client. When + # the client is mocked for testing, such references can + # prevent the plugin from being deallocated. 
+ client.ignore_http_not_found( + lambda: safe_reference.client.delete_network(net_id)) + client.ignore_http_not_found( + lambda: safe_reference.tun_client.delete_tunnel_key(net_id)) + + def create_network(self, context, network): + session = context.session + with session.begin(subtransactions=True): + #set up default security groups + tenant_id = self._get_tenant_id_for_create( + context, network['network']) + self._ensure_default_security_group(context, tenant_id) + + net = super(RyuNeutronPluginV2, self).create_network(context, + network) + self._process_l3_create(context, net, network['network']) + + tunnel_key = self.tunnel_key.allocate(session, net['id']) + try: + self._client_create_network(net['id'], tunnel_key) + except Exception: + with excutils.save_and_reraise_exception(): + self._client_delete_network(net['id']) + + return net + + def update_network(self, context, id, network): + session = context.session + with session.begin(subtransactions=True): + net = super(RyuNeutronPluginV2, self).update_network(context, id, + network) + self._process_l3_update(context, net, network['network']) + return net + + def delete_network(self, context, id): + self._client_delete_network(id) + session = context.session + with session.begin(subtransactions=True): + self.tunnel_key.delete(session, id) + self._process_l3_delete(context, id) + super(RyuNeutronPluginV2, self).delete_network(context, id) + + def create_port(self, context, port): + session = context.session + port_data = port['port'] + with session.begin(subtransactions=True): + self._ensure_default_security_group_on_port(context, port) + sgids = self._get_security_groups_on_port(context, port) + port = super(RyuNeutronPluginV2, self).create_port(context, port) + self._process_portbindings_create_and_update(context, + port_data, + port) + self._process_port_create_security_group( + context, port, sgids) + self.notify_security_groups_member_updated(context, port) + 
self.iface_client.create_network_id(port['id'], port['network_id']) + return port + + def delete_port(self, context, id, l3_port_check=True): + # if needed, check to see if this is a port owned by + # and l3-router. If so, we should prevent deletion. + if l3_port_check: + self.prevent_l3_port_deletion(context, id) + + with context.session.begin(subtransactions=True): + self.disassociate_floatingips(context, id) + port = self.get_port(context, id) + self._delete_port_security_group_bindings(context, id) + super(RyuNeutronPluginV2, self).delete_port(context, id) + + self.notify_security_groups_member_updated(context, port) + + def update_port(self, context, id, port): + deleted = port['port'].get('deleted', False) + session = context.session + + need_port_update_notify = False + with session.begin(subtransactions=True): + original_port = super(RyuNeutronPluginV2, self).get_port( + context, id) + updated_port = super(RyuNeutronPluginV2, self).update_port( + context, id, port) + self._process_portbindings_create_and_update(context, + port['port'], + updated_port) + need_port_update_notify = self.update_security_group_on_port( + context, id, port, original_port, updated_port) + + need_port_update_notify |= self.is_security_group_member_updated( + context, original_port, updated_port) + + need_port_update_notify |= (original_port['admin_state_up'] != + updated_port['admin_state_up']) + + if need_port_update_notify: + self.notifier.port_update(context, updated_port) + + if deleted: + db_api_v2.set_port_status(session, id, q_const.PORT_STATUS_DOWN) + return updated_port diff --git a/neutron/plugins/vmware/__init__.py b/neutron/plugins/vmware/__init__.py new file mode 100644 index 000000000..a62818888 --- /dev/null +++ b/neutron/plugins/vmware/__init__.py @@ -0,0 +1,3 @@ +import os + +NSX_EXT_PATH = os.path.join(os.path.dirname(__file__), 'extensions') diff --git a/neutron/plugins/vmware/api_client/__init__.py b/neutron/plugins/vmware/api_client/__init__.py new file mode 
100644 index 000000000..6b7126b02 --- /dev/null +++ b/neutron/plugins/vmware/api_client/__init__.py @@ -0,0 +1,29 @@ +# Copyright 2012 VMware, Inc. +# +# All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +import httplib + + +def ctrl_conn_to_str(conn): + """Returns a string representing a connection URL to the controller.""" + if isinstance(conn, httplib.HTTPSConnection): + proto = "https://" + elif isinstance(conn, httplib.HTTPConnection): + proto = "http://" + else: + raise TypeError(_('Invalid connection type: %s') % type(conn)) + return "%s%s:%s" % (proto, conn.host, conn.port) diff --git a/neutron/plugins/vmware/api_client/base.py b/neutron/plugins/vmware/api_client/base.py new file mode 100644 index 000000000..e8998b5cd --- /dev/null +++ b/neutron/plugins/vmware/api_client/base.py @@ -0,0 +1,249 @@ +# Copyright 2012 VMware, Inc. +# +# All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import abc +import httplib +import six +import time + +from neutron.openstack.common import log as logging +from neutron.plugins.vmware import api_client + +LOG = logging.getLogger(__name__) + +GENERATION_ID_TIMEOUT = -1 +DEFAULT_CONCURRENT_CONNECTIONS = 3 +DEFAULT_CONNECT_TIMEOUT = 5 + + +@six.add_metaclass(abc.ABCMeta) +class ApiClientBase(object): + """An abstract baseclass for all API client implementations.""" + + CONN_IDLE_TIMEOUT = 60 * 15 + + def _create_connection(self, host, port, is_ssl): + if is_ssl: + return httplib.HTTPSConnection(host, port, + timeout=self._connect_timeout) + return httplib.HTTPConnection(host, port, + timeout=self._connect_timeout) + + @staticmethod + def _conn_params(http_conn): + is_ssl = isinstance(http_conn, httplib.HTTPSConnection) + return (http_conn.host, http_conn.port, is_ssl) + + @property + def user(self): + return self._user + + @property + def password(self): + return self._password + + @property + def config_gen(self): + # If NSX_gen_timeout is not -1 then: + # Maintain a timestamp along with the generation ID. Hold onto the + # ID long enough to be useful and block on sequential requests but + # not long enough to persist when Onix db is cleared, which resets + # the generation ID, causing the DAL to block indefinitely with some + # number that's higher than the cluster's value. 
+ if self._gen_timeout != -1: + ts = self._config_gen_ts + if ts is not None: + if (time.time() - ts) > self._gen_timeout: + return None + return self._config_gen + + @config_gen.setter + def config_gen(self, value): + if self._config_gen != value: + if self._gen_timeout != -1: + self._config_gen_ts = time.time() + self._config_gen = value + + def auth_cookie(self, conn): + cookie = None + data = self._get_provider_data(conn) + if data: + cookie = data[1] + return cookie + + def set_auth_cookie(self, conn, cookie): + data = self._get_provider_data(conn) + if data: + self._set_provider_data(conn, (data[0], cookie)) + + def acquire_connection(self, auto_login=True, headers=None, rid=-1): + '''Check out an available HTTPConnection instance. + + Blocks until a connection is available. + :auto_login: automatically logins before returning conn + :headers: header to pass on to login attempt + :param rid: request id passed in from request eventlet. + :returns: An available HTTPConnection instance or None if no + api_providers are configured. + ''' + if not self._api_providers: + LOG.warn(_("[%d] no API providers currently available."), rid) + return None + if self._conn_pool.empty(): + LOG.debug(_("[%d] Waiting to acquire API client connection."), rid) + priority, conn = self._conn_pool.get() + now = time.time() + if getattr(conn, 'last_used', now) < now - self.CONN_IDLE_TIMEOUT: + LOG.info(_("[%(rid)d] Connection %(conn)s idle for %(sec)0.2f " + "seconds; reconnecting."), + {'rid': rid, 'conn': api_client.ctrl_conn_to_str(conn), + 'sec': now - conn.last_used}) + conn = self._create_connection(*self._conn_params(conn)) + + conn.last_used = now + conn.priority = priority # stash current priority for release + qsize = self._conn_pool.qsize() + LOG.debug(_("[%(rid)d] Acquired connection %(conn)s. 
%(qsize)d " + "connection(s) available."), + {'rid': rid, 'conn': api_client.ctrl_conn_to_str(conn), + 'qsize': qsize}) + if auto_login and self.auth_cookie(conn) is None: + self._wait_for_login(conn, headers) + return conn + + def release_connection(self, http_conn, bad_state=False, + service_unavail=False, rid=-1): + '''Mark HTTPConnection instance as available for check-out. + + :param http_conn: An HTTPConnection instance obtained from this + instance. + :param bad_state: True if http_conn is known to be in a bad state + (e.g. connection fault.) + :service_unavail: True if http_conn returned 503 response. + :param rid: request id passed in from request eventlet. + ''' + conn_params = self._conn_params(http_conn) + if self._conn_params(http_conn) not in self._api_providers: + LOG.debug(_("[%(rid)d] Released connection %(conn)s is not an " + "API provider for the cluster"), + {'rid': rid, + 'conn': api_client.ctrl_conn_to_str(http_conn)}) + return + elif hasattr(http_conn, "no_release"): + return + + if bad_state: + # Reconnect to provider. 
+ LOG.warn(_("[%(rid)d] Connection returned in bad state, " + "reconnecting to %(conn)s"), + {'rid': rid, + 'conn': api_client.ctrl_conn_to_str(http_conn)}) + http_conn = self._create_connection(*self._conn_params(http_conn)) + priority = self._next_conn_priority + self._next_conn_priority += 1 + elif service_unavail: + # http_conn returned a service unaviable response, put other + # connections to the same controller at end of priority queue, + conns = [] + while not self._conn_pool.empty(): + priority, conn = self._conn_pool.get() + if self._conn_params(conn) == conn_params: + priority = self._next_conn_priority + self._next_conn_priority += 1 + conns.append((priority, conn)) + for priority, conn in conns: + self._conn_pool.put((priority, conn)) + # put http_conn at end of queue also + priority = self._next_conn_priority + self._next_conn_priority += 1 + else: + priority = http_conn.priority + + self._conn_pool.put((priority, http_conn)) + LOG.debug(_("[%(rid)d] Released connection %(conn)s. %(qsize)d " + "connection(s) available."), + {'rid': rid, 'conn': api_client.ctrl_conn_to_str(http_conn), + 'qsize': self._conn_pool.qsize()}) + + def _wait_for_login(self, conn, headers=None): + '''Block until a login has occurred for the current API provider.''' + + data = self._get_provider_data(conn) + if data is None: + LOG.error(_("Login request for an invalid connection: '%s'"), + api_client.ctrl_conn_to_str(conn)) + return + provider_sem = data[0] + if provider_sem.acquire(blocking=False): + try: + cookie = self._login(conn, headers) + self.set_auth_cookie(conn, cookie) + finally: + provider_sem.release() + else: + LOG.debug(_("Waiting for auth to complete")) + # Wait until we can acquire then release + provider_sem.acquire(blocking=True) + provider_sem.release() + + def _get_provider_data(self, conn_or_conn_params, default=None): + """Get data for specified API provider. 
+ + Args: + conn_or_conn_params: either a HTTP(S)Connection object or the + resolved conn_params tuple returned by self._conn_params(). + default: conn_params if ones passed aren't known + Returns: Data associated with specified provider + """ + conn_params = self._normalize_conn_params(conn_or_conn_params) + return self._api_provider_data.get(conn_params, default) + + def _set_provider_data(self, conn_or_conn_params, data): + """Set data for specified API provider. + + Args: + conn_or_conn_params: either a HTTP(S)Connection object or the + resolved conn_params tuple returned by self._conn_params(). + data: data to associate with API provider + """ + conn_params = self._normalize_conn_params(conn_or_conn_params) + if data is None: + del self._api_provider_data[conn_params] + else: + self._api_provider_data[conn_params] = data + + def _normalize_conn_params(self, conn_or_conn_params): + """Normalize conn_param tuple. + + Args: + conn_or_conn_params: either a HTTP(S)Connection object or the + resolved conn_params tuple returned by self._conn_params(). + + Returns: Normalized conn_param tuple + """ + if (not isinstance(conn_or_conn_params, tuple) and + not isinstance(conn_or_conn_params, httplib.HTTPConnection)): + LOG.debug(_("Invalid conn_params value: '%s'"), + str(conn_or_conn_params)) + return conn_or_conn_params + if isinstance(conn_or_conn_params, httplib.HTTPConnection): + conn_params = self._conn_params(conn_or_conn_params) + else: + conn_params = conn_or_conn_params + host, port, is_ssl = conn_params + if port is None: + port = 443 if is_ssl else 80 + return (host, port, is_ssl) diff --git a/neutron/plugins/vmware/api_client/client.py b/neutron/plugins/vmware/api_client/client.py new file mode 100644 index 000000000..a6981a853 --- /dev/null +++ b/neutron/plugins/vmware/api_client/client.py @@ -0,0 +1,143 @@ +# Copyright 2012 VMware, Inc. 
+# +# All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +import httplib + +from neutron.openstack.common import log as logging +from neutron.plugins.vmware.api_client import base +from neutron.plugins.vmware.api_client import eventlet_client +from neutron.plugins.vmware.api_client import eventlet_request +from neutron.plugins.vmware.api_client import exception +from neutron.plugins.vmware.api_client import version + +LOG = logging.getLogger(__name__) + + +class NsxApiClient(eventlet_client.EventletApiClient): + """The Nsx API Client.""" + + def __init__(self, api_providers, user, password, + concurrent_connections=base.DEFAULT_CONCURRENT_CONNECTIONS, + gen_timeout=base.GENERATION_ID_TIMEOUT, + use_https=True, + connect_timeout=base.DEFAULT_CONNECT_TIMEOUT, + request_timeout=30, http_timeout=10, retries=2, redirects=2): + '''Constructor. Adds the following: + + :param request_timeout: all operations (including retries, redirects + from unresponsive controllers, etc) should finish within this + timeout. + :param http_timeout: how long to wait before aborting an + unresponsive controller (and allow for retries to another + controller in the cluster) + :param retries: the number of concurrent connections. + :param redirects: the number of concurrent connections. 
+ ''' + super(NsxApiClient, self).__init__( + api_providers, user, password, + concurrent_connections=concurrent_connections, + gen_timeout=gen_timeout, use_https=use_https, + connect_timeout=connect_timeout) + + self._request_timeout = request_timeout + self._http_timeout = http_timeout + self._retries = retries + self._redirects = redirects + self._version = None + + # NOTE(salvatore-orlando): This method is not used anymore. Login is now + # performed automatically inside the request eventlet if necessary. + def login(self, user=None, password=None): + '''Login to NSX controller. + + Assumes same password is used for all controllers. + + :param user: controller user (usually admin). Provided for + backwards compatibility. In the normal mode of operation + this should be None. + :param password: controller password. Provided for backwards + compatibility. In the normal mode of operation this should + be None. + ''' + if user: + self._user = user + if password: + self._password = password + + return self._login() + + def request(self, method, url, body="", content_type="application/json"): + '''Issues request to controller.''' + + g = eventlet_request.GenericRequestEventlet( + self, method, url, body, content_type, auto_login=True, + request_timeout=self._request_timeout, + http_timeout=self._http_timeout, + retries=self._retries, redirects=self._redirects) + g.start() + response = g.join() + LOG.debug(_('Request returns "%s"'), response) + + # response is a modified HTTPResponse object or None. + # response.read() will not work on response as the underlying library + # request_eventlet.ApiRequestEventlet has already called this + # method in order to extract the body and headers for processing. + # ApiRequestEventlet derived classes call .read() and + # .getheaders() on the HTTPResponse objects and store the results in + # the response object's .body and .headers data members for future + # access. + + if response is None: + # Timeout. 
+ LOG.error(_('Request timed out: %(method)s to %(url)s'), + {'method': method, 'url': url}) + raise exception.RequestTimeout() + + status = response.status + if status == httplib.UNAUTHORIZED: + raise exception.UnAuthorizedRequest() + + # Fail-fast: Check for exception conditions and raise the + # appropriate exceptions for known error codes. + if status in exception.ERROR_MAPPINGS: + LOG.error(_("Received error code: %s"), status) + LOG.error(_("Server Error Message: %s"), response.body) + exception.ERROR_MAPPINGS[status](response) + + # Continue processing for non-error condition. + if (status != httplib.OK and status != httplib.CREATED + and status != httplib.NO_CONTENT): + LOG.error(_("%(method)s to %(url)s, unexpected response code: " + "%(status)d (content = '%(body)s')"), + {'method': method, 'url': url, + 'status': response.status, 'body': response.body}) + return None + + if not self._version: + self._version = version.find_version(response.headers) + return response.body + + def get_version(self): + if not self._version: + # Determine the controller version by querying the + # cluster nodes. Currently, the version will be the + # one of the server that responds. + self.request('GET', '/ws.v1/control-cluster/node') + if not self._version: + LOG.error(_('Unable to determine NSX version. ' + 'Plugin might not work as expected.')) + return self._version diff --git a/neutron/plugins/vmware/api_client/eventlet_client.py b/neutron/plugins/vmware/api_client/eventlet_client.py new file mode 100644 index 000000000..fa0cd1f3e --- /dev/null +++ b/neutron/plugins/vmware/api_client/eventlet_client.py @@ -0,0 +1,155 @@ +# Copyright 2012 VMware, Inc. +# +# All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +import time + +import eventlet +eventlet.monkey_patch() + +from neutron.openstack.common import log as logging +from neutron.plugins.vmware.api_client import base +from neutron.plugins.vmware.api_client import eventlet_request + +LOG = logging.getLogger(__name__) + + +class EventletApiClient(base.ApiClientBase): + """Eventlet-based implementation of NSX ApiClient ABC.""" + + def __init__(self, api_providers, user, password, + concurrent_connections=base.DEFAULT_CONCURRENT_CONNECTIONS, + gen_timeout=base.GENERATION_ID_TIMEOUT, + use_https=True, + connect_timeout=base.DEFAULT_CONNECT_TIMEOUT): + '''Constructor + + :param api_providers: a list of tuples of the form: (host, port, + is_ssl). + :param user: login username. + :param password: login password. + :param concurrent_connections: total number of concurrent connections. + :param use_https: whether or not to use https for requests. + :param connect_timeout: connection timeout in seconds. 
+ :param gen_timeout: controls how long the generation id is kept + if set to -1 the generation id is never timed out + ''' + if not api_providers: + api_providers = [] + self._api_providers = set([tuple(p) for p in api_providers]) + self._api_provider_data = {} # tuple(semaphore, session_cookie) + for p in self._api_providers: + self._set_provider_data(p, (eventlet.semaphore.Semaphore(1), None)) + self._user = user + self._password = password + self._concurrent_connections = concurrent_connections + self._use_https = use_https + self._connect_timeout = connect_timeout + self._config_gen = None + self._config_gen_ts = None + self._gen_timeout = gen_timeout + + # Connection pool is a list of queues. + self._conn_pool = eventlet.queue.PriorityQueue() + self._next_conn_priority = 1 + for host, port, is_ssl in api_providers: + for _ in range(concurrent_connections): + conn = self._create_connection(host, port, is_ssl) + self._conn_pool.put((self._next_conn_priority, conn)) + self._next_conn_priority += 1 + + def acquire_redirect_connection(self, conn_params, auto_login=True, + headers=None): + """Check out or create connection to redirected NSX API server. + + Args: + conn_params: tuple specifying target of redirect, see + self._conn_params() + auto_login: returned connection should have valid session cookie + headers: headers to pass on if auto_login + + Returns: An available HTTPConnection instance corresponding to the + specified conn_params. If a connection did not previously + exist, new connections are created with the highest priority + in the connection pool and one of these new connections + returned. + """ + result_conn = None + data = self._get_provider_data(conn_params) + if data: + # redirect target already exists in provider data and connections + # to the provider have been added to the connection pool. Try to + # obtain a connection from the pool, note that it's possible that + # all connections to the provider are currently in use.
+ conns = [] + while not self._conn_pool.empty(): + priority, conn = self._conn_pool.get_nowait() + if not result_conn and self._conn_params(conn) == conn_params: + conn.priority = priority + result_conn = conn + else: + conns.append((priority, conn)) + for priority, conn in conns: + self._conn_pool.put((priority, conn)) + # hack: if no free connections available, create new connection + # and stash "no_release" attribute (so that we only exceed + # self._concurrent_connections temporarily) + if not result_conn: + conn = self._create_connection(*conn_params) + conn.priority = 0 # redirect connections have highest priority + conn.no_release = True + result_conn = conn + else: + #redirect target not already known, setup provider lists + self._api_providers.update([conn_params]) + self._set_provider_data(conn_params, + (eventlet.semaphore.Semaphore(1), None)) + # redirects occur during cluster upgrades, i.e. results to old + # redirects to new, so give redirect targets highest priority + priority = 0 + for i in range(self._concurrent_connections): + conn = self._create_connection(*conn_params) + conn.priority = priority + if i == self._concurrent_connections - 1: + break + self._conn_pool.put((priority, conn)) + result_conn = conn + if result_conn: + result_conn.last_used = time.time() + if auto_login and self.auth_cookie(conn) is None: + self._wait_for_login(result_conn, headers) + return result_conn + + def _login(self, conn=None, headers=None): + '''Issue login request and update authentication cookie.''' + cookie = None + g = eventlet_request.LoginRequestEventlet( + self, self._user, self._password, conn, headers) + g.start() + ret = g.join() + if ret: + if isinstance(ret, Exception): + LOG.error(_('Login error "%s"'), ret) + raise ret + + cookie = ret.getheader("Set-Cookie") + if cookie: + LOG.debug(_("Saving new authentication cookie '%s'"), cookie) + + return cookie + +# Register as subclass. 
+base.ApiClientBase.register(EventletApiClient) diff --git a/neutron/plugins/vmware/api_client/eventlet_request.py b/neutron/plugins/vmware/api_client/eventlet_request.py new file mode 100644 index 000000000..26c378e0c --- /dev/null +++ b/neutron/plugins/vmware/api_client/eventlet_request.py @@ -0,0 +1,240 @@ +# Copyright 2012 VMware, Inc. +# +# All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import eventlet +import httplib +import urllib + +from neutron.openstack.common import jsonutils as json +from neutron.openstack.common import log as logging +from neutron.plugins.vmware.api_client import request + +LOG = logging.getLogger(__name__) +USER_AGENT = "Neutron eventlet client/2.0" + + +class EventletApiRequest(request.ApiRequest): + '''Eventlet-based ApiRequest class. + + This class will form the basis for eventlet-based ApiRequest classes + ''' + + # Maximum number of green threads present in the system at one time. + API_REQUEST_POOL_SIZE = request.DEFAULT_API_REQUEST_POOL_SIZE + + # Pool of green threads. One green thread is allocated per incoming + # request. Incoming requests will block when the pool is empty. + API_REQUEST_POOL = eventlet.GreenPool(API_REQUEST_POOL_SIZE) + + # A unique id is assigned to each incoming request. When the current + # request id reaches MAXIMUM_REQUEST_ID it wraps around back to 0. + MAXIMUM_REQUEST_ID = request.DEFAULT_MAXIMUM_REQUEST_ID + + # The request id for the next incoming request. 
+ CURRENT_REQUEST_ID = 0 + + def __init__(self, client_obj, url, method="GET", body=None, + headers=None, + request_timeout=request.DEFAULT_REQUEST_TIMEOUT, + retries=request.DEFAULT_RETRIES, + auto_login=True, + redirects=request.DEFAULT_REDIRECTS, + http_timeout=request.DEFAULT_HTTP_TIMEOUT, client_conn=None): + '''Constructor.''' + self._api_client = client_obj + self._url = url + self._method = method + self._body = body + self._headers = headers or {} + self._request_timeout = request_timeout + self._retries = retries + self._auto_login = auto_login + self._redirects = redirects + self._http_timeout = http_timeout + self._client_conn = client_conn + self._abort = False + + self._request_error = None + + if "User-Agent" not in self._headers: + self._headers["User-Agent"] = USER_AGENT + + self._green_thread = None + # Retrieve and store this instance's unique request id. + self._request_id = EventletApiRequest.CURRENT_REQUEST_ID + # Update the class variable that tracks request id. + # Request IDs wrap around at MAXIMUM_REQUEST_ID + next_request_id = self._request_id + 1 + next_request_id %= self.MAXIMUM_REQUEST_ID + EventletApiRequest.CURRENT_REQUEST_ID = next_request_id + + @classmethod + def _spawn(cls, func, *args, **kwargs): + '''Allocate a green thread from the class pool.''' + return cls.API_REQUEST_POOL.spawn(func, *args, **kwargs) + + def spawn(self, func, *args, **kwargs): + '''Spawn a new green thread with the supplied function and args.''' + return self.__class__._spawn(func, *args, **kwargs) + + @classmethod + def joinall(cls): + '''Wait for all outstanding requests to complete.''' + return cls.API_REQUEST_POOL.waitall() + + def join(self): + '''Wait for instance green thread to complete.''' + if self._green_thread is not None: + return self._green_thread.wait() + return Exception(_('Joining an invalid green thread')) + + def start(self): + '''Start request processing.''' + self._green_thread = self.spawn(self._run) + + def copy(self): + '''Return a 
copy of this request instance.''' + return EventletApiRequest( + self._api_client, self._url, self._method, self._body, + self._headers, self._request_timeout, self._retries, + self._auto_login, self._redirects, self._http_timeout) + + def _run(self): + '''Method executed within green thread.''' + if self._request_timeout: + # No timeout exception escapes the with block. + with eventlet.timeout.Timeout(self._request_timeout, False): + return self._handle_request() + + LOG.info(_('[%d] Request timeout.'), self._rid()) + self._request_error = Exception(_('Request timeout')) + return None + else: + return self._handle_request() + + def _handle_request(self): + '''First level request handling.''' + attempt = 0 + timeout = 0 + response = None + while response is None and attempt <= self._retries: + eventlet.greenthread.sleep(timeout) + attempt += 1 + + req = self._issue_request() + # automatically raises any exceptions returned. + if isinstance(req, httplib.HTTPResponse): + timeout = 0 + if attempt <= self._retries and not self._abort: + if req.status in (httplib.UNAUTHORIZED, httplib.FORBIDDEN): + continue + elif req.status == httplib.SERVICE_UNAVAILABLE: + timeout = 0.5 + continue + # else fall through to return the error code + + LOG.debug(_("[%(rid)d] Completed request '%(method)s %(url)s'" + ": %(status)s"), + {'rid': self._rid(), 'method': self._method, + 'url': self._url, 'status': req.status}) + self._request_error = None + response = req + else: + LOG.info(_('[%(rid)d] Error while handling request: %(req)s'), + {'rid': self._rid(), 'req': req}) + self._request_error = req + response = None + return response + + +class LoginRequestEventlet(EventletApiRequest): + '''Process a login request.''' + + def __init__(self, client_obj, user, password, client_conn=None, + headers=None): + if headers is None: + headers = {} + headers.update({"Content-Type": "application/x-www-form-urlencoded"}) + body = urllib.urlencode({"username": user, "password": password}) + 
super(LoginRequestEventlet, self).__init__( + client_obj, "/ws.v1/login", "POST", body, headers, + auto_login=False, client_conn=client_conn) + + def session_cookie(self): + if self.successful(): + return self.value.getheader("Set-Cookie") + return None + + +class GetApiProvidersRequestEventlet(EventletApiRequest): + '''Get a list of API providers.''' + + def __init__(self, client_obj): + url = "/ws.v1/control-cluster/node?fields=roles" + super(GetApiProvidersRequestEventlet, self).__init__( + client_obj, url, "GET", auto_login=True) + + def api_providers(self): + """Parse api_providers from response. + + Returns: api_providers in [(host, port, is_ssl), ...] format + """ + def _provider_from_listen_addr(addr): + # (pssl|ptcp):: => (host, port, is_ssl) + parts = addr.split(':') + return (parts[1], int(parts[2]), parts[0] == 'pssl') + + try: + if self.successful(): + ret = [] + body = json.loads(self.value.body) + for node in body.get('results', []): + for role in node.get('roles', []): + if role.get('role') == 'api_provider': + addr = role.get('listen_addr') + if addr: + ret.append(_provider_from_listen_addr(addr)) + return ret + except Exception as e: + LOG.warn(_("[%(rid)d] Failed to parse API provider: %(e)s"), + {'rid': self._rid(), 'e': e}) + # intentionally fall through + return None + + +class GenericRequestEventlet(EventletApiRequest): + '''Handle a generic request.''' + + def __init__(self, client_obj, method, url, body, content_type, + auto_login=False, + request_timeout=request.DEFAULT_REQUEST_TIMEOUT, + http_timeout=request.DEFAULT_HTTP_TIMEOUT, + retries=request.DEFAULT_RETRIES, + redirects=request.DEFAULT_REDIRECTS): + headers = {"Content-Type": content_type} + super(GenericRequestEventlet, self).__init__( + client_obj, url, method, body, headers, + request_timeout=request_timeout, retries=retries, + auto_login=auto_login, redirects=redirects, + http_timeout=http_timeout) + + def session_cookie(self): + if self.successful(): + return 
self.value.getheader("Set-Cookie") + return None + + +request.ApiRequest.register(EventletApiRequest) diff --git a/neutron/plugins/vmware/api_client/exception.py b/neutron/plugins/vmware/api_client/exception.py new file mode 100644 index 000000000..b3facfcaa --- /dev/null +++ b/neutron/plugins/vmware/api_client/exception.py @@ -0,0 +1,121 @@ +# Copyright 2014 VMware, Inc. +# +# All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + + +class NsxApiException(Exception): + """Base NSX API Client Exception. + + To correctly use this class, inherit from it and define + a 'message' property. That message will get printf'd + with the keyword arguments provided to the constructor. 
+ + """ + message = _("An unknown exception occurred.") + + def __init__(self, **kwargs): + try: + self._error_string = self.message % kwargs + except Exception: + # at least get the core message out if something happened + self._error_string = self.message + + def __str__(self): + return self._error_string + + +class UnAuthorizedRequest(NsxApiException): + message = _("Server denied session's authentication credentials.") + + +class ResourceNotFound(NsxApiException): + message = _("An entity referenced in the request was not found.") + + +class Conflict(NsxApiException): + message = _("Request conflicts with configuration on a different " + "entity.") + + +class ServiceUnavailable(NsxApiException): + message = _("Request could not completed because the associated " + "resource could not be reached.") + + +class Forbidden(NsxApiException): + message = _("The request is forbidden from accessing the " + "referenced resource.") + + +class ReadOnlyMode(Forbidden): + message = _("Create/Update actions are forbidden when in read-only mode.") + + +class RequestTimeout(NsxApiException): + message = _("The request has timed out.") + + +class BadRequest(NsxApiException): + message = _("The server is unable to fulfill the request due " + "to a bad syntax") + + +class InvalidSecurityCertificate(BadRequest): + message = _("The backend received an invalid security certificate.") + + +def fourZeroZero(response=None): + if response and "Invalid SecurityCertificate" in response.body: + raise InvalidSecurityCertificate() + raise BadRequest() + + +def fourZeroFour(response=None): + raise ResourceNotFound() + + +def fourZeroNine(response=None): + raise Conflict() + + +def fiveZeroThree(response=None): + raise ServiceUnavailable() + + +def fourZeroThree(response=None): + if 'read-only' in response.body: + raise ReadOnlyMode() + else: + raise Forbidden() + + +def zero(self, response=None): + raise NsxApiException() + + +ERROR_MAPPINGS = { + 400: fourZeroZero, + 404: fourZeroFour, + 405: 
zero, + 409: fourZeroNine, + 503: fiveZeroThree, + 403: fourZeroThree, + 301: zero, + 307: zero, + 500: zero, + 501: zero, + 503: zero +} diff --git a/neutron/plugins/vmware/api_client/request.py b/neutron/plugins/vmware/api_client/request.py new file mode 100644 index 000000000..70e7dcef4 --- /dev/null +++ b/neutron/plugins/vmware/api_client/request.py @@ -0,0 +1,287 @@ +# Copyright 2012 VMware, Inc. +# +# All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +import abc +import copy +import eventlet +import httplib +import time + +import six +import six.moves.urllib.parse as urlparse + +from neutron.openstack.common import excutils +from neutron.openstack.common import log as logging +from neutron.plugins.vmware import api_client + +LOG = logging.getLogger(__name__) + +DEFAULT_REQUEST_TIMEOUT = 30 +DEFAULT_HTTP_TIMEOUT = 30 +DEFAULT_RETRIES = 2 +DEFAULT_REDIRECTS = 2 +DEFAULT_API_REQUEST_POOL_SIZE = 1000 +DEFAULT_MAXIMUM_REQUEST_ID = 4294967295 +DOWNLOAD_TIMEOUT = 180 + + +@six.add_metaclass(abc.ABCMeta) +class ApiRequest(object): + '''An abstract baseclass for all ApiRequest implementations. + + This defines the interface and property structure for both eventlet and + gevent-based ApiRequest classes. + ''' + + # List of allowed status codes. 
+ ALLOWED_STATUS_CODES = [ + httplib.OK, + httplib.CREATED, + httplib.NO_CONTENT, + httplib.MOVED_PERMANENTLY, + httplib.TEMPORARY_REDIRECT, + httplib.BAD_REQUEST, + httplib.UNAUTHORIZED, + httplib.FORBIDDEN, + httplib.NOT_FOUND, + httplib.CONFLICT, + httplib.INTERNAL_SERVER_ERROR, + httplib.SERVICE_UNAVAILABLE + ] + + @abc.abstractmethod + def start(self): + pass + + @abc.abstractmethod + def join(self): + pass + + @abc.abstractmethod + def copy(self): + pass + + def _issue_request(self): + '''Issue a request to a provider.''' + conn = (self._client_conn or + self._api_client.acquire_connection(True, + copy.copy(self._headers), + rid=self._rid())) + if conn is None: + error = Exception(_("No API connections available")) + self._request_error = error + return error + + url = self._url + LOG.debug(_("[%(rid)d] Issuing - request %(conn)s"), + {'rid': self._rid(), 'conn': self._request_str(conn, url)}) + issued_time = time.time() + is_conn_error = False + is_conn_service_unavail = False + response = None + try: + redirects = 0 + while (redirects <= self._redirects): + # Update connection with user specified request timeout, + # the connect timeout is usually smaller so we only set + # the request timeout after a connection is established + if conn.sock is None: + conn.connect() + conn.sock.settimeout(self._http_timeout) + elif conn.sock.gettimeout() != self._http_timeout: + conn.sock.settimeout(self._http_timeout) + + headers = copy.copy(self._headers) + cookie = self._api_client.auth_cookie(conn) + if cookie: + headers["Cookie"] = cookie + + gen = self._api_client.config_gen + if gen: + headers["X-Nvp-Wait-For-Config-Generation"] = gen + LOG.debug(_("Setting X-Nvp-Wait-For-Config-Generation " + "request header: '%s'"), gen) + try: + conn.request(self._method, url, self._body, headers) + except Exception as e: + with excutils.save_and_reraise_exception(): + LOG.warn(_("[%(rid)d] Exception issuing request: " + "%(e)s"), + {'rid': self._rid(), 'e': e}) + + response = 
conn.getresponse() + response.body = response.read() + response.headers = response.getheaders() + elapsed_time = time.time() - issued_time + LOG.debug(_("[%(rid)d] Completed request '%(conn)s': " + "%(status)s (%(elapsed)s seconds)"), + {'rid': self._rid(), + 'conn': self._request_str(conn, url), + 'status': response.status, + 'elapsed': elapsed_time}) + + new_gen = response.getheader('X-Nvp-Config-Generation', None) + if new_gen: + LOG.debug(_("Reading X-Nvp-config-Generation response " + "header: '%s'"), new_gen) + if (self._api_client.config_gen is None or + self._api_client.config_gen < int(new_gen)): + self._api_client.config_gen = int(new_gen) + + if response.status == httplib.UNAUTHORIZED: + + if cookie is None and self._url != "/ws.v1/login": + # The connection still has no valid cookie despite + # attempts to authenticate and the request has failed + # with unauthorized status code. If this isn't + # a request to authenticate, we should abort the + # request since there is no point in retrying. + self._abort = True + else: + # If request is unauthorized, clear the session cookie + # for the current provider so that subsequent requests + # to the same provider triggers re-authentication.
+ self._api_client.set_auth_cookie(conn, None) + + self._api_client.set_auth_cookie(conn, None) + elif response.status == httplib.SERVICE_UNAVAILABLE: + is_conn_service_unavail = True + + if response.status not in [httplib.MOVED_PERMANENTLY, + httplib.TEMPORARY_REDIRECT]: + break + elif redirects >= self._redirects: + LOG.info(_("[%d] Maximum redirects exceeded, aborting " + "request"), self._rid()) + break + redirects += 1 + + conn, url = self._redirect_params(conn, response.headers, + self._client_conn is None) + if url is None: + response.status = httplib.INTERNAL_SERVER_ERROR + break + LOG.info(_("[%(rid)d] Redirecting request to: %(conn)s"), + {'rid': self._rid(), + 'conn': self._request_str(conn, url)}) + # yield here, just in case we are not out of the loop yet + eventlet.greenthread.sleep(0) + # If we receive any of these responses, then + # our server did not process our request and may be in an + # errored state. Raise an exception, which will cause the + # the conn to be released with is_conn_error == True + # which puts the conn on the back of the client's priority + # queue. 
+ if (response.status == httplib.INTERNAL_SERVER_ERROR and + response.status > httplib.NOT_IMPLEMENTED): + LOG.warn(_("[%(rid)d] Request '%(method)s %(url)s' " + "received: %(status)s"), + {'rid': self._rid(), 'method': self._method, + 'url': self._url, 'status': response.status}) + raise Exception(_('Server error return: %s'), response.status) + return response + except Exception as e: + if isinstance(e, httplib.BadStatusLine): + msg = (_("Invalid server response")) + else: + msg = unicode(e) + if response is None: + elapsed_time = time.time() - issued_time + LOG.warn(_("[%(rid)d] Failed request '%(conn)s': '%(msg)s' " + "(%(elapsed)s seconds)"), + {'rid': self._rid(), 'conn': self._request_str(conn, url), + 'msg': msg, 'elapsed': elapsed_time}) + self._request_error = e + is_conn_error = True + return e + finally: + # Make sure we release the original connection provided by the + # acquire_connection() call above. + if self._client_conn is None: + self._api_client.release_connection(conn, is_conn_error, + is_conn_service_unavail, + rid=self._rid()) + + def _redirect_params(self, conn, headers, allow_release_conn=False): + """Process redirect response, create new connection if necessary. + + Args: + conn: connection that returned the redirect response + headers: response headers of the redirect response + allow_release_conn: if redirecting to a different server, + release existing connection back to connection pool. + + Returns: Return tuple(conn, url) where conn is a connection object + to the redirect target and url is the path of the API request + """ + + url = None + for name, value in headers: + if name.lower() == "location": + url = value + break + if not url: + LOG.warn(_("[%d] Received redirect status without location header" + " field"), self._rid()) + return (conn, None) + # Accept location with the following format: + # 1. /path, redirect to same node + # 2. scheme://hostname:[port]/path where scheme is https or http + # Reject others + # 3. e.g. 
relative paths, unsupported scheme, unspecified host + result = urlparse.urlparse(url) + if not result.scheme and not result.hostname and result.path: + if result.path[0] == "/": + if result.query: + url = "%s?%s" % (result.path, result.query) + else: + url = result.path + return (conn, url) # case 1 + else: + LOG.warn(_("[%(rid)d] Received invalid redirect location: " + "'%(url)s'"), {'rid': self._rid(), 'url': url}) + return (conn, None) # case 3 + elif result.scheme not in ["http", "https"] or not result.hostname: + LOG.warn(_("[%(rid)d] Received malformed redirect " + "location: %(url)s"), {'rid': self._rid(), 'url': url}) + return (conn, None) # case 3 + # case 2, redirect location includes a scheme + # so setup a new connection and authenticate + if allow_release_conn: + self._api_client.release_connection(conn) + conn_params = (result.hostname, result.port, result.scheme == "https") + conn = self._api_client.acquire_redirect_connection(conn_params, True, + self._headers) + if result.query: + url = "%s?%s" % (result.path, result.query) + else: + url = result.path + return (conn, url) + + def _rid(self): + '''Return current request id.''' + return self._request_id + + @property + def request_error(self): + '''Return any errors associated with this instance.''' + return self._request_error + + def _request_str(self, conn, url): + '''Return string representation of connection.''' + return "%s %s/%s" % (self._method, api_client.ctrl_conn_to_str(conn), + url) diff --git a/neutron/plugins/vmware/api_client/version.py b/neutron/plugins/vmware/api_client/version.py new file mode 100644 index 000000000..52fcd74b4 --- /dev/null +++ b/neutron/plugins/vmware/api_client/version.py @@ -0,0 +1,43 @@ +# Copyright 2012 VMware, Inc. +# +# All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +from neutron.openstack.common import log as logging + +LOG = logging.getLogger(__name__) + + +def find_version(headers): + """Retrieve NSX controller version from response headers.""" + for (header_name, header_value) in (headers or ()): + try: + if header_name == 'server': + return Version(header_value.split('/')[1]) + except IndexError: + LOG.warning(_("Unable to fetch NSX version from response " + "headers :%s"), headers) + + +class Version(object): + """Abstracts NSX version by exposing major and minor.""" + + def __init__(self, version): + self.full_version = version.split('.') + self.major = int(self.full_version[0]) + self.minor = int(self.full_version[1]) + + def __str__(self): + return '.'.join(self.full_version) diff --git a/neutron/plugins/vmware/check_nsx_config.py b/neutron/plugins/vmware/check_nsx_config.py new file mode 100644 index 000000000..14eca41cb --- /dev/null +++ b/neutron/plugins/vmware/check_nsx_config.py @@ -0,0 +1,163 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 VMware, Inc. +# All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +from __future__ import print_function + +import sys + +from oslo.config import cfg + +from neutron.common import config +from neutron.plugins.vmware.common import config as nsx_config # noqa +from neutron.plugins.vmware.common import nsx_utils +from neutron.plugins.vmware import nsxlib + +config.setup_logging(cfg.CONF) + + +def help(name): + print("Usage: %s path/to/neutron/plugin/ini/config/file" % name) + sys.exit(1) + + +def get_nsx_controllers(cluster): + return cluster.nsx_controllers + + +def config_helper(config_entity, cluster): + try: + return nsxlib.do_request('GET', + "/ws.v1/%s?fields=uuid" % config_entity, + cluster=cluster).get('results', []) + except Exception as e: + msg = (_("Error '%(err)s' when connecting to controller(s): %(ctl)s.") + % {'err': str(e), + 'ctl': ', '.join(get_nsx_controllers(cluster))}) + raise Exception(msg) + + +def get_control_cluster_nodes(cluster): + return config_helper("control-cluster/node", cluster) + + +def get_gateway_services(cluster): + ret_gw_services = {"L2GatewayServiceConfig": [], + "L3GatewayServiceConfig": []} + gw_services = config_helper("gateway-service", cluster) + for gw_service in gw_services: + ret_gw_services[gw_service['type']].append(gw_service['uuid']) + return ret_gw_services + + +def get_transport_zones(cluster): + transport_zones = config_helper("transport-zone", cluster) + return [transport_zone['uuid'] for transport_zone in transport_zones] + + +def get_transport_nodes(cluster): + transport_nodes = config_helper("transport-node", cluster) + return [transport_node['uuid'] for transport_node in transport_nodes] + + +def is_transport_node_connected(cluster, node_uuid): + try: + return nsxlib.do_request('GET', + "/ws.v1/transport-node/%s/status" % node_uuid, + cluster=cluster)['connection']['connected'] + except Exception as e: + msg = (_("Error '%(err)s' when connecting to controller(s): 
%(ctl)s.") + % {'err': str(e), + 'ctl': ', '.join(get_nsx_controllers(cluster))}) + raise Exception(msg) + + +def main(): + if len(sys.argv) != 2: + help(sys.argv[0]) + args = ['--config-file'] + args.append(sys.argv[1]) + config.init(args) + print("----------------------- Database Options -----------------------") + print("\tconnection: %s" % cfg.CONF.database.connection) + print("\tretry_interval: %d" % cfg.CONF.database.retry_interval) + print("\tmax_retries: %d" % cfg.CONF.database.max_retries) + print("----------------------- NSX Options -----------------------") + print("\tNSX Generation Timeout %d" % cfg.CONF.NSX.nsx_gen_timeout) + print("\tNumber of concurrent connections to each controller %d" % + cfg.CONF.NSX.concurrent_connections) + print("\tmax_lp_per_bridged_ls: %s" % cfg.CONF.NSX.max_lp_per_bridged_ls) + print("\tmax_lp_per_overlay_ls: %s" % cfg.CONF.NSX.max_lp_per_overlay_ls) + print("----------------------- Cluster Options -----------------------") + print("\trequested_timeout: %s" % cfg.CONF.req_timeout) + print("\tretries: %s" % cfg.CONF.retries) + print("\tredirects: %s" % cfg.CONF.redirects) + print("\thttp_timeout: %s" % cfg.CONF.http_timeout) + cluster = nsx_utils.create_nsx_cluster( + cfg.CONF, + cfg.CONF.NSX.concurrent_connections, + cfg.CONF.NSX.nsx_gen_timeout) + nsx_controllers = get_nsx_controllers(cluster) + num_controllers = len(nsx_controllers) + print("Number of controllers found: %s" % num_controllers) + if num_controllers == 0: + print("You must specify at least one controller!") + sys.exit(1) + + get_control_cluster_nodes(cluster) + for controller in nsx_controllers: + print("\tController endpoint: %s" % controller) + gateway_services = get_gateway_services(cluster) + default_gateways = { + "L2GatewayServiceConfig": cfg.CONF.default_l2_gw_service_uuid, + "L3GatewayServiceConfig": cfg.CONF.default_l3_gw_service_uuid} + errors = 0 + for svc_type in default_gateways.keys(): + for uuid in gateway_services[svc_type]: + 
print("\t\tGateway(%s) uuid: %s" % (svc_type, uuid)) + if (default_gateways[svc_type] and + default_gateways[svc_type] not in gateway_services[svc_type]): + print("\t\t\tError: specified default %s gateway (%s) is " + "missing from NSX Gateway Services!" % ( + svc_type, + default_gateways[svc_type])) + errors += 1 + transport_zones = get_transport_zones(cluster) + print("\tTransport zones: %s" % transport_zones) + if cfg.CONF.default_tz_uuid not in transport_zones: + print("\t\tError: specified default transport zone " + "(%s) is missing from NSX transport zones!" + % cfg.CONF.default_tz_uuid) + errors += 1 + transport_nodes = get_transport_nodes(cluster) + print("\tTransport nodes: %s" % transport_nodes) + node_errors = [] + for node in transport_nodes: + if not is_transport_node_connected(cluster, node): + node_errors.append(node) + + # Use different exit codes, so that we can distinguish + # between config and runtime errors + if len(node_errors): + print("\nThere are one or mode transport nodes that are " + "not connected: %s. Please, revise!" % node_errors) + sys.exit(10) + elif errors: + print("\nThere are %d errors with your configuration. " + "Please, revise!" % errors) + sys.exit(12) + else: + print("Done.") diff --git a/neutron/plugins/vmware/common/__init__.py b/neutron/plugins/vmware/common/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/neutron/plugins/vmware/common/config.py b/neutron/plugins/vmware/common/config.py new file mode 100644 index 000000000..c75f982a6 --- /dev/null +++ b/neutron/plugins/vmware/common/config.py @@ -0,0 +1,198 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 VMware, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
from oslo.config import cfg

from neutron.plugins.vmware.common import exceptions as nsx_exc


class AgentModes:
    # How DHCP/metadata services are implemented for NSX networks.
    AGENT = 'agent'
    AGENTLESS = 'agentless'
    COMBINED = 'combined'


class MetadataModes:
    # How instances reach the metadata service.
    DIRECT = 'access_network'
    INDIRECT = 'dhcp_host_route'


class ReplicationModes:
    # Where broadcast/multicast replication is performed.
    SERVICE = 'service'
    SOURCE = 'source'


# Options common to the whole NSX plugin ([NSX] group; the old [NVP]
# group is honored for backward compatibility).
base_opts = [
    cfg.IntOpt('max_lp_per_bridged_ls', default=5000,
               deprecated_group='NVP',
               help=_("Maximum number of ports of a logical switch on a "
                      "bridged transport zone (default 5000)")),
    cfg.IntOpt('max_lp_per_overlay_ls', default=256,
               deprecated_group='NVP',
               help=_("Maximum number of ports of a logical switch on an "
                      "overlay transport zone (default 256)")),
    cfg.IntOpt('concurrent_connections', default=10,
               deprecated_group='NVP',
               help=_("Maximum concurrent connections to each NSX "
                      "controller.")),
    cfg.IntOpt('nsx_gen_timeout', default=-1,
               deprecated_name='nvp_gen_timeout',
               deprecated_group='NVP',
               help=_("Number of seconds a generation id should be valid for "
                      "(default -1 meaning do not time out)")),
    cfg.StrOpt('metadata_mode', default=MetadataModes.DIRECT,
               deprecated_group='NVP',
               help=_("If set to access_network this enables a dedicated "
                      "connection to the metadata proxy for metadata server "
                      "access via Neutron router. If set to dhcp_host_route "
                      "this enables host route injection via the dhcp agent. "
                      "This option is only useful if running on a host that "
                      "does not support namespaces otherwise access_network "
                      "should be used.")),
    # Help-string typo fixed: "tranport" -> "transport".
    cfg.StrOpt('default_transport_type', default='stt',
               deprecated_group='NVP',
               help=_("The default network transport type to use (stt, gre, "
                      "bridge, ipsec_gre, or ipsec_stt)")),
    cfg.StrOpt('agent_mode', default=AgentModes.AGENT,
               deprecated_group='NVP',
               help=_("The mode used to implement DHCP/metadata services.")),
    # Help-string grammar fixed: "set to this to" -> "set this to".
    cfg.StrOpt('replication_mode', default=ReplicationModes.SERVICE,
               help=_("The default option leverages service nodes to perform"
                      " packet replication though one could set this to "
                      "'source' to perform replication locally. This is useful"
                      " if one does not want to deploy a service node(s)."))
]

# Options controlling the operational-status synchronization task
# ([NSX_SYNC] group; old [NVP_SYNC] group honored).
sync_opts = [
    cfg.IntOpt('state_sync_interval', default=10,
               deprecated_group='NVP_SYNC',
               help=_("Interval in seconds between runs of the state "
                      "synchronization task. Set it to 0 to disable it")),
    cfg.IntOpt('max_random_sync_delay', default=0,
               deprecated_group='NVP_SYNC',
               help=_("Maximum value for the additional random "
                      "delay in seconds between runs of the state "
                      "synchronization task")),
    cfg.IntOpt('min_sync_req_delay', default=1,
               deprecated_group='NVP_SYNC',
               help=_('Minimum delay, in seconds, between two state '
                      'synchronization queries to NSX. It must not '
                      'exceed state_sync_interval')),
    cfg.IntOpt('min_chunk_size', default=500,
               deprecated_group='NVP_SYNC',
               help=_('Minimum number of resources to be retrieved from NSX '
                      'during state synchronization')),
    cfg.BoolOpt('always_read_status', default=False,
                deprecated_group='NVP_SYNC',
                help=_('Always read operational status from backend on show '
                       'operations. Enabling this option might slow down '
                       'the system.'))
]

# Options describing how to reach the NSX controllers ([DEFAULT]).
connection_opts = [
    cfg.StrOpt('nsx_user',
               default='admin',
               deprecated_name='nvp_user',
               help=_('User name for NSX controllers in this cluster')),
    cfg.StrOpt('nsx_password',
               default='admin',
               deprecated_name='nvp_password',
               secret=True,
               help=_('Password for NSX controllers in this cluster')),
    cfg.IntOpt('req_timeout',
               default=30,
               help=_('Total time limit for a cluster request')),
    cfg.IntOpt('http_timeout',
               default=30,
               help=_('Time before aborting a request')),
    # Help-string typo fixed: "Number of time" -> "Number of times".
    cfg.IntOpt('retries',
               default=2,
               help=_('Number of times a request should be retried')),
    cfg.IntOpt('redirects',
               default=2,
               help=_('Number of times a redirect should be followed')),
    cfg.ListOpt('nsx_controllers',
                deprecated_name='nvp_controllers',
                help=_("Lists the NSX controllers in this cluster")),
]

# Default backend entities used by the plugin ([DEFAULT]).
cluster_opts = [
    cfg.StrOpt('default_tz_uuid',
               help=_("This is uuid of the default NSX Transport zone that "
                      "will be used for creating tunneled isolated "
                      "\"Neutron\" networks. It needs to be created in NSX "
                      "before starting Neutron with the nsx plugin.")),
    cfg.StrOpt('default_l3_gw_service_uuid',
               help=_("Unique identifier of the NSX L3 Gateway service "
                      "which will be used for implementing routers and "
                      "floating IPs")),
    cfg.StrOpt('default_l2_gw_service_uuid',
               help=_("Unique identifier of the NSX L2 Gateway service "
                      "which will be used by default for network gateways")),
    cfg.StrOpt('default_service_cluster_uuid',
               help=_("Unique identifier of the Service Cluster which will "
                      "be used by logical services like dhcp and metadata")),
    # Help-string fixed: missing space between "node" and "which".
    cfg.StrOpt('default_interface_name', default='breth0',
               help=_("Name of the interface on a L2 Gateway transport node "
                      "which should be used by default when setting up a "
                      "network connection")),
]

DEFAULT_STATUS_CHECK_INTERVAL = 2000

# Options for the VMware vShield (VCNS) manager ([vcns] group).
vcns_opts = [
    cfg.StrOpt('user',
               default='admin',
               help=_('User name for vsm')),
    cfg.StrOpt('password',
               default='default',
               secret=True,
               help=_('Password for vsm')),
    cfg.StrOpt('manager_uri',
               help=_('uri for vsm')),
    cfg.StrOpt('datacenter_moid',
               help=_('Optional parameter identifying the ID of datacenter '
                      'to deploy NSX Edges')),
    cfg.StrOpt('deployment_container_id',
               help=_('Optional parameter identifying the ID of datastore to '
                      'deploy NSX Edges')),
    cfg.StrOpt('resource_pool_id',
               help=_('Optional parameter identifying the ID of resource to '
                      'deploy NSX Edges')),
    cfg.StrOpt('datastore_id',
               help=_('Optional parameter identifying the ID of datastore to '
                      'deploy NSX Edges')),
    cfg.StrOpt('external_network',
               help=_('Network ID for physical network connectivity')),
    cfg.IntOpt('task_status_check_interval',
               default=DEFAULT_STATUS_CHECK_INTERVAL,
               help=_("Task status check interval"))
]

# Register the configuration options
cfg.CONF.register_opts(connection_opts)
cfg.CONF.register_opts(cluster_opts)
cfg.CONF.register_opts(vcns_opts, group="vcns")
cfg.CONF.register_opts(base_opts, group="NSX")
cfg.CONF.register_opts(sync_opts, group="NSX_SYNC")


def validate_config_options():
    # Reject replication modes other than 'service'/'source' at startup
    # so a misconfiguration fails fast rather than at request time.
    if cfg.CONF.NSX.replication_mode not in (ReplicationModes.SERVICE,
                                             ReplicationModes.SOURCE):
        error = (_("Invalid replication_mode: %s") %
                 cfg.CONF.NSX.replication_mode)
        raise nsx_exc.NsxPluginException(err_msg=error)


# --- neutron/plugins/vmware/common/exceptions.py ---
# Exception hierarchy for the VMware NSX plugin. NsxPluginException is
# the root for plugin-internal errors; classes deriving directly from
# neutron.common.exceptions map to specific HTTP faults (Conflict,
# InUse, NotFound, ServiceUnavailable).

from neutron.common import exceptions as n_exc


class NsxPluginException(n_exc.NeutronException):
    message = _("An unexpected error occurred in the NSX Plugin: %(err_msg)s")


class InvalidVersion(NsxPluginException):
    message = _("Unable to fulfill request with version %(version)s.")


class InvalidConnection(NsxPluginException):
    message = _("Invalid NSX connection parameters: %(conn_params)s")


class InvalidClusterConfiguration(NsxPluginException):
    message = _("Invalid cluster values: %(invalid_attrs)s. Please ensure "
                "that these values are specified in the [DEFAULT] "
                "section of the NSX plugin ini file.")


class InvalidNovaZone(NsxPluginException):
    message = _("Unable to find cluster config entry "
                "for nova zone: %(nova_zone)s")


class NoMorePortsException(NsxPluginException):
    message = _("Unable to create port on network %(network)s. "
                "Maximum number of ports reached")


class NatRuleMismatch(NsxPluginException):
    message = _("While retrieving NAT rules, %(actual_rules)s were found "
                "whereas rules in the (%(min_rules)s,%(max_rules)s) interval "
                "were expected")


class InvalidAttachmentType(NsxPluginException):
    message = _("Invalid NSX attachment type '%(attachment_type)s'")


class MaintenanceInProgress(NsxPluginException):
    message = _("The networking backend is currently in maintenance mode and "
                "therefore unable to accept requests which modify its state. "
                "Please try later.")


class L2GatewayAlreadyInUse(n_exc.Conflict):
    message = _("Gateway Service %(gateway)s is already in use")


class InvalidSecurityCertificate(NsxPluginException):
    message = _("An invalid security certificate was specified for the "
                "gateway device. Certificates must be enclosed between "
                "'-----BEGIN CERTIFICATE-----' and "
                "'-----END CERTIFICATE-----'")


class ServiceOverQuota(n_exc.Conflict):
    message = _("Quota exceeded for Vcns resource: %(overs)s: %(err_msg)s")


class RouterInUseByLBService(n_exc.InUse):
    message = _("Router %(router_id)s is in use by Loadbalancer Service "
                "%(vip_id)s")


class RouterInUseByFWService(n_exc.InUse):
    message = _("Router %(router_id)s is in use by firewall Service "
                "%(firewall_id)s")


class VcnsDriverException(NsxPluginException):
    message = _("Error happened in NSX VCNS Driver: %(err_msg)s")


class AdvRouterServiceUnavailable(n_exc.ServiceUnavailable):
    message = _("Router %(router_id)s is not in 'ACTIVE' "
                "status, thus unable to provide advanced service")


class ServiceClusterUnavailable(NsxPluginException):
    message = _("Service cluster: '%(cluster_id)s' is unavailable. Please, "
                "check NSX setup and/or configuration")


class PortConfigurationError(NsxPluginException):
    message = _("An error occurred while connecting LSN %(lsn_id)s "
                "and network %(net_id)s via port %(port_id)s")

    def __init__(self, **kwargs):
        # Keep the offending port id available to callers that need to
        # roll back the port after the failure.
        super(PortConfigurationError, self).__init__(**kwargs)
        self.port_id = kwargs.get('port_id')


class LsnNotFound(n_exc.NotFound):
    message = _('Unable to find LSN for %(entity)s %(entity_id)s')


class LsnPortNotFound(n_exc.NotFound):
    message = (_('Unable to find port for LSN %(lsn_id)s '
                 'and %(entity)s %(entity_id)s'))


class LsnMigrationConflict(n_exc.Conflict):
    message = _("Unable to migrate network '%(net_id)s' to LSN: %(reason)s")


class LsnConfigurationConflict(NsxPluginException):
    message = _("Configuration conflict on Logical Service Node %(lsn_id)s")
from neutron.common import exceptions as n_exc
from neutron.openstack.common import log
from neutron.plugins.vmware.api_client import client
from neutron.plugins.vmware.api_client import exception as api_exc
from neutron.plugins.vmware.dbexts import db as nsx_db
from neutron.plugins.vmware.dbexts import networkgw_db
from neutron.plugins.vmware import nsx_cluster
from neutron.plugins.vmware.nsxlib import l2gateway as l2gwlib
from neutron.plugins.vmware.nsxlib import router as routerlib
from neutron.plugins.vmware.nsxlib import secgroup as secgrouplib
from neutron.plugins.vmware.nsxlib import switch as switchlib

LOG = log.getLogger(__name__)


def fetch_nsx_switches(session, cluster, neutron_net_id):
    """Retrieve logical switches for a neutron network.

    This function is optimized for fetching all the lswitches always
    with a single NSX query.
    If there is more than 1 logical switch (chained switches use case)
    NSX lswitches are queried by 'quantum_net_id' tag. Otherwise the NSX
    lswitch is directly retrieved by id (more efficient).

    :returns: a list of lswitch dicts; empty when no switch exists for
        the network (previously this crashed on len(None)/[0]).
    """
    nsx_switch_ids = get_nsx_switch_ids(session, cluster, neutron_net_id)
    if not nsx_switch_ids:
        return []
    if len(nsx_switch_ids) > 1:
        lswitches = switchlib.get_lswitches(cluster, neutron_net_id)
    else:
        lswitches = [switchlib.get_lswitch_by_id(
            cluster, nsx_switch_ids[0])]
    return lswitches


def get_nsx_switch_ids(session, cluster, neutron_network_id):
    """Return the NSX switch ids for a given neutron network.

    First lookup for mappings in Neutron database. If no mapping is
    found, query the NSX backend and add the mappings.

    :returns: a list of NSX switch uuids; empty when none exist.
    """
    nsx_switch_ids = nsx_db.get_nsx_switch_ids(
        session, neutron_network_id)
    if not nsx_switch_ids:
        # Find logical switches from backend.
        # This is a rather expensive query, but it won't be executed
        # more than once for each network in Neutron's lifetime
        nsx_switches = switchlib.get_lswitches(cluster, neutron_network_id)
        if not nsx_switches:
            LOG.warn(_("Unable to find NSX switches for Neutron network %s"),
                     neutron_network_id)
            # Return an empty list rather than None so callers doing
            # len()/indexing do not blow up (see fetch_nsx_switches).
            return []
        nsx_switch_ids = []
        with session.begin(subtransactions=True):
            for nsx_switch in nsx_switches:
                nsx_switch_id = nsx_switch['uuid']
                nsx_switch_ids.append(nsx_switch_id)
                # Create DB mapping
                nsx_db.add_neutron_nsx_network_mapping(
                    session,
                    neutron_network_id,
                    nsx_switch_id)
    return nsx_switch_ids


def get_nsx_switch_and_port_id(session, cluster, neutron_port_id):
    """Return the NSX switch and port uuids for a given neutron port.

    First, look up the Neutron database. If not found, execute
    a query on NSX platform as the mapping might be missing because
    the port was created before upgrading to grizzly.

    This routine also retrieves the identifier of the logical switch in
    the backend where the port is plugged. Prior to Icehouse this
    information was not available in the Neutron Database. For dealing
    with pre-existing records, this routine will query the backend
    for retrieving the correct switch identifier.

    As of Icehouse release it is not indeed anymore possible to assume
    the backend logical switch identifier is equal to the neutron
    network identifier.

    :returns: (nsx_switch_id, nsx_port_id), or (None, None) when the
        port cannot be found on the backend either.
    """
    nsx_switch_id, nsx_port_id = nsx_db.get_nsx_switch_and_port_id(
        session, neutron_port_id)
    if not nsx_switch_id:
        # Find logical switch for port from backend
        # This is a rather expensive query, but it won't be executed
        # more than once for each port in Neutron's lifetime
        nsx_ports = switchlib.query_lswitch_lports(
            cluster, '*', relations='LogicalSwitchConfig',
            filters={'tag': neutron_port_id,
                     'tag_scope': 'q_port_id'})
        # Only one result expected
        # NOTE(salv-orlando): Not handling the case where more than one
        # port is found with the same neutron port tag
        if not nsx_ports:
            LOG.warn(_("Unable to find NSX port for Neutron port %s"),
                     neutron_port_id)
            # This method is supposed to return a tuple
            return None, None
        nsx_port = nsx_ports[0]
        nsx_switch_id = (nsx_port['_relations']
                         ['LogicalSwitchConfig']['uuid'])
        if nsx_port_id:
            # Mapping already exists. Delete before recreating
            nsx_db.delete_neutron_nsx_port_mapping(
                session, neutron_port_id)
        else:
            nsx_port_id = nsx_port['uuid']
        # (re)Create DB mapping
        nsx_db.add_neutron_nsx_port_mapping(
            session, neutron_port_id,
            nsx_switch_id, nsx_port_id)
    return nsx_switch_id, nsx_port_id


def get_nsx_security_group_id(session, cluster, neutron_id):
    """Return the NSX sec profile uuid for a given neutron sec group.

    First, look up the Neutron database. If not found, execute
    a query on NSX platform as the mapping might be missing.
    NOTE: Security groups are called 'security profiles' on the NSX backend.

    :returns: the NSX uuid, or None when no matching profile exists.
    """
    nsx_id = nsx_db.get_nsx_security_group_id(session, neutron_id)
    if not nsx_id:
        # Find security profile on backend.
        # This is a rather expensive query, but it won't be executed
        # more than once for each security group in Neutron's lifetime
        nsx_sec_profiles = secgrouplib.query_security_profiles(
            cluster, '*',
            filters={'tag': neutron_id,
                     'tag_scope': 'q_sec_group_id'})
        # Only one result expected
        # NOTE(salv-orlando): Not handling the case where more than one
        # security profile is found with the same neutron port tag
        if not nsx_sec_profiles:
            LOG.warn(_("Unable to find NSX security profile for Neutron "
                       "security group %s"), neutron_id)
            return
        elif len(nsx_sec_profiles) > 1:
            LOG.warn(_("Multiple NSX security profiles found for Neutron "
                       "security group %s"), neutron_id)
        nsx_sec_profile = nsx_sec_profiles[0]
        nsx_id = nsx_sec_profile['uuid']
        with session.begin(subtransactions=True):
            # Create DB mapping
            nsx_db.add_neutron_nsx_security_group_mapping(
                session, neutron_id, nsx_id)
    return nsx_id


def get_nsx_router_id(session, cluster, neutron_router_id):
    """Return the NSX router uuid for a given neutron router.

    First, look up the Neutron database. If not found, execute
    a query on NSX platform as the mapping might be missing.

    :returns: the NSX uuid, or None when no matching router exists.
    """
    nsx_router_id = nsx_db.get_nsx_router_id(
        session, neutron_router_id)
    if not nsx_router_id:
        # Find logical router from backend.
        # This is a rather expensive query, but it won't be executed
        # more than once for each router in Neutron's lifetime
        nsx_routers = routerlib.query_lrouters(
            cluster, '*',
            filters={'tag': neutron_router_id,
                     'tag_scope': 'q_router_id'})
        # Only one result expected
        # NOTE(salv-orlando): Not handling the case where more than one
        # port is found with the same neutron port tag
        if not nsx_routers:
            LOG.warn(_("Unable to find NSX router for Neutron router %s"),
                     neutron_router_id)
            return
        nsx_router = nsx_routers[0]
        nsx_router_id = nsx_router['uuid']
        with session.begin(subtransactions=True):
            # Create DB mapping
            nsx_db.add_neutron_nsx_router_mapping(
                session,
                neutron_router_id,
                nsx_router_id)
    return nsx_router_id


def create_nsx_cluster(cluster_opts, concurrent_connections, gen_timeout):
    """Build an NSXCluster and attach a configured NSX API client.

    :param cluster_opts: mapping of cluster options (see config module).
    """
    cluster = nsx_cluster.NSXCluster(**cluster_opts)

    def _ctrl_split(x, y):
        # assumes each controller is specified as 'host:port' —
        # TODO confirm no default-port form is ever configured
        return (x, int(y), True)

    api_providers = [_ctrl_split(*ctrl.split(':'))
                     for ctrl in cluster.nsx_controllers]
    cluster.api_client = client.NsxApiClient(
        api_providers, cluster.nsx_user, cluster.nsx_password,
        request_timeout=cluster.req_timeout,
        http_timeout=cluster.http_timeout,
        retries=cluster.retries,
        redirects=cluster.redirects,
        concurrent_connections=concurrent_connections,
        gen_timeout=gen_timeout)
    return cluster


def get_nsx_device_status(cluster, nsx_uuid):
    """Map a gateway device's backend status to a networkgw_db status."""
    try:
        status_up = l2gwlib.get_gateway_device_status(
            cluster, nsx_uuid)
        if status_up:
            return networkgw_db.STATUS_ACTIVE
        else:
            return networkgw_db.STATUS_DOWN
    except api_exc.NsxApiException:
        return networkgw_db.STATUS_UNKNOWN
    except n_exc.NotFound:
        # NOTE(review): siblings return STATUS_* constants; verify
        # networkgw_db really exposes ERROR (vs STATUS_ERROR).
        return networkgw_db.ERROR


def get_nsx_device_statuses(cluster, tenant_id):
    """Return {nsx_device_id: status} for a tenant's gateway devices.

    Returns None when the backend query fails (best-effort semantics).
    """
    try:
        status_dict = l2gwlib.get_gateway_devices_status(
            cluster, tenant_id)
        return dict((nsx_device_id,
                     networkgw_db.STATUS_ACTIVE if connected
                     else networkgw_db.STATUS_DOWN) for
                    (nsx_device_id, connected) in
                    status_dict.iteritems())
    except api_exc.NsxApiException:
        # Do not make a NSX API exception fatal
        if tenant_id:
            LOG.warn(_("Unable to retrieve operational status for gateway "
                       "devices belonging to tenant: %s"), tenant_id)
        else:
            LOG.warn(_("Unable to retrieve operational status for "
                       "gateway devices"))
from neutron.openstack.common import log
from neutron.plugins.vmware.common import nsx_utils

LOG = log.getLogger(__name__)
# Protocol number look up for supported protocols
protocol_num_look_up = {'tcp': 6, 'icmp': 1, 'udp': 17}


def _convert_to_nsx_rule(session, cluster, rule, with_id=False):
    """Converts a Neutron security group rule to the NSX format.

    This routine also replaces Neutron IDs with NSX UUIDs.
    """
    nsx_rule = {}
    params = ['remote_ip_prefix', 'protocol',
              'remote_group_id', 'port_range_min',
              'port_range_max', 'ethertype']
    if with_id:
        params.append('id')

    for param in params:
        value = rule.get(param)
        if param not in rule:
            # NOTE(review): absent keys are recorded explicitly as None —
            # presumably so the NSX API resets them; confirm before
            # changing.
            nsx_rule[param] = value
        elif not value:
            # falsy values (None, 0, '') are simply skipped
            pass
        elif param == 'remote_ip_prefix':
            nsx_rule['ip_prefix'] = rule['remote_ip_prefix']
        elif param == 'remote_group_id':
            # Neutron group id -> NSX security profile uuid
            nsx_rule['profile_uuid'] = nsx_utils.get_nsx_security_group_id(
                session, cluster, rule['remote_group_id'])

        elif param == 'protocol':
            try:
                nsx_rule['protocol'] = int(rule['protocol'])
            except (ValueError, TypeError):
                # named protocol ('tcp'/'udp'/'icmp') -> IANA number
                nsx_rule['protocol'] = (
                    protocol_num_look_up[rule['protocol']])
        else:
            nsx_rule[param] = value
    return nsx_rule


def _convert_to_nsx_rules(session, cluster, rules, with_id=False):
    """Converts a list of Neutron security group rules to the NSX format."""
    nsx_rules = {'logical_port_ingress_rules': [],
                 'logical_port_egress_rules': []}
    for direction in ['logical_port_ingress_rules',
                      'logical_port_egress_rules']:
        for rule in rules[direction]:
            nsx_rules[direction].append(
                _convert_to_nsx_rule(session, cluster, rule, with_id))
    return nsx_rules


def get_security_group_rules_nsx_format(session, cluster,
                                        security_group_rules, with_id=False):
    """Convert neutron security group rules into NSX format.

    This routine splits Neutron security group rules into two lists, one
    for ingress rules and the other for egress rules.
    """

    def fields(rule):
        # 'protocol' was listed twice in the original filter; deduplicated.
        _fields = ['remote_ip_prefix', 'remote_group_id', 'protocol',
                   'port_range_min', 'port_range_max', 'ethertype']
        if with_id:
            _fields.append('id')
        return dict((k, v) for k, v in rule.iteritems() if k in _fields)

    ingress_rules = []
    egress_rules = []
    # NOTE: a dead "if rule.get('souce_group_id')" branch was removed
    # here: that key never exists on a Neutron rule, and converting
    # remote_group_id at this point would make _convert_to_nsx_rule
    # re-convert an already-NSX uuid and fail the lookup.
    for rule in security_group_rules:
        if rule['direction'] == 'ingress':
            ingress_rules.append(fields(rule))
        elif rule['direction'] == 'egress':
            egress_rules.append(fields(rule))
    # NOTE(review): the ingress/egress swap below appears deliberate —
    # NSX logical-port rule directions seem to be expressed from the
    # switch's point of view, the opposite of Neutron's VM-centric
    # directions. Confirm against the NSX API docs before "fixing".
    rules = {'logical_port_ingress_rules': egress_rules,
             'logical_port_egress_rules': ingress_rules}
    return _convert_to_nsx_rules(session, cluster, rules, with_id)


def merge_security_group_rules_with_current(session, cluster,
                                            new_rules, current_rules):
    """Return current_rules (NSX format) extended with new_rules."""
    merged_rules = get_security_group_rules_nsx_format(
        session, cluster, current_rules)
    for new_rule in new_rules:
        rule = new_rule['security_group_rule']
        # Same deliberate direction swap as in
        # get_security_group_rules_nsx_format.
        if rule['direction'] == 'ingress':
            merged_rules['logical_port_egress_rules'].append(
                _convert_to_nsx_rule(session, cluster, rule))
        elif rule['direction'] == 'egress':
            merged_rules['logical_port_ingress_rules'].append(
                _convert_to_nsx_rule(session, cluster, rule))
    return merged_rules


def remove_security_group_with_id_and_id_field(rules, rule_id):
    """Remove rule by rule_id.

    This function receives all of the current rule associated with a
    security group and then removes the rule that matches the rule_id. In
    addition it removes the id field in the dict with each rule since that
    should not be passed to nsx.
    """
    for rule_direction in rules.values():
        item_to_remove = None
        for port_rule in rule_direction:
            if port_rule['id'] == rule_id:
                item_to_remove = port_rule
            else:
                # remove key from dictionary for NSX
                del port_rule['id']
        if item_to_remove:
            rule_direction.remove(item_to_remove)
import random

from neutron.common import constants
from neutron.common import exceptions
from neutron import context
from neutron.db import external_net_db
from neutron.db import l3_db
from neutron.db import models_v2
from neutron.extensions import l3
from neutron.openstack.common import jsonutils
from neutron.openstack.common import log
from neutron.openstack.common import loopingcall
from neutron.openstack.common import timeutils
from neutron.plugins.vmware.api_client import exception as api_exc
from neutron.plugins.vmware.common import exceptions as nsx_exc
from neutron.plugins.vmware.common import nsx_utils
from neutron.plugins.vmware import nsxlib
from neutron.plugins.vmware.nsxlib import router as routerlib
from neutron.plugins.vmware.nsxlib import switch as switchlib

# Maximum page size for a single request
# NOTE(salv-orlando): This might become a version-dependent map should the
# limit be raised in future versions
MAX_PAGE_SIZE = 5000

LOG = log.getLogger(__name__)


class NsxCache(object):
    """A simple Cache for NSX resources.

    Associates resource id with resource hash to rapidly identify
    updated resources.
    Each entry in the cache also stores the following information:
    - changed: the resource in the cache has been altered following
      an update or a delete
    - hit: the resource has been visited during an update (and possibly
      left unchanged)
    - data: current resource data
    - data_bk: backup of resource data prior to its removal
    """

    def __init__(self):
        # Maps a uuid to the dict containing it
        self._uuid_dict_mappings = {}
        # Dicts for NSX cached resources
        self._lswitches = {}
        self._lswitchports = {}
        self._lrouters = {}

    def __getitem__(self, key):
        # uuids are unique across the various types of resources
        # TODO(salv-orlando): Avoid lookups over all dictionaries
        # when retrieving items
        # Fetch lswitches, lports, or lrouters
        resources = self._uuid_dict_mappings[key]
        return resources[key]

    def _update_resources(self, resources, new_resources):
        # Clear the 'changed' attribute for all items.
        # NOTE: items() returns a list snapshot on Python 2, so deleting
        # entries while iterating is safe here.
        for uuid, item in resources.items():
            if item.pop('changed', None) and not item.get('data'):
                # The item is not anymore in NSX, so delete it
                del resources[uuid]
                del self._uuid_dict_mappings[uuid]
                LOG.debug("Removed item %s from NSX object cache", uuid)

        def do_hash(item):
            return hash(jsonutils.dumps(item))

        # Parse new data and identify new, deleted, and updated resources.
        # Guard against None: process_updates()/process_deletes() may be
        # invoked with their default (None) arguments, which previously
        # raised TypeError when iterated.
        for item in (new_resources or []):
            item_id = item['uuid']
            if resources.get(item_id):
                new_hash = do_hash(item)
                if new_hash != resources[item_id]['hash']:
                    resources[item_id]['hash'] = new_hash
                    resources[item_id]['changed'] = True
                    resources[item_id]['data_bk'] = (
                        resources[item_id]['data'])
                    resources[item_id]['data'] = item
                # Mark the item as hit in any case
                resources[item_id]['hit'] = True
                LOG.debug("Updating item %s in NSX object cache", item_id)
            else:
                resources[item_id] = {'hash': do_hash(item)}
                resources[item_id]['hit'] = True
                resources[item_id]['changed'] = True
                resources[item_id]['data'] = item
                # add a uuid to dict mapping for easy retrieval
                # with __getitem__
                self._uuid_dict_mappings[item_id] = resources
                LOG.debug("Added item %s to NSX object cache", item_id)

    def _delete_resources(self, resources):
        # Mark for removal all the elements which have not been visited.
        # And clear the 'hit' attribute.
        for to_delete in [k for (k, v) in resources.iteritems()
                          if not v.pop('hit', False)]:
            resources[to_delete]['changed'] = True
            resources[to_delete]['data_bk'] = (
                resources[to_delete].pop('data', None))

    def _get_resource_ids(self, resources, changed_only):
        if changed_only:
            return [k for (k, v) in resources.iteritems()
                    if v.get('changed')]
        return resources.keys()

    def get_lswitches(self, changed_only=False):
        return self._get_resource_ids(self._lswitches, changed_only)

    def get_lrouters(self, changed_only=False):
        return self._get_resource_ids(self._lrouters, changed_only)

    def get_lswitchports(self, changed_only=False):
        return self._get_resource_ids(self._lswitchports, changed_only)

    def update_lswitch(self, lswitch):
        self._update_resources(self._lswitches, [lswitch])

    def update_lrouter(self, lrouter):
        self._update_resources(self._lrouters, [lrouter])

    def update_lswitchport(self, lswitchport):
        self._update_resources(self._lswitchports, [lswitchport])

    def process_updates(self, lswitches=None,
                        lrouters=None, lswitchports=None):
        self._update_resources(self._lswitches, lswitches)
        self._update_resources(self._lrouters, lrouters)
        self._update_resources(self._lswitchports, lswitchports)
        return (self._get_resource_ids(self._lswitches, changed_only=True),
                self._get_resource_ids(self._lrouters, changed_only=True),
                self._get_resource_ids(self._lswitchports,
                                       changed_only=True))

    def process_deletes(self):
        self._delete_resources(self._lswitches)
        self._delete_resources(self._lrouters)
        self._delete_resources(self._lswitchports)
        return (self._get_resource_ids(self._lswitches, changed_only=True),
                self._get_resource_ids(self._lrouters, changed_only=True),
                self._get_resource_ids(self._lswitchports,
                                       changed_only=True))


class SyncParameters():
    """Defines attributes used by the synchronization procedure.

    chunk_size: Actual chunk size
    extra_chunk_size: Additional data to fetch because of chunk size
    adjustment
    current_chunk: Counter of the current data chunk being synchronized
    Page cursors: markers for the next resource to fetch.
    'start' means page cursor unset for fetching 1st page
    init_sync_performed: True if the initial synchronization concluded
    """

    def __init__(self, min_chunk_size):
        self.chunk_size = min_chunk_size
        self.extra_chunk_size = 0
        self.current_chunk = 0
        self.ls_cursor = 'start'
        self.lr_cursor = 'start'
        self.lp_cursor = 'start'
        self.init_sync_performed = False
        self.total_size = 0


def _start_loopingcall(min_chunk_size, state_sync_interval, func):
    """Start a loopingcall for the synchronization task."""
    # Start a looping call to synchronize operational status
    # for neutron resources
    if not state_sync_interval:
        # do not start the looping call if specified
        # sync interval is 0
        return
    state_synchronizer = loopingcall.DynamicLoopingCall(
        func, sp=SyncParameters(min_chunk_size))
    state_synchronizer.start(
        periodic_interval_max=state_sync_interval)
    return state_synchronizer


class NsxSynchronizer():

    LS_URI = nsxlib._build_uri_path(
        switchlib.LSWITCH_RESOURCE, fields='uuid,tags,fabric_status',
        relations='LogicalSwitchStatus')
    LR_URI = nsxlib._build_uri_path(
        routerlib.LROUTER_RESOURCE, fields='uuid,tags,fabric_status',
        relations='LogicalRouterStatus')
    LP_URI = nsxlib._build_uri_path(
        switchlib.LSWITCHPORT_RESOURCE,
        parent_resource_id='*',
        fields='uuid,tags,fabric_status_up',
        relations='LogicalPortStatus')

    def __init__(self, plugin, cluster, state_sync_interval,
                 req_delay, min_chunk_size, max_rand_delay=0):
        random.seed()
        self._nsx_cache = NsxCache()
+ # Store parameters as instance members + # NOTE(salv-orlando): apologies if it looks java-ish + self._plugin = plugin + self._cluster = cluster + self._req_delay = req_delay + self._sync_interval = state_sync_interval + self._max_rand_delay = max_rand_delay + # Validate parameters + if self._sync_interval < self._req_delay: + err_msg = (_("Minimum request delay:%(req_delay)s must not " + "exceed synchronization interval:%(sync_interval)s") % + {'req_delay': self._req_delay, + 'sync_interval': self._sync_interval}) + LOG.error(err_msg) + raise nsx_exc.NsxPluginException(err_msg=err_msg) + # Backoff time in case of failures while fetching sync data + self._sync_backoff = 1 + # Store the looping call in an instance variable to allow unit tests + # for controlling its lifecycle + self._sync_looping_call = _start_loopingcall( + min_chunk_size, state_sync_interval, self._synchronize_state) + + def _get_tag_dict(self, tags): + return dict((tag.get('scope'), tag['tag']) for tag in tags) + + def synchronize_network(self, context, neutron_network_data, + lswitches=None): + """Synchronize a Neutron network with its NSX counterpart. + + This routine synchronizes a set of switches when a Neutron + network is mapped to multiple lswitches. 
+ """ + if not lswitches: + # Try to get logical switches from nsx + try: + lswitches = nsx_utils.fetch_nsx_switches( + context.session, self._cluster, + neutron_network_data['id']) + except exceptions.NetworkNotFound: + # TODO(salv-orlando): We should be catching + # api_exc.ResourceNotFound here + # The logical switch was not found + LOG.warning(_("Logical switch for neutron network %s not " + "found on NSX."), neutron_network_data['id']) + lswitches = [] + else: + for lswitch in lswitches: + self._nsx_cache.update_lswitch(lswitch) + # By default assume things go wrong + status = constants.NET_STATUS_ERROR + # In most cases lswitches will contain a single element + for ls in lswitches: + if not ls: + # Logical switch was deleted + break + ls_status = ls['_relations']['LogicalSwitchStatus'] + if not ls_status['fabric_status']: + status = constants.NET_STATUS_DOWN + break + else: + # No switch was down or missing. Set status to ACTIVE unless + # there were no switches in the first place! 
+ if lswitches: + status = constants.NET_STATUS_ACTIVE + # Update db object + if status == neutron_network_data['status']: + # do nothing + return + + with context.session.begin(subtransactions=True): + try: + network = self._plugin._get_network(context, + neutron_network_data['id']) + except exceptions.NetworkNotFound: + pass + else: + network.status = status + LOG.debug(_("Updating status for neutron resource %(q_id)s to:" + " %(status)s"), + {'q_id': neutron_network_data['id'], + 'status': status}) + + def _synchronize_lswitches(self, ctx, ls_uuids, scan_missing=False): + if not ls_uuids and not scan_missing: + return + neutron_net_ids = set() + neutron_nsx_mappings = {} + # TODO(salvatore-orlando): Deal with the case the tag + # has been tampered with + for ls_uuid in ls_uuids: + # If the lswitch has been deleted, get backup copy of data + lswitch = (self._nsx_cache[ls_uuid].get('data') or + self._nsx_cache[ls_uuid].get('data_bk')) + tags = self._get_tag_dict(lswitch['tags']) + neutron_id = tags.get('quantum_net_id') + neutron_net_ids.add(neutron_id) + neutron_nsx_mappings[neutron_id] = ( + neutron_nsx_mappings.get(neutron_id, []) + + [self._nsx_cache[ls_uuid]]) + # Fetch neutron networks from database + filters = {'router:external': [False]} + if not scan_missing: + filters['id'] = neutron_net_ids + + networks = self._plugin._get_collection( + ctx, models_v2.Network, self._plugin._make_network_dict, + filters=filters) + + for network in networks: + lswitches = neutron_nsx_mappings.get(network['id'], []) + lswitches = [lswitch.get('data') for lswitch in lswitches] + self.synchronize_network(ctx, network, lswitches) + + def synchronize_router(self, context, neutron_router_data, + lrouter=None): + """Synchronize a neutron router with its NSX counterpart.""" + if not lrouter: + # Try to get router from nsx + try: + # This query will return the logical router status too + nsx_router_id = nsx_utils.get_nsx_router_id( + context.session, self._cluster, 
neutron_router_data['id']) + if nsx_router_id: + lrouter = routerlib.get_lrouter( + self._cluster, nsx_router_id) + except exceptions.NotFound: + # NOTE(salv-orlando): We should be catching + # api_exc.ResourceNotFound here + # The logical router was not found + LOG.warning(_("Logical router for neutron router %s not " + "found on NSX."), neutron_router_data['id']) + if lrouter: + # Update the cache + self._nsx_cache.update_lrouter(lrouter) + + # Note(salv-orlando): It might worth adding a check to verify neutron + # resource tag in nsx entity matches a Neutron id. + # By default assume things go wrong + status = constants.NET_STATUS_ERROR + if lrouter: + lr_status = (lrouter['_relations'] + ['LogicalRouterStatus'] + ['fabric_status']) + status = (lr_status and + constants.NET_STATUS_ACTIVE + or constants.NET_STATUS_DOWN) + # Update db object + if status == neutron_router_data['status']: + # do nothing + return + + with context.session.begin(subtransactions=True): + try: + router = self._plugin._get_router(context, + neutron_router_data['id']) + except l3.RouterNotFound: + pass + else: + router.status = status + LOG.debug(_("Updating status for neutron resource %(q_id)s to:" + " %(status)s"), + {'q_id': neutron_router_data['id'], + 'status': status}) + + def _synchronize_lrouters(self, ctx, lr_uuids, scan_missing=False): + if not lr_uuids and not scan_missing: + return + # TODO(salvatore-orlando): Deal with the case the tag + # has been tampered with + neutron_router_mappings = {} + for lr_uuid in lr_uuids: + lrouter = (self._nsx_cache[lr_uuid].get('data') or + self._nsx_cache[lr_uuid].get('data_bk')) + tags = self._get_tag_dict(lrouter['tags']) + neutron_router_id = tags.get('q_router_id') + if neutron_router_id: + neutron_router_mappings[neutron_router_id] = ( + self._nsx_cache[lr_uuid]) + else: + LOG.warn(_("Unable to find Neutron router id for " + "NSX logical router: %s"), lr_uuid) + # Fetch neutron routers from database + filters = ({} if scan_missing else + 
{'id': neutron_router_mappings.keys()}) + routers = self._plugin._get_collection( + ctx, l3_db.Router, self._plugin._make_router_dict, + filters=filters) + for router in routers: + lrouter = neutron_router_mappings.get(router['id']) + self.synchronize_router( + ctx, router, lrouter and lrouter.get('data')) + + def synchronize_port(self, context, neutron_port_data, + lswitchport=None, ext_networks=None): + """Synchronize a Neutron port with its NSX counterpart.""" + # Skip synchronization for ports on external networks + if not ext_networks: + ext_networks = [net['id'] for net in context.session.query( + models_v2.Network).join( + external_net_db.ExternalNetwork, + (models_v2.Network.id == + external_net_db.ExternalNetwork.network_id))] + if neutron_port_data['network_id'] in ext_networks: + with context.session.begin(subtransactions=True): + neutron_port_data['status'] = constants.PORT_STATUS_ACTIVE + return + + if not lswitchport: + # Try to get port from nsx + try: + ls_uuid, lp_uuid = nsx_utils.get_nsx_switch_and_port_id( + context.session, self._cluster, neutron_port_data['id']) + if lp_uuid: + lswitchport = switchlib.get_port( + self._cluster, ls_uuid, lp_uuid, + relations='LogicalPortStatus') + except (exceptions.PortNotFoundOnNetwork): + # NOTE(salv-orlando): We should be catching + # api_exc.ResourceNotFound here instead + # of PortNotFoundOnNetwork when the id exists but + # the logical switch port was not found + LOG.warning(_("Logical switch port for neutron port %s " + "not found on NSX."), neutron_port_data['id']) + lswitchport = None + else: + # If lswitchport is not None, update the cache. + # It could be none if the port was deleted from the backend + if lswitchport: + self._nsx_cache.update_lswitchport(lswitchport) + # Note(salv-orlando): It might worth adding a check to verify neutron + # resource tag in nsx entity matches Neutron id. 
+ # By default assume things go wrong + status = constants.PORT_STATUS_ERROR + if lswitchport: + lp_status = (lswitchport['_relations'] + ['LogicalPortStatus'] + ['fabric_status_up']) + status = (lp_status and + constants.PORT_STATUS_ACTIVE + or constants.PORT_STATUS_DOWN) + + # Update db object + if status == neutron_port_data['status']: + # do nothing + return + + with context.session.begin(subtransactions=True): + try: + port = self._plugin._get_port(context, + neutron_port_data['id']) + except exceptions.PortNotFound: + pass + else: + port.status = status + LOG.debug(_("Updating status for neutron resource %(q_id)s to:" + " %(status)s"), + {'q_id': neutron_port_data['id'], + 'status': status}) + + def _synchronize_lswitchports(self, ctx, lp_uuids, scan_missing=False): + if not lp_uuids and not scan_missing: + return + # Find Neutron port id by tag - the tag is already + # loaded in memory, no reason for doing a db query + # TODO(salvatore-orlando): Deal with the case the tag + # has been tampered with + neutron_port_mappings = {} + for lp_uuid in lp_uuids: + lport = (self._nsx_cache[lp_uuid].get('data') or + self._nsx_cache[lp_uuid].get('data_bk')) + tags = self._get_tag_dict(lport['tags']) + neutron_port_id = tags.get('q_port_id') + if neutron_port_id: + neutron_port_mappings[neutron_port_id] = ( + self._nsx_cache[lp_uuid]) + # Fetch neutron ports from database + # At the first sync we need to fetch all ports + filters = ({} if scan_missing else + {'id': neutron_port_mappings.keys()}) + # TODO(salv-orlando): Work out a solution for avoiding + # this query + ext_nets = [net['id'] for net in ctx.session.query( + models_v2.Network).join( + external_net_db.ExternalNetwork, + (models_v2.Network.id == + external_net_db.ExternalNetwork.network_id))] + ports = self._plugin._get_collection( + ctx, models_v2.Port, self._plugin._make_port_dict, + filters=filters) + for port in ports: + lswitchport = neutron_port_mappings.get(port['id']) + self.synchronize_port( + ctx, 
port, lswitchport and lswitchport.get('data'), + ext_networks=ext_nets) + + def _get_chunk_size(self, sp): + # NOTE(salv-orlando): Try to use __future__ for this routine only? + ratio = ((float(sp.total_size) / float(sp.chunk_size)) / + (float(self._sync_interval) / float(self._req_delay))) + new_size = max(1.0, ratio) * float(sp.chunk_size) + return int(new_size) + (new_size - int(new_size) > 0) + + def _fetch_data(self, uri, cursor, page_size): + # If not cursor there is nothing to retrieve + if cursor: + if cursor == 'start': + cursor = None + # Chunk size tuning might, in some conditions, make it larger + # than 5,000, which is the maximum page size allowed by the NSX + # API. In this case the request should be split in multiple + # requests. This is not ideal, and therefore a log warning will + # be emitted. + num_requests = page_size / (MAX_PAGE_SIZE + 1) + 1 + if num_requests > 1: + LOG.warn(_("Requested page size is %(cur_chunk_size)d." + "It might be necessary to do %(num_requests)d " + "round-trips to NSX for fetching data. 
Please " + "tune sync parameters to ensure chunk size " + "is less than %(max_page_size)d"), + {'cur_chunk_size': page_size, + 'num_requests': num_requests, + 'max_page_size': MAX_PAGE_SIZE}) + # Only the first request might return the total size, + # subsequent requests will definetely not + results, cursor, total_size = nsxlib.get_single_query_page( + uri, self._cluster, cursor, + min(page_size, MAX_PAGE_SIZE)) + for _req in range(num_requests - 1): + # If no cursor is returned break the cycle as there is no + # actual need to perform multiple requests (all fetched) + # This happens when the overall size of resources exceeds + # the maximum page size, but the number for each single + # resource type is below this threshold + if not cursor: + break + req_results, cursor = nsxlib.get_single_query_page( + uri, self._cluster, cursor, + min(page_size, MAX_PAGE_SIZE))[:2] + results.extend(req_results) + # reset cursor before returning if we queried just to + # know the number of entities + return results, cursor if page_size else 'start', total_size + return [], cursor, None + + def _fetch_nsx_data_chunk(self, sp): + base_chunk_size = sp.chunk_size + chunk_size = base_chunk_size + sp.extra_chunk_size + LOG.info(_("Fetching up to %s resources " + "from NSX backend"), chunk_size) + fetched = ls_count = lr_count = lp_count = 0 + lswitches = lrouters = lswitchports = [] + if sp.ls_cursor or sp.ls_cursor == 'start': + (lswitches, sp.ls_cursor, ls_count) = self._fetch_data( + self.LS_URI, sp.ls_cursor, chunk_size) + fetched = len(lswitches) + if fetched < chunk_size and sp.lr_cursor or sp.lr_cursor == 'start': + (lrouters, sp.lr_cursor, lr_count) = self._fetch_data( + self.LR_URI, sp.lr_cursor, max(chunk_size - fetched, 0)) + fetched += len(lrouters) + if fetched < chunk_size and sp.lp_cursor or sp.lp_cursor == 'start': + (lswitchports, sp.lp_cursor, lp_count) = self._fetch_data( + self.LP_URI, sp.lp_cursor, max(chunk_size - fetched, 0)) + fetched += len(lswitchports) + if 
sp.current_chunk == 0: + # No cursors were provided. Then it must be possible to + # calculate the total amount of data to fetch + sp.total_size = ls_count + lr_count + lp_count + LOG.debug(_("Total data size: %d"), sp.total_size) + sp.chunk_size = self._get_chunk_size(sp) + # Calculate chunk size adjustment + sp.extra_chunk_size = sp.chunk_size - base_chunk_size + LOG.debug(_("Fetched %(num_lswitches)d logical switches, " + "%(num_lswitchports)d logical switch ports," + "%(num_lrouters)d logical routers"), + {'num_lswitches': len(lswitches), + 'num_lswitchports': len(lswitchports), + 'num_lrouters': len(lrouters)}) + return (lswitches, lrouters, lswitchports) + + def _synchronize_state(self, sp): + # If the plugin has been destroyed, stop the LoopingCall + if not self._plugin: + raise loopingcall.LoopingCallDone + start = timeutils.utcnow() + # Reset page cursor variables if necessary + if sp.current_chunk == 0: + sp.ls_cursor = sp.lr_cursor = sp.lp_cursor = 'start' + LOG.info(_("Running state synchronization task. Chunk: %s"), + sp.current_chunk) + # Fetch chunk_size data from NSX + try: + (lswitches, lrouters, lswitchports) = ( + self._fetch_nsx_data_chunk(sp)) + except (api_exc.RequestTimeout, api_exc.NsxApiException): + sleep_interval = self._sync_backoff + # Cap max back off to 64 seconds + self._sync_backoff = min(self._sync_backoff * 2, 64) + LOG.exception(_("An error occurred while communicating with " + "NSX backend. 
Will retry synchronization " + "in %d seconds"), sleep_interval) + return sleep_interval + LOG.debug(_("Time elapsed querying NSX: %s"), + timeutils.utcnow() - start) + if sp.total_size: + num_chunks = ((sp.total_size / sp.chunk_size) + + (sp.total_size % sp.chunk_size != 0)) + else: + num_chunks = 1 + LOG.debug(_("Number of chunks: %d"), num_chunks) + # Find objects which have changed on NSX side and need + # to be synchronized + LOG.debug("Processing NSX cache for updated objects") + (ls_uuids, lr_uuids, lp_uuids) = self._nsx_cache.process_updates( + lswitches, lrouters, lswitchports) + # Process removed objects only at the last chunk + scan_missing = (sp.current_chunk == num_chunks - 1 and + not sp.init_sync_performed) + if sp.current_chunk == num_chunks - 1: + LOG.debug("Processing NSX cache for deleted objects") + self._nsx_cache.process_deletes() + ls_uuids = self._nsx_cache.get_lswitches( + changed_only=not scan_missing) + lr_uuids = self._nsx_cache.get_lrouters( + changed_only=not scan_missing) + lp_uuids = self._nsx_cache.get_lswitchports( + changed_only=not scan_missing) + LOG.debug(_("Time elapsed hashing data: %s"), + timeutils.utcnow() - start) + # Get an admin context + ctx = context.get_admin_context() + # Synchronize with database + self._synchronize_lswitches(ctx, ls_uuids, + scan_missing=scan_missing) + self._synchronize_lrouters(ctx, lr_uuids, + scan_missing=scan_missing) + self._synchronize_lswitchports(ctx, lp_uuids, + scan_missing=scan_missing) + # Increase chunk counter + LOG.info(_("Synchronization for chunk %(chunk_num)d of " + "%(total_chunks)d performed"), + {'chunk_num': sp.current_chunk + 1, + 'total_chunks': num_chunks}) + sp.current_chunk = (sp.current_chunk + 1) % num_chunks + added_delay = 0 + if sp.current_chunk == 0: + # Ensure init_sync_performed is True + if not sp.init_sync_performed: + sp.init_sync_performed = True + # Add additional random delay + added_delay = random.randint(0, self._max_rand_delay) + LOG.debug(_("Time 
elapsed at end of sync: %s"), + timeutils.utcnow() - start) + return self._sync_interval / num_chunks + added_delay diff --git a/neutron/plugins/vmware/common/utils.py b/neutron/plugins/vmware/common/utils.py new file mode 100644 index 000000000..496fa48a3 --- /dev/null +++ b/neutron/plugins/vmware/common/utils.py @@ -0,0 +1,69 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 VMware, Inc. +# All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import hashlib + +from neutron.api.v2 import attributes +from neutron.openstack.common import log +from neutron import version + + +LOG = log.getLogger(__name__) +MAX_DISPLAY_NAME_LEN = 40 +NEUTRON_VERSION = version.version_info.release_string() + + +# Allowed network types for the NSX Plugin +class NetworkTypes: + """Allowed provider network types for the NSX Plugin.""" + L3_EXT = 'l3_ext' + STT = 'stt' + GRE = 'gre' + FLAT = 'flat' + VLAN = 'vlan' + BRIDGE = 'bridge' + + +def get_tags(**kwargs): + tags = ([dict(tag=value, scope=key) + for key, value in kwargs.iteritems()]) + tags.append({"tag": NEUTRON_VERSION, "scope": "quantum"}) + return tags + + +def device_id_to_vm_id(device_id, obfuscate=False): + # device_id can be longer than 40 characters, for example + # a device_id for a dhcp port is like the following: + # + # dhcp83b5fdeb-e3b4-5e18-ac5f-55161...80747326-47d7-46c2-a87a-cf6d5194877c + # + # To fit it into an NSX tag we need to hash it, however device_id + # used for ports 
def check_and_truncate(display_name):
    """Return display_name trimmed to the NSX maximum display length.

    Unset names are normalized to the empty string; names longer than
    MAX_DISPLAY_NAME_LEN are truncated (a debug message is logged).
    """
    if not attributes.is_attr_set(display_name):
        # Unset attribute: normalize to empty string
        return display_name or ''
    if len(display_name) <= MAX_DISPLAY_NAME_LEN:
        return display_name
    LOG.debug(_("Specified name:'%s' exceeds maximum length. "
                "It will be truncated on NSX"), display_name)
    return display_name[:MAX_DISPLAY_NAME_LEN]
+ filter_by(network_id=network_id). + all()) + + +def get_network_bindings_by_vlanid(session, vlan_id): + session = session or db.get_session() + return (session.query(models.TzNetworkBinding). + filter_by(vlan_id=vlan_id). + all()) + + +def add_network_binding(session, network_id, binding_type, phy_uuid, vlan_id): + with session.begin(subtransactions=True): + binding = models.TzNetworkBinding(network_id, binding_type, + phy_uuid, vlan_id) + session.add(binding) + return binding + + +def add_neutron_nsx_network_mapping(session, neutron_id, nsx_switch_id): + with session.begin(subtransactions=True): + mapping = models.NeutronNsxNetworkMapping( + neutron_id=neutron_id, nsx_id=nsx_switch_id) + session.add(mapping) + return mapping + + +def add_neutron_nsx_port_mapping(session, neutron_id, + nsx_switch_id, nsx_port_id): + session.begin(subtransactions=True) + try: + mapping = models.NeutronNsxPortMapping( + neutron_id, nsx_switch_id, nsx_port_id) + session.add(mapping) + session.commit() + except db_exc.DBDuplicateEntry: + with excutils.save_and_reraise_exception() as ctxt: + session.rollback() + # do not complain if the same exact mapping is being added, + # otherwise re-raise because even though it is possible for the + # same neutron port to map to different back-end ports over time, + # this should not occur whilst a mapping already exists + current = get_nsx_switch_and_port_id(session, neutron_id) + if current[1] == nsx_port_id: + LOG.debug(_("Port mapping for %s already available"), + neutron_id) + ctxt.reraise = False + except db_exc.DBError: + with excutils.save_and_reraise_exception(): + # rollback for any other db error + session.rollback() + return mapping + + +def add_neutron_nsx_router_mapping(session, neutron_id, nsx_router_id): + with session.begin(subtransactions=True): + mapping = models.NeutronNsxRouterMapping( + neutron_id=neutron_id, nsx_id=nsx_router_id) + session.add(mapping) + return mapping + + +def 
def get_nsx_switch_ids(session, neutron_id):
    """Return the NSX switch ids mapped to a Neutron network.

    A list is returned because a Neutron network may be backed by a
    chain of logical switches.
    """
    query = session.query(models.NeutronNsxNetworkMapping)
    return [row['nsx_id'] for row in query.filter_by(neutron_id=neutron_id)]


def get_nsx_switch_and_port_id(session, neutron_id):
    """Return the (switch id, port id) pair mapped to a Neutron port.

    Both elements are None when no mapping has been stored yet.
    """
    query = session.query(models.NeutronNsxPortMapping)
    try:
        row = query.filter_by(neutron_id=neutron_id).one()
    except exc.NoResultFound:
        LOG.debug(_("NSX identifiers for neutron port %s not yet "
                    "stored in Neutron DB"), neutron_id)
        return None, None
    return row['nsx_switch_id'], row['nsx_port_id']


def get_nsx_router_id(session, neutron_id):
    """Return the NSX router id mapped to a Neutron router.

    None is returned when no mapping has been stored yet.
    """
    query = session.query(models.NeutronNsxRouterMapping)
    try:
        row = query.filter_by(neutron_id=neutron_id).one()
    except exc.NoResultFound:
        LOG.debug(_("NSX identifiers for neutron router %s not yet "
                    "stored in Neutron DB"), neutron_id)
        return None
    return row['nsx_id']
+ one()) + return mapping['nsx_id'] + except exc.NoResultFound: + LOG.debug(_("NSX identifiers for neutron security group %s not yet " + "stored in Neutron DB"), neutron_id) + return None + + +def _delete_by_neutron_id(session, model, neutron_id): + return session.query(model).filter_by(neutron_id=neutron_id).delete() + + +def delete_neutron_nsx_port_mapping(session, neutron_id): + return _delete_by_neutron_id( + session, models.NeutronNsxPortMapping, neutron_id) + + +def delete_neutron_nsx_router_mapping(session, neutron_id): + return _delete_by_neutron_id( + session, models.NeutronNsxRouterMapping, neutron_id) + + +def unset_default_network_gateways(session): + with session.begin(subtransactions=True): + session.query(networkgw_db.NetworkGateway).update( + {networkgw_db.NetworkGateway.default: False}) + + +def set_default_network_gateway(session, gw_id): + with session.begin(subtransactions=True): + gw = (session.query(networkgw_db.NetworkGateway). + filter_by(id=gw_id).one()) + gw['default'] = True + + +def set_multiprovider_network(session, network_id): + with session.begin(subtransactions=True): + multiprovider_network = models.MultiProviderNetworks( + network_id) + session.add(multiprovider_network) + return multiprovider_network + + +def is_multiprovider_network(session, network_id): + with session.begin(subtransactions=True): + return bool( + session.query(models.MultiProviderNetworks).filter_by( + network_id=network_id).first()) diff --git a/neutron/plugins/vmware/dbexts/distributedrouter.py b/neutron/plugins/vmware/dbexts/distributedrouter.py new file mode 100644 index 000000000..5c6accbd3 --- /dev/null +++ b/neutron/plugins/vmware/dbexts/distributedrouter.py @@ -0,0 +1,28 @@ +# Copyright 2013 VMware, Inc. All rights reserved. +# +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
class DistributedRouter_mixin(nsxrouter.NsxRouterMixin):
    """Mixin class to enable distributed router support."""

    # Extend the base NSX router attribute list with the 'distributed'
    # extension attribute; routers default to non-distributed when the
    # attribute is not explicitly set.
    nsx_attributes = (
        nsxrouter.NsxRouterMixin.nsx_attributes + [{
            'name': dist_rtr.DISTRIBUTED,
            'default': False
        }])
# --- neutron/plugins/vmware/dbexts/lsn_db.py (reconstructed from patch) ---

from sqlalchemy import Column
from sqlalchemy import ForeignKey
from sqlalchemy import orm
from sqlalchemy import String

from neutron.db import models_v2
from neutron.openstack.common.db import exception as d_exc
from neutron.openstack.common import log as logging
from neutron.plugins.vmware.common import exceptions as p_exc


LOG = logging.getLogger(__name__)


class LsnPort(models_v2.model_base.BASEV2):
    """Port attached to a Logical Service Node (LSN)."""

    __tablename__ = 'lsn_port'

    lsn_port_id = Column(String(36), primary_key=True)
    lsn_id = Column(String(36), ForeignKey('lsn.lsn_id', ondelete="CASCADE"),
                    nullable=False)
    sub_id = Column(String(36), nullable=False, unique=True)
    mac_addr = Column(String(32), nullable=False, unique=True)

    def __init__(self, lsn_port_id, subnet_id, mac_address, lsn_id):
        self.lsn_port_id = lsn_port_id
        self.lsn_id = lsn_id
        self.sub_id = subnet_id
        self.mac_addr = mac_address


class Lsn(models_v2.model_base.BASEV2):
    """Logical Service Node, bound to a single neutron network."""

    __tablename__ = 'lsn'

    lsn_id = Column(String(36), primary_key=True)
    net_id = Column(String(36), nullable=False)

    def __init__(self, net_id, lsn_id):
        self.net_id = net_id
        self.lsn_id = lsn_id


def lsn_add(context, network_id, lsn_id):
    """Add Logical Service Node information to persistent datastore."""
    with context.session.begin(subtransactions=True):
        context.session.add(Lsn(network_id, lsn_id))


def lsn_remove(context, lsn_id):
    """Remove Logical Service Node information from datastore given its id."""
    with context.session.begin(subtransactions=True):
        context.session.query(Lsn).filter_by(lsn_id=lsn_id).delete()


def lsn_remove_for_network(context, network_id):
    """Remove information about the Logical Service Node given its network."""
    with context.session.begin(subtransactions=True):
        context.session.query(Lsn).filter_by(net_id=network_id).delete()


def lsn_get_for_network(context, network_id, raise_on_err=True):
    """Retrieve LSN information given its network id."""
    try:
        return context.session.query(Lsn).filter_by(net_id=network_id).one()
    except (orm.exc.NoResultFound, d_exc.DBError):
        # Log at error level when the caller treats a miss as fatal,
        # otherwise just warn.
        logger = LOG.error if raise_on_err else LOG.warn
        logger(_('Unable to find Logical Service Node for '
                 'network %s'), network_id)
        if raise_on_err:
            raise p_exc.LsnNotFound(entity='network',
                                    entity_id=network_id)


def lsn_port_add_for_lsn(context, lsn_port_id, subnet_id, mac, lsn_id):
    """Add Logical Service Node Port information to persistent datastore."""
    with context.session.begin(subtransactions=True):
        context.session.add(LsnPort(lsn_port_id, subnet_id, mac, lsn_id))


def lsn_port_get_for_subnet(context, subnet_id, raise_on_err=True):
    """Return Logical Service Node Port information given its subnet id."""
    with context.session.begin(subtransactions=True):
        try:
            return (context.session.query(LsnPort).
                    filter_by(sub_id=subnet_id).one())
        except (orm.exc.NoResultFound, d_exc.DBError):
            if raise_on_err:
                raise p_exc.LsnPortNotFound(lsn_id=None,
                                            entity='subnet',
                                            entity_id=subnet_id)


def lsn_port_get_for_mac(context, mac_address, raise_on_err=True):
    """Return Logical Service Node Port information given its mac address."""
    with context.session.begin(subtransactions=True):
        try:
            return (context.session.query(LsnPort).
                    filter_by(mac_addr=mac_address).one())
        except (orm.exc.NoResultFound, d_exc.DBError):
            if raise_on_err:
                raise p_exc.LsnPortNotFound(lsn_id=None,
                                            entity='mac',
                                            entity_id=mac_address)


def lsn_port_remove(context, lsn_port_id):
    """Remove Logical Service Node port from the given Logical Service Node."""
    with context.session.begin(subtransactions=True):
        (context.session.query(LsnPort).
         filter_by(lsn_port_id=lsn_port_id).delete())
# --- neutron/plugins/vmware/dbexts/maclearning.py (reconstructed from patch) ---
# Copyright 2013 VMware, Inc. All rights reserved. (Apache License 2.0)

import sqlalchemy as sa
from sqlalchemy import orm
from sqlalchemy.orm import exc

from neutron.api.v2 import attributes
from neutron.db import db_base_plugin_v2
from neutron.db import model_base
from neutron.db import models_v2
from neutron.openstack.common import log as logging
from neutron.plugins.vmware.extensions import maclearning as mac

LOG = logging.getLogger(__name__)


class MacLearningState(model_base.BASEV2):
    """Per-port flag recording whether MAC learning is enabled."""

    port_id = sa.Column(sa.String(36),
                        sa.ForeignKey('ports.id', ondelete="CASCADE"),
                        primary_key=True)
    mac_learning_enabled = sa.Column(sa.Boolean(), nullable=False)

    # Backref onto the Port model so the association is eagerly
    # (joined) loaded whenever a port is fetched.
    port = orm.relationship(
        models_v2.Port,
        backref=orm.backref("mac_learning_state", lazy='joined',
                            uselist=False, cascade='delete'))


class MacLearningDbMixin(object):
    """Mixin class for mac learning."""

    def _make_mac_learning_state_dict(self, port, fields=None):
        state_dict = {'port_id': port['port_id'],
                      mac.MAC_LEARNING: port[mac.MAC_LEARNING]}
        return self._fields(state_dict, fields)

    def _extend_port_mac_learning_state(self, port_res, port_db):
        # Only surface the attribute when a state row exists and the
        # flag is actually set.
        binding = port_db.mac_learning_state
        if binding and binding.mac_learning_enabled:
            port_res[mac.MAC_LEARNING] = binding.mac_learning_enabled

    # Register dict extend functions for ports.
    db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs(
        attributes.PORTS, ['_extend_port_mac_learning_state'])

    def _update_mac_learning_state(self, context, port_id, enabled):
        # Update in place when a row exists; create one otherwise (EAFP).
        try:
            query = self._model_query(context, MacLearningState)
            state = query.filter(MacLearningState.port_id == port_id).one()
            state.update({mac.MAC_LEARNING: enabled})
        except exc.NoResultFound:
            self._create_mac_learning_state(context,
                                            {'id': port_id,
                                             mac.MAC_LEARNING: enabled})

    def _create_mac_learning_state(self, context, port):
        with context.session.begin(subtransactions=True):
            state = MacLearningState(
                port_id=port['id'],
                mac_learning_enabled=port[mac.MAC_LEARNING])
            context.session.add(state)
        return self._make_mac_learning_state_dict(state)

# --- neutron/plugins/vmware/dbexts/models.py (Apache 2.0 license header) ---
# --- neutron/plugins/vmware/dbexts/models.py (reconstructed from patch) ---
# Copyright 2013 VMware, Inc. All Rights Reserved. (Apache License 2.0)


from sqlalchemy import Boolean, Column, Enum, ForeignKey, Integer, String
from sqlalchemy import orm

from neutron.db import l3_db
from neutron.db import model_base


class TzNetworkBinding(model_base.BASEV2):
    """Represents a binding of a virtual network with a transport zone.

    This model class associates a Neutron network with a transport zone;
    optionally a vlan ID might be used if the binding type is 'bridge'
    """
    __tablename__ = 'tz_network_bindings'

    # TODO(arosen) - it might be worth while refactoring the how this data
    # is stored later so every column does not need to be a primary key.
    network_id = Column(String(36),
                        ForeignKey('networks.id', ondelete="CASCADE"),
                        primary_key=True)
    # 'flat', 'vlan', 'stt', 'gre' or 'l3_ext'
    binding_type = Column(Enum('flat', 'vlan', 'stt', 'gre', 'l3_ext',
                               name='tz_network_bindings_binding_type'),
                          nullable=False, primary_key=True)
    phy_uuid = Column(String(36), primary_key=True, nullable=True)
    vlan_id = Column(Integer, primary_key=True, nullable=True,
                     autoincrement=False)

    def __init__(self, network_id, binding_type, phy_uuid, vlan_id):
        self.network_id = network_id
        self.binding_type = binding_type
        self.phy_uuid = phy_uuid
        self.vlan_id = vlan_id

    def __repr__(self):
        # BUG FIX: the format string was empty ("" % (...)), which raises
        # "TypeError: not all arguments converted during string formatting".
        # Restore the angle-bracket template stripped by text mangling.
        return "<TzNetworkBinding(%s,%s,%s,%s)>" % (self.network_id,
                                                    self.binding_type,
                                                    self.phy_uuid,
                                                    self.vlan_id)


class NeutronNsxNetworkMapping(model_base.BASEV2):
    """Maps neutron network identifiers to NSX identifiers.

    Because of chained logical switches more than one mapping might exist
    for a single Neutron network.
    """
    __tablename__ = 'neutron_nsx_network_mappings'
    neutron_id = Column(String(36),
                        ForeignKey('networks.id', ondelete='CASCADE'),
                        primary_key=True)
    nsx_id = Column(String(36), primary_key=True)


class NeutronNsxSecurityGroupMapping(model_base.BASEV2):
    """Backend mappings for Neutron Security Group identifiers.

    This class maps a neutron security group identifier to the corresponding
    NSX security profile identifier.
    """

    __tablename__ = 'neutron_nsx_security_group_mappings'
    neutron_id = Column(String(36),
                        ForeignKey('securitygroups.id', ondelete="CASCADE"),
                        primary_key=True)
    nsx_id = Column(String(36), primary_key=True)


class NeutronNsxPortMapping(model_base.BASEV2):
    """Represents the mapping between neutron and nsx port uuids."""

    __tablename__ = 'neutron_nsx_port_mappings'
    neutron_id = Column(String(36),
                        ForeignKey('ports.id', ondelete="CASCADE"),
                        primary_key=True)
    nsx_switch_id = Column(String(36))
    nsx_port_id = Column(String(36), nullable=False)

    def __init__(self, neutron_id, nsx_switch_id, nsx_port_id):
        self.neutron_id = neutron_id
        self.nsx_switch_id = nsx_switch_id
        self.nsx_port_id = nsx_port_id


class NeutronNsxRouterMapping(model_base.BASEV2):
    """Maps neutron router identifiers to NSX identifiers."""
    __tablename__ = 'neutron_nsx_router_mappings'
    neutron_id = Column(String(36),
                        ForeignKey('routers.id', ondelete='CASCADE'),
                        primary_key=True)
    nsx_id = Column(String(36))


class MultiProviderNetworks(model_base.BASEV2):
    """Networks provisioned through multiprovider extension."""

    __tablename__ = 'multi_provider_networks'
    network_id = Column(String(36),
                        ForeignKey('networks.id', ondelete="CASCADE"),
                        primary_key=True)

    def __init__(self, network_id):
        self.network_id = network_id


class NSXRouterExtAttributes(model_base.BASEV2):
    """Router attributes managed by NSX plugin extensions."""
    router_id = Column(String(36),
                       ForeignKey('routers.id', ondelete="CASCADE"),
                       primary_key=True)
    distributed = Column(Boolean, default=False, nullable=False)
    service_router = Column(Boolean, default=False, nullable=False)
    # Add a relationship to the Router model in order to instruct
    # SQLAlchemy to eagerly load this association
    router = orm.relationship(
        l3_db.Router,
        backref=orm.backref("nsx_attributes", lazy='joined',
                            uselist=False, cascade='delete'))

# --- neutron/plugins/vmware/dbexts/networkgw_db.py (Apache 2.0 license header) ---
# --- neutron/plugins/vmware/dbexts/networkgw_db.py (reconstructed from patch) ---

import sqlalchemy as sa

from sqlalchemy import orm
from sqlalchemy.orm import exc as sa_orm_exc

from neutron.api.v2 import attributes
from neutron.common import exceptions
from neutron.db import model_base
from neutron.db import models_v2
from neutron.openstack.common import log as logging
from neutron.openstack.common import uuidutils
from neutron.plugins.vmware.extensions import networkgw


LOG = logging.getLogger(__name__)
DEVICE_OWNER_NET_GW_INTF = 'network:gateway-interface'
NETWORK_ID = 'network_id'
SEGMENTATION_TYPE = 'segmentation_type'
SEGMENTATION_ID = 'segmentation_id'
ALLOWED_CONNECTION_ATTRIBUTES = set((NETWORK_ID,
                                     SEGMENTATION_TYPE,
                                     SEGMENTATION_ID))
# Constants for gateway device operational status
STATUS_UNKNOWN = "UNKNOWN"
STATUS_ERROR = "ERROR"
STATUS_ACTIVE = "ACTIVE"
STATUS_DOWN = "DOWN"


class GatewayInUse(exceptions.InUse):
    message = _("Network Gateway '%(gateway_id)s' still has active mappings "
                "with one or more neutron networks.")


class GatewayNotFound(exceptions.NotFound):
    message = _("Network Gateway %(gateway_id)s could not be found")


class GatewayDeviceInUse(exceptions.InUse):
    message = _("Network Gateway Device '%(device_id)s' is still used by "
                "one or more network gateways.")


class GatewayDeviceNotFound(exceptions.NotFound):
    message = _("Network Gateway Device %(device_id)s could not be found.")


class NetworkGatewayPortInUse(exceptions.InUse):
    message = _("Port '%(port_id)s' is owned by '%(device_owner)s' and "
                "therefore cannot be deleted directly via the port API.")


class GatewayConnectionInUse(exceptions.InUse):
    message = _("The specified mapping '%(mapping)s' is already in use on "
                "network gateway '%(gateway_id)s'.")


class MultipleGatewayConnections(exceptions.Conflict):
    message = _("Multiple network connections found on '%(gateway_id)s' "
                "with provided criteria.")


class GatewayConnectionNotFound(exceptions.NotFound):
    message = _("The connection %(network_mapping_info)s was not found on the "
                "network gateway '%(network_gateway_id)s'")


class NetworkGatewayUnchangeable(exceptions.InUse):
    message = _("The network gateway %(gateway_id)s "
                "cannot be updated or deleted")


class NetworkConnection(model_base.BASEV2, models_v2.HasTenant):
    """Defines a connection between a network gateway and a network."""
    # We use port_id as the primary key as one can connect a gateway
    # to a network in multiple ways (and we cannot use the same port form
    # more than a single gateway)
    network_gateway_id = sa.Column(sa.String(36),
                                   sa.ForeignKey('networkgateways.id',
                                                 ondelete='CASCADE'))
    network_id = sa.Column(sa.String(36),
                           sa.ForeignKey('networks.id', ondelete='CASCADE'))
    segmentation_type = sa.Column(
        sa.Enum('flat', 'vlan',
                name='networkconnections_segmentation_type'))
    segmentation_id = sa.Column(sa.Integer)
    __table_args__ = (sa.UniqueConstraint(network_gateway_id,
                                          segmentation_type,
                                          segmentation_id),)
    # Also, storing port id comes back useful when disconnecting a network
    # from a gateway
    port_id = sa.Column(sa.String(36),
                        sa.ForeignKey('ports.id', ondelete='CASCADE'),
                        primary_key=True)


class NetworkGatewayDeviceReference(model_base.BASEV2):
    """Associates a device (by id and interface) with a network gateway."""
    id = sa.Column(sa.String(36), primary_key=True)
    network_gateway_id = sa.Column(sa.String(36),
                                   sa.ForeignKey('networkgateways.id',
                                                 ondelete='CASCADE'),
                                   primary_key=True)
    interface_name = sa.Column(sa.String(64), primary_key=True)


class NetworkGatewayDevice(model_base.BASEV2, models_v2.HasId,
                           models_v2.HasTenant):
    """Gateway device record, mapped to its NSX counterpart via nsx_id."""
    nsx_id = sa.Column(sa.String(36))
    # Optional name for the gateway device
    name = sa.Column(sa.String(255))
    # Transport connector type. Not using enum as range of
    # connector types might vary with backend version
    connector_type = sa.Column(sa.String(10))
    # Transport connector IP Address
    connector_ip = sa.Column(sa.String(64))
    # operational status
    status = sa.Column(sa.String(16))


class NetworkGateway(model_base.BASEV2, models_v2.HasId,
                     models_v2.HasTenant):
    """Defines the data model for a network gateway."""
    name = sa.Column(sa.String(255))
    # Tenant id is nullable for this resource
    tenant_id = sa.Column(sa.String(36))
    default = sa.Column(sa.Boolean())
    devices = orm.relationship(NetworkGatewayDeviceReference,
                               backref='networkgateways',
                               cascade='all,delete')
    network_connections = orm.relationship(NetworkConnection, lazy='joined')


class NetworkGatewayMixin(networkgw.NetworkGatewayPluginBase):
    """DB logic for the network gateway extension."""

    gateway_resource = networkgw.GATEWAY_RESOURCE_NAME
    device_resource = networkgw.DEVICE_RESOURCE_NAME

    def _get_network_gateway(self, context, gw_id):
        """Return the NetworkGateway row for gw_id or raise GatewayNotFound."""
        try:
            gw = self._get_by_id(context, NetworkGateway, gw_id)
        except sa_orm_exc.NoResultFound:
            raise GatewayNotFound(gateway_id=gw_id)
        return gw

    def _make_gw_connection_dict(self, gw_conn):
        return {'port_id': gw_conn['port_id'],
                'segmentation_type': gw_conn['segmentation_type'],
                'segmentation_id': gw_conn['segmentation_id']}

    def _make_network_gateway_dict(self, network_gateway, fields=None):
        device_list = []
        for d in network_gateway['devices']:
            device_list.append({'id': d['id'],
                                'interface_name': d['interface_name']})
        res = {'id': network_gateway['id'],
               'name': network_gateway['name'],
               'default': network_gateway['default'],
               'devices': device_list,
               'tenant_id': network_gateway['tenant_id']}
        # Query gateway connections only if needed
        if (fields and 'ports' in fields) or not fields:
            res['ports'] = [self._make_gw_connection_dict(conn)
                            for conn in network_gateway.network_connections]
        return self._fields(res, fields)

    def _set_mapping_info_defaults(self, mapping_info):
        # Default to an untagged ('flat', id 0) connection.
        if not mapping_info.get('segmentation_type'):
            mapping_info['segmentation_type'] = 'flat'
        if not mapping_info.get('segmentation_id'):
            mapping_info['segmentation_id'] = 0

    def _validate_network_mapping_info(self, network_mapping_info):
        """Validate a connect/disconnect mapping; return the network id.

        Raises InvalidInput when the network id is missing, unknown keys
        are present, or the segmentation type/id combination is invalid.
        """
        self._set_mapping_info_defaults(network_mapping_info)
        network_id = network_mapping_info.get(NETWORK_ID)
        if not network_id:
            raise exceptions.InvalidInput(
                error_message=_("A network identifier must be specified "
                                "when connecting a network to a network "
                                "gateway. Unable to complete operation"))
        connection_attrs = set(network_mapping_info.keys())
        if not connection_attrs.issubset(ALLOWED_CONNECTION_ATTRIBUTES):
            # BUG FIX: error_message was a (format_string, args) tuple;
            # the string was never interpolated. Apply the % operator.
            raise exceptions.InvalidInput(
                error_message=(_("Invalid keys found among the ones provided "
                                 "in request body: %(connection_attrs)s.") %
                               {'connection_attrs': connection_attrs}))
        seg_type = network_mapping_info.get(SEGMENTATION_TYPE)
        seg_id = network_mapping_info.get(SEGMENTATION_ID)
        if not seg_type and seg_id:
            msg = _("In order to specify a segmentation id the "
                    "segmentation type must be specified as well")
            raise exceptions.InvalidInput(error_message=msg)
        elif seg_type and seg_type.lower() == 'flat' and seg_id:
            msg = _("Cannot specify a segmentation id when "
                    "the segmentation type is flat")
            raise exceptions.InvalidInput(error_message=msg)
        return network_id

    def _retrieve_gateway_connections(self, context, gateway_id,
                                      mapping_info=None, only_one=False):
        """Return the connection(s) on gateway_id matching mapping_info.

        BUG FIX: mapping_info previously defaulted to a shared mutable
        dict ({}); use None and normalize locally. Also use items()
        instead of the Python-2-only iteritems().
        """
        filters = {'network_gateway_id': [gateway_id]}
        for k, v in (mapping_info or {}).items():
            if v and k != NETWORK_ID:
                filters[k] = [v]
        query = self._get_collection_query(context,
                                           NetworkConnection,
                                           filters)
        return query.one() if only_one else query.all()

    def _unset_default_network_gateways(self, context):
        with context.session.begin(subtransactions=True):
            context.session.query(NetworkGateway).update(
                {NetworkGateway.default: False})

    def _set_default_network_gateway(self, context, gw_id):
        with context.session.begin(subtransactions=True):
            gw = (context.session.query(NetworkGateway).
                  filter_by(id=gw_id).one())
            gw['default'] = True
context.session.begin(subtransactions=True): + gw = (context.session.query(NetworkGateway). + filter_by(id=gw_id).one()) + gw['default'] = True + + def prevent_network_gateway_port_deletion(self, context, port): + """Pre-deletion check. + + Ensures a port will not be deleted if is being used by a network + gateway. In that case an exception will be raised. + """ + if port['device_owner'] == DEVICE_OWNER_NET_GW_INTF: + raise NetworkGatewayPortInUse(port_id=port['id'], + device_owner=port['device_owner']) + + def create_network_gateway(self, context, network_gateway): + gw_data = network_gateway[self.gateway_resource] + tenant_id = self._get_tenant_id_for_create(context, gw_data) + with context.session.begin(subtransactions=True): + gw_db = NetworkGateway( + id=gw_data.get('id', uuidutils.generate_uuid()), + tenant_id=tenant_id, + name=gw_data.get('name')) + # Device list is guaranteed to be a valid list + device_query = self._query_gateway_devices( + context, filters={'id': [device['id'] + for device in gw_data['devices']]}) + for device in device_query: + if device['tenant_id'] != tenant_id: + raise GatewayDeviceNotFound(device_id=device['id']) + gw_db.devices.extend([NetworkGatewayDeviceReference(**device) + for device in gw_data['devices']]) + context.session.add(gw_db) + LOG.debug(_("Created network gateway with id:%s"), gw_db['id']) + return self._make_network_gateway_dict(gw_db) + + def update_network_gateway(self, context, id, network_gateway): + gw_data = network_gateway[self.gateway_resource] + with context.session.begin(subtransactions=True): + gw_db = self._get_network_gateway(context, id) + if gw_db.default: + raise NetworkGatewayUnchangeable(gateway_id=id) + # Ensure there is something to update before doing it + if any([gw_db[k] != gw_data[k] for k in gw_data]): + gw_db.update(gw_data) + LOG.debug(_("Updated network gateway with id:%s"), id) + return self._make_network_gateway_dict(gw_db) + + def get_network_gateway(self, context, id, fields=None): + 
gw_db = self._get_network_gateway(context, id) + return self._make_network_gateway_dict(gw_db, fields) + + def delete_network_gateway(self, context, id): + with context.session.begin(subtransactions=True): + gw_db = self._get_network_gateway(context, id) + if gw_db.network_connections: + raise GatewayInUse(gateway_id=id) + if gw_db.default: + raise NetworkGatewayUnchangeable(gateway_id=id) + context.session.delete(gw_db) + LOG.debug(_("Network gateway '%s' was destroyed."), id) + + def get_network_gateways(self, context, filters=None, fields=None, + sorts=None, limit=None, marker=None, + page_reverse=False): + marker_obj = self._get_marker_obj( + context, 'network_gateway', limit, marker) + return self._get_collection(context, NetworkGateway, + self._make_network_gateway_dict, + filters=filters, fields=fields, + sorts=sorts, limit=limit, + marker_obj=marker_obj, + page_reverse=page_reverse) + + def connect_network(self, context, network_gateway_id, + network_mapping_info): + network_id = self._validate_network_mapping_info(network_mapping_info) + LOG.debug(_("Connecting network '%(network_id)s' to gateway " + "'%(network_gateway_id)s'"), + {'network_id': network_id, + 'network_gateway_id': network_gateway_id}) + with context.session.begin(subtransactions=True): + gw_db = self._get_network_gateway(context, network_gateway_id) + tenant_id = self._get_tenant_id_for_create(context, gw_db) + # TODO(salvatore-orlando): Leverage unique constraint instead + # of performing another query! + if self._retrieve_gateway_connections(context, + network_gateway_id, + network_mapping_info): + raise GatewayConnectionInUse(mapping=network_mapping_info, + gateway_id=network_gateway_id) + # TODO(salvatore-orlando): Creating a port will give it an IP, + # but we actually do not need any. 
Instead of wasting an IP we + # should have a way to say a port shall not be associated with + # any subnet + try: + # We pass the segmentation type and id too - the plugin + # might find them useful as the network connection object + # does not exist yet. + # NOTE: they're not extended attributes, rather extra data + # passed in the port structure to the plugin + # TODO(salvatore-orlando): Verify optimal solution for + # ownership of the gateway port + port = self.create_port(context, { + 'port': + {'tenant_id': tenant_id, + 'network_id': network_id, + 'mac_address': attributes.ATTR_NOT_SPECIFIED, + 'admin_state_up': True, + 'fixed_ips': [], + 'device_id': network_gateway_id, + 'device_owner': DEVICE_OWNER_NET_GW_INTF, + 'name': '', + 'gw:segmentation_type': + network_mapping_info.get('segmentation_type'), + 'gw:segmentation_id': + network_mapping_info.get('segmentation_id')}}) + except exceptions.NetworkNotFound: + err_msg = (_("Requested network '%(network_id)s' not found." + "Unable to create network connection on " + "gateway '%(network_gateway_id)s") % + {'network_id': network_id, + 'network_gateway_id': network_gateway_id}) + LOG.error(err_msg) + raise exceptions.InvalidInput(error_message=err_msg) + port_id = port['id'] + LOG.debug(_("Gateway port for '%(network_gateway_id)s' " + "created on network '%(network_id)s':%(port_id)s"), + {'network_gateway_id': network_gateway_id, + 'network_id': network_id, + 'port_id': port_id}) + # Create NetworkConnection record + network_mapping_info['port_id'] = port_id + network_mapping_info['tenant_id'] = tenant_id + gw_db.network_connections.append( + NetworkConnection(**network_mapping_info)) + port_id = port['id'] + # now deallocate and recycle ip from the port + for fixed_ip in port.get('fixed_ips', []): + self._delete_ip_allocation(context, network_id, + fixed_ip['subnet_id'], + fixed_ip['ip_address']) + LOG.debug(_("Ensured no Ip addresses are configured on port %s"), + port_id) + return {'connection_info': + 
{'network_gateway_id': network_gateway_id, + 'network_id': network_id, + 'port_id': port_id}} + + def disconnect_network(self, context, network_gateway_id, + network_mapping_info): + network_id = self._validate_network_mapping_info(network_mapping_info) + LOG.debug(_("Disconnecting network '%(network_id)s' from gateway " + "'%(network_gateway_id)s'"), + {'network_id': network_id, + 'network_gateway_id': network_gateway_id}) + with context.session.begin(subtransactions=True): + # Uniquely identify connection, otherwise raise + try: + net_connection = self._retrieve_gateway_connections( + context, network_gateway_id, + network_mapping_info, only_one=True) + except sa_orm_exc.NoResultFound: + raise GatewayConnectionNotFound( + network_mapping_info=network_mapping_info, + network_gateway_id=network_gateway_id) + except sa_orm_exc.MultipleResultsFound: + raise MultipleGatewayConnections( + gateway_id=network_gateway_id) + # Remove gateway port from network + # FIXME(salvatore-orlando): Ensure state of port in NSX is + # consistent with outcome of transaction + self.delete_port(context, net_connection['port_id'], + nw_gw_port_check=False) + # Remove NetworkConnection record + context.session.delete(net_connection) + + def _make_gateway_device_dict(self, gateway_device, fields=None, + include_nsx_id=False): + res = {'id': gateway_device['id'], + 'name': gateway_device['name'], + 'status': gateway_device['status'], + 'connector_type': gateway_device['connector_type'], + 'connector_ip': gateway_device['connector_ip'], + 'tenant_id': gateway_device['tenant_id']} + if include_nsx_id: + # Return the NSX mapping as well. This attribute will not be + # returned in the API response anyway. Ensure it will not be + # filtered out in field selection. 
+ if fields: + fields.append('nsx_id') + res['nsx_id'] = gateway_device['nsx_id'] + return self._fields(res, fields) + + def _get_gateway_device(self, context, device_id): + try: + return self._get_by_id(context, NetworkGatewayDevice, device_id) + except sa_orm_exc.NoResultFound: + raise GatewayDeviceNotFound(device_id=device_id) + + def _is_device_in_use(self, context, device_id): + query = self._get_collection_query( + context, NetworkGatewayDeviceReference, {'id': [device_id]}) + return query.first() + + def get_gateway_device(self, context, device_id, fields=None, + include_nsx_id=False): + return self._make_gateway_device_dict( + self._get_gateway_device(context, device_id), + fields, include_nsx_id) + + def _query_gateway_devices(self, context, + filters=None, sorts=None, + limit=None, marker=None, + page_reverse=None): + marker_obj = self._get_marker_obj( + context, 'gateway_device', limit, marker) + return self._get_collection_query(context, + NetworkGatewayDevice, + filters=filters, + sorts=sorts, + limit=limit, + marker_obj=marker_obj, + page_reverse=page_reverse) + + def get_gateway_devices(self, context, filters=None, fields=None, + sorts=None, limit=None, marker=None, + page_reverse=False, include_nsx_id=False): + query = self._query_gateway_devices(context, filters, sorts, limit, + marker, page_reverse) + return [self._make_gateway_device_dict(row, fields, include_nsx_id) + for row in query] + + def create_gateway_device(self, context, gateway_device, + initial_status=STATUS_UNKNOWN): + device_data = gateway_device[self.device_resource] + tenant_id = self._get_tenant_id_for_create(context, device_data) + with context.session.begin(subtransactions=True): + device_db = NetworkGatewayDevice( + id=device_data.get('id', uuidutils.generate_uuid()), + tenant_id=tenant_id, + name=device_data.get('name'), + connector_type=device_data['connector_type'], + connector_ip=device_data['connector_ip'], + status=initial_status) + context.session.add(device_db) + 
LOG.debug(_("Created network gateway device: %s"), device_db['id']) + return self._make_gateway_device_dict(device_db) + + def update_gateway_device(self, context, gateway_device_id, + gateway_device, include_nsx_id=False): + device_data = gateway_device[self.device_resource] + with context.session.begin(subtransactions=True): + device_db = self._get_gateway_device(context, gateway_device_id) + # Ensure there is something to update before doing it + if any([device_db[k] != device_data[k] for k in device_data]): + device_db.update(device_data) + LOG.debug(_("Updated network gateway device: %s"), + gateway_device_id) + return self._make_gateway_device_dict( + device_db, include_nsx_id=include_nsx_id) + + def delete_gateway_device(self, context, device_id): + with context.session.begin(subtransactions=True): + # A gateway device should not be deleted + # if it is used in any network gateway service + if self._is_device_in_use(context, device_id): + raise GatewayDeviceInUse(device_id=device_id) + device_db = self._get_gateway_device(context, device_id) + context.session.delete(device_db) + LOG.debug(_("Deleted network gateway device: %s."), device_id) diff --git a/neutron/plugins/vmware/dbexts/nsxrouter.py b/neutron/plugins/vmware/dbexts/nsxrouter.py new file mode 100644 index 000000000..48aa61266 --- /dev/null +++ b/neutron/plugins/vmware/dbexts/nsxrouter.py @@ -0,0 +1,66 @@ +# Copyright 2013 VMware, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# --- neutron/plugins/vmware/dbexts/nsxrouter.py (reconstructed from patch) ---
# Copyright 2013 VMware, Inc. All rights reserved. (Apache License 2.0)

from neutron.db import db_base_plugin_v2
from neutron.extensions import l3
from neutron.openstack.common import log as logging
from neutron.plugins.vmware.dbexts import models

LOG = logging.getLogger(__name__)


class NsxRouterMixin(object):
    """Mixin class to enable nsx router support."""

    nsx_attributes = []

    def _extend_nsx_router_dict(self, router_res, router_db):
        # Fall back to each attribute's default when no NSX attribute
        # row is defined for this neutron router.
        nsx_attrs = router_db['nsx_attributes']
        for attr in self.nsx_attributes:
            name, default = attr['name'], attr['default']
            router_res[name] = (
                nsx_attrs and nsx_attrs[name] or default)

    def _process_nsx_router_create(
            self, context, router_db, router_req):
        if router_db['nsx_attributes']:
            # A record may already exist once the NSXRouterExtAttributes
            # model allows several attributes pertaining to different
            # extensions; in that case update it in place.
            for attr in self.nsx_attributes:
                name, default = attr['name'], attr['default']
                router_db['nsx_attributes'][name] = router_req.get(
                    name, default)
        else:
            kwargs = dict((attr['name'],
                           router_req.get(attr['name'], attr['default']))
                          for attr in self.nsx_attributes)
            nsx_attributes = models.NSXRouterExtAttributes(
                router_id=router_db['id'], **kwargs)
            context.session.add(nsx_attributes)
            router_db['nsx_attributes'] = nsx_attributes
        LOG.debug(_("Nsx router extension successfully processed "
                    "for router:%s"), router_db['id'])

    # Register dict extend functions for routers.
    db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs(
        l3.ROUTERS, ['_extend_nsx_router_dict'])
b/neutron/plugins/vmware/dbexts/qos_db.py @@ -0,0 +1,297 @@ +# Copyright 2013 VMware, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +import sqlalchemy as sa +from sqlalchemy import orm +from sqlalchemy.orm import exc + +from neutron.api.v2 import attributes as attr +from neutron.db import db_base_plugin_v2 +from neutron.db import model_base +from neutron.db import models_v2 +from neutron.openstack.common import log +from neutron.openstack.common import uuidutils +from neutron.plugins.vmware.extensions import qos + + +LOG = log.getLogger(__name__) + + +class QoSQueue(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant): + name = sa.Column(sa.String(255)) + default = sa.Column(sa.Boolean, default=False) + min = sa.Column(sa.Integer, nullable=False) + max = sa.Column(sa.Integer, nullable=True) + qos_marking = sa.Column(sa.Enum('untrusted', 'trusted', + name='qosqueues_qos_marking')) + dscp = sa.Column(sa.Integer) + + +class PortQueueMapping(model_base.BASEV2): + port_id = sa.Column(sa.String(36), + sa.ForeignKey("ports.id", ondelete="CASCADE"), + primary_key=True) + + queue_id = sa.Column(sa.String(36), sa.ForeignKey("qosqueues.id"), + primary_key=True) + + # Add a relationship to the Port model adding a backref which will + # allow SQLAlchemy for eagerly load the queue binding + port = orm.relationship( + models_v2.Port, + backref=orm.backref("qos_queue", uselist=False, + cascade='delete', lazy='joined')) + + +class 
NetworkQueueMapping(model_base.BASEV2): + network_id = sa.Column(sa.String(36), + sa.ForeignKey("networks.id", ondelete="CASCADE"), + primary_key=True) + + queue_id = sa.Column(sa.String(36), sa.ForeignKey("qosqueues.id", + ondelete="CASCADE")) + + # Add a relationship to the Network model adding a backref which will + # allow SQLAlcremy for eagerly load the queue binding + network = orm.relationship( + models_v2.Network, + backref=orm.backref("qos_queue", uselist=False, + cascade='delete', lazy='joined')) + + +class QoSDbMixin(qos.QueuePluginBase): + """Mixin class to add queues.""" + + def create_qos_queue(self, context, qos_queue): + q = qos_queue['qos_queue'] + with context.session.begin(subtransactions=True): + qos_queue = QoSQueue(id=q.get('id', uuidutils.generate_uuid()), + name=q.get('name'), + tenant_id=q['tenant_id'], + default=q.get('default'), + min=q.get('min'), + max=q.get('max'), + qos_marking=q.get('qos_marking'), + dscp=q.get('dscp')) + context.session.add(qos_queue) + return self._make_qos_queue_dict(qos_queue) + + def get_qos_queue(self, context, queue_id, fields=None): + return self._make_qos_queue_dict( + self._get_qos_queue(context, queue_id), fields) + + def _get_qos_queue(self, context, queue_id): + try: + return self._get_by_id(context, QoSQueue, queue_id) + except exc.NoResultFound: + raise qos.QueueNotFound(id=queue_id) + + def get_qos_queues(self, context, filters=None, fields=None, sorts=None, + limit=None, marker=None, page_reverse=False): + marker_obj = self._get_marker_obj(context, 'qos_queue', limit, marker) + return self._get_collection(context, QoSQueue, + self._make_qos_queue_dict, + filters=filters, fields=fields, + sorts=sorts, limit=limit, + marker_obj=marker_obj, + page_reverse=page_reverse) + + def delete_qos_queue(self, context, queue_id): + qos_queue = self._get_qos_queue(context, queue_id) + with context.session.begin(subtransactions=True): + context.session.delete(qos_queue) + + def _process_port_queue_mapping(self, 
context, port_data, queue_id): + port_data[qos.QUEUE] = queue_id + if not queue_id: + return + with context.session.begin(subtransactions=True): + context.session.add(PortQueueMapping(port_id=port_data['id'], + queue_id=queue_id)) + + def _get_port_queue_bindings(self, context, filters=None, fields=None): + return self._get_collection(context, PortQueueMapping, + self._make_port_queue_binding_dict, + filters=filters, fields=fields) + + def _delete_port_queue_mapping(self, context, port_id): + query = self._model_query(context, PortQueueMapping) + try: + binding = query.filter(PortQueueMapping.port_id == port_id).one() + except exc.NoResultFound: + # return since this can happen if we are updating a port that + # did not already have a queue on it. There is no need to check + # if there is one before deleting if we return here. + return + with context.session.begin(subtransactions=True): + context.session.delete(binding) + + def _process_network_queue_mapping(self, context, net_data, queue_id): + net_data[qos.QUEUE] = queue_id + if not queue_id: + return + with context.session.begin(subtransactions=True): + context.session.add( + NetworkQueueMapping(network_id=net_data['id'], + queue_id=queue_id)) + + def _get_network_queue_bindings(self, context, filters=None, fields=None): + return self._get_collection(context, NetworkQueueMapping, + self._make_network_queue_binding_dict, + filters=filters, fields=fields) + + def _delete_network_queue_mapping(self, context, network_id): + query = self._model_query(context, NetworkQueueMapping) + with context.session.begin(subtransactions=True): + binding = query.filter_by(network_id=network_id).first() + if binding: + context.session.delete(binding) + + def _extend_dict_qos_queue(self, obj_res, obj_db): + queue_mapping = obj_db['qos_queue'] + if queue_mapping: + obj_res[qos.QUEUE] = queue_mapping.get('queue_id') + return obj_res + + def _extend_port_dict_qos_queue(self, port_res, port_db): + self._extend_dict_qos_queue(port_res, 
port_db) + + def _extend_network_dict_qos_queue(self, network_res, network_db): + self._extend_dict_qos_queue(network_res, network_db) + + # Register dict extend functions for networks and ports + db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs( + attr.NETWORKS, ['_extend_network_dict_qos_queue']) + db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs( + attr.PORTS, ['_extend_port_dict_qos_queue']) + + def _make_qos_queue_dict(self, queue, fields=None): + res = {'id': queue['id'], + 'name': queue.get('name'), + 'default': queue.get('default'), + 'tenant_id': queue['tenant_id'], + 'min': queue.get('min'), + 'max': queue.get('max'), + 'qos_marking': queue.get('qos_marking'), + 'dscp': queue.get('dscp')} + return self._fields(res, fields) + + def _make_port_queue_binding_dict(self, queue, fields=None): + res = {'port_id': queue['port_id'], + 'queue_id': queue['queue_id']} + return self._fields(res, fields) + + def _make_network_queue_binding_dict(self, queue, fields=None): + res = {'network_id': queue['network_id'], + 'queue_id': queue['queue_id']} + return self._fields(res, fields) + + def _check_for_queue_and_create(self, context, port): + """Check for queue and create. + + This function determines if a port should be associated with a + queue. It works by first querying NetworkQueueMapping to determine + if the network is associated with a queue. If so, then it queries + NetworkQueueMapping for all the networks that are associated with + this queue. Next, it queries against all the ports on these networks + with the port device_id. Finally it queries PortQueueMapping. If that + query returns a queue_id that is returned. Otherwise a queue is + created that is the size of the queue associated with the network and + that queue_id is returned. + + If the network is not associated with a queue we then query to see + if there is a default queue in the system. If so, a copy of that is + created and the queue_id is returned. 
+ + Otherwise None is returned. None is also returned if the port does not + have a device_id or if the device_owner is network: + """ + + queue_to_create = None + # If there is no device_id don't create a queue. The queue will be + # created on update port when the device_id is present. Also don't + # apply QoS to network ports. + if (not port.get('device_id') or + port['device_owner'].startswith('network:')): + return + + # Check if there is a queue assocated with the network + filters = {'network_id': [port['network_id']]} + network_queue_id = self._get_network_queue_bindings( + context, filters, ['queue_id']) + if network_queue_id: + # get networks that queue is assocated with + filters = {'queue_id': [network_queue_id[0]['queue_id']]} + networks_with_same_queue = self._get_network_queue_bindings( + context, filters) + + # get the ports on these networks with the same_queue and device_id + filters = {'device_id': [port.get('device_id')], + 'network_id': [network['network_id'] for + network in networks_with_same_queue]} + query = self._model_query(context, models_v2.Port.id) + query = self._apply_filters_to_query(query, models_v2.Port, + filters) + ports_ids = [p[0] for p in query] + if ports_ids: + # shared queue already exists find the queue id + queues = self._get_port_queue_bindings(context, + {'port_id': ports_ids}, + ['queue_id']) + if queues: + return queues[0]['queue_id'] + + # get the size of the queue we want to create + queue_to_create = self._get_qos_queue( + context, network_queue_id[0]['queue_id']) + + else: + # check for default queue + filters = {'default': [True]} + # context is elevated since default queue is owned by admin + queue_to_create = self.get_qos_queues(context.elevated(), filters) + if not queue_to_create: + return + queue_to_create = queue_to_create[0] + + # create the queue + tenant_id = self._get_tenant_id_for_create(context, port) + if port.get(qos.RXTX_FACTOR) and queue_to_create.get('max'): + queue_to_create['max'] *= 
int(port[qos.RXTX_FACTOR]) + queue = {'qos_queue': {'name': queue_to_create.get('name'), + 'min': queue_to_create.get('min'), + 'max': queue_to_create.get('max'), + 'dscp': queue_to_create.get('dscp'), + 'qos_marking': + queue_to_create.get('qos_marking'), + 'tenant_id': tenant_id}} + return self.create_qos_queue(context, queue, False)['id'] + + def _validate_qos_queue(self, context, qos_queue): + if qos_queue.get('default'): + if context.is_admin: + if self.get_qos_queues(context, filters={'default': [True]}): + raise qos.DefaultQueueAlreadyExists() + else: + raise qos.DefaultQueueCreateNotAdmin() + if qos_queue.get('qos_marking') == 'trusted': + dscp = qos_queue.pop('dscp') + LOG.info(_("DSCP value (%s) will be ignored with 'trusted' " + "marking"), dscp) + max = qos_queue.get('max') + min = qos_queue.get('min') + # Max can be None + if max and min > max: + raise qos.QueueMinGreaterMax() diff --git a/neutron/plugins/vmware/dbexts/servicerouter.py b/neutron/plugins/vmware/dbexts/servicerouter.py new file mode 100644 index 000000000..bc34cd4c3 --- /dev/null +++ b/neutron/plugins/vmware/dbexts/servicerouter.py @@ -0,0 +1,27 @@ +# Copyright 2013 VMware, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +from neutron.plugins.vmware.dbexts import distributedrouter as dist_rtr +from neutron.plugins.vmware.extensions import servicerouter + + +class ServiceRouter_mixin(dist_rtr.DistributedRouter_mixin): + """Mixin class to enable service router support.""" + + nsx_attributes = ( + dist_rtr.DistributedRouter_mixin.nsx_attributes + [{ + 'name': servicerouter.SERVICE_ROUTER, + 'default': False + }]) diff --git a/neutron/plugins/vmware/dbexts/vcns_db.py b/neutron/plugins/vmware/dbexts/vcns_db.py new file mode 100644 index 000000000..24b3e5b8a --- /dev/null +++ b/neutron/plugins/vmware/dbexts/vcns_db.py @@ -0,0 +1,202 @@ +# Copyright 2013 VMware, Inc. +# +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from sqlalchemy.orm import exc + +from neutron.openstack.common import log as logging +from neutron.plugins.vmware.common import exceptions as nsx_exc +from neutron.plugins.vmware.dbexts import vcns_models +from neutron.plugins.vmware.vshield.common import ( + exceptions as vcns_exc) + +LOG = logging.getLogger(__name__) + + +def add_vcns_router_binding(session, router_id, vse_id, lswitch_id, status): + with session.begin(subtransactions=True): + binding = vcns_models.VcnsRouterBinding( + router_id=router_id, + edge_id=vse_id, + lswitch_id=lswitch_id, + status=status) + session.add(binding) + return binding + + +def get_vcns_router_binding(session, router_id): + with session.begin(subtransactions=True): + return (session.query(vcns_models.VcnsRouterBinding). + filter_by(router_id=router_id).first()) + + +def update_vcns_router_binding(session, router_id, **kwargs): + with session.begin(subtransactions=True): + binding = (session.query(vcns_models.VcnsRouterBinding). + filter_by(router_id=router_id).one()) + for key, value in kwargs.iteritems(): + binding[key] = value + + +def delete_vcns_router_binding(session, router_id): + with session.begin(subtransactions=True): + binding = (session.query(vcns_models.VcnsRouterBinding). + filter_by(router_id=router_id).one()) + session.delete(binding) + + +# +# Edge Firewall binding methods +# +def add_vcns_edge_firewallrule_binding(session, map_info): + with session.begin(subtransactions=True): + binding = vcns_models.VcnsEdgeFirewallRuleBinding( + rule_id=map_info['rule_id'], + rule_vseid=map_info['rule_vseid'], + edge_id=map_info['edge_id']) + session.add(binding) + return binding + + +def delete_vcns_edge_firewallrule_binding(session, id, edge_id): + with session.begin(subtransactions=True): + if not (session.query(vcns_models.VcnsEdgeFirewallRuleBinding). 
+ filter_by(rule_id=id, edge_id=edge_id).delete()): + msg = _("Rule Resource binding with id:%s not found!") % id + raise nsx_exc.NsxPluginException(err_msg=msg) + + +def get_vcns_edge_firewallrule_binding(session, id, edge_id): + with session.begin(subtransactions=True): + return (session.query(vcns_models.VcnsEdgeFirewallRuleBinding). + filter_by(rule_id=id, edge_id=edge_id).first()) + + +def get_vcns_edge_firewallrule_binding_by_vseid( + session, edge_id, rule_vseid): + with session.begin(subtransactions=True): + try: + return (session.query(vcns_models.VcnsEdgeFirewallRuleBinding). + filter_by(edge_id=edge_id, rule_vseid=rule_vseid).one()) + except exc.NoResultFound: + msg = _("Rule Resource binding not found!") + raise nsx_exc.NsxPluginException(err_msg=msg) + + +def cleanup_vcns_edge_firewallrule_binding(session, edge_id): + with session.begin(subtransactions=True): + session.query( + vcns_models.VcnsEdgeFirewallRuleBinding).filter_by( + edge_id=edge_id).delete() + + +def add_vcns_edge_vip_binding(session, map_info): + with session.begin(subtransactions=True): + binding = vcns_models.VcnsEdgeVipBinding( + vip_id=map_info['vip_id'], + edge_id=map_info['edge_id'], + vip_vseid=map_info['vip_vseid'], + app_profileid=map_info['app_profileid']) + session.add(binding) + + return binding + + +def get_vcns_edge_vip_binding(session, id): + with session.begin(subtransactions=True): + try: + qry = session.query(vcns_models.VcnsEdgeVipBinding) + return qry.filter_by(vip_id=id).one() + except exc.NoResultFound: + msg = _("VIP Resource binding with id:%s not found!") % id + LOG.exception(msg) + raise vcns_exc.VcnsNotFound( + resource='router_service_binding', msg=msg) + + +def delete_vcns_edge_vip_binding(session, id): + with session.begin(subtransactions=True): + qry = session.query(vcns_models.VcnsEdgeVipBinding) + if not qry.filter_by(vip_id=id).delete(): + msg = _("VIP Resource binding with id:%s not found!") % id + LOG.exception(msg) + raise 
nsx_exc.NsxPluginException(err_msg=msg) + + +def add_vcns_edge_pool_binding(session, map_info): + with session.begin(subtransactions=True): + binding = vcns_models.VcnsEdgePoolBinding( + pool_id=map_info['pool_id'], + edge_id=map_info['edge_id'], + pool_vseid=map_info['pool_vseid']) + session.add(binding) + + return binding + + +def get_vcns_edge_pool_binding(session, id, edge_id): + with session.begin(subtransactions=True): + return (session.query(vcns_models.VcnsEdgePoolBinding). + filter_by(pool_id=id, edge_id=edge_id).first()) + + +def get_vcns_edge_pool_binding_by_vseid(session, edge_id, pool_vseid): + with session.begin(subtransactions=True): + try: + qry = session.query(vcns_models.VcnsEdgePoolBinding) + binding = qry.filter_by(edge_id=edge_id, + pool_vseid=pool_vseid).one() + except exc.NoResultFound: + msg = (_("Pool Resource binding with edge_id:%(edge_id)s " + "pool_vseid:%(pool_vseid)s not found!") % + {'edge_id': edge_id, 'pool_vseid': pool_vseid}) + LOG.exception(msg) + raise nsx_exc.NsxPluginException(err_msg=msg) + return binding + + +def delete_vcns_edge_pool_binding(session, id, edge_id): + with session.begin(subtransactions=True): + qry = session.query(vcns_models.VcnsEdgePoolBinding) + if not qry.filter_by(pool_id=id, edge_id=edge_id).delete(): + msg = _("Pool Resource binding with id:%s not found!") % id + LOG.exception(msg) + raise nsx_exc.NsxPluginException(err_msg=msg) + + +def add_vcns_edge_monitor_binding(session, map_info): + with session.begin(subtransactions=True): + binding = vcns_models.VcnsEdgeMonitorBinding( + monitor_id=map_info['monitor_id'], + edge_id=map_info['edge_id'], + monitor_vseid=map_info['monitor_vseid']) + session.add(binding) + + return binding + + +def get_vcns_edge_monitor_binding(session, id, edge_id): + with session.begin(subtransactions=True): + return (session.query(vcns_models.VcnsEdgeMonitorBinding). 
+ filter_by(monitor_id=id, edge_id=edge_id).first()) + + +def delete_vcns_edge_monitor_binding(session, id, edge_id): + with session.begin(subtransactions=True): + qry = session.query(vcns_models.VcnsEdgeMonitorBinding) + if not qry.filter_by(monitor_id=id, edge_id=edge_id).delete(): + msg = _("Monitor Resource binding with id:%s not found!") % id + LOG.exception(msg) + raise nsx_exc.NsxPluginException(err_msg=msg) diff --git a/neutron/plugins/vmware/dbexts/vcns_models.py b/neutron/plugins/vmware/dbexts/vcns_models.py new file mode 100644 index 000000000..847161358 --- /dev/null +++ b/neutron/plugins/vmware/dbexts/vcns_models.py @@ -0,0 +1,90 @@ +# Copyright 2013 VMware, Inc. +# +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ + +import sqlalchemy as sa + +from neutron.db import model_base +from neutron.db import models_v2 + + +class VcnsRouterBinding(model_base.BASEV2, models_v2.HasStatusDescription): + """Represents the mapping between neutron router and vShield Edge.""" + + __tablename__ = 'vcns_router_bindings' + + # no ForeignKey to routers.id because for now, a router can be removed + # from routers when delete_router is executed, but the binding is only + # removed after the Edge is deleted + router_id = sa.Column(sa.String(36), + primary_key=True) + edge_id = sa.Column(sa.String(16), + nullable=True) + lswitch_id = sa.Column(sa.String(36), + nullable=False) + + +# +# VCNS Edge FW mapping tables +# +class VcnsEdgeFirewallRuleBinding(model_base.BASEV2): + """1:1 mapping between firewall rule and edge firewall rule_id.""" + + __tablename__ = 'vcns_firewall_rule_bindings' + + rule_id = sa.Column(sa.String(36), + sa.ForeignKey("firewall_rules.id"), + primary_key=True) + edge_id = sa.Column(sa.String(36), primary_key=True) + rule_vseid = sa.Column(sa.String(36)) + + +class VcnsEdgePoolBinding(model_base.BASEV2): + """Represents the mapping between neutron pool and Edge pool.""" + + __tablename__ = 'vcns_edge_pool_bindings' + + pool_id = sa.Column(sa.String(36), + sa.ForeignKey("pools.id", ondelete="CASCADE"), + primary_key=True) + edge_id = sa.Column(sa.String(36), primary_key=True) + pool_vseid = sa.Column(sa.String(36)) + + +class VcnsEdgeVipBinding(model_base.BASEV2): + """Represents the mapping between neutron vip and Edge vip.""" + + __tablename__ = 'vcns_edge_vip_bindings' + + vip_id = sa.Column(sa.String(36), + sa.ForeignKey("vips.id", ondelete="CASCADE"), + primary_key=True) + edge_id = sa.Column(sa.String(36)) + vip_vseid = sa.Column(sa.String(36)) + app_profileid = sa.Column(sa.String(36)) + + +class VcnsEdgeMonitorBinding(model_base.BASEV2): + """Represents the mapping between neutron monitor and Edge monitor.""" + + __tablename__ = 'vcns_edge_monitor_bindings' + + 
monitor_id = sa.Column(sa.String(36), + sa.ForeignKey("healthmonitors.id", + ondelete="CASCADE"), + primary_key=True) + edge_id = sa.Column(sa.String(36), primary_key=True) + monitor_vseid = sa.Column(sa.String(36)) diff --git a/neutron/plugins/vmware/dhcp_meta/__init__.py b/neutron/plugins/vmware/dhcp_meta/__init__.py new file mode 100644 index 000000000..c020e3bcd --- /dev/null +++ b/neutron/plugins/vmware/dhcp_meta/__init__.py @@ -0,0 +1,16 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 VMware, Inc. +# All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/neutron/plugins/vmware/dhcp_meta/combined.py b/neutron/plugins/vmware/dhcp_meta/combined.py new file mode 100644 index 000000000..36ba563e8 --- /dev/null +++ b/neutron/plugins/vmware/dhcp_meta/combined.py @@ -0,0 +1,95 @@ +# Copyright 2014 VMware, Inc. +# +# All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api +from neutron.common import constants as const +from neutron.common import topics +from neutron.plugins.vmware.dhcp_meta import nsx as nsx_svc +from neutron.plugins.vmware.dhcp_meta import rpc as nsx_rpc + + +class DhcpAgentNotifyAPI(dhcp_rpc_agent_api.DhcpAgentNotifyAPI): + + def __init__(self, plugin, manager): + super(DhcpAgentNotifyAPI, self).__init__(topic=topics.DHCP_AGENT) + self.agentless_notifier = nsx_svc.DhcpAgentNotifyAPI(plugin, manager) + + def notify(self, context, data, methodname): + [resource, action, _e] = methodname.split('.') + lsn_manager = self.agentless_notifier.plugin.lsn_manager + plugin = self.agentless_notifier.plugin + if resource == 'network': + net_id = data['network']['id'] + elif resource in ['port', 'subnet']: + net_id = data[resource]['network_id'] + else: + # no valid resource + return + lsn_exists = lsn_manager.lsn_exists(context, net_id) + treat_dhcp_owner_specially = False + if lsn_exists: + # if lsn exists, the network is one created with the new model + if (resource == 'subnet' and action == 'create' and + const.DEVICE_OWNER_DHCP not in plugin.port_special_owners): + # network/subnet provisioned in the new model have a plain + # nsx lswitch port, no vif attachment + plugin.port_special_owners.append(const.DEVICE_OWNER_DHCP) + treat_dhcp_owner_specially = True + if (resource == 'port' and action == 'update' or + resource == 'subnet'): + self.agentless_notifier.notify(context, data, methodname) + elif not lsn_exists and resource in ['port', 'subnet']: + # call notifier for the agent-based mode + super(DhcpAgentNotifyAPI, self).notify(context, data, methodname) + if treat_dhcp_owner_specially: + # if subnets belong to networks created with the old model + # dhcp port does not need to be special cased, so put things + # back, since they were modified + plugin.port_special_owners.remove(const.DEVICE_OWNER_DHCP) + + +def handle_network_dhcp_access(plugin, context, 
network, action): + nsx_svc.handle_network_dhcp_access(plugin, context, network, action) + + +def handle_port_dhcp_access(plugin, context, port, action): + if plugin.lsn_manager.lsn_exists(context, port['network_id']): + nsx_svc.handle_port_dhcp_access(plugin, context, port, action) + else: + nsx_rpc.handle_port_dhcp_access(plugin, context, port, action) + + +def handle_port_metadata_access(plugin, context, port, is_delete=False): + if plugin.lsn_manager.lsn_exists(context, port['network_id']): + nsx_svc.handle_port_metadata_access(plugin, context, port, is_delete) + else: + nsx_rpc.handle_port_metadata_access(plugin, context, port, is_delete) + + +def handle_router_metadata_access(plugin, context, router_id, interface=None): + if interface: + subnet = plugin.get_subnet(context, interface['subnet_id']) + network_id = subnet['network_id'] + if plugin.lsn_manager.lsn_exists(context, network_id): + nsx_svc.handle_router_metadata_access( + plugin, context, router_id, interface) + else: + nsx_rpc.handle_router_metadata_access( + plugin, context, router_id, interface) + else: + nsx_rpc.handle_router_metadata_access( + plugin, context, router_id, interface) diff --git a/neutron/plugins/vmware/dhcp_meta/constants.py b/neutron/plugins/vmware/dhcp_meta/constants.py new file mode 100644 index 000000000..1e9476a5b --- /dev/null +++ b/neutron/plugins/vmware/dhcp_meta/constants.py @@ -0,0 +1,28 @@ +# Copyright 2014 VMware, Inc. +# +# All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. +# +from neutron.common import constants as const +from neutron.db import l3_db + +# A unique MAC to quickly identify the LSN port used for metadata services +# when dhcp on the subnet is off. Inspired by leet-speak for 'metadata'. +METADATA_MAC = "fa:15:73:74:d4:74" +METADATA_PORT_ID = 'metadata:id' +METADATA_PORT_NAME = 'metadata:name' +METADATA_DEVICE_ID = 'metadata:device' +SPECIAL_OWNERS = (const.DEVICE_OWNER_DHCP, + const.DEVICE_OWNER_ROUTER_GW, + l3_db.DEVICE_OWNER_ROUTER_INTF) diff --git a/neutron/plugins/vmware/dhcp_meta/lsnmanager.py b/neutron/plugins/vmware/dhcp_meta/lsnmanager.py new file mode 100644 index 000000000..1e8f9cb55 --- /dev/null +++ b/neutron/plugins/vmware/dhcp_meta/lsnmanager.py @@ -0,0 +1,462 @@ +# Copyright 2014 VMware, Inc. +# +# All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +from oslo.config import cfg + +from neutron.common import exceptions as n_exc +from neutron.openstack.common.db import exception as db_exc +from neutron.openstack.common import excutils +from neutron.openstack.common import log as logging +from neutron.plugins.vmware.api_client import exception as api_exc +from neutron.plugins.vmware.common import exceptions as p_exc +from neutron.plugins.vmware.common import nsx_utils +from neutron.plugins.vmware.dbexts import lsn_db +from neutron.plugins.vmware.dhcp_meta import constants as const +from neutron.plugins.vmware.nsxlib import lsn as lsn_api +from neutron.plugins.vmware.nsxlib import switch as switch_api + +LOG = logging.getLogger(__name__) + +META_CONF = 'metadata-proxy' +DHCP_CONF = 'dhcp' + + +lsn_opts = [ + cfg.BoolOpt('sync_on_missing_data', default=False, + help=_('Pull LSN information from NSX in case it is missing ' + 'from the local data store. This is useful to rebuild ' + 'the local store in case of server recovery.')) +] + + +def register_lsn_opts(config): + config.CONF.register_opts(lsn_opts, "NSX_LSN") + + +class LsnManager(object): + """Manage LSN entities associated with networks.""" + + def __init__(self, plugin): + self.plugin = plugin + + @property + def cluster(self): + return self.plugin.cluster + + def lsn_exists(self, context, network_id): + """Return True if a Logical Service Node exists for the network.""" + return self.lsn_get( + context, network_id, raise_on_err=False) is not None + + def lsn_get(self, context, network_id, raise_on_err=True): + """Retrieve the LSN id associated to the network.""" + try: + return lsn_api.lsn_for_network_get(self.cluster, network_id) + except (n_exc.NotFound, api_exc.NsxApiException): + logger = raise_on_err and LOG.error or LOG.warn + logger(_('Unable to find Logical Service Node for ' + 'network %s'), network_id) + if raise_on_err: + raise p_exc.LsnNotFound(entity='network', + entity_id=network_id) + + def lsn_create(self, context, network_id): + 
"""Create a LSN associated to the network.""" + try: + return lsn_api.lsn_for_network_create(self.cluster, network_id) + except api_exc.NsxApiException: + err_msg = _('Unable to create LSN for network %s') % network_id + raise p_exc.NsxPluginException(err_msg=err_msg) + + def lsn_delete(self, context, lsn_id): + """Delete a LSN given its id.""" + try: + lsn_api.lsn_delete(self.cluster, lsn_id) + except (n_exc.NotFound, api_exc.NsxApiException): + LOG.warn(_('Unable to delete Logical Service Node %s'), lsn_id) + + def lsn_delete_by_network(self, context, network_id): + """Delete a LSN associated to the network.""" + lsn_id = self.lsn_get(context, network_id, raise_on_err=False) + if lsn_id: + self.lsn_delete(context, lsn_id) + + def lsn_port_get(self, context, network_id, subnet_id, raise_on_err=True): + """Retrieve LSN and LSN port for the network and the subnet.""" + lsn_id = self.lsn_get(context, network_id, raise_on_err=raise_on_err) + if lsn_id: + try: + lsn_port_id = lsn_api.lsn_port_by_subnet_get( + self.cluster, lsn_id, subnet_id) + except (n_exc.NotFound, api_exc.NsxApiException): + logger = raise_on_err and LOG.error or LOG.warn + logger(_('Unable to find Logical Service Node Port for ' + 'LSN %(lsn_id)s and subnet %(subnet_id)s') + % {'lsn_id': lsn_id, 'subnet_id': subnet_id}) + if raise_on_err: + raise p_exc.LsnPortNotFound(lsn_id=lsn_id, + entity='subnet', + entity_id=subnet_id) + return (lsn_id, None) + else: + return (lsn_id, lsn_port_id) + else: + return (None, None) + + def lsn_port_get_by_mac(self, context, network_id, mac, raise_on_err=True): + """Retrieve LSN and LSN port given network and mac address.""" + lsn_id = self.lsn_get(context, network_id, raise_on_err=raise_on_err) + if lsn_id: + try: + lsn_port_id = lsn_api.lsn_port_by_mac_get( + self.cluster, lsn_id, mac) + except (n_exc.NotFound, api_exc.NsxApiException): + logger = raise_on_err and LOG.error or LOG.warn + logger(_('Unable to find Logical Service Node Port for ' + 'LSN %(lsn_id)s 
and mac address %(mac)s') + % {'lsn_id': lsn_id, 'mac': mac}) + if raise_on_err: + raise p_exc.LsnPortNotFound(lsn_id=lsn_id, + entity='MAC', + entity_id=mac) + return (lsn_id, None) + else: + return (lsn_id, lsn_port_id) + else: + return (None, None) + + def lsn_port_create(self, context, lsn_id, subnet_info): + """Create and return LSN port for associated subnet.""" + try: + return lsn_api.lsn_port_create(self.cluster, lsn_id, subnet_info) + except n_exc.NotFound: + raise p_exc.LsnNotFound(entity='', entity_id=lsn_id) + except api_exc.NsxApiException: + err_msg = _('Unable to create port for LSN %s') % lsn_id + raise p_exc.NsxPluginException(err_msg=err_msg) + + def lsn_port_delete(self, context, lsn_id, lsn_port_id): + """Delete a LSN port from the Logical Service Node.""" + try: + lsn_api.lsn_port_delete(self.cluster, lsn_id, lsn_port_id) + except (n_exc.NotFound, api_exc.NsxApiException): + LOG.warn(_('Unable to delete LSN Port %s'), lsn_port_id) + + def lsn_port_dispose(self, context, network_id, mac_address): + """Delete a LSN port given the network and the mac address.""" + lsn_id, lsn_port_id = self.lsn_port_get_by_mac( + context, network_id, mac_address, raise_on_err=False) + if lsn_port_id: + self.lsn_port_delete(context, lsn_id, lsn_port_id) + if mac_address == const.METADATA_MAC: + try: + lswitch_port_id = switch_api.get_port_by_neutron_tag( + self.cluster, network_id, + const.METADATA_PORT_ID)['uuid'] + switch_api.delete_port( + self.cluster, network_id, lswitch_port_id) + except (n_exc.PortNotFoundOnNetwork, + api_exc.NsxApiException): + LOG.warn(_("Metadata port not found while attempting " + "to delete it from network %s"), network_id) + else: + LOG.warn(_("Unable to find Logical Services Node " + "Port with MAC %s"), mac_address) + + def lsn_port_dhcp_setup( + self, context, network_id, port_id, port_data, subnet_config=None): + """Connect network to LSN via specified port and port_data.""" + try: + lsn_id = None + switch_id = 
nsx_utils.get_nsx_switch_ids( + context.session, self.cluster, network_id)[0] + lswitch_port_id = switch_api.get_port_by_neutron_tag( + self.cluster, switch_id, port_id)['uuid'] + lsn_id = self.lsn_get(context, network_id) + lsn_port_id = self.lsn_port_create(context, lsn_id, port_data) + except (n_exc.NotFound, p_exc.NsxPluginException): + raise p_exc.PortConfigurationError( + net_id=network_id, lsn_id=lsn_id, port_id=port_id) + else: + try: + lsn_api.lsn_port_plug_network( + self.cluster, lsn_id, lsn_port_id, lswitch_port_id) + except p_exc.LsnConfigurationConflict: + self.lsn_port_delete(context, lsn_id, lsn_port_id) + raise p_exc.PortConfigurationError( + net_id=network_id, lsn_id=lsn_id, port_id=port_id) + if subnet_config: + self.lsn_port_dhcp_configure( + context, lsn_id, lsn_port_id, subnet_config) + else: + return (lsn_id, lsn_port_id) + + def lsn_port_metadata_setup(self, context, lsn_id, subnet): + """Connect subnet to specified LSN.""" + data = { + "mac_address": const.METADATA_MAC, + "ip_address": subnet['cidr'], + "subnet_id": subnet['id'] + } + network_id = subnet['network_id'] + tenant_id = subnet['tenant_id'] + lswitch_port_id = None + try: + switch_id = nsx_utils.get_nsx_switch_ids( + context.session, self.cluster, network_id)[0] + lswitch_port_id = switch_api.create_lport( + self.cluster, switch_id, tenant_id, + const.METADATA_PORT_ID, const.METADATA_PORT_NAME, + const.METADATA_DEVICE_ID, True)['uuid'] + lsn_port_id = self.lsn_port_create(context, lsn_id, data) + except (n_exc.NotFound, p_exc.NsxPluginException, + api_exc.NsxApiException): + raise p_exc.PortConfigurationError( + net_id=network_id, lsn_id=lsn_id, port_id=lswitch_port_id) + else: + try: + lsn_api.lsn_port_plug_network( + self.cluster, lsn_id, lsn_port_id, lswitch_port_id) + except p_exc.LsnConfigurationConflict: + self.lsn_port_delete(self.cluster, lsn_id, lsn_port_id) + switch_api.delete_port( + self.cluster, network_id, lswitch_port_id) + raise p_exc.PortConfigurationError( + 
net_id=network_id, lsn_id=lsn_id, port_id=lsn_port_id) + + def lsn_port_dhcp_configure(self, context, lsn_id, lsn_port_id, subnet): + """Enable/disable dhcp services with the given config options.""" + is_enabled = subnet["enable_dhcp"] + dhcp_options = { + "domain_name": cfg.CONF.NSX_DHCP.domain_name, + "default_lease_time": cfg.CONF.NSX_DHCP.default_lease_time, + } + dns_servers = cfg.CONF.NSX_DHCP.extra_domain_name_servers or [] + dns_servers.extend(subnet["dns_nameservers"]) + if subnet['gateway_ip']: + dhcp_options["routers"] = subnet["gateway_ip"] + if dns_servers: + dhcp_options["domain_name_servers"] = ",".join(dns_servers) + if subnet["host_routes"]: + dhcp_options["classless_static_routes"] = ( + ",".join(subnet["host_routes"]) + ) + try: + lsn_api.lsn_port_dhcp_configure( + self.cluster, lsn_id, lsn_port_id, is_enabled, dhcp_options) + except (n_exc.NotFound, api_exc.NsxApiException): + err_msg = (_('Unable to configure dhcp for Logical Service ' + 'Node %(lsn_id)s and port %(lsn_port_id)s') + % {'lsn_id': lsn_id, 'lsn_port_id': lsn_port_id}) + LOG.error(err_msg) + raise p_exc.NsxPluginException(err_msg=err_msg) + + def lsn_metadata_configure(self, context, subnet_id, is_enabled): + """Configure metadata service for the specified subnet.""" + subnet = self.plugin.get_subnet(context, subnet_id) + network_id = subnet['network_id'] + meta_conf = cfg.CONF.NSX_METADATA + metadata_options = { + 'metadata_server_ip': meta_conf.metadata_server_address, + 'metadata_server_port': meta_conf.metadata_server_port, + 'metadata_proxy_shared_secret': meta_conf.metadata_shared_secret + } + try: + lsn_id = self.lsn_get(context, network_id) + lsn_api.lsn_metadata_configure( + self.cluster, lsn_id, is_enabled, metadata_options) + except (p_exc.LsnNotFound, api_exc.NsxApiException): + err_msg = (_('Unable to configure metadata ' + 'for subnet %s') % subnet_id) + LOG.error(err_msg) + raise p_exc.NsxPluginException(err_msg=err_msg) + if is_enabled: + try: + # test that the lsn 
port exists + self.lsn_port_get(context, network_id, subnet_id) + except p_exc.LsnPortNotFound: + # this might happen if subnet had dhcp off when created + # so create one, and wire it + self.lsn_port_metadata_setup(context, lsn_id, subnet) + else: + self.lsn_port_dispose(context, network_id, const.METADATA_MAC) + + def _lsn_port_host_conf(self, context, network_id, subnet_id, data, hdlr): + lsn_id, lsn_port_id = self.lsn_port_get( + context, network_id, subnet_id, raise_on_err=False) + try: + if lsn_id and lsn_port_id: + hdlr(self.cluster, lsn_id, lsn_port_id, data) + except (n_exc.NotFound, api_exc.NsxApiException): + LOG.error(_('Error while configuring LSN ' + 'port %s'), lsn_port_id) + raise p_exc.PortConfigurationError( + net_id=network_id, lsn_id=lsn_id, port_id=lsn_port_id) + + def lsn_port_dhcp_host_add(self, context, network_id, subnet_id, host): + """Add dhcp host entry to LSN port configuration.""" + self._lsn_port_host_conf(context, network_id, subnet_id, host, + lsn_api.lsn_port_dhcp_host_add) + + def lsn_port_dhcp_host_remove(self, context, network_id, subnet_id, host): + """Remove dhcp host entry from LSN port configuration.""" + self._lsn_port_host_conf(context, network_id, subnet_id, host, + lsn_api.lsn_port_dhcp_host_remove) + + def lsn_port_meta_host_add(self, context, network_id, subnet_id, host): + """Add dhcp host entry to LSN port configuration.""" + self._lsn_port_host_conf(context, network_id, subnet_id, host, + lsn_api.lsn_port_metadata_host_add) + + def lsn_port_meta_host_remove(self, context, network_id, subnet_id, host): + """Remove dhcp host entry from LSN port configuration.""" + self._lsn_port_host_conf(context, network_id, subnet_id, host, + lsn_api.lsn_port_metadata_host_remove) + + def lsn_port_update( + self, context, network_id, subnet_id, dhcp=None, meta=None): + """Update the specified configuration for the LSN port.""" + if not dhcp and not meta: + return + try: + lsn_id, lsn_port_id = self.lsn_port_get( + context, 
network_id, subnet_id, raise_on_err=False) + if dhcp and lsn_id and lsn_port_id: + lsn_api.lsn_port_host_entries_update( + self.cluster, lsn_id, lsn_port_id, DHCP_CONF, dhcp) + if meta and lsn_id and lsn_port_id: + lsn_api.lsn_port_host_entries_update( + self.cluster, lsn_id, lsn_port_id, META_CONF, meta) + except api_exc.NsxApiException: + raise p_exc.PortConfigurationError( + net_id=network_id, lsn_id=lsn_id, port_id=lsn_port_id) + + +class PersistentLsnManager(LsnManager): + """Add local persistent state to LSN Manager.""" + + def __init__(self, plugin): + super(PersistentLsnManager, self).__init__(plugin) + self.sync_on_missing = cfg.CONF.NSX_LSN.sync_on_missing_data + + def lsn_get(self, context, network_id, raise_on_err=True): + try: + obj = lsn_db.lsn_get_for_network( + context, network_id, raise_on_err=raise_on_err) + return obj.lsn_id if obj else None + except p_exc.LsnNotFound: + with excutils.save_and_reraise_exception() as ctxt: + ctxt.reraise = False + if self.sync_on_missing: + lsn_id = super(PersistentLsnManager, self).lsn_get( + context, network_id, raise_on_err=raise_on_err) + self.lsn_save(context, network_id, lsn_id) + return lsn_id + if raise_on_err: + ctxt.reraise = True + + def lsn_save(self, context, network_id, lsn_id): + """Save LSN-Network mapping to the DB.""" + try: + lsn_db.lsn_add(context, network_id, lsn_id) + except db_exc.DBError: + err_msg = _('Unable to save LSN for network %s') % network_id + LOG.exception(err_msg) + raise p_exc.NsxPluginException(err_msg=err_msg) + + def lsn_create(self, context, network_id): + lsn_id = super(PersistentLsnManager, + self).lsn_create(context, network_id) + try: + self.lsn_save(context, network_id, lsn_id) + except p_exc.NsxPluginException: + with excutils.save_and_reraise_exception(): + super(PersistentLsnManager, self).lsn_delete(context, lsn_id) + return lsn_id + + def lsn_delete(self, context, lsn_id): + lsn_db.lsn_remove(context, lsn_id) + super(PersistentLsnManager, self).lsn_delete(context, 
lsn_id) + + def lsn_port_get(self, context, network_id, subnet_id, raise_on_err=True): + try: + obj = lsn_db.lsn_port_get_for_subnet( + context, subnet_id, raise_on_err=raise_on_err) + return (obj.lsn_id, obj.lsn_port_id) if obj else (None, None) + except p_exc.LsnPortNotFound: + with excutils.save_and_reraise_exception() as ctxt: + ctxt.reraise = False + if self.sync_on_missing: + lsn_id, lsn_port_id = ( + super(PersistentLsnManager, self).lsn_port_get( + context, network_id, subnet_id, + raise_on_err=raise_on_err)) + mac_addr = lsn_api.lsn_port_info_get( + self.cluster, lsn_id, lsn_port_id)['mac_address'] + self.lsn_port_save( + context, lsn_port_id, subnet_id, mac_addr, lsn_id) + return (lsn_id, lsn_port_id) + if raise_on_err: + ctxt.reraise = True + + def lsn_port_get_by_mac(self, context, network_id, mac, raise_on_err=True): + try: + obj = lsn_db.lsn_port_get_for_mac( + context, mac, raise_on_err=raise_on_err) + return (obj.lsn_id, obj.lsn_port_id) if obj else (None, None) + except p_exc.LsnPortNotFound: + with excutils.save_and_reraise_exception() as ctxt: + ctxt.reraise = False + if self.sync_on_missing: + lsn_id, lsn_port_id = ( + super(PersistentLsnManager, self).lsn_port_get_by_mac( + context, network_id, mac, + raise_on_err=raise_on_err)) + subnet_id = lsn_api.lsn_port_info_get( + self.cluster, lsn_id, lsn_port_id).get('subnet_id') + self.lsn_port_save( + context, lsn_port_id, subnet_id, mac, lsn_id) + return (lsn_id, lsn_port_id) + if raise_on_err: + ctxt.reraise = True + + def lsn_port_save(self, context, lsn_port_id, subnet_id, mac_addr, lsn_id): + """Save LSN Port information to the DB.""" + try: + lsn_db.lsn_port_add_for_lsn( + context, lsn_port_id, subnet_id, mac_addr, lsn_id) + except db_exc.DBError: + err_msg = _('Unable to save LSN port for subnet %s') % subnet_id + LOG.exception(err_msg) + raise p_exc.NsxPluginException(err_msg=err_msg) + + def lsn_port_create(self, context, lsn_id, subnet_info): + lsn_port_id = super(PersistentLsnManager, + 
self).lsn_port_create(context, lsn_id, subnet_info) + try: + self.lsn_port_save(context, lsn_port_id, subnet_info['subnet_id'], + subnet_info['mac_address'], lsn_id) + except p_exc.NsxPluginException: + with excutils.save_and_reraise_exception(): + super(PersistentLsnManager, self).lsn_port_delete( + context, lsn_id, lsn_port_id) + return lsn_port_id + + def lsn_port_delete(self, context, lsn_id, lsn_port_id): + lsn_db.lsn_port_remove(context, lsn_port_id) + super(PersistentLsnManager, self).lsn_port_delete( + context, lsn_id, lsn_port_id) diff --git a/neutron/plugins/vmware/dhcp_meta/migration.py b/neutron/plugins/vmware/dhcp_meta/migration.py new file mode 100644 index 000000000..0f1b32b77 --- /dev/null +++ b/neutron/plugins/vmware/dhcp_meta/migration.py @@ -0,0 +1,180 @@ +# Copyright 2014 VMware, Inc. +# +# All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +from neutron.common import constants as const +from neutron.common import exceptions as n_exc +from neutron.extensions import external_net +from neutron.openstack.common import log as logging +from neutron.plugins.vmware.common import exceptions as p_exc +from neutron.plugins.vmware.dhcp_meta import nsx +from neutron.plugins.vmware.dhcp_meta import rpc + +LOG = logging.getLogger(__name__) + + +class DhcpMetadataBuilder(object): + + def __init__(self, plugin, agent_notifier): + self.plugin = plugin + self.notifier = agent_notifier + + def dhcp_agent_get_all(self, context, network_id): + """Return the agents managing the network.""" + return self.plugin.list_dhcp_agents_hosting_network( + context, network_id)['agents'] + + def dhcp_port_get_all(self, context, network_id): + """Return the dhcp ports allocated for the network.""" + filters = { + 'network_id': [network_id], + 'device_owner': [const.DEVICE_OWNER_DHCP] + } + return self.plugin.get_ports(context, filters=filters) + + def router_id_get(self, context, subnet=None): + """Return the router and interface used for the subnet.""" + if not subnet: + return + network_id = subnet['network_id'] + filters = { + 'network_id': [network_id], + 'device_owner': [const.DEVICE_OWNER_ROUTER_INTF] + } + ports = self.plugin.get_ports(context, filters=filters) + for port in ports: + if port['fixed_ips'][0]['subnet_id'] == subnet['id']: + return port['device_id'] + + def metadata_deallocate(self, context, router_id, subnet_id): + """Deallocate metadata services for the subnet.""" + interface = {'subnet_id': subnet_id} + self.plugin.remove_router_interface(context, router_id, interface) + + def metadata_allocate(self, context, router_id, subnet_id): + """Allocate metadata resources for the subnet via the router.""" + interface = {'subnet_id': subnet_id} + self.plugin.add_router_interface(context, router_id, interface) + + def dhcp_deallocate(self, context, network_id, agents, ports): + """Deallocate dhcp resources for the 
network.""" + for agent in agents: + self.plugin.remove_network_from_dhcp_agent( + context, agent['id'], network_id) + for port in ports: + try: + self.plugin.delete_port(context, port['id']) + except n_exc.PortNotFound: + LOG.error(_('Port %s is already gone'), port['id']) + + def dhcp_allocate(self, context, network_id, subnet): + """Allocate dhcp resources for the subnet.""" + # Create LSN resources + network_data = {'id': network_id} + nsx.handle_network_dhcp_access(self.plugin, context, + network_data, 'create_network') + if subnet: + subnet_data = {'subnet': subnet} + self.notifier.notify(context, subnet_data, 'subnet.create.end') + # Get DHCP host and metadata entries created for the LSN + port = { + 'network_id': network_id, + 'fixed_ips': [{'subnet_id': subnet['id']}] + } + self.notifier.notify(context, {'port': port}, 'port.update.end') + + +class MigrationManager(object): + + def __init__(self, plugin, lsn_manager, agent_notifier): + self.plugin = plugin + self.manager = lsn_manager + self.builder = DhcpMetadataBuilder(plugin, agent_notifier) + + def validate(self, context, network_id): + """Validate and return subnet's dhcp info for migration.""" + network = self.plugin.get_network(context, network_id) + + if self.manager.lsn_exists(context, network_id): + reason = _("LSN already exist") + raise p_exc.LsnMigrationConflict(net_id=network_id, reason=reason) + + if network[external_net.EXTERNAL]: + reason = _("Cannot migrate an external network") + raise n_exc.BadRequest(resource='network', msg=reason) + + filters = {'network_id': [network_id]} + subnets = self.plugin.get_subnets(context, filters=filters) + count = len(subnets) + if count == 0: + return None + elif count == 1 and subnets[0]['cidr'] == rpc.METADATA_SUBNET_CIDR: + reason = _("Cannot migrate a 'metadata' network") + raise n_exc.BadRequest(resource='network', msg=reason) + elif count > 1: + reason = _("Unable to support multiple subnets per network") + raise 
p_exc.LsnMigrationConflict(net_id=network_id, reason=reason) + else: + return subnets[0] + + def migrate(self, context, network_id, subnet=None): + """Migrate subnet resources to LSN.""" + router_id = self.builder.router_id_get(context, subnet) + if router_id and subnet: + # Deallocate resources taken for the router, if any + self.builder.metadata_deallocate(context, router_id, subnet['id']) + if subnet: + # Deallocate reources taken for the agent, if any + agents = self.builder.dhcp_agent_get_all(context, network_id) + ports = self.builder.dhcp_port_get_all(context, network_id) + self.builder.dhcp_deallocate(context, network_id, agents, ports) + # (re)create the configuration for LSN + self.builder.dhcp_allocate(context, network_id, subnet) + if router_id and subnet: + # Allocate resources taken for the router, if any + self.builder.metadata_allocate(context, router_id, subnet['id']) + + def report(self, context, network_id, subnet_id=None): + """Return a report of the dhcp and metadata resources in use.""" + if subnet_id: + lsn_id, lsn_port_id = self.manager.lsn_port_get( + context, network_id, subnet_id, raise_on_err=False) + else: + filters = {'network_id': [network_id]} + subnets = self.plugin.get_subnets(context, filters=filters) + if subnets: + lsn_id, lsn_port_id = self.manager.lsn_port_get( + context, network_id, subnets[0]['id'], raise_on_err=False) + else: + lsn_id = self.manager.lsn_get(context, network_id, + raise_on_err=False) + lsn_port_id = None + if lsn_id: + ports = [lsn_port_id] if lsn_port_id else [] + report = { + 'type': 'lsn', + 'services': [lsn_id], + 'ports': ports + } + else: + agents = self.builder.dhcp_agent_get_all(context, network_id) + ports = self.builder.dhcp_port_get_all(context, network_id) + report = { + 'type': 'agent', + 'services': [a['id'] for a in agents], + 'ports': [p['id'] for p in ports] + } + return report diff --git a/neutron/plugins/vmware/dhcp_meta/nsx.py b/neutron/plugins/vmware/dhcp_meta/nsx.py new file mode 100644 
index 000000000..5c1f3971a --- /dev/null +++ b/neutron/plugins/vmware/dhcp_meta/nsx.py @@ -0,0 +1,321 @@ +# Copyright 2013 VMware, Inc. + +# All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +from oslo.config import cfg + +from neutron.api.v2 import attributes as attr +from neutron.common import constants as const +from neutron.common import exceptions as n_exc +from neutron.db import db_base_plugin_v2 +from neutron.db import l3_db +from neutron.extensions import external_net +from neutron.openstack.common import excutils +from neutron.openstack.common import log as logging +from neutron.plugins.vmware.common import exceptions as p_exc +from neutron.plugins.vmware.dhcp_meta import constants as d_const +from neutron.plugins.vmware.nsxlib import lsn as lsn_api + +LOG = logging.getLogger(__name__) + + +dhcp_opts = [ + cfg.ListOpt('extra_domain_name_servers', + deprecated_group='NVP_DHCP', + default=[], + help=_('Comma separated list of additional ' + 'domain name servers')), + cfg.StrOpt('domain_name', + deprecated_group='NVP_DHCP', + default='openstacklocal', + help=_('Domain to use for building the hostnames')), + cfg.IntOpt('default_lease_time', default=43200, + deprecated_group='NVP_DHCP', + help=_("Default DHCP lease time")), +] + + +metadata_opts = [ + cfg.StrOpt('metadata_server_address', + deprecated_group='NVP_METADATA', + default='127.0.0.1', + help=_("IP address used by Metadata server.")), + cfg.IntOpt('metadata_server_port', + 
deprecated_group='NVP_METADATA', + default=8775, + help=_("TCP Port used by Metadata server.")), + cfg.StrOpt('metadata_shared_secret', + deprecated_group='NVP_METADATA', + default='', + help=_('Shared secret to sign instance-id request'), + secret=True) +] + + +def register_dhcp_opts(config): + config.CONF.register_opts(dhcp_opts, group="NSX_DHCP") + + +def register_metadata_opts(config): + config.CONF.register_opts(metadata_opts, group="NSX_METADATA") + + +class DhcpAgentNotifyAPI(object): + + def __init__(self, plugin, lsn_manager): + self.plugin = plugin + self.lsn_manager = lsn_manager + self._handle_subnet_dhcp_access = {'create': self._subnet_create, + 'update': self._subnet_update, + 'delete': self._subnet_delete} + + def notify(self, context, data, methodname): + [resource, action, _e] = methodname.split('.') + if resource == 'subnet': + self._handle_subnet_dhcp_access[action](context, data['subnet']) + elif resource == 'port' and action == 'update': + self._port_update(context, data['port']) + + def _port_update(self, context, port): + # With no fixed IP's there's nothing that can be updated + if not port["fixed_ips"]: + return + network_id = port['network_id'] + subnet_id = port["fixed_ips"][0]['subnet_id'] + filters = {'network_id': [network_id]} + # Because NSX does not support updating a single host entry we + # got to build the whole list from scratch and update in bulk + ports = self.plugin.get_ports(context, filters) + if not ports: + return + dhcp_conf = [ + {'mac_address': p['mac_address'], + 'ip_address': p["fixed_ips"][0]['ip_address']} + for p in ports if is_user_port(p) + ] + meta_conf = [ + {'instance_id': p['device_id'], + 'ip_address': p["fixed_ips"][0]['ip_address']} + for p in ports if is_user_port(p, check_dev_id=True) + ] + self.lsn_manager.lsn_port_update( + context, network_id, subnet_id, dhcp=dhcp_conf, meta=meta_conf) + + def _subnet_create(self, context, subnet, clean_on_err=True): + if subnet['enable_dhcp']: + network_id = 
subnet['network_id'] + # Create port for DHCP service + dhcp_port = { + "name": "", + "admin_state_up": True, + "device_id": "", + "device_owner": const.DEVICE_OWNER_DHCP, + "network_id": network_id, + "tenant_id": subnet["tenant_id"], + "mac_address": attr.ATTR_NOT_SPECIFIED, + "fixed_ips": [{"subnet_id": subnet['id']}] + } + try: + # This will end up calling handle_port_dhcp_access + # down below as well as handle_port_metadata_access + self.plugin.create_port(context, {'port': dhcp_port}) + except p_exc.PortConfigurationError as e: + err_msg = (_("Error while creating subnet %(cidr)s for " + "network %(network)s. Please, contact " + "administrator") % + {"cidr": subnet["cidr"], + "network": network_id}) + LOG.error(err_msg) + db_base_plugin_v2.NeutronDbPluginV2.delete_port( + self.plugin, context, e.port_id) + if clean_on_err: + self.plugin.delete_subnet(context, subnet['id']) + raise n_exc.Conflict() + + def _subnet_update(self, context, subnet): + network_id = subnet['network_id'] + try: + lsn_id, lsn_port_id = self.lsn_manager.lsn_port_get( + context, network_id, subnet['id']) + self.lsn_manager.lsn_port_dhcp_configure( + context, lsn_id, lsn_port_id, subnet) + except p_exc.LsnPortNotFound: + # It's possible that the subnet was created with dhcp off; + # check if the subnet was uplinked onto a router, and if so + # remove the patch attachment between the metadata port and + # the lsn port, in favor on the one we'll be creating during + # _subnet_create + self.lsn_manager.lsn_port_dispose( + context, network_id, d_const.METADATA_MAC) + # also, check that a dhcp port exists first and provision it + # accordingly + filters = dict(network_id=[network_id], + device_owner=[const.DEVICE_OWNER_DHCP]) + ports = self.plugin.get_ports(context, filters=filters) + if ports: + handle_port_dhcp_access( + self.plugin, context, ports[0], 'create_port') + else: + self._subnet_create(context, subnet, clean_on_err=False) + + def _subnet_delete(self, context, subnet): + # 
FIXME(armando-migliaccio): it looks like that a subnet filter + # is ineffective; so filter by network for now. + network_id = subnet['network_id'] + filters = dict(network_id=[network_id], + device_owner=[const.DEVICE_OWNER_DHCP]) + # FIXME(armando-migliaccio): this may be race-y + ports = self.plugin.get_ports(context, filters=filters) + if ports: + # This will end up calling handle_port_dhcp_access + # down below as well as handle_port_metadata_access + self.plugin.delete_port(context, ports[0]['id']) + + +def is_user_port(p, check_dev_id=False): + usable = p['fixed_ips'] and p['device_owner'] not in d_const.SPECIAL_OWNERS + return usable if not check_dev_id else usable and p['device_id'] + + +def check_services_requirements(cluster): + ver = cluster.api_client.get_version() + # It sounds like 4.1 is the first one where DHCP in NSX + # will have the experimental feature + if ver.major >= 4 and ver.minor >= 1: + cluster_id = cfg.CONF.default_service_cluster_uuid + if not lsn_api.service_cluster_exists(cluster, cluster_id): + raise p_exc.ServiceClusterUnavailable(cluster_id=cluster_id) + else: + raise p_exc.InvalidVersion(version=ver) + + +def handle_network_dhcp_access(plugin, context, network, action): + LOG.info(_("Performing DHCP %(action)s for resource: %(resource)s") + % {"action": action, "resource": network}) + if action == 'create_network': + network_id = network['id'] + if network.get(external_net.EXTERNAL): + LOG.info(_("Network %s is external: no LSN to create"), network_id) + return + plugin.lsn_manager.lsn_create(context, network_id) + elif action == 'delete_network': + # NOTE(armando-migliaccio): on delete_network, network + # is just the network id + network_id = network + plugin.lsn_manager.lsn_delete_by_network(context, network_id) + LOG.info(_("Logical Services Node for network " + "%s configured successfully"), network_id) + + +def handle_port_dhcp_access(plugin, context, port, action): + LOG.info(_("Performing DHCP %(action)s for resource: 
%(resource)s") + % {"action": action, "resource": port}) + if port["device_owner"] == const.DEVICE_OWNER_DHCP: + network_id = port["network_id"] + if action == "create_port": + # at this point the port must have a subnet and a fixed ip + subnet_id = port["fixed_ips"][0]['subnet_id'] + subnet = plugin.get_subnet(context, subnet_id) + subnet_data = { + "mac_address": port["mac_address"], + "ip_address": subnet['cidr'], + "subnet_id": subnet['id'] + } + try: + plugin.lsn_manager.lsn_port_dhcp_setup( + context, network_id, port['id'], subnet_data, subnet) + except p_exc.PortConfigurationError: + err_msg = (_("Error while configuring DHCP for " + "port %s"), port['id']) + LOG.error(err_msg) + raise n_exc.NeutronException() + elif action == "delete_port": + plugin.lsn_manager.lsn_port_dispose(context, network_id, + port['mac_address']) + elif port["device_owner"] != const.DEVICE_OWNER_DHCP: + if port.get("fixed_ips"): + # do something only if there are IP's and dhcp is enabled + subnet_id = port["fixed_ips"][0]['subnet_id'] + if not plugin.get_subnet(context, subnet_id)['enable_dhcp']: + LOG.info(_("DHCP is disabled for subnet %s: nothing " + "to do"), subnet_id) + return + host_data = { + "mac_address": port["mac_address"], + "ip_address": port["fixed_ips"][0]['ip_address'] + } + network_id = port["network_id"] + if action == "create_port": + handler = plugin.lsn_manager.lsn_port_dhcp_host_add + elif action == "delete_port": + handler = plugin.lsn_manager.lsn_port_dhcp_host_remove + try: + handler(context, network_id, subnet_id, host_data) + except p_exc.PortConfigurationError: + with excutils.save_and_reraise_exception(): + if action == 'create_port': + db_base_plugin_v2.NeutronDbPluginV2.delete_port( + plugin, context, port['id']) + LOG.info(_("DHCP for port %s configured successfully"), port['id']) + + +def handle_port_metadata_access(plugin, context, port, is_delete=False): + if is_user_port(port, check_dev_id=True): + network_id = port["network_id"] + network = 
plugin.get_network(context, network_id) + if network[external_net.EXTERNAL]: + LOG.info(_("Network %s is external: nothing to do"), network_id) + return + subnet_id = port["fixed_ips"][0]['subnet_id'] + host_data = { + "instance_id": port["device_id"], + "tenant_id": port["tenant_id"], + "ip_address": port["fixed_ips"][0]['ip_address'] + } + LOG.info(_("Configuring metadata entry for port %s"), port) + if not is_delete: + handler = plugin.lsn_manager.lsn_port_meta_host_add + else: + handler = plugin.lsn_manager.lsn_port_meta_host_remove + try: + handler(context, network_id, subnet_id, host_data) + except p_exc.PortConfigurationError: + with excutils.save_and_reraise_exception(): + if not is_delete: + db_base_plugin_v2.NeutronDbPluginV2.delete_port( + plugin, context, port['id']) + LOG.info(_("Metadata for port %s configured successfully"), port['id']) + + +def handle_router_metadata_access(plugin, context, router_id, interface=None): + LOG.info(_("Handle metadata access via router: %(r)s and " + "interface %(i)s") % {'r': router_id, 'i': interface}) + if interface: + try: + plugin.get_port(context, interface['port_id']) + is_enabled = True + except n_exc.NotFound: + is_enabled = False + subnet_id = interface['subnet_id'] + try: + plugin.lsn_manager.lsn_metadata_configure( + context, subnet_id, is_enabled) + except p_exc.NsxPluginException: + with excutils.save_and_reraise_exception(): + if is_enabled: + l3_db.L3_NAT_db_mixin.remove_router_interface( + plugin, context, router_id, interface) + LOG.info(_("Metadata for router %s handled successfully"), router_id) diff --git a/neutron/plugins/vmware/dhcp_meta/rpc.py b/neutron/plugins/vmware/dhcp_meta/rpc.py new file mode 100644 index 000000000..9d409d01a --- /dev/null +++ b/neutron/plugins/vmware/dhcp_meta/rpc.py @@ -0,0 +1,222 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 VMware, Inc. 
+# All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +from eventlet import greenthread +import netaddr +from oslo.config import cfg + +from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api +from neutron.api.v2 import attributes +from neutron.common import constants as const +from neutron.common import exceptions as ntn_exc +from neutron.common import rpc_compat +from neutron.db import db_base_plugin_v2 +from neutron.db import dhcp_rpc_base +from neutron.db import l3_db +from neutron.db import models_v2 +from neutron.openstack.common import log as logging +from neutron.plugins.vmware.api_client import exception as api_exc +from neutron.plugins.vmware.common import config +from neutron.plugins.vmware.common import exceptions as nsx_exc + +LOG = logging.getLogger(__name__) + +METADATA_DEFAULT_PREFIX = 30 +METADATA_SUBNET_CIDR = '169.254.169.252/%d' % METADATA_DEFAULT_PREFIX +METADATA_GATEWAY_IP = '169.254.169.253' +METADATA_DHCP_ROUTE = '169.254.169.254/32' + + +class NSXRpcCallbacks(rpc_compat.RpcCallback, + dhcp_rpc_base.DhcpRpcCallbackMixin): + + RPC_API_VERSION = '1.1' + + +def handle_network_dhcp_access(plugin, context, network, action): + pass + + +def handle_port_dhcp_access(plugin, context, port_data, action): + active_port = (cfg.CONF.NSX.metadata_mode == config.MetadataModes.INDIRECT + and port_data.get('device_owner') == const.DEVICE_OWNER_DHCP + and port_data.get('fixed_ips', [])) + if active_port: + subnet_id = 
port_data['fixed_ips'][0]['subnet_id'] + subnet = plugin.get_subnet(context, subnet_id) + _notify_rpc_agent(context, {'subnet': subnet}, 'subnet.update.end') + + +def handle_port_metadata_access(plugin, context, port, is_delete=False): + if (cfg.CONF.NSX.metadata_mode == config.MetadataModes.INDIRECT and + port.get('device_owner') == const.DEVICE_OWNER_DHCP): + if port.get('fixed_ips', []) or is_delete: + fixed_ip = port['fixed_ips'][0] + query = context.session.query(models_v2.Subnet) + subnet = query.filter( + models_v2.Subnet.id == fixed_ip['subnet_id']).one() + # If subnet does not have a gateway do not create metadata + # route. This is done via the enable_isolated_metadata + # option if desired. + if not subnet.get('gateway_ip'): + LOG.info(_('Subnet %s does not have a gateway, the metadata ' + 'route will not be created'), subnet['id']) + return + metadata_routes = [r for r in subnet.routes + if r['destination'] == METADATA_DHCP_ROUTE] + if metadata_routes: + # We should have only a single metadata route at any time + # because the route logic forbids two routes with the same + # destination. 
Update next hop with the provided IP address + if not is_delete: + metadata_routes[0].nexthop = fixed_ip['ip_address'] + else: + context.session.delete(metadata_routes[0]) + else: + # add the metadata route + route = models_v2.SubnetRoute( + subnet_id=subnet.id, + destination=METADATA_DHCP_ROUTE, + nexthop=fixed_ip['ip_address']) + context.session.add(route) + + +def handle_router_metadata_access(plugin, context, router_id, interface=None): + if cfg.CONF.NSX.metadata_mode != config.MetadataModes.DIRECT: + LOG.debug(_("Metadata access network is disabled")) + return + if not cfg.CONF.allow_overlapping_ips: + LOG.warn(_("Overlapping IPs must be enabled in order to setup " + "the metadata access network")) + return + ctx_elevated = context.elevated() + device_filter = {'device_id': [router_id], + 'device_owner': [l3_db.DEVICE_OWNER_ROUTER_INTF]} + # Retrieve ports calling database plugin + ports = db_base_plugin_v2.NeutronDbPluginV2.get_ports( + plugin, ctx_elevated, filters=device_filter) + try: + if ports: + if (interface and + not _find_metadata_port(plugin, ctx_elevated, ports)): + _create_metadata_access_network( + plugin, ctx_elevated, router_id) + elif len(ports) == 1: + # The only port left might be the metadata port + _destroy_metadata_access_network( + plugin, ctx_elevated, router_id, ports) + else: + LOG.debug(_("No router interface found for router '%s'. 
" + "No metadata access network should be " + "created or destroyed"), router_id) + # TODO(salvatore-orlando): A better exception handling in the + # NSX plugin would allow us to improve error handling here + except (ntn_exc.NeutronException, nsx_exc.NsxPluginException, + api_exc.NsxApiException): + # Any exception here should be regarded as non-fatal + LOG.exception(_("An error occurred while operating on the " + "metadata access network for router:'%s'"), + router_id) + + +def _find_metadata_port(plugin, context, ports): + for port in ports: + for fixed_ip in port['fixed_ips']: + cidr = netaddr.IPNetwork( + plugin.get_subnet(context, fixed_ip['subnet_id'])['cidr']) + if cidr in netaddr.IPNetwork(METADATA_SUBNET_CIDR): + return port + + +def _create_metadata_access_network(plugin, context, router_id): + # Add network + # Network name is likely to be truncated on NSX + net_data = {'name': 'meta-%s' % router_id, + 'tenant_id': '', # intentionally not set + 'admin_state_up': True, + 'port_security_enabled': False, + 'shared': False, + 'status': const.NET_STATUS_ACTIVE} + meta_net = plugin.create_network(context, + {'network': net_data}) + greenthread.sleep(0) # yield + plugin.schedule_network(context, meta_net) + greenthread.sleep(0) # yield + # From this point on there will be resources to garbage-collect + # in case of failures + meta_sub = None + try: + # Add subnet + subnet_data = {'network_id': meta_net['id'], + 'tenant_id': '', # intentionally not set + 'name': 'meta-%s' % router_id, + 'ip_version': 4, + 'shared': False, + 'cidr': METADATA_SUBNET_CIDR, + 'enable_dhcp': True, + # Ensure default allocation pool is generated + 'allocation_pools': attributes.ATTR_NOT_SPECIFIED, + 'gateway_ip': METADATA_GATEWAY_IP, + 'dns_nameservers': [], + 'host_routes': []} + meta_sub = plugin.create_subnet(context, + {'subnet': subnet_data}) + greenthread.sleep(0) # yield + plugin.add_router_interface(context, router_id, + {'subnet_id': meta_sub['id']}) + greenthread.sleep(0) # 
yield + # Tell to start the metadata agent proxy, only if we had success + _notify_rpc_agent(context, {'subnet': meta_sub}, 'subnet.create.end') + except (ntn_exc.NeutronException, + nsx_exc.NsxPluginException, + api_exc.NsxApiException): + # It is not necessary to explicitly delete the subnet + # as it will be removed with the network + plugin.delete_network(context, meta_net['id']) + + +def _destroy_metadata_access_network(plugin, context, router_id, ports): + if not ports: + return + meta_port = _find_metadata_port(plugin, context, ports) + if not meta_port: + return + meta_net_id = meta_port['network_id'] + meta_sub_id = meta_port['fixed_ips'][0]['subnet_id'] + plugin.remove_router_interface( + context, router_id, {'port_id': meta_port['id']}) + greenthread.sleep(0) # yield + context.session.expunge_all() + try: + # Remove network (this will remove the subnet too) + plugin.delete_network(context, meta_net_id) + greenthread.sleep(0) # yield + except (ntn_exc.NeutronException, nsx_exc.NsxPluginException, + api_exc.NsxApiException): + # must re-add the router interface + plugin.add_router_interface(context, router_id, + {'subnet_id': meta_sub_id}) + # Tell to stop the metadata agent proxy + _notify_rpc_agent( + context, {'network': {'id': meta_net_id}}, 'network.delete.end') + + +def _notify_rpc_agent(context, payload, event): + if cfg.CONF.dhcp_agent_notification: + dhcp_notifier = dhcp_rpc_agent_api.DhcpAgentNotifyAPI() + dhcp_notifier.notify(context, payload, event) diff --git a/neutron/plugins/vmware/dhcpmeta_modes.py b/neutron/plugins/vmware/dhcpmeta_modes.py new file mode 100644 index 000000000..0ce2112f6 --- /dev/null +++ b/neutron/plugins/vmware/dhcpmeta_modes.py @@ -0,0 +1,163 @@ +# Copyright 2013 VMware, Inc. +# +# All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +from oslo.config import cfg + +from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api +from neutron.common import constants as const +from neutron.common import rpc_compat +from neutron.common import topics +from neutron.db import agents_db +from neutron.openstack.common import importutils +from neutron.openstack.common import log as logging +from neutron.plugins.vmware.common import config +from neutron.plugins.vmware.common import exceptions as nsx_exc +from neutron.plugins.vmware.dhcp_meta import combined +from neutron.plugins.vmware.dhcp_meta import lsnmanager +from neutron.plugins.vmware.dhcp_meta import migration +from neutron.plugins.vmware.dhcp_meta import nsx as nsx_svc +from neutron.plugins.vmware.dhcp_meta import rpc as nsx_rpc +from neutron.plugins.vmware.extensions import lsn + +LOG = logging.getLogger(__name__) + + +class DhcpMetadataAccess(object): + + def setup_dhcpmeta_access(self): + """Initialize support for DHCP and Metadata services.""" + self._init_extensions() + if cfg.CONF.NSX.agent_mode == config.AgentModes.AGENT: + self._setup_rpc_dhcp_metadata() + mod = nsx_rpc + elif cfg.CONF.NSX.agent_mode == config.AgentModes.AGENTLESS: + self._setup_nsx_dhcp_metadata() + mod = nsx_svc + elif cfg.CONF.NSX.agent_mode == config.AgentModes.COMBINED: + notifier = self._setup_nsx_dhcp_metadata() + self._setup_rpc_dhcp_metadata(notifier=notifier) + mod = combined + else: + error = _("Invalid agent_mode: %s") % cfg.CONF.NSX.agent_mode + LOG.error(error) + raise nsx_exc.NsxPluginException(err_msg=error) + 
self.handle_network_dhcp_access_delegate = ( + mod.handle_network_dhcp_access + ) + self.handle_port_dhcp_access_delegate = ( + mod.handle_port_dhcp_access + ) + self.handle_port_metadata_access_delegate = ( + mod.handle_port_metadata_access + ) + self.handle_metadata_access_delegate = ( + mod.handle_router_metadata_access + ) + + def _setup_rpc_dhcp_metadata(self, notifier=None): + self.topic = topics.PLUGIN + self.conn = rpc_compat.create_connection(new=True) + self.endpoints = [nsx_rpc.NSXRpcCallbacks(), + agents_db.AgentExtRpcCallback()] + self.conn.create_consumer(self.topic, self.endpoints, fanout=False) + self.agent_notifiers[const.AGENT_TYPE_DHCP] = ( + notifier or dhcp_rpc_agent_api.DhcpAgentNotifyAPI()) + self.conn.consume_in_threads() + self.network_scheduler = importutils.import_object( + cfg.CONF.network_scheduler_driver + ) + self.supported_extension_aliases.extend( + ['agent', 'dhcp_agent_scheduler']) + + def _setup_nsx_dhcp_metadata(self): + self._check_services_requirements() + nsx_svc.register_dhcp_opts(cfg) + nsx_svc.register_metadata_opts(cfg) + lsnmanager.register_lsn_opts(cfg) + lsn_manager = lsnmanager.PersistentLsnManager(self.safe_reference) + self.lsn_manager = lsn_manager + if cfg.CONF.NSX.agent_mode == config.AgentModes.AGENTLESS: + notifier = nsx_svc.DhcpAgentNotifyAPI(self.safe_reference, + lsn_manager) + self.agent_notifiers[const.AGENT_TYPE_DHCP] = notifier + # In agentless mode, ports whose owner is DHCP need to + # be special cased; so add it to the list of special + # owners list + if const.DEVICE_OWNER_DHCP not in self.port_special_owners: + self.port_special_owners.append(const.DEVICE_OWNER_DHCP) + elif cfg.CONF.NSX.agent_mode == config.AgentModes.COMBINED: + # This becomes ineffective, as all new networks creations + # are handled by Logical Services Nodes in NSX + cfg.CONF.set_override('network_auto_schedule', False) + LOG.warn(_('network_auto_schedule has been disabled')) + notifier = 
combined.DhcpAgentNotifyAPI(self.safe_reference, + lsn_manager) + self.supported_extension_aliases.append(lsn.EXT_ALIAS) + # Add the capability to migrate dhcp and metadata services over + self.migration_manager = ( + migration.MigrationManager( + self.safe_reference, lsn_manager, notifier)) + return notifier + + def _init_extensions(self): + extensions = (lsn.EXT_ALIAS, 'agent', 'dhcp_agent_scheduler') + for ext in extensions: + if ext in self.supported_extension_aliases: + self.supported_extension_aliases.remove(ext) + + def _check_services_requirements(self): + try: + error = None + nsx_svc.check_services_requirements(self.cluster) + except nsx_exc.InvalidVersion: + error = _("Unable to run Neutron with config option '%s', as NSX " + "does not support it") % cfg.CONF.NSX.agent_mode + except nsx_exc.ServiceClusterUnavailable: + error = _("Unmet dependency for config option " + "'%s'") % cfg.CONF.NSX.agent_mode + if error: + LOG.exception(error) + raise nsx_exc.NsxPluginException(err_msg=error) + + def get_lsn(self, context, network_id, fields=None): + report = self.migration_manager.report(context, network_id) + return {'network': network_id, 'report': report} + + def create_lsn(self, context, lsn): + network_id = lsn['lsn']['network'] + subnet = self.migration_manager.validate(context, network_id) + subnet_id = None if not subnet else subnet['id'] + self.migration_manager.migrate(context, network_id, subnet) + r = self.migration_manager.report(context, network_id, subnet_id) + return {'network': network_id, 'report': r} + + def handle_network_dhcp_access(self, context, network, action): + self.handle_network_dhcp_access_delegate(self.safe_reference, context, + network, action) + + def handle_port_dhcp_access(self, context, port_data, action): + self.handle_port_dhcp_access_delegate(self.safe_reference, context, + port_data, action) + + def handle_port_metadata_access(self, context, port, is_delete=False): + 
self.handle_port_metadata_access_delegate(self.safe_reference, context, + port, is_delete) + + def handle_router_metadata_access(self, context, + router_id, interface=None): + self.handle_metadata_access_delegate(self.safe_reference, context, + router_id, interface) diff --git a/neutron/plugins/vmware/extensions/__init__.py b/neutron/plugins/vmware/extensions/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/neutron/plugins/vmware/extensions/distributedrouter.py b/neutron/plugins/vmware/extensions/distributedrouter.py new file mode 100644 index 000000000..aa6949b82 --- /dev/null +++ b/neutron/plugins/vmware/extensions/distributedrouter.py @@ -0,0 +1,70 @@ +# Copyright 2013 VMware, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from neutron.api.v2 import attributes + + +def convert_to_boolean_if_not_none(data): + if data is not None: + return attributes.convert_to_boolean(data) + return data + + +DISTRIBUTED = 'distributed' +EXTENDED_ATTRIBUTES_2_0 = { + 'routers': { + DISTRIBUTED: {'allow_post': True, 'allow_put': False, + 'convert_to': convert_to_boolean_if_not_none, + 'default': attributes.ATTR_NOT_SPECIFIED, + 'is_visible': True}, + } +} + + +class Distributedrouter(object): + """Extension class supporting distributed router.""" + + @classmethod + def get_name(cls): + return "Distributed Router" + + @classmethod + def get_alias(cls): + return "dist-router" + + @classmethod + def get_description(cls): + return "Enables configuration of NSX Distributed routers." + + @classmethod + def get_namespace(cls): + return "http://docs.openstack.org/ext/dist-router/api/v1.0" + + @classmethod + def get_updated(cls): + return "2013-08-1T10:00:00-00:00" + + def get_required_extensions(self): + return ["router"] + + @classmethod + def get_resources(cls): + """Returns Ext Resources.""" + return [] + + def get_extended_resources(self, version): + if version == "2.0": + return EXTENDED_ATTRIBUTES_2_0 + else: + return {} diff --git a/neutron/plugins/vmware/extensions/lsn.py b/neutron/plugins/vmware/extensions/lsn.py new file mode 100644 index 000000000..4a7d3ca3d --- /dev/null +++ b/neutron/plugins/vmware/extensions/lsn.py @@ -0,0 +1,82 @@ +# Copyright 2014 VMware, Inc. +# +# All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. +# + +from neutron.api import extensions +from neutron.api.v2 import base +from neutron import manager + + +EXT_ALIAS = 'lsn' +COLLECTION_NAME = "%ss" % EXT_ALIAS + +RESOURCE_ATTRIBUTE_MAP = { + COLLECTION_NAME: { + 'network': {'allow_post': True, 'allow_put': False, + 'validate': {'type:string': None}, + 'is_visible': True}, + 'report': {'allow_post': False, 'allow_put': False, + 'is_visible': True}, + 'tenant_id': {'allow_post': True, 'allow_put': False, + 'required_by_policy': True, + 'validate': {'type:string': None}, 'is_visible': True}, + }, +} + + +class Lsn(object): + """Enable LSN configuration for Neutron NSX networks.""" + + @classmethod + def get_name(cls): + return "Logical Service Node configuration" + + @classmethod + def get_alias(cls): + return EXT_ALIAS + + @classmethod + def get_description(cls): + return "Enables configuration of NSX Logical Services Node." + + @classmethod + def get_namespace(cls): + return "http://docs.openstack.org/ext/%s/api/v2.0" % EXT_ALIAS + + @classmethod + def get_updated(cls): + return "2013-10-05T10:00:00-00:00" + + @classmethod + def get_resources(cls): + """Returns Ext Resources.""" + exts = [] + plugin = manager.NeutronManager.get_plugin() + resource_name = EXT_ALIAS + collection_name = resource_name.replace('_', '-') + "s" + params = RESOURCE_ATTRIBUTE_MAP.get(COLLECTION_NAME, dict()) + controller = base.create_resource(collection_name, + resource_name, + plugin, params, allow_bulk=False) + ex = extensions.ResourceExtension(collection_name, controller) + exts.append(ex) + return exts + + def get_extended_resources(self, version): + if version == "2.0": + return RESOURCE_ATTRIBUTE_MAP + else: + return {} diff --git a/neutron/plugins/vmware/extensions/maclearning.py b/neutron/plugins/vmware/extensions/maclearning.py new file mode 100644 index 000000000..21c669150 --- /dev/null +++ 
b/neutron/plugins/vmware/extensions/maclearning.py @@ -0,0 +1,61 @@ +# Copyright 2013 VMware, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from neutron.api.v2 import attributes + + +MAC_LEARNING = 'mac_learning_enabled' +EXTENDED_ATTRIBUTES_2_0 = { + 'ports': { + MAC_LEARNING: {'allow_post': True, 'allow_put': True, + 'convert_to': attributes.convert_to_boolean, + 'default': attributes.ATTR_NOT_SPECIFIED, + 'is_visible': True}, + } +} + + +class Maclearning(object): + """Extension class supporting port mac learning.""" + + @classmethod + def get_name(cls): + return "MAC Learning" + + @classmethod + def get_alias(cls): + return "mac-learning" + + @classmethod + def get_description(cls): + return "Provides MAC learning capabilities." + + @classmethod + def get_namespace(cls): + return "http://docs.openstack.org/ext/maclearning/api/v1.0" + + @classmethod + def get_updated(cls): + return "2013-05-1T10:00:00-00:00" + + @classmethod + def get_resources(cls): + """Returns Ext Resources.""" + return [] + + def get_extended_resources(self, version): + if version == "2.0": + return EXTENDED_ATTRIBUTES_2_0 + else: + return {} diff --git a/neutron/plugins/vmware/extensions/networkgw.py b/neutron/plugins/vmware/extensions/networkgw.py new file mode 100644 index 000000000..2cb650b40 --- /dev/null +++ b/neutron/plugins/vmware/extensions/networkgw.py @@ -0,0 +1,251 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013 VMware. 
All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +import abc + +from oslo.config import cfg + +from neutron.api.v2 import attributes +from neutron.api.v2 import resource_helper +from neutron.plugins.vmware.common import utils + +GATEWAY_RESOURCE_NAME = "network_gateway" +DEVICE_RESOURCE_NAME = "gateway_device" +# Use dash for alias and collection name +EXT_ALIAS = GATEWAY_RESOURCE_NAME.replace('_', '-') +NETWORK_GATEWAYS = "%ss" % EXT_ALIAS +GATEWAY_DEVICES = "%ss" % DEVICE_RESOURCE_NAME.replace('_', '-') +DEVICE_ID_ATTR = 'id' +IFACE_NAME_ATTR = 'interface_name' + +# Attribute Map for Network Gateway Resource +# TODO(salvatore-orlando): add admin state as other neutron resources +RESOURCE_ATTRIBUTE_MAP = { + NETWORK_GATEWAYS: { + 'id': {'allow_post': False, 'allow_put': False, + 'is_visible': True}, + 'name': {'allow_post': True, 'allow_put': True, + 'validate': {'type:string': None}, + 'is_visible': True, 'default': ''}, + 'default': {'allow_post': False, 'allow_put': False, + 'is_visible': True}, + 'devices': {'allow_post': True, 'allow_put': False, + 'validate': {'type:device_list': None}, + 'is_visible': True}, + 'ports': {'allow_post': False, 'allow_put': False, + 'default': [], + 'is_visible': True}, + 'tenant_id': {'allow_post': True, 'allow_put': False, + 'validate': {'type:string': None}, + 'required_by_policy': True, + 'is_visible': True} + }, + GATEWAY_DEVICES: { + 'id': {'allow_post': False, 'allow_put': False, + 'is_visible': 
True}, + 'name': {'allow_post': True, 'allow_put': True, + 'validate': {'type:string': None}, + 'is_visible': True, 'default': ''}, + 'client_certificate': {'allow_post': True, 'allow_put': True, + 'validate': {'type:string': None}, + 'is_visible': True}, + 'connector_type': {'allow_post': True, 'allow_put': True, + 'validate': {'type:connector_type': None}, + 'is_visible': True}, + 'connector_ip': {'allow_post': True, 'allow_put': True, + 'validate': {'type:ip_address': None}, + 'is_visible': True}, + 'tenant_id': {'allow_post': True, 'allow_put': False, + 'validate': {'type:string': None}, + 'required_by_policy': True, + 'is_visible': True}, + 'status': {'allow_post': False, 'allow_put': False, + 'is_visible': True}, + } +} + + +def _validate_device_list(data, valid_values=None): + """Validate the list of service definitions.""" + if not data: + # Devices must be provided + msg = _("Cannot create a gateway with an empty device list") + return msg + try: + for device in data: + key_specs = {DEVICE_ID_ATTR: + {'type:regex': attributes.UUID_PATTERN, + 'required': True}, + IFACE_NAME_ATTR: + {'type:string': None, + 'required': False}} + err_msg = attributes._validate_dict( + device, key_specs=key_specs) + if err_msg: + return err_msg + unexpected_keys = [key for key in device if key not in key_specs] + if unexpected_keys: + err_msg = (_("Unexpected keys found in device description:%s") + % ",".join(unexpected_keys)) + return err_msg + except TypeError: + return (_("%s: provided data are not iterable") % + _validate_device_list.__name__) + + +def _validate_connector_type(data, valid_values=None): + if not data: + # A connector type is compulsory + msg = _("A connector type is required to create a gateway device") + return msg + connector_types = (valid_values if valid_values else + [utils.NetworkTypes.GRE, + utils.NetworkTypes.STT, + utils.NetworkTypes.BRIDGE, + 'ipsec%s' % utils.NetworkTypes.GRE, + 'ipsec%s' % utils.NetworkTypes.STT]) + if data not in 
connector_types: + msg = _("Unknown connector type: %s") % data + return msg + + +nw_gw_quota_opts = [ + cfg.IntOpt('quota_network_gateway', + default=5, + help=_('Number of network gateways allowed per tenant, ' + '-1 for unlimited')) +] + +cfg.CONF.register_opts(nw_gw_quota_opts, 'QUOTAS') + +attributes.validators['type:device_list'] = _validate_device_list +attributes.validators['type:connector_type'] = _validate_connector_type + + +class Networkgw(object): + """API extension for Layer-2 Gateway support. + + The Layer-2 gateway feature allows for connecting neutron networks + with external networks at the layer-2 level. No assumption is made on + the location of the external network, which might not even be directly + reachable from the hosts where the VMs are deployed. + + This is achieved by instantiating 'network gateways', and then connecting + Neutron network to them. + """ + + @classmethod + def get_name(cls): + return "Network Gateway" + + @classmethod + def get_alias(cls): + return EXT_ALIAS + + @classmethod + def get_description(cls): + return "Connects Neutron networks with external networks at layer 2." 
+ + @classmethod + def get_namespace(cls): + return "http://docs.openstack.org/ext/network-gateway/api/v1.0" + + @classmethod + def get_updated(cls): + return "2014-01-01T00:00:00-00:00" + + @classmethod + def get_resources(cls): + """Returns Ext Resources.""" + + member_actions = { + GATEWAY_RESOURCE_NAME.replace('_', '-'): { + 'connect_network': 'PUT', + 'disconnect_network': 'PUT'}} + + plural_mappings = resource_helper.build_plural_mappings( + {}, RESOURCE_ATTRIBUTE_MAP) + + return resource_helper.build_resource_info(plural_mappings, + RESOURCE_ATTRIBUTE_MAP, + None, + action_map=member_actions, + register_quota=True, + translate_name=True) + + def get_extended_resources(self, version): + if version == "2.0": + return RESOURCE_ATTRIBUTE_MAP + else: + return {} + + +class NetworkGatewayPluginBase(object): + + @abc.abstractmethod + def create_network_gateway(self, context, network_gateway): + pass + + @abc.abstractmethod + def update_network_gateway(self, context, id, network_gateway): + pass + + @abc.abstractmethod + def get_network_gateway(self, context, id, fields=None): + pass + + @abc.abstractmethod + def delete_network_gateway(self, context, id): + pass + + @abc.abstractmethod + def get_network_gateways(self, context, filters=None, fields=None, + sorts=None, limit=None, marker=None, + page_reverse=False): + pass + + @abc.abstractmethod + def connect_network(self, context, network_gateway_id, + network_mapping_info): + pass + + @abc.abstractmethod + def disconnect_network(self, context, network_gateway_id, + network_mapping_info): + pass + + @abc.abstractmethod + def create_gateway_device(self, context, gateway_device): + pass + + @abc.abstractmethod + def update_gateway_device(self, context, id, gateway_device): + pass + + @abc.abstractmethod + def delete_gateway_device(self, context, id): + pass + + @abc.abstractmethod + def get_gateway_device(self, context, id, fields=None): + pass + + @abc.abstractmethod + def get_gateway_devices(self, context, 
filters=None, fields=None, + sorts=None, limit=None, marker=None, + page_reverse=False): + pass diff --git a/neutron/plugins/vmware/extensions/nvp_qos.py b/neutron/plugins/vmware/extensions/nvp_qos.py new file mode 100644 index 000000000..470f267b5 --- /dev/null +++ b/neutron/plugins/vmware/extensions/nvp_qos.py @@ -0,0 +1,40 @@ +# Copyright 2013 VMware, Inc. +# +# All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# TODO(arosen): This is deprecated in Juno, and +# to be removed in Kxxxx. + +from neutron.plugins.vmware.extensions import qos + + +class Nvp_qos(qos.Qos): + """(Deprecated) Port Queue extension.""" + + @classmethod + def get_name(cls): + return "nvp-qos" + + @classmethod + def get_alias(cls): + return "nvp-qos" + + @classmethod + def get_description(cls): + return "NVP QoS extension (deprecated)." + + @classmethod + def get_namespace(cls): + return "http://docs.openstack.org/ext/nvp-qos/api/v2.0" diff --git a/neutron/plugins/vmware/extensions/qos.py b/neutron/plugins/vmware/extensions/qos.py new file mode 100644 index 000000000..45b343a1e --- /dev/null +++ b/neutron/plugins/vmware/extensions/qos.py @@ -0,0 +1,223 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 VMware, Inc. +# All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +import abc + +from neutron.api import extensions +from neutron.api.v2 import attributes as attr +from neutron.api.v2 import base +from neutron.common import exceptions as qexception +from neutron import manager + + +# For policy.json/Auth +qos_queue_create = "create_qos_queue" +qos_queue_delete = "delete_qos_queue" +qos_queue_get = "get_qos_queue" +qos_queue_list = "get_qos_queues" + + +class DefaultQueueCreateNotAdmin(qexception.InUse): + message = _("Need to be admin in order to create queue called default") + + +class DefaultQueueAlreadyExists(qexception.InUse): + message = _("Default queue already exists.") + + +class QueueInvalidDscp(qexception.InvalidInput): + message = _("Invalid value for dscp %(data)s must be integer value" + " between 0 and 63.") + + +class QueueMinGreaterMax(qexception.InvalidInput): + message = _("Invalid bandwidth rate, min greater than max.") + + +class QueueInvalidBandwidth(qexception.InvalidInput): + message = _("Invalid bandwidth rate, %(data)s must be a non negative" + " integer.") + + +class QueueNotFound(qexception.NotFound): + message = _("Queue %(id)s does not exist") + + +class QueueInUseByPort(qexception.InUse): + message = _("Unable to delete queue attached to port.") + + +class QueuePortBindingNotFound(qexception.NotFound): + message = _("Port is not associated with lqueue") + + +def convert_to_unsigned_int_or_none(val): + if val is None: + return + try: + val = int(val) + if val < 0: + raise ValueError + except (ValueError, TypeError): + msg = _("'%s' must be a non negative integer.") % val + raise 
qexception.InvalidInput(error_message=msg) + return val + + +def convert_to_unsigned_int_or_none_max_63(val): + val = convert_to_unsigned_int_or_none(val) + if val > 63: + raise QueueInvalidDscp(data=val) + return val + +# As per NSX API, if a queue is trusted, DSCP must be omitted; if a queue is +# untrusted, DSCP must be specified. Whichever default values we choose for +# the tuple (qos_marking, dscp), there will be at least one combination of a +# request with conflicting values: for instance, with the following default: +# +# qos_marking = 'untrusted', dscp = '0' +# +# requests with qos_marking = 'trusted' and a default dscp will fail. Since +# it is convoluted to ask the admin to specify a None value for dscp when +# qos_marking is 'trusted', it is best to ignore the dscp value, regardless +# of whether it has been specified or not. This preserves the chosen default +# and keeps backward compatibility with the API. A warning will be logged, as +# the server is overriding a potentially conflicting request from the admin +RESOURCE_ATTRIBUTE_MAP = { + 'qos_queues': { + 'id': {'allow_post': False, 'allow_put': False, + 'is_visible': True}, + 'default': {'allow_post': True, 'allow_put': False, + 'convert_to': attr.convert_to_boolean, + 'is_visible': True, 'default': False}, + 'name': {'allow_post': True, 'allow_put': False, + 'validate': {'type:string': None}, + 'is_visible': True, 'default': ''}, + 'min': {'allow_post': True, 'allow_put': False, + 'is_visible': True, 'default': '0', + 'convert_to': convert_to_unsigned_int_or_none}, + 'max': {'allow_post': True, 'allow_put': False, + 'is_visible': True, 'default': None, + 'convert_to': convert_to_unsigned_int_or_none}, + 'qos_marking': {'allow_post': True, 'allow_put': False, + 'validate': {'type:values': ['untrusted', 'trusted']}, + 'default': 'untrusted', 'is_visible': True}, + 'dscp': {'allow_post': True, 'allow_put': False, + 'is_visible': True, 'default': '0', + 'convert_to': 
convert_to_unsigned_int_or_none_max_63}, + 'tenant_id': {'allow_post': True, 'allow_put': False, + 'required_by_policy': True, + 'validate': {'type:string': None}, + 'is_visible': True}, + }, +} + + +QUEUE = 'queue_id' +RXTX_FACTOR = 'rxtx_factor' +EXTENDED_ATTRIBUTES_2_0 = { + 'ports': { + RXTX_FACTOR: {'allow_post': True, + # FIXME(arosen): the plugin currently does not + # implement updating rxtx factor on port. + 'allow_put': True, + 'is_visible': False, + 'default': 1, + 'enforce_policy': True, + 'convert_to': convert_to_unsigned_int_or_none}, + + QUEUE: {'allow_post': False, + 'allow_put': False, + 'is_visible': True, + 'default': False, + 'enforce_policy': True}}, + 'networks': {QUEUE: {'allow_post': True, + 'allow_put': True, + 'is_visible': True, + 'default': False, + 'enforce_policy': True}} + +} + + +class Qos(object): + """Port Queue extension.""" + + @classmethod + def get_name(cls): + return "QoS Queue" + + @classmethod + def get_alias(cls): + return "qos-queue" + + @classmethod + def get_description(cls): + return "NSX QoS extension." 
+ + @classmethod + def get_namespace(cls): + return "http://docs.openstack.org/ext/qos-queue/api/v2.0" + + @classmethod + def get_updated(cls): + return "2014-01-01T00:00:00-00:00" + + @classmethod + def get_resources(cls): + """Returns Ext Resources.""" + exts = [] + plugin = manager.NeutronManager.get_plugin() + resource_name = 'qos_queue' + collection_name = resource_name.replace('_', '-') + "s" + params = RESOURCE_ATTRIBUTE_MAP.get(resource_name + "s", dict()) + controller = base.create_resource(collection_name, + resource_name, + plugin, params, allow_bulk=False) + + ex = extensions.ResourceExtension(collection_name, + controller) + exts.append(ex) + + return exts + + def get_extended_resources(self, version): + if version == "2.0": + return dict(EXTENDED_ATTRIBUTES_2_0.items() + + RESOURCE_ATTRIBUTE_MAP.items()) + else: + return {} + + +class QueuePluginBase(object): + @abc.abstractmethod + def create_qos_queue(self, context, queue): + pass + + @abc.abstractmethod + def delete_qos_queue(self, context, id): + pass + + @abc.abstractmethod + def get_qos_queue(self, context, id, fields=None): + pass + + @abc.abstractmethod + def get_qos_queues(self, context, filters=None, fields=None, sorts=None, + limit=None, marker=None, page_reverse=False): + pass diff --git a/neutron/plugins/vmware/extensions/servicerouter.py b/neutron/plugins/vmware/extensions/servicerouter.py new file mode 100644 index 000000000..ea5382407 --- /dev/null +++ b/neutron/plugins/vmware/extensions/servicerouter.py @@ -0,0 +1,59 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013 VMware, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +from neutron.api import extensions +from neutron.api.v2 import attributes + + +SERVICE_ROUTER = 'service_router' +EXTENDED_ATTRIBUTES_2_0 = { + 'routers': { + SERVICE_ROUTER: {'allow_post': True, 'allow_put': False, + 'convert_to': attributes.convert_to_boolean, + 'default': False, 'is_visible': True}, + } +} + + +class Servicerouter(extensions.ExtensionDescriptor): + """Extension class supporting advanced service router.""" + + @classmethod + def get_name(cls): + return "Service Router" + + @classmethod + def get_alias(cls): + return "service-router" + + @classmethod + def get_description(cls): + return "Provides service router." + + @classmethod + def get_namespace(cls): + return "http://docs.openstack.org/ext/service-router/api/v1.0" + + @classmethod + def get_updated(cls): + return "2013-08-08T00:00:00-00:00" + + def get_extended_resources(self, version): + if version == "2.0": + return EXTENDED_ATTRIBUTES_2_0 + else: + return {} diff --git a/neutron/plugins/vmware/nsx_cluster.py b/neutron/plugins/vmware/nsx_cluster.py new file mode 100644 index 000000000..1c564385d --- /dev/null +++ b/neutron/plugins/vmware/nsx_cluster.py @@ -0,0 +1,97 @@ +# Copyright 2012 VMware, Inc. +# All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo.config import cfg + +from neutron.openstack.common import log as logging +from neutron.plugins.vmware.common import exceptions + +LOG = logging.getLogger(__name__) +DEFAULT_PORT = 443 +# Raise if one of those attributes is not specified +REQUIRED_ATTRIBUTES = ['default_tz_uuid', 'nsx_user', + 'nsx_password', 'nsx_controllers'] +# Emit a INFO log if one of those attributes is not specified +IMPORTANT_ATTRIBUTES = ['default_l3_gw_service_uuid'] +# Deprecated attributes +DEPRECATED_ATTRIBUTES = ['metadata_dhcp_host_route', + 'nvp_user', 'nvp_password', 'nvp_controllers'] + + +class NSXCluster(object): + """NSX cluster class. + + Encapsulates controller connections and the API client for a NSX cluster. + + Controller-specific parameters, such as timeouts are stored in the + elements of the controllers attribute, which are dicts. + """ + + def __init__(self, **kwargs): + self._required_attributes = REQUIRED_ATTRIBUTES[:] + self._important_attributes = IMPORTANT_ATTRIBUTES[:] + self._deprecated_attributes = {} + self._sanity_check(kwargs) + + for opt, val in self._deprecated_attributes.iteritems(): + LOG.deprecated(_("Attribute '%s' has been deprecated or moved " + "to a new section. 
See new configuration file " + "for details."), opt) + depr_func = getattr(self, '_process_%s' % opt, None) + if depr_func: + depr_func(val) + + # If everything went according to plan these two lists should be empty + if self._required_attributes: + raise exceptions.InvalidClusterConfiguration( + invalid_attrs=self._required_attributes) + if self._important_attributes: + LOG.info(_("The following cluster attributes were " + "not specified: %s'"), self._important_attributes) + # The API client will be explicitly created by users of this class + self.api_client = None + + def _sanity_check(self, options): + # Iterating this way ensures the conf parameters also + # define the structure of this class + for arg in cfg.CONF: + if arg not in DEPRECATED_ATTRIBUTES: + setattr(self, arg, options.get(arg, cfg.CONF.get(arg))) + self._process_attribute(arg) + elif options.get(arg) is not None: + # Process deprecated attributes only if specified + self._deprecated_attributes[arg] = options.get(arg) + + def _process_attribute(self, attribute): + # Process the attribute only if it's not empty! 
+ if getattr(self, attribute, None): + if attribute in self._required_attributes: + self._required_attributes.remove(attribute) + if attribute in self._important_attributes: + self._important_attributes.remove(attribute) + handler_func = getattr(self, '_process_%s' % attribute, None) + if handler_func: + handler_func() + + def _process_nsx_controllers(self): + # If this raises something is not right, so let it bubble up + # TODO(salvatore-orlando): Also validate attribute here + for i, ctrl in enumerate(self.nsx_controllers or []): + if len(ctrl.split(':')) == 1: + self.nsx_controllers[i] = '%s:%s' % (ctrl, DEFAULT_PORT) + + def _process_nvp_controllers(self): + self.nsx_controllers = self.nvp_controllers + self._process_nsx_controllers() diff --git a/neutron/plugins/vmware/nsxlib/__init__.py b/neutron/plugins/vmware/nsxlib/__init__.py new file mode 100644 index 000000000..b09460b59 --- /dev/null +++ b/neutron/plugins/vmware/nsxlib/__init__.py @@ -0,0 +1,141 @@ +# Copyright 2014 VMware, Inc. +# All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from neutron.common import exceptions as exception +from neutron.openstack.common import jsonutils as json +from neutron.openstack.common import log +from neutron.plugins.vmware.api_client import exception as api_exc +from neutron.plugins.vmware.common import exceptions as nsx_exc +from neutron import version + +HTTP_GET = "GET" +HTTP_POST = "POST" +HTTP_DELETE = "DELETE" +HTTP_PUT = "PUT" +# Prefix to be used for all NSX API calls +URI_PREFIX = "/ws.v1" +NEUTRON_VERSION = version.version_info.release_string() + +LOG = log.getLogger(__name__) + + +def _build_uri_path(resource, + resource_id=None, + parent_resource_id=None, + fields=None, + relations=None, + filters=None, + types=None, + is_attachment=False, + extra_action=None): + resources = resource.split('/') + res_path = resources[0] + (resource_id and "/%s" % resource_id or '') + if len(resources) > 1: + # There is also a parent resource to account for in the uri + res_path = "%s/%s/%s" % (resources[1], + parent_resource_id, + res_path) + if is_attachment: + res_path = "%s/attachment" % res_path + elif extra_action: + res_path = "%s/%s" % (res_path, extra_action) + params = [] + params.append(fields and "fields=%s" % fields) + params.append(relations and "relations=%s" % relations) + params.append(types and "types=%s" % types) + if filters: + params.extend(['%s=%s' % (k, v) for (k, v) in filters.iteritems()]) + uri_path = "%s/%s" % (URI_PREFIX, res_path) + non_empty_params = [x for x in params if x is not None] + if non_empty_params: + query_string = '&'.join(non_empty_params) + if query_string: + uri_path += "?%s" % query_string + return uri_path + + +def format_exception(etype, e, exception_locals): + """Consistent formatting for exceptions. + + :param etype: a string describing the exception type. + :param e: the exception. + :param execption_locals: calling context local variable dict. + :returns: a formatted string. + """ + msg = [_("Error. 
%(type)s exception: %(exc)s.") % + {'type': etype, 'exc': e}] + l = dict((k, v) for k, v in exception_locals.iteritems() + if k != 'request') + msg.append(_("locals=[%s]") % str(l)) + return ' '.join(msg) + + +def do_request(*args, **kwargs): + """Issue a request to the cluster specified in kwargs. + + :param args: a list of positional arguments. + :param kwargs: a list of keyworkds arguments. + :returns: the result of the operation loaded into a python + object or None. + """ + cluster = kwargs["cluster"] + try: + res = cluster.api_client.request(*args) + if res: + return json.loads(res) + except api_exc.ResourceNotFound: + raise exception.NotFound() + except api_exc.ReadOnlyMode: + raise nsx_exc.MaintenanceInProgress() + + +def get_single_query_page(path, cluster, page_cursor=None, + page_length=1000, neutron_only=True): + params = [] + if page_cursor: + params.append("_page_cursor=%s" % page_cursor) + params.append("_page_length=%s" % page_length) + # NOTE(salv-orlando): On the NSX backend the 'Quantum' tag is still + # used for marking Neutron entities in order to preserve compatibility + if neutron_only: + params.append("tag_scope=quantum") + query_params = "&".join(params) + path = "%s%s%s" % (path, "&" if (path.find("?") != -1) else "?", + query_params) + body = do_request(HTTP_GET, path, cluster=cluster) + # Result_count won't be returned if _page_cursor is supplied + return body['results'], body.get('page_cursor'), body.get('result_count') + + +def get_all_query_pages(path, cluster): + need_more_results = True + result_list = [] + page_cursor = None + while need_more_results: + results, page_cursor = get_single_query_page( + path, cluster, page_cursor)[:2] + if not page_cursor: + need_more_results = False + result_list.extend(results) + return result_list + + +def mk_body(**kwargs): + """Convenience function creates and dumps dictionary to string. + + :param kwargs: the key/value pirs to be dumped into a json string. + :returns: a json string. 
+ """ + return json.dumps(kwargs, ensure_ascii=False) diff --git a/neutron/plugins/vmware/nsxlib/l2gateway.py b/neutron/plugins/vmware/nsxlib/l2gateway.py new file mode 100644 index 000000000..bd261f922 --- /dev/null +++ b/neutron/plugins/vmware/nsxlib/l2gateway.py @@ -0,0 +1,211 @@ +# Copyright 2014 VMware, Inc. +# All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +from neutron.openstack.common import jsonutils as json +from neutron.openstack.common import log +from neutron.plugins.vmware.api_client import exception as api_exc +from neutron.plugins.vmware.common import exceptions as nsx_exc +from neutron.plugins.vmware.common import utils +from neutron.plugins.vmware import nsxlib +from neutron.plugins.vmware.nsxlib import switch + +HTTP_GET = "GET" +HTTP_POST = "POST" +HTTP_DELETE = "DELETE" +HTTP_PUT = "PUT" + +GWSERVICE_RESOURCE = "gateway-service" +TRANSPORTNODE_RESOURCE = "transport-node" + +LOG = log.getLogger(__name__) + + +def create_l2_gw_service(cluster, tenant_id, display_name, devices): + """Create a NSX Layer-2 Network Gateway Service. + + :param cluster: The target NSX cluster + :param tenant_id: Identifier of the Openstack tenant for which + the gateway service. 
+ :param display_name: Descriptive name of this gateway service + :param devices: List of transport node uuids (and network + interfaces on them) to use for the network gateway service + :raise NsxApiException: if there is a problem while communicating + with the NSX controller + """ + # NOTE(salvatore-orlando): This is a little confusing, but device_id in + # NSX is actually the identifier a physical interface on the gateway + # device, which in the Neutron API is referred as interface_name + gateways = [{"transport_node_uuid": device['id'], + "device_id": device['interface_name'], + "type": "L2Gateway"} for device in devices] + gwservice_obj = { + "display_name": utils.check_and_truncate(display_name), + "tags": utils.get_tags(os_tid=tenant_id), + "gateways": gateways, + "type": "L2GatewayServiceConfig" + } + return nsxlib.do_request( + HTTP_POST, nsxlib._build_uri_path(GWSERVICE_RESOURCE), + json.dumps(gwservice_obj), cluster=cluster) + + +def plug_l2_gw_service(cluster, lswitch_id, lport_id, + gateway_id, vlan_id=None): + """Plug a Layer-2 Gateway Attachment object in a logical port.""" + att_obj = {'type': 'L2GatewayAttachment', + 'l2_gateway_service_uuid': gateway_id} + if vlan_id: + att_obj['vlan_id'] = vlan_id + return switch.plug_interface(cluster, lswitch_id, lport_id, att_obj) + + +def get_l2_gw_service(cluster, gateway_id): + return nsxlib.do_request( + HTTP_GET, nsxlib._build_uri_path(GWSERVICE_RESOURCE, + resource_id=gateway_id), + cluster=cluster) + + +def get_l2_gw_services(cluster, tenant_id=None, + fields=None, filters=None): + actual_filters = dict(filters or {}) + if tenant_id: + actual_filters['tag'] = tenant_id + actual_filters['tag_scope'] = 'os_tid' + return nsxlib.get_all_query_pages( + nsxlib._build_uri_path(GWSERVICE_RESOURCE, + filters=actual_filters), + cluster) + + +def update_l2_gw_service(cluster, gateway_id, display_name): + # TODO(salvatore-orlando): Allow updates for gateways too + gwservice_obj = get_l2_gw_service(cluster, 
gateway_id) + if not display_name: + # Nothing to update + return gwservice_obj + gwservice_obj["display_name"] = utils.check_and_truncate(display_name) + return nsxlib.do_request(HTTP_PUT, + nsxlib._build_uri_path(GWSERVICE_RESOURCE, + resource_id=gateway_id), + json.dumps(gwservice_obj), cluster=cluster) + + +def delete_l2_gw_service(cluster, gateway_id): + nsxlib.do_request(HTTP_DELETE, + nsxlib._build_uri_path(GWSERVICE_RESOURCE, + resource_id=gateway_id), + cluster=cluster) + + +def _build_gateway_device_body(tenant_id, display_name, neutron_id, + connector_type, connector_ip, + client_certificate, tz_uuid): + + connector_type_mappings = { + utils.NetworkTypes.STT: "STTConnector", + utils.NetworkTypes.GRE: "GREConnector", + utils.NetworkTypes.BRIDGE: "BridgeConnector", + 'ipsec%s' % utils.NetworkTypes.STT: "IPsecSTT", + 'ipsec%s' % utils.NetworkTypes.GRE: "IPsecGRE"} + nsx_connector_type = connector_type_mappings.get(connector_type) + body = {"display_name": utils.check_and_truncate(display_name), + "tags": utils.get_tags(os_tid=tenant_id, + q_gw_dev_id=neutron_id), + "admin_status_enabled": True} + + if connector_ip and nsx_connector_type: + body["transport_connectors"] = [ + {"transport_zone_uuid": tz_uuid, + "ip_address": connector_ip, + "type": nsx_connector_type}] + + if client_certificate: + body["credential"] = {"client_certificate": + {"pem_encoded": client_certificate}, + "type": "SecurityCertificateCredential"} + return body + + +def create_gateway_device(cluster, tenant_id, display_name, neutron_id, + tz_uuid, connector_type, connector_ip, + client_certificate): + body = _build_gateway_device_body(tenant_id, display_name, neutron_id, + connector_type, connector_ip, + client_certificate, tz_uuid) + try: + return nsxlib.do_request( + HTTP_POST, nsxlib._build_uri_path(TRANSPORTNODE_RESOURCE), + json.dumps(body), cluster=cluster) + except api_exc.InvalidSecurityCertificate: + raise nsx_exc.InvalidSecurityCertificate() + + +def 
update_gateway_device(cluster, gateway_id, tenant_id, + display_name, neutron_id, + tz_uuid, connector_type, connector_ip, + client_certificate): + body = _build_gateway_device_body(tenant_id, display_name, neutron_id, + connector_type, connector_ip, + client_certificate, tz_uuid) + try: + return nsxlib.do_request( + HTTP_PUT, + nsxlib._build_uri_path(TRANSPORTNODE_RESOURCE, + resource_id=gateway_id), + json.dumps(body), cluster=cluster) + except api_exc.InvalidSecurityCertificate: + raise nsx_exc.InvalidSecurityCertificate() + + +def delete_gateway_device(cluster, device_uuid): + return nsxlib.do_request(HTTP_DELETE, + nsxlib._build_uri_path(TRANSPORTNODE_RESOURCE, + device_uuid), + cluster=cluster) + + +def get_gateway_device_status(cluster, device_uuid): + status_res = nsxlib.do_request(HTTP_GET, + nsxlib._build_uri_path( + TRANSPORTNODE_RESOURCE, + device_uuid, + extra_action='status'), + cluster=cluster) + # Returns the connection status + return status_res['connection']['connected'] + + +def get_gateway_devices_status(cluster, tenant_id=None): + if tenant_id: + gw_device_query_path = nsxlib._build_uri_path( + TRANSPORTNODE_RESOURCE, + fields="uuid,tags", + relations="TransportNodeStatus", + filters={'tag': tenant_id, + 'tag_scope': 'os_tid'}) + else: + gw_device_query_path = nsxlib._build_uri_path( + TRANSPORTNODE_RESOURCE, + fields="uuid,tags", + relations="TransportNodeStatus") + + response = nsxlib.get_all_query_pages(gw_device_query_path, cluster) + results = {} + for item in response: + results[item['uuid']] = (item['_relations']['TransportNodeStatus'] + ['connection']['connected']) + return results diff --git a/neutron/plugins/vmware/nsxlib/lsn.py b/neutron/plugins/vmware/nsxlib/lsn.py new file mode 100644 index 000000000..f67288bf0 --- /dev/null +++ b/neutron/plugins/vmware/nsxlib/lsn.py @@ -0,0 +1,270 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 VMware, Inc. 
+# All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from neutron.common import exceptions as exception +from neutron.openstack.common import jsonutils as json +from neutron.openstack.common import log +from neutron.plugins.vmware.api_client import exception as api_exc +from neutron.plugins.vmware.common import exceptions as nsx_exc +from neutron.plugins.vmware.common import utils +from neutron.plugins.vmware import nsxlib + +HTTP_GET = "GET" +HTTP_POST = "POST" +HTTP_DELETE = "DELETE" +HTTP_PUT = "PUT" + +SERVICECLUSTER_RESOURCE = "edge-cluster" +LSERVICESNODE_RESOURCE = "lservices-node" +LSERVICESNODEPORT_RESOURCE = "lport/%s" % LSERVICESNODE_RESOURCE +SUPPORTED_METADATA_OPTIONS = ['metadata_proxy_shared_secret'] + +LOG = log.getLogger(__name__) + + +def service_cluster_exists(cluster, svc_cluster_id): + exists = False + try: + exists = ( + svc_cluster_id and + nsxlib.do_request(HTTP_GET, + nsxlib._build_uri_path( + SERVICECLUSTER_RESOURCE, + resource_id=svc_cluster_id), + cluster=cluster) is not None) + except exception.NotFound: + pass + return exists + + +def lsn_for_network_create(cluster, network_id): + lsn_obj = { + "edge_cluster_uuid": cluster.default_service_cluster_uuid, + "tags": utils.get_tags(n_network_id=network_id) + } + return nsxlib.do_request(HTTP_POST, + nsxlib._build_uri_path(LSERVICESNODE_RESOURCE), + json.dumps(lsn_obj), + cluster=cluster)["uuid"] + + +def lsn_for_network_get(cluster, network_id): + filters = {"tag": 
network_id, "tag_scope": "n_network_id"} + results = nsxlib.do_request(HTTP_GET, + nsxlib._build_uri_path(LSERVICESNODE_RESOURCE, + fields="uuid", + filters=filters), + cluster=cluster)['results'] + if not results: + raise exception.NotFound() + elif len(results) == 1: + return results[0]['uuid'] + + +def lsn_delete(cluster, lsn_id): + nsxlib.do_request(HTTP_DELETE, + nsxlib._build_uri_path(LSERVICESNODE_RESOURCE, + resource_id=lsn_id), + cluster=cluster) + + +def lsn_port_host_entries_update( + cluster, lsn_id, lsn_port_id, conf, hosts_data): + hosts_obj = {'hosts': hosts_data} + nsxlib.do_request(HTTP_PUT, + nsxlib._build_uri_path(LSERVICESNODEPORT_RESOURCE, + parent_resource_id=lsn_id, + resource_id=lsn_port_id, + extra_action=conf), + json.dumps(hosts_obj), + cluster=cluster) + + +def lsn_port_create(cluster, lsn_id, port_data): + port_obj = { + "ip_address": port_data["ip_address"], + "mac_address": port_data["mac_address"], + "tags": utils.get_tags(n_mac_address=port_data["mac_address"], + n_subnet_id=port_data["subnet_id"]), + "type": "LogicalServicesNodePortConfig", + } + return nsxlib.do_request(HTTP_POST, + nsxlib._build_uri_path(LSERVICESNODEPORT_RESOURCE, + parent_resource_id=lsn_id), + json.dumps(port_obj), + cluster=cluster)["uuid"] + + +def lsn_port_delete(cluster, lsn_id, lsn_port_id): + return nsxlib.do_request(HTTP_DELETE, + nsxlib._build_uri_path(LSERVICESNODEPORT_RESOURCE, + parent_resource_id=lsn_id, + resource_id=lsn_port_id), + cluster=cluster) + + +def _lsn_port_get(cluster, lsn_id, filters): + results = nsxlib.do_request(HTTP_GET, + nsxlib._build_uri_path( + LSERVICESNODEPORT_RESOURCE, + parent_resource_id=lsn_id, + fields="uuid", + filters=filters), + cluster=cluster)['results'] + if not results: + raise exception.NotFound() + elif len(results) == 1: + return results[0]['uuid'] + + +def lsn_port_by_mac_get(cluster, lsn_id, mac_address): + filters = {"tag": mac_address, "tag_scope": "n_mac_address"} + return _lsn_port_get(cluster, lsn_id, 
filters) + + +def lsn_port_by_subnet_get(cluster, lsn_id, subnet_id): + filters = {"tag": subnet_id, "tag_scope": "n_subnet_id"} + return _lsn_port_get(cluster, lsn_id, filters) + + +def lsn_port_info_get(cluster, lsn_id, lsn_port_id): + result = nsxlib.do_request(HTTP_GET, + nsxlib._build_uri_path( + LSERVICESNODEPORT_RESOURCE, + parent_resource_id=lsn_id, + resource_id=lsn_port_id), + cluster=cluster) + for tag in result['tags']: + if tag['scope'] == 'n_subnet_id': + result['subnet_id'] = tag['tag'] + break + return result + + +def lsn_port_plug_network(cluster, lsn_id, lsn_port_id, lswitch_port_id): + patch_obj = { + "type": "PatchAttachment", + "peer_port_uuid": lswitch_port_id + } + try: + nsxlib.do_request(HTTP_PUT, + nsxlib._build_uri_path(LSERVICESNODEPORT_RESOURCE, + parent_resource_id=lsn_id, + resource_id=lsn_port_id, + is_attachment=True), + json.dumps(patch_obj), + cluster=cluster) + except api_exc.Conflict: + # This restriction might be lifted at some point + msg = (_("Attempt to plug Logical Services Node %(lsn)s into " + "network with port %(port)s failed. 
PatchAttachment " + "already exists with another port") % + {'lsn': lsn_id, 'port': lswitch_port_id}) + LOG.exception(msg) + raise nsx_exc.LsnConfigurationConflict(lsn_id=lsn_id) + + +def _lsn_configure_action( + cluster, lsn_id, action, is_enabled, obj): + lsn_obj = {"enabled": is_enabled} + lsn_obj.update(obj) + nsxlib.do_request(HTTP_PUT, + nsxlib._build_uri_path(LSERVICESNODE_RESOURCE, + resource_id=lsn_id, + extra_action=action), + json.dumps(lsn_obj), + cluster=cluster) + + +def _lsn_port_configure_action( + cluster, lsn_id, lsn_port_id, action, is_enabled, obj): + nsxlib.do_request(HTTP_PUT, + nsxlib._build_uri_path(LSERVICESNODE_RESOURCE, + resource_id=lsn_id, + extra_action=action), + json.dumps({"enabled": is_enabled}), + cluster=cluster) + nsxlib.do_request(HTTP_PUT, + nsxlib._build_uri_path(LSERVICESNODEPORT_RESOURCE, + parent_resource_id=lsn_id, + resource_id=lsn_port_id, + extra_action=action), + json.dumps(obj), + cluster=cluster) + + +def _get_opts(name, value): + return {"name": name, "value": str(value)} + + +def lsn_port_dhcp_configure( + cluster, lsn_id, lsn_port_id, is_enabled=True, dhcp_options=None): + dhcp_options = dhcp_options or {} + opts = [_get_opts(key, val) for key, val in dhcp_options.iteritems()] + dhcp_obj = {'options': opts} + _lsn_port_configure_action( + cluster, lsn_id, lsn_port_id, 'dhcp', is_enabled, dhcp_obj) + + +def lsn_metadata_configure( + cluster, lsn_id, is_enabled=True, metadata_info=None): + meta_obj = { + 'metadata_server_ip': metadata_info['metadata_server_ip'], + 'metadata_server_port': metadata_info['metadata_server_port'], + } + if metadata_info: + opts = [ + _get_opts(opt, metadata_info[opt]) + for opt in SUPPORTED_METADATA_OPTIONS + if metadata_info.get(opt) + ] + if opts: + meta_obj["options"] = opts + _lsn_configure_action( + cluster, lsn_id, 'metadata-proxy', is_enabled, meta_obj) + + +def _lsn_port_host_action( + cluster, lsn_id, lsn_port_id, host_obj, extra_action, action): + nsxlib.do_request(HTTP_POST, 
+ nsxlib._build_uri_path(LSERVICESNODEPORT_RESOURCE, + parent_resource_id=lsn_id, + resource_id=lsn_port_id, + extra_action=extra_action, + filters={"action": action}), + json.dumps(host_obj), + cluster=cluster) + + +def lsn_port_dhcp_host_add(cluster, lsn_id, lsn_port_id, host_data): + _lsn_port_host_action( + cluster, lsn_id, lsn_port_id, host_data, 'dhcp', 'add_host') + + +def lsn_port_dhcp_host_remove(cluster, lsn_id, lsn_port_id, host_data): + _lsn_port_host_action( + cluster, lsn_id, lsn_port_id, host_data, 'dhcp', 'remove_host') + + +def lsn_port_metadata_host_add(cluster, lsn_id, lsn_port_id, host_data): + _lsn_port_host_action( + cluster, lsn_id, lsn_port_id, host_data, 'metadata-proxy', 'add_host') + + +def lsn_port_metadata_host_remove(cluster, lsn_id, lsn_port_id, host_data): + _lsn_port_host_action(cluster, lsn_id, lsn_port_id, + host_data, 'metadata-proxy', 'remove_host') diff --git a/neutron/plugins/vmware/nsxlib/queue.py b/neutron/plugins/vmware/nsxlib/queue.py new file mode 100644 index 000000000..708a210b6 --- /dev/null +++ b/neutron/plugins/vmware/nsxlib/queue.py @@ -0,0 +1,71 @@ +# Copyright 2014 VMware, Inc. +# All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from neutron.api.v2 import attributes as attr +from neutron.common import exceptions as exception +from neutron.openstack.common import excutils +from neutron.openstack.common import jsonutils +from neutron.openstack.common import log +from neutron.plugins.vmware.api_client import exception as api_exc +from neutron.plugins.vmware.common import utils +from neutron.plugins.vmware import nsxlib + +HTTP_POST = "POST" +HTTP_DELETE = "DELETE" + +LQUEUE_RESOURCE = "lqueue" + +LOG = log.getLogger(__name__) + + +def create_lqueue(cluster, queue_data): + params = { + 'name': 'display_name', + 'qos_marking': 'qos_marking', + 'min': 'min_bandwidth_rate', + 'max': 'max_bandwidth_rate', + 'dscp': 'dscp' + } + queue_obj = dict( + (nsx_name, queue_data.get(api_name)) + for api_name, nsx_name in params.iteritems() + if attr.is_attr_set(queue_data.get(api_name)) + ) + if 'display_name' in queue_obj: + queue_obj['display_name'] = utils.check_and_truncate( + queue_obj['display_name']) + + queue_obj['tags'] = utils.get_tags() + try: + return nsxlib.do_request(HTTP_POST, + nsxlib._build_uri_path(LQUEUE_RESOURCE), + jsonutils.dumps(queue_obj), + cluster=cluster)['uuid'] + except api_exc.NsxApiException: + # FIXME(salv-orlando): This should not raise NeutronException + with excutils.save_and_reraise_exception(): + raise exception.NeutronException() + + +def delete_lqueue(cluster, queue_id): + try: + nsxlib.do_request(HTTP_DELETE, + nsxlib._build_uri_path(LQUEUE_RESOURCE, + resource_id=queue_id), + cluster=cluster) + except Exception: + # FIXME(salv-orlando): This should not raise NeutronException + with excutils.save_and_reraise_exception(): + raise exception.NeutronException() diff --git a/neutron/plugins/vmware/nsxlib/router.py b/neutron/plugins/vmware/nsxlib/router.py new file mode 100644 index 000000000..52d34299f --- /dev/null +++ b/neutron/plugins/vmware/nsxlib/router.py @@ -0,0 +1,689 @@ +# Copyright 2014 VMware, Inc. 
+# All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo.config import cfg + +from neutron.common import exceptions as exception +from neutron.openstack.common import excutils +from neutron.openstack.common import jsonutils +from neutron.openstack.common import log +from neutron.plugins.vmware.api_client import exception as api_exc +from neutron.plugins.vmware.common import exceptions as nsx_exc +from neutron.plugins.vmware.common import utils +from neutron.plugins.vmware import nsxlib +from neutron.plugins.vmware.nsxlib import switch +from neutron.plugins.vmware.nsxlib import versioning + +HTTP_GET = "GET" +HTTP_POST = "POST" +HTTP_DELETE = "DELETE" +HTTP_PUT = "PUT" + +LROUTER_RESOURCE = "lrouter" +LROUTER_RESOURCE = "lrouter" +LROUTERPORT_RESOURCE = "lport/%s" % LROUTER_RESOURCE +LROUTERRIB_RESOURCE = "rib/%s" % LROUTER_RESOURCE +LROUTERNAT_RESOURCE = "nat/lrouter" +# Constants for NAT rules +MATCH_KEYS = ["destination_ip_addresses", "destination_port_max", + "destination_port_min", "source_ip_addresses", + "source_port_max", "source_port_min", "protocol"] + +LOG = log.getLogger(__name__) + + +def _prepare_lrouter_body(name, neutron_router_id, tenant_id, + router_type, distributed=None, **kwargs): + body = { + "display_name": utils.check_and_truncate(name), + "tags": utils.get_tags(os_tid=tenant_id, + q_router_id=neutron_router_id), + "routing_config": { + "type": router_type + }, + "type": "LogicalRouterConfig", + "replication_mode": 
cfg.CONF.NSX.replication_mode, + } + # add the distributed key only if not None (ie: True or False) + if distributed is not None: + body['distributed'] = distributed + if kwargs: + body["routing_config"].update(kwargs) + return body + + +def _create_implicit_routing_lrouter(cluster, neutron_router_id, tenant_id, + display_name, nexthop, distributed=None): + implicit_routing_config = { + "default_route_next_hop": { + "gateway_ip_address": nexthop, + "type": "RouterNextHop" + }, + } + lrouter_obj = _prepare_lrouter_body( + display_name, neutron_router_id, tenant_id, + "SingleDefaultRouteImplicitRoutingConfig", + distributed=distributed, + **implicit_routing_config) + return nsxlib.do_request(HTTP_POST, + nsxlib._build_uri_path(LROUTER_RESOURCE), + jsonutils.dumps(lrouter_obj), cluster=cluster) + + +def create_implicit_routing_lrouter(cluster, neutron_router_id, tenant_id, + display_name, nexthop): + """Create a NSX logical router on the specified cluster. + + :param cluster: The target NSX cluster + :param tenant_id: Identifier of the Openstack tenant for which + the logical router is being created + :param display_name: Descriptive name of this logical router + :param nexthop: External gateway IP address for the logical router + :raise NsxApiException: if there is a problem while communicating + with the NSX controller + """ + return _create_implicit_routing_lrouter( + cluster, neutron_router_id, tenant_id, display_name, nexthop) + + +def create_implicit_routing_lrouter_with_distribution( + cluster, neutron_router_id, tenant_id, display_name, + nexthop, distributed=None): + """Create a NSX logical router on the specified cluster. 
+ + This function also allows for creating distributed lrouters + :param cluster: The target NSX cluster + :param tenant_id: Identifier of the Openstack tenant for which + the logical router is being created + :param display_name: Descriptive name of this logical router + :param nexthop: External gateway IP address for the logical router + :param distributed: True for distributed logical routers + :raise NsxApiException: if there is a problem while communicating + with the NSX controller + """ + return _create_implicit_routing_lrouter( + cluster, neutron_router_id, tenant_id, + display_name, nexthop, distributed) + + +def create_explicit_routing_lrouter(cluster, neutron_router_id, tenant_id, + display_name, nexthop, distributed=None): + lrouter_obj = _prepare_lrouter_body( + display_name, neutron_router_id, tenant_id, + "RoutingTableRoutingConfig", distributed=distributed) + router = nsxlib.do_request(HTTP_POST, + nsxlib._build_uri_path(LROUTER_RESOURCE), + jsonutils.dumps(lrouter_obj), cluster=cluster) + default_gw = {'prefix': '0.0.0.0/0', 'next_hop_ip': nexthop} + create_explicit_route_lrouter(cluster, router['uuid'], default_gw) + return router + + +def delete_lrouter(cluster, lrouter_id): + nsxlib.do_request(HTTP_DELETE, + nsxlib._build_uri_path(LROUTER_RESOURCE, + resource_id=lrouter_id), + cluster=cluster) + + +def get_lrouter(cluster, lrouter_id): + return nsxlib.do_request(HTTP_GET, + nsxlib._build_uri_path( + LROUTER_RESOURCE, + resource_id=lrouter_id, + relations='LogicalRouterStatus'), + cluster=cluster) + + +def query_lrouters(cluster, fields=None, filters=None): + return nsxlib.get_all_query_pages( + nsxlib._build_uri_path(LROUTER_RESOURCE, + fields=fields, + relations='LogicalRouterStatus', + filters=filters), + cluster) + + +def get_lrouters(cluster, tenant_id, fields=None, filters=None): + # FIXME(salv-orlando): Fields parameter is ignored in this routine + actual_filters = {} + if filters: + actual_filters.update(filters) + if tenant_id: + 
actual_filters['tag'] = tenant_id + actual_filters['tag_scope'] = 'os_tid' + lrouter_fields = "uuid,display_name,fabric_status,tags" + return query_lrouters(cluster, lrouter_fields, actual_filters) + + +def update_implicit_routing_lrouter(cluster, r_id, display_name, nexthop): + lrouter_obj = get_lrouter(cluster, r_id) + if not display_name and not nexthop: + # Nothing to update + return lrouter_obj + # It seems that this is faster than the doing an if on display_name + lrouter_obj["display_name"] = (utils.check_and_truncate(display_name) or + lrouter_obj["display_name"]) + if nexthop: + nh_element = lrouter_obj["routing_config"].get( + "default_route_next_hop") + if nh_element: + nh_element["gateway_ip_address"] = nexthop + return nsxlib.do_request(HTTP_PUT, + nsxlib._build_uri_path(LROUTER_RESOURCE, + resource_id=r_id), + jsonutils.dumps(lrouter_obj), + cluster=cluster) + + +def get_explicit_routes_lrouter(cluster, router_id, protocol_type='static'): + static_filter = {'protocol': protocol_type} + existing_routes = nsxlib.do_request( + HTTP_GET, + nsxlib._build_uri_path(LROUTERRIB_RESOURCE, + filters=static_filter, + fields="*", + parent_resource_id=router_id), + cluster=cluster)['results'] + return existing_routes + + +def delete_explicit_route_lrouter(cluster, router_id, route_id): + nsxlib.do_request(HTTP_DELETE, + nsxlib._build_uri_path(LROUTERRIB_RESOURCE, + resource_id=route_id, + parent_resource_id=router_id), + cluster=cluster) + + +def create_explicit_route_lrouter(cluster, router_id, route): + next_hop_ip = route.get("nexthop") or route.get("next_hop_ip") + prefix = route.get("destination") or route.get("prefix") + uuid = nsxlib.do_request( + HTTP_POST, + nsxlib._build_uri_path(LROUTERRIB_RESOURCE, + parent_resource_id=router_id), + jsonutils.dumps({ + "action": "accept", + "next_hop_ip": next_hop_ip, + "prefix": prefix, + "protocol": "static" + }), + cluster=cluster)['uuid'] + return uuid + + +def update_explicit_routes_lrouter(cluster, router_id, 
routes): + # Update in bulk: delete them all, and add the ones specified + # but keep track of what is been modified to allow roll-backs + # in case of failures + nsx_routes = get_explicit_routes_lrouter(cluster, router_id) + try: + deleted_routes = [] + added_routes = [] + # omit the default route (0.0.0.0/0) from the processing; + # this must be handled through the nexthop for the router + for route in nsx_routes: + prefix = route.get("destination") or route.get("prefix") + if prefix != '0.0.0.0/0': + delete_explicit_route_lrouter(cluster, + router_id, + route['uuid']) + deleted_routes.append(route) + for route in routes: + prefix = route.get("destination") or route.get("prefix") + if prefix != '0.0.0.0/0': + uuid = create_explicit_route_lrouter(cluster, + router_id, route) + added_routes.append(uuid) + except api_exc.NsxApiException: + LOG.exception(_('Cannot update NSX routes %(routes)s for ' + 'router %(router_id)s'), + {'routes': routes, 'router_id': router_id}) + # Roll back to keep NSX in consistent state + with excutils.save_and_reraise_exception(): + if nsx_routes: + if deleted_routes: + for route in deleted_routes: + create_explicit_route_lrouter(cluster, + router_id, route) + if added_routes: + for route_id in added_routes: + delete_explicit_route_lrouter(cluster, + router_id, route_id) + return nsx_routes + + +def get_default_route_explicit_routing_lrouter_v33(cluster, router_id): + static_filter = {"protocol": "static", + "prefix": "0.0.0.0/0"} + default_route = nsxlib.do_request( + HTTP_GET, + nsxlib._build_uri_path(LROUTERRIB_RESOURCE, + filters=static_filter, + fields="*", + parent_resource_id=router_id), + cluster=cluster)["results"][0] + return default_route + + +def get_default_route_explicit_routing_lrouter_v32(cluster, router_id): + # Scan all routes because 3.2 does not support query by prefix + all_routes = get_explicit_routes_lrouter(cluster, router_id) + for route in all_routes: + if route['prefix'] == '0.0.0.0/0': + return route + + +def 
update_default_gw_explicit_routing_lrouter(cluster, router_id, next_hop): + default_route = get_default_route_explicit_routing_lrouter(cluster, + router_id) + if next_hop != default_route["next_hop_ip"]: + new_default_route = {"action": "accept", + "next_hop_ip": next_hop, + "prefix": "0.0.0.0/0", + "protocol": "static"} + nsxlib.do_request(HTTP_PUT, + nsxlib._build_uri_path( + LROUTERRIB_RESOURCE, + resource_id=default_route['uuid'], + parent_resource_id=router_id), + jsonutils.dumps(new_default_route), + cluster=cluster) + + +def update_explicit_routing_lrouter(cluster, router_id, + display_name, next_hop, routes=None): + update_implicit_routing_lrouter(cluster, router_id, display_name, next_hop) + if next_hop: + update_default_gw_explicit_routing_lrouter(cluster, + router_id, next_hop) + if routes is not None: + return update_explicit_routes_lrouter(cluster, router_id, routes) + + +def query_lrouter_lports(cluster, lr_uuid, fields="*", + filters=None, relations=None): + uri = nsxlib._build_uri_path(LROUTERPORT_RESOURCE, + parent_resource_id=lr_uuid, + fields=fields, filters=filters, + relations=relations) + return nsxlib.do_request(HTTP_GET, uri, cluster=cluster)['results'] + + +def create_router_lport(cluster, lrouter_uuid, tenant_id, neutron_port_id, + display_name, admin_status_enabled, ip_addresses, + mac_address=None): + """Creates a logical port on the assigned logical router.""" + lport_obj = dict( + admin_status_enabled=admin_status_enabled, + display_name=display_name, + tags=utils.get_tags(os_tid=tenant_id, q_port_id=neutron_port_id), + ip_addresses=ip_addresses, + type="LogicalRouterPortConfig" + ) + # Only add the mac_address to lport_obj if present. This is because + # when creating the fake_ext_gw there is no mac_address present. 
+ if mac_address: + lport_obj['mac_address'] = mac_address + path = nsxlib._build_uri_path(LROUTERPORT_RESOURCE, + parent_resource_id=lrouter_uuid) + result = nsxlib.do_request(HTTP_POST, path, jsonutils.dumps(lport_obj), + cluster=cluster) + + LOG.debug(_("Created logical port %(lport_uuid)s on " + "logical router %(lrouter_uuid)s"), + {'lport_uuid': result['uuid'], + 'lrouter_uuid': lrouter_uuid}) + return result + + +def update_router_lport(cluster, lrouter_uuid, lrouter_port_uuid, + tenant_id, neutron_port_id, display_name, + admin_status_enabled, ip_addresses): + """Updates a logical port on the assigned logical router.""" + lport_obj = dict( + admin_status_enabled=admin_status_enabled, + display_name=display_name, + tags=utils.get_tags(os_tid=tenant_id, q_port_id=neutron_port_id), + ip_addresses=ip_addresses, + type="LogicalRouterPortConfig" + ) + # Do not pass null items to NSX + for key in lport_obj.keys(): + if lport_obj[key] is None: + del lport_obj[key] + path = nsxlib._build_uri_path(LROUTERPORT_RESOURCE, + lrouter_port_uuid, + parent_resource_id=lrouter_uuid) + result = nsxlib.do_request(HTTP_PUT, path, + jsonutils.dumps(lport_obj), + cluster=cluster) + LOG.debug(_("Updated logical port %(lport_uuid)s on " + "logical router %(lrouter_uuid)s"), + {'lport_uuid': lrouter_port_uuid, 'lrouter_uuid': lrouter_uuid}) + return result + + +def delete_router_lport(cluster, lrouter_uuid, lport_uuid): + """Creates a logical port on the assigned logical router.""" + path = nsxlib._build_uri_path(LROUTERPORT_RESOURCE, lport_uuid, + lrouter_uuid) + nsxlib.do_request(HTTP_DELETE, path, cluster=cluster) + LOG.debug(_("Delete logical router port %(lport_uuid)s on " + "logical router %(lrouter_uuid)s"), + {'lport_uuid': lport_uuid, + 'lrouter_uuid': lrouter_uuid}) + + +def delete_peer_router_lport(cluster, lr_uuid, ls_uuid, lp_uuid): + nsx_port = switch.get_port(cluster, ls_uuid, lp_uuid, + relations="LogicalPortAttachment") + relations = nsx_port.get('_relations') + if 
relations: + att_data = relations.get('LogicalPortAttachment') + if att_data: + lrp_uuid = att_data.get('peer_port_uuid') + if lrp_uuid: + delete_router_lport(cluster, lr_uuid, lrp_uuid) + + +def find_router_gw_port(context, cluster, router_id): + """Retrieves the external gateway port for a NSX logical router.""" + + # Find the uuid of nsx ext gw logical router port + # TODO(salvatore-orlando): Consider storing it in Neutron DB + results = query_lrouter_lports( + cluster, router_id, + relations="LogicalPortAttachment") + for lport in results: + if '_relations' in lport: + attachment = lport['_relations'].get('LogicalPortAttachment') + if attachment and attachment.get('type') == 'L3GatewayAttachment': + return lport + + +def plug_router_port_attachment(cluster, router_id, port_id, + attachment_uuid, nsx_attachment_type, + attachment_vlan=None): + """Attach a router port to the given attachment. + + Current attachment types: + - PatchAttachment [-> logical switch port uuid] + - L3GatewayAttachment [-> L3GatewayService uuid] + For the latter attachment type a VLAN ID can be specified as well. 
+ """ + uri = nsxlib._build_uri_path(LROUTERPORT_RESOURCE, port_id, router_id, + is_attachment=True) + attach_obj = {} + attach_obj["type"] = nsx_attachment_type + if nsx_attachment_type == "PatchAttachment": + attach_obj["peer_port_uuid"] = attachment_uuid + elif nsx_attachment_type == "L3GatewayAttachment": + attach_obj["l3_gateway_service_uuid"] = attachment_uuid + if attachment_vlan: + attach_obj['vlan_id'] = attachment_vlan + else: + raise nsx_exc.InvalidAttachmentType( + attachment_type=nsx_attachment_type) + return nsxlib.do_request( + HTTP_PUT, uri, jsonutils.dumps(attach_obj), cluster=cluster) + + +def _create_nat_match_obj(**kwargs): + nat_match_obj = {'ethertype': 'IPv4'} + delta = set(kwargs.keys()) - set(MATCH_KEYS) + if delta: + raise Exception(_("Invalid keys for NAT match: %s"), delta) + nat_match_obj.update(kwargs) + return nat_match_obj + + +def _create_lrouter_nat_rule(cluster, router_id, nat_rule_obj): + LOG.debug(_("Creating NAT rule: %s"), nat_rule_obj) + uri = nsxlib._build_uri_path(LROUTERNAT_RESOURCE, + parent_resource_id=router_id) + return nsxlib.do_request(HTTP_POST, uri, jsonutils.dumps(nat_rule_obj), + cluster=cluster) + + +def _build_snat_rule_obj(min_src_ip, max_src_ip, nat_match_obj): + return {"to_source_ip_address_min": min_src_ip, + "to_source_ip_address_max": max_src_ip, + "type": "SourceNatRule", + "match": nat_match_obj} + + +def create_lrouter_nosnat_rule_v2(cluster, _router_id, _match_criteria=None): + LOG.info(_("No SNAT rules cannot be applied as they are not available in " + "this version of the NSX platform")) + + +def create_lrouter_nodnat_rule_v2(cluster, _router_id, _match_criteria=None): + LOG.info(_("No DNAT rules cannot be applied as they are not available in " + "this version of the NSX platform")) + + +def create_lrouter_snat_rule_v2(cluster, router_id, + min_src_ip, max_src_ip, match_criteria=None): + + nat_match_obj = _create_nat_match_obj(**match_criteria) + nat_rule_obj = _build_snat_rule_obj(min_src_ip, 
max_src_ip, nat_match_obj) + return _create_lrouter_nat_rule(cluster, router_id, nat_rule_obj) + + +def create_lrouter_dnat_rule_v2(cluster, router_id, dst_ip, + to_dst_port=None, match_criteria=None): + + nat_match_obj = _create_nat_match_obj(**match_criteria) + nat_rule_obj = { + "to_destination_ip_address_min": dst_ip, + "to_destination_ip_address_max": dst_ip, + "type": "DestinationNatRule", + "match": nat_match_obj + } + if to_dst_port: + nat_rule_obj['to_destination_port'] = to_dst_port + return _create_lrouter_nat_rule(cluster, router_id, nat_rule_obj) + + +def create_lrouter_nosnat_rule_v3(cluster, router_id, order=None, + match_criteria=None): + nat_match_obj = _create_nat_match_obj(**match_criteria) + nat_rule_obj = { + "type": "NoSourceNatRule", + "match": nat_match_obj + } + if order: + nat_rule_obj['order'] = order + return _create_lrouter_nat_rule(cluster, router_id, nat_rule_obj) + + +def create_lrouter_nodnat_rule_v3(cluster, router_id, order=None, + match_criteria=None): + nat_match_obj = _create_nat_match_obj(**match_criteria) + nat_rule_obj = { + "type": "NoDestinationNatRule", + "match": nat_match_obj + } + if order: + nat_rule_obj['order'] = order + return _create_lrouter_nat_rule(cluster, router_id, nat_rule_obj) + + +def create_lrouter_snat_rule_v3(cluster, router_id, min_src_ip, max_src_ip, + order=None, match_criteria=None): + nat_match_obj = _create_nat_match_obj(**match_criteria) + nat_rule_obj = _build_snat_rule_obj(min_src_ip, max_src_ip, nat_match_obj) + if order: + nat_rule_obj['order'] = order + return _create_lrouter_nat_rule(cluster, router_id, nat_rule_obj) + + +def create_lrouter_dnat_rule_v3(cluster, router_id, dst_ip, to_dst_port=None, + order=None, match_criteria=None): + + nat_match_obj = _create_nat_match_obj(**match_criteria) + nat_rule_obj = { + "to_destination_ip_address": dst_ip, + "type": "DestinationNatRule", + "match": nat_match_obj + } + if to_dst_port: + nat_rule_obj['to_destination_port'] = to_dst_port + if order: 
+ nat_rule_obj['order'] = order + return _create_lrouter_nat_rule(cluster, router_id, nat_rule_obj) + + +def delete_nat_rules_by_match(cluster, router_id, rule_type, + max_num_expected, + min_num_expected=0, + **kwargs): + # remove nat rules + nat_rules = query_nat_rules(cluster, router_id) + to_delete_ids = [] + for r in nat_rules: + if (r['type'] != rule_type): + continue + + for key, value in kwargs.iteritems(): + if not (key in r['match'] and r['match'][key] == value): + break + else: + to_delete_ids.append(r['uuid']) + if not (len(to_delete_ids) in + range(min_num_expected, max_num_expected + 1)): + raise nsx_exc.NatRuleMismatch(actual_rules=len(to_delete_ids), + min_rules=min_num_expected, + max_rules=max_num_expected) + + for rule_id in to_delete_ids: + delete_router_nat_rule(cluster, router_id, rule_id) + + +def delete_router_nat_rule(cluster, router_id, rule_id): + uri = nsxlib._build_uri_path(LROUTERNAT_RESOURCE, rule_id, router_id) + nsxlib.do_request(HTTP_DELETE, uri, cluster=cluster) + + +def query_nat_rules(cluster, router_id, fields="*", filters=None): + uri = nsxlib._build_uri_path(LROUTERNAT_RESOURCE, + parent_resource_id=router_id, + fields=fields, filters=filters) + return nsxlib.get_all_query_pages(uri, cluster) + + +# NOTE(salvatore-orlando): The following FIXME applies in general to +# each operation on list attributes. 
+# FIXME(salvatore-orlando): need a lock around the list of IPs on an iface +def update_lrouter_port_ips(cluster, lrouter_id, lport_id, + ips_to_add, ips_to_remove): + uri = nsxlib._build_uri_path(LROUTERPORT_RESOURCE, lport_id, lrouter_id) + try: + port = nsxlib.do_request(HTTP_GET, uri, cluster=cluster) + # TODO(salvatore-orlando): Enforce ips_to_add intersection with + # ips_to_remove is empty + ip_address_set = set(port['ip_addresses']) + ip_address_set = ip_address_set - set(ips_to_remove) + ip_address_set = ip_address_set | set(ips_to_add) + # Set is not JSON serializable - convert to list + port['ip_addresses'] = list(ip_address_set) + nsxlib.do_request(HTTP_PUT, uri, jsonutils.dumps(port), + cluster=cluster) + except exception.NotFound: + # FIXME(salv-orlando):avoid raising different exception + data = {'lport_id': lport_id, 'lrouter_id': lrouter_id} + msg = (_("Router Port %(lport_id)s not found on router " + "%(lrouter_id)s") % data) + LOG.exception(msg) + raise nsx_exc.NsxPluginException(err_msg=msg) + except api_exc.NsxApiException as e: + msg = _("An exception occurred while updating IP addresses on a " + "router logical port:%s") % str(e) + LOG.exception(msg) + raise nsx_exc.NsxPluginException(err_msg=msg) + + +ROUTER_FUNC_DICT = { + 'create_lrouter': { + 2: {versioning.DEFAULT_VERSION: create_implicit_routing_lrouter, }, + 3: {versioning.DEFAULT_VERSION: create_implicit_routing_lrouter, + 1: create_implicit_routing_lrouter_with_distribution, + 2: create_explicit_routing_lrouter, }, }, + 'update_lrouter': { + 2: {versioning.DEFAULT_VERSION: update_implicit_routing_lrouter, }, + 3: {versioning.DEFAULT_VERSION: update_implicit_routing_lrouter, + 2: update_explicit_routing_lrouter, }, }, + 'create_lrouter_dnat_rule': { + 2: {versioning.DEFAULT_VERSION: create_lrouter_dnat_rule_v2, }, + 3: {versioning.DEFAULT_VERSION: create_lrouter_dnat_rule_v3, }, }, + 'create_lrouter_snat_rule': { + 2: {versioning.DEFAULT_VERSION: create_lrouter_snat_rule_v2, }, + 3: 
{versioning.DEFAULT_VERSION: create_lrouter_snat_rule_v3, }, }, + 'create_lrouter_nosnat_rule': { + 2: {versioning.DEFAULT_VERSION: create_lrouter_nosnat_rule_v2, }, + 3: {versioning.DEFAULT_VERSION: create_lrouter_nosnat_rule_v3, }, }, + 'create_lrouter_nodnat_rule': { + 2: {versioning.DEFAULT_VERSION: create_lrouter_nodnat_rule_v2, }, + 3: {versioning.DEFAULT_VERSION: create_lrouter_nodnat_rule_v3, }, }, + 'get_default_route_explicit_routing_lrouter': { + 3: {versioning.DEFAULT_VERSION: + get_default_route_explicit_routing_lrouter_v32, + 2: get_default_route_explicit_routing_lrouter_v32, }, }, +} + + +@versioning.versioned(ROUTER_FUNC_DICT) +def create_lrouter(cluster, *args, **kwargs): + if kwargs.get('distributed', None): + v = cluster.api_client.get_version() + if (v.major, v.minor) < (3, 1): + raise nsx_exc.InvalidVersion(version=v) + return v + + +@versioning.versioned(ROUTER_FUNC_DICT) +def get_default_route_explicit_routing_lrouter(cluster, *args, **kwargs): + pass + + +@versioning.versioned(ROUTER_FUNC_DICT) +def update_lrouter(cluster, *args, **kwargs): + if kwargs.get('routes', None): + v = cluster.api_client.get_version() + if (v.major, v.minor) < (3, 2): + raise nsx_exc.InvalidVersion(version=v) + return v + + +@versioning.versioned(ROUTER_FUNC_DICT) +def create_lrouter_dnat_rule(cluster, *args, **kwargs): + pass + + +@versioning.versioned(ROUTER_FUNC_DICT) +def create_lrouter_snat_rule(cluster, *args, **kwargs): + pass + + +@versioning.versioned(ROUTER_FUNC_DICT) +def create_lrouter_nosnat_rule(cluster, *args, **kwargs): + pass + + +@versioning.versioned(ROUTER_FUNC_DICT) +def create_lrouter_nodnat_rule(cluster, *args, **kwargs): + pass diff --git a/neutron/plugins/vmware/nsxlib/secgroup.py b/neutron/plugins/vmware/nsxlib/secgroup.py new file mode 100644 index 000000000..6c9ba5e2f --- /dev/null +++ b/neutron/plugins/vmware/nsxlib/secgroup.py @@ -0,0 +1,141 @@ +# Copyright 2014 VMware, Inc. 
+# All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from neutron.common import constants +from neutron.common import exceptions +from neutron.openstack.common import excutils +from neutron.openstack.common import jsonutils as json +from neutron.openstack.common import log +from neutron.plugins.vmware.common import utils +from neutron.plugins.vmware import nsxlib + +HTTP_GET = "GET" +HTTP_POST = "POST" +HTTP_DELETE = "DELETE" +HTTP_PUT = "PUT" + +SECPROF_RESOURCE = "security-profile" + +LOG = log.getLogger(__name__) + + +def mk_body(**kwargs): + """Convenience function creates and dumps dictionary to string. + + :param kwargs: the key/value pirs to be dumped into a json string. + :returns: a json string. + """ + return json.dumps(kwargs, ensure_ascii=False) + + +def query_security_profiles(cluster, fields=None, filters=None): + return nsxlib.get_all_query_pages( + nsxlib._build_uri_path(SECPROF_RESOURCE, + fields=fields, + filters=filters), + cluster) + + +def create_security_profile(cluster, tenant_id, neutron_id, security_profile): + """Create a security profile on the NSX backend. + + :param cluster: a NSX cluster object reference + :param tenant_id: identifier of the Neutron tenant + :param neutron_id: neutron security group identifier + :param security_profile: dictionary with data for + configuring the NSX security profile. 
+ """ + path = "/ws.v1/security-profile" + # Allow all dhcp responses and all ingress traffic + hidden_rules = {'logical_port_egress_rules': + [{'ethertype': 'IPv4', + 'protocol': constants.PROTO_NUM_UDP, + 'port_range_min': constants.DHCP_RESPONSE_PORT, + 'port_range_max': constants.DHCP_RESPONSE_PORT, + 'ip_prefix': '0.0.0.0/0'}], + 'logical_port_ingress_rules': + [{'ethertype': 'IPv4'}, + {'ethertype': 'IPv6'}]} + display_name = utils.check_and_truncate(security_profile.get('name')) + # NOTE(salv-orlando): neutron-id tags are prepended with 'q' for + # historical reasons + body = mk_body( + tags=utils.get_tags(os_tid=tenant_id, q_sec_group_id=neutron_id), + display_name=display_name, + logical_port_ingress_rules=( + hidden_rules['logical_port_ingress_rules']), + logical_port_egress_rules=hidden_rules['logical_port_egress_rules'] + ) + rsp = nsxlib.do_request(HTTP_POST, path, body, cluster=cluster) + if security_profile.get('name') == 'default': + # If security group is default allow ip traffic between + # members of the same security profile is allowed and ingress traffic + # from the switch + rules = {'logical_port_egress_rules': [{'ethertype': 'IPv4', + 'profile_uuid': rsp['uuid']}, + {'ethertype': 'IPv6', + 'profile_uuid': rsp['uuid']}], + 'logical_port_ingress_rules': [{'ethertype': 'IPv4'}, + {'ethertype': 'IPv6'}]} + + update_security_group_rules(cluster, rsp['uuid'], rules) + LOG.debug(_("Created Security Profile: %s"), rsp) + return rsp + + +def update_security_group_rules(cluster, spid, rules): + path = "/ws.v1/security-profile/%s" % spid + + # Allow all dhcp responses in + rules['logical_port_egress_rules'].append( + {'ethertype': 'IPv4', 'protocol': constants.PROTO_NUM_UDP, + 'port_range_min': constants.DHCP_RESPONSE_PORT, + 'port_range_max': constants.DHCP_RESPONSE_PORT, + 'ip_prefix': '0.0.0.0/0'}) + # If there are no ingress rules add bunk rule to drop all ingress traffic + if not rules['logical_port_ingress_rules']: + 
rules['logical_port_ingress_rules'].append( + {'ethertype': 'IPv4', 'ip_prefix': '127.0.0.1/32'}) + try: + body = mk_body( + logical_port_ingress_rules=rules['logical_port_ingress_rules'], + logical_port_egress_rules=rules['logical_port_egress_rules']) + rsp = nsxlib.do_request(HTTP_PUT, path, body, cluster=cluster) + except exceptions.NotFound as e: + LOG.error(nsxlib.format_exception("Unknown", e, locals())) + #FIXME(salvatore-orlando): This should not raise NeutronException + raise exceptions.NeutronException() + LOG.debug(_("Updated Security Profile: %s"), rsp) + return rsp + + +def update_security_profile(cluster, spid, name): + return nsxlib.do_request( + HTTP_PUT, + nsxlib._build_uri_path(SECPROF_RESOURCE, resource_id=spid), + json.dumps({"display_name": utils.check_and_truncate(name)}), + cluster=cluster) + + +def delete_security_profile(cluster, spid): + path = "/ws.v1/security-profile/%s" % spid + + try: + nsxlib.do_request(HTTP_DELETE, path, cluster=cluster) + except exceptions.NotFound: + with excutils.save_and_reraise_exception(): + # This is not necessarily an error condition + LOG.warn(_("Unable to find security profile %s on NSX backend"), + spid) diff --git a/neutron/plugins/vmware/nsxlib/switch.py b/neutron/plugins/vmware/nsxlib/switch.py new file mode 100644 index 000000000..e94791e6f --- /dev/null +++ b/neutron/plugins/vmware/nsxlib/switch.py @@ -0,0 +1,397 @@ +# Copyright 2014 VMware, Inc. +# All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. +# + +from oslo.config import cfg + +from neutron.common import constants +from neutron.common import exceptions as exception +from neutron.openstack.common import jsonutils as json +from neutron.openstack.common import log +from neutron.plugins.vmware.api_client import exception as api_exc +from neutron.plugins.vmware.common import exceptions as nsx_exc +from neutron.plugins.vmware.common import utils +from neutron.plugins.vmware import nsxlib + +HTTP_GET = "GET" +HTTP_POST = "POST" +HTTP_DELETE = "DELETE" +HTTP_PUT = "PUT" + +LSWITCH_RESOURCE = "lswitch" +LSWITCHPORT_RESOURCE = "lport/%s" % LSWITCH_RESOURCE + +LOG = log.getLogger(__name__) + + +def _configure_extensions(lport_obj, mac_address, fixed_ips, + port_security_enabled, security_profiles, + queue_id, mac_learning_enabled, + allowed_address_pairs): + lport_obj['allowed_address_pairs'] = [] + if port_security_enabled: + for fixed_ip in fixed_ips: + ip_address = fixed_ip.get('ip_address') + if ip_address: + lport_obj['allowed_address_pairs'].append( + {'mac_address': mac_address, 'ip_address': ip_address}) + # add address pair allowing src_ip 0.0.0.0 to leave + # this is required for outgoing dhcp request + lport_obj["allowed_address_pairs"].append( + {"mac_address": mac_address, + "ip_address": "0.0.0.0"}) + lport_obj['security_profiles'] = list(security_profiles or []) + lport_obj['queue_uuid'] = queue_id + if mac_learning_enabled is not None: + lport_obj["mac_learning"] = mac_learning_enabled + lport_obj["type"] = "LogicalSwitchPortConfig" + for address_pair in list(allowed_address_pairs or []): + lport_obj['allowed_address_pairs'].append( + {'mac_address': address_pair['mac_address'], + 'ip_address': address_pair['ip_address']}) + + +def get_lswitch_by_id(cluster, lswitch_id): + try: + lswitch_uri_path = nsxlib._build_uri_path( + LSWITCH_RESOURCE, lswitch_id, + relations="LogicalSwitchStatus") + 
return nsxlib.do_request(HTTP_GET, lswitch_uri_path, cluster=cluster) + except exception.NotFound: + # FIXME(salv-orlando): this should not raise a neutron exception + raise exception.NetworkNotFound(net_id=lswitch_id) + + +def get_lswitches(cluster, neutron_net_id): + + def lookup_switches_by_tag(): + # Fetch extra logical switches + lswitch_query_path = nsxlib._build_uri_path( + LSWITCH_RESOURCE, + fields="uuid,display_name,tags,lport_count", + relations="LogicalSwitchStatus", + filters={'tag': neutron_net_id, + 'tag_scope': 'quantum_net_id'}) + return nsxlib.get_all_query_pages(lswitch_query_path, cluster) + + lswitch_uri_path = nsxlib._build_uri_path(LSWITCH_RESOURCE, neutron_net_id, + relations="LogicalSwitchStatus") + results = [] + try: + ls = nsxlib.do_request(HTTP_GET, lswitch_uri_path, cluster=cluster) + results.append(ls) + for tag in ls['tags']: + if (tag['scope'] == "multi_lswitch" and + tag['tag'] == "True"): + results.extend(lookup_switches_by_tag()) + except exception.NotFound: + # This is legit if the neutron network was created using + # a post-Havana version of the plugin + results.extend(lookup_switches_by_tag()) + if results: + return results + else: + raise exception.NetworkNotFound(net_id=neutron_net_id) + + +def create_lswitch(cluster, neutron_net_id, tenant_id, display_name, + transport_zones_config, + shared=None, + **kwargs): + # The tag scope adopts a slightly different naming convention for + # historical reasons + lswitch_obj = {"display_name": utils.check_and_truncate(display_name), + "transport_zones": transport_zones_config, + "replication_mode": cfg.CONF.NSX.replication_mode, + "tags": utils.get_tags(os_tid=tenant_id, + quantum_net_id=neutron_net_id)} + # TODO(salv-orlando): Now that we have async status synchronization + # this tag is perhaps not needed anymore + if shared: + lswitch_obj["tags"].append({"tag": "true", + "scope": "shared"}) + if "tags" in kwargs: + lswitch_obj["tags"].extend(kwargs["tags"]) + uri = 
nsxlib._build_uri_path(LSWITCH_RESOURCE) + lswitch = nsxlib.do_request(HTTP_POST, uri, json.dumps(lswitch_obj), + cluster=cluster) + LOG.debug(_("Created logical switch: %s"), lswitch['uuid']) + return lswitch + + +def update_lswitch(cluster, lswitch_id, display_name, + tenant_id=None, **kwargs): + uri = nsxlib._build_uri_path(LSWITCH_RESOURCE, resource_id=lswitch_id) + lswitch_obj = {"display_name": utils.check_and_truncate(display_name)} + # NOTE: tag update will not 'merge' existing tags with new ones. + tags = [] + if tenant_id: + tags = utils.get_tags(os_tid=tenant_id) + # The 'tags' kwarg might existing and be None + tags.extend(kwargs.get('tags') or []) + if tags: + lswitch_obj['tags'] = tags + try: + return nsxlib.do_request(HTTP_PUT, uri, json.dumps(lswitch_obj), + cluster=cluster) + except exception.NotFound as e: + LOG.error(_("Network not found, Error: %s"), str(e)) + raise exception.NetworkNotFound(net_id=lswitch_id) + + +def delete_network(cluster, net_id, lswitch_id): + delete_networks(cluster, net_id, [lswitch_id]) + + +#TODO(salvatore-orlando): Simplify and harmonize +def delete_networks(cluster, net_id, lswitch_ids): + for ls_id in lswitch_ids: + path = "/ws.v1/lswitch/%s" % ls_id + try: + nsxlib.do_request(HTTP_DELETE, path, cluster=cluster) + except exception.NotFound as e: + LOG.error(_("Network not found, Error: %s"), str(e)) + raise exception.NetworkNotFound(net_id=ls_id) + + +def query_lswitch_lports(cluster, ls_uuid, fields="*", + filters=None, relations=None): + # Fix filter for attachments + if filters and "attachment" in filters: + filters['attachment_vif_uuid'] = filters["attachment"] + del filters['attachment'] + uri = nsxlib._build_uri_path(LSWITCHPORT_RESOURCE, + parent_resource_id=ls_uuid, + fields=fields, + filters=filters, + relations=relations) + return nsxlib.do_request(HTTP_GET, uri, cluster=cluster)['results'] + + +def delete_port(cluster, switch, port): + uri = "/ws.v1/lswitch/" + switch + "/lport/" + port + try: + 
nsxlib.do_request(HTTP_DELETE, uri, cluster=cluster) + except exception.NotFound: + LOG.exception(_("Port or Network not found")) + raise exception.PortNotFoundOnNetwork( + net_id=switch, port_id=port) + except api_exc.NsxApiException: + raise exception.NeutronException() + + +def get_ports(cluster, networks=None, devices=None, tenants=None): + vm_filter_obsolete = "" + vm_filter = "" + tenant_filter = "" + # This is used when calling delete_network. Neutron checks to see if + # the network has any ports. + if networks: + # FIXME (Aaron) If we get more than one network_id this won't work + lswitch = networks[0] + else: + lswitch = "*" + if devices: + for device_id in devices: + vm_filter_obsolete = '&'.join( + ["tag_scope=vm_id", + "tag=%s" % utils.device_id_to_vm_id(device_id, + obfuscate=True), + vm_filter_obsolete]) + vm_filter = '&'.join( + ["tag_scope=vm_id", + "tag=%s" % utils.device_id_to_vm_id(device_id), + vm_filter]) + if tenants: + for tenant in tenants: + tenant_filter = '&'.join( + ["tag_scope=os_tid", + "tag=%s" % tenant, + tenant_filter]) + + nsx_lports = {} + lport_fields_str = ("tags,admin_status_enabled,display_name," + "fabric_status_up") + try: + lport_query_path_obsolete = ( + "/ws.v1/lswitch/%s/lport?fields=%s&%s%stag_scope=q_port_id" + "&relations=LogicalPortStatus" % + (lswitch, lport_fields_str, vm_filter_obsolete, tenant_filter)) + lport_query_path = ( + "/ws.v1/lswitch/%s/lport?fields=%s&%s%stag_scope=q_port_id" + "&relations=LogicalPortStatus" % + (lswitch, lport_fields_str, vm_filter, tenant_filter)) + try: + # NOTE(armando-migliaccio): by querying with obsolete tag first + # current deployments won't take the performance hit of a double + # call. 
In release L-** or M-**, we might want to swap the calls + # as it's likely that ports with the new tag would outnumber the + # ones with the old tag + ports = nsxlib.get_all_query_pages(lport_query_path_obsolete, + cluster) + if not ports: + ports = nsxlib.get_all_query_pages(lport_query_path, cluster) + except exception.NotFound: + LOG.warn(_("Lswitch %s not found in NSX"), lswitch) + ports = None + + if ports: + for port in ports: + for tag in port["tags"]: + if tag["scope"] == "q_port_id": + nsx_lports[tag["tag"]] = port + except Exception: + err_msg = _("Unable to get ports") + LOG.exception(err_msg) + raise nsx_exc.NsxPluginException(err_msg=err_msg) + return nsx_lports + + +def get_port_by_neutron_tag(cluster, lswitch_uuid, neutron_port_id): + """Get port by neutron tag. + + Returns the NSX UUID of the logical port with tag q_port_id equal to + neutron_port_id or None if the port is not Found. + """ + uri = nsxlib._build_uri_path(LSWITCHPORT_RESOURCE, + parent_resource_id=lswitch_uuid, + fields='uuid', + filters={'tag': neutron_port_id, + 'tag_scope': 'q_port_id'}) + LOG.debug(_("Looking for port with q_port_id tag '%(neutron_port_id)s' " + "on: '%(lswitch_uuid)s'"), + {'neutron_port_id': neutron_port_id, + 'lswitch_uuid': lswitch_uuid}) + res = nsxlib.do_request(HTTP_GET, uri, cluster=cluster) + num_results = len(res["results"]) + if num_results >= 1: + if num_results > 1: + LOG.warn(_("Found '%(num_ports)d' ports with " + "q_port_id tag: '%(neutron_port_id)s'. " + "Only 1 was expected."), + {'num_ports': num_results, + 'neutron_port_id': neutron_port_id}) + return res["results"][0] + + +def get_port(cluster, network, port, relations=None): + LOG.info(_("get_port() %(network)s %(port)s"), + {'network': network, 'port': port}) + uri = "/ws.v1/lswitch/" + network + "/lport/" + port + "?" 
+ if relations: + uri += "relations=%s" % relations + try: + return nsxlib.do_request(HTTP_GET, uri, cluster=cluster) + except exception.NotFound as e: + LOG.error(_("Port or Network not found, Error: %s"), str(e)) + raise exception.PortNotFoundOnNetwork( + port_id=port, net_id=network) + + +def update_port(cluster, lswitch_uuid, lport_uuid, neutron_port_id, tenant_id, + display_name, device_id, admin_status_enabled, + mac_address=None, fixed_ips=None, port_security_enabled=None, + security_profiles=None, queue_id=None, + mac_learning_enabled=None, allowed_address_pairs=None): + lport_obj = dict( + admin_status_enabled=admin_status_enabled, + display_name=utils.check_and_truncate(display_name), + tags=utils.get_tags(os_tid=tenant_id, + q_port_id=neutron_port_id, + vm_id=utils.device_id_to_vm_id(device_id))) + + _configure_extensions(lport_obj, mac_address, fixed_ips, + port_security_enabled, security_profiles, + queue_id, mac_learning_enabled, + allowed_address_pairs) + + path = "/ws.v1/lswitch/" + lswitch_uuid + "/lport/" + lport_uuid + try: + result = nsxlib.do_request(HTTP_PUT, path, json.dumps(lport_obj), + cluster=cluster) + LOG.debug(_("Updated logical port %(result)s " + "on logical switch %(uuid)s"), + {'result': result['uuid'], 'uuid': lswitch_uuid}) + return result + except exception.NotFound as e: + LOG.error(_("Port or Network not found, Error: %s"), str(e)) + raise exception.PortNotFoundOnNetwork( + port_id=lport_uuid, net_id=lswitch_uuid) + + +def create_lport(cluster, lswitch_uuid, tenant_id, neutron_port_id, + display_name, device_id, admin_status_enabled, + mac_address=None, fixed_ips=None, port_security_enabled=None, + security_profiles=None, queue_id=None, + mac_learning_enabled=None, allowed_address_pairs=None): + """Creates a logical port on the assigned logical switch.""" + display_name = utils.check_and_truncate(display_name) + lport_obj = dict( + admin_status_enabled=admin_status_enabled, + display_name=display_name, + 
tags=utils.get_tags(os_tid=tenant_id, + q_port_id=neutron_port_id, + vm_id=utils.device_id_to_vm_id(device_id)) + ) + + _configure_extensions(lport_obj, mac_address, fixed_ips, + port_security_enabled, security_profiles, + queue_id, mac_learning_enabled, + allowed_address_pairs) + + path = nsxlib._build_uri_path(LSWITCHPORT_RESOURCE, + parent_resource_id=lswitch_uuid) + result = nsxlib.do_request(HTTP_POST, path, json.dumps(lport_obj), + cluster=cluster) + + LOG.debug(_("Created logical port %(result)s on logical switch %(uuid)s"), + {'result': result['uuid'], 'uuid': lswitch_uuid}) + return result + + +def get_port_status(cluster, lswitch_id, port_id): + """Retrieve the operational status of the port.""" + try: + r = nsxlib.do_request(HTTP_GET, + "/ws.v1/lswitch/%s/lport/%s/status" % + (lswitch_id, port_id), cluster=cluster) + except exception.NotFound as e: + LOG.error(_("Port not found, Error: %s"), str(e)) + raise exception.PortNotFoundOnNetwork( + port_id=port_id, net_id=lswitch_id) + if r['link_status_up'] is True: + return constants.PORT_STATUS_ACTIVE + else: + return constants.PORT_STATUS_DOWN + + +def plug_interface(cluster, lswitch_id, lport_id, att_obj): + return nsxlib.do_request(HTTP_PUT, + nsxlib._build_uri_path(LSWITCHPORT_RESOURCE, + lport_id, lswitch_id, + is_attachment=True), + json.dumps(att_obj), + cluster=cluster) + + +def plug_vif_interface( + cluster, lswitch_id, port_id, port_type, attachment=None): + """Plug a VIF Attachment object in a logical port.""" + lport_obj = {} + if attachment: + lport_obj["vif_uuid"] = attachment + + lport_obj["type"] = port_type + return plug_interface(cluster, lswitch_id, port_id, lport_obj) diff --git a/neutron/plugins/vmware/nsxlib/versioning.py b/neutron/plugins/vmware/nsxlib/versioning.py new file mode 100644 index 000000000..0845a7d4c --- /dev/null +++ b/neutron/plugins/vmware/nsxlib/versioning.py @@ -0,0 +1,66 @@ +# Copyright 2014 VMware, Inc. 
+# All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import inspect + +from neutron.plugins.vmware.api_client import exception + +DEFAULT_VERSION = -1 + + +def versioned(func_table): + + def versioned_function(wrapped_func): + func_name = wrapped_func.__name__ + + def dispatch_versioned_function(cluster, *args, **kwargs): + # Call the wrapper function, in case we need to + # run validation checks regarding versions. It + # should return the NSX version + v = (wrapped_func(cluster, *args, **kwargs) or + cluster.api_client.get_version()) + func = get_function_by_version(func_table, func_name, v) + func_kwargs = kwargs + arg_spec = inspect.getargspec(func) + if not arg_spec.keywords and not arg_spec.varargs: + # drop args unknown to function from func_args + arg_set = set(func_kwargs.keys()) + for arg in arg_set - set(arg_spec.args): + del func_kwargs[arg] + # NOTE(salvatore-orlando): shall we fail here if a required + # argument is not passed, or let the called function raise? 
+ return func(cluster, *args, **func_kwargs) + + return dispatch_versioned_function + return versioned_function + + +def get_function_by_version(func_table, func_name, ver): + if ver: + if ver.major not in func_table[func_name]: + major = max(func_table[func_name].keys()) + minor = max(func_table[func_name][major].keys()) + if major > ver.major: + raise NotImplementedError(_("Operation may not be supported")) + else: + major = ver.major + minor = ver.minor + if ver.minor not in func_table[func_name][major]: + minor = DEFAULT_VERSION + return func_table[func_name][major][minor] + else: + msg = _('NSX version is not set. Unable to complete request ' + 'correctly. Check log for NSX communication errors.') + raise exception.ServiceUnavailable(message=msg) diff --git a/neutron/plugins/vmware/plugin.py b/neutron/plugins/vmware/plugin.py new file mode 100644 index 000000000..f5ea3dba1 --- /dev/null +++ b/neutron/plugins/vmware/plugin.py @@ -0,0 +1,22 @@ +# Copyright 2014 VMware, Inc. +# +# All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +from neutron.plugins.vmware.plugins import base +from neutron.plugins.vmware.plugins import service + +NsxPlugin = base.NsxPluginV2 +NsxServicePlugin = service.NsxAdvancedPlugin diff --git a/neutron/plugins/vmware/plugins/__init__.py b/neutron/plugins/vmware/plugins/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/neutron/plugins/vmware/plugins/base.py b/neutron/plugins/vmware/plugins/base.py new file mode 100644 index 000000000..384964c8f --- /dev/null +++ b/neutron/plugins/vmware/plugins/base.py @@ -0,0 +1,2528 @@ +# Copyright 2012 VMware, Inc. +# All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import logging +import uuid + +from oslo.config import cfg +from sqlalchemy import exc as sql_exc +from sqlalchemy.orm import exc as sa_exc +import webob.exc + +from neutron.api import extensions as neutron_extensions +from neutron.api.v2 import attributes as attr +from neutron.api.v2 import base +from neutron.common import constants +from neutron.common import exceptions as n_exc +from neutron.common import utils +from neutron import context as q_context +from neutron.db import agentschedulers_db +from neutron.db import allowedaddresspairs_db as addr_pair_db +from neutron.db import db_base_plugin_v2 +from neutron.db import external_net_db +from neutron.db import extraroute_db +from neutron.db import l3_db +from neutron.db import l3_gwmode_db +from neutron.db import models_v2 +from neutron.db import portbindings_db +from neutron.db import portsecurity_db +from neutron.db import quota_db # noqa +from neutron.db import securitygroups_db +from neutron.extensions import allowedaddresspairs as addr_pair +from neutron.extensions import external_net as ext_net_extn +from neutron.extensions import extraroute +from neutron.extensions import l3 +from neutron.extensions import multiprovidernet as mpnet +from neutron.extensions import portbindings as pbin +from neutron.extensions import portsecurity as psec +from neutron.extensions import providernet as pnet +from neutron.extensions import securitygroup as ext_sg +from neutron.openstack.common.db import exception as db_exc +from neutron.openstack.common import excutils +from neutron.openstack.common import lockutils +from neutron.plugins.common import constants as plugin_const +from neutron.plugins import vmware +from neutron.plugins.vmware.api_client import exception as api_exc +from neutron.plugins.vmware.common import config # noqa +from neutron.plugins.vmware.common import exceptions as nsx_exc +from neutron.plugins.vmware.common import nsx_utils +from neutron.plugins.vmware.common import securitygroups as sg_utils 
+from neutron.plugins.vmware.common import sync +from neutron.plugins.vmware.common import utils as c_utils +from neutron.plugins.vmware.dbexts import db as nsx_db +from neutron.plugins.vmware.dbexts import distributedrouter as dist_rtr +from neutron.plugins.vmware.dbexts import maclearning as mac_db +from neutron.plugins.vmware.dbexts import networkgw_db +from neutron.plugins.vmware.dbexts import qos_db +from neutron.plugins.vmware import dhcpmeta_modes +from neutron.plugins.vmware.extensions import maclearning as mac_ext +from neutron.plugins.vmware.extensions import networkgw +from neutron.plugins.vmware.extensions import qos +from neutron.plugins.vmware.nsxlib import l2gateway as l2gwlib +from neutron.plugins.vmware.nsxlib import queue as queuelib +from neutron.plugins.vmware.nsxlib import router as routerlib +from neutron.plugins.vmware.nsxlib import secgroup as secgrouplib +from neutron.plugins.vmware.nsxlib import switch as switchlib + +LOG = logging.getLogger("NeutronPlugin") + +NSX_NOSNAT_RULES_ORDER = 10 +NSX_FLOATINGIP_NAT_RULES_ORDER = 224 +NSX_EXTGW_NAT_RULES_ORDER = 255 +NSX_DEFAULT_NEXTHOP = '1.1.1.1' + + +class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin, + agentschedulers_db.DhcpAgentSchedulerDbMixin, + db_base_plugin_v2.NeutronDbPluginV2, + dhcpmeta_modes.DhcpMetadataAccess, + dist_rtr.DistributedRouter_mixin, + external_net_db.External_net_db_mixin, + extraroute_db.ExtraRoute_db_mixin, + l3_gwmode_db.L3_NAT_db_mixin, + mac_db.MacLearningDbMixin, + networkgw_db.NetworkGatewayMixin, + portbindings_db.PortBindingMixin, + portsecurity_db.PortSecurityDbMixin, + qos_db.QoSDbMixin, + securitygroups_db.SecurityGroupDbMixin): + + supported_extension_aliases = ["allowed-address-pairs", + "binding", + "dist-router", + "ext-gw-mode", + "extraroute", + "mac-learning", + "multi-provider", + "network-gateway", + "nvp-qos", + "port-security", + "provider", + "qos-queue", + "quotas", + "external-net", + "router", + "security-group"] + + 
__native_bulk_support = True + __native_pagination_support = True + __native_sorting_support = True + + # Map nova zones to cluster for easy retrieval + novazone_cluster_map = {} + + def __init__(self): + super(NsxPluginV2, self).__init__() + config.validate_config_options() + # TODO(salv-orlando): Replace These dicts with + # collections.defaultdict for better handling of default values + # Routines for managing logical ports in NSX + self.port_special_owners = [l3_db.DEVICE_OWNER_ROUTER_GW, + l3_db.DEVICE_OWNER_ROUTER_INTF] + self._port_drivers = { + 'create': {l3_db.DEVICE_OWNER_ROUTER_GW: + self._nsx_create_ext_gw_port, + l3_db.DEVICE_OWNER_FLOATINGIP: + self._nsx_create_fip_port, + l3_db.DEVICE_OWNER_ROUTER_INTF: + self._nsx_create_router_port, + networkgw_db.DEVICE_OWNER_NET_GW_INTF: + self._nsx_create_l2_gw_port, + 'default': self._nsx_create_port}, + 'delete': {l3_db.DEVICE_OWNER_ROUTER_GW: + self._nsx_delete_ext_gw_port, + l3_db.DEVICE_OWNER_ROUTER_INTF: + self._nsx_delete_router_port, + l3_db.DEVICE_OWNER_FLOATINGIP: + self._nsx_delete_fip_port, + networkgw_db.DEVICE_OWNER_NET_GW_INTF: + self._nsx_delete_port, + 'default': self._nsx_delete_port} + } + + neutron_extensions.append_api_extensions_path([vmware.NSX_EXT_PATH]) + self.nsx_opts = cfg.CONF.NSX + self.nsx_sync_opts = cfg.CONF.NSX_SYNC + self.cluster = nsx_utils.create_nsx_cluster( + cfg.CONF, + self.nsx_opts.concurrent_connections, + self.nsx_opts.nsx_gen_timeout) + + self.base_binding_dict = { + pbin.VIF_TYPE: pbin.VIF_TYPE_OVS, + pbin.VIF_DETAILS: { + # TODO(rkukura): Replace with new VIF security details + pbin.CAP_PORT_FILTER: + 'security-group' in self.supported_extension_aliases}} + + self._extend_fault_map() + self.setup_dhcpmeta_access() + # Set this flag to false as the default gateway has not + # been yet updated from the config file + self._is_default_net_gw_in_sync = False + # Create a synchronizer instance for backend sync + self._synchronizer = sync.NsxSynchronizer( + 
self.safe_reference, self.cluster, + self.nsx_sync_opts.state_sync_interval, + self.nsx_sync_opts.min_sync_req_delay, + self.nsx_sync_opts.min_chunk_size, + self.nsx_sync_opts.max_random_sync_delay) + + def _ensure_default_network_gateway(self): + if self._is_default_net_gw_in_sync: + return + # Add the gw in the db as default, and unset any previous default + def_l2_gw_uuid = self.cluster.default_l2_gw_service_uuid + try: + ctx = q_context.get_admin_context() + self._unset_default_network_gateways(ctx) + if not def_l2_gw_uuid: + return + try: + def_network_gw = self._get_network_gateway(ctx, + def_l2_gw_uuid) + except networkgw_db.GatewayNotFound: + # Create in DB only - don't go to backend + def_gw_data = {'id': def_l2_gw_uuid, + 'name': 'default L2 gateway service', + 'devices': []} + gw_res_name = networkgw.GATEWAY_RESOURCE_NAME.replace('-', '_') + def_network_gw = super( + NsxPluginV2, self).create_network_gateway( + ctx, {gw_res_name: def_gw_data}) + # In any case set is as default + self._set_default_network_gateway(ctx, def_network_gw['id']) + # Ensure this method is executed only once + self._is_default_net_gw_in_sync = True + except Exception: + with excutils.save_and_reraise_exception(): + LOG.exception(_("Unable to process default l2 gw service:%s"), + def_l2_gw_uuid) + + def _build_ip_address_list(self, context, fixed_ips, subnet_ids=None): + """Build ip_addresses data structure for logical router port. + + No need to perform validation on IPs - this has already been + done in the l3_db mixin class. 
+ """ + ip_addresses = [] + for ip in fixed_ips: + if not subnet_ids or (ip['subnet_id'] in subnet_ids): + subnet = self._get_subnet(context, ip['subnet_id']) + ip_prefix = '%s/%s' % (ip['ip_address'], + subnet['cidr'].split('/')[1]) + ip_addresses.append(ip_prefix) + return ip_addresses + + def _create_and_attach_router_port(self, cluster, context, + nsx_router_id, port_data, + attachment_type, attachment, + attachment_vlan=None, + subnet_ids=None): + # Use a fake IP address if gateway port is not 'real' + ip_addresses = (port_data.get('fake_ext_gw') and + ['0.0.0.0/31'] or + self._build_ip_address_list(context, + port_data['fixed_ips'], + subnet_ids)) + try: + lrouter_port = routerlib.create_router_lport( + cluster, nsx_router_id, port_data.get('tenant_id', 'fake'), + port_data.get('id', 'fake'), port_data.get('name', 'fake'), + port_data.get('admin_state_up', True), ip_addresses, + port_data.get('mac_address')) + LOG.debug(_("Created NSX router port:%s"), lrouter_port['uuid']) + except api_exc.NsxApiException: + LOG.exception(_("Unable to create port on NSX logical router %s"), + nsx_router_id) + raise nsx_exc.NsxPluginException( + err_msg=_("Unable to create logical router port for neutron " + "port id %(port_id)s on router %(nsx_router_id)s") % + {'port_id': port_data.get('id'), + 'nsx_router_id': nsx_router_id}) + self._update_router_port_attachment(cluster, context, nsx_router_id, + port_data, lrouter_port['uuid'], + attachment_type, attachment, + attachment_vlan) + return lrouter_port + + def _update_router_gw_info(self, context, router_id, info): + # NOTE(salvatore-orlando): We need to worry about rollback of NSX + # configuration in case of failures in the process + # Ref. 
LP bug 1102301 + router = self._get_router(context, router_id) + # Check whether SNAT rule update should be triggered + # NSX also supports multiple external networks so there is also + # the possibility that NAT rules should be replaced + current_ext_net_id = router.gw_port_id and router.gw_port.network_id + new_ext_net_id = info and info.get('network_id') + # SNAT should be enabled unless info['enable_snat'] is + # explicitly set to false + enable_snat = new_ext_net_id and info.get('enable_snat', True) + # Remove if ext net removed, changed, or if snat disabled + remove_snat_rules = (current_ext_net_id and + new_ext_net_id != current_ext_net_id or + router.enable_snat and not enable_snat) + # Add rules if snat is enabled, and if either the external network + # changed or snat was previously disabled + # NOTE: enable_snat == True implies new_ext_net_id != None + add_snat_rules = (enable_snat and + (new_ext_net_id != current_ext_net_id or + not router.enable_snat)) + router = super(NsxPluginV2, self)._update_router_gw_info( + context, router_id, info, router=router) + # Add/Remove SNAT rules as needed + # Create an elevated context for dealing with metadata access + # cidrs which are created within admin context + ctx_elevated = context.elevated() + if remove_snat_rules or add_snat_rules: + cidrs = self._find_router_subnets_cidrs(ctx_elevated, router_id) + nsx_router_id = nsx_utils.get_nsx_router_id( + context.session, self.cluster, router_id) + if remove_snat_rules: + # Be safe and concede NAT rules might not exist. 
+ # Therefore use min_num_expected=0 + for cidr in cidrs: + routerlib.delete_nat_rules_by_match( + self.cluster, nsx_router_id, "SourceNatRule", + max_num_expected=1, min_num_expected=0, + source_ip_addresses=cidr) + if add_snat_rules: + ip_addresses = self._build_ip_address_list( + ctx_elevated, router.gw_port['fixed_ips']) + # Set the SNAT rule for each subnet (only first IP) + for cidr in cidrs: + cidr_prefix = int(cidr.split('/')[1]) + routerlib.create_lrouter_snat_rule( + self.cluster, nsx_router_id, + ip_addresses[0].split('/')[0], + ip_addresses[0].split('/')[0], + order=NSX_EXTGW_NAT_RULES_ORDER - cidr_prefix, + match_criteria={'source_ip_addresses': cidr}) + + def _update_router_port_attachment(self, cluster, context, + nsx_router_id, port_data, + nsx_router_port_id, + attachment_type, + attachment, + attachment_vlan=None): + if not nsx_router_port_id: + nsx_router_port_id = self._find_router_gw_port(context, port_data) + try: + routerlib.plug_router_port_attachment(cluster, nsx_router_id, + nsx_router_port_id, + attachment, + attachment_type, + attachment_vlan) + LOG.debug(_("Attached %(att)s to NSX router port %(port)s"), + {'att': attachment, 'port': nsx_router_port_id}) + except api_exc.NsxApiException: + # Must remove NSX logical port + routerlib.delete_router_lport(cluster, nsx_router_id, + nsx_router_port_id) + LOG.exception(_("Unable to plug attachment in NSX logical " + "router port %(r_port_id)s, associated with " + "Neutron %(q_port_id)s"), + {'r_port_id': nsx_router_port_id, + 'q_port_id': port_data.get('id')}) + raise nsx_exc.NsxPluginException( + err_msg=(_("Unable to plug attachment in router port " + "%(r_port_id)s for neutron port id %(q_port_id)s " + "on router %(router_id)s") % + {'r_port_id': nsx_router_port_id, + 'q_port_id': port_data.get('id'), + 'router_id': nsx_router_id})) + + def _get_port_by_device_id(self, context, device_id, device_owner): + """Retrieve ports associated with a specific device id. 
+ + Used for retrieving all neutron ports attached to a given router. + """ + port_qry = context.session.query(models_v2.Port) + return port_qry.filter_by( + device_id=device_id, + device_owner=device_owner,).all() + + def _find_router_subnets_cidrs(self, context, router_id): + """Retrieve subnets attached to the specified router.""" + ports = self._get_port_by_device_id(context, router_id, + l3_db.DEVICE_OWNER_ROUTER_INTF) + # No need to check for overlapping CIDRs + cidrs = [] + for port in ports: + for ip in port.get('fixed_ips', []): + cidrs.append(self._get_subnet(context, + ip.subnet_id).cidr) + return cidrs + + def _nsx_find_lswitch_for_port(self, context, port_data): + network = self._get_network(context, port_data['network_id']) + network_bindings = nsx_db.get_network_bindings( + context.session, port_data['network_id']) + max_ports = self.nsx_opts.max_lp_per_overlay_ls + allow_extra_lswitches = False + for network_binding in network_bindings: + if network_binding.binding_type in (c_utils.NetworkTypes.FLAT, + c_utils.NetworkTypes.VLAN): + max_ports = self.nsx_opts.max_lp_per_bridged_ls + allow_extra_lswitches = True + break + try: + return self._handle_lswitch_selection( + context, self.cluster, network, network_bindings, + max_ports, allow_extra_lswitches) + except api_exc.NsxApiException: + err_desc = _("An exception occurred while selecting logical " + "switch for the port") + LOG.exception(err_desc) + raise nsx_exc.NsxPluginException(err_msg=err_desc) + + def _nsx_create_port_helper(self, session, ls_uuid, port_data, + do_port_security=True): + # Convert Neutron security groups identifiers into NSX security + # profiles identifiers + nsx_sec_profile_ids = [ + nsx_utils.get_nsx_security_group_id( + session, self.cluster, neutron_sg_id) for + neutron_sg_id in (port_data[ext_sg.SECURITYGROUPS] or [])] + return switchlib.create_lport(self.cluster, + ls_uuid, + port_data['tenant_id'], + port_data['id'], + port_data['name'], + port_data['device_id'], + 
port_data['admin_state_up'], + port_data['mac_address'], + port_data['fixed_ips'], + port_data[psec.PORTSECURITY], + nsx_sec_profile_ids, + port_data.get(qos.QUEUE), + port_data.get(mac_ext.MAC_LEARNING), + port_data.get(addr_pair.ADDRESS_PAIRS)) + + def _handle_create_port_exception(self, context, port_id, + ls_uuid, lp_uuid): + with excutils.save_and_reraise_exception(): + # rollback nsx logical port only if it was successfully + # created on NSX. Should this command fail the original + # exception will be raised. + if lp_uuid: + # Remove orphaned port from NSX + switchlib.delete_port(self.cluster, ls_uuid, lp_uuid) + # rollback the neutron-nsx port mapping + nsx_db.delete_neutron_nsx_port_mapping(context.session, + port_id) + msg = (_("An exception occurred while creating the " + "neutron port %s on the NSX plaform") % port_id) + LOG.exception(msg) + + def _nsx_create_port(self, context, port_data): + """Driver for creating a logical switch port on NSX platform.""" + # FIXME(salvatore-orlando): On the NSX platform we do not really have + # external networks. So if as user tries and create a "regular" VIF + # port on an external network we are unable to actually create. + # However, in order to not break unit tests, we need to still create + # the DB object and return success + if self._network_is_external(context, port_data['network_id']): + LOG.info(_("NSX plugin does not support regular VIF ports on " + "external networks. 
Port %s will be down."), + port_data['network_id']) + # No need to actually update the DB state - the default is down + return port_data + lport = None + selected_lswitch = None + try: + selected_lswitch = self._nsx_find_lswitch_for_port(context, + port_data) + lport = self._nsx_create_port_helper(context.session, + selected_lswitch['uuid'], + port_data, + True) + nsx_db.add_neutron_nsx_port_mapping( + context.session, port_data['id'], + selected_lswitch['uuid'], lport['uuid']) + if port_data['device_owner'] not in self.port_special_owners: + switchlib.plug_vif_interface( + self.cluster, selected_lswitch['uuid'], + lport['uuid'], "VifAttachment", port_data['id']) + LOG.debug(_("_nsx_create_port completed for port %(name)s " + "on network %(network_id)s. The new port id is " + "%(id)s."), port_data) + except (api_exc.NsxApiException, n_exc.NeutronException): + self._handle_create_port_exception( + context, port_data['id'], + selected_lswitch and selected_lswitch['uuid'], + lport and lport['uuid']) + except db_exc.DBError as e: + if (port_data['device_owner'] == constants.DEVICE_OWNER_DHCP and + isinstance(e.inner_exception, sql_exc.IntegrityError)): + msg = (_("Concurrent network deletion detected; Back-end Port " + "%(nsx_id)s creation to be rolled back for Neutron " + "port: %(neutron_id)s") + % {'nsx_id': lport['uuid'], + 'neutron_id': port_data['id']}) + LOG.warning(msg) + if selected_lswitch and lport: + try: + switchlib.delete_port(self.cluster, + selected_lswitch['uuid'], + lport['uuid']) + except n_exc.NotFound: + LOG.debug(_("NSX Port %s already gone"), lport['uuid']) + + def _nsx_delete_port(self, context, port_data): + # FIXME(salvatore-orlando): On the NSX platform we do not really have + # external networks. So deleting regular ports from external networks + # does not make sense. However we cannot raise as this would break + # unit tests. 
+ if self._network_is_external(context, port_data['network_id']): + LOG.info(_("NSX plugin does not support regular VIF ports on " + "external networks. Port %s will be down."), + port_data['network_id']) + return + nsx_switch_id, nsx_port_id = nsx_utils.get_nsx_switch_and_port_id( + context.session, self.cluster, port_data['id']) + if not nsx_port_id: + LOG.debug(_("Port '%s' was already deleted on NSX platform"), id) + return + # TODO(bgh): if this is a bridged network and the lswitch we just got + # back will have zero ports after the delete we should garbage collect + # the lswitch. + try: + switchlib.delete_port(self.cluster, nsx_switch_id, nsx_port_id) + LOG.debug(_("_nsx_delete_port completed for port %(port_id)s " + "on network %(net_id)s"), + {'port_id': port_data['id'], + 'net_id': port_data['network_id']}) + except n_exc.NotFound: + LOG.warning(_("Port %s not found in NSX"), port_data['id']) + + def _nsx_delete_router_port(self, context, port_data): + # Delete logical router port + nsx_router_id = nsx_utils.get_nsx_router_id( + context.session, self.cluster, port_data['device_id']) + nsx_switch_id, nsx_port_id = nsx_utils.get_nsx_switch_and_port_id( + context.session, self.cluster, port_data['id']) + if not nsx_port_id: + LOG.warn(_("Neutron port %(port_id)s not found on NSX backend. " + "Terminating delete operation. 
A dangling router port " + "might have been left on router %(router_id)s"), + {'port_id': port_data['id'], + 'router_id': nsx_router_id}) + return + try: + routerlib.delete_peer_router_lport(self.cluster, + nsx_router_id, + nsx_switch_id, + nsx_port_id) + except api_exc.NsxApiException: + # Do not raise because the issue might as well be that the + # router has already been deleted, so there would be nothing + # to do here + LOG.exception(_("Ignoring exception as this means the peer " + "for port '%s' has already been deleted."), + nsx_port_id) + + # Delete logical switch port + self._nsx_delete_port(context, port_data) + + def _nsx_create_router_port(self, context, port_data): + """Driver for creating a switch port to be connected to a router.""" + # No router ports on external networks! + if self._network_is_external(context, port_data['network_id']): + raise nsx_exc.NsxPluginException( + err_msg=(_("It is not allowed to create router interface " + "ports on external networks as '%s'") % + port_data['network_id'])) + ls_port = None + selected_lswitch = None + try: + selected_lswitch = self._nsx_find_lswitch_for_port( + context, port_data) + # Do not apply port security here! + ls_port = self._nsx_create_port_helper( + context.session, selected_lswitch['uuid'], + port_data, False) + # Assuming subnet being attached is on first fixed ip + # element in port data + subnet_id = port_data['fixed_ips'][0]['subnet_id'] + nsx_router_id = nsx_utils.get_nsx_router_id( + context.session, self.cluster, port_data['device_id']) + # Create peer port on logical router + self._create_and_attach_router_port( + self.cluster, context, nsx_router_id, port_data, + "PatchAttachment", ls_port['uuid'], + subnet_ids=[subnet_id]) + nsx_db.add_neutron_nsx_port_mapping( + context.session, port_data['id'], + selected_lswitch['uuid'], ls_port['uuid']) + LOG.debug(_("_nsx_create_router_port completed for port " + "%(name)s on network %(network_id)s. 
The new " + "port id is %(id)s."), + port_data) + except (api_exc.NsxApiException, n_exc.NeutronException): + self._handle_create_port_exception( + context, port_data['id'], + selected_lswitch and selected_lswitch['uuid'], + ls_port and ls_port['uuid']) + + def _find_router_gw_port(self, context, port_data): + router_id = port_data['device_id'] + if not router_id: + raise n_exc.BadRequest(_("device_id field must be populated in " + "order to create an external gateway " + "port for network %s"), + port_data['network_id']) + nsx_router_id = nsx_utils.get_nsx_router_id( + context.session, self.cluster, router_id) + lr_port = routerlib.find_router_gw_port(context, self.cluster, + nsx_router_id) + if not lr_port: + raise nsx_exc.NsxPluginException( + err_msg=(_("The gateway port for the NSX router %s " + "was not found on the backend") + % nsx_router_id)) + return lr_port + + @lockutils.synchronized('vmware', 'neutron-') + def _nsx_create_ext_gw_port(self, context, port_data): + """Driver for creating an external gateway port on NSX platform.""" + # TODO(salvatore-orlando): Handle NSX resource + # rollback when something goes not quite as expected + lr_port = self._find_router_gw_port(context, port_data) + ip_addresses = self._build_ip_address_list(context, + port_data['fixed_ips']) + # This operation actually always updates a NSX logical port + # instead of creating one. This is because the gateway port + # is created at the same time as the NSX logical router, otherwise + # the fabric status of the NSX router will be down. 
+ # admin_status should always be up for the gateway port + # regardless of what the user specifies in neutron + nsx_router_id = nsx_utils.get_nsx_router_id( + context.session, self.cluster, port_data['device_id']) + routerlib.update_router_lport(self.cluster, + nsx_router_id, + lr_port['uuid'], + port_data['tenant_id'], + port_data['id'], + port_data['name'], + True, + ip_addresses) + ext_network = self.get_network(context, port_data['network_id']) + if ext_network.get(pnet.NETWORK_TYPE) == c_utils.NetworkTypes.L3_EXT: + # Update attachment + physical_network = (ext_network[pnet.PHYSICAL_NETWORK] or + self.cluster.default_l3_gw_service_uuid) + self._update_router_port_attachment( + self.cluster, context, nsx_router_id, port_data, + lr_port['uuid'], + "L3GatewayAttachment", + physical_network, + ext_network[pnet.SEGMENTATION_ID]) + + LOG.debug(_("_nsx_create_ext_gw_port completed on external network " + "%(ext_net_id)s, attached to router:%(router_id)s. " + "NSX port id is %(nsx_port_id)s"), + {'ext_net_id': port_data['network_id'], + 'router_id': nsx_router_id, + 'nsx_port_id': lr_port['uuid']}) + + @lockutils.synchronized('vmware', 'neutron-') + def _nsx_delete_ext_gw_port(self, context, port_data): + lr_port = self._find_router_gw_port(context, port_data) + # TODO(salvatore-orlando): Handle NSX resource + # rollback when something goes not quite as expected + try: + # Delete is actually never a real delete, otherwise the NSX + # logical router will stop working + router_id = port_data['device_id'] + nsx_router_id = nsx_utils.get_nsx_router_id( + context.session, self.cluster, router_id) + routerlib.update_router_lport(self.cluster, + nsx_router_id, + lr_port['uuid'], + port_data['tenant_id'], + port_data['id'], + port_data['name'], + True, + ['0.0.0.0/31']) + # Reset attachment + self._update_router_port_attachment( + self.cluster, context, nsx_router_id, port_data, + lr_port['uuid'], + "L3GatewayAttachment", + self.cluster.default_l3_gw_service_uuid) + + except 
api_exc.ResourceNotFound: + raise nsx_exc.NsxPluginException( + err_msg=_("Logical router resource %s not found " + "on NSX platform") % router_id) + except api_exc.NsxApiException: + raise nsx_exc.NsxPluginException( + err_msg=_("Unable to update logical router" + "on NSX Platform")) + LOG.debug(_("_nsx_delete_ext_gw_port completed on external network " + "%(ext_net_id)s, attached to NSX router:%(router_id)s"), + {'ext_net_id': port_data['network_id'], + 'router_id': nsx_router_id}) + + def _nsx_create_l2_gw_port(self, context, port_data): + """Create a switch port, and attach it to a L2 gateway attachment.""" + # FIXME(salvatore-orlando): On the NSX platform we do not really have + # external networks. So if as user tries and create a "regular" VIF + # port on an external network we are unable to actually create. + # However, in order to not break unit tests, we need to still create + # the DB object and return success + if self._network_is_external(context, port_data['network_id']): + LOG.info(_("NSX plugin does not support regular VIF ports on " + "external networks. Port %s will be down."), + port_data['network_id']) + # No need to actually update the DB state - the default is down + return port_data + lport = None + try: + selected_lswitch = self._nsx_find_lswitch_for_port( + context, port_data) + lport = self._nsx_create_port_helper( + context.session, + selected_lswitch['uuid'], + port_data, + True) + nsx_db.add_neutron_nsx_port_mapping( + context.session, port_data['id'], + selected_lswitch['uuid'], lport['uuid']) + l2gwlib.plug_l2_gw_service( + self.cluster, + selected_lswitch['uuid'], + lport['uuid'], + port_data['device_id'], + int(port_data.get('gw:segmentation_id') or 0)) + except Exception: + with excutils.save_and_reraise_exception(): + if lport: + switchlib.delete_port(self.cluster, + selected_lswitch['uuid'], + lport['uuid']) + LOG.debug(_("_nsx_create_l2_gw_port completed for port %(name)s " + "on network %(network_id)s. 
The new port id " + "is %(id)s."), port_data) + + def _nsx_create_fip_port(self, context, port_data): + # As we do not create ports for floating IPs in NSX, + # this is a no-op driver + pass + + def _nsx_delete_fip_port(self, context, port_data): + # As we do not create ports for floating IPs in NSX, + # this is a no-op driver + pass + + def _extend_fault_map(self): + """Extends the Neutron Fault Map. + + Exceptions specific to the NSX Plugin are mapped to standard + HTTP Exceptions. + """ + base.FAULT_MAP.update({nsx_exc.InvalidNovaZone: + webob.exc.HTTPBadRequest, + nsx_exc.NoMorePortsException: + webob.exc.HTTPBadRequest, + nsx_exc.MaintenanceInProgress: + webob.exc.HTTPServiceUnavailable, + nsx_exc.InvalidSecurityCertificate: + webob.exc.HTTPBadRequest}) + + def _validate_provider_create(self, context, network): + if not attr.is_attr_set(network.get(mpnet.SEGMENTS)): + return + + for segment in network[mpnet.SEGMENTS]: + network_type = segment.get(pnet.NETWORK_TYPE) + physical_network = segment.get(pnet.PHYSICAL_NETWORK) + segmentation_id = segment.get(pnet.SEGMENTATION_ID) + network_type_set = attr.is_attr_set(network_type) + segmentation_id_set = attr.is_attr_set(segmentation_id) + + err_msg = None + if not network_type_set: + err_msg = _("%s required") % pnet.NETWORK_TYPE + elif network_type in (c_utils.NetworkTypes.GRE, + c_utils.NetworkTypes.STT, + c_utils.NetworkTypes.FLAT): + if segmentation_id_set: + err_msg = _("Segmentation ID cannot be specified with " + "flat network type") + elif network_type == c_utils.NetworkTypes.VLAN: + if not segmentation_id_set: + err_msg = _("Segmentation ID must be specified with " + "vlan network type") + elif (segmentation_id_set and + not utils.is_valid_vlan_tag(segmentation_id)): + err_msg = (_("%(segmentation_id)s out of range " + "(%(min_id)s through %(max_id)s)") % + {'segmentation_id': segmentation_id, + 'min_id': constants.MIN_VLAN_TAG, + 'max_id': constants.MAX_VLAN_TAG}) + else: + # Verify segment is not already 
allocated + bindings = nsx_db.get_network_bindings_by_vlanid( + context.session, segmentation_id) + if bindings: + raise n_exc.VlanIdInUse( + vlan_id=segmentation_id, + physical_network=physical_network) + elif network_type == c_utils.NetworkTypes.L3_EXT: + if (segmentation_id_set and + not utils.is_valid_vlan_tag(segmentation_id)): + err_msg = (_("%(segmentation_id)s out of range " + "(%(min_id)s through %(max_id)s)") % + {'segmentation_id': segmentation_id, + 'min_id': constants.MIN_VLAN_TAG, + 'max_id': constants.MAX_VLAN_TAG}) + else: + err_msg = (_("%(net_type_param)s %(net_type_value)s not " + "supported") % + {'net_type_param': pnet.NETWORK_TYPE, + 'net_type_value': network_type}) + if err_msg: + raise n_exc.InvalidInput(error_message=err_msg) + # TODO(salvatore-orlando): Validate transport zone uuid + # which should be specified in physical_network + + def _extend_network_dict_provider(self, context, network, + multiprovider=None, bindings=None): + if not bindings: + bindings = nsx_db.get_network_bindings(context.session, + network['id']) + if not multiprovider: + multiprovider = nsx_db.is_multiprovider_network(context.session, + network['id']) + # With NSX plugin 'normal' overlay networks will have no binding + # TODO(salvatore-orlando) make sure users can specify a distinct + # phy_uuid as 'provider network' for STT net type + if bindings: + if not multiprovider: + # network came in through provider networks api + network[pnet.NETWORK_TYPE] = bindings[0].binding_type + network[pnet.PHYSICAL_NETWORK] = bindings[0].phy_uuid + network[pnet.SEGMENTATION_ID] = bindings[0].vlan_id + else: + # network came in through multiprovider networks api + network[mpnet.SEGMENTS] = [ + {pnet.NETWORK_TYPE: binding.binding_type, + pnet.PHYSICAL_NETWORK: binding.phy_uuid, + pnet.SEGMENTATION_ID: binding.vlan_id} + for binding in bindings] + + def _handle_lswitch_selection(self, context, cluster, network, + network_bindings, max_ports, + allow_extra_lswitches): + lswitches = 
nsx_utils.fetch_nsx_switches( + context.session, cluster, network.id) + try: + return [ls for ls in lswitches + if (ls['_relations']['LogicalSwitchStatus'] + ['lport_count'] < max_ports)].pop(0) + except IndexError: + # Too bad, no switch available + LOG.debug(_("No switch has available ports (%d checked)"), + len(lswitches)) + if allow_extra_lswitches: + # The 'main' logical switch is either the only one available + # or the one where the 'multi_lswitch' tag was set + while lswitches: + main_ls = lswitches.pop(0) + tag_dict = dict((x['scope'], x['tag']) + for x in main_ls['tags']) + if 'multi_lswitch' in tag_dict: + break + else: + # by construction this statement is hit if there is only one + # logical switch and the multi_lswitch tag has not been set. + # The tag must therefore be added. + tags = main_ls['tags'] + tags.append({'tag': 'True', 'scope': 'multi_lswitch'}) + switchlib.update_lswitch(cluster, + main_ls['uuid'], + main_ls['display_name'], + network['tenant_id'], + tags=tags) + transport_zone_config = self._convert_to_nsx_transport_zones( + cluster, network, bindings=network_bindings) + selected_lswitch = switchlib.create_lswitch( + cluster, network.id, network.tenant_id, + "%s-ext-%s" % (network.name, len(lswitches)), + transport_zone_config) + # add a mapping between the neutron network and the newly + # created logical switch + nsx_db.add_neutron_nsx_network_mapping( + context.session, network.id, selected_lswitch['uuid']) + return selected_lswitch + else: + LOG.error(_("Maximum number of logical ports reached for " + "logical network %s"), network.id) + raise nsx_exc.NoMorePortsException(network=network.id) + + def _convert_to_nsx_transport_zones(self, cluster, network=None, + bindings=None): + nsx_transport_zones_config = [] + + # Convert fields from provider request to nsx format + if (network and not attr.is_attr_set( + network.get(mpnet.SEGMENTS))): + return [{"zone_uuid": cluster.default_tz_uuid, + "transport_type": 
cfg.CONF.NSX.default_transport_type}] + + # Convert fields from db to nsx format + if bindings: + transport_entry = {} + for binding in bindings: + if binding.binding_type in [c_utils.NetworkTypes.FLAT, + c_utils.NetworkTypes.VLAN]: + transport_entry['transport_type'] = ( + c_utils.NetworkTypes.BRIDGE) + transport_entry['binding_config'] = {} + vlan_id = binding.vlan_id + if vlan_id: + transport_entry['binding_config'] = ( + {'vlan_translation': [{'transport': vlan_id}]}) + else: + transport_entry['transport_type'] = binding.binding_type + transport_entry['zone_uuid'] = binding.phy_uuid + nsx_transport_zones_config.append(transport_entry) + return nsx_transport_zones_config + + for transport_zone in network.get(mpnet.SEGMENTS): + for value in [pnet.NETWORK_TYPE, pnet.PHYSICAL_NETWORK, + pnet.SEGMENTATION_ID]: + if transport_zone.get(value) == attr.ATTR_NOT_SPECIFIED: + transport_zone[value] = None + + transport_entry = {} + transport_type = transport_zone.get(pnet.NETWORK_TYPE) + if transport_type in [c_utils.NetworkTypes.FLAT, + c_utils.NetworkTypes.VLAN]: + transport_entry['transport_type'] = c_utils.NetworkTypes.BRIDGE + transport_entry['binding_config'] = {} + vlan_id = transport_zone.get(pnet.SEGMENTATION_ID) + if vlan_id: + transport_entry['binding_config'] = ( + {'vlan_translation': [{'transport': vlan_id}]}) + else: + transport_entry['transport_type'] = transport_type + transport_entry['zone_uuid'] = ( + transport_zone[pnet.PHYSICAL_NETWORK] or + cluster.default_tz_uuid) + nsx_transport_zones_config.append(transport_entry) + return nsx_transport_zones_config + + def _convert_to_transport_zones_dict(self, network): + """Converts the provider request body to multiprovider. + Returns: True if request is multiprovider False if provider + and None if neither. 
+ """ + if any(attr.is_attr_set(network.get(f)) + for f in (pnet.NETWORK_TYPE, pnet.PHYSICAL_NETWORK, + pnet.SEGMENTATION_ID)): + if attr.is_attr_set(network.get(mpnet.SEGMENTS)): + raise mpnet.SegmentsSetInConjunctionWithProviders() + # convert to transport zone list + network[mpnet.SEGMENTS] = [ + {pnet.NETWORK_TYPE: network[pnet.NETWORK_TYPE], + pnet.PHYSICAL_NETWORK: network[pnet.PHYSICAL_NETWORK], + pnet.SEGMENTATION_ID: network[pnet.SEGMENTATION_ID]}] + del network[pnet.NETWORK_TYPE] + del network[pnet.PHYSICAL_NETWORK] + del network[pnet.SEGMENTATION_ID] + return False + if attr.is_attr_set(mpnet.SEGMENTS): + return True + + def create_network(self, context, network): + net_data = network['network'] + tenant_id = self._get_tenant_id_for_create(context, net_data) + self._ensure_default_security_group(context, tenant_id) + # Process the provider network extension + provider_type = self._convert_to_transport_zones_dict(net_data) + self._validate_provider_create(context, net_data) + # Replace ATTR_NOT_SPECIFIED with None before sending to NSX + for key, value in network['network'].iteritems(): + if value is attr.ATTR_NOT_SPECIFIED: + net_data[key] = None + # FIXME(arosen) implement admin_state_up = False in NSX + if net_data['admin_state_up'] is False: + LOG.warning(_("Network with admin_state_up=False are not yet " + "supported by this plugin. Ignoring setting for " + "network %s"), net_data.get('name', '')) + transport_zone_config = self._convert_to_nsx_transport_zones( + self.cluster, net_data) + external = net_data.get(ext_net_extn.EXTERNAL) + # NOTE(salv-orlando): Pre-generating uuid for Neutron + # network. 
This will be removed once the network create operation + # becomes an asynchronous task + net_data['id'] = str(uuid.uuid4()) + if (not attr.is_attr_set(external) or + attr.is_attr_set(external) and not external): + lswitch = switchlib.create_lswitch( + self.cluster, net_data['id'], + tenant_id, net_data.get('name'), + transport_zone_config, + shared=net_data.get(attr.SHARED)) + + with context.session.begin(subtransactions=True): + new_net = super(NsxPluginV2, self).create_network(context, + network) + # Process port security extension + self._process_network_port_security_create( + context, net_data, new_net) + # DB Operations for setting the network as external + self._process_l3_create(context, new_net, net_data) + # Process QoS queue extension + net_queue_id = net_data.get(qos.QUEUE) + if net_queue_id: + # Raises if not found + self.get_qos_queue(context, net_queue_id) + self._process_network_queue_mapping( + context, new_net, net_queue_id) + # Add mapping between neutron network and NSX switch + if (not attr.is_attr_set(external) or + attr.is_attr_set(external) and not external): + nsx_db.add_neutron_nsx_network_mapping( + context.session, new_net['id'], + lswitch['uuid']) + if (net_data.get(mpnet.SEGMENTS) and + isinstance(provider_type, bool)): + net_bindings = [] + for tz in net_data[mpnet.SEGMENTS]: + segmentation_id = tz.get(pnet.SEGMENTATION_ID, 0) + segmentation_id_set = attr.is_attr_set(segmentation_id) + if not segmentation_id_set: + segmentation_id = 0 + net_bindings.append(nsx_db.add_network_binding( + context.session, new_net['id'], + tz.get(pnet.NETWORK_TYPE), + tz.get(pnet.PHYSICAL_NETWORK), + segmentation_id)) + if provider_type: + nsx_db.set_multiprovider_network(context.session, + new_net['id']) + self._extend_network_dict_provider(context, new_net, + provider_type, + net_bindings) + self.handle_network_dhcp_access(context, new_net, + action='create_network') + return new_net + + def delete_network(self, context, id): + external = 
self._network_is_external(context, id) + # Before deleting ports, ensure the peer of a NSX logical + # port with a patch attachment is removed too + port_filter = {'network_id': [id], + 'device_owner': [constants.DEVICE_OWNER_ROUTER_INTF]} + router_iface_ports = self.get_ports(context, filters=port_filter) + for port in router_iface_ports: + nsx_switch_id, nsx_port_id = nsx_utils.get_nsx_switch_and_port_id( + context.session, self.cluster, id) + # Before removing entry from Neutron DB, retrieve NSX switch + # identifiers for removing them from backend + if not external: + lswitch_ids = nsx_utils.get_nsx_switch_ids( + context.session, self.cluster, id) + with context.session.begin(subtransactions=True): + self._process_l3_delete(context, id) + super(NsxPluginV2, self).delete_network(context, id) + + # clean up network owned ports + for port in router_iface_ports: + try: + if nsx_port_id: + nsx_router_id = nsx_utils.get_nsx_router_id( + context.session, self.cluster, port['device_id']) + routerlib.delete_peer_router_lport(self.cluster, + nsx_router_id, + nsx_switch_id, + nsx_port_id) + else: + LOG.warning(_("A nsx lport identifier was not found for " + "neutron port '%s'. 
Unable to remove " + "the peer router port for this switch port"), + port['id']) + + except (TypeError, KeyError, + api_exc.NsxApiException, + api_exc.ResourceNotFound): + # Do not raise because the issue might as well be that the + # router has already been deleted, so there would be nothing + # to do here + LOG.warning(_("Ignoring exception as this means the peer for " + "port '%s' has already been deleted."), + nsx_port_id) + + # Do not go to NSX for external networks + if not external: + try: + switchlib.delete_networks(self.cluster, id, lswitch_ids) + LOG.debug(_("delete_network completed for tenant: %s"), + context.tenant_id) + except n_exc.NotFound: + LOG.warning(_("Did not found lswitch %s in NSX"), id) + self.handle_network_dhcp_access(context, id, action='delete_network') + + def get_network(self, context, id, fields=None): + with context.session.begin(subtransactions=True): + # goto to the plugin DB and fetch the network + network = self._get_network(context, id) + if (self.nsx_sync_opts.always_read_status or + fields and 'status' in fields): + # External networks are not backed by nsx lswitches + if not network.external: + # Perform explicit state synchronization + self._synchronizer.synchronize_network(context, network) + # Don't do field selection here otherwise we won't be able + # to add provider networks fields + net_result = self._make_network_dict(network) + self._extend_network_dict_provider(context, net_result) + return self._fields(net_result, fields) + + def get_networks(self, context, filters=None, fields=None, + sorts=None, limit=None, marker=None, + page_reverse=False): + filters = filters or {} + with context.session.begin(subtransactions=True): + networks = ( + super(NsxPluginV2, self).get_networks( + context, filters, fields, sorts, + limit, marker, page_reverse)) + for net in networks: + self._extend_network_dict_provider(context, net) + return [self._fields(network, fields) for network in networks] + + def update_network(self, 
context, id, network): + pnet._raise_if_updates_provider_attributes(network['network']) + if network["network"].get("admin_state_up") is False: + raise NotImplementedError(_("admin_state_up=False networks " + "are not supported.")) + with context.session.begin(subtransactions=True): + net = super(NsxPluginV2, self).update_network(context, id, network) + if psec.PORTSECURITY in network['network']: + self._process_network_port_security_update( + context, network['network'], net) + net_queue_id = network['network'].get(qos.QUEUE) + if net_queue_id: + self._delete_network_queue_mapping(context, id) + self._process_network_queue_mapping(context, net, net_queue_id) + self._process_l3_update(context, net, network['network']) + self._extend_network_dict_provider(context, net) + # If provided, update port name on backend; treat backend failures as + # not critical (log error, but do not raise) + if 'name' in network['network']: + # in case of chained switches update name only for the first one + nsx_switch_ids = nsx_utils.get_nsx_switch_ids( + context.session, self.cluster, id) + if not nsx_switch_ids or len(nsx_switch_ids) < 1: + LOG.warn(_("Unable to find NSX mappings for neutron " + "network:%s"), id) + try: + switchlib.update_lswitch(self.cluster, + nsx_switch_ids[0], + network['network']['name']) + except api_exc.NsxApiException as e: + LOG.warn(_("Logical switch update on NSX backend failed. " + "Neutron network id:%(net_id)s; " + "NSX lswitch id:%(lswitch_id)s;" + "Error:%(error)s"), + {'net_id': id, 'lswitch_id': nsx_switch_ids[0], + 'error': e}) + + return net + + def create_port(self, context, port): + # If PORTSECURITY is not the default value ATTR_NOT_SPECIFIED + # then we pass the port to the policy engine. The reason why we don't + # pass the value to the policy engine when the port is + # ATTR_NOT_SPECIFIED is for the case where a port is created on a + # shared network that is not owned by the tenant. + port_data = port['port'] + # Set port status as 'DOWN'. 
This will be updated by backend sync. + port_data['status'] = constants.PORT_STATUS_DOWN + with context.session.begin(subtransactions=True): + # First we allocate port in neutron database + neutron_db = super(NsxPluginV2, self).create_port(context, port) + neutron_port_id = neutron_db['id'] + # Update fields obtained from neutron db (eg: MAC address) + port["port"].update(neutron_db) + self.handle_port_metadata_access(context, neutron_db) + # port security extension checks + (port_security, has_ip) = self._determine_port_security_and_has_ip( + context, port_data) + port_data[psec.PORTSECURITY] = port_security + self._process_port_port_security_create( + context, port_data, neutron_db) + # allowed address pair checks + if attr.is_attr_set(port_data.get(addr_pair.ADDRESS_PAIRS)): + if not port_security: + raise addr_pair.AddressPairAndPortSecurityRequired() + else: + self._process_create_allowed_address_pairs( + context, neutron_db, + port_data[addr_pair.ADDRESS_PAIRS]) + else: + # remove ATTR_NOT_SPECIFIED + port_data[addr_pair.ADDRESS_PAIRS] = None + + # security group extension checks + if port_security and has_ip: + self._ensure_default_security_group_on_port(context, port) + elif attr.is_attr_set(port_data.get(ext_sg.SECURITYGROUPS)): + raise psec.PortSecurityAndIPRequiredForSecurityGroups() + port_data[ext_sg.SECURITYGROUPS] = ( + self._get_security_groups_on_port(context, port)) + self._process_port_create_security_group( + context, port_data, port_data[ext_sg.SECURITYGROUPS]) + # QoS extension checks + port_queue_id = self._check_for_queue_and_create( + context, port_data) + self._process_port_queue_mapping( + context, port_data, port_queue_id) + if (isinstance(port_data.get(mac_ext.MAC_LEARNING), bool)): + self._create_mac_learning_state(context, port_data) + elif mac_ext.MAC_LEARNING in port_data: + port_data.pop(mac_ext.MAC_LEARNING) + self._process_portbindings_create_and_update(context, + port['port'], + port_data) + # DB Operation is complete, perform 
NSX operation + try: + port_data = port['port'].copy() + port_create_func = self._port_drivers['create'].get( + port_data['device_owner'], + self._port_drivers['create']['default']) + port_create_func(context, port_data) + LOG.debug(_("port created on NSX backend for tenant " + "%(tenant_id)s: (%(id)s)"), port_data) + except n_exc.NotFound: + LOG.warning(_("Logical switch for network %s was not " + "found in NSX."), port_data['network_id']) + # Put port in error on neutron DB + with context.session.begin(subtransactions=True): + port = self._get_port(context, neutron_port_id) + port_data['status'] = constants.PORT_STATUS_ERROR + port['status'] = port_data['status'] + context.session.add(port) + except Exception: + # Port must be removed from neutron DB + with excutils.save_and_reraise_exception(): + LOG.error(_("Unable to create port or set port " + "attachment in NSX.")) + with context.session.begin(subtransactions=True): + self._delete_port(context, neutron_port_id) + + self.handle_port_dhcp_access(context, port_data, action='create_port') + return port_data + + def update_port(self, context, id, port): + delete_security_groups = self._check_update_deletes_security_groups( + port) + has_security_groups = self._check_update_has_security_groups(port) + delete_addr_pairs = self._check_update_deletes_allowed_address_pairs( + port) + has_addr_pairs = self._check_update_has_allowed_address_pairs(port) + + with context.session.begin(subtransactions=True): + ret_port = super(NsxPluginV2, self).update_port( + context, id, port) + # Save current mac learning state to check whether it's + # being updated or not + old_mac_learning_state = ret_port.get(mac_ext.MAC_LEARNING) + # copy values over - except fixed_ips as + # they've already been processed + port['port'].pop('fixed_ips', None) + ret_port.update(port['port']) + tenant_id = self._get_tenant_id_for_create(context, ret_port) + + # populate port_security setting + if psec.PORTSECURITY not in port['port']: + 
ret_port[psec.PORTSECURITY] = self._get_port_security_binding( + context, id) + has_ip = self._ip_on_port(ret_port) + # validate port security and allowed address pairs + if not ret_port[psec.PORTSECURITY]: + # has address pairs in request + if has_addr_pairs: + raise addr_pair.AddressPairAndPortSecurityRequired() + elif not delete_addr_pairs: + # check if address pairs are in db + ret_port[addr_pair.ADDRESS_PAIRS] = ( + self.get_allowed_address_pairs(context, id)) + if ret_port[addr_pair.ADDRESS_PAIRS]: + raise addr_pair.AddressPairAndPortSecurityRequired() + + if (delete_addr_pairs or has_addr_pairs): + # delete address pairs and read them in + self._delete_allowed_address_pairs(context, id) + self._process_create_allowed_address_pairs( + context, ret_port, ret_port[addr_pair.ADDRESS_PAIRS]) + # checks if security groups were updated adding/modifying + # security groups, port security is set and port has ip + if not (has_ip and ret_port[psec.PORTSECURITY]): + if has_security_groups: + raise psec.PortSecurityAndIPRequiredForSecurityGroups() + # Update did not have security groups passed in. Check + # that port does not have any security groups already on it. + filters = {'port_id': [id]} + security_groups = ( + super(NsxPluginV2, self)._get_port_security_group_bindings( + context, filters) + ) + if security_groups and not delete_security_groups: + raise psec.PortSecurityPortHasSecurityGroup() + + if (delete_security_groups or has_security_groups): + # delete the port binding and read it with the new rules. 
+ self._delete_port_security_group_bindings(context, id) + sgids = self._get_security_groups_on_port(context, port) + self._process_port_create_security_group(context, ret_port, + sgids) + + if psec.PORTSECURITY in port['port']: + self._process_port_port_security_update( + context, port['port'], ret_port) + + port_queue_id = self._check_for_queue_and_create( + context, ret_port) + # Populate the mac learning attribute + new_mac_learning_state = port['port'].get(mac_ext.MAC_LEARNING) + if (new_mac_learning_state is not None and + old_mac_learning_state != new_mac_learning_state): + self._update_mac_learning_state(context, id, + new_mac_learning_state) + ret_port[mac_ext.MAC_LEARNING] = new_mac_learning_state + self._delete_port_queue_mapping(context, ret_port['id']) + self._process_port_queue_mapping(context, ret_port, + port_queue_id) + LOG.debug(_("Updating port: %s"), port) + nsx_switch_id, nsx_port_id = nsx_utils.get_nsx_switch_and_port_id( + context.session, self.cluster, id) + # Convert Neutron security groups identifiers into NSX security + # profiles identifiers + nsx_sec_profile_ids = [ + nsx_utils.get_nsx_security_group_id( + context.session, self.cluster, neutron_sg_id) for + neutron_sg_id in (ret_port[ext_sg.SECURITYGROUPS] or [])] + + if nsx_port_id: + try: + switchlib.update_port( + self.cluster, nsx_switch_id, nsx_port_id, + id, tenant_id, + ret_port['name'], + ret_port['device_id'], + ret_port['admin_state_up'], + ret_port['mac_address'], + ret_port['fixed_ips'], + ret_port[psec.PORTSECURITY], + nsx_sec_profile_ids, + ret_port[qos.QUEUE], + ret_port.get(mac_ext.MAC_LEARNING), + ret_port.get(addr_pair.ADDRESS_PAIRS)) + + # Update the port status from nsx. If we fail here hide it + # since the port was successfully updated but we were not + # able to retrieve the status. + ret_port['status'] = switchlib.get_port_status( + self.cluster, nsx_switch_id, + nsx_port_id) + # FIXME(arosen) improve exception handling. 
+ except Exception: + ret_port['status'] = constants.PORT_STATUS_ERROR + LOG.exception(_("Unable to update port id: %s."), + nsx_port_id) + + # If nsx_port_id is not in database or in nsx put in error state. + else: + ret_port['status'] = constants.PORT_STATUS_ERROR + + self._process_portbindings_create_and_update(context, + port['port'], + ret_port) + return ret_port + + def delete_port(self, context, id, l3_port_check=True, + nw_gw_port_check=True): + """Deletes a port on a specified Virtual Network. + + If the port contains a remote interface attachment, the remote + interface is first un-plugged and then the port is deleted. + + :returns: None + :raises: exception.PortInUse + :raises: exception.PortNotFound + :raises: exception.NetworkNotFound + """ + # if needed, check to see if this is a port owned by + # a l3 router. If so, we should prevent deletion here + if l3_port_check: + self.prevent_l3_port_deletion(context, id) + neutron_db_port = self.get_port(context, id) + # Perform the same check for ports owned by layer-2 gateways + if nw_gw_port_check: + self.prevent_network_gateway_port_deletion(context, + neutron_db_port) + port_delete_func = self._port_drivers['delete'].get( + neutron_db_port['device_owner'], + self._port_drivers['delete']['default']) + + port_delete_func(context, neutron_db_port) + self.disassociate_floatingips(context, id) + with context.session.begin(subtransactions=True): + queue = self._get_port_queue_bindings(context, {'port_id': [id]}) + # metadata_dhcp_host_route + self.handle_port_metadata_access( + context, neutron_db_port, is_delete=True) + super(NsxPluginV2, self).delete_port(context, id) + # Delete qos queue if possible + if queue: + self.delete_qos_queue(context, queue[0]['queue_id'], False) + self.handle_port_dhcp_access( + context, neutron_db_port, action='delete_port') + + def get_port(self, context, id, fields=None): + with context.session.begin(subtransactions=True): + if (self.nsx_sync_opts.always_read_status or + fields 
and 'status' in fields): + # Perform explicit state synchronization + db_port = self._get_port(context, id) + self._synchronizer.synchronize_port( + context, db_port) + return self._make_port_dict(db_port, fields) + else: + return super(NsxPluginV2, self).get_port(context, id, fields) + + def get_router(self, context, id, fields=None): + if (self.nsx_sync_opts.always_read_status or + fields and 'status' in fields): + db_router = self._get_router(context, id) + # Perform explicit state synchronization + self._synchronizer.synchronize_router( + context, db_router) + return self._make_router_dict(db_router, fields) + else: + return super(NsxPluginV2, self).get_router(context, id, fields) + + def _create_lrouter(self, context, router, nexthop): + tenant_id = self._get_tenant_id_for_create(context, router) + distributed = router.get('distributed') + try: + lrouter = routerlib.create_lrouter( + self.cluster, router['id'], + tenant_id, router['name'], nexthop, + distributed=attr.is_attr_set(distributed) and distributed) + except nsx_exc.InvalidVersion: + msg = _("Cannot create a distributed router with the NSX " + "platform currently in execution. Please, try " + "without specifying the 'distributed' attribute.") + LOG.exception(msg) + raise n_exc.BadRequest(resource='router', msg=msg) + except api_exc.NsxApiException: + err_msg = _("Unable to create logical router on NSX Platform") + LOG.exception(err_msg) + raise nsx_exc.NsxPluginException(err_msg=err_msg) + + # Create the port here - and update it later if we have gw_info + try: + self._create_and_attach_router_port( + self.cluster, context, lrouter['uuid'], {'fake_ext_gw': True}, + "L3GatewayAttachment", + self.cluster.default_l3_gw_service_uuid) + except nsx_exc.NsxPluginException: + LOG.exception(_("Unable to create L3GW port on logical router " + "%(router_uuid)s. 
Verify Default Layer-3 Gateway " + "service %(def_l3_gw_svc)s id is correct"), + {'router_uuid': lrouter['uuid'], + 'def_l3_gw_svc': + self.cluster.default_l3_gw_service_uuid}) + # Try and remove logical router from NSX + routerlib.delete_lrouter(self.cluster, lrouter['uuid']) + # Return user a 500 with an apter message + raise nsx_exc.NsxPluginException( + err_msg=(_("Unable to create router %s on NSX backend") % + router['id'])) + lrouter['status'] = plugin_const.ACTIVE + return lrouter + + def create_router(self, context, router): + # NOTE(salvatore-orlando): We completely override this method in + # order to be able to use the NSX ID as Neutron ID + # TODO(salvatore-orlando): Propose upstream patch for allowing + # 3rd parties to specify IDs as we do with l2 plugin + r = router['router'] + has_gw_info = False + tenant_id = self._get_tenant_id_for_create(context, r) + # default value to set - nsx wants it (even if we don't have it) + nexthop = NSX_DEFAULT_NEXTHOP + # if external gateway info are set, then configure nexthop to + # default external gateway + if 'external_gateway_info' in r and r.get('external_gateway_info'): + has_gw_info = True + gw_info = r['external_gateway_info'] + del r['external_gateway_info'] + # The following DB read will be performed again when updating + # gateway info. This is not great, but still better than + # creating NSX router here and updating it later + network_id = (gw_info.get('network_id', None) if gw_info + else None) + if network_id: + ext_net = self._get_network(context, network_id) + if not ext_net.external: + msg = (_("Network '%s' is not a valid external " + "network") % network_id) + raise n_exc.BadRequest(resource='router', msg=msg) + if ext_net.subnets: + ext_subnet = ext_net.subnets[0] + nexthop = ext_subnet.gateway_ip + # NOTE(salv-orlando): Pre-generating uuid for Neutron + # router. 
This will be removed once the router create operation + # becomes an asynchronous task + neutron_router_id = str(uuid.uuid4()) + r['id'] = neutron_router_id + lrouter = self._create_lrouter(context, r, nexthop) + # Update 'distributed' with value returned from NSX + # This will be useful for setting the value if the API request + # did not specify any value for the 'distributed' attribute + # Platforms older than 3.x do not support the attribute + r['distributed'] = lrouter.get('distributed', False) + # TODO(salv-orlando): Deal with backend object removal in case + # of db failures + with context.session.begin(subtransactions=True): + # Transaction nesting is needed to avoid foreign key violations + # when processing the distributed router binding + with context.session.begin(subtransactions=True): + router_db = l3_db.Router(id=neutron_router_id, + tenant_id=tenant_id, + name=r['name'], + admin_state_up=r['admin_state_up'], + status=lrouter['status']) + context.session.add(router_db) + self._process_nsx_router_create(context, router_db, r) + # Ensure neutron router is moved into the transaction's buffer + context.session.flush() + # Add mapping between neutron and nsx identifiers + nsx_db.add_neutron_nsx_router_mapping( + context.session, router_db['id'], lrouter['uuid']) + + if has_gw_info: + # NOTE(salv-orlando): This operation has been moved out of the + # database transaction since it performs several NSX queries, + # ithis ncreasing the risk of deadlocks between eventlet and + # sqlalchemy operations. 
+ # Set external gateway and remove router in case of failure + try: + self._update_router_gw_info(context, router_db['id'], gw_info) + except (n_exc.NeutronException, api_exc.NsxApiException): + with excutils.save_and_reraise_exception(): + # As setting gateway failed, the router must be deleted + # in order to ensure atomicity + router_id = router_db['id'] + LOG.warn(_("Failed to set gateway info for router being " + "created:%s - removing router"), router_id) + self.delete_router(context, router_id) + LOG.info(_("Create router failed while setting external " + "gateway. Router:%s has been removed from " + "DB and backend"), + router_id) + return self._make_router_dict(router_db) + + def _update_lrouter(self, context, router_id, name, nexthop, routes=None): + nsx_router_id = nsx_utils.get_nsx_router_id( + context.session, self.cluster, router_id) + return routerlib.update_lrouter( + self.cluster, nsx_router_id, name, + nexthop, routes=routes) + + def _update_lrouter_routes(self, context, router_id, routes): + nsx_router_id = nsx_utils.get_nsx_router_id( + context.session, self.cluster, router_id) + routerlib.update_explicit_routes_lrouter( + self.cluster, nsx_router_id, routes) + + def update_router(self, context, router_id, router): + # Either nexthop is updated or should be kept as it was before + r = router['router'] + nexthop = None + if 'external_gateway_info' in r and r.get('external_gateway_info'): + gw_info = r['external_gateway_info'] + # The following DB read will be performed again when updating + # gateway info. 
This is not great, but still better than + # creating NSX router here and updating it later + network_id = (gw_info.get('network_id', None) if gw_info + else None) + if network_id: + ext_net = self._get_network(context, network_id) + if not ext_net.external: + msg = (_("Network '%s' is not a valid external " + "network") % network_id) + raise n_exc.BadRequest(resource='router', msg=msg) + if ext_net.subnets: + ext_subnet = ext_net.subnets[0] + nexthop = ext_subnet.gateway_ip + try: + for route in r.get('routes', []): + if route['destination'] == '0.0.0.0/0': + msg = _("'routes' cannot contain route '0.0.0.0/0', " + "this must be updated through the default " + "gateway attribute") + raise n_exc.BadRequest(resource='router', msg=msg) + previous_routes = self._update_lrouter( + context, router_id, r.get('name'), + nexthop, routes=r.get('routes')) + # NOTE(salv-orlando): The exception handling below is not correct, but + # unfortunately nsxlib raises a neutron notfound exception when an + # object is not found in the underlying backend + except n_exc.NotFound: + # Put the router in ERROR status + with context.session.begin(subtransactions=True): + router_db = self._get_router(context, router_id) + router_db['status'] = constants.NET_STATUS_ERROR + raise nsx_exc.NsxPluginException( + err_msg=_("Logical router %s not found " + "on NSX Platform") % router_id) + except api_exc.NsxApiException: + raise nsx_exc.NsxPluginException( + err_msg=_("Unable to update logical router on NSX Platform")) + except nsx_exc.InvalidVersion: + msg = _("Request cannot contain 'routes' with the NSX " + "platform currently in execution. 
Please, try " + "without specifying the static routes.") + LOG.exception(msg) + raise n_exc.BadRequest(resource='router', msg=msg) + try: + return super(NsxPluginV2, self).update_router(context, + router_id, router) + except (extraroute.InvalidRoutes, + extraroute.RouterInterfaceInUseByRoute, + extraroute.RoutesExhausted): + with excutils.save_and_reraise_exception(): + # revert changes made to NSX + self._update_lrouter_routes( + context, router_id, previous_routes) + + def _delete_lrouter(self, context, router_id, nsx_router_id): + # The neutron router id (router_id) is ignored in this routine, + # but used in plugins deriving from this one + routerlib.delete_lrouter(self.cluster, nsx_router_id) + + def delete_router(self, context, router_id): + with context.session.begin(subtransactions=True): + # TODO(salv-orlando): This call should have no effect on delete + # router, but if it does, it should not happen within a + # transaction, and it should be restored on rollback + self.handle_router_metadata_access( + context, router_id, interface=None) + # Pre-delete checks + # NOTE(salv-orlando): These checks will be repeated anyway when + # calling the superclass. This is wasteful, but is the simplest + # way of ensuring a consistent removal of the router both in + # the neutron Database and in the NSX backend. + # TODO(salv-orlando): split pre-delete checks and actual + # deletion in superclass. 
+ + # Ensure that the router is not used + fips = self.get_floatingips_count( + context.elevated(), filters={'router_id': [router_id]}) + if fips: + raise l3.RouterInUse(router_id=router_id) + + device_filter = {'device_id': [router_id], + 'device_owner': [l3_db.DEVICE_OWNER_ROUTER_INTF]} + ports = self._core_plugin.get_ports_count(context.elevated(), + filters=device_filter) + if ports: + raise l3.RouterInUse(router_id=router_id) + + nsx_router_id = nsx_utils.get_nsx_router_id( + context.session, self.cluster, router_id) + # It is safe to remove the router from the database, so remove it + # from the backend + try: + self._delete_lrouter(context, router_id, nsx_router_id) + except n_exc.NotFound: + # This is not a fatal error, but needs to be logged + LOG.warning(_("Logical router '%s' not found " + "on NSX Platform"), router_id) + except api_exc.NsxApiException: + raise nsx_exc.NsxPluginException( + err_msg=(_("Unable to delete logical router '%s' " + "on NSX Platform") % nsx_router_id)) + # Remove the NSX mapping first in order to ensure a mapping to + # a non-existent NSX router is not left in the DB in case of + # failure while removing the router from the neutron DB + try: + nsx_db.delete_neutron_nsx_router_mapping( + context.session, router_id) + except db_exc.DBError as d_exc: + # Do not make this error fatal + LOG.warn(_("Unable to remove NSX mapping for Neutron router " + "%(router_id)s because of the following exception:" + "%(d_exc)s"), {'router_id': router_id, + 'd_exc': str(d_exc)}) + # Perform the actual delete on the Neutron DB + super(NsxPluginV2, self).delete_router(context, router_id) + + def _add_subnet_snat_rule(self, context, router, subnet): + gw_port = router.gw_port + if gw_port and router.enable_snat: + # There is a change gw_port might have multiple IPs + # In that case we will consider only the first one + if gw_port.get('fixed_ips'): + snat_ip = gw_port['fixed_ips'][0]['ip_address'] + cidr_prefix = int(subnet['cidr'].split('/')[1]) + 
nsx_router_id = nsx_utils.get_nsx_router_id( + context.session, self.cluster, router['id']) + routerlib.create_lrouter_snat_rule( + self.cluster, nsx_router_id, snat_ip, snat_ip, + order=NSX_EXTGW_NAT_RULES_ORDER - cidr_prefix, + match_criteria={'source_ip_addresses': subnet['cidr']}) + + def _delete_subnet_snat_rule(self, context, router, subnet): + # Remove SNAT rule if external gateway is configured + if router.gw_port: + nsx_router_id = nsx_utils.get_nsx_router_id( + context.session, self.cluster, router['id']) + routerlib.delete_nat_rules_by_match( + self.cluster, nsx_router_id, "SourceNatRule", + max_num_expected=1, min_num_expected=1, + source_ip_addresses=subnet['cidr']) + + def add_router_interface(self, context, router_id, interface_info): + # When adding interface by port_id we need to create the + # peer port on the nsx logical router in this routine + port_id = interface_info.get('port_id') + router_iface_info = super(NsxPluginV2, self).add_router_interface( + context, router_id, interface_info) + # router_iface_info will always have a subnet_id attribute + subnet_id = router_iface_info['subnet_id'] + nsx_router_id = nsx_utils.get_nsx_router_id( + context.session, self.cluster, router_id) + if port_id: + port_data = self._get_port(context, port_id) + nsx_switch_id, nsx_port_id = nsx_utils.get_nsx_switch_and_port_id( + context.session, self.cluster, port_id) + # Unplug current attachment from lswitch port + switchlib.plug_vif_interface(self.cluster, nsx_switch_id, + nsx_port_id, "NoAttachment") + # Create logical router port and plug patch attachment + self._create_and_attach_router_port( + self.cluster, context, nsx_router_id, port_data, + "PatchAttachment", nsx_port_id, subnet_ids=[subnet_id]) + subnet = self._get_subnet(context, subnet_id) + # If there is an external gateway we need to configure the SNAT rule. 
+ # Fetch router from DB + router = self._get_router(context, router_id) + self._add_subnet_snat_rule(context, router, subnet) + routerlib.create_lrouter_nosnat_rule( + self.cluster, nsx_router_id, + order=NSX_NOSNAT_RULES_ORDER, + match_criteria={'destination_ip_addresses': subnet['cidr']}) + + # Ensure the NSX logical router has a connection to a 'metadata access' + # network (with a proxy listening on its DHCP port), by creating it + # if needed. + self.handle_router_metadata_access( + context, router_id, interface=router_iface_info) + LOG.debug(_("Add_router_interface completed for subnet:%(subnet_id)s " + "and router:%(router_id)s"), + {'subnet_id': subnet_id, 'router_id': router_id}) + return router_iface_info + + def remove_router_interface(self, context, router_id, interface_info): + # The code below is duplicated from base class, but comes handy + # as we need to retrieve the router port id before removing the port + subnet = None + subnet_id = None + if 'port_id' in interface_info: + port_id = interface_info['port_id'] + # find subnet_id - it is need for removing the SNAT rule + port = self._get_port(context, port_id) + if port.get('fixed_ips'): + subnet_id = port['fixed_ips'][0]['subnet_id'] + if not (port['device_owner'] == l3_db.DEVICE_OWNER_ROUTER_INTF and + port['device_id'] == router_id): + raise l3.RouterInterfaceNotFound(router_id=router_id, + port_id=port_id) + elif 'subnet_id' in interface_info: + subnet_id = interface_info['subnet_id'] + subnet = self._get_subnet(context, subnet_id) + rport_qry = context.session.query(models_v2.Port) + ports = rport_qry.filter_by( + device_id=router_id, + device_owner=l3_db.DEVICE_OWNER_ROUTER_INTF, + network_id=subnet['network_id']) + for p in ports: + if p['fixed_ips'][0]['subnet_id'] == subnet_id: + port_id = p['id'] + break + else: + raise l3.RouterInterfaceNotFoundForSubnet(router_id=router_id, + subnet_id=subnet_id) + # Finally remove the data from the Neutron DB + # This will also destroy the port on the 
logical switch + info = super(NsxPluginV2, self).remove_router_interface( + context, router_id, interface_info) + + try: + # Ensure the connection to the 'metadata access network' + # is removed (with the network) if this the last subnet + # on the router + self.handle_router_metadata_access( + context, router_id, interface=info) + if not subnet: + subnet = self._get_subnet(context, subnet_id) + router = self._get_router(context, router_id) + # If router is enabled_snat = False there are no snat rules to + # delete. + if router.enable_snat: + self._delete_subnet_snat_rule(context, router, subnet) + # Relax the minimum expected number as the nosnat rules + # do not exist in 2.x deployments + nsx_router_id = nsx_utils.get_nsx_router_id( + context.session, self.cluster, router_id) + routerlib.delete_nat_rules_by_match( + self.cluster, nsx_router_id, "NoSourceNatRule", + max_num_expected=1, min_num_expected=0, + destination_ip_addresses=subnet['cidr']) + except n_exc.NotFound: + LOG.error(_("Logical router resource %s not found " + "on NSX platform") % router_id) + except api_exc.NsxApiException: + raise nsx_exc.NsxPluginException( + err_msg=(_("Unable to update logical router" + "on NSX Platform"))) + return info + + def _retrieve_and_delete_nat_rules(self, context, floating_ip_address, + internal_ip, nsx_router_id, + min_num_rules_expected=0): + """Finds and removes NAT rules from a NSX router.""" + # NOTE(salv-orlando): The context parameter is ignored in this method + # but used by derived classes + try: + # Remove DNAT rule for the floating IP + routerlib.delete_nat_rules_by_match( + self.cluster, nsx_router_id, "DestinationNatRule", + max_num_expected=1, + min_num_expected=min_num_rules_expected, + destination_ip_addresses=floating_ip_address) + + # Remove SNAT rules for the floating IP + routerlib.delete_nat_rules_by_match( + self.cluster, nsx_router_id, "SourceNatRule", + max_num_expected=1, + min_num_expected=min_num_rules_expected, + 
source_ip_addresses=internal_ip) + routerlib.delete_nat_rules_by_match( + self.cluster, nsx_router_id, "SourceNatRule", + max_num_expected=1, + min_num_expected=min_num_rules_expected, + destination_ip_addresses=internal_ip) + + except api_exc.NsxApiException: + with excutils.save_and_reraise_exception(): + LOG.exception(_("An error occurred while removing NAT rules " + "on the NSX platform for floating ip:%s"), + floating_ip_address) + except nsx_exc.NatRuleMismatch: + # Do not surface to the user + LOG.warning(_("An incorrect number of matching NAT rules " + "was found on the NSX platform")) + + def _remove_floatingip_address(self, context, fip_db): + # Remove floating IP address from logical router port + # Fetch logical port of router's external gateway + router_id = fip_db.router_id + nsx_router_id = nsx_utils.get_nsx_router_id( + context.session, self.cluster, router_id) + nsx_gw_port_id = routerlib.find_router_gw_port( + context, self.cluster, nsx_router_id)['uuid'] + ext_neutron_port_db = self._get_port(context.elevated(), + fip_db.floating_port_id) + nsx_floating_ips = self._build_ip_address_list( + context.elevated(), ext_neutron_port_db['fixed_ips']) + routerlib.update_lrouter_port_ips(self.cluster, + nsx_router_id, + nsx_gw_port_id, + ips_to_add=[], + ips_to_remove=nsx_floating_ips) + + def _get_fip_assoc_data(self, context, fip, floatingip_db): + if (('fixed_ip_address' in fip and fip['fixed_ip_address']) and + not ('port_id' in fip and fip['port_id'])): + msg = _("fixed_ip_address cannot be specified without a port_id") + raise n_exc.BadRequest(resource='floatingip', msg=msg) + port_id = internal_ip = router_id = None + if 'port_id' in fip and fip['port_id']: + fip_qry = context.session.query(l3_db.FloatingIP) + port_id, internal_ip, router_id = self.get_assoc_data( + context, + fip, + floatingip_db['floating_network_id']) + try: + fip_qry.filter_by( + fixed_port_id=fip['port_id'], + floating_network_id=floatingip_db['floating_network_id'], + 
fixed_ip_address=internal_ip).one() + raise l3.FloatingIPPortAlreadyAssociated( + port_id=fip['port_id'], + fip_id=floatingip_db['id'], + floating_ip_address=floatingip_db['floating_ip_address'], + fixed_ip=floatingip_db['fixed_ip_address'], + net_id=floatingip_db['floating_network_id']) + except sa_exc.NoResultFound: + pass + return (port_id, internal_ip, router_id) + + def _update_fip_assoc(self, context, fip, floatingip_db, external_port): + """Update floating IP association data. + + Overrides method from base class. + The method is augmented for creating NAT rules in the process. + """ + # Store router currently serving the floating IP + old_router_id = floatingip_db.router_id + port_id, internal_ip, router_id = self._get_fip_assoc_data( + context, fip, floatingip_db) + floating_ip = floatingip_db['floating_ip_address'] + # If there's no association router_id will be None + if router_id: + nsx_router_id = nsx_utils.get_nsx_router_id( + context.session, self.cluster, router_id) + self._retrieve_and_delete_nat_rules( + context, floating_ip, internal_ip, nsx_router_id) + # Fetch logical port of router's external gateway + # Fetch logical port of router's external gateway + nsx_floating_ips = self._build_ip_address_list( + context.elevated(), external_port['fixed_ips']) + floating_ip = floatingip_db['floating_ip_address'] + # Retrieve and delete existing NAT rules, if any + if old_router_id: + nsx_old_router_id = nsx_utils.get_nsx_router_id( + context.session, self.cluster, old_router_id) + # Retrieve the current internal ip + _p, _s, old_internal_ip = self._internal_fip_assoc_data( + context, {'id': floatingip_db.id, + 'port_id': floatingip_db.fixed_port_id, + 'fixed_ip_address': floatingip_db.fixed_ip_address, + 'tenant_id': floatingip_db.tenant_id}) + nsx_gw_port_id = routerlib.find_router_gw_port( + context, self.cluster, nsx_old_router_id)['uuid'] + self._retrieve_and_delete_nat_rules( + context, floating_ip, old_internal_ip, nsx_old_router_id) + 
routerlib.update_lrouter_port_ips( + self.cluster, nsx_old_router_id, nsx_gw_port_id, + ips_to_add=[], ips_to_remove=nsx_floating_ips) + + if router_id: + nsx_gw_port_id = routerlib.find_router_gw_port( + context, self.cluster, nsx_router_id)['uuid'] + # Re-create NAT rules only if a port id is specified + if fip.get('port_id'): + try: + # Setup DNAT rules for the floating IP + routerlib.create_lrouter_dnat_rule( + self.cluster, nsx_router_id, internal_ip, + order=NSX_FLOATINGIP_NAT_RULES_ORDER, + match_criteria={'destination_ip_addresses': + floating_ip}) + # Setup SNAT rules for the floating IP + # Create a SNAT rule for enabling connectivity to the + # floating IP from the same network as the internal port + # Find subnet id for internal_ip from fixed_ips + internal_port = self._get_port(context, port_id) + # Cchecks not needed on statements below since otherwise + # _internal_fip_assoc_data would have raised + subnet_ids = [ip['subnet_id'] for ip in + internal_port['fixed_ips'] if + ip['ip_address'] == internal_ip] + internal_subnet_cidr = self._build_ip_address_list( + context, internal_port['fixed_ips'], + subnet_ids=subnet_ids)[0] + routerlib.create_lrouter_snat_rule( + self.cluster, nsx_router_id, floating_ip, floating_ip, + order=NSX_NOSNAT_RULES_ORDER - 1, + match_criteria={'source_ip_addresses': + internal_subnet_cidr, + 'destination_ip_addresses': + internal_ip}) + # setup snat rule such that src ip of a IP packet when + # using floating is the floating ip itself. 
+ routerlib.create_lrouter_snat_rule( + self.cluster, nsx_router_id, floating_ip, floating_ip, + order=NSX_FLOATINGIP_NAT_RULES_ORDER, + match_criteria={'source_ip_addresses': internal_ip}) + + # Add Floating IP address to router_port + routerlib.update_lrouter_port_ips( + self.cluster, nsx_router_id, nsx_gw_port_id, + ips_to_add=nsx_floating_ips, ips_to_remove=[]) + except api_exc.NsxApiException: + LOG.exception(_("An error occurred while creating NAT " + "rules on the NSX platform for floating " + "ip:%(floating_ip)s mapped to " + "internal ip:%(internal_ip)s"), + {'floating_ip': floating_ip, + 'internal_ip': internal_ip}) + msg = _("Failed to update NAT rules for floatingip update") + raise nsx_exc.NsxPluginException(err_msg=msg) + + floatingip_db.update({'fixed_ip_address': internal_ip, + 'fixed_port_id': port_id, + 'router_id': router_id}) + + def delete_floatingip(self, context, id): + fip_db = self._get_floatingip(context, id) + # Check whether the floating ip is associated or not + if fip_db.fixed_port_id: + nsx_router_id = nsx_utils.get_nsx_router_id( + context.session, self.cluster, fip_db.router_id) + self._retrieve_and_delete_nat_rules(context, + fip_db.floating_ip_address, + fip_db.fixed_ip_address, + nsx_router_id, + min_num_rules_expected=1) + # Remove floating IP address from logical router port + self._remove_floatingip_address(context, fip_db) + return super(NsxPluginV2, self).delete_floatingip(context, id) + + def disassociate_floatingips(self, context, port_id): + try: + fip_qry = context.session.query(l3_db.FloatingIP) + fip_dbs = fip_qry.filter_by(fixed_port_id=port_id) + + for fip_db in fip_dbs: + nsx_router_id = nsx_utils.get_nsx_router_id( + context.session, self.cluster, fip_db.router_id) + self._retrieve_and_delete_nat_rules(context, + fip_db.floating_ip_address, + fip_db.fixed_ip_address, + nsx_router_id, + min_num_rules_expected=1) + self._remove_floatingip_address(context, fip_db) + except sa_exc.NoResultFound: + LOG.debug(_("The port 
'%s' is not associated with floating IPs"), + port_id) + except n_exc.NotFound: + LOG.warning(_("Nat rules not found in nsx for port: %s"), id) + + super(NsxPluginV2, self).disassociate_floatingips(context, port_id) + + def create_network_gateway(self, context, network_gateway): + """Create a layer-2 network gateway. + + Create the gateway service on NSX platform and corresponding data + structures in Neutron datase. + """ + # Ensure the default gateway in the config file is in sync with the db + self._ensure_default_network_gateway() + # Need to re-do authZ checks here in order to avoid creation on NSX + gw_data = network_gateway[networkgw.GATEWAY_RESOURCE_NAME] + tenant_id = self._get_tenant_id_for_create(context, gw_data) + devices = gw_data['devices'] + # Populate default physical network where not specified + for device in devices: + if not device.get('interface_name'): + device['interface_name'] = self.cluster.default_interface_name + try: + # Replace Neutron device identifiers with NSX identifiers + dev_map = dict((dev['id'], dev['interface_name']) for + dev in devices) + nsx_devices = [] + for db_device in self._query_gateway_devices( + context, filters={'id': [device['id'] for device in devices]}): + nsx_devices.append( + {'id': db_device['nsx_id'], + 'interface_name': dev_map[db_device['id']]}) + nsx_res = l2gwlib.create_l2_gw_service( + self.cluster, tenant_id, gw_data['name'], nsx_devices) + nsx_uuid = nsx_res.get('uuid') + except api_exc.Conflict: + raise nsx_exc.L2GatewayAlreadyInUse(gateway=gw_data['name']) + except api_exc.NsxApiException: + err_msg = _("Unable to create l2_gw_service for: %s") % gw_data + LOG.exception(err_msg) + raise nsx_exc.NsxPluginException(err_msg=err_msg) + gw_data['id'] = nsx_uuid + return super(NsxPluginV2, self).create_network_gateway( + context, network_gateway) + + def delete_network_gateway(self, context, gateway_id): + """Remove a layer-2 network gateway. 
+ + Remove the gateway service from NSX platform and corresponding data + structures in Neutron datase. + """ + # Ensure the default gateway in the config file is in sync with the db + self._ensure_default_network_gateway() + with context.session.begin(subtransactions=True): + try: + super(NsxPluginV2, self).delete_network_gateway( + context, gateway_id) + l2gwlib.delete_l2_gw_service(self.cluster, gateway_id) + except api_exc.ResourceNotFound: + # Do not cause a 500 to be returned to the user if + # the corresponding NSX resource does not exist + LOG.exception(_("Unable to remove gateway service from " + "NSX plaform - the resource was not found")) + + def get_network_gateway(self, context, id, fields=None): + # Ensure the default gateway in the config file is in sync with the db + self._ensure_default_network_gateway() + return super(NsxPluginV2, self).get_network_gateway(context, + id, fields) + + def get_network_gateways(self, context, filters=None, fields=None, + sorts=None, limit=None, marker=None, + page_reverse=False): + # Ensure the default gateway in the config file is in sync with the db + self._ensure_default_network_gateway() + # Ensure the tenant_id attribute is populated on returned gateways + return super(NsxPluginV2, self).get_network_gateways( + context, filters, fields, sorts, limit, marker, page_reverse) + + def update_network_gateway(self, context, id, network_gateway): + # Ensure the default gateway in the config file is in sync with the db + self._ensure_default_network_gateway() + # Update gateway on backend when there's a name change + name = network_gateway[networkgw.GATEWAY_RESOURCE_NAME].get('name') + if name: + try: + l2gwlib.update_l2_gw_service(self.cluster, id, name) + except api_exc.NsxApiException: + # Consider backend failures as non-fatal, but still warn + # because this might indicate something dodgy is going on + LOG.warn(_("Unable to update name on NSX backend " + "for network gateway: %s"), id) + return super(NsxPluginV2, 
self).update_network_gateway( + context, id, network_gateway) + + def connect_network(self, context, network_gateway_id, + network_mapping_info): + # Ensure the default gateway in the config file is in sync with the db + self._ensure_default_network_gateway() + try: + return super(NsxPluginV2, self).connect_network( + context, network_gateway_id, network_mapping_info) + except api_exc.Conflict: + raise nsx_exc.L2GatewayAlreadyInUse(gateway=network_gateway_id) + + def disconnect_network(self, context, network_gateway_id, + network_mapping_info): + # Ensure the default gateway in the config file is in sync with the db + self._ensure_default_network_gateway() + return super(NsxPluginV2, self).disconnect_network( + context, network_gateway_id, network_mapping_info) + + def _get_nsx_device_id(self, context, device_id): + return self._get_gateway_device(context, device_id)['nsx_id'] + + def _rollback_gw_device(self, context, device_id, + gw_data=None, new_status=None, + is_create=False, log_level=logging.ERROR): + LOG.log(log_level, + _("Rolling back database changes for gateway device %s " + "because of an error in the NSX backend"), device_id) + with context.session.begin(subtransactions=True): + query = self._model_query( + context, networkgw_db.NetworkGatewayDevice).filter( + networkgw_db.NetworkGatewayDevice.id == device_id) + if is_create: + query.delete(synchronize_session=False) + else: + super(NsxPluginV2, self).update_gateway_device( + context, device_id, + {networkgw.DEVICE_RESOURCE_NAME: gw_data}) + if new_status: + query.update({'status': new_status}, + synchronize_session=False) + + # TODO(salv-orlando): Handlers for Gateway device operations should be + # moved into the appropriate nsx_handlers package once the code for the + # blueprint nsx-async-backend-communication merges + def create_gateway_device_handler(self, context, gateway_device, + client_certificate): + neutron_id = gateway_device['id'] + try: + nsx_res = l2gwlib.create_gateway_device( + 
self.cluster, + gateway_device['tenant_id'], + gateway_device['name'], + neutron_id, + self.cluster.default_tz_uuid, + gateway_device['connector_type'], + gateway_device['connector_ip'], + client_certificate) + + # Fetch status (it needs another NSX API call) + device_status = nsx_utils.get_nsx_device_status(self.cluster, + nsx_res['uuid']) + + # set NSX GW device in neutron database and update status + with context.session.begin(subtransactions=True): + query = self._model_query( + context, networkgw_db.NetworkGatewayDevice).filter( + networkgw_db.NetworkGatewayDevice.id == neutron_id) + query.update({'status': device_status, + 'nsx_id': nsx_res['uuid']}, + synchronize_session=False) + LOG.debug(_("Neutron gateway device: %(neutron_id)s; " + "NSX transport node identifier: %(nsx_id)s; " + "Operational status: %(status)s."), + {'neutron_id': neutron_id, + 'nsx_id': nsx_res['uuid'], + 'status': device_status}) + return device_status + except (nsx_exc.InvalidSecurityCertificate, api_exc.NsxApiException): + with excutils.save_and_reraise_exception(): + self._rollback_gw_device(context, neutron_id, is_create=True) + + def update_gateway_device_handler(self, context, gateway_device, + old_gateway_device_data, + client_certificate): + nsx_id = gateway_device['nsx_id'] + neutron_id = gateway_device['id'] + try: + l2gwlib.update_gateway_device( + self.cluster, + nsx_id, + gateway_device['tenant_id'], + gateway_device['name'], + neutron_id, + self.cluster.default_tz_uuid, + gateway_device['connector_type'], + gateway_device['connector_ip'], + client_certificate) + + # Fetch status (it needs another NSX API call) + device_status = nsx_utils.get_nsx_device_status(self.cluster, + nsx_id) + # update status + with context.session.begin(subtransactions=True): + query = self._model_query( + context, networkgw_db.NetworkGatewayDevice).filter( + networkgw_db.NetworkGatewayDevice.id == neutron_id) + query.update({'status': device_status}, + synchronize_session=False) + 
LOG.debug(_("Neutron gateway device: %(neutron_id)s; " + "NSX transport node identifier: %(nsx_id)s; " + "Operational status: %(status)s."), + {'neutron_id': neutron_id, + 'nsx_id': nsx_id, + 'status': device_status}) + return device_status + except (nsx_exc.InvalidSecurityCertificate, api_exc.NsxApiException): + with excutils.save_and_reraise_exception(): + self._rollback_gw_device(context, neutron_id, + gw_data=old_gateway_device_data) + except n_exc.NotFound: + # The gateway device was probably deleted in the backend. + # The DB change should be rolled back and the status must + # be put in error + with excutils.save_and_reraise_exception(): + self._rollback_gw_device(context, neutron_id, + gw_data=old_gateway_device_data, + new_status=networkgw_db.ERROR) + + def get_gateway_device(self, context, device_id, fields=None): + # Get device from database + gw_device = super(NsxPluginV2, self).get_gateway_device( + context, device_id, fields, include_nsx_id=True) + # Fetch status from NSX + nsx_id = gw_device['nsx_id'] + device_status = nsx_utils.get_nsx_device_status(self.cluster, nsx_id) + # TODO(salv-orlando): Asynchronous sync for gateway device status + # Update status in database + with context.session.begin(subtransactions=True): + query = self._model_query( + context, networkgw_db.NetworkGatewayDevice).filter( + networkgw_db.NetworkGatewayDevice.id == device_id) + query.update({'status': device_status}, + synchronize_session=False) + gw_device['status'] = device_status + return gw_device + + def get_gateway_devices(self, context, filters=None, fields=None, + sorts=None, limit=None, marker=None, + page_reverse=False): + # Get devices from database + devices = super(NsxPluginV2, self).get_gateway_devices( + context, filters, fields, include_nsx_id=True) + # Fetch operational status from NSX, filter by tenant tag + # TODO(salv-orlando): Asynchronous sync for gateway device status + tenant_id = context.tenant_id if not context.is_admin else None + nsx_statuses = 
nsx_utils.get_nsx_device_statuses(self.cluster, + tenant_id) + # Update statuses in database + with context.session.begin(subtransactions=True): + for device in devices: + new_status = nsx_statuses.get(device['nsx_id']) + if new_status: + device['status'] = new_status + return devices + + def create_gateway_device(self, context, gateway_device): + # NOTE(salv-orlando): client-certificate will not be stored + # in the database + device_data = gateway_device[networkgw.DEVICE_RESOURCE_NAME] + client_certificate = device_data.pop('client_certificate') + gw_device = super(NsxPluginV2, self).create_gateway_device( + context, gateway_device) + # DB operation was successful, perform NSX operation + gw_device['status'] = self.create_gateway_device_handler( + context, gw_device, client_certificate) + return gw_device + + def update_gateway_device(self, context, device_id, + gateway_device): + # NOTE(salv-orlando): client-certificate will not be stored + # in the database + client_certificate = ( + gateway_device[networkgw.DEVICE_RESOURCE_NAME].pop( + 'client_certificate', None)) + # Retrive current state from DB in case a rollback should be needed + old_gw_device_data = super(NsxPluginV2, self).get_gateway_device( + context, device_id, include_nsx_id=True) + gw_device = super(NsxPluginV2, self).update_gateway_device( + context, device_id, gateway_device, include_nsx_id=True) + # DB operation was successful, perform NSX operation + gw_device['status'] = self.update_gateway_device_handler( + context, gw_device, old_gw_device_data, client_certificate) + gw_device.pop('nsx_id') + return gw_device + + def delete_gateway_device(self, context, device_id): + nsx_device_id = self._get_nsx_device_id(context, device_id) + super(NsxPluginV2, self).delete_gateway_device( + context, device_id) + # DB operation was successful, peform NSX operation + # TODO(salv-orlando): State consistency with neutron DB + # should be ensured even in case of backend failures + try: + 
l2gwlib.delete_gateway_device(self.cluster, nsx_device_id) + except n_exc.NotFound: + LOG.warn(_("Removal of gateway device: %(neutron_id)s failed on " + "NSX backend (NSX id:%(nsx_id)s) because the NSX " + "resource was not found"), + {'neutron_id': device_id, 'nsx_id': nsx_device_id}) + except api_exc.NsxApiException: + with excutils.save_and_reraise_exception(): + # In this case a 500 should be returned + LOG.exception(_("Removal of gateway device: %(neutron_id)s " + "failed on NSX backend (NSX id:%(nsx_id)s). " + "Neutron and NSX states have diverged."), + {'neutron_id': device_id, + 'nsx_id': nsx_device_id}) + + def create_security_group(self, context, security_group, default_sg=False): + """Create security group. + + If default_sg is true that means we are creating a default security + group and we don't need to check if one exists. + """ + s = security_group.get('security_group') + + tenant_id = self._get_tenant_id_for_create(context, s) + if not default_sg: + self._ensure_default_security_group(context, tenant_id) + # NOTE(salv-orlando): Pre-generating Neutron ID for security group. + neutron_id = str(uuid.uuid4()) + nsx_secgroup = secgrouplib.create_security_profile( + self.cluster, tenant_id, neutron_id, s) + with context.session.begin(subtransactions=True): + s['id'] = neutron_id + sec_group = super(NsxPluginV2, self).create_security_group( + context, security_group, default_sg) + context.session.flush() + # Add mapping between neutron and nsx identifiers + nsx_db.add_neutron_nsx_security_group_mapping( + context.session, neutron_id, nsx_secgroup['uuid']) + return sec_group + + def update_security_group(self, context, secgroup_id, security_group): + secgroup = (super(NsxPluginV2, self). 
+ update_security_group(context, + secgroup_id, + security_group)) + if ('name' in security_group['security_group'] and + secgroup['name'] != 'default'): + nsx_sec_profile_id = nsx_utils.get_nsx_security_group_id( + context.session, self.cluster, secgroup_id) + try: + name = security_group['security_group']['name'] + secgrouplib.update_security_profile( + self.cluster, nsx_sec_profile_id, name) + except (n_exc.NotFound, api_exc.NsxApiException) as e: + # Reverting the DB change is not really worthwhile + # for a mismatch between names. It's the rules that + # we care about. + LOG.error(_('Error while updating security profile ' + '%(uuid)s with name %(name)s: %(error)s.') + % {'uuid': secgroup_id, 'name': name, 'error': e}) + return secgroup + + def delete_security_group(self, context, security_group_id): + """Delete a security group. + + :param security_group_id: security group rule to remove. + """ + with context.session.begin(subtransactions=True): + security_group = super(NsxPluginV2, self).get_security_group( + context, security_group_id) + if not security_group: + raise ext_sg.SecurityGroupNotFound(id=security_group_id) + + if security_group['name'] == 'default' and not context.is_admin: + raise ext_sg.SecurityGroupCannotRemoveDefault() + + filters = {'security_group_id': [security_group['id']]} + if super(NsxPluginV2, self)._get_port_security_group_bindings( + context, filters): + raise ext_sg.SecurityGroupInUse(id=security_group['id']) + nsx_sec_profile_id = nsx_utils.get_nsx_security_group_id( + context.session, self.cluster, security_group_id) + + try: + secgrouplib.delete_security_profile( + self.cluster, nsx_sec_profile_id) + except n_exc.NotFound: + # The security profile was not found on the backend + # do not fail in this case. 
+ LOG.warning(_("The NSX security profile %(sec_profile_id)s, " + "associated with the Neutron security group " + "%(sec_group_id)s was not found on the backend"), + {'sec_profile_id': nsx_sec_profile_id, + 'sec_group_id': security_group_id}) + except api_exc.NsxApiException: + # Raise and fail the operation, as there is a problem which + # prevented the sec group from being removed from the backend + LOG.exception(_("An exception occurred while removing the " + "NSX security profile %(sec_profile_id)s, " + "associated with Neutron security group " + "%(sec_group_id)s"), + {'sec_profile_id': nsx_sec_profile_id, + 'sec_group_id': security_group_id}) + raise nsx_exc.NsxPluginException( + _("Unable to remove security group %s from backend"), + security_group['id']) + return super(NsxPluginV2, self).delete_security_group( + context, security_group_id) + + def _validate_security_group_rules(self, context, rules): + for rule in rules['security_group_rules']: + r = rule.get('security_group_rule') + port_based_proto = (self._get_ip_proto_number(r['protocol']) + in securitygroups_db.IP_PROTOCOL_MAP.values()) + if (not port_based_proto and + (r['port_range_min'] is not None or + r['port_range_max'] is not None)): + msg = (_("Port values not valid for " + "protocol: %s") % r['protocol']) + raise n_exc.BadRequest(resource='security_group_rule', + msg=msg) + return super(NsxPluginV2, self)._validate_security_group_rules(context, + rules) + + def create_security_group_rule(self, context, security_group_rule): + """Create a single security group rule.""" + bulk_rule = {'security_group_rules': [security_group_rule]} + return self.create_security_group_rule_bulk(context, bulk_rule)[0] + + def create_security_group_rule_bulk(self, context, security_group_rule): + """Create security group rules. 
+ + :param security_group_rule: list of rules to create + """ + s = security_group_rule.get('security_group_rules') + tenant_id = self._get_tenant_id_for_create(context, s) + + # TODO(arosen) is there anyway we could avoid having the update of + # the security group rules in nsx outside of this transaction? + with context.session.begin(subtransactions=True): + self._ensure_default_security_group(context, tenant_id) + security_group_id = self._validate_security_group_rules( + context, security_group_rule) + # Check to make sure security group exists + security_group = super(NsxPluginV2, self).get_security_group( + context, security_group_id) + + if not security_group: + raise ext_sg.SecurityGroupNotFound(id=security_group_id) + # Check for duplicate rules + self._check_for_duplicate_rules(context, s) + # gather all the existing security group rules since we need all + # of them to PUT to NSX. + existing_rules = self.get_security_group_rules( + context, {'security_group_id': [security_group['id']]}) + combined_rules = sg_utils.merge_security_group_rules_with_current( + context.session, self.cluster, s, existing_rules) + nsx_sec_profile_id = nsx_utils.get_nsx_security_group_id( + context.session, self.cluster, security_group_id) + secgrouplib.update_security_group_rules(self.cluster, + nsx_sec_profile_id, + combined_rules) + return super( + NsxPluginV2, self).create_security_group_rule_bulk_native( + context, security_group_rule) + + def delete_security_group_rule(self, context, sgrid): + """Delete a security group rule + :param sgrid: security group id to remove. 
+ """ + with context.session.begin(subtransactions=True): + # determine security profile id + security_group_rule = ( + super(NsxPluginV2, self).get_security_group_rule( + context, sgrid)) + if not security_group_rule: + raise ext_sg.SecurityGroupRuleNotFound(id=sgrid) + + sgid = security_group_rule['security_group_id'] + current_rules = self.get_security_group_rules( + context, {'security_group_id': [sgid]}) + current_rules_nsx = sg_utils.get_security_group_rules_nsx_format( + context.session, self.cluster, current_rules, True) + + sg_utils.remove_security_group_with_id_and_id_field( + current_rules_nsx, sgrid) + nsx_sec_profile_id = nsx_utils.get_nsx_security_group_id( + context.session, self.cluster, sgid) + secgrouplib.update_security_group_rules( + self.cluster, nsx_sec_profile_id, current_rules_nsx) + return super(NsxPluginV2, self).delete_security_group_rule(context, + sgrid) + + def create_qos_queue(self, context, qos_queue, check_policy=True): + q = qos_queue.get('qos_queue') + self._validate_qos_queue(context, q) + q['id'] = queuelib.create_lqueue(self.cluster, q) + return super(NsxPluginV2, self).create_qos_queue(context, qos_queue) + + def delete_qos_queue(self, context, queue_id, raise_in_use=True): + filters = {'queue_id': [queue_id]} + queues = self._get_port_queue_bindings(context, filters) + if queues: + if raise_in_use: + raise qos.QueueInUseByPort() + else: + return + queuelib.delete_lqueue(self.cluster, queue_id) + return super(NsxPluginV2, self).delete_qos_queue(context, queue_id) diff --git a/neutron/plugins/vmware/plugins/service.py b/neutron/plugins/vmware/plugins/service.py new file mode 100644 index 000000000..4a5b8e957 --- /dev/null +++ b/neutron/plugins/vmware/plugins/service.py @@ -0,0 +1,1812 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 VMware, Inc. +# All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +import netaddr +from oslo.config import cfg + +from neutron.common import exceptions as n_exc +from neutron.db.firewall import firewall_db +from neutron.db import l3_db +from neutron.db.loadbalancer import loadbalancer_db +from neutron.db import routedserviceinsertion_db as rsi_db +from neutron.db.vpn import vpn_db +from neutron.extensions import firewall as fw_ext +from neutron.extensions import l3 +from neutron.extensions import routedserviceinsertion as rsi +from neutron.extensions import vpnaas as vpn_ext +from neutron.openstack.common import excutils +from neutron.openstack.common import log as logging +from neutron.plugins.common import constants as service_constants +from neutron.plugins.vmware.api_client import exception as api_exc +from neutron.plugins.vmware.common import config # noqa +from neutron.plugins.vmware.common import exceptions as nsx_exc +from neutron.plugins.vmware.common import utils +from neutron.plugins.vmware.dbexts import servicerouter as sr_db +from neutron.plugins.vmware.dbexts import vcns_db +from neutron.plugins.vmware.dbexts import vcns_models +from neutron.plugins.vmware.extensions import servicerouter as sr +from neutron.plugins.vmware.nsxlib import router as routerlib +from neutron.plugins.vmware.nsxlib import switch as switchlib +from neutron.plugins.vmware.plugins import base +from neutron.plugins.vmware.vshield.common import constants as vcns_const +from neutron.plugins.vmware.vshield.common import exceptions +from neutron.plugins.vmware.vshield.tasks import constants as tasks_const +from 
neutron.plugins.vmware.vshield import vcns_driver +from sqlalchemy.orm import exc as sa_exc + +LOG = logging.getLogger(__name__) + +ROUTER_TYPE_BASIC = 1 +ROUTER_TYPE_ADVANCED = 2 + +ROUTER_STATUS = [ + service_constants.ACTIVE, + service_constants.DOWN, + service_constants.PENDING_CREATE, + service_constants.PENDING_DELETE, + service_constants.ERROR +] + +ROUTER_STATUS_LEVEL = { + service_constants.ACTIVE: vcns_const.RouterStatus.ROUTER_STATUS_ACTIVE, + service_constants.DOWN: vcns_const.RouterStatus.ROUTER_STATUS_DOWN, + service_constants.PENDING_CREATE: ( + vcns_const.RouterStatus.ROUTER_STATUS_PENDING_CREATE + ), + service_constants.PENDING_DELETE: ( + vcns_const.RouterStatus.ROUTER_STATUS_PENDING_DELETE + ), + service_constants.ERROR: vcns_const.RouterStatus.ROUTER_STATUS_ERROR +} + + +class NsxAdvancedPlugin(sr_db.ServiceRouter_mixin, + base.NsxPluginV2, + rsi_db.RoutedServiceInsertionDbMixin, + firewall_db.Firewall_db_mixin, + loadbalancer_db.LoadBalancerPluginDb, + vpn_db.VPNPluginDb + ): + + supported_extension_aliases = ( + base.NsxPluginV2.supported_extension_aliases + [ + "service-router", + "routed-service-insertion", + "fwaas", + "lbaas", + "vpnaas" + ]) + # The service plugin cannot currently support pagination + __native_pagination_support = False + __native_sorting_support = False + + def __init__(self): + super(NsxAdvancedPlugin, self).__init__() + + self._super_create_ext_gw_port = ( + self._port_drivers['create'][l3_db.DEVICE_OWNER_ROUTER_GW]) + self._super_delete_ext_gw_port = ( + self._port_drivers['delete'][l3_db.DEVICE_OWNER_ROUTER_GW]) + + self._port_drivers['create'][l3_db.DEVICE_OWNER_ROUTER_GW] = ( + self._vcns_create_ext_gw_port) + self._port_drivers['delete'][l3_db.DEVICE_OWNER_ROUTER_GW] = ( + self._vcns_delete_ext_gw_port) + + # cache router type based on router id + self._router_type = {} + self.callbacks = VcnsCallbacks(self.safe_reference) + + # load the vCNS driver + self._load_vcns_drivers() + + # switchlib's create_lswitch 
needs to be replaced in order to proxy + # logical switch create requests to vcns + self._set_create_lswitch_proxy() + + def _set_create_lswitch_proxy(self): + base.switchlib.create_lswitch = self._proxy_create_lswitch + + def _proxy_create_lswitch(self, *args, **kwargs): + name, tz_config, tags = ( + _process_base_create_lswitch_args(*args, **kwargs) + ) + return self.vcns_driver.create_lswitch( + name, tz_config, tags=tags, + port_isolation=None, replication_mode=None) + + def _load_vcns_drivers(self): + self.vcns_driver = vcns_driver.VcnsDriver(self.callbacks) + + def _set_router_type(self, router_id, router_type): + self._router_type[router_id] = router_type + + def _get_router_type(self, context=None, router_id=None, router=None): + if not router: + if router_id in self._router_type: + return self._router_type[router_id] + router = self._get_router(context, router_id) + + LOG.debug(_("EDGE: router = %s"), router) + if router['nsx_attributes']['service_router']: + router_type = ROUTER_TYPE_ADVANCED + else: + router_type = ROUTER_TYPE_BASIC + self._set_router_type(router['id'], router_type) + return router_type + + def _find_router_type(self, router): + is_service_router = router.get(sr.SERVICE_ROUTER, False) + if is_service_router: + return ROUTER_TYPE_ADVANCED + else: + return ROUTER_TYPE_BASIC + + def _is_advanced_service_router(self, context=None, router_id=None, + router=None): + if router: + router_type = self._get_router_type(router=router) + else: + router_type = self._get_router_type(context, router_id) + return (router_type == ROUTER_TYPE_ADVANCED) + + def _vcns_create_ext_gw_port(self, context, port_data): + router_id = port_data['device_id'] + if not self._is_advanced_service_router(context, router_id): + self._super_create_ext_gw_port(context, port_data) + return + + # NOP for Edge because currently the port will be create internally + # by VSM + LOG.debug(_("EDGE: _vcns_create_ext_gw_port")) + + def _vcns_delete_ext_gw_port(self, context, 
port_data): + router_id = port_data['device_id'] + if not self._is_advanced_service_router(context, router_id): + self._super_delete_ext_gw_port(context, port_data) + return + + # NOP for Edge + LOG.debug(_("EDGE: _vcns_delete_ext_gw_port")) + + def _get_external_attachment_info(self, context, router): + gw_port = router.gw_port + ipaddress = None + netmask = None + nexthop = None + + if gw_port: + # gw_port may have multiple IPs, only configure the first one + if gw_port.get('fixed_ips'): + ipaddress = gw_port['fixed_ips'][0]['ip_address'] + + network_id = gw_port.get('network_id') + if network_id: + ext_net = self._get_network(context, network_id) + if not ext_net.external: + msg = (_("Network '%s' is not a valid external " + "network") % network_id) + raise n_exc.BadRequest(resource='router', msg=msg) + if ext_net.subnets: + ext_subnet = ext_net.subnets[0] + netmask = str(netaddr.IPNetwork(ext_subnet.cidr).netmask) + nexthop = ext_subnet.gateway_ip + + return (ipaddress, netmask, nexthop) + + def _get_external_gateway_address(self, context, router): + ipaddress, netmask, nexthop = self._get_external_attachment_info( + context, router) + return nexthop + + def _vcns_update_static_routes(self, context, **kwargs): + router = kwargs.get('router') + if router is None: + router = self._get_router(context, kwargs['router_id']) + + edge_id = kwargs.get('edge_id') + if edge_id is None: + binding = vcns_db.get_vcns_router_binding(context.session, + router['id']) + edge_id = binding['edge_id'] + + skippable = True + if 'nexthop' in kwargs: + nexthop = kwargs['nexthop'] + # The default gateway and vnic config has dependencies, if we + # explicitly specify nexthop to change, tell the driver not to + # skip this route update + skippable = False + else: + nexthop = self._get_external_gateway_address(context, + router) + + if 'subnets' in kwargs: + subnets = kwargs['subnets'] + else: + subnets = self._find_router_subnets_cidrs(context.elevated(), + router['id']) + + routes = [] 
+ for subnet in subnets: + routes.append({ + 'cidr': subnet, + 'nexthop': vcns_const.INTEGRATION_LR_IPADDRESS.split('/')[0] + }) + self.vcns_driver.update_routes(router['id'], edge_id, nexthop, routes, + skippable) + + def _get_nat_rules(self, context, router): + fip_qry = context.session.query(l3_db.FloatingIP) + fip_db = fip_qry.filter_by(router_id=router['id']).all() + + dnat = [] + snat = [] + for fip in fip_db: + if fip.fixed_port_id: + dnat.append({ + 'dst': fip.floating_ip_address, + 'translated': fip.fixed_ip_address + }) + + gw_port = router.gw_port + if gw_port and router.enable_snat: + if gw_port.get('fixed_ips'): + snat_ip = gw_port['fixed_ips'][0]['ip_address'] + subnets = self._find_router_subnets_cidrs(context.elevated(), + router['id']) + for subnet in subnets: + snat.append({ + 'src': subnet, + 'translated': snat_ip + }) + + return (snat, dnat) + + def _update_nat_rules(self, context, router): + snat, dnat = self._get_nat_rules(context, router) + binding = vcns_db.get_vcns_router_binding(context.session, + router['id']) + self.vcns_driver.update_nat_rules(router['id'], + binding['edge_id'], + snat, dnat) + + def _update_interface(self, context, router, sync=False): + addr, mask, nexthop = self._get_external_attachment_info( + context, router) + + secondary = [] + fip_qry = context.session.query(l3_db.FloatingIP) + fip_db = fip_qry.filter_by(router_id=router['id']).all() + for fip in fip_db: + if fip.fixed_port_id: + secondary.append(fip.floating_ip_address) + #Add all vip addresses bound on the router + vip_addrs = self._get_all_vip_addrs_by_router_id(context, + router['id']) + secondary.extend(vip_addrs) + + binding = vcns_db.get_vcns_router_binding(context.session, + router['id']) + task = self.vcns_driver.update_interface( + router['id'], binding['edge_id'], + vcns_const.EXTERNAL_VNIC_INDEX, + self.vcns_driver.external_network, + addr, mask, secondary=secondary) + if sync: + task.wait(tasks_const.TaskState.RESULT) + + def 
_update_router_gw_info(self, context, router_id, info): + if not self._is_advanced_service_router(context, router_id): + super(NsxAdvancedPlugin, self)._update_router_gw_info( + context, router_id, info) + return + + # get original gw_port config + router = self._get_router(context, router_id) + org_ext_net_id = router.gw_port_id and router.gw_port.network_id + org_enable_snat = router.enable_snat + orgaddr, orgmask, orgnexthop = self._get_external_attachment_info( + context, router) + + super(base.NsxPluginV2, self)._update_router_gw_info( + context, router_id, info, router=router) + + new_ext_net_id = router.gw_port_id and router.gw_port.network_id + new_enable_snat = router.enable_snat + newaddr, newmask, newnexthop = self._get_external_attachment_info( + context, router) + + binding = vcns_db.get_vcns_router_binding(context.session, router_id) + + if new_ext_net_id != org_ext_net_id and orgnexthop: + # network changed, need to remove default gateway before vnic + # can be configured + LOG.debug(_("VCNS: delete default gateway %s"), orgnexthop) + self._vcns_update_static_routes(context, + router=router, + edge_id=binding['edge_id'], + nexthop=None) + + if orgaddr != newaddr or orgmask != newmask: + self.vcns_driver.update_interface( + router_id, binding['edge_id'], + vcns_const.EXTERNAL_VNIC_INDEX, + self.vcns_driver.external_network, + newaddr, newmask) + + if orgnexthop != newnexthop: + self._vcns_update_static_routes(context, + router=router, + edge_id=binding['edge_id'], + nexthop=newnexthop) + + if (new_ext_net_id == org_ext_net_id and + org_enable_snat == new_enable_snat): + return + + self._update_nat_rules(context, router) + + def _add_subnet_snat_rule(self, context, router, subnet): + # NOP for service router + if not self._is_advanced_service_router(router=router): + super(NsxAdvancedPlugin, self)._add_subnet_snat_rule( + context, router, subnet) + + def _delete_subnet_snat_rule(self, context, router, subnet): + # NOP for service router + if not 
self._is_advanced_service_router(router=router): + super(NsxAdvancedPlugin, self)._delete_subnet_snat_rule( + context, router, subnet) + + def _remove_floatingip_address(self, context, fip_db): + # NOP for service router + router_id = fip_db.router_id + if not self._is_advanced_service_router(context, router_id): + super(NsxAdvancedPlugin, self)._remove_floatingip_address( + context, fip_db) + + def _create_advanced_service_router(self, context, neutron_router_id, + name, lrouter, lswitch): + + # store binding + binding = vcns_db.add_vcns_router_binding( + context.session, neutron_router_id, None, lswitch['uuid'], + service_constants.PENDING_CREATE) + + # deploy edge + jobdata = { + 'neutron_router_id': neutron_router_id, + 'lrouter': lrouter, + 'lswitch': lswitch, + 'context': context + } + + # deploy and wait until the deploy requeste has been requested + # so we will have edge_id ready. The wait here should be fine + # as we're not in a database transaction now + self.vcns_driver.deploy_edge( + lrouter['uuid'], name, lswitch['uuid'], jobdata=jobdata, + wait_for_exec=True) + + return binding + + def _create_integration_lswitch(self, tenant_id, name): + # use defautl transport zone + transport_zone_config = [{ + "zone_uuid": self.cluster.default_tz_uuid, + "transport_type": cfg.CONF.NSX.default_transport_type + }] + return self.vcns_driver.create_lswitch(name, transport_zone_config) + + def _add_router_integration_interface(self, tenant_id, name, + lrouter, lswitch): + # create logic switch port + try: + ls_port = switchlib.create_lport( + self.cluster, lswitch['uuid'], tenant_id, + '', '', lrouter['uuid'], True) + except api_exc.NsxApiException: + msg = (_("An exception occurred while creating a port " + "on lswitch %s") % lswitch['uuid']) + LOG.exception(msg) + raise n_exc.NeutronException(message=msg) + + # create logic router port + try: + neutron_port_id = '' + pname = name[:36] + '-lp' + admin_status_enabled = True + lr_port = routerlib.create_router_lport( 
+ self.cluster, lrouter['uuid'], tenant_id, + neutron_port_id, pname, admin_status_enabled, + [vcns_const.INTEGRATION_LR_IPADDRESS]) + except api_exc.NsxApiException: + msg = (_("Unable to create port on NSX logical router %s") % name) + LOG.exception(msg) + switchlib.delete_port( + self.cluster, lswitch['uuid'], ls_port['uuid']) + raise n_exc.NeutronException(message=msg) + + # attach logic router port to switch port + try: + self._update_router_port_attachment( + self.cluster, None, lrouter['uuid'], {}, lr_port['uuid'], + 'PatchAttachment', ls_port['uuid'], None) + except api_exc.NsxApiException as e: + # lr_port should have been deleted + switchlib.delete_port( + self.cluster, lswitch['uuid'], ls_port['uuid']) + raise e + + def _create_lrouter(self, context, router, nexthop): + lrouter = super(NsxAdvancedPlugin, self)._create_lrouter( + context, router, vcns_const.INTEGRATION_EDGE_IPADDRESS) + + router_type = self._find_router_type(router) + self._set_router_type(lrouter['uuid'], router_type) + if router_type == ROUTER_TYPE_BASIC: + return lrouter + + tenant_id = self._get_tenant_id_for_create(context, router) + name = router['name'] + try: + lsname = name[:36] + '-ls' + lswitch = self._create_integration_lswitch( + tenant_id, lsname) + except Exception: + msg = _("Unable to create integration logic switch " + "for router %s") % name + LOG.exception(msg) + routerlib.delete_lrouter(self.cluster, lrouter['uuid']) + raise n_exc.NeutronException(message=msg) + + try: + self._add_router_integration_interface(tenant_id, name, + lrouter, lswitch) + except Exception: + msg = _("Unable to add router interface to integration lswitch " + "for router %s") % name + LOG.exception(msg) + routerlib.delete_lrouter(self.cluster, lrouter['uuid']) + raise n_exc.NeutronException(message=msg) + + try: + self._create_advanced_service_router( + context, router['id'], name, lrouter, lswitch) + except Exception: + msg = (_("Unable to create advance service router for %s") % name) + 
LOG.exception(msg) + self.vcns_driver.delete_lswitch(lswitch['uuid']) + routerlib.delete_lrouter(self.cluster, lrouter['uuid']) + raise n_exc.NeutronException(message=msg) + + lrouter['status'] = service_constants.PENDING_CREATE + return lrouter + + def check_router_in_use(self, context, router_id): + router_filter = {'router_id': [router_id]} + vpnservices = self.get_vpnservices( + context, filters={'router_id': [router_id]}) + if vpnservices: + raise vpn_ext.RouterInUseByVPNService( + router_id=router_id, + vpnservice_id=vpnservices[0]['id']) + vips = self.get_vips( + context, filters=router_filter) + if vips: + raise nsx_exc.RouterInUseByLBService( + router_id=router_id, + vip_id=vips[0]['id']) + firewalls = self.get_firewalls( + context, filters=router_filter) + if firewalls: + raise nsx_exc.RouterInUseByFWService( + router_id=router_id, + firewall_id=firewalls[0]['id']) + + def check_router(self, context, router_id): + if not router_id: + msg = _("router_id is not provided!") + raise n_exc.BadRequest(resource='router', msg=msg) + router = self._get_router(context, router_id) + if not self._is_advanced_service_router(context, router=router): + msg = _("router_id:%s is not an advanced router!") % router['id'] + raise n_exc.BadRequest(resource='router', msg=msg) + if router['status'] != service_constants.ACTIVE: + raise nsx_exc.AdvRouterServiceUnavailable(router_id=router['id']) + + def _delete_lrouter(self, context, router_id, nsx_router_id): + binding = vcns_db.get_vcns_router_binding(context.session, router_id) + if not binding: + super(NsxAdvancedPlugin, self)._delete_lrouter( + context, router_id, nsx_router_id) + else: + #Check whether router has an advanced service inserted. 
self.check_router_in_use(context, router_id) + vcns_db.update_vcns_router_binding( + context.session, router_id, + status=service_constants.PENDING_DELETE) + + lswitch_id = binding['lswitch_id'] + edge_id = binding['edge_id'] + + # delete lswitch + try: + self.vcns_driver.delete_lswitch(lswitch_id) + except exceptions.ResourceNotFound: + LOG.warning(_("Did not find lswitch %s in NSX"), lswitch_id) + + # delete edge + jobdata = { + 'context': context + } + self.vcns_driver.delete_edge(router_id, edge_id, jobdata=jobdata) + + # delete NSX logical router + routerlib.delete_lrouter(self.cluster, nsx_router_id) + + if router_id in self._router_type: + del self._router_type[router_id] + + def _update_lrouter(self, context, router_id, name, nexthop, routes=None): + if not self._is_advanced_service_router(context, router_id): + return super(NsxAdvancedPlugin, self)._update_lrouter( + context, router_id, name, nexthop, routes=routes) + + previous_routes = super(NsxAdvancedPlugin, self)._update_lrouter( + context, router_id, name, + vcns_const.INTEGRATION_EDGE_IPADDRESS, routes=routes) + + # TODO(fank): Theoretically users can specify extra routes for + # physical network, and routes for physical network needs to be + # configured on Edge. This can be done by checking if nexthop is in + # external network. But for now we only handle routes for logic + # space and leave it for future enhancement. 
+ + # Let _update_router_gw_info handle nexthop change + #self._vcns_update_static_routes(context, router_id=router_id) + + return previous_routes + + def _retrieve_and_delete_nat_rules(self, context, floating_ip_address, + internal_ip, router_id, + min_num_rules_expected=0): + # NOP for advanced service router + if not self._is_advanced_service_router(context, router_id): + super(NsxAdvancedPlugin, self)._retrieve_and_delete_nat_rules( + context, floating_ip_address, internal_ip, router_id, + min_num_rules_expected=min_num_rules_expected) + + def _update_fip_assoc(self, context, fip, floatingip_db, external_port): + # Update DB model only for advanced service router + router_id = self._get_fip_assoc_data(context, fip, floatingip_db)[2] + if (router_id and + not self._is_advanced_service_router(context, router_id)): + super(NsxAdvancedPlugin, self)._update_fip_assoc( + context, fip, floatingip_db, external_port) + else: + super(base.NsxPluginV2, self)._update_fip_assoc( + context, fip, floatingip_db, external_port) + + def _get_nsx_lrouter_status(self, id): + try: + lrouter = routerlib.get_lrouter(self.cluster, id) + lr_status = lrouter["_relations"]["LogicalRouterStatus"] + if lr_status["fabric_status"]: + nsx_status = vcns_const.RouterStatus.ROUTER_STATUS_ACTIVE + else: + nsx_status = vcns_const.RouterStatus.ROUTER_STATUS_DOWN + except n_exc.NotFound: + nsx_status = vcns_const.RouterStatus.ROUTER_STATUS_ERROR + + return nsx_status + + def _get_vse_status(self, context, id): + binding = vcns_db.get_vcns_router_binding(context.session, id) + edge_status_level = self.vcns_driver.get_edge_status( + binding['edge_id']) + edge_db_status_level = ROUTER_STATUS_LEVEL[binding.status] + + if edge_status_level > edge_db_status_level: + return edge_status_level + else: + return edge_db_status_level + + def _get_all_nsx_lrouters_statuses(self, tenant_id, fields): + # get nsx lrouters status + nsx_lrouters = routerlib.get_lrouters(self.cluster, + tenant_id, + fields) + + 
nsx_status = {} + for nsx_lrouter in nsx_lrouters: + if (nsx_lrouter["_relations"]["LogicalRouterStatus"] + ["fabric_status"]): + nsx_status[nsx_lrouter['uuid']] = ( + vcns_const.RouterStatus.ROUTER_STATUS_ACTIVE + ) + else: + nsx_status[nsx_lrouter['uuid']] = ( + vcns_const.RouterStatus.ROUTER_STATUS_DOWN + ) + + return nsx_status + + def _get_all_vse_statuses(self, context): + bindings = self._model_query( + context, vcns_models.VcnsRouterBinding) + + vse_db_status_level = {} + edge_id_to_router_id = {} + router_ids = [] + for binding in bindings: + if not binding['edge_id']: + continue + router_id = binding['router_id'] + router_ids.append(router_id) + edge_id_to_router_id[binding['edge_id']] = router_id + vse_db_status_level[router_id] = ( + ROUTER_STATUS_LEVEL[binding['status']]) + + if not vse_db_status_level: + # no advanced service router, no need to query + return {} + + vse_status_level = {} + edges_status_level = self.vcns_driver.get_edges_statuses() + for edge_id, status_level in edges_status_level.iteritems(): + if edge_id in edge_id_to_router_id: + router_id = edge_id_to_router_id[edge_id] + db_status_level = vse_db_status_level[router_id] + if status_level > db_status_level: + vse_status_level[router_id] = status_level + else: + vse_status_level[router_id] = db_status_level + + return vse_status_level + + def get_router(self, context, id, fields=None): + if fields and 'status' not in fields: + return super(NsxAdvancedPlugin, self).get_router( + context, id, fields=fields) + + router = super(NsxAdvancedPlugin, self).get_router(context, id) + + router_type = self._find_router_type(router) + if router_type == ROUTER_TYPE_ADVANCED: + vse_status_level = self._get_vse_status(context, id) + if vse_status_level > ROUTER_STATUS_LEVEL[router['status']]: + router['status'] = ROUTER_STATUS[vse_status_level] + + return self._fields(router, fields) + + def get_routers(self, context, filters=None, fields=None, **kwargs): + routers = super(NsxAdvancedPlugin, 
self).get_routers( + context, filters=filters, **kwargs) + + if fields and 'status' not in fields: + # no status checking, just return regular get_routers + return [self._fields(router, fields) for router in routers] + + for router in routers: + router_type = self._find_router_type(router) + if router_type == ROUTER_TYPE_ADVANCED: + break + else: + # no advanced service router, return here + return [self._fields(router, fields) for router in routers] + + vse_status_all = self._get_all_vse_statuses(context) + for router in routers: + router_type = self._find_router_type(router) + if router_type == ROUTER_TYPE_ADVANCED: + vse_status_level = vse_status_all.get(router['id']) + if vse_status_level is None: + vse_status_level = ( + vcns_const.RouterStatus.ROUTER_STATUS_ERROR) + if vse_status_level > ROUTER_STATUS_LEVEL[router['status']]: + router['status'] = ROUTER_STATUS[vse_status_level] + + return [self._fields(router, fields) for router in routers] + + def add_router_interface(self, context, router_id, interface_info): + info = super(NsxAdvancedPlugin, self).add_router_interface( + context, router_id, interface_info) + if self._is_advanced_service_router(context, router_id): + router = self._get_router(context, router_id) + if router.enable_snat: + self._update_nat_rules(context, router) + # TODO(fank): do rollback on error, or have a dedicated thread + # do sync work (rollback, re-configure, or make router down) + self._vcns_update_static_routes(context, router=router) + return info + + def remove_router_interface(self, context, router_id, interface_info): + info = super(NsxAdvancedPlugin, self).remove_router_interface( + context, router_id, interface_info) + if self._is_advanced_service_router(context, router_id): + router = self._get_router(context, router_id) + if router.enable_snat: + self._update_nat_rules(context, router) + # TODO(fank): do rollback on error, or have a dedicated thread + # do sync work (rollback, re-configure, or make router down) + 
self._vcns_update_static_routes(context, router=router) + return info + + def create_floatingip(self, context, floatingip): + fip = super(NsxAdvancedPlugin, self).create_floatingip( + context, floatingip) + router_id = fip.get('router_id') + if router_id and self._is_advanced_service_router(context, router_id): + router = self._get_router(context, router_id) + # TODO(fank): do rollback on error, or have a dedicated thread + # do sync work (rollback, re-configure, or make router down) + self._update_nat_rules(context, router) + self._update_interface(context, router) + return fip + + def update_floatingip(self, context, id, floatingip): + fip = super(NsxAdvancedPlugin, self).update_floatingip( + context, id, floatingip) + router_id = fip.get('router_id') + if router_id and self._is_advanced_service_router(context, router_id): + router = self._get_router(context, router_id) + # TODO(fank): do rollback on error, or have a dedicated thread + # do sync work (rollback, re-configure, or make router down) + self._update_nat_rules(context, router) + self._update_interface(context, router) + return fip + + def delete_floatingip(self, context, id): + fip_db = self._get_floatingip(context, id) + router_id = None + if fip_db.fixed_port_id: + router_id = fip_db.router_id + super(NsxAdvancedPlugin, self).delete_floatingip(context, id) + if router_id and self._is_advanced_service_router(context, router_id): + router = self._get_router(context, router_id) + # TODO(fank): do rollback on error, or have a dedicated thread + # do sync work (rollback, re-configure, or make router down) + self._update_interface(context, router) + self._update_nat_rules(context, router) + + def disassociate_floatingips(self, context, port_id): + routers = set() + + try: + fip_qry = context.session.query(l3_db.FloatingIP) + fip_dbs = fip_qry.filter_by(fixed_port_id=port_id) + for fip_db in fip_dbs: + routers.add(fip_db.router_id) + except sa_exc.NoResultFound: + pass + super(NsxAdvancedPlugin, 
self).disassociate_floatingips(context, + port_id) + + for router_id in routers: + if self._is_advanced_service_router(context, router_id): + router = self._get_router(context, router_id) + # TODO(fank): do rollback on error, or have a dedicated thread + # do sync work (rollback, re-configure, or make router down) + self._update_interface(context, router) + self._update_nat_rules(context, router) + + # + # FWaaS plugin implementation + # + def _firewall_set_status( + self, context, firewall_id, status, firewall=None): + with context.session.begin(subtransactions=True): + fw_db = self._get_firewall(context, firewall_id) + if status == service_constants.PENDING_UPDATE and ( + fw_db.status == service_constants.PENDING_DELETE): + raise fw_ext.FirewallInPendingState( + firewall_id=firewall_id, pending_state=status) + else: + fw_db.status = status + if firewall: + firewall['status'] = status + + def _ensure_firewall_update_allowed(self, context, firewall_id): + fwall = self.get_firewall(context, firewall_id) + if fwall['status'] in [service_constants.PENDING_CREATE, + service_constants.PENDING_UPDATE, + service_constants.PENDING_DELETE]: + raise fw_ext.FirewallInPendingState(firewall_id=firewall_id, + pending_state=fwall['status']) + + def _ensure_firewall_policy_update_allowed( + self, context, firewall_policy_id): + firewall_policy = self.get_firewall_policy(context, firewall_policy_id) + for firewall_id in firewall_policy.get('firewall_list', []): + self._ensure_firewall_update_allowed(context, firewall_id) + + def _ensure_update_or_delete_firewall_rule( + self, context, firewall_rule_id): + fw_rule = self.get_firewall_rule(context, firewall_rule_id) + if fw_rule.get('firewall_policy_id'): + self._ensure_firewall_policy_update_allowed( + context, fw_rule['firewall_policy_id']) + + def _make_firewall_rule_list_by_policy_id(self, context, fw_policy_id): + if not fw_policy_id: + return [] + firewall_policy_db = self._get_firewall_policy(context, fw_policy_id) + return [ 
+ self._make_firewall_rule_dict(fw_rule_db) + for fw_rule_db in firewall_policy_db['firewall_rules'] + ] + + def _get_edge_id_by_vcns_edge_binding(self, context, + router_id): + #Get vcns_router_binding mapping between router and edge + router_binding = vcns_db.get_vcns_router_binding( + context.session, router_id) + return router_binding.edge_id + + def _get_firewall_list_from_firewall_policy(self, context, policy_id): + firewall_policy_db = self._get_firewall_policy(context, policy_id) + return [ + self._make_firewall_dict(fw_db) + for fw_db in firewall_policy_db['firewalls'] + ] + + def _get_firewall_list_from_firewall_rule(self, context, rule_id): + rule = self._get_firewall_rule(context, rule_id) + if not rule.firewall_policy_id: + # The firewall rule is not associated with firewall policy yet + return None + + return self._get_firewall_list_from_firewall_policy( + context, rule.firewall_policy_id) + + def _vcns_update_firewall(self, context, fw, router_id=None, **kwargs): + edge_id = kwargs.get('edge_id') + if not edge_id: + edge_id = self._get_edge_id_by_vcns_edge_binding( + context, router_id) + firewall_rule_list = kwargs.get('firewall_rule_list') + if not firewall_rule_list: + firewall_rule_list = self._make_firewall_rule_list_by_policy_id( + context, fw['firewall_policy_id']) + fw_with_rules = fw + fw_with_rules['firewall_rule_list'] = firewall_rule_list + try: + self.vcns_driver.update_firewall(context, edge_id, fw_with_rules) + except exceptions.VcnsApiException as e: + self._firewall_set_status( + context, fw['id'], service_constants.ERROR) + msg = (_("Failed to create firewall on vShield Edge " + "bound on router %s") % router_id) + LOG.exception(msg) + raise e + + except exceptions.VcnsBadRequest as e: + self._firewall_set_status( + context, fw['id'], service_constants.ERROR) + LOG.exception(_("Bad Firewall request Input")) + raise e + + def _vcns_delete_firewall(self, context, router_id=None, **kwargs): + edge_id = kwargs.get('edge_id') + if not 
edge_id: + edge_id = self._get_edge_id_by_vcns_edge_binding( + context, router_id) + #TODO(linb):do rollback on error + self.vcns_driver.delete_firewall(context, edge_id) + + def create_firewall(self, context, firewall): + LOG.debug(_("create_firewall() called")) + router_id = firewall['firewall'].get(vcns_const.ROUTER_ID) + self.check_router(context, router_id) + if self._get_resource_router_id_binding( + context, firewall_db.Firewall, router_id=router_id): + msg = _("A firewall is already associated with the router") + LOG.error(msg) + raise nsx_exc.ServiceOverQuota( + overs='firewall', err_msg=msg) + + fw = super(NsxAdvancedPlugin, self).create_firewall(context, firewall) + #Add router service insertion binding with firewall object + res = { + 'id': fw['id'], + 'router_id': router_id + } + self._process_create_resource_router_id( + context, res, firewall_db.Firewall) + # Since there is only one firewall per edge, + # here would be bulk configuration operation on firewall + self._vcns_update_firewall(context, fw, router_id) + self._firewall_set_status( + context, fw['id'], service_constants.ACTIVE, fw) + fw[rsi.ROUTER_ID] = router_id + return fw + + def update_firewall(self, context, id, firewall): + LOG.debug(_("update_firewall() called")) + self._ensure_firewall_update_allowed(context, id) + service_router_binding = self._get_resource_router_id_binding( + context, firewall_db.Firewall, resource_id=id) + rule_list_pre = self._make_firewall_rule_list_by_policy_id( + context, + self.get_firewall(context, id)['firewall_policy_id']) + firewall['firewall']['status'] = service_constants.PENDING_UPDATE + fw = super(NsxAdvancedPlugin, self).update_firewall( + context, id, firewall) + fw[rsi.ROUTER_ID] = service_router_binding['router_id'] + rule_list_new = self._make_firewall_rule_list_by_policy_id( + context, fw['firewall_policy_id']) + if rule_list_pre == rule_list_new: + self._firewall_set_status( + context, fw['id'], service_constants.ACTIVE, fw) + return fw + else: 
+ self._vcns_update_firewall( + context, fw, service_router_binding.router_id, + firewall_rule_list=rule_list_new) + self._firewall_set_status( + context, fw['id'], service_constants.ACTIVE, fw) + return fw + + def delete_firewall(self, context, id): + LOG.debug(_("delete_firewall() called")) + self._firewall_set_status( + context, id, service_constants.PENDING_DELETE) + service_router_binding = self._get_resource_router_id_binding( + context, firewall_db.Firewall, resource_id=id) + self._vcns_delete_firewall(context, service_router_binding.router_id) + super(NsxAdvancedPlugin, self).delete_firewall(context, id) + self._delete_resource_router_id_binding( + context, id, firewall_db.Firewall) + + def get_firewall(self, context, id, fields=None): + fw = super(NsxAdvancedPlugin, self).get_firewall( + context, id, fields) + if fields and rsi.ROUTER_ID not in fields: + return fw + + service_router_binding = self._get_resource_router_id_binding( + context, firewall_db.Firewall, resource_id=fw['id']) + fw[rsi.ROUTER_ID] = service_router_binding['router_id'] + return fw + + def get_firewalls(self, context, filters=None, fields=None): + fws = super(NsxAdvancedPlugin, self).get_firewalls( + context, filters, fields) + if fields and rsi.ROUTER_ID not in fields: + return fws + service_router_bindings = self._get_resource_router_id_bindings( + context, firewall_db.Firewall, + resource_ids=[fw['id'] for fw in fws]) + mapping = dict([(binding['resource_id'], binding['router_id']) + for binding in service_router_bindings]) + for fw in fws: + fw[rsi.ROUTER_ID] = mapping[fw['id']] + return fws + + def update_firewall_rule(self, context, id, firewall_rule): + LOG.debug(_("update_firewall_rule() called")) + self._ensure_update_or_delete_firewall_rule(context, id) + fwr_pre = self.get_firewall_rule(context, id) + fwr = super(NsxAdvancedPlugin, self).update_firewall_rule( + context, id, firewall_rule) + if fwr_pre == fwr: + return fwr + + # check if this rule is associated with firewall 
+ fw_list = self._get_firewall_list_from_firewall_rule(context, id) + if not fw_list: + return fwr + + for fw in fw_list: + # get router service insertion binding with firewall id + service_router_binding = self._get_resource_router_id_binding( + context, firewall_db.Firewall, resource_id=fw['id']) + edge_id = self._get_edge_id_by_vcns_edge_binding( + context, service_router_binding.router_id) + + #TODO(linb): do rollback on error + self.vcns_driver.update_firewall_rule(context, id, edge_id, fwr) + + return fwr + + def update_firewall_policy(self, context, id, firewall_policy): + LOG.debug(_("update_firewall_policy() called")) + self._ensure_firewall_policy_update_allowed(context, id) + firewall_rules_pre = self._make_firewall_rule_list_by_policy_id( + context, id) + fwp = super(NsxAdvancedPlugin, self).update_firewall_policy( + context, id, firewall_policy) + firewall_rules = self._make_firewall_rule_list_by_policy_id( + context, id) + if firewall_rules_pre == firewall_rules: + return fwp + + # check if this policy is associated with firewall + fw_list = self._get_firewall_list_from_firewall_policy(context, id) + if not fw_list: + return fwp + + for fw in fw_list: + # Get the router_service insertion binding with firewall id + # TODO(fank): optimized by using _get_resource_router_id_bindings + service_router_binding = self._get_resource_router_id_binding( + context, firewall_db.Firewall, resource_id=fw['id']) + self._vcns_update_firewall( + context, fw, service_router_binding.router_id, + firewall_rule_list=firewall_rules) + return fwp + + def insert_rule(self, context, id, rule_info): + LOG.debug(_("insert_rule() called")) + self._ensure_firewall_policy_update_allowed(context, id) + fwp = super(NsxAdvancedPlugin, self).insert_rule( + context, id, rule_info) + fwr = super(NsxAdvancedPlugin, self).get_firewall_rule( + context, rule_info['firewall_rule_id']) + + # check if this policy is associated with firewall + fw_list = 
self._get_firewall_list_from_firewall_policy(context, id) + if not fw_list: + return fwp + for fw in fw_list: + # TODO(fank): optimized by using _get_resource_router_id_bindings + service_router_binding = self._get_resource_router_id_binding( + context, firewall_db.Firewall, resource_id=fw['id']) + edge_id = self._get_edge_id_by_vcns_edge_binding( + context, service_router_binding.router_id) + + if rule_info.get('insert_before') or rule_info.get('insert_after'): + #if insert_before or insert_after is set, we would call + #VCNS insert_rule API + #TODO(linb): do rollback on error + self.vcns_driver.insert_rule(context, rule_info, edge_id, fwr) + else: + #Else we would call bulk configuration on the firewall + self._vcns_update_firewall(context, fw, edge_id=edge_id) + return fwp + + def remove_rule(self, context, id, rule_info): + LOG.debug(_("remove_rule() called")) + self._ensure_firewall_policy_update_allowed(context, id) + fwp = super(NsxAdvancedPlugin, self).remove_rule( + context, id, rule_info) + fwr = super(NsxAdvancedPlugin, self).get_firewall_rule( + context, rule_info['firewall_rule_id']) + + # check if this policy is associated with firewall + fw_list = self._get_firewall_list_from_firewall_policy(context, id) + if not fw_list: + return fwp + for fw in fw_list: + # TODO(fank): optimized by using _get_resource_router_id_bindings + service_router_binding = self._get_resource_router_id_binding( + context, firewall_db.Firewall, resource_id=fw['id']) + edge_id = self._get_edge_id_by_vcns_edge_binding( + context, service_router_binding.router_id) + #TODO(linb): do rollback on error + self.vcns_driver.delete_firewall_rule( + context, fwr['id'], edge_id) + return fwp + + # + # LBAAS service plugin implementation + # + def _get_edge_id_by_vip_id(self, context, vip_id): + try: + service_router_binding = self._get_resource_router_id_binding( + context, loadbalancer_db.Vip, resource_id=vip_id) + except Exception: + with excutils.save_and_reraise_exception(): + 
LOG.exception(_("Failed to find the edge with " + "vip_id: %s"), vip_id) + return self._get_edge_id_by_vcns_edge_binding( + context, service_router_binding.router_id) + + def _get_all_vip_addrs_by_router_id( + self, context, router_id): + vip_bindings = self._get_resource_router_id_bindings( + context, loadbalancer_db.Vip, router_ids=[router_id]) + vip_addrs = [] + for vip_binding in vip_bindings: + vip = self.get_vip(context, vip_binding.resource_id) + vip_addrs.append(vip.get('address')) + return vip_addrs + + def _add_router_service_insertion_binding(self, context, resource_id, + router_id, + model): + res = { + 'id': resource_id, + 'router_id': router_id + } + self._process_create_resource_router_id(context, res, + model) + + def _resource_set_status(self, context, model, id, status, obj=None, + pool_id=None): + with context.session.begin(subtransactions=True): + try: + qry = context.session.query(model) + if issubclass(model, loadbalancer_db.PoolMonitorAssociation): + res = qry.filter_by(monitor_id=id, + pool_id=pool_id).one() + else: + res = qry.filter_by(id=id).one() + if status == service_constants.PENDING_UPDATE and ( + res.get('status') == service_constants.PENDING_DELETE): + msg = (_("Operation can't be performed, Since resource " + "%(model)s : %(id)s is in DELETEing status!") % + {'model': model, + 'id': id}) + LOG.error(msg) + raise nsx_exc.NsxPluginException(err_msg=msg) + else: + res.status = status + except sa_exc.NoResultFound: + msg = (_("Resource %(model)s : %(id)s not found!") % + {'model': model, + 'id': id}) + LOG.exception(msg) + raise nsx_exc.NsxPluginException(err_msg=msg) + if obj: + obj['status'] = status + + def _vcns_create_pool_and_monitors(self, context, pool_id, **kwargs): + pool = self.get_pool(context, pool_id) + edge_id = kwargs.get('edge_id') + if not edge_id: + edge_id = self._get_edge_id_by_vip_id( + context, pool['vip_id']) + #Check whether the pool is already created on the router + #in case of future's M:N relation between
Pool and Vip + + #Check associated HealthMonitors and then create them + for monitor_id in pool.get('health_monitors'): + hm = self.get_health_monitor(context, monitor_id) + try: + self.vcns_driver.create_health_monitor( + context, edge_id, hm) + except Exception: + with excutils.save_and_reraise_exception(): + LOG.exception(_("Failed to create healthmonitor " + "associated with pool id: %s!") % pool_id) + for monitor_ide in pool.get('health_monitors'): + if monitor_ide == monitor_id: + break + self.vcns_driver.delete_health_monitor( + context, monitor_ide, edge_id) + #Create the pool on the edge + members = [ + super(NsxAdvancedPlugin, self).get_member( + context, member_id) + for member_id in pool.get('members') + ] + try: + self.vcns_driver.create_pool(context, edge_id, pool, members) + except Exception: + with excutils.save_and_reraise_exception(): + LOG.exception(_("Failed to create pool on vshield edge")) + self.vcns_driver.delete_pool( + context, pool_id, edge_id) + for monitor_id in pool.get('health_monitors'): + self.vcns_driver.delete_health_monitor( + context, monitor_id, edge_id) + + def _vcns_update_pool(self, context, pool, **kwargs): + edge_id = self._get_edge_id_by_vip_id(context, pool['vip_id']) + members = kwargs.get('members') + if not members: + members = [ + super(NsxAdvancedPlugin, self).get_member( + context, member_id) + for member_id in pool.get('members') + ] + self.vcns_driver.update_pool(context, edge_id, pool, members) + + def create_vip(self, context, vip): + LOG.debug(_("create_vip() called")) + router_id = vip['vip'].get(vcns_const.ROUTER_ID) + self.check_router(context, router_id) + #Check whether the vip port is an external port + subnet_id = vip['vip']['subnet_id'] + network_id = self.get_subnet(context, subnet_id)['network_id'] + ext_net = self._get_network(context, network_id) + if not ext_net.external: + msg = (_("Network '%s' is not a valid external " + "network") % network_id) + raise nsx_exc.NsxPluginException(err_msg=msg) + 
+ v = super(NsxAdvancedPlugin, self).create_vip(context, vip) + #Get edge_id for the resource + router_binding = vcns_db.get_vcns_router_binding( + context.session, + router_id) + edge_id = router_binding.edge_id + #Add vip_router binding + self._add_router_service_insertion_binding(context, v['id'], + router_id, + loadbalancer_db.Vip) + #Create the vip port on vShield Edge + router = self._get_router(context, router_id) + self._update_interface(context, router, sync=True) + #Create the vip and associated pool/monitor on the corresponding edge + try: + self._vcns_create_pool_and_monitors( + context, v['pool_id'], edge_id=edge_id) + self.vcns_driver.create_vip(context, edge_id, v) + except Exception: + with excutils.save_and_reraise_exception(): + LOG.exception(_("Failed to create vip!")) + self._delete_resource_router_id_binding( + context, v['id'], loadbalancer_db.Vip) + super(NsxAdvancedPlugin, self).delete_vip(context, v['id']) + self._resource_set_status(context, loadbalancer_db.Vip, + v['id'], service_constants.ACTIVE, v) + v[rsi.ROUTER_ID] = router_id + + return v + + def update_vip(self, context, id, vip): + edge_id = self._get_edge_id_by_vip_id(context, id) + old_vip = self.get_vip(context, id) + session_persistence_update = bool( + vip['vip'].get('session_persistence')) + vip['vip']['status'] = service_constants.PENDING_UPDATE + v = super(NsxAdvancedPlugin, self).update_vip(context, id, vip) + v[rsi.ROUTER_ID] = self._get_resource_router_id_binding( + context, loadbalancer_db.Vip, resource_id=id)['router_id'] + if old_vip['pool_id'] != v['pool_id']: + self.vcns_driver.delete_vip(context, id) + #Delete old pool/monitor on the edge + #TODO(linb): Factor out procedure for removing pool and health + #separate method + old_pool = self.get_pool(context, old_vip['pool_id']) + self.vcns_driver.delete_pool( + context, old_vip['pool_id'], edge_id) + for monitor_id in old_pool.get('health_monitors'): + self.vcns_driver.delete_health_monitor( + context, monitor_id, 
edge_id) + #Create new pool/monitor object on the edge + #TODO(linb): add exception handle if error + self._vcns_create_pool_and_monitors( + context, v['pool_id'], edge_id=edge_id) + self.vcns_driver.create_vip(context, edge_id, v) + return v + try: + self.vcns_driver.update_vip(context, v, session_persistence_update) + except Exception: + with excutils.save_and_reraise_exception(): + LOG.exception(_("Failed to update vip with id: %s!"), id) + self._resource_set_status(context, loadbalancer_db.Vip, + id, service_constants.ERROR, v) + + self._resource_set_status(context, loadbalancer_db.Vip, + v['id'], service_constants.ACTIVE, v) + return v + + def delete_vip(self, context, id): + v = self.get_vip(context, id) + self._resource_set_status( + context, loadbalancer_db.Vip, + id, service_constants.PENDING_DELETE) + try: + self.vcns_driver.delete_vip(context, id) + except Exception: + with excutils.save_and_reraise_exception(): + LOG.exception(_("Failed to delete vip with id: %s!"), id) + self._resource_set_status(context, loadbalancer_db.Vip, + id, service_constants.ERROR) + edge_id = self._get_edge_id_by_vip_id(context, id) + #Check associated HealthMonitors and then delete them + pool = self.get_pool(context, v['pool_id']) + self.vcns_driver.delete_pool(context, v['pool_id'], edge_id) + for monitor_id in pool.get('health_monitors'): + #TODO(linb): do exception handle if error + self.vcns_driver.delete_health_monitor( + context, monitor_id, edge_id) + + router_binding = self._get_resource_router_id_binding( + context, loadbalancer_db.Vip, resource_id=id) + router = self._get_router(context, router_binding.router_id) + self._delete_resource_router_id_binding( + context, id, loadbalancer_db.Vip) + super(NsxAdvancedPlugin, self).delete_vip(context, id) + self._update_interface(context, router, sync=True) + + def get_vip(self, context, id, fields=None): + vip = super(NsxAdvancedPlugin, self).get_vip(context, id, fields) + if fields and rsi.ROUTER_ID not in fields: + 
return vip + + service_router_binding = self._get_resource_router_id_binding( + context, loadbalancer_db.Vip, resource_id=vip['id']) + vip[rsi.ROUTER_ID] = service_router_binding['router_id'] + return vip + + def get_vips(self, context, filters=None, fields=None): + vips = super(NsxAdvancedPlugin, self).get_vips( + context, filters, fields) + if fields and rsi.ROUTER_ID not in fields: + return vips + service_router_bindings = self._get_resource_router_id_bindings( + context, loadbalancer_db.Vip, + resource_ids=[vip['id'] for vip in vips]) + mapping = dict([(binding['resource_id'], binding['router_id']) + for binding in service_router_bindings]) + for vip in vips: + vip[rsi.ROUTER_ID] = mapping[vip['id']] + return vips + + def update_pool(self, context, id, pool): + pool['pool']['status'] = service_constants.PENDING_UPDATE + p = super(NsxAdvancedPlugin, self).update_pool(context, id, pool) + #Check whether the pool is already associated with the vip + if not p.get('vip_id'): + self._resource_set_status(context, loadbalancer_db.Pool, + p['id'], service_constants.ACTIVE, p) + return p + try: + self._vcns_update_pool(context, p) + except Exception: + with excutils.save_and_reraise_exception(): + LOG.exception(_("Failed to update pool with id: %s!"), id) + self._resource_set_status(context, loadbalancer_db.Pool, + p['id'], service_constants.ERROR, p) + self._resource_set_status(context, loadbalancer_db.Pool, + p['id'], service_constants.ACTIVE, p) + return p + + def create_member(self, context, member): + m = super(NsxAdvancedPlugin, self).create_member(context, member) + pool_id = m.get('pool_id') + pool = self.get_pool(context, pool_id) + if not pool.get('vip_id'): + self._resource_set_status(context, loadbalancer_db.Member, + m['id'], service_constants.ACTIVE, m) + return m + self._resource_set_status(context, loadbalancer_db.Pool, + pool_id, + service_constants.PENDING_UPDATE) + try: + self._vcns_update_pool(context, pool) + except Exception: + with 
excutils.save_and_reraise_exception(): + LOG.exception(_("Failed to update pool with the member")) + super(NsxAdvancedPlugin, self).delete_member(context, m['id']) + + self._resource_set_status(context, loadbalancer_db.Pool, + pool_id, service_constants.ACTIVE) + self._resource_set_status(context, loadbalancer_db.Member, + m['id'], service_constants.ACTIVE, m) + return m + + def update_member(self, context, id, member): + member['member']['status'] = service_constants.PENDING_UPDATE + old_member = self.get_member(context, id) + m = super(NsxAdvancedPlugin, self).update_member( + context, id, member) + + if m['pool_id'] != old_member['pool_id']: + old_pool_id = old_member['pool_id'] + old_pool = self.get_pool(context, old_pool_id) + if old_pool.get('vip_id'): + self._resource_set_status( + context, loadbalancer_db.Pool, + old_pool_id, service_constants.PENDING_UPDATE) + try: + self._vcns_update_pool(context, old_pool) + except Exception: + with excutils.save_and_reraise_exception(): + LOG.exception(_("Failed to update old pool " + "with the member")) + super(NsxAdvancedPlugin, self).delete_member( + context, m['id']) + self._resource_set_status( + context, loadbalancer_db.Pool, + old_pool_id, service_constants.ACTIVE) + + pool_id = m['pool_id'] + pool = self.get_pool(context, pool_id) + if not pool.get('vip_id'): + self._resource_set_status(context, loadbalancer_db.Member, + m['id'], service_constants.ACTIVE, m) + return m + self._resource_set_status(context, loadbalancer_db.Pool, + pool_id, + service_constants.PENDING_UPDATE) + try: + self._vcns_update_pool(context, pool) + except Exception: + with excutils.save_and_reraise_exception(): + LOG.exception(_("Failed to update pool with the member")) + super(NsxAdvancedPlugin, self).delete_member( + context, m['id']) + + self._resource_set_status(context, loadbalancer_db.Pool, + pool_id, service_constants.ACTIVE) + self._resource_set_status(context, loadbalancer_db.Member, + m['id'], service_constants.ACTIVE, m) + 
return m + + def delete_member(self, context, id): + m = self.get_member(context, id) + super(NsxAdvancedPlugin, self).delete_member(context, id) + pool_id = m['pool_id'] + pool = self.get_pool(context, pool_id) + if not pool.get('vip_id'): + return + self._resource_set_status(context, loadbalancer_db.Pool, + pool_id, service_constants.PENDING_UPDATE) + try: + self._vcns_update_pool(context, pool) + except Exception: + with excutils.save_and_reraise_exception(): + LOG.exception(_("Failed to update pool with the member")) + self._resource_set_status(context, loadbalancer_db.Pool, + pool_id, service_constants.ACTIVE) + + def update_health_monitor(self, context, id, health_monitor): + old_hm = super(NsxAdvancedPlugin, self).get_health_monitor( + context, id) + hm = super(NsxAdvancedPlugin, self).update_health_monitor( + context, id, health_monitor) + for hm_pool in hm.get('pools'): + pool_id = hm_pool['pool_id'] + pool = self.get_pool(context, pool_id) + if pool.get('vip_id'): + edge_id = self._get_edge_id_by_vip_id( + context, pool['vip_id']) + try: + self.vcns_driver.update_health_monitor( + context, edge_id, old_hm, hm) + except Exception: + with excutils.save_and_reraise_exception(): + LOG.exception(_("Failed to update monitor " + "with id: %s!"), id) + return hm + + def create_pool_health_monitor(self, context, + health_monitor, pool_id): + monitor_id = health_monitor['health_monitor']['id'] + pool = self.get_pool(context, pool_id) + monitors = pool.get('health_monitors') + if len(monitors) > 0: + msg = _("Vcns right now can only support " + "one monitor per pool") + LOG.error(msg) + raise nsx_exc.NsxPluginException(err_msg=msg) + #Check whether the pool is already associated with the vip + if not pool.get('vip_id'): + res = super(NsxAdvancedPlugin, + self).create_pool_health_monitor(context, + health_monitor, + pool_id) + return res + #Get the edge_id + edge_id = self._get_edge_id_by_vip_id(context, pool['vip_id']) + res = super(NsxAdvancedPlugin, + 
self).create_pool_health_monitor(context, + health_monitor, + pool_id) + monitor = self.get_health_monitor(context, monitor_id) + #TODO(linb)Add Exception handle if error + self.vcns_driver.create_health_monitor(context, edge_id, monitor) + #Get updated pool + pool['health_monitors'].append(monitor['id']) + self._resource_set_status( + context, loadbalancer_db.Pool, + pool_id, service_constants.PENDING_UPDATE) + try: + self._vcns_update_pool(context, pool) + except Exception: + with excutils.save_and_reraise_exception(): + LOG.exception(_("Failed to associate monitor with pool!")) + self._resource_set_status( + context, loadbalancer_db.Pool, + pool_id, service_constants.ERROR) + super(NsxAdvancedPlugin, self).delete_pool_health_monitor( + context, monitor_id, pool_id) + self._resource_set_status( + context, loadbalancer_db.Pool, + pool_id, service_constants.ACTIVE) + self._resource_set_status( + context, loadbalancer_db.PoolMonitorAssociation, + monitor_id, service_constants.ACTIVE, res, + pool_id=pool_id) + return res + + def delete_pool_health_monitor(self, context, id, pool_id): + super(NsxAdvancedPlugin, self).delete_pool_health_monitor( + context, id, pool_id) + pool = self.get_pool(context, pool_id) + #Check whether the pool is already associated with the vip + if pool.get('vip_id'): + #Delete the monitor on vshield edge + edge_id = self._get_edge_id_by_vip_id(context, pool['vip_id']) + self._resource_set_status( + context, loadbalancer_db.Pool, + pool_id, service_constants.PENDING_UPDATE) + try: + self._vcns_update_pool(context, pool) + except Exception: + with excutils.save_and_reraise_exception(): + LOG.exception( + _("Failed to update pool with pool_monitor!")) + self._resource_set_status( + context, loadbalancer_db.Pool, + pool_id, service_constants.ERROR) + #TODO(linb): Add exception handle if error + self.vcns_driver.delete_health_monitor(context, id, edge_id) + self._resource_set_status( + context, loadbalancer_db.Pool, + pool_id, 
service_constants.ACTIVE) + + def _vcns_update_ipsec_config( + self, context, vpnservice_id, removed_ipsec_conn_id=None): + sites = [] + vpn_service = self._get_vpnservice(context, vpnservice_id) + edge_id = self._get_edge_id_by_vcns_edge_binding( + context, vpn_service.router_id) + if not vpn_service.router.gw_port: + msg = _("Failed to update ipsec vpn configuration on edge, since " + "the router: %s does not have a gateway yet!" + ) % vpn_service.router_id + LOG.error(msg) + raise exceptions.VcnsBadRequest(resource='router', msg=msg) + + external_ip = vpn_service.router.gw_port['fixed_ips'][0]['ip_address'] + subnet = self._make_subnet_dict(vpn_service.subnet) + for ipsec_site_conn in vpn_service.ipsec_site_connections: + if ipsec_site_conn.id != removed_ipsec_conn_id: + site = self._make_ipsec_site_connection_dict(ipsec_site_conn) + ikepolicy = self._make_ikepolicy_dict( + ipsec_site_conn.ikepolicy) + ipsecpolicy = self._make_ipsecpolicy_dict( + ipsec_site_conn.ipsecpolicy) + sites.append({'site': site, + 'ikepolicy': ikepolicy, + 'ipsecpolicy': ipsecpolicy, + 'subnet': subnet, + 'external_ip': external_ip}) + try: + self.vcns_driver.update_ipsec_config( + edge_id, sites, enabled=vpn_service.admin_state_up) + except exceptions.VcnsBadRequest: + with excutils.save_and_reraise_exception(): + LOG.exception(_("Bad or unsupported Input request!")) + except exceptions.VcnsApiException: + with excutils.save_and_reraise_exception(): + msg = (_("Failed to update ipsec VPN configuration " + "with vpnservice: %(vpnservice_id)s on vShield Edge: " + "%(edge_id)s") % {'vpnservice_id': vpnservice_id, + 'edge_id': edge_id}) + LOG.exception(msg) + + def create_vpnservice(self, context, vpnservice): + LOG.debug(_("create_vpnservice() called")) + router_id = vpnservice['vpnservice'].get('router_id') + self.check_router(context, router_id) + if self.get_vpnservices(context, filters={'router_id': [router_id]}): + msg = _("a vpnservice is already associated with the router: %s" + ) 
% router_id + LOG.warning(msg) + raise nsx_exc.ServiceOverQuota( + overs='vpnservice', err_msg=msg) + + service = super(NsxAdvancedPlugin, self).create_vpnservice( + context, vpnservice) + self._resource_set_status( + context, vpn_db.VPNService, + service['id'], service_constants.ACTIVE, service) + return service + + def update_vpnservice(self, context, vpnservice_id, vpnservice): + vpnservice['vpnservice']['status'] = service_constants.PENDING_UPDATE + service = super(NsxAdvancedPlugin, self).update_vpnservice( + context, vpnservice_id, vpnservice) + # Only admin_state_up attribute is configurable on Edge. + if vpnservice['vpnservice'].get('admin_state_up') is None: + self._resource_set_status( + context, vpn_db.VPNService, + service['id'], service_constants.ACTIVE, service) + return service + # Test whether there is one ipsec site connection attached to + # the vpnservice. If not, just return without updating ipsec + # config on edge side. + vpn_service_db = self._get_vpnservice(context, vpnservice_id) + if not vpn_service_db.ipsec_site_connections: + self._resource_set_status( + context, vpn_db.VPNService, + service['id'], service_constants.ACTIVE, service) + return service + try: + self._vcns_update_ipsec_config(context, service['id']) + except Exception: + with excutils.save_and_reraise_exception(): + self._resource_set_status( + context, vpn_db.VPNService, + service['id'], service_constants.ERROR, service) + self._resource_set_status( + context, vpn_db.VPNService, + service['id'], service_constants.ACTIVE, service) + return service + + def create_ipsec_site_connection(self, context, ipsec_site_connection): + ipsec_site_conn = super( + NsxAdvancedPlugin, self).create_ipsec_site_connection( + context, ipsec_site_connection) + try: + self._vcns_update_ipsec_config( + context, ipsec_site_conn['vpnservice_id']) + except Exception: + with excutils.save_and_reraise_exception(): + super(NsxAdvancedPlugin, self).delete_ipsec_site_connection( + context, 
ipsec_site_conn['id']) + self._resource_set_status( + context, vpn_db.IPsecSiteConnection, + ipsec_site_conn['id'], service_constants.ACTIVE, ipsec_site_conn) + return ipsec_site_conn + + def update_ipsec_site_connection(self, context, ipsec_site_connection_id, + ipsec_site_connection): + ipsec_site_connection['ipsec_site_connection']['status'] = ( + service_constants.PENDING_UPDATE) + ipsec_site_conn = super( + NsxAdvancedPlugin, self).update_ipsec_site_connection( + context, ipsec_site_connection_id, ipsec_site_connection) + try: + self._vcns_update_ipsec_config( + context, ipsec_site_conn['vpnservice_id']) + except Exception: + with excutils.save_and_reraise_exception(): + self._resource_set_status( + context, vpn_db.IPsecSiteConnection, ipsec_site_conn['id'], + service_constants.ERROR, ipsec_site_conn) + self._resource_set_status( + context, vpn_db.IPsecSiteConnection, + ipsec_site_conn['id'], service_constants.ACTIVE, ipsec_site_conn) + return ipsec_site_conn + + def delete_ipsec_site_connection(self, context, ipsec_site_conn_id): + self._resource_set_status( + context, vpn_db.IPsecSiteConnection, + ipsec_site_conn_id, service_constants.PENDING_DELETE) + vpnservice_id = self.get_ipsec_site_connection( + context, ipsec_site_conn_id)['vpnservice_id'] + try: + self._vcns_update_ipsec_config( + context, vpnservice_id, ipsec_site_conn_id) + except Exception: + with excutils.save_and_reraise_exception(): + self._resource_set_status( + context, vpn_db.IPsecSiteConnection, ipsec_site_conn_id, + service_constants.ERROR) + super(NsxAdvancedPlugin, self).delete_ipsec_site_connection( + context, ipsec_site_conn_id) + + +class VcnsCallbacks(object): + """Edge callback implementation Callback functions for + asynchronous tasks. 
+ """ + def __init__(self, plugin): + self.plugin = plugin + + def edge_deploy_started(self, task): + """callback when deployment task started.""" + jobdata = task.userdata['jobdata'] + context = jobdata['context'] + edge_id = task.userdata.get('edge_id') + neutron_router_id = jobdata['neutron_router_id'] + name = task.userdata['router_name'] + if edge_id: + LOG.debug(_("Start deploying %(edge_id)s for router %(name)s"), { + 'edge_id': edge_id, + 'name': name}) + vcns_db.update_vcns_router_binding( + context.session, neutron_router_id, edge_id=edge_id) + else: + LOG.debug(_("Failed to deploy Edge for router %s"), name) + vcns_db.update_vcns_router_binding( + context.session, neutron_router_id, + status=service_constants.ERROR) + + def edge_deploy_result(self, task): + """callback when deployment task finished.""" + jobdata = task.userdata['jobdata'] + lrouter = jobdata['lrouter'] + context = jobdata['context'] + name = task.userdata['router_name'] + neutron_router_id = jobdata['neutron_router_id'] + router_db = None + try: + router_db = self.plugin._get_router( + context, neutron_router_id) + except l3.RouterNotFound: + # Router might have been deleted before deploy finished + LOG.exception(_("Router %s not found"), lrouter['uuid']) + + if task.status == tasks_const.TaskStatus.COMPLETED: + LOG.debug(_("Successfully deployed %(edge_id)s for " + "router %(name)s"), { + 'edge_id': task.userdata['edge_id'], + 'name': name}) + if (router_db and + router_db['status'] == service_constants.PENDING_CREATE): + router_db['status'] = service_constants.ACTIVE + + binding = vcns_db.get_vcns_router_binding( + context.session, neutron_router_id) + # only update status to active if its status is pending create + if binding['status'] == service_constants.PENDING_CREATE: + vcns_db.update_vcns_router_binding( + context.session, neutron_router_id, + status=service_constants.ACTIVE) + else: + LOG.debug(_("Failed to deploy Edge for router %s"), name) + if router_db: + router_db['status'] 
= service_constants.ERROR + vcns_db.update_vcns_router_binding( + context.session, neutron_router_id, + status=service_constants.ERROR) + + def edge_delete_result(self, task): + jobdata = task.userdata['jobdata'] + router_id = task.userdata['router_id'] + context = jobdata['context'] + if task.status == tasks_const.TaskStatus.COMPLETED: + vcns_db.delete_vcns_router_binding(context.session, + router_id) + + def interface_update_result(self, task): + LOG.debug(_("interface_update_result %d"), task.status) + + def snat_create_result(self, task): + LOG.debug(_("snat_create_result %d"), task.status) + + def snat_delete_result(self, task): + LOG.debug(_("snat_delete_result %d"), task.status) + + def dnat_create_result(self, task): + LOG.debug(_("dnat_create_result %d"), task.status) + + def dnat_delete_result(self, task): + LOG.debug(_("dnat_delete_result %d"), task.status) + + def routes_update_result(self, task): + LOG.debug(_("routes_update_result %d"), task.status) + + def nat_update_result(self, task): + LOG.debug(_("nat_update_result %d"), task.status) + + +def _process_base_create_lswitch_args(*args, **kwargs): + tags = utils.get_tags() + tags.append({"tag": args[1], + "scope": "quantum_net_id"}) + if args[2]: + tags.append({"tag": args[2], "scope": "os_tid"}) + switch_name = args[3] + tz_config = args[4] + if kwargs.get("shared", False) or len(args) >= 6: + tags.append({"tag": "true", "scope": "shared"}) + if kwargs.get("tags"): + tags.extend(kwargs["tags"]) + return switch_name, tz_config, tags diff --git a/neutron/plugins/vmware/shell/__init__.py b/neutron/plugins/vmware/shell/__init__.py new file mode 100644 index 000000000..e0b15b8d2 --- /dev/null +++ b/neutron/plugins/vmware/shell/__init__.py @@ -0,0 +1,41 @@ +# Copyright 2014 VMware, Inc. +# +# All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
class NsxManage(shell.NeutronShell):
    """neutronclient-based shell exposing NSX network migration commands."""

    def __init__(self, api_version):
        super(NsxManage, self).__init__(api_version)
        self.command_manager.add_command('net-migrate', cmd.NetworkMigrate)
        self.command_manager.add_command('net-report', cmd.NetworkReport)

    def build_option_parser(self, description, version):
        # No extra global options beyond the stock neutron shell ones.
        return super(NsxManage, self).build_option_parser(
            description, version)

    def initialize_app(self, argv):
        super(NsxManage, self).initialize_app(argv)
        # Cache the neutron client for use by the registered commands.
        self.client = self.client_manager.neutron


def main():
    """Entry point: run the NSX management shell with the CLI arguments."""
    return NsxManage(shell.NEUTRON_API_VERSION).run(sys.argv[1:])
LSN_PATH = '/lsns'


def print_report(write_func, report):
    """Write a human-readable migration report through write_func."""
    write_func(_("\nService type = %s\n") % report['report']['type'])
    write_func(_("Service uuids = %s\n")
               % ','.join(report['report']['services']))
    write_func(_("Port uuids = %s\n\n")
               % ','.join(report['report']['ports']))


def _find_network_id(app, name_or_id):
    """Resolve a network name or id to its uuid via the API."""
    return client.find_resourceid_by_name_or_id(
        app.client, 'network', name_or_id)


class NetworkReport(client.NeutronCommand):
    """Retrieve network migration report."""

    def get_parser(self, prog_name):
        parser = super(NetworkReport, self).get_parser(prog_name)
        parser.add_argument('network', metavar='network',
                            help=_('ID or name of network to run report on'))
        return parser

    def run(self, parsed_args):
        net_id = _find_network_id(self.app, parsed_args.network)
        res = self.app.client.get("%s/%s" % (LSN_PATH, net_id))
        if res:
            self.app.stdout.write(_('Migration report is:\n'))
            print_report(self.app.stdout.write, res['lsn'])


class NetworkMigrate(client.NeutronCommand):
    """Perform network migration."""

    def get_parser(self, prog_name):
        parser = super(NetworkMigrate, self).get_parser(prog_name)
        parser.add_argument('network', metavar='network',
                            help=_('ID or name of network to migrate'))
        return parser

    def run(self, parsed_args):
        net_id = _find_network_id(self.app, parsed_args.network)
        res = self.app.client.post(LSN_PATH,
                                   body={'lsn': {'network': net_id}})
        if res:
            self.app.stdout.write(_('Migration has been successful:\n'))
            print_report(self.app.stdout.write, res['lsn'])
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/neutron/plugins/vmware/vshield/common/VcnsApiClient.py b/neutron/plugins/vmware/vshield/common/VcnsApiClient.py new file mode 100644 index 000000000..7127b6780 --- /dev/null +++ b/neutron/plugins/vmware/vshield/common/VcnsApiClient.py @@ -0,0 +1,80 @@ +# Copyright 2013 VMware, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
def xmldumps(obj):
    """Serialize a dict/list/scalar structure into a flat XML fragment.

    Dict keys become element names wrapping their serialized values,
    list items are concatenated, and anything else passes through
    unchanged.

    Bug fix: the original built each element with
    ``"<%s>%s" % (key, xmldumps(value), key)`` — three arguments for two
    placeholders, which raises TypeError on any dict input; the closing
    tag ``"</%s>"`` was missing from the format string.
    """
    if isinstance(obj, dict):
        # Element order follows dict iteration order.
        return "".join("<%s>%s</%s>" % (key, xmldumps(value), key)
                       for key, value in obj.items())
    if isinstance(obj, list):
        return "".join(xmldumps(value) for value in obj)
    # Scalars (typically strings) are emitted as-is.
    return obj
# Keys used in vcns router bindings.
EDGE_ID = 'edge_id'
ROUTER_ID = 'router_id'

# vnic layout on the edge appliance: vnic 0 is the uplink,
# vnic 1 faces the tenant network.
EXTERNAL_VNIC_INDEX = 0
INTERNAL_VNIC_INDEX = 1
EXTERNAL_VNIC_NAME = "external"
INTERNAL_VNIC_NAME = "internal"

# Link-local addressing of the integration segment between the
# logical router and the edge.
INTEGRATION_LR_IPADDRESS = "169.254.2.1/28"
INTEGRATION_EDGE_IPADDRESS = "169.254.2.3"
INTEGRATION_SUBNET_NETMASK = "255.255.255.240"

# SNAT rule insertion points within the NAT rule list.
PREPEND = 0
APPEND = -1

# vShield API error code meaning the edge VM is not running.
VCNS_ERROR_CODE_EDGE_NOT_RUNNING = 10013

SUFFIX_LENGTH = 8


class RouterStatus(object):
    """Numeric status levels reported for edge-backed routers."""
    ROUTER_STATUS_ACTIVE = 0
    ROUTER_STATUS_DOWN = 1
    ROUTER_STATUS_PENDING_CREATE = 2
    ROUTER_STATUS_PENDING_DELETE = 3
    ROUTER_STATUS_ERROR = 4
from neutron.common import exceptions


class VcnsException(exceptions.NeutronException):
    """Root of the vShield (VCNS) exception hierarchy."""
    pass


class VcnsGeneralException(VcnsException):
    """VCNS error carrying a free-form message."""

    def __init__(self, message):
        self.message = message
        super(VcnsGeneralException, self).__init__()


class VcnsBadRequest(exceptions.BadRequest):
    """Raised for invalid or unsupported VCNS input."""
    pass


class VcnsNotFound(exceptions.NotFound):
    message = _('%(resource)s not found: %(msg)s')


class VcnsApiException(VcnsException):
    """Error returned by the vShield REST API.

    Keeps the HTTP status, the response headers and the body available
    so callers can inspect the failure.
    """
    message = _("An unknown exception %(status)s occurred: %(response)s.")

    def __init__(self, **kwargs):
        super(VcnsApiException, self).__init__(**kwargs)
        self.status = kwargs.get('status')
        self.header = kwargs.get('header')
        self.response = kwargs.get('response')


class ResourceRedirect(VcnsApiException):
    message = _("Resource %(uri)s has been redirected")


class RequestBad(VcnsApiException):
    message = _("Request %(uri)s is Bad, response %(response)s")


class Forbidden(VcnsApiException):
    message = _("Forbidden: %(uri)s")


class ResourceNotFound(VcnsApiException):
    message = _("Resource %(uri)s not found")


class MediaTypeUnsupport(VcnsApiException):
    message = _("Media Type %(uri)s is not supported")


class ServiceUnavailable(VcnsApiException):
    message = _("Service Unavailable: %(uri)s")
class EdgeApplianceDriver(object):
    """Assembles vShield Edge API payloads and manages edge appliances."""

    def __init__(self):
        # Per-category map of edge_id -> last queued task, so newer
        # updates can supersede stale pending ones.
        self.updated_task = {
            'nat': {},
            'route': {},
        }

    def _assemble_edge(self, name, appliance_size="compact",
                       deployment_container_id=None, datacenter_moid=None,
                       enable_aesni=True, hypervisor_assist=False,
                       enable_fips=False, remote_access=False):
        """Build the request body for edge creation.

        Bug fix: the original assignment read
        ``edge['datacenterMoid'] = datacenter_moid,`` — the trailing
        comma wrapped the id in a 1-tuple, producing an invalid value
        in the deploy payload. The comma is removed here.
        """
        edge = {
            'name': name,
            'fqdn': name,
            'hypervisorAssist': hypervisor_assist,
            'type': 'gatewayServices',
            'enableAesni': enable_aesni,
            'enableFips': enable_fips,
            'cliSettings': {
                'remoteAccess': remote_access
            },
            'appliances': {
                'applianceSize': appliance_size
            },
            'vnics': {
                'vnics': []
            }
        }
        if deployment_container_id:
            edge['appliances']['deploymentContainerId'] = (
                deployment_container_id)
        if datacenter_moid:
            edge['datacenterMoid'] = datacenter_moid
        return edge

    def _assemble_edge_appliance(self, resource_pool_id, datastore_id):
        """Build the optional appliance placement sub-document."""
        appliance = {}
        if resource_pool_id:
            appliance['resourcePoolId'] = resource_pool_id
        if datastore_id:
            appliance['datastoreId'] = datastore_id
        return appliance

    def _assemble_edge_vnic(self, name, index, portgroup_id,
                            primary_address=None, subnet_mask=None,
                            secondary=None,
                            type="internal",
                            enable_proxy_arp=False,
                            enable_send_redirects=True,
                            is_connected=True,
                            mtu=1500):
        """Build a vnic sub-document.

        The address group is attached only when both a primary address
        and a subnet mask are supplied.
        """
        vnic = {
            'index': index,
            'name': name,
            'type': type,
            'portgroupId': portgroup_id,
            'mtu': mtu,
            'enableProxyArp': enable_proxy_arp,
            'enableSendRedirects': enable_send_redirects,
            'isConnected': is_connected
        }
        if primary_address and subnet_mask:
            address_group = {
                'primaryAddress': primary_address,
                'subnetMask': subnet_mask
            }
            if secondary:
                address_group['secondaryAddresses'] = {
                    'ipAddress': secondary,
                    'type': 'IpAddressesDto'
                }
            vnic['addressGroups'] = {
                'addressGroups': [address_group]
            }
        return vnic

    def _edge_status_to_level(self, status):
        """Map a vShield edge color status to a RouterStatus level."""
        if status == 'GREEN':
            return common_constants.RouterStatus.ROUTER_STATUS_ACTIVE
        if status in ('GREY', 'YELLOW'):
            return common_constants.RouterStatus.ROUTER_STATUS_DOWN
        return common_constants.RouterStatus.ROUTER_STATUS_ERROR

    def _enable_loadbalancer(self, edge):
        """Append the loadbalancer feature flag to the edge request."""
        if not edge.get('featureConfigs') or (
            not edge['featureConfigs'].get('features')):
            edge['featureConfigs'] = {'features': []}
        edge['featureConfigs']['features'].append(
            {'featureType': 'loadbalancer_4.0',
             'enabled': True})
exceptions.VcnsApiException as e: + LOG.exception(_("VCNS: Failed to get edge status:\n%s"), + e.response) + status_level = common_constants.RouterStatus.ROUTER_STATUS_ERROR + try: + desc = jsonutils.loads(e.response) + if desc.get('errorCode') == ( + vcns_const.VCNS_ERROR_CODE_EDGE_NOT_RUNNING): + status_level = ( + common_constants.RouterStatus.ROUTER_STATUS_DOWN) + except ValueError: + LOG.exception(e.response) + + return status_level + + def get_edges_statuses(self): + edges_status_level = {} + edges = self._get_edges() + for edge in edges['edgePage'].get('data', []): + edge_id = edge['id'] + status = edge['edgeStatus'] + edges_status_level[edge_id] = self._edge_status_to_level(status) + + return edges_status_level + + def _update_interface(self, task): + edge_id = task.userdata['edge_id'] + config = task.userdata['config'] + LOG.debug(_("VCNS: start updating vnic %s"), config) + try: + self.vcns.update_interface(edge_id, config) + except exceptions.VcnsApiException as e: + with excutils.save_and_reraise_exception(): + LOG.exception(_("VCNS: Failed to update vnic %(config)s:\n" + "%(response)s"), { + 'config': config, + 'response': e.response}) + except Exception: + with excutils.save_and_reraise_exception(): + LOG.exception(_("VCNS: Failed to update vnic %d"), + config['index']) + + return constants.TaskStatus.COMPLETED + + def update_interface(self, router_id, edge_id, index, network, + address=None, netmask=None, secondary=None, + jobdata=None): + LOG.debug(_("VCNS: update vnic %(index)d: %(addr)s %(netmask)s"), { + 'index': index, 'addr': address, 'netmask': netmask}) + if index == vcns_const.EXTERNAL_VNIC_INDEX: + name = vcns_const.EXTERNAL_VNIC_NAME + intf_type = 'uplink' + elif index == vcns_const.INTERNAL_VNIC_INDEX: + name = vcns_const.INTERNAL_VNIC_NAME + intf_type = 'internal' + else: + msg = _("Vnic %d currently not supported") % index + raise exceptions.VcnsGeneralException(msg) + + config = self._assemble_edge_vnic( + name, index, network, 
address, netmask, secondary, type=intf_type) + + userdata = { + 'edge_id': edge_id, + 'config': config, + 'jobdata': jobdata + } + task_name = "update-interface-%s-%d" % (edge_id, index) + task = tasks.Task(task_name, router_id, + self._update_interface, userdata=userdata) + task.add_result_monitor(self.callbacks.interface_update_result) + self.task_manager.add(task) + return task + + def _deploy_edge(self, task): + userdata = task.userdata + name = userdata['router_name'] + LOG.debug(_("VCNS: start deploying edge %s"), name) + request = userdata['request'] + try: + header = self.vcns.deploy_edge(request)[0] + objuri = header['location'] + job_id = objuri[objuri.rfind("/") + 1:] + response = self.vcns.get_edge_id(job_id)[1] + edge_id = response['edgeId'] + LOG.debug(_("VCNS: deploying edge %s"), edge_id) + userdata['edge_id'] = edge_id + status = constants.TaskStatus.PENDING + except exceptions.VcnsApiException: + with excutils.save_and_reraise_exception(): + LOG.exception(_("VCNS: deploy edge failed for router %s."), + name) + + return status + + def _status_edge(self, task): + edge_id = task.userdata['edge_id'] + try: + response = self.vcns.get_edge_deploy_status(edge_id)[1] + task.userdata['retries'] = 0 + system_status = response.get('systemStatus', None) + if system_status is None: + status = constants.TaskStatus.PENDING + elif system_status == 'good': + status = constants.TaskStatus.COMPLETED + else: + status = constants.TaskStatus.ERROR + except exceptions.VcnsApiException: + with excutils.save_and_reraise_exception(): + LOG.exception(_("VCNS: Edge %s status query failed."), edge_id) + except Exception: + retries = task.userdata.get('retries', 0) + 1 + if retries < 3: + task.userdata['retries'] = retries + msg = _("VCNS: Unable to retrieve edge %(edge_id)s status. " + "Retry %(retries)d.") % { + 'edge_id': edge_id, + 'retries': retries} + LOG.exception(msg) + status = constants.TaskStatus.PENDING + else: + msg = _("VCNS: Unable to retrieve edge %s status. 
" + "Abort.") % edge_id + LOG.exception(msg) + status = constants.TaskStatus.ERROR + LOG.debug(_("VCNS: Edge %s status"), edge_id) + return status + + def _result_edge(self, task): + router_name = task.userdata['router_name'] + edge_id = task.userdata.get('edge_id') + if task.status != constants.TaskStatus.COMPLETED: + LOG.error(_("VCNS: Failed to deploy edge %(edge_id)s " + "for %(name)s, status %(status)d"), { + 'edge_id': edge_id, + 'name': router_name, + 'status': task.status + }) + else: + LOG.debug(_("VCNS: Edge %(edge_id)s deployed for " + "router %(name)s"), { + 'edge_id': edge_id, 'name': router_name + }) + + def _delete_edge(self, task): + edge_id = task.userdata['edge_id'] + LOG.debug(_("VCNS: start destroying edge %s"), edge_id) + status = constants.TaskStatus.COMPLETED + if edge_id: + try: + self.vcns.delete_edge(edge_id) + except exceptions.ResourceNotFound: + pass + except exceptions.VcnsApiException as e: + msg = _("VCNS: Failed to delete %(edge_id)s:\n" + "%(response)s") % { + 'edge_id': edge_id, 'response': e.response} + LOG.exception(msg) + status = constants.TaskStatus.ERROR + except Exception: + LOG.exception(_("VCNS: Failed to delete %s"), edge_id) + status = constants.TaskStatus.ERROR + + return status + + def _get_edges(self): + try: + return self.vcns.get_edges()[1] + except exceptions.VcnsApiException as e: + with excutils.save_and_reraise_exception(): + LOG.exception(_("VCNS: Failed to get edges:\n%s"), e.response) + + def deploy_edge(self, router_id, name, internal_network, jobdata=None, + wait_for_exec=False, loadbalancer_enable=True): + task_name = 'deploying-%s' % name + edge_name = name + edge = self._assemble_edge( + edge_name, datacenter_moid=self.datacenter_moid, + deployment_container_id=self.deployment_container_id, + appliance_size='large', remote_access=True) + appliance = self._assemble_edge_appliance(self.resource_pool_id, + self.datastore_id) + if appliance: + edge['appliances']['appliances'] = [appliance] + + vnic_external 
= self._assemble_edge_vnic( + vcns_const.EXTERNAL_VNIC_NAME, vcns_const.EXTERNAL_VNIC_INDEX, + self.external_network, type="uplink") + edge['vnics']['vnics'].append(vnic_external) + vnic_inside = self._assemble_edge_vnic( + vcns_const.INTERNAL_VNIC_NAME, vcns_const.INTERNAL_VNIC_INDEX, + internal_network, + vcns_const.INTEGRATION_EDGE_IPADDRESS, + vcns_const.INTEGRATION_SUBNET_NETMASK, + type="internal") + edge['vnics']['vnics'].append(vnic_inside) + if loadbalancer_enable: + self._enable_loadbalancer(edge) + userdata = { + 'request': edge, + 'router_name': name, + 'jobdata': jobdata + } + task = tasks.Task(task_name, router_id, + self._deploy_edge, + status_callback=self._status_edge, + result_callback=self._result_edge, + userdata=userdata) + task.add_executed_monitor(self.callbacks.edge_deploy_started) + task.add_result_monitor(self.callbacks.edge_deploy_result) + self.task_manager.add(task) + + if wait_for_exec: + # wait until the deploy task is executed so edge_id is available + task.wait(constants.TaskState.EXECUTED) + + return task + + def delete_edge(self, router_id, edge_id, jobdata=None): + task_name = 'delete-%s' % edge_id + userdata = { + 'router_id': router_id, + 'edge_id': edge_id, + 'jobdata': jobdata + } + task = tasks.Task(task_name, router_id, self._delete_edge, + userdata=userdata) + task.add_result_monitor(self.callbacks.edge_delete_result) + self.task_manager.add(task) + return task + + def _assemble_nat_rule(self, action, original_address, + translated_address, + vnic_index=vcns_const.EXTERNAL_VNIC_INDEX, + enabled=True): + nat_rule = {} + nat_rule['action'] = action + nat_rule['vnic'] = vnic_index + nat_rule['originalAddress'] = original_address + nat_rule['translatedAddress'] = translated_address + nat_rule['enabled'] = enabled + return nat_rule + + def get_nat_config(self, edge_id): + try: + return self.vcns.get_nat_config(edge_id)[1] + except exceptions.VcnsApiException as e: + with excutils.save_and_reraise_exception(): + 
LOG.exception(_("VCNS: Failed to get nat config:\n%s"), + e.response) + + def _create_nat_rule(self, task): + # TODO(fank): use POST for optimization + # return rule_id for future reference + rule = task.userdata['rule'] + LOG.debug(_("VCNS: start creating nat rules: %s"), rule) + edge_id = task.userdata['edge_id'] + nat = self.get_nat_config(edge_id) + location = task.userdata['location'] + + del nat['version'] + + if location is None or location == vcns_const.APPEND: + nat['rules']['natRulesDtos'].append(rule) + else: + nat['rules']['natRulesDtos'].insert(location, rule) + + try: + self.vcns.update_nat_config(edge_id, nat) + status = constants.TaskStatus.COMPLETED + except exceptions.VcnsApiException as e: + LOG.exception(_("VCNS: Failed to create snat rule:\n%s"), + e.response) + status = constants.TaskStatus.ERROR + + return status + + def create_snat_rule(self, router_id, edge_id, src, translated, + jobdata=None, location=None): + LOG.debug(_("VCNS: create snat rule %(src)s/%(translated)s"), { + 'src': src, 'translated': translated}) + snat_rule = self._assemble_nat_rule("snat", src, translated) + userdata = { + 'router_id': router_id, + 'edge_id': edge_id, + 'rule': snat_rule, + 'location': location, + 'jobdata': jobdata + } + task_name = "create-snat-%s-%s-%s" % (edge_id, src, translated) + task = tasks.Task(task_name, router_id, self._create_nat_rule, + userdata=userdata) + task.add_result_monitor(self.callbacks.snat_create_result) + self.task_manager.add(task) + return task + + def _delete_nat_rule(self, task): + # TODO(fank): pass in rule_id for optimization + # handle routes update for optimization + edge_id = task.userdata['edge_id'] + address = task.userdata['address'] + addrtype = task.userdata['addrtype'] + LOG.debug(_("VCNS: start deleting %(type)s rules: %(addr)s"), { + 'type': addrtype, 'addr': address}) + nat = self.get_nat_config(edge_id) + del nat['version'] + status = constants.TaskStatus.COMPLETED + for nat_rule in 
nat['rules']['natRulesDtos']: + if nat_rule[addrtype] == address: + rule_id = nat_rule['ruleId'] + try: + self.vcns.delete_nat_rule(edge_id, rule_id) + except exceptions.VcnsApiException as e: + LOG.exception(_("VCNS: Failed to delete snat rule:\n" + "%s"), e.response) + status = constants.TaskStatus.ERROR + + return status + + def delete_snat_rule(self, router_id, edge_id, src, jobdata=None): + LOG.debug(_("VCNS: delete snat rule %s"), src) + userdata = { + 'edge_id': edge_id, + 'address': src, + 'addrtype': 'originalAddress', + 'jobdata': jobdata + } + task_name = "delete-snat-%s-%s" % (edge_id, src) + task = tasks.Task(task_name, router_id, self._delete_nat_rule, + userdata=userdata) + task.add_result_monitor(self.callbacks.snat_delete_result) + self.task_manager.add(task) + return task + + def create_dnat_rule(self, router_id, edge_id, dst, translated, + jobdata=None, location=None): + # TODO(fank): use POST for optimization + # return rule_id for future reference + LOG.debug(_("VCNS: create dnat rule %(dst)s/%(translated)s"), { + 'dst': dst, 'translated': translated}) + dnat_rule = self._assemble_nat_rule( + "dnat", dst, translated) + userdata = { + 'router_id': router_id, + 'edge_id': edge_id, + 'rule': dnat_rule, + 'location': location, + 'jobdata': jobdata + } + task_name = "create-dnat-%s-%s-%s" % (edge_id, dst, translated) + task = tasks.Task(task_name, router_id, self._create_nat_rule, + userdata=userdata) + task.add_result_monitor(self.callbacks.dnat_create_result) + self.task_manager.add(task) + return task + + def delete_dnat_rule(self, router_id, edge_id, translated, + jobdata=None): + # TODO(fank): pass in rule_id for optimization + LOG.debug(_("VCNS: delete dnat rule %s"), translated) + userdata = { + 'edge_id': edge_id, + 'address': translated, + 'addrtype': 'translatedAddress', + 'jobdata': jobdata + } + task_name = "delete-dnat-%s-%s" % (edge_id, translated) + task = tasks.Task(task_name, router_id, self._delete_nat_rule, + userdata=userdata) + 
task.add_result_monitor(self.callbacks.dnat_delete_result) + self.task_manager.add(task) + return task + + def _update_nat_rule(self, task): + # TODO(fank): use POST for optimization + # return rule_id for future reference + edge_id = task.userdata['edge_id'] + if task != self.updated_task['nat'][edge_id]: + # this task does not have the latest config, abort now + # for speedup + return constants.TaskStatus.ABORT + + rules = task.userdata['rules'] + LOG.debug(_("VCNS: start updating nat rules: %s"), rules) + + nat = { + 'featureType': 'nat', + 'rules': { + 'natRulesDtos': rules + } + } + + try: + self.vcns.update_nat_config(edge_id, nat) + status = constants.TaskStatus.COMPLETED + except exceptions.VcnsApiException as e: + LOG.exception(_("VCNS: Failed to create snat rule:\n%s"), + e.response) + status = constants.TaskStatus.ERROR + + return status + + def update_nat_rules(self, router_id, edge_id, snats, dnats, + jobdata=None): + LOG.debug(_("VCNS: update nat rule\n" + "SNAT:%(snat)s\n" + "DNAT:%(dnat)s\n"), { + 'snat': snats, 'dnat': dnats}) + nat_rules = [] + + for dnat in dnats: + nat_rules.append(self._assemble_nat_rule( + 'dnat', dnat['dst'], dnat['translated'])) + nat_rules.append(self._assemble_nat_rule( + 'snat', dnat['translated'], dnat['dst'])) + + for snat in snats: + nat_rules.append(self._assemble_nat_rule( + 'snat', snat['src'], snat['translated'])) + + userdata = { + 'edge_id': edge_id, + 'rules': nat_rules, + 'jobdata': jobdata, + } + task_name = "update-nat-%s" % edge_id + task = tasks.Task(task_name, router_id, self._update_nat_rule, + userdata=userdata) + task.add_result_monitor(self.callbacks.nat_update_result) + self.updated_task['nat'][edge_id] = task + self.task_manager.add(task) + return task + + def _update_routes(self, task): + edge_id = task.userdata['edge_id'] + if (task != self.updated_task['route'][edge_id] and + task.userdata.get('skippable', True)): + # this task does not have the latest config, abort now + # for speedup + return 
constants.TaskStatus.ABORT + gateway = task.userdata['gateway'] + routes = task.userdata['routes'] + LOG.debug(_("VCNS: start updating routes for %s"), edge_id) + static_routes = [] + for route in routes: + static_routes.append({ + "description": "", + "vnic": vcns_const.INTERNAL_VNIC_INDEX, + "network": route['cidr'], + "nextHop": route['nexthop'] + }) + request = { + "staticRoutes": { + "staticRoutes": static_routes + } + } + if gateway: + request["defaultRoute"] = { + "description": "default-gateway", + "gatewayAddress": gateway, + "vnic": vcns_const.EXTERNAL_VNIC_INDEX + } + try: + self.vcns.update_routes(edge_id, request) + status = constants.TaskStatus.COMPLETED + except exceptions.VcnsApiException as e: + LOG.exception(_("VCNS: Failed to update routes:\n%s"), + e.response) + status = constants.TaskStatus.ERROR + + return status + + def update_routes(self, router_id, edge_id, gateway, routes, + skippable=True, jobdata=None): + if gateway: + gateway = gateway.split('/')[0] + + userdata = { + 'edge_id': edge_id, + 'gateway': gateway, + 'routes': routes, + 'skippable': skippable, + 'jobdata': jobdata + } + task_name = "update-routes-%s" % (edge_id) + task = tasks.Task(task_name, router_id, self._update_routes, + userdata=userdata) + task.add_result_monitor(self.callbacks.routes_update_result) + self.updated_task['route'][edge_id] = task + self.task_manager.add(task) + return task + + def create_lswitch(self, name, tz_config, tags=None, + port_isolation=False, replication_mode="service"): + lsconfig = { + 'display_name': utils.check_and_truncate(name), + "tags": tags or [], + "type": "LogicalSwitchConfig", + "_schema": "/ws.v1/schema/LogicalSwitchConfig", + "transport_zones": tz_config + } + if port_isolation is bool: + lsconfig["port_isolation_enabled"] = port_isolation + if replication_mode: + lsconfig["replication_mode"] = replication_mode + + response = self.vcns.create_lswitch(lsconfig)[1] + return response + + def delete_lswitch(self, lswitch_id): + 
self.vcns.delete_lswitch(lswitch_id) + + def get_loadbalancer_config(self, edge_id): + try: + header, response = self.vcns.get_loadbalancer_config( + edge_id) + except exceptions.VcnsApiException: + with excutils.save_and_reraise_exception(): + LOG.exception(_("Failed to get service config")) + return response + + def enable_service_loadbalancer(self, edge_id): + config = self.get_loadbalancer_config( + edge_id) + if not config['enabled']: + config['enabled'] = True + try: + self.vcns.enable_service_loadbalancer(edge_id, config) + except exceptions.VcnsApiException: + with excutils.save_and_reraise_exception(): + LOG.exception(_("Failed to enable loadbalancer " + "service config")) diff --git a/neutron/plugins/vmware/vshield/edge_firewall_driver.py b/neutron/plugins/vmware/vshield/edge_firewall_driver.py new file mode 100644 index 000000000..f2e899645 --- /dev/null +++ b/neutron/plugins/vmware/vshield/edge_firewall_driver.py @@ -0,0 +1,354 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013 VMware, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# +# @author: Leon Cui, VMware + +from neutron.db import db_base_plugin_v2 +from neutron.openstack.common import excutils +from neutron.openstack.common import log as logging +from neutron.plugins.common import constants +from neutron.plugins.vmware.dbexts import vcns_db +from neutron.plugins.vmware.vshield.common import ( + exceptions as vcns_exc) + +LOG = logging.getLogger(__name__) + +VSE_FWAAS_ALLOW = "accept" +VSE_FWAAS_DENY = "deny" + + +class EdgeFirewallDriver(db_base_plugin_v2.NeutronDbPluginV2): + """Implementation of driver APIs for + Edge Firewall feature configuration + """ + def _convert_firewall_action(self, action): + if action == constants.FWAAS_ALLOW: + return VSE_FWAAS_ALLOW + elif action == constants.FWAAS_DENY: + return VSE_FWAAS_DENY + else: + msg = _("Invalid action value %s in a firewall rule") % action + raise vcns_exc.VcnsBadRequest(resource='firewall_rule', msg=msg) + + def _restore_firewall_action(self, action): + if action == VSE_FWAAS_ALLOW: + return constants.FWAAS_ALLOW + elif action == VSE_FWAAS_DENY: + return constants.FWAAS_DENY + else: + msg = (_("Invalid action value %s in " + "a vshield firewall rule") % action) + raise vcns_exc.VcnsBadRequest(resource='firewall_rule', msg=msg) + + def _get_port_range_from_min_max_ports(self, min_port, max_port): + if not min_port: + return None + if min_port == max_port: + return str(min_port) + else: + return '%d:%d' % (min_port, max_port) + + def _get_min_max_ports_from_range(self, port_range): + if not port_range: + return [None, None] + min_port, sep, max_port = port_range.partition(":") + if not max_port: + max_port = min_port + return [int(min_port), int(max_port)] + + def _convert_firewall_rule(self, context, rule, index=None): + vcns_rule = { + "name": rule['name'], + "description": rule['description'], + "action": self._convert_firewall_action(rule['action']), + "enabled": rule['enabled']} + if rule.get('source_ip_address'): + vcns_rule['source'] = { + "ipAddress": 
[rule['source_ip_address']] + } + if rule.get('destination_ip_address'): + vcns_rule['destination'] = { + "ipAddress": [rule['destination_ip_address']] + } + service = {} + if rule.get('source_port'): + min_port, max_port = self._get_min_max_ports_from_range( + rule['source_port']) + service['sourcePort'] = [i for i in range(min_port, max_port + 1)] + if rule.get('destination_port'): + min_port, max_port = self._get_min_max_ports_from_range( + rule['destination_port']) + service['port'] = [i for i in range(min_port, max_port + 1)] + if rule.get('protocol'): + service['protocol'] = rule['protocol'] + if service: + vcns_rule['application'] = { + 'service': [service] + } + if index: + vcns_rule['ruleTag'] = index + return vcns_rule + + def _restore_firewall_rule(self, context, edge_id, response): + rule = response + rule_binding = vcns_db.get_vcns_edge_firewallrule_binding_by_vseid( + context.session, edge_id, rule['ruleId']) + service = rule['application']['service'][0] + src_port_range = self._get_port_range_from_min_max_ports( + service['sourcePort'][0], service['sourcePort'][-1]) + dst_port_range = self._get_port_range_from_min_max_ports( + service['port'][0], service['port'][-1]) + return { + 'firewall_rule': { + 'name': rule['name'], + 'id': rule_binding['rule_id'], + 'description': rule['description'], + 'source_ip_address': rule['source']['ipAddress'][0], + 'destination_ip_address': rule['destination']['ipAddress'][0], + 'protocol': service['protocol'], + 'destination_port': dst_port_range, + 'source_port': src_port_range, + 'action': self._restore_firewall_action(rule['action']), + 'enabled': rule['enabled']}} + + def _convert_firewall(self, context, firewall): + #bulk configuration on firewall and rescheduling the rule binding + ruleTag = 1 + vcns_rules = [] + for rule in firewall['firewall_rule_list']: + vcns_rule = self._convert_firewall_rule(context, rule, ruleTag) + vcns_rules.append(vcns_rule) + ruleTag += 1 + return { + 'featureType': "firewall_4.0", + 
'firewallRules': { + 'firewallRules': vcns_rules}} + + def _restore_firewall(self, context, edge_id, response): + res = {} + res['firewall_rule_list'] = [] + for rule in response['firewallRules']['firewallRules']: + rule_binding = ( + vcns_db.get_vcns_edge_firewallrule_binding_by_vseid( + context.session, edge_id, rule['ruleId'])) + if rule_binding is None: + continue + service = rule['application']['service'][0] + src_port_range = self._get_port_range_from_min_max_ports( + service['sourcePort'][0], service['sourcePort'][-1]) + dst_port_range = self._get_port_range_from_min_max_ports( + service['port'][0], service['port'][-1]) + item = { + 'firewall_rule': { + 'name': rule['name'], + 'id': rule_binding['rule_id'], + 'description': rule['description'], + 'source_ip_address': rule['source']['ipAddress'][0], + 'destination_ip_address': rule[ + 'destination']['ipAddress'][0], + 'protocol': service['protocol'], + 'destination_port': dst_port_range, + 'source_port': src_port_range, + 'action': self._restore_firewall_action(rule['action']), + 'enabled': rule['enabled']}} + res['firewall_rule_list'].append(item) + return res + + def _create_rule_id_mapping( + self, context, edge_id, firewall, vcns_fw): + for rule in vcns_fw['firewallRules']['firewallRules']: + index = rule['ruleTag'] - 1 + #TODO(linb):a simple filter of the retrived rules which may be + #created by other operations unintentionally + if index < len(firewall['firewall_rule_list']): + rule_vseid = rule['ruleId'] + rule_id = firewall['firewall_rule_list'][index]['id'] + map_info = { + 'rule_id': rule_id, + 'rule_vseid': rule_vseid, + 'edge_id': edge_id + } + vcns_db.add_vcns_edge_firewallrule_binding( + context.session, map_info) + + def _get_firewall(self, context, edge_id): + try: + return self.vcns.get_firewall(edge_id)[1] + except vcns_exc.VcnsApiException: + with excutils.save_and_reraise_exception(): + LOG.exception(_("Failed to get firewall with edge " + "id: %s"), edge_id) + + def 
_get_firewall_rule_next(self, context, edge_id, rule_vseid): + # Return the firewall rule below 'rule_vseid' + fw_cfg = self._get_firewall(context, edge_id) + for i in range(len(fw_cfg['firewallRules']['firewallRules'])): + rule_cur = fw_cfg['firewallRules']['firewallRules'][i] + if str(rule_cur['ruleId']) == rule_vseid: + if (i + 1) == len(fw_cfg['firewallRules']['firewallRules']): + return None + else: + return fw_cfg['firewallRules']['firewallRules'][i + 1] + + def get_firewall_rule(self, context, id, edge_id): + rule_map = vcns_db.get_vcns_edge_firewallrule_binding( + context.session, id, edge_id) + if rule_map is None: + msg = _("No rule id:%s found in the edge_firewall_binding") % id + LOG.error(msg) + raise vcns_exc.VcnsNotFound( + resource='vcns_firewall_rule_bindings', msg=msg) + vcns_rule_id = rule_map.rule_vseid + try: + response = self.vcns.get_firewall_rule( + edge_id, vcns_rule_id)[1] + except vcns_exc.VcnsApiException: + with excutils.save_and_reraise_exception(): + LOG.exception(_("Failed to get firewall rule: %(rule_id)s " + "with edge_id: %(edge_id)s"), { + 'rule_id': id, + 'edge_id': edge_id}) + return self._restore_firewall_rule(context, edge_id, response) + + def get_firewall(self, context, edge_id): + response = self._get_firewall(context, edge_id) + return self._restore_firewall(context, edge_id, response) + + def update_firewall(self, context, edge_id, firewall): + fw_req = self._convert_firewall(context, firewall) + try: + self.vcns.update_firewall(edge_id, fw_req) + except vcns_exc.VcnsApiException: + with excutils.save_and_reraise_exception(): + LOG.exception(_("Failed to update firewall " + "with edge_id: %s"), edge_id) + fw_res = self._get_firewall(context, edge_id) + vcns_db.cleanup_vcns_edge_firewallrule_binding( + context.session, edge_id) + self._create_rule_id_mapping(context, edge_id, firewall, fw_res) + + def delete_firewall(self, context, edge_id): + try: + self.vcns.delete_firewall(edge_id) + except vcns_exc.VcnsApiException: + 
with excutils.save_and_reraise_exception(): + LOG.exception(_("Failed to delete firewall " + "with edge_id:%s"), edge_id) + vcns_db.cleanup_vcns_edge_firewallrule_binding( + context.session, edge_id) + + def update_firewall_rule(self, context, id, edge_id, firewall_rule): + rule_map = vcns_db.get_vcns_edge_firewallrule_binding( + context.session, id, edge_id) + vcns_rule_id = rule_map.rule_vseid + fwr_req = self._convert_firewall_rule(context, firewall_rule) + try: + self.vcns.update_firewall_rule(edge_id, vcns_rule_id, fwr_req) + except vcns_exc.VcnsApiException: + with excutils.save_and_reraise_exception(): + LOG.exception(_("Failed to update firewall rule: %(rule_id)s " + "with edge_id: %(edge_id)s"), + {'rule_id': id, + 'edge_id': edge_id}) + + def delete_firewall_rule(self, context, id, edge_id): + rule_map = vcns_db.get_vcns_edge_firewallrule_binding( + context.session, id, edge_id) + vcns_rule_id = rule_map.rule_vseid + try: + self.vcns.delete_firewall_rule(edge_id, vcns_rule_id) + except vcns_exc.VcnsApiException: + with excutils.save_and_reraise_exception(): + LOG.exception(_("Failed to delete firewall rule: %(rule_id)s " + "with edge_id: %(edge_id)s"), + {'rule_id': id, + 'edge_id': edge_id}) + vcns_db.delete_vcns_edge_firewallrule_binding( + context.session, id, edge_id) + + def _add_rule_above(self, context, ref_rule_id, edge_id, firewall_rule): + rule_map = vcns_db.get_vcns_edge_firewallrule_binding( + context.session, ref_rule_id, edge_id) + ref_vcns_rule_id = rule_map.rule_vseid + fwr_req = self._convert_firewall_rule(context, firewall_rule) + try: + header = self.vcns.add_firewall_rule_above( + edge_id, ref_vcns_rule_id, fwr_req)[0] + except vcns_exc.VcnsApiException: + with excutils.save_and_reraise_exception(): + LOG.exception(_("Failed to add firewall rule above: " + "%(rule_id)s with edge_id: %(edge_id)s"), + {'rule_id': ref_vcns_rule_id, + 'edge_id': edge_id}) + + objuri = header['location'] + fwr_vseid = objuri[objuri.rfind("/") + 1:] + 
map_info = { + 'rule_id': firewall_rule['id'], + 'rule_vseid': fwr_vseid, + 'edge_id': edge_id} + vcns_db.add_vcns_edge_firewallrule_binding( + context.session, map_info) + + def _add_rule_below(self, context, ref_rule_id, edge_id, firewall_rule): + rule_map = vcns_db.get_vcns_edge_firewallrule_binding( + context.session, ref_rule_id, edge_id) + ref_vcns_rule_id = rule_map.rule_vseid + fwr_vse_next = self._get_firewall_rule_next( + context, edge_id, ref_vcns_rule_id) + fwr_req = self._convert_firewall_rule(context, firewall_rule) + if fwr_vse_next: + ref_vcns_rule_id = fwr_vse_next['ruleId'] + try: + header = self.vcns.add_firewall_rule_above( + edge_id, int(ref_vcns_rule_id), fwr_req)[0] + except vcns_exc.VcnsApiException: + with excutils.save_and_reraise_exception(): + LOG.exception(_("Failed to add firewall rule above: " + "%(rule_id)s with edge_id: %(edge_id)s"), + {'rule_id': ref_vcns_rule_id, + 'edge_id': edge_id}) + else: + # append the rule at the bottom + try: + header = self.vcns.add_firewall_rule( + edge_id, fwr_req)[0] + except vcns_exc.VcnsApiException: + with excutils.save_and_reraise_exception(): + LOG.exception(_("Failed to append a firewall rule" + "with edge_id: %s"), edge_id) + + objuri = header['location'] + fwr_vseid = objuri[objuri.rfind("/") + 1:] + map_info = { + 'rule_id': firewall_rule['id'], + 'rule_vseid': fwr_vseid, + 'edge_id': edge_id + } + vcns_db.add_vcns_edge_firewallrule_binding( + context.session, map_info) + + def insert_rule(self, context, rule_info, edge_id, fwr): + if rule_info.get('insert_before'): + self._add_rule_above( + context, rule_info['insert_before'], edge_id, fwr) + elif rule_info.get('insert_after'): + self._add_rule_below( + context, rule_info['insert_after'], edge_id, fwr) + else: + msg = _("Can't execute insert rule operation " + "without reference rule_id") + raise vcns_exc.VcnsBadRequest(resource='firewall_rule', msg=msg) diff --git a/neutron/plugins/vmware/vshield/edge_ipsecvpn_driver.py 
b/neutron/plugins/vmware/vshield/edge_ipsecvpn_driver.py new file mode 100644 index 000000000..7e74fe1f8 --- /dev/null +++ b/neutron/plugins/vmware/vshield/edge_ipsecvpn_driver.py @@ -0,0 +1,150 @@ +# Copyright 2014 VMware, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from neutron.openstack.common import excutils +from neutron.openstack.common import log as logging +from neutron.plugins.vmware.vshield.common import ( + exceptions as vcns_exc) + +LOG = logging.getLogger(__name__) + +ENCRYPTION_ALGORITHM_MAP = { + '3des': '3des', + 'aes-128': 'aes', + 'aes-256': 'aes256' +} + +PFS_MAP = { + 'group2': 'dh2', + 'group5': 'dh5'} + +TRANSFORM_PROTOCOL_ALLOWED = ('esp',) + +ENCAPSULATION_MODE_ALLOWED = ('tunnel',) + + +class EdgeIPsecVpnDriver(): + + """Driver APIs for Edge IPsec VPN bulk configuration.""" + + def _check_ikepolicy_ipsecpolicy_allowed(self, ikepolicy, ipsecpolicy): + """Check whether ikepolicy and ipsecpolicy are allowed on vshield edge. + + Some IPsec VPN configurations and features are configured by default or + not supported on vshield edge. + + """ + # Check validation of IKEPolicy. + if ikepolicy['ike_version'] != 'v1': + msg = _("Unsupported ike_version: %s! Only 'v1' ike version is " + "supported on vshield Edge!" + ) % ikepolicy['ike_version'] + LOG.warning(msg) + raise vcns_exc.VcnsBadRequest(resource='ikepolicy', + msg=msg) + + # In VSE, Phase 1 and Phase 2 share the same encryption_algorithm + # and authentication algorithms setting. 
At present, just record the + # discrepancy error in log and take ipsecpolicy to do configuration. + if (ikepolicy['auth_algorithm'] != ipsecpolicy['auth_algorithm'] or + ikepolicy['encryption_algorithm'] != ipsecpolicy[ + 'encryption_algorithm'] or + ikepolicy['pfs'] != ipsecpolicy['pfs']): + msg = _("IKEPolicy and IPsecPolicy should have consistent " + "auth_algorithm, encryption_algorithm and pfs for VSE!") + LOG.warning(msg) + + # Check whether encryption_algorithm is allowed. + encryption_algorithm = ENCRYPTION_ALGORITHM_MAP.get( + ipsecpolicy.get('encryption_algorithm'), None) + if not encryption_algorithm: + msg = _("Unsupported encryption_algorithm: %s! '3des', " + "'aes-128' and 'aes-256' are supported on VSE right now." + ) % ipsecpolicy['encryption_algorithm'] + LOG.warning(msg) + raise vcns_exc.VcnsBadRequest(resource='ipsecpolicy', + msg=msg) + + # Check whether pfs is allowed. + if not PFS_MAP.get(ipsecpolicy['pfs']): + msg = _("Unsupported pfs: %s! 'group2' and 'group5' " + "are supported on VSE right now.") % ipsecpolicy['pfs'] + LOG.warning(msg) + raise vcns_exc.VcnsBadRequest(resource='ipsecpolicy', + msg=msg) + + # Check whether transform protocol is allowed. + if ipsecpolicy['transform_protocol'] not in TRANSFORM_PROTOCOL_ALLOWED: + msg = _("Unsupported transform protocol: %s! 'esp' is supported " + "by default on VSE right now." + ) % ipsecpolicy['transform_protocol'] + LOG.warning(msg) + raise vcns_exc.VcnsBadRequest(resource='ipsecpolicy', + msg=msg) + + # Check whether encapsulation mode is allowed. + if ipsecpolicy['encapsulation_mode'] not in ENCAPSULATION_MODE_ALLOWED: + msg = _("Unsupported encapsulation mode: %s! 'tunnel' is " + "supported by default on VSE right now." 
+ ) % ipsecpolicy['encapsulation_mode'] + LOG.warning(msg) + raise vcns_exc.VcnsBadRequest(resource='ipsecpolicy', + msg=msg) + + def _convert_ipsec_site(self, site, enablePfs=True): + self._check_ikepolicy_ipsecpolicy_allowed( + site['ikepolicy'], site['ipsecpolicy']) + return { + 'enabled': site['site'].get('admin_state_up'), + 'enablePfs': enablePfs, + 'dhGroup': PFS_MAP.get(site['ipsecpolicy']['pfs']), + 'name': site['site'].get('name'), + 'description': site['site'].get('description'), + 'localId': site['external_ip'], + 'localIp': site['external_ip'], + 'peerId': site['site'].get('peer_id'), + 'peerIp': site['site'].get('peer_address'), + 'localSubnets': { + 'subnets': [site['subnet'].get('cidr')]}, + 'peerSubnets': { + 'subnets': site['site'].get('peer_cidrs')}, + 'authenticationMode': site['site'].get('auth_mode'), + 'psk': site['site'].get('psk'), + 'encryptionAlgorithm': ENCRYPTION_ALGORITHM_MAP.get( + site['ipsecpolicy'].get('encryption_algorithm'))} + + def update_ipsec_config(self, edge_id, sites, enabled=True): + ipsec_config = {'featureType': "ipsec_4.0", + 'enabled': enabled} + vse_sites = [self._convert_ipsec_site(site) for site in sites] + ipsec_config['sites'] = {'sites': vse_sites} + try: + self.vcns.update_ipsec_config(edge_id, ipsec_config) + except vcns_exc.VcnsApiException: + with excutils.save_and_reraise_exception(): + LOG.exception(_("Failed to update ipsec vpn configuration " + "with edge_id: %s"), edge_id) + + def delete_ipsec_config(self, edge_id): + try: + self.vcns.delete_ipsec_config(edge_id) + except vcns_exc.ResourceNotFound: + LOG.warning(_("IPsec config not found on edge: %s"), edge_id) + except vcns_exc.VcnsApiException: + with excutils.save_and_reraise_exception(): + LOG.exception(_("Failed to delete ipsec vpn configuration " + "with edge_id: %s"), edge_id) + + def get_ipsec_config(self, edge_id): + return self.vcns.get_ipsec_config(edge_id) diff --git a/neutron/plugins/vmware/vshield/edge_loadbalancer_driver.py 
b/neutron/plugins/vmware/vshield/edge_loadbalancer_driver.py new file mode 100644 index 000000000..cb9e24eaa --- /dev/null +++ b/neutron/plugins/vmware/vshield/edge_loadbalancer_driver.py @@ -0,0 +1,403 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013 VMware, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Leon Cui, VMware + +from neutron.openstack.common import excutils +from neutron.openstack.common import log as logging +from neutron.plugins.vmware.dbexts import vcns_db +from neutron.plugins.vmware.vshield.common import ( + constants as vcns_const) +from neutron.plugins.vmware.vshield.common import ( + exceptions as vcns_exc) +from neutron.services.loadbalancer import constants as lb_constants + +LOG = logging.getLogger(__name__) + +BALANCE_MAP = { + lb_constants.LB_METHOD_ROUND_ROBIN: 'round-robin', + lb_constants.LB_METHOD_LEAST_CONNECTIONS: 'leastconn', + lb_constants.LB_METHOD_SOURCE_IP: 'source' +} +PROTOCOL_MAP = { + lb_constants.PROTOCOL_TCP: 'tcp', + lb_constants.PROTOCOL_HTTP: 'http', + lb_constants.PROTOCOL_HTTPS: 'tcp' +} +SESSION_PERSISTENCE_METHOD_MAP = { + lb_constants.SESSION_PERSISTENCE_SOURCE_IP: 'sourceip', + lb_constants.SESSION_PERSISTENCE_APP_COOKIE: 'cookie', + lb_constants.SESSION_PERSISTENCE_HTTP_COOKIE: 'cookie'} +SESSION_PERSISTENCE_COOKIE_MAP = { + lb_constants.SESSION_PERSISTENCE_APP_COOKIE: 'app', + lb_constants.SESSION_PERSISTENCE_HTTP_COOKIE: 'insert'} + + +class EdgeLbDriver(): + 
"""Implementation of driver APIs for + Edge Loadbalancer feature configuration + """ + + def _convert_lb_vip(self, context, edge_id, vip, app_profileid): + pool_id = vip.get('pool_id') + poolid_map = vcns_db.get_vcns_edge_pool_binding( + context.session, pool_id, edge_id) + pool_vseid = poolid_map['pool_vseid'] + return { + 'name': vip.get( + 'name', '') + vip['id'][-vcns_const.SUFFIX_LENGTH:], + 'description': vip.get('description'), + 'ipAddress': vip.get('address'), + 'protocol': vip.get('protocol'), + 'port': vip.get('protocol_port'), + 'connectionLimit': max(0, vip.get('connection_limit')), + 'defaultPoolId': pool_vseid, + 'applicationProfileId': app_profileid + } + + def _restore_lb_vip(self, context, edge_id, vip_vse): + pool_binding = vcns_db.get_vcns_edge_pool_binding_by_vseid( + context.session, + edge_id, + vip_vse['defaultPoolId']) + + return { + 'name': vip_vse['name'][:-vcns_const.SUFFIX_LENGTH], + 'address': vip_vse['ipAddress'], + 'protocol': vip_vse['protocol'], + 'protocol_port': vip_vse['port'], + 'pool_id': pool_binding['pool_id'] + } + + def _convert_lb_pool(self, context, edge_id, pool, members): + vsepool = { + 'name': pool.get( + 'name', '') + pool['id'][-vcns_const.SUFFIX_LENGTH:], + 'description': pool.get('description'), + 'algorithm': BALANCE_MAP.get( + pool.get('lb_method'), + 'round-robin'), + 'transparent': True, + 'member': [], + 'monitorId': [] + } + for member in members: + vsepool['member'].append({ + 'ipAddress': member['address'], + 'weight': member['weight'], + 'port': member['protocol_port'] + }) + ##TODO(linb) right now, vse only accept at most one monitor per pool + monitors = pool.get('health_monitors') + if not monitors: + return vsepool + monitorid_map = vcns_db.get_vcns_edge_monitor_binding( + context.session, + monitors[0], + edge_id) + vsepool['monitorId'].append(monitorid_map['monitor_vseid']) + return vsepool + + def _restore_lb_pool(self, context, edge_id, pool_vse): + #TODO(linb): Get more usefule info + return { + 
'name': pool_vse['name'][:-vcns_const.SUFFIX_LENGTH], + } + + def _convert_lb_monitor(self, context, monitor): + return { + 'type': PROTOCOL_MAP.get( + monitor.get('type'), 'http'), + 'interval': monitor.get('delay'), + 'timeout': monitor.get('timeout'), + 'maxRetries': monitor.get('max_retries'), + 'name': monitor.get('id') + } + + def _restore_lb_monitor(self, context, edge_id, monitor_vse): + return { + 'delay': monitor_vse['interval'], + 'timeout': monitor_vse['timeout'], + 'max_retries': monitor_vse['maxRetries'], + 'id': monitor_vse['name'] + } + + def _convert_app_profile(self, name, sess_persist, protocol): + vcns_app_profile = { + 'insertXForwardedFor': False, + 'name': name, + 'serverSslEnabled': False, + 'sslPassthrough': False, + 'template': protocol, + } + # Since SSL Termination is not supported right now, so just use + # sslPassthrough mehtod if the protocol is HTTPS. + if protocol == lb_constants.PROTOCOL_HTTPS: + vcns_app_profile['sslPassthrough'] = True + + if sess_persist.get('type'): + # If protocol is not HTTP, only sourceip is supported + if (protocol != lb_constants.PROTOCOL_HTTP and + sess_persist['type'] != ( + lb_constants.SESSION_PERSISTENCE_SOURCE_IP)): + msg = (_("Invalid %(protocol)s persistence method: %(type)s") % + {'protocol': protocol, + 'type': sess_persist['type']}) + raise vcns_exc.VcnsBadRequest(resource='sess_persist', msg=msg) + persistence = { + 'method': SESSION_PERSISTENCE_METHOD_MAP.get( + sess_persist['type'])} + if sess_persist['type'] in SESSION_PERSISTENCE_COOKIE_MAP: + if sess_persist.get('cookie_name'): + persistence['cookieName'] = sess_persist['cookie_name'] + else: + persistence['cookieName'] = 'default_cookie_name' + persistence['cookieMode'] = SESSION_PERSISTENCE_COOKIE_MAP.get( + sess_persist['type']) + vcns_app_profile['persistence'] = persistence + return vcns_app_profile + + def create_vip(self, context, edge_id, vip): + app_profile = self._convert_app_profile( + vip['name'], 
(vip.get('session_persistence') or {}), + vip.get('protocol')) + try: + header, response = self.vcns.create_app_profile( + edge_id, app_profile) + except vcns_exc.VcnsApiException: + with excutils.save_and_reraise_exception(): + LOG.exception(_("Failed to create app profile on edge: %s"), + edge_id) + objuri = header['location'] + app_profileid = objuri[objuri.rfind("/") + 1:] + + vip_new = self._convert_lb_vip(context, edge_id, vip, app_profileid) + try: + header, response = self.vcns.create_vip( + edge_id, vip_new) + except vcns_exc.VcnsApiException: + with excutils.save_and_reraise_exception(): + LOG.exception(_("Failed to create vip on vshield edge: %s"), + edge_id) + self.vcns.delete_app_profile(edge_id, app_profileid) + objuri = header['location'] + vip_vseid = objuri[objuri.rfind("/") + 1:] + + # Add the vip mapping + map_info = { + "vip_id": vip['id'], + "vip_vseid": vip_vseid, + "edge_id": edge_id, + "app_profileid": app_profileid + } + vcns_db.add_vcns_edge_vip_binding(context.session, map_info) + + def _get_vip_binding(self, session, id): + vip_binding = vcns_db.get_vcns_edge_vip_binding(session, id) + if not vip_binding: + msg = (_("vip_binding not found with id: %(id)s " + "edge_id: %(edge_id)s") % { + 'id': id, + 'edge_id': vip_binding[vcns_const.EDGE_ID]}) + LOG.error(msg) + raise vcns_exc.VcnsNotFound( + resource='router_service_binding', msg=msg) + return vip_binding + + def get_vip(self, context, id): + vip_binding = vcns_db.get_vcns_edge_vip_binding(context.session, id) + edge_id = vip_binding[vcns_const.EDGE_ID] + vip_vseid = vip_binding['vip_vseid'] + try: + response = self.vcns.get_vip(edge_id, vip_vseid)[1] + except vcns_exc.VcnsApiException: + with excutils.save_and_reraise_exception(): + LOG.exception(_("Failed to get vip on edge")) + return self._restore_lb_vip(context, edge_id, response) + + def update_vip(self, context, vip, session_persistence_update=True): + vip_binding = self._get_vip_binding(context.session, vip['id']) + edge_id = 
vip_binding[vcns_const.EDGE_ID] + vip_vseid = vip_binding.get('vip_vseid') + if session_persistence_update: + app_profileid = vip_binding.get('app_profileid') + app_profile = self._convert_app_profile( + vip['name'], vip.get('session_persistence', {}), + vip.get('protocol')) + try: + self.vcns.update_app_profile( + edge_id, app_profileid, app_profile) + except vcns_exc.VcnsApiException: + with excutils.save_and_reraise_exception(): + LOG.exception(_("Failed to update app profile on " + "edge: %s") % edge_id) + + vip_new = self._convert_lb_vip(context, edge_id, vip, app_profileid) + try: + self.vcns.update_vip(edge_id, vip_vseid, vip_new) + except vcns_exc.VcnsApiException: + with excutils.save_and_reraise_exception(): + LOG.exception(_("Failed to update vip on edge: %s") % edge_id) + + def delete_vip(self, context, id): + vip_binding = self._get_vip_binding(context.session, id) + edge_id = vip_binding[vcns_const.EDGE_ID] + vip_vseid = vip_binding['vip_vseid'] + app_profileid = vip_binding['app_profileid'] + + try: + self.vcns.delete_vip(edge_id, vip_vseid) + except vcns_exc.ResourceNotFound: + LOG.exception(_("vip not found on edge: %s") % edge_id) + except vcns_exc.VcnsApiException: + with excutils.save_and_reraise_exception(): + LOG.exception(_("Failed to delete vip on edge: %s") % edge_id) + + try: + self.vcns.delete_app_profile(edge_id, app_profileid) + except vcns_exc.ResourceNotFound: + LOG.exception(_("app profile not found on edge: %s") % edge_id) + except vcns_exc.VcnsApiException: + with excutils.save_and_reraise_exception(): + LOG.exception(_("Failed to delete app profile on edge: %s") % + edge_id) + + vcns_db.delete_vcns_edge_vip_binding(context.session, id) + + def create_pool(self, context, edge_id, pool, members): + pool_new = self._convert_lb_pool(context, edge_id, pool, members) + try: + header = self.vcns.create_pool(edge_id, pool_new)[0] + except vcns_exc.VcnsApiException: + with excutils.save_and_reraise_exception(): + LOG.exception(_("Failed to 
create pool")) + + objuri = header['location'] + pool_vseid = objuri[objuri.rfind("/") + 1:] + + # update the pool mapping table + map_info = { + "pool_id": pool['id'], + "pool_vseid": pool_vseid, + "edge_id": edge_id + } + vcns_db.add_vcns_edge_pool_binding(context.session, map_info) + + def get_pool(self, context, id, edge_id): + pool_binding = vcns_db.get_vcns_edge_pool_binding( + context.session, id, edge_id) + if not pool_binding: + msg = (_("pool_binding not found with id: %(id)s " + "edge_id: %(edge_id)s") % {'id': id, 'edge_id': edge_id}) + LOG.error(msg) + raise vcns_exc.VcnsNotFound( + resource='router_service_binding', msg=msg) + pool_vseid = pool_binding['pool_vseid'] + try: + response = self.vcns.get_pool(edge_id, pool_vseid)[1] + except vcns_exc.VcnsApiException: + with excutils.save_and_reraise_exception(): + LOG.exception(_("Failed to get pool on edge")) + return self._restore_lb_pool(context, edge_id, response) + + def update_pool(self, context, edge_id, pool, members): + pool_binding = vcns_db.get_vcns_edge_pool_binding( + context.session, pool['id'], edge_id) + pool_vseid = pool_binding['pool_vseid'] + pool_new = self._convert_lb_pool(context, edge_id, pool, members) + try: + self.vcns.update_pool(edge_id, pool_vseid, pool_new) + except vcns_exc.VcnsApiException: + with excutils.save_and_reraise_exception(): + LOG.exception(_("Failed to update pool")) + + def delete_pool(self, context, id, edge_id): + pool_binding = vcns_db.get_vcns_edge_pool_binding( + context.session, id, edge_id) + pool_vseid = pool_binding['pool_vseid'] + try: + self.vcns.delete_pool(edge_id, pool_vseid) + except vcns_exc.VcnsApiException: + with excutils.save_and_reraise_exception(): + LOG.exception(_("Failed to delete pool")) + vcns_db.delete_vcns_edge_pool_binding( + context.session, id, edge_id) + + def create_health_monitor(self, context, edge_id, health_monitor): + monitor_new = self._convert_lb_monitor(context, health_monitor) + try: + header = 
self.vcns.create_health_monitor(edge_id, monitor_new)[0] + except vcns_exc.VcnsApiException: + with excutils.save_and_reraise_exception(): + LOG.exception(_("Failed to create monitor on edge: %s"), + edge_id) + + objuri = header['location'] + monitor_vseid = objuri[objuri.rfind("/") + 1:] + + # update the health_monitor mapping table + map_info = { + "monitor_id": health_monitor['id'], + "monitor_vseid": monitor_vseid, + "edge_id": edge_id + } + vcns_db.add_vcns_edge_monitor_binding(context.session, map_info) + + def get_health_monitor(self, context, id, edge_id): + monitor_binding = vcns_db.get_vcns_edge_monitor_binding( + context.session, id, edge_id) + if not monitor_binding: + msg = (_("monitor_binding not found with id: %(id)s " + "edge_id: %(edge_id)s") % {'id': id, 'edge_id': edge_id}) + LOG.error(msg) + raise vcns_exc.VcnsNotFound( + resource='router_service_binding', msg=msg) + monitor_vseid = monitor_binding['monitor_vseid'] + try: + response = self.vcns.get_health_monitor(edge_id, monitor_vseid)[1] + except vcns_exc.VcnsApiException as e: + with excutils.save_and_reraise_exception(): + LOG.exception(_("Failed to get monitor on edge: %s"), + e.response) + return self._restore_lb_monitor(context, edge_id, response) + + def update_health_monitor(self, context, edge_id, + old_health_monitor, health_monitor): + monitor_binding = vcns_db.get_vcns_edge_monitor_binding( + context.session, + old_health_monitor['id'], edge_id) + monitor_vseid = monitor_binding['monitor_vseid'] + monitor_new = self._convert_lb_monitor( + context, health_monitor) + try: + self.vcns.update_health_monitor( + edge_id, monitor_vseid, monitor_new) + except vcns_exc.VcnsApiException: + with excutils.save_and_reraise_exception(): + LOG.exception(_("Failed to update monitor on edge: %s"), + edge_id) + + def delete_health_monitor(self, context, id, edge_id): + monitor_binding = vcns_db.get_vcns_edge_monitor_binding( + context.session, id, edge_id) + monitor_vseid = 
monitor_binding['monitor_vseid'] + try: + self.vcns.delete_health_monitor(edge_id, monitor_vseid) + except vcns_exc.VcnsApiException: + with excutils.save_and_reraise_exception(): + LOG.exception(_("Failed to delete monitor")) + vcns_db.delete_vcns_edge_monitor_binding( + context.session, id, edge_id) diff --git a/neutron/plugins/vmware/vshield/tasks/__init__.py b/neutron/plugins/vmware/vshield/tasks/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/neutron/plugins/vmware/vshield/tasks/constants.py b/neutron/plugins/vmware/vshield/tasks/constants.py new file mode 100644 index 000000000..f5322e0b9 --- /dev/null +++ b/neutron/plugins/vmware/vshield/tasks/constants.py @@ -0,0 +1,44 @@ +# Copyright 2013 VMware, Inc. +# All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +class TaskStatus(object): + """Task running status. + + This is used by execution/status callback function to notify the + task manager what's the status of current task, and also used for + indication the final task execution result. + """ + PENDING = 1 + COMPLETED = 2 + ERROR = 3 + ABORT = 4 + + +class TaskState(object): + """Current state of a task. + + This is to keep track of the current state of a task. 
+ NONE: the task is still in the queue + START: the task is pull out from the queue and is about to be executed + EXECUTED: the task has been executed + STATUS: we're running periodic status check for this task + RESULT: the task has finished and result is ready + """ + NONE = -1 + START = 0 + EXECUTED = 1 + STATUS = 2 + RESULT = 3 diff --git a/neutron/plugins/vmware/vshield/tasks/tasks.py b/neutron/plugins/vmware/vshield/tasks/tasks.py new file mode 100644 index 000000000..7037c430d --- /dev/null +++ b/neutron/plugins/vmware/vshield/tasks/tasks.py @@ -0,0 +1,397 @@ +# Copyright 2013 VMware, Inc. +# All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import collections +import uuid + +from eventlet import event +from eventlet import greenthread + +from neutron.common import exceptions +from neutron.openstack.common import log as logging +from neutron.openstack.common import loopingcall +from neutron.plugins.vmware.vshield.tasks import constants + +DEFAULT_INTERVAL = 1000 + +LOG = logging.getLogger(__name__) + + +def nop(task): + return constants.TaskStatus.COMPLETED + + +class TaskException(exceptions.NeutronException): + + def __init__(self, message=None, **kwargs): + if message is not None: + self.message = message + + super(TaskException, self).__init__(**kwargs) + + +class InvalidState(TaskException): + message = _("Invalid state %(state)d") + + +class TaskStateSkipped(TaskException): + message = _("State %(state)d skipped. 
Current state %(current)d") + + +class Task(): + def __init__(self, name, resource_id, execute_callback, + status_callback=nop, result_callback=nop, userdata=None): + self.name = name + self.resource_id = resource_id + self._execute_callback = execute_callback + self._status_callback = status_callback + self._result_callback = result_callback + self.userdata = userdata + self.id = None + self.status = None + + self._monitors = { + constants.TaskState.START: [], + constants.TaskState.EXECUTED: [], + constants.TaskState.RESULT: [] + } + self._states = [None, None, None, None] + self._state = constants.TaskState.NONE + + def _add_monitor(self, action, func): + self._monitors[action].append(func) + return self + + def _move_state(self, state): + self._state = state + if self._states[state] is not None: + e = self._states[state] + self._states[state] = None + e.send() + + for s in range(state): + if self._states[s] is not None: + e = self._states[s] + self._states[s] = None + e.send_exception( + TaskStateSkipped(state=s, current=self._state)) + + def _invoke_monitor(self, state): + for func in self._monitors[state]: + try: + func(self) + except Exception: + msg = _("Task %(task)s encountered exception in %(func)s " + "at state %(state)s") % { + 'task': str(self), + 'func': str(func), + 'state': state} + LOG.exception(msg) + + self._move_state(state) + + return self + + def _start(self): + return self._invoke_monitor(constants.TaskState.START) + + def _executed(self): + return self._invoke_monitor(constants.TaskState.EXECUTED) + + def _update_status(self, status): + if self.status == status: + return self + + self.status = status + + def _finished(self): + return self._invoke_monitor(constants.TaskState.RESULT) + + def add_start_monitor(self, func): + return self._add_monitor(constants.TaskState.START, func) + + def add_executed_monitor(self, func): + return self._add_monitor(constants.TaskState.EXECUTED, func) + + def add_result_monitor(self, func): + return 
self._add_monitor(constants.TaskState.RESULT, func) + + def wait(self, state): + if (state < constants.TaskState.START or + state > constants.TaskState.RESULT or + state == constants.TaskState.STATUS): + raise InvalidState(state=state) + + if state <= self._state: + # we already passed this current state, so no wait + return + + e = event.Event() + self._states[state] = e + e.wait() + + def __repr__(self): + return "Task-%s-%s-%s" % ( + self.name, self.resource_id, self.id) + + +class TaskManager(): + + _instance = None + _default_interval = DEFAULT_INTERVAL + + def __init__(self, interval=None): + self._interval = interval or TaskManager._default_interval + + # A queue to pass tasks from other threads + self._tasks_queue = collections.deque() + + # A dict to store resource -> resource's tasks + self._tasks = {} + + # Current task being executed in main thread + self._main_thread_exec_task = None + + # New request event + self._req = event.Event() + + # TaskHandler stopped event + self._stopped = False + + # Periodic function trigger + self._monitor = None + self._monitor_busy = False + + # Thread handling the task request + self._thread = None + + def _execute(self, task): + """Execute task.""" + msg = _("Start task %s") % str(task) + LOG.debug(msg) + task._start() + try: + status = task._execute_callback(task) + except Exception: + msg = _("Task %(task)s encountered exception in %(cb)s") % { + 'task': str(task), + 'cb': str(task._execute_callback)} + LOG.exception(msg) + status = constants.TaskStatus.ERROR + + LOG.debug(_("Task %(task)s return %(status)s"), { + 'task': str(task), + 'status': status}) + + task._update_status(status) + task._executed() + + return status + + def _result(self, task): + """Notify task execution result.""" + try: + task._result_callback(task) + except Exception: + msg = _("Task %(task)s encountered exception in %(cb)s") % { + 'task': str(task), + 'cb': str(task._result_callback)} + LOG.exception(msg) + + LOG.debug(_("Task %(task)s 
return %(status)s"), + {'task': str(task), 'status': task.status}) + + task._finished() + + def _check_pending_tasks(self): + """Check all pending tasks status.""" + for resource_id in self._tasks.keys(): + if self._stopped: + # Task manager is stopped, return now + return + + tasks = self._tasks[resource_id] + # only the first task is executed and pending + task = tasks[0] + try: + status = task._status_callback(task) + except Exception: + msg = _("Task %(task)s encountered exception in %(cb)s") % { + 'task': str(task), + 'cb': str(task._status_callback)} + LOG.exception(msg) + status = constants.TaskStatus.ERROR + task._update_status(status) + if status != constants.TaskStatus.PENDING: + self._dequeue(task, True) + + def _enqueue(self, task): + if task.resource_id in self._tasks: + # append to existing resource queue for ordered processing + self._tasks[task.resource_id].append(task) + else: + # put the task to a new resource queue + tasks = collections.deque() + tasks.append(task) + self._tasks[task.resource_id] = tasks + + def _dequeue(self, task, run_next): + self._result(task) + tasks = self._tasks[task.resource_id] + tasks.remove(task) + if not tasks: + # no more tasks for this resource + del self._tasks[task.resource_id] + return + + if run_next: + # process next task for this resource + while tasks: + task = tasks[0] + status = self._execute(task) + if status == constants.TaskStatus.PENDING: + break + self._dequeue(task, False) + + def _abort(self): + """Abort all tasks.""" + # put all tasks haven't been received by main thread to queue + # so the following abort handling can cover them + for t in self._tasks_queue: + self._enqueue(t) + self._tasks_queue.clear() + + for resource_id in self._tasks.keys(): + tasks = list(self._tasks[resource_id]) + for task in tasks: + task._update_status(constants.TaskStatus.ABORT) + self._dequeue(task, False) + + def _get_task(self): + """Get task request.""" + while True: + for t in self._tasks_queue: + return 
self._tasks_queue.popleft() + self._req.wait() + self._req.reset() + + def run(self): + while True: + try: + if self._stopped: + # Gracefully terminate this thread if the _stopped + # attribute was set to true + LOG.info(_("Stopping TaskManager")) + break + + # get a task from queue, or timeout for periodic status check + task = self._get_task() + if task.resource_id in self._tasks: + # this resource already has some tasks under processing, + # append the task to same queue for ordered processing + self._enqueue(task) + continue + + try: + self._main_thread_exec_task = task + self._execute(task) + finally: + self._main_thread_exec_task = None + if task.status is None: + # The thread is killed during _execute(). To guarantee + # the task been aborted correctly, put it to the queue. + self._enqueue(task) + elif task.status != constants.TaskStatus.PENDING: + self._result(task) + else: + self._enqueue(task) + except Exception: + LOG.exception(_("TaskManager terminating because " + "of an exception")) + break + + def add(self, task): + task.id = uuid.uuid1() + self._tasks_queue.append(task) + if not self._req.ready(): + self._req.send() + return task.id + + def stop(self): + if self._thread is None: + return + self._stopped = True + self._thread.kill() + self._thread = None + # Stop looping call and abort running tasks + self._monitor.stop() + if self._monitor_busy: + self._monitor.wait() + self._abort() + LOG.info(_("TaskManager terminated")) + + def has_pending_task(self): + if self._tasks_queue or self._tasks or self._main_thread_exec_task: + return True + else: + return False + + def show_pending_tasks(self): + for task in self._tasks_queue: + LOG.info(str(task)) + for resource, tasks in self._tasks.iteritems(): + for task in tasks: + LOG.info(str(task)) + if self._main_thread_exec_task: + LOG.info(str(self._main_thread_exec_task)) + + def count(self): + count = 0 + for resource_id, tasks in self._tasks.iteritems(): + count += len(tasks) + return count + + def 
start(self, interval=None): + def _inner(): + self.run() + + def _loopingcall_callback(): + self._monitor_busy = True + try: + self._check_pending_tasks() + except Exception: + LOG.exception(_("Exception in _check_pending_tasks")) + self._monitor_busy = False + + if self._thread is not None: + return self + + if interval is None or interval == 0: + interval = self._interval + + self._stopped = False + self._thread = greenthread.spawn(_inner) + self._monitor = loopingcall.FixedIntervalLoopingCall( + _loopingcall_callback) + self._monitor.start(interval / 1000.0, + interval / 1000.0) + # To allow the created thread start running + greenthread.sleep(0) + + return self + + @classmethod + def set_default_interval(cls, interval): + cls._default_interval = interval diff --git a/neutron/plugins/vmware/vshield/vcns.py b/neutron/plugins/vmware/vshield/vcns.py new file mode 100644 index 000000000..11f0c1e2f --- /dev/null +++ b/neutron/plugins/vmware/vshield/vcns.py @@ -0,0 +1,304 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013 VMware, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# +# @author: linb, VMware + +from neutron.openstack.common import jsonutils +from neutron.openstack.common import log as logging +from neutron.plugins.vmware.vshield.common import VcnsApiClient + +LOG = logging.getLogger(__name__) + +HTTP_GET = "GET" +HTTP_POST = "POST" +HTTP_DELETE = "DELETE" +HTTP_PUT = "PUT" +URI_PREFIX = "/api/4.0/edges" + +#FwaaS constants +FIREWALL_SERVICE = "firewall/config" +FIREWALL_RULE_RESOURCE = "rules" + +#LbaaS Constants +LOADBALANCER_SERVICE = "loadbalancer/config" +VIP_RESOURCE = "virtualservers" +POOL_RESOURCE = "pools" +MONITOR_RESOURCE = "monitors" +APP_PROFILE_RESOURCE = "applicationprofiles" + +# IPsec VPNaaS Constants +IPSEC_VPN_SERVICE = 'ipsec/config' + + +class Vcns(object): + + def __init__(self, address, user, password): + self.address = address + self.user = user + self.password = password + self.jsonapi_client = VcnsApiClient.VcnsApiHelper(address, user, + password, 'json') + + def do_request(self, method, uri, params=None, format='json', **kwargs): + LOG.debug(_("VcnsApiHelper('%(method)s', '%(uri)s', '%(body)s')"), { + 'method': method, + 'uri': uri, + 'body': jsonutils.dumps(params)}) + if format == 'json': + header, content = self.jsonapi_client.request(method, uri, params) + else: + header, content = self.xmlapi_client.request(method, uri, params) + LOG.debug(_("Header: '%s'"), header) + LOG.debug(_("Content: '%s'"), content) + if content == '': + return header, {} + if kwargs.get('decode', True): + content = jsonutils.loads(content) + return header, content + + def deploy_edge(self, request): + uri = URI_PREFIX + "?async=true" + return self.do_request(HTTP_POST, uri, request, decode=False) + + def get_edge_id(self, job_id): + uri = URI_PREFIX + "/jobs/%s" % job_id + return self.do_request(HTTP_GET, uri, decode=True) + + def get_edge_deploy_status(self, edge_id): + uri = URI_PREFIX + "/%s/status?getlatest=false" % edge_id + return self.do_request(HTTP_GET, uri, decode="True") + + def delete_edge(self, edge_id): + 
uri = "%s/%s" % (URI_PREFIX, edge_id) + return self.do_request(HTTP_DELETE, uri) + + def update_interface(self, edge_id, vnic): + uri = "%s/%s/vnics/%d" % (URI_PREFIX, edge_id, vnic['index']) + return self.do_request(HTTP_PUT, uri, vnic, decode=True) + + def get_nat_config(self, edge_id): + uri = "%s/%s/nat/config" % (URI_PREFIX, edge_id) + return self.do_request(HTTP_GET, uri, decode=True) + + def update_nat_config(self, edge_id, nat): + uri = "%s/%s/nat/config" % (URI_PREFIX, edge_id) + return self.do_request(HTTP_PUT, uri, nat, decode=True) + + def delete_nat_rule(self, edge_id, rule_id): + uri = "%s/%s/nat/config/rules/%s" % (URI_PREFIX, edge_id, rule_id) + return self.do_request(HTTP_DELETE, uri, decode=True) + + def get_edge_status(self, edge_id): + uri = "%s/%s/status?getlatest=false" % (URI_PREFIX, edge_id) + return self.do_request(HTTP_GET, uri, decode=True) + + def get_edges(self): + uri = URI_PREFIX + return self.do_request(HTTP_GET, uri, decode=True) + + def update_routes(self, edge_id, routes): + uri = "%s/%s/routing/config/static" % (URI_PREFIX, edge_id) + return self.do_request(HTTP_PUT, uri, routes) + + def create_lswitch(self, lsconfig): + uri = "/api/ws.v1/lswitch" + return self.do_request(HTTP_POST, uri, lsconfig, decode=True) + + def delete_lswitch(self, lswitch_id): + uri = "/api/ws.v1/lswitch/%s" % lswitch_id + return self.do_request(HTTP_DELETE, uri) + + def get_loadbalancer_config(self, edge_id): + uri = self._build_uri_path(edge_id, LOADBALANCER_SERVICE) + return self.do_request(HTTP_GET, uri, decode=True) + + def enable_service_loadbalancer(self, edge_id, config): + uri = self._build_uri_path(edge_id, LOADBALANCER_SERVICE) + return self.do_request(HTTP_PUT, uri, config) + + def update_firewall(self, edge_id, fw_req): + uri = self._build_uri_path( + edge_id, FIREWALL_SERVICE) + return self.do_request(HTTP_PUT, uri, fw_req) + + def delete_firewall(self, edge_id): + uri = self._build_uri_path( + edge_id, FIREWALL_SERVICE, None) + return 
self.do_request(HTTP_DELETE, uri) + + def update_firewall_rule(self, edge_id, vcns_rule_id, fwr_req): + uri = self._build_uri_path( + edge_id, FIREWALL_SERVICE, + FIREWALL_RULE_RESOURCE, + vcns_rule_id) + return self.do_request(HTTP_PUT, uri, fwr_req) + + def delete_firewall_rule(self, edge_id, vcns_rule_id): + uri = self._build_uri_path( + edge_id, FIREWALL_SERVICE, + FIREWALL_RULE_RESOURCE, + vcns_rule_id) + return self.do_request(HTTP_DELETE, uri) + + def add_firewall_rule_above(self, edge_id, ref_vcns_rule_id, fwr_req): + uri = self._build_uri_path( + edge_id, FIREWALL_SERVICE, + FIREWALL_RULE_RESOURCE) + uri += "?aboveRuleId=" + ref_vcns_rule_id + return self.do_request(HTTP_POST, uri, fwr_req) + + def add_firewall_rule(self, edge_id, fwr_req): + uri = self._build_uri_path( + edge_id, FIREWALL_SERVICE, + FIREWALL_RULE_RESOURCE) + return self.do_request(HTTP_POST, uri, fwr_req) + + def get_firewall(self, edge_id): + uri = self._build_uri_path(edge_id, FIREWALL_SERVICE) + return self.do_request(HTTP_GET, uri, decode=True) + + def get_firewall_rule(self, edge_id, vcns_rule_id): + uri = self._build_uri_path( + edge_id, FIREWALL_SERVICE, + FIREWALL_RULE_RESOURCE, + vcns_rule_id) + return self.do_request(HTTP_GET, uri, decode=True) + + # + #Edge LBAAS call helper + # + def create_vip(self, edge_id, vip_new): + uri = self._build_uri_path( + edge_id, LOADBALANCER_SERVICE, + VIP_RESOURCE) + return self.do_request(HTTP_POST, uri, vip_new) + + def get_vip(self, edge_id, vip_vseid): + uri = self._build_uri_path( + edge_id, LOADBALANCER_SERVICE, + VIP_RESOURCE, vip_vseid) + return self.do_request(HTTP_GET, uri, decode=True) + + def update_vip(self, edge_id, vip_vseid, vip_new): + uri = self._build_uri_path( + edge_id, LOADBALANCER_SERVICE, + VIP_RESOURCE, vip_vseid) + return self.do_request(HTTP_PUT, uri, vip_new) + + def delete_vip(self, edge_id, vip_vseid): + uri = self._build_uri_path( + edge_id, LOADBALANCER_SERVICE, + VIP_RESOURCE, vip_vseid) + return 
self.do_request(HTTP_DELETE, uri) + + def create_pool(self, edge_id, pool_new): + uri = self._build_uri_path( + edge_id, LOADBALANCER_SERVICE, + POOL_RESOURCE) + return self.do_request(HTTP_POST, uri, pool_new) + + def get_pool(self, edge_id, pool_vseid): + uri = self._build_uri_path( + edge_id, LOADBALANCER_SERVICE, + POOL_RESOURCE, pool_vseid) + return self.do_request(HTTP_GET, uri, decode=True) + + def update_pool(self, edge_id, pool_vseid, pool_new): + uri = self._build_uri_path( + edge_id, LOADBALANCER_SERVICE, + POOL_RESOURCE, pool_vseid) + return self.do_request(HTTP_PUT, uri, pool_new) + + def delete_pool(self, edge_id, pool_vseid): + uri = self._build_uri_path( + edge_id, LOADBALANCER_SERVICE, + POOL_RESOURCE, pool_vseid) + return self.do_request(HTTP_DELETE, uri) + + def create_health_monitor(self, edge_id, monitor_new): + uri = self._build_uri_path( + edge_id, LOADBALANCER_SERVICE, + MONITOR_RESOURCE) + return self.do_request(HTTP_POST, uri, monitor_new) + + def get_health_monitor(self, edge_id, monitor_vseid): + uri = self._build_uri_path( + edge_id, LOADBALANCER_SERVICE, + MONITOR_RESOURCE, monitor_vseid) + return self.do_request(HTTP_GET, uri, decode=True) + + def update_health_monitor(self, edge_id, monitor_vseid, monitor_new): + uri = self._build_uri_path( + edge_id, LOADBALANCER_SERVICE, + MONITOR_RESOURCE, + monitor_vseid) + return self.do_request(HTTP_PUT, uri, monitor_new) + + def delete_health_monitor(self, edge_id, monitor_vseid): + uri = self._build_uri_path( + edge_id, LOADBALANCER_SERVICE, + MONITOR_RESOURCE, + monitor_vseid) + return self.do_request(HTTP_DELETE, uri) + + def create_app_profile(self, edge_id, app_profile): + uri = self._build_uri_path( + edge_id, LOADBALANCER_SERVICE, + APP_PROFILE_RESOURCE) + return self.do_request(HTTP_POST, uri, app_profile) + + def update_app_profile(self, edge_id, app_profileid, app_profile): + uri = self._build_uri_path( + edge_id, LOADBALANCER_SERVICE, + APP_PROFILE_RESOURCE, app_profileid) + return 
self.do_request(HTTP_PUT, uri, app_profile) + + def delete_app_profile(self, edge_id, app_profileid): + uri = self._build_uri_path( + edge_id, LOADBALANCER_SERVICE, + APP_PROFILE_RESOURCE, + app_profileid) + return self.do_request(HTTP_DELETE, uri) + + def update_ipsec_config(self, edge_id, ipsec_config): + uri = self._build_uri_path(edge_id, IPSEC_VPN_SERVICE) + return self.do_request(HTTP_PUT, uri, ipsec_config) + + def delete_ipsec_config(self, edge_id): + uri = self._build_uri_path(edge_id, IPSEC_VPN_SERVICE) + return self.do_request(HTTP_DELETE, uri) + + def get_ipsec_config(self, edge_id): + uri = self._build_uri_path(edge_id, IPSEC_VPN_SERVICE) + return self.do_request(HTTP_GET, uri) + + def _build_uri_path(self, edge_id, + service, + resource=None, + resource_id=None, + parent_resource_id=None, + fields=None, + relations=None, + filters=None, + types=None, + is_attachment=False): + uri_prefix = "%s/%s/%s" % (URI_PREFIX, edge_id, service) + if resource: + res_path = resource + (resource_id and "/%s" % resource_id or '') + uri_path = "%s/%s" % (uri_prefix, res_path) + else: + uri_path = uri_prefix + return uri_path diff --git a/neutron/plugins/vmware/vshield/vcns_driver.py b/neutron/plugins/vmware/vshield/vcns_driver.py new file mode 100644 index 000000000..e705b3329 --- /dev/null +++ b/neutron/plugins/vmware/vshield/vcns_driver.py @@ -0,0 +1,53 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013 VMware, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: linb, VMware + +from oslo.config import cfg + +from neutron.openstack.common import log as logging +from neutron.plugins.vmware.common import config # noqa +from neutron.plugins.vmware.vshield import edge_appliance_driver +from neutron.plugins.vmware.vshield import edge_firewall_driver +from neutron.plugins.vmware.vshield import edge_ipsecvpn_driver +from neutron.plugins.vmware.vshield import edge_loadbalancer_driver +from neutron.plugins.vmware.vshield.tasks import tasks +from neutron.plugins.vmware.vshield import vcns + +LOG = logging.getLogger(__name__) + + +class VcnsDriver(edge_appliance_driver.EdgeApplianceDriver, + edge_firewall_driver.EdgeFirewallDriver, + edge_loadbalancer_driver.EdgeLbDriver, + edge_ipsecvpn_driver.EdgeIPsecVpnDriver): + + def __init__(self, callbacks): + super(VcnsDriver, self).__init__() + + self.callbacks = callbacks + self.vcns_uri = cfg.CONF.vcns.manager_uri + self.vcns_user = cfg.CONF.vcns.user + self.vcns_passwd = cfg.CONF.vcns.password + self.datacenter_moid = cfg.CONF.vcns.datacenter_moid + self.deployment_container_id = cfg.CONF.vcns.deployment_container_id + self.resource_pool_id = cfg.CONF.vcns.resource_pool_id + self.datastore_id = cfg.CONF.vcns.datastore_id + self.external_network = cfg.CONF.vcns.external_network + interval = cfg.CONF.vcns.task_status_check_interval + self.task_manager = tasks.TaskManager(interval) + self.task_manager.start() + self.vcns = vcns.Vcns(self.vcns_uri, self.vcns_user, self.vcns_passwd) diff --git a/neutron/policy.py b/neutron/policy.py new file mode 100644 index 000000000..747638287 --- /dev/null +++ b/neutron/policy.py @@ -0,0 +1,416 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2012 OpenStack Foundation. +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Policy engine for neutron. Largely copied from nova. +""" +import itertools +import re + +from oslo.config import cfg + +from neutron.api.v2 import attributes +from neutron.common import exceptions +import neutron.common.utils as utils +from neutron.openstack.common import excutils +from neutron.openstack.common import importutils +from neutron.openstack.common import log as logging +from neutron.openstack.common import policy + + +LOG = logging.getLogger(__name__) +_POLICY_PATH = None +_POLICY_CACHE = {} +ADMIN_CTX_POLICY = 'context_is_admin' +# Maps deprecated 'extension' policies to new-style policies +DEPRECATED_POLICY_MAP = { + 'extension:provider_network': + ['network:provider:network_type', + 'network:provider:physical_network', + 'network:provider:segmentation_id'], + 'extension:router': + ['network:router:external'], + 'extension:port_binding': + ['port:binding:vif_type', 'port:binding:vif_details', + 'port:binding:profile', 'port:binding:host_id'] +} +DEPRECATED_ACTION_MAP = { + 'view': ['get'], + 'set': ['create', 'update'] +} + +cfg.CONF.import_opt('policy_file', 'neutron.common.config') + + +def reset(): + global _POLICY_PATH + global _POLICY_CACHE + _POLICY_PATH = None + _POLICY_CACHE = {} + policy.reset() + + +def init(): + global _POLICY_PATH + global _POLICY_CACHE + if not _POLICY_PATH: + _POLICY_PATH = utils.find_config_file({}, cfg.CONF.policy_file) + if not _POLICY_PATH: + raise 
exceptions.PolicyFileNotFound(path=cfg.CONF.policy_file) + # pass _set_brain to read_cached_file so that the policy brain + # is reset only if the file has changed + utils.read_cached_file(_POLICY_PATH, _POLICY_CACHE, + reload_func=_set_rules) + + +def get_resource_and_action(action): + """Extract resource and action (write, read) from api operation.""" + data = action.split(':', 1)[0].split('_', 1) + return ("%ss" % data[-1], data[0] != 'get') + + +def _set_rules(data): + default_rule = 'default' + LOG.debug(_("Loading policies from file: %s"), _POLICY_PATH) + # Ensure backward compatibility with folsom/grizzly convention + # for extension rules + policies = policy.Rules.load_json(data, default_rule) + for pol in policies.keys(): + if any([pol.startswith(depr_pol) for depr_pol in + DEPRECATED_POLICY_MAP.keys()]): + LOG.warn(_("Found deprecated policy rule:%s. Please consider " + "upgrading your policy configuration file"), pol) + pol_name, action = pol.rsplit(':', 1) + try: + new_actions = DEPRECATED_ACTION_MAP[action] + new_policies = DEPRECATED_POLICY_MAP[pol_name] + # bind new actions and policies together + for actual_policy in ['_'.join(item) for item in + itertools.product(new_actions, + new_policies)]: + if actual_policy not in policies: + # New policy, same rule + LOG.info(_("Inserting policy:%(new_policy)s in place " + "of deprecated policy:%(old_policy)s"), + {'new_policy': actual_policy, + 'old_policy': pol}) + policies[actual_policy] = policies[pol] + # Remove old-style policy + del policies[pol] + except KeyError: + LOG.error(_("Backward compatibility unavailable for " + "deprecated policy %s. 
The policy will " + "not be enforced"), pol) + policy.set_rules(policies) + + +def _is_attribute_explicitly_set(attribute_name, resource, target): + """Verify that an attribute is present and has a non-default value.""" + return ('default' in resource[attribute_name] and + attribute_name in target and + target[attribute_name] is not attributes.ATTR_NOT_SPECIFIED and + target[attribute_name] != resource[attribute_name]['default']) + + +def _build_subattr_match_rule(attr_name, attr, action, target): + """Create the rule to match for sub-attribute policy checks.""" + # TODO(salv-orlando): Instead of relying on validator info, introduce + # typing for API attributes + # Expect a dict as type descriptor + validate = attr['validate'] + key = filter(lambda k: k.startswith('type:dict'), validate.keys()) + if not key: + LOG.warn(_("Unable to find data type descriptor for attribute %s"), + attr_name) + return + data = validate[key[0]] + if not isinstance(data, dict): + LOG.debug(_("Attribute type descriptor is not a dict. Unable to " + "generate any sub-attr policy rule for %s."), + attr_name) + return + sub_attr_rules = [policy.RuleCheck('rule', '%s:%s:%s' % + (action, attr_name, + sub_attr_name)) for + sub_attr_name in data if sub_attr_name in + target[attr_name]] + return policy.AndCheck(sub_attr_rules) + + +def _build_match_rule(action, target): + """Create the rule to match for a given action. 
+ + The policy rule to be matched is built in the following way: + 1) add entries for matching permission on objects + 2) add an entry for the specific action (e.g.: create_network) + 3) add an entry for attributes of a resource for which the action + is being executed (e.g.: create_network:shared) + 4) add an entry for sub-attributes of a resource for which the + action is being executed + (e.g.: create_router:external_gateway_info:network_id) + """ + match_rule = policy.RuleCheck('rule', action) + resource, is_write = get_resource_and_action(action) + # Attribute-based checks shall not be enforced on GETs + if is_write: + # assigning to variable with short name for improving readability + res_map = attributes.RESOURCE_ATTRIBUTE_MAP + if resource in res_map: + for attribute_name in res_map[resource]: + if _is_attribute_explicitly_set(attribute_name, + res_map[resource], + target): + attribute = res_map[resource][attribute_name] + if 'enforce_policy' in attribute: + attr_rule = policy.RuleCheck('rule', '%s:%s' % + (action, attribute_name)) + # Build match entries for sub-attributes, if present + validate = attribute.get('validate') + if (validate and any([k.startswith('type:dict') and v + for (k, v) in + validate.iteritems()])): + attr_rule = policy.AndCheck( + [attr_rule, _build_subattr_match_rule( + attribute_name, attribute, + action, target)]) + match_rule = policy.AndCheck([match_rule, attr_rule]) + return match_rule + + +# This check is registered as 'tenant_id' so that it can override +# GenericCheck which was used for validating parent resource ownership. +# This will prevent us from having to handling backward compatibility +# for policy.json +# TODO(salv-orlando): Reinstate GenericCheck for simple tenant_id checks +@policy.register('tenant_id') +class OwnerCheck(policy.Check): + """Resource ownership check. + + This check verifies the owner of the current resource, or of another + resource referenced by the one under analysis. 
+ In the former case it falls back to a regular GenericCheck, whereas + in the latter case it leverages the plugin to load the referenced + resource and perform the check. + """ + def __init__(self, kind, match): + # Process the match + try: + self.target_field = re.findall('^\%\((.*)\)s$', + match)[0] + except IndexError: + err_reason = (_("Unable to identify a target field from:%s." + "match should be in the form %%()s") % + match) + LOG.exception(err_reason) + raise exceptions.PolicyInitError( + policy="%s:%s" % (kind, match), + reason=err_reason) + super(OwnerCheck, self).__init__(kind, match) + + def __call__(self, target, creds): + if self.target_field not in target: + # policy needs a plugin check + # target field is in the form resource:field + # however if they're not separated by a colon, use an underscore + # as a separator for backward compatibility + + def do_split(separator): + parent_res, parent_field = self.target_field.split( + separator, 1) + return parent_res, parent_field + + for separator in (':', '_'): + try: + parent_res, parent_field = do_split(separator) + break + except ValueError: + LOG.debug(_("Unable to find ':' as separator in %s."), + self.target_field) + else: + # If we are here split failed with both separators + err_reason = (_("Unable to find resource name in %s") % + self.target_field) + LOG.exception(err_reason) + raise exceptions.PolicyCheckError( + policy="%s:%s" % (self.kind, self.match), + reason=err_reason) + parent_foreign_key = attributes.RESOURCE_FOREIGN_KEYS.get( + "%ss" % parent_res, None) + if not parent_foreign_key: + err_reason = (_("Unable to verify match:%(match)s as the " + "parent resource: %(res)s was not found") % + {'match': self.match, 'res': parent_res}) + LOG.exception(err_reason) + raise exceptions.PolicyCheckError( + policy="%s:%s" % (self.kind, self.match), + reason=err_reason) + # NOTE(salv-orlando): This check currently assumes the parent + # resource is handled by the core plugin. 
It might be worth + # having a way to map resources to plugins so to make this + # check more general + # FIXME(ihrachys): if import is put in global, circular + # import failure occurs + from neutron import manager + f = getattr(manager.NeutronManager.get_instance().plugin, + 'get_%s' % parent_res) + # f *must* exist, if not found it is better to let neutron + # explode. Check will be performed with admin context + context = importutils.import_module('neutron.context') + try: + data = f(context.get_admin_context(), + target[parent_foreign_key], + fields=[parent_field]) + target[self.target_field] = data[parent_field] + except Exception: + with excutils.save_and_reraise_exception(): + LOG.exception(_('Policy check error while calling %s!'), f) + match = self.match % target + if self.kind in creds: + return match == unicode(creds[self.kind]) + return False + + +@policy.register('field') +class FieldCheck(policy.Check): + def __init__(self, kind, match): + # Process the match + resource, field_value = match.split(':', 1) + field, value = field_value.split('=', 1) + + super(FieldCheck, self).__init__(kind, '%s:%s:%s' % + (resource, field, value)) + + # Value might need conversion - we need help from the attribute map + try: + attr = attributes.RESOURCE_ATTRIBUTE_MAP[resource][field] + conv_func = attr['convert_to'] + except KeyError: + conv_func = lambda x: x + + self.field = field + self.value = conv_func(value) + + def __call__(self, target_dict, cred_dict): + target_value = target_dict.get(self.field) + # target_value might be a boolean, explicitly compare with None + if target_value is None: + LOG.debug(_("Unable to find requested field: %(field)s in " + "target: %(target_dict)s"), + {'field': self.field, + 'target_dict': target_dict}) + return False + return target_value == self.value + + +def _prepare_check(context, action, target): + """Prepare rule, target, and credentials for the policy engine.""" + # Compare with None to distinguish case in which target is 
{} + if target is None: + target = {} + match_rule = _build_match_rule(action, target) + credentials = context.to_dict() + return match_rule, target, credentials + + +def check(context, action, target, plugin=None, might_not_exist=False): + """Verifies that the action is valid on the target in this context. + + :param context: neutron context + :param action: string representing the action to be checked + this should be colon separated for clarity. + :param target: dictionary representing the object of the action + for object creation this should be a dictionary representing the + location of the object e.g. ``{'project_id': context.project_id}`` + :param plugin: currently unused and deprecated. + Kept for backward compatibility. + :param might_not_exist: If True the policy check is skipped (and the + function returns True) if the specified policy does not exist. + Defaults to false. + + :return: Returns True if access is permitted else False. + """ + if might_not_exist and not (policy._rules and action in policy._rules): + return True + return policy.check(*(_prepare_check(context, action, target))) + + +def enforce(context, action, target, plugin=None): + """Verifies that the action is valid on the target in this context. + + :param context: neutron context + :param action: string representing the action to be checked + this should be colon separated for clarity. + :param target: dictionary representing the object of the action + for object creation this should be a dictionary representing the + location of the object e.g. ``{'project_id': context.project_id}`` + :param plugin: currently unused and deprecated. + Kept for backward compatibility. + + :raises neutron.exceptions.PolicyNotAuthorized: if verification fails. 
+ """ + + rule, target, credentials = _prepare_check(context, action, target) + result = policy.check(rule, target, credentials, action=action) + if not result: + LOG.debug(_("Failed policy check for '%s'"), action) + raise exceptions.PolicyNotAuthorized(action=action) + return result + + +def check_is_admin(context): + """Verify context has admin rights according to policy settings.""" + init() + # the target is user-self + credentials = context.to_dict() + target = credentials + # Backward compatibility: if ADMIN_CTX_POLICY is not + # found, default to validating role:admin + admin_policy = (ADMIN_CTX_POLICY in policy._rules + and ADMIN_CTX_POLICY or 'role:admin') + return policy.check(admin_policy, target, credentials) + + +def _extract_roles(rule, roles): + if isinstance(rule, policy.RoleCheck): + roles.append(rule.match.lower()) + elif isinstance(rule, policy.RuleCheck): + _extract_roles(policy._rules[rule.match], roles) + elif hasattr(rule, 'rules'): + for rule in rule.rules: + _extract_roles(rule, roles) + + +def get_admin_roles(): + """Return a list of roles which are granted admin rights according + to policy settings. + """ + # NOTE(salvatore-orlando): This function provides a solution for + # populating implicit contexts with the appropriate roles so that + # they correctly pass policy checks, and will become superseded + # once all explicit policy checks are removed from db logic and + # plugin modules. 
For backward compatibility it returns the literal + # admin if ADMIN_CTX_POLICY is not defined + init() + if not policy._rules or ADMIN_CTX_POLICY not in policy._rules: + return ['admin'] + try: + admin_ctx_rule = policy._rules[ADMIN_CTX_POLICY] + except (KeyError, TypeError): + return + roles = [] + _extract_roles(admin_ctx_rule, roles) + return roles diff --git a/neutron/quota.py b/neutron/quota.py new file mode 100644 index 000000000..2768b1a27 --- /dev/null +++ b/neutron/quota.py @@ -0,0 +1,334 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Quotas for instances, volumes, and floating ips.""" + +import sys + +from oslo.config import cfg +import webob + +from neutron.common import exceptions +from neutron.openstack.common import importutils +from neutron.openstack.common import log as logging + +LOG = logging.getLogger(__name__) +QUOTA_DB_MODULE = 'neutron.db.quota_db' +QUOTA_DB_DRIVER = 'neutron.db.quota_db.DbQuotaDriver' +QUOTA_CONF_DRIVER = 'neutron.quota.ConfDriver' + +quota_opts = [ + cfg.ListOpt('quota_items', + default=['network', 'subnet', 'port'], + help=_('Resource name(s) that are supported in quota ' + 'features')), + cfg.IntOpt('default_quota', + default=-1, + help=_('Default number of resource allowed per tenant. ' + 'A negative value means unlimited.')), + cfg.IntOpt('quota_network', + default=10, + help=_('Number of networks allowed per tenant.' 
+ 'A negative value means unlimited.')), + cfg.IntOpt('quota_subnet', + default=10, + help=_('Number of subnets allowed per tenant, ' + 'A negative value means unlimited.')), + cfg.IntOpt('quota_port', + default=50, + help=_('Number of ports allowed per tenant. ' + 'A negative value means unlimited.')), + cfg.StrOpt('quota_driver', + default=QUOTA_DB_DRIVER, + help=_('Default driver to use for quota checks')), +] +# Register the configuration options +cfg.CONF.register_opts(quota_opts, 'QUOTAS') + + +class ConfDriver(object): + """Configuration driver. + + Driver to perform necessary checks to enforce quotas and obtain + quota information. The default driver utilizes the default values + in neutron.conf. + """ + + def _get_quotas(self, context, resources, keys): + """Get quotas. + + A helper method which retrieves the quotas for the specific + resources identified by keys, and which apply to the current + context. + + :param context: The request context, for access checks. + :param resources: A dictionary of the registered resources. + :param keys: A list of the desired quotas to retrieve. + """ + + # Filter resources + desired = set(keys) + sub_resources = dict((k, v) for k, v in resources.items() + if k in desired) + + # Make sure we accounted for all of them... + if len(keys) != len(sub_resources): + unknown = desired - set(sub_resources.keys()) + raise exceptions.QuotaResourceUnknown(unknown=sorted(unknown)) + quotas = {} + for resource in sub_resources.values(): + quotas[resource.name] = resource.default + return quotas + + def limit_check(self, context, tenant_id, + resources, values): + """Check simple quota limits. + + For limits--those quotas for which there is no usage + synchronization function--this method checks that a set of + proposed values are permitted by the limit restriction. + + This method will raise a QuotaResourceUnknown exception if a + given resource is unknown or if it is not a simple limit + resource. 
+ + If any of the proposed values is over the defined quota, an + OverQuota exception will be raised with the sorted list of the + resources which are too high. Otherwise, the method returns + nothing. + + :param context: The request context, for access checks. + :param tennant_id: The tenant_id to check quota. + :param resources: A dictionary of the registered resources. + :param values: A dictionary of the values to check against the + quota. + """ + + # Ensure no value is less than zero + unders = [key for key, val in values.items() if val < 0] + if unders: + raise exceptions.InvalidQuotaValue(unders=sorted(unders)) + + # Get the applicable quotas + quotas = self._get_quotas(context, resources, values.keys()) + + # Check the quotas and construct a list of the resources that + # would be put over limit by the desired values + overs = [key for key, val in values.items() + if quotas[key] >= 0 and quotas[key] < val] + if overs: + raise exceptions.OverQuota(overs=sorted(overs), quotas=quotas, + usages={}) + + @staticmethod + def get_tenant_quotas(context, resources, tenant_id): + quotas = {} + sub_resources = dict((k, v) for k, v in resources.items()) + for resource in sub_resources.values(): + quotas[resource.name] = resource.default + return quotas + + @staticmethod + def get_all_quotas(context, resources): + return [] + + @staticmethod + def delete_tenant_quota(context, tenant_id): + msg = _('Access to this resource was denied.') + raise webob.exc.HTTPForbidden(msg) + + @staticmethod + def update_quota_limit(context, tenant_id, resource, limit): + msg = _('Access to this resource was denied.') + raise webob.exc.HTTPForbidden(msg) + + +class BaseResource(object): + """Describe a single resource for quota checking.""" + + def __init__(self, name, flag): + """Initializes a resource. + + :param name: The name of the resource, i.e., "instances". 
+ :param flag: The name of the flag or configuration option + """ + + self.name = name + self.flag = flag + + @property + def default(self): + """Return the default value of the quota.""" + return getattr(cfg.CONF.QUOTAS, + self.flag, + cfg.CONF.QUOTAS.default_quota) + + +class CountableResource(BaseResource): + """Describe a resource where the counts are determined by a function.""" + + def __init__(self, name, count, flag=None): + """Initializes a CountableResource. + + Countable resources are those resources which directly + correspond to objects in the database, i.e., netowk, subnet, + etc.,. A CountableResource must be constructed with a counting + function, which will be called to determine the current counts + of the resource. + + The counting function will be passed the context, along with + the extra positional and keyword arguments that are passed to + Quota.count(). It should return an integer specifying the + count. + + :param name: The name of the resource, i.e., "instances". + :param count: A callable which returns the count of the + resource. The arguments passed are as described + above. + :param flag: The name of the flag or configuration option + which specifies the default value of the quota + for this resource. + """ + + super(CountableResource, self).__init__(name, flag=flag) + self.count = count + + +class QuotaEngine(object): + """Represent the set of recognized quotas.""" + + def __init__(self, quota_driver_class=None): + """Initialize a Quota object.""" + + self._resources = {} + self._driver = None + self._driver_class = quota_driver_class + + def get_driver(self): + if self._driver is None: + _driver_class = (self._driver_class or + cfg.CONF.QUOTAS.quota_driver) + if (_driver_class == QUOTA_DB_DRIVER and + QUOTA_DB_MODULE not in sys.modules): + # If quotas table is not loaded, force config quota driver. 
+ _driver_class = QUOTA_CONF_DRIVER + LOG.info(_("ConfDriver is used as quota_driver because the " + "loaded plugin does not support 'quotas' table.")) + if isinstance(_driver_class, basestring): + _driver_class = importutils.import_object(_driver_class) + self._driver = _driver_class + LOG.info(_('Loaded quota_driver: %s.'), _driver_class) + return self._driver + + def __contains__(self, resource): + return resource in self._resources + + def register_resource(self, resource): + """Register a resource.""" + if resource.name in self._resources: + LOG.warn(_('%s is already registered.'), resource.name) + return + self._resources[resource.name] = resource + + def register_resource_by_name(self, resourcename): + """Register a resource by name.""" + resource = CountableResource(resourcename, _count_resource, + 'quota_' + resourcename) + self.register_resource(resource) + + def register_resources(self, resources): + """Register a list of resources.""" + + for resource in resources: + self.register_resource(resource) + + def count(self, context, resource, *args, **kwargs): + """Count a resource. + + For countable resources, invokes the count() function and + returns its result. Arguments following the context and + resource are passed directly to the count function declared by + the resource. + + :param context: The request context, for access checks. + :param resource: The name of the resource, as a string. + """ + + # Get the resource + res = self._resources.get(resource) + if not res or not hasattr(res, 'count'): + raise exceptions.QuotaResourceUnknown(unknown=[resource]) + + return res.count(context, *args, **kwargs) + + def limit_check(self, context, tenant_id, **values): + """Check simple quota limits. + + For limits--those quotas for which there is no usage + synchronization function--this method checks that a set of + proposed values are permitted by the limit restriction. 
The + values to check are given as keyword arguments, where the key + identifies the specific quota limit to check, and the value is + the proposed value. + + This method will raise a QuotaResourceUnknown exception if a + given resource is unknown or if it is not a simple limit + resource. + + If any of the proposed values is over the defined quota, an + OverQuota exception will be raised with the sorted list of the + resources which are too high. Otherwise, the method returns + nothing. + + :param context: The request context, for access checks. + """ + + return self.get_driver().limit_check(context, tenant_id, + self._resources, values) + + @property + def resources(self): + return self._resources + + +QUOTAS = QuotaEngine() + + +def _count_resource(context, plugin, resources, tenant_id): + count_getter_name = "get_%s_count" % resources + + # Some plugins support a count method for particular resources, + # using a DB's optimized counting features. We try to use that one + # if present. 
Otherwise just use regular getter to retrieve all objects + # and count in python, allowing older plugins to still be supported + try: + obj_count_getter = getattr(plugin, count_getter_name) + return obj_count_getter(context, filters={'tenant_id': [tenant_id]}) + except (NotImplementedError, AttributeError): + obj_getter = getattr(plugin, "get_%s" % resources) + obj_list = obj_getter(context, filters={'tenant_id': [tenant_id]}) + return len(obj_list) if obj_list else 0 + + +def register_resources_from_config(): + resources = [] + for resource_item in cfg.CONF.QUOTAS.quota_items: + resources.append(CountableResource(resource_item, _count_resource, + 'quota_' + resource_item)) + QUOTAS.register_resources(resources) + + +register_resources_from_config() diff --git a/neutron/scheduler/__init__.py b/neutron/scheduler/__init__.py new file mode 100644 index 000000000..7506a2914 --- /dev/null +++ b/neutron/scheduler/__init__.py @@ -0,0 +1,16 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2013 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/neutron/scheduler/dhcp_agent_scheduler.py b/neutron/scheduler/dhcp_agent_scheduler.py new file mode 100644 index 000000000..98ec5904e --- /dev/null +++ b/neutron/scheduler/dhcp_agent_scheduler.py @@ -0,0 +1,132 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2013 OpenStack Foundation. +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import random + +from oslo.config import cfg +from sqlalchemy import sql + +from neutron.common import constants +from neutron.db import agents_db +from neutron.db import agentschedulers_db +from neutron.openstack.common.db import exception as db_exc +from neutron.openstack.common import log as logging + + +LOG = logging.getLogger(__name__) + + +class ChanceScheduler(object): + """Allocate a DHCP agent for a network in a random way. + More sophisticated scheduler (similar to filter scheduler in nova?) + can be introduced later. + """ + + def _schedule_bind_network(self, context, agents, network_id): + for agent in agents: + context.session.begin(subtransactions=True) + try: + binding = agentschedulers_db.NetworkDhcpAgentBinding() + binding.dhcp_agent = agent + binding.network_id = network_id + context.session.add(binding) + # try to actually write the changes and catch integrity + # DBDuplicateEntry + context.session.commit() + except db_exc.DBDuplicateEntry: + # it's totally ok, someone just did our job! + context.session.rollback() + LOG.info(_('Agent %s already present'), agent) + LOG.debug(_('Network %(network_id)s is scheduled to be ' + 'hosted by DHCP agent %(agent_id)s'), + {'network_id': network_id, + 'agent_id': agent}) + + def schedule(self, plugin, context, network): + """Schedule the network to active DHCP agent(s). + + A list of scheduled agents is returned. 
+ """ + agents_per_network = cfg.CONF.dhcp_agents_per_network + + #TODO(gongysh) don't schedule the networks with only + # subnets whose enable_dhcp is false + with context.session.begin(subtransactions=True): + dhcp_agents = plugin.get_dhcp_agents_hosting_networks( + context, [network['id']], active=True) + if len(dhcp_agents) >= agents_per_network: + LOG.debug(_('Network %s is hosted already'), + network['id']) + return + n_agents = agents_per_network - len(dhcp_agents) + enabled_dhcp_agents = plugin.get_agents_db( + context, filters={ + 'agent_type': [constants.AGENT_TYPE_DHCP], + 'admin_state_up': [True]}) + if not enabled_dhcp_agents: + LOG.warn(_('No more DHCP agents')) + return + active_dhcp_agents = [ + agent for agent in set(enabled_dhcp_agents) + if not agents_db.AgentDbMixin.is_agent_down( + agent['heartbeat_timestamp']) + and agent not in dhcp_agents + ] + if not active_dhcp_agents: + LOG.warn(_('No more DHCP agents')) + return + n_agents = min(len(active_dhcp_agents), n_agents) + chosen_agents = random.sample(active_dhcp_agents, n_agents) + self._schedule_bind_network(context, chosen_agents, network['id']) + return chosen_agents + + def auto_schedule_networks(self, plugin, context, host): + """Schedule non-hosted networks to the DHCP agent on + the specified host. 
+ """ + agents_per_network = cfg.CONF.dhcp_agents_per_network + with context.session.begin(subtransactions=True): + query = context.session.query(agents_db.Agent) + query = query.filter(agents_db.Agent.agent_type == + constants.AGENT_TYPE_DHCP, + agents_db.Agent.host == host, + agents_db.Agent.admin_state_up == sql.true()) + dhcp_agents = query.all() + for dhcp_agent in dhcp_agents: + if agents_db.AgentDbMixin.is_agent_down( + dhcp_agent.heartbeat_timestamp): + LOG.warn(_('DHCP agent %s is not active'), dhcp_agent.id) + continue + fields = ['network_id', 'enable_dhcp'] + subnets = plugin.get_subnets(context, fields=fields) + net_ids = set(s['network_id'] for s in subnets + if s['enable_dhcp']) + if not net_ids: + LOG.debug(_('No non-hosted networks')) + return False + for net_id in net_ids: + agents = plugin.get_dhcp_agents_hosting_networks( + context, [net_id], active=True) + if len(agents) >= agents_per_network: + continue + if any(dhcp_agent.id == agent.id for agent in agents): + continue + binding = agentschedulers_db.NetworkDhcpAgentBinding() + binding.dhcp_agent = dhcp_agent + binding.network_id = net_id + context.session.add(binding) + return True diff --git a/neutron/scheduler/l3_agent_scheduler.py b/neutron/scheduler/l3_agent_scheduler.py new file mode 100644 index 000000000..df125ce17 --- /dev/null +++ b/neutron/scheduler/l3_agent_scheduler.py @@ -0,0 +1,194 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2013 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +import abc +import random + +import six +from sqlalchemy.orm import exc +from sqlalchemy import sql + +from neutron.common import constants +from neutron.db import agents_db +from neutron.db import l3_agentschedulers_db +from neutron.db import l3_db +from neutron.openstack.common import log as logging + + +LOG = logging.getLogger(__name__) + + +@six.add_metaclass(abc.ABCMeta) +class L3Scheduler(object): + + @abc.abstractmethod + def schedule(self, plugin, context, router_id, candidates=None): + """Schedule the router to an active L3 agent. + + Schedule the router only if it is not already scheduled. + """ + pass + + def auto_schedule_routers(self, plugin, context, host, router_ids): + """Schedule non-hosted routers to L3 Agent running on host. + + If router_ids is given, each router in router_ids is scheduled + if it is not scheduled yet. Otherwise all unscheduled routers + are scheduled. + Don't schedule the routers which are hosted already + by active l3 agents. 
+ """ + with context.session.begin(subtransactions=True): + # query if we have valid l3 agent on the host + query = context.session.query(agents_db.Agent) + query = query.filter(agents_db.Agent.agent_type == + constants.AGENT_TYPE_L3, + agents_db.Agent.host == host, + agents_db.Agent.admin_state_up == sql.true()) + try: + l3_agent = query.one() + except (exc.MultipleResultsFound, exc.NoResultFound): + LOG.debug(_('No enabled L3 agent on host %s'), + host) + return False + if agents_db.AgentDbMixin.is_agent_down( + l3_agent.heartbeat_timestamp): + LOG.warn(_('L3 agent %s is not active'), l3_agent.id) + # check if each of the specified routers is hosted + if router_ids: + unscheduled_router_ids = [] + for router_id in router_ids: + l3_agents = plugin.get_l3_agents_hosting_routers( + context, [router_id], admin_state_up=True) + if l3_agents: + LOG.debug(_('Router %(router_id)s has already been' + ' hosted by L3 agent %(agent_id)s'), + {'router_id': router_id, + 'agent_id': l3_agents[0]['id']}) + else: + unscheduled_router_ids.append(router_id) + if not unscheduled_router_ids: + # all (specified) routers are already scheduled + return False + else: + # get all routers that are not hosted + #TODO(gongysh) consider the disabled agent's router + stmt = ~sql.exists().where( + l3_db.Router.id == + l3_agentschedulers_db.RouterL3AgentBinding.router_id) + unscheduled_router_ids = [router_id_[0] for router_id_ in + context.session.query( + l3_db.Router.id).filter(stmt)] + if not unscheduled_router_ids: + LOG.debug(_('No non-hosted routers')) + return False + + # check if the configuration of l3 agent is compatible + # with the router + routers = plugin.get_routers( + context, filters={'id': unscheduled_router_ids}) + to_removed_ids = [] + for router in routers: + candidates = plugin.get_l3_agent_candidates(router, [l3_agent]) + if not candidates: + to_removed_ids.append(router['id']) + router_ids = set([r['id'] for r in routers]) - set(to_removed_ids) + if not router_ids: + 
LOG.warn(_('No routers compatible with L3 agent configuration' + ' on host %s'), host) + return False + + for router_id in router_ids: + self.bind_router(context, router_id, l3_agent) + return True + + def get_candidates(self, plugin, context, sync_router): + """Return L3 agents where a router could be scheduled.""" + with context.session.begin(subtransactions=True): + # allow one router is hosted by just + # one enabled l3 agent hosting since active is just a + # timing problem. Non-active l3 agent can return to + # active any time + l3_agents = plugin.get_l3_agents_hosting_routers( + context, [sync_router['id']], admin_state_up=True) + if l3_agents: + LOG.debug(_('Router %(router_id)s has already been hosted' + ' by L3 agent %(agent_id)s'), + {'router_id': sync_router['id'], + 'agent_id': l3_agents[0]['id']}) + return + + active_l3_agents = plugin.get_l3_agents(context, active=True) + if not active_l3_agents: + LOG.warn(_('No active L3 agents')) + return + candidates = plugin.get_l3_agent_candidates(sync_router, + active_l3_agents) + if not candidates: + LOG.warn(_('No L3 agents can host the router %s'), + sync_router['id']) + return + + return candidates + + def bind_router(self, context, router_id, chosen_agent): + """Bind the router to the l3 agent which has been chosen.""" + with context.session.begin(subtransactions=True): + binding = l3_agentschedulers_db.RouterL3AgentBinding() + binding.l3_agent = chosen_agent + binding.router_id = router_id + context.session.add(binding) + LOG.debug(_('Router %(router_id)s is scheduled to ' + 'L3 agent %(agent_id)s'), + {'router_id': router_id, + 'agent_id': chosen_agent.id}) + + +class ChanceScheduler(L3Scheduler): + """Randomly allocate an L3 agent for a router.""" + + def schedule(self, plugin, context, router_id, candidates=None): + with context.session.begin(subtransactions=True): + sync_router = plugin.get_router(context, router_id) + candidates = candidates or self.get_candidates( + plugin, context, sync_router) + 
if not candidates: + return + + chosen_agent = random.choice(candidates) + self.bind_router(context, router_id, chosen_agent) + return chosen_agent + + +class LeastRoutersScheduler(L3Scheduler): + """Allocate to an L3 agent with the least number of routers bound.""" + + def schedule(self, plugin, context, router_id, candidates=None): + with context.session.begin(subtransactions=True): + sync_router = plugin.get_router(context, router_id) + candidates = candidates or self.get_candidates( + plugin, context, sync_router) + if not candidates: + return + + candidate_ids = [candidate['id'] for candidate in candidates] + chosen_agent = plugin.get_l3_agent_with_min_routers( + context, candidate_ids) + + self.bind_router(context, router_id, chosen_agent) + + return chosen_agent diff --git a/neutron/server/__init__.py b/neutron/server/__init__.py new file mode 100755 index 000000000..eb34ad851 --- /dev/null +++ b/neutron/server/__init__.py @@ -0,0 +1,70 @@ +#!/usr/bin/env python + +# Copyright 2011 VMware, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +# If ../neutron/__init__.py exists, add ../ to Python search path, so that +# it will override what happens to be installed in /usr/(local/)lib/python... 
+ +import sys + +import eventlet +eventlet.monkey_patch() + +from oslo.config import cfg + +from neutron.common import config +from neutron import service + +from neutron.openstack.common import gettextutils +from neutron.openstack.common import log as logging +gettextutils.install('neutron', lazy=True) + +LOG = logging.getLogger(__name__) + + +def main(): + # the configuration will be read into the cfg.CONF global data structure + config.init(sys.argv[1:]) + if not cfg.CONF.config_file: + sys.exit(_("ERROR: Unable to find configuration file via the default" + " search paths (~/.neutron/, ~/, /etc/neutron/, /etc/) and" + " the '--config-file' option!")) + try: + pool = eventlet.GreenPool() + + neutron_api = service.serve_wsgi(service.NeutronApiService) + api_thread = pool.spawn(neutron_api.wait) + + try: + neutron_rpc = service.serve_rpc() + except NotImplementedError: + LOG.info(_("RPC was already started in parent process by plugin.")) + else: + rpc_thread = pool.spawn(neutron_rpc.wait) + + # api and rpc should die together. When one dies, kill the other. + rpc_thread.link(lambda gt: api_thread.kill()) + api_thread.link(lambda gt: rpc_thread.kill()) + + pool.waitall() + except KeyboardInterrupt: + pass + except RuntimeError as e: + sys.exit(_("ERROR: %s") % e) + + +if __name__ == "__main__": + main() diff --git a/neutron/service.py b/neutron/service.py new file mode 100644 index 000000000..c26182bc5 --- /dev/null +++ b/neutron/service.py @@ -0,0 +1,299 @@ +# Copyright 2011 VMware, Inc +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +import inspect +import logging as std_logging +import os +import random + +from oslo.config import cfg +from oslo.messaging import server as rpc_server + +from neutron.common import config +from neutron.common import rpc_compat +from neutron import context +from neutron.db import api as session +from neutron import manager +from neutron.openstack.common import excutils +from neutron.openstack.common import importutils +from neutron.openstack.common import log as logging +from neutron.openstack.common import loopingcall +from neutron.openstack.common import service as common_service +from neutron import wsgi + + +service_opts = [ + cfg.IntOpt('periodic_interval', + default=40, + help=_('Seconds between running periodic tasks')), + cfg.IntOpt('api_workers', + default=0, + help=_('Number of separate worker processes for service')), + cfg.IntOpt('rpc_workers', + default=0, + help=_('Number of RPC worker processes for service')), + cfg.IntOpt('periodic_fuzzy_delay', + default=5, + help=_('Range of seconds to randomly delay when starting the ' + 'periodic task scheduler to reduce stampeding. ' + '(Disable by setting to 0)')), +] +CONF = cfg.CONF +CONF.register_opts(service_opts) + +LOG = logging.getLogger(__name__) + + +class WsgiService(object): + """Base class for WSGI based services. 
+ + For each api you define, you must also define these flags: + :_listen: The address on which to listen + :_listen_port: The port on which to listen + + """ + + def __init__(self, app_name): + self.app_name = app_name + self.wsgi_app = None + + def start(self): + self.wsgi_app = _run_wsgi(self.app_name) + + def wait(self): + self.wsgi_app.wait() + + +class NeutronApiService(WsgiService): + """Class for neutron-api service.""" + + @classmethod + def create(cls, app_name='neutron'): + + # Setup logging early, supplying both the CLI options and the + # configuration mapping from the config file + # We only update the conf dict for the verbose and debug + # flags. Everything else must be set up in the conf file... + # Log the options used when starting if we're in debug mode... + + config.setup_logging(cfg.CONF) + # Dump the initial option values + cfg.CONF.log_opt_values(LOG, std_logging.DEBUG) + service = cls(app_name) + return service + + +def serve_wsgi(cls): + + try: + service = cls.create() + service.start() + except Exception: + with excutils.save_and_reraise_exception(): + LOG.exception(_('Unrecoverable error: please check log ' + 'for details.')) + + return service + + +class RpcWorker(object): + """Wraps a worker to be handled by ProcessLauncher""" + def __init__(self, plugin): + self._plugin = plugin + self._servers = [] + + def start(self): + # We may have just forked from parent process. A quick disposal of the + # existing sql connections avoids producing errors later when they are + # discovered to be broken. 
+ session.get_engine().pool.dispose() + self._servers = self._plugin.start_rpc_listeners() + + def wait(self): + for server in self._servers: + if isinstance(server, rpc_server.MessageHandlingServer): + server.wait() + + def stop(self): + for server in self._servers: + if isinstance(server, rpc_server.MessageHandlingServer): + server.kill() + self._servers = [] + + +def serve_rpc(): + plugin = manager.NeutronManager.get_plugin() + + # If 0 < rpc_workers then start_rpc_listeners would be called in a + # subprocess and we cannot simply catch the NotImplementedError. It is + # simpler to check this up front by testing whether the plugin supports + # multiple RPC workers. + if not plugin.rpc_workers_supported(): + LOG.debug(_("Active plugin doesn't implement start_rpc_listeners")) + if 0 < cfg.CONF.rpc_workers: + msg = _("'rpc_workers = %d' ignored because start_rpc_listeners " + "is not implemented.") + LOG.error(msg, cfg.CONF.rpc_workers) + raise NotImplementedError + + try: + rpc = RpcWorker(plugin) + + if cfg.CONF.rpc_workers < 1: + rpc.start() + return rpc + else: + launcher = common_service.ProcessLauncher(wait_interval=1.0) + launcher.launch_service(rpc, workers=cfg.CONF.rpc_workers) + return launcher + except Exception: + with excutils.save_and_reraise_exception(): + LOG.exception(_('Unrecoverable error: please check log ' + 'for details.')) + + +def _run_wsgi(app_name): + app = config.load_paste_app(app_name) + if not app: + LOG.error(_('No known API applications configured.')) + return + server = wsgi.Server("Neutron") + server.start(app, cfg.CONF.bind_port, cfg.CONF.bind_host, + workers=cfg.CONF.api_workers) + # Dump all option values here after all options are parsed + cfg.CONF.log_opt_values(LOG, std_logging.DEBUG) + LOG.info(_("Neutron service started, listening on %(host)s:%(port)s"), + {'host': cfg.CONF.bind_host, + 'port': cfg.CONF.bind_port}) + return server + + +class Service(rpc_compat.Service): + """Service object for binaries running on hosts. 
+ + A service takes a manager and enables rpc by listening to queues based + on topic. It also periodically runs tasks on the manager. + """ + + def __init__(self, host, binary, topic, manager, report_interval=None, + periodic_interval=None, periodic_fuzzy_delay=None, + *args, **kwargs): + + self.binary = binary + self.manager_class_name = manager + manager_class = importutils.import_class(self.manager_class_name) + self.manager = manager_class(host=host, *args, **kwargs) + self.report_interval = report_interval + self.periodic_interval = periodic_interval + self.periodic_fuzzy_delay = periodic_fuzzy_delay + self.saved_args, self.saved_kwargs = args, kwargs + self.timers = [] + super(Service, self).__init__(host, topic, manager=self.manager) + + def start(self): + self.manager.init_host() + super(Service, self).start() + if self.report_interval: + pulse = loopingcall.FixedIntervalLoopingCall(self.report_state) + pulse.start(interval=self.report_interval, + initial_delay=self.report_interval) + self.timers.append(pulse) + + if self.periodic_interval: + if self.periodic_fuzzy_delay: + initial_delay = random.randint(0, self.periodic_fuzzy_delay) + else: + initial_delay = None + + periodic = loopingcall.FixedIntervalLoopingCall( + self.periodic_tasks) + periodic.start(interval=self.periodic_interval, + initial_delay=initial_delay) + self.timers.append(periodic) + self.manager.after_start() + + def __getattr__(self, key): + manager = self.__dict__.get('manager', None) + return getattr(manager, key) + + @classmethod + def create(cls, host=None, binary=None, topic=None, manager=None, + report_interval=None, periodic_interval=None, + periodic_fuzzy_delay=None): + """Instantiates class and passes back application object. 
+ + :param host: defaults to CONF.host + :param binary: defaults to basename of executable + :param topic: defaults to bin_name - 'nova-' part + :param manager: defaults to CONF._manager + :param report_interval: defaults to CONF.report_interval + :param periodic_interval: defaults to CONF.periodic_interval + :param periodic_fuzzy_delay: defaults to CONF.periodic_fuzzy_delay + + """ + if not host: + host = CONF.host + if not binary: + binary = os.path.basename(inspect.stack()[-1][1]) + if not topic: + topic = binary.rpartition('neutron-')[2] + topic = topic.replace("-", "_") + if not manager: + manager = CONF.get('%s_manager' % topic, None) + if report_interval is None: + report_interval = CONF.report_interval + if periodic_interval is None: + periodic_interval = CONF.periodic_interval + if periodic_fuzzy_delay is None: + periodic_fuzzy_delay = CONF.periodic_fuzzy_delay + service_obj = cls(host, binary, topic, manager, + report_interval=report_interval, + periodic_interval=periodic_interval, + periodic_fuzzy_delay=periodic_fuzzy_delay) + + return service_obj + + def kill(self): + """Destroy the service object.""" + self.stop() + + def stop(self): + super(Service, self).stop() + for x in self.timers: + try: + x.stop() + except Exception: + LOG.exception(_("Exception occurs when timer stops")) + pass + self.timers = [] + + def wait(self): + super(Service, self).wait() + for x in self.timers: + try: + x.wait() + except Exception: + LOG.exception(_("Exception occurs when waiting for timer")) + pass + + def periodic_tasks(self, raise_on_error=False): + """Tasks to be run at a periodic interval.""" + ctxt = context.get_admin_context() + self.manager.periodic_tasks(ctxt, raise_on_error=raise_on_error) + + def report_state(self): + """Update the state of this service.""" + # Todo(gongysh) report state to neutron server + pass diff --git a/neutron/services/__init__.py b/neutron/services/__init__.py new file mode 100644 index 000000000..7e503debd --- /dev/null +++ 
b/neutron/services/__init__.py @@ -0,0 +1,16 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/neutron/services/firewall/__init__.py b/neutron/services/firewall/__init__.py new file mode 100644 index 000000000..5e8da711f --- /dev/null +++ b/neutron/services/firewall/__init__.py @@ -0,0 +1,16 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/neutron/services/firewall/agents/__init__.py b/neutron/services/firewall/agents/__init__.py new file mode 100644 index 000000000..5e8da711f --- /dev/null +++ b/neutron/services/firewall/agents/__init__.py @@ -0,0 +1,16 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 OpenStack Foundation. +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/neutron/services/firewall/agents/firewall_agent_api.py b/neutron/services/firewall/agents/firewall_agent_api.py new file mode 100644 index 000000000..aad828d41 --- /dev/null +++ b/neutron/services/firewall/agents/firewall_agent_api.py @@ -0,0 +1,85 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright (c) 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Sumit Naiksatam, sumitnaiksatam@gmail.com, Big Switch Networks, Inc. +# @author: Sridar Kandaswamy, skandasw@cisco.com, Cisco Systems, Inc. +# @author: Dan Florea, dflorea@cisco.com, Cisco Systems, Inc. 
+ +from oslo.config import cfg + +from neutron.common import rpc_compat +from neutron.openstack.common import log as logging + +LOG = logging.getLogger(__name__) + +FWaaSOpts = [ + cfg.StrOpt( + 'driver', + default='', + help=_("Name of the FWaaS Driver")), + cfg.BoolOpt( + 'enabled', + default=False, + help=_("Enable FWaaS")), +] +cfg.CONF.register_opts(FWaaSOpts, 'fwaas') + + +class FWaaSPluginApiMixin(rpc_compat.RpcProxy): + """Agent side of the FWaaS agent to FWaaS Plugin RPC API.""" + + RPC_API_VERSION = '1.0' + + def __init__(self, topic, host): + super(FWaaSPluginApiMixin, + self).__init__(topic=topic, + default_version=self.RPC_API_VERSION) + self.host = host + + def set_firewall_status(self, context, firewall_id, status): + """Make a RPC to set the status of a firewall.""" + return self.call(context, + self.make_msg('set_firewall_status', host=self.host, + firewall_id=firewall_id, status=status), + topic=self.topic) + + def firewall_deleted(self, context, firewall_id): + """Make a RPC to indicate that the firewall resources are deleted.""" + return self.call(context, + self.make_msg('firewall_deleted', host=self.host, + firewall_id=firewall_id), + topic=self.topic) + + +class FWaaSAgentRpcCallbackMixin(object): + """Mixin for FWaaS agent Implementations.""" + + def __init__(self, host): + + super(FWaaSAgentRpcCallbackMixin, self).__init__(host) + + def create_firewall(self, context, firewall, host): + """Handle RPC cast from plugin to create a firewall.""" + pass + + def update_firewall(self, context, firewall, host): + """Handle RPC cast from plugin to update a firewall.""" + pass + + def delete_firewall(self, context, firewall, host): + """Handle RPC cast from plugin to delete a firewall.""" + pass diff --git a/neutron/services/firewall/agents/l3reference/__init__.py b/neutron/services/firewall/agents/l3reference/__init__.py new file mode 100644 index 000000000..5e8da711f --- /dev/null +++ b/neutron/services/firewall/agents/l3reference/__init__.py @@ 
-0,0 +1,16 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/neutron/services/firewall/agents/l3reference/firewall_l3_agent.py b/neutron/services/firewall/agents/l3reference/firewall_l3_agent.py new file mode 100644 index 000000000..865051835 --- /dev/null +++ b/neutron/services/firewall/agents/l3reference/firewall_l3_agent.py @@ -0,0 +1,295 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright (c) 2013 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Sumit Naiksatam, sumitnaiksatam@gmail.com, Big Switch Networks, Inc. +# @author: Sridar Kandaswamy, skandasw@cisco.com, Cisco Systems, Inc. +# @author: Dan Florea, dflorea@cisco.com, Cisco Systems, Inc. 
+ +from oslo.config import cfg + +from neutron.agent.common import config +from neutron.agent.linux import ip_lib +from neutron.common import topics +from neutron import context +from neutron.extensions import firewall as fw_ext +from neutron.openstack.common import importutils +from neutron.openstack.common import log as logging +from neutron.plugins.common import constants +from neutron.services.firewall.agents import firewall_agent_api as api + +LOG = logging.getLogger(__name__) + + +class FWaaSL3PluginApi(api.FWaaSPluginApiMixin): + """Agent side of the FWaaS agent to FWaaS Plugin RPC API.""" + + def __init__(self, topic, host): + super(FWaaSL3PluginApi, self).__init__(topic, host) + + def get_firewalls_for_tenant(self, context, **kwargs): + """Get the Firewalls with rules from the Plugin to send to driver.""" + LOG.debug(_("Retrieve Firewall with rules from Plugin")) + + return self.call(context, + self.make_msg('get_firewalls_for_tenant', + host=self.host), + topic=self.topic) + + def get_tenants_with_firewalls(self, context, **kwargs): + """Get all Tenants that have Firewalls configured from plugin.""" + LOG.debug(_("Retrieve Tenants with Firewalls configured from Plugin")) + + return self.call(context, + self.make_msg('get_tenants_with_firewalls', + host=self.host), + topic=self.topic) + + +class FWaaSL3AgentRpcCallback(api.FWaaSAgentRpcCallbackMixin): + """FWaaS Agent support to be used by Neutron L3 agent.""" + + def __init__(self, conf): + LOG.debug(_("Initializing firewall agent")) + self.conf = conf + fwaas_driver_class_path = cfg.CONF.fwaas.driver + self.fwaas_enabled = cfg.CONF.fwaas.enabled + if self.fwaas_enabled: + try: + self.fwaas_driver = importutils.import_object( + fwaas_driver_class_path) + LOG.debug(_("FWaaS Driver Loaded: '%s'"), + fwaas_driver_class_path) + except ImportError: + msg = _('Error importing FWaaS device driver: %s') + raise ImportError(msg % fwaas_driver_class_path) + self.services_sync = False + self.root_helper = 
config.get_root_helper(conf) + # setup RPC to msg fwaas plugin + self.fwplugin_rpc = FWaaSL3PluginApi(topics.FIREWALL_PLUGIN, + conf.host) + super(FWaaSL3AgentRpcCallback, self).__init__(host=conf.host) + + def _get_router_info_list_for_tenant(self, routers, tenant_id): + """Returns the list of router info objects on which to apply the fw.""" + root_ip = ip_lib.IPWrapper(self.root_helper) + # Get the routers for the tenant + router_ids = [ + router['id'] + for router in routers + if router['tenant_id'] == tenant_id] + local_ns_list = root_ip.get_namespaces( + self.root_helper) if self.conf.use_namespaces else [] + + router_info_list = [] + # Pick up namespaces for Tenant Routers + for rid in router_ids: + # for routers without an interface - get_routers returns + # the router - but this is not yet populated in router_info + if rid not in self.router_info: + continue + if self.router_info[rid].use_namespaces: + router_ns = self.router_info[rid].ns_name + if router_ns in local_ns_list: + router_info_list.append(self.router_info[rid]) + else: + router_info_list.append(self.router_info[rid]) + return router_info_list + + def _invoke_driver_for_plugin_api(self, context, fw, func_name): + """Invoke driver method for plugin API and provide status back.""" + LOG.debug(_("%(func_name)s from agent for fw: %(fwid)s"), + {'func_name': func_name, 'fwid': fw['id']}) + try: + routers = self.plugin_rpc.get_routers(context) + router_info_list = self._get_router_info_list_for_tenant( + routers, + fw['tenant_id']) + if not router_info_list: + LOG.debug(_('No Routers on tenant: %s'), fw['tenant_id']) + # fw was created before any routers were added, and if a + # delete is sent then we need to ack so that plugin can + # cleanup. 
+ if func_name == 'delete_firewall': + self.fwplugin_rpc.firewall_deleted(context, fw['id']) + return + LOG.debug(_("Apply fw on Router List: '%s'"), + [ri.router['id'] for ri in router_info_list]) + # call into the driver + try: + self.fwaas_driver.__getattribute__(func_name)( + router_info_list, + fw) + if fw['admin_state_up']: + status = constants.ACTIVE + else: + status = constants.DOWN + except fw_ext.FirewallInternalDriverError: + LOG.error(_("Firewall Driver Error for %(func_name)s " + "for fw: %(fwid)s"), + {'func_name': func_name, 'fwid': fw['id']}) + status = constants.ERROR + # delete needs different handling + if func_name == 'delete_firewall': + if status in [constants.ACTIVE, constants.DOWN]: + self.fwplugin_rpc.firewall_deleted(context, fw['id']) + else: + self.fwplugin_rpc.set_firewall_status( + context, + fw['id'], + status) + except Exception: + LOG.exception( + _("FWaaS RPC failure in %(func_name)s for fw: %(fwid)s"), + {'func_name': func_name, 'fwid': fw['id']}) + self.services_sync = True + return + + def _invoke_driver_for_sync_from_plugin(self, ctx, router_info_list, fw): + """Invoke the delete driver method for status of PENDING_DELETE and + update method for all other status to (re)apply on driver which is + Idempotent. + """ + if fw['status'] == constants.PENDING_DELETE: + try: + self.fwaas_driver.delete_firewall(router_info_list, fw) + self.fwplugin_rpc.firewall_deleted( + ctx, + fw['id']) + except fw_ext.FirewallInternalDriverError: + LOG.error(_("Firewall Driver Error on fw state %(fwmsg)s " + "for fw: %(fwid)s"), + {'fwmsg': fw['status'], 'fwid': fw['id']}) + self.fwplugin_rpc.set_firewall_status( + ctx, + fw['id'], + constants.ERROR) + else: + # PENDING_UPDATE, PENDING_CREATE, ... 
+ try: + self.fwaas_driver.update_firewall(router_info_list, fw) + if fw['admin_state_up']: + status = constants.ACTIVE + else: + status = constants.DOWN + except fw_ext.FirewallInternalDriverError: + LOG.error(_("Firewall Driver Error on fw state %(fwmsg)s " + "for fw: %(fwid)s"), + {'fwmsg': fw['status'], 'fwid': fw['id']}) + status = constants.ERROR + + self.fwplugin_rpc.set_firewall_status( + ctx, + fw['id'], + status) + + def _process_router_add(self, ri): + """On router add, get fw with rules from plugin and update driver.""" + LOG.debug(_("Process router add, router_id: '%s'"), ri.router['id']) + routers = [] + routers.append(ri.router) + router_info_list = self._get_router_info_list_for_tenant( + routers, + ri.router['tenant_id']) + if router_info_list: + # Get the firewall with rules + # for the tenant the router is on. + ctx = context.Context('', ri.router['tenant_id']) + fw_list = self.fwplugin_rpc.get_firewalls_for_tenant(ctx) + LOG.debug(_("Process router add, fw_list: '%s'"), + [fw['id'] for fw in fw_list]) + for fw in fw_list: + self._invoke_driver_for_sync_from_plugin( + ctx, + router_info_list, + fw) + + def process_router_add(self, ri): + """On router add, get fw with rules from plugin and update driver.""" + # avoid msg to plugin when fwaas is not configured + if not self.fwaas_enabled: + return + try: + self._process_router_add(ri) + except Exception: + LOG.exception( + _("FWaaS RPC info call failed for '%s'."), + ri.router['id']) + self.services_sync = True + + def process_services_sync(self, ctx): + """On RPC issues sync with plugin and apply the sync data.""" + # avoid msg to plugin when fwaas is not configured + if not self.fwaas_enabled: + return + try: + # get all routers + routers = self.plugin_rpc.get_routers(ctx) + # get the list of tenants with firewalls configured + # from the plugin + tenant_ids = self.fwplugin_rpc.get_tenants_with_firewalls(ctx) + LOG.debug(_("Tenants with Firewalls: '%s'"), tenant_ids) + for tenant_id in 
tenant_ids: + ctx = context.Context('', tenant_id) + fw_list = self.fwplugin_rpc.get_firewalls_for_tenant(ctx) + if fw_list: + # if fw present on tenant + router_info_list = self._get_router_info_list_for_tenant( + routers, + tenant_id) + if router_info_list: + LOG.debug(_("Router List: '%s'"), + [ri.router['id'] for ri in router_info_list]) + LOG.debug(_("fw_list: '%s'"), + [fw['id'] for fw in fw_list]) + # apply sync data on fw for this tenant + for fw in fw_list: + # fw, routers present on this host for tenant + # install + LOG.debug(_("Apply fw on Router List: '%s'"), + [ri.router['id'] + for ri in router_info_list]) + # no need to apply sync data for ACTIVE fw + if fw['status'] != constants.ACTIVE: + self._invoke_driver_for_sync_from_plugin( + ctx, + router_info_list, + fw) + self.services_sync = False + except Exception: + LOG.exception(_("Failed fwaas process services sync")) + self.services_sync = True + + def create_firewall(self, context, firewall, host): + """Handle Rpc from plugin to create a firewall.""" + return self._invoke_driver_for_plugin_api( + context, + firewall, + 'create_firewall') + + def update_firewall(self, context, firewall, host): + """Handle Rpc from plugin to update a firewall.""" + return self._invoke_driver_for_plugin_api( + context, + firewall, + 'update_firewall') + + def delete_firewall(self, context, firewall, host): + """Handle Rpc from plugin to delete a firewall.""" + return self._invoke_driver_for_plugin_api( + context, + firewall, + 'delete_firewall') diff --git a/neutron/services/firewall/agents/varmour/__init__.py b/neutron/services/firewall/agents/varmour/__init__.py new file mode 100755 index 000000000..5e8da711f --- /dev/null +++ b/neutron/services/firewall/agents/varmour/__init__.py @@ -0,0 +1,16 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 OpenStack Foundation. +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/neutron/services/firewall/agents/varmour/varmour_api.py b/neutron/services/firewall/agents/varmour/varmour_api.py new file mode 100755 index 000000000..86cb46fac --- /dev/null +++ b/neutron/services/firewall/agents/varmour/varmour_api.py @@ -0,0 +1,147 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013 vArmour Networks Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# +# @author: Gary Duan, gduan@varmour.com, vArmour Networks + +import base64 + +import httplib2 +from oslo.config import cfg + +from neutron.openstack.common import jsonutils as json +from neutron.openstack.common import log as logging +from neutron.services.firewall.agents.varmour import varmour_utils as va_utils + +OPTS = [ + cfg.StrOpt('director', default='localhost', + help=_("vArmour director ip")), + cfg.StrOpt('director_port', default='443', + help=_("vArmour director port")), + cfg.StrOpt('username', default='varmour', + help=_("vArmour director username")), + cfg.StrOpt('password', default='varmour', secret=True, + help=_("vArmour director password")), ] + +cfg.CONF.register_opts(OPTS, "vArmour") + +LOG = logging.getLogger(__name__) + +REST_URL_PREFIX = '/api/v1.0' + + +class vArmourAPIException(Exception): + message = _("An unknown exception.") + + def __init__(self, **kwargs): + try: + self.err = self.message % kwargs + + except Exception: + self.err = self.message + + def __str__(self): + return self.err + + +class AuthenticationFailure(vArmourAPIException): + message = _("Invalid login credential.") + + +class vArmourRestAPI(object): + + def __init__(self): + LOG.debug(_('vArmourRestAPI: started')) + self.user = cfg.CONF.vArmour.username + self.passwd = cfg.CONF.vArmour.password + self.server = cfg.CONF.vArmour.director + self.port = cfg.CONF.vArmour.director_port + self.timeout = 3 + self.key = '' + + def auth(self): + headers = {} + enc = base64.b64encode(self.user + ':' + self.passwd) + headers['Authorization'] = 'Basic ' + enc + resp = self.rest_api('POST', va_utils.REST_URL_AUTH, None, headers) + if resp and resp['status'] == 200: + self.key = resp['body']['auth'] + return True + else: + raise AuthenticationFailure() + + def commit(self): + self.rest_api('POST', va_utils.REST_URL_COMMIT) + + def rest_api(self, method, url, body=None, headers=None): + url = REST_URL_PREFIX + url + if body: + body_data = json.dumps(body) + else: + body_data = '' + 
if not headers: + headers = {} + enc = base64.b64encode('%s:%s' % (self.user, self.key)) + headers['Authorization'] = 'Basic ' + enc + + LOG.debug(_("vArmourRestAPI: %(server)s %(port)s"), + {'server': self.server, 'port': self.port}) + + try: + action = "https://" + self.server + ":" + self.port + url + + LOG.debug(_("vArmourRestAPI Sending: " + "%(method)s %(action)s %(headers)s %(body_data)s"), + {'method': method, 'action': action, + 'headers': headers, 'body_data': body_data}) + + h = httplib2.Http(timeout=3, + disable_ssl_certificate_validation=True) + resp, resp_str = h.request(action, method, + body=body_data, + headers=headers) + + LOG.debug(_("vArmourRestAPI Response: %(status)s %(resp_str)s"), + {'status': resp.status, 'resp_str': resp_str}) + + if resp.status == 200: + return {'status': resp.status, + 'reason': resp.reason, + 'body': json.loads(resp_str)} + except Exception: + LOG.error(_('vArmourRestAPI: Could not establish HTTP connection')) + + def del_cfg_objs(self, url, prefix): + resp = self.rest_api('GET', url) + if resp and resp['status'] == 200: + olist = resp['body']['response'] + if not olist: + return + + for o in olist: + if o.startswith(prefix): + self.rest_api('DELETE', url + '/"name:%s"' % o) + self.commit() + + def count_cfg_objs(self, url, prefix): + count = 0 + resp = self.rest_api('GET', url) + if resp and resp['status'] == 200: + for o in resp['body']['response']: + if o.startswith(prefix): + count += 1 + + return count diff --git a/neutron/services/firewall/agents/varmour/varmour_router.py b/neutron/services/firewall/agents/varmour/varmour_router.py new file mode 100755 index 000000000..952a452b1 --- /dev/null +++ b/neutron/services/firewall/agents/varmour/varmour_router.py @@ -0,0 +1,351 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013 vArmour Networks Inc. +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Gary Duan, vArmour Networks Inc. +# + +import sys + +import eventlet +eventlet.monkey_patch() + +import netaddr +from oslo.config import cfg + +from neutron.agent.common import config +from neutron.agent import l3_agent +from neutron.agent.linux import external_process +from neutron.agent.linux import interface +from neutron.agent.linux import ip_lib +from neutron.common import config as common_config +from neutron.common import constants as l3_constants +from neutron.common import topics +from neutron.openstack.common import log as logging +from neutron.openstack.common import service +from neutron import service as neutron_service +from neutron.services.firewall.agents.l3reference import firewall_l3_agent +from neutron.services.firewall.agents.varmour import varmour_api +from neutron.services.firewall.agents.varmour import varmour_utils as va_utils + + +LOG = logging.getLogger(__name__) + + +class vArmourL3NATAgent(l3_agent.L3NATAgent, + firewall_l3_agent.FWaaSL3AgentRpcCallback): + def __init__(self, host, conf=None): + LOG.debug(_('vArmourL3NATAgent: __init__')) + self.rest = varmour_api.vArmourRestAPI() + super(vArmourL3NATAgent, self).__init__(host, conf) + + def _destroy_router_namespaces(self, only_router_id=None): + return + + def _destroy_router_namespace(self, namespace): + return + + def _create_router_namespace(self, ri): + return + + def _router_added(self, router_id, router): + 
LOG.debug(_("_router_added: %s"), router_id) + ri = l3_agent.RouterInfo(router_id, self.root_helper, + self.conf.use_namespaces, router) + self.router_info[router_id] = ri + super(vArmourL3NATAgent, self).process_router_add(ri) + + def _router_removed(self, router_id): + LOG.debug(_("_router_removed: %s"), router_id) + + ri = self.router_info[router_id] + if ri: + ri.router['gw_port'] = None + ri.router[l3_constants.INTERFACE_KEY] = [] + ri.router[l3_constants.FLOATINGIP_KEY] = [] + self.process_router(ri) + + name = va_utils.get_snat_rule_name(ri) + self.rest.del_cfg_objs(va_utils.REST_URL_CONF_NAT_RULE, name) + + name = va_utils.get_dnat_rule_name(ri) + self.rest.del_cfg_objs(va_utils.REST_URL_CONF_NAT_RULE, name) + + name = va_utils.get_trusted_zone_name(ri) + self._va_unset_zone_interfaces(name, True) + + name = va_utils.get_untrusted_zone_name(ri) + self._va_unset_zone_interfaces(name, True) + + del self.router_info[router_id] + + def _spawn_metadata_proxy(self, router_id, ns_name): + return + + def _destroy_metadata_proxy(self, router_id, ns_name): + return + + def _set_subnet_info(self, port): + ips = port['fixed_ips'] + if not ips: + raise Exception(_("Router port %s has no IP address") % port['id']) + return + if len(ips) > 1: + LOG.warn(_("Ignoring multiple IPs on router port %s"), port['id']) + prefixlen = netaddr.IPNetwork(port['subnet']['cidr']).prefixlen + port['ip_cidr'] = "%s/%s" % (ips[0]['ip_address'], prefixlen) + + def _va_unset_zone_interfaces(self, zone_name, remove_zone=False): + # return True if zone exists; otherwise, return False + LOG.debug(_("_va_unset_zone_interfaces: %s"), zone_name) + resp = self.rest.rest_api('GET', va_utils.REST_URL_CONF_ZONE) + if resp and resp['status'] == 200: + zlist = resp['body']['response'] + for zn in zlist: + if zn == zone_name: + commit = False + + if 'interface' in zlist[zn]: + for intf in zlist[zn]['interface']: + self.rest.rest_api('DELETE', + va_utils.REST_URL_CONF + + va_utils.REST_ZONE_NAME % zn + + 
va_utils.REST_INTF_NAME % intf) + commit = True + if remove_zone: + self.rest.rest_api('DELETE', + va_utils.REST_URL_CONF + + va_utils.REST_ZONE_NAME % zn) + commit = True + + if commit: + self.rest.commit() + + return True + + return False + + def _va_pif_2_lif(self, pif): + return pif + '.0' + + def _va_set_interface_ip(self, pif, cidr): + LOG.debug(_("_va_set_interface_ip: %(pif)s %(cidr)s"), + {'pif': pif, 'cidr': cidr}) + + lif = self._va_pif_2_lif(pif) + obj = va_utils.REST_INTF_NAME % pif + va_utils.REST_LOGIC_NAME % lif + body = { + 'name': lif, + 'family': 'ipv4', + 'address': cidr + } + self.rest.rest_api('PUT', va_utils.REST_URL_CONF + obj, body) + + def _va_get_port_name(self, port_list, name): + if name: + for p in port_list: + if p['VM name'] == name: + return p['name'] + + def _va_config_trusted_zone(self, ri, plist): + zone = va_utils.get_trusted_zone_name(ri) + LOG.debug(_("_va_config_trusted_zone: %s"), zone) + + body = { + 'name': zone, + 'type': 'L3', + 'interface': [] + } + + if not self._va_unset_zone_interfaces(zone): + # if zone doesn't exist, create it + self.rest.rest_api('POST', va_utils.REST_URL_CONF_ZONE, body) + self.rest.commit() + + # add new internal ports to trusted zone + for p in ri.internal_ports: + if p['admin_state_up']: + dev = self.get_internal_device_name(p['id']) + pif = self._va_get_port_name(plist, dev) + if pif: + lif = self._va_pif_2_lif(pif) + if lif not in body['interface']: + body['interface'].append(lif) + + self._va_set_interface_ip(pif, p['ip_cidr']) + + if body['interface']: + self.rest.rest_api('PUT', va_utils.REST_URL_CONF_ZONE, body) + self.rest.commit() + + def _va_config_untrusted_zone(self, ri, plist): + zone = va_utils.get_untrusted_zone_name(ri) + LOG.debug(_("_va_config_untrusted_zone: %s"), zone) + + body = { + 'name': zone, + 'type': 'L3', + 'interface': [] + } + + if not self._va_unset_zone_interfaces(zone): + # if zone doesn't exist, create it + self.rest.rest_api('POST', 
va_utils.REST_URL_CONF_ZONE, body) + self.rest.commit() + + # add new gateway ports to untrusted zone + if ri.ex_gw_port: + LOG.debug(_("_va_config_untrusted_zone: gw=%r"), ri.ex_gw_port) + dev = self.get_external_device_name(ri.ex_gw_port['id']) + pif = self._va_get_port_name(plist, dev) + if pif: + lif = self._va_pif_2_lif(pif) + + self._va_set_interface_ip(pif, ri.ex_gw_port['ip_cidr']) + + body['interface'].append(lif) + self.rest.rest_api('PUT', va_utils.REST_URL_CONF_ZONE, body) + self.rest.commit() + + def _va_config_router_snat_rules(self, ri, plist): + LOG.debug(_('_va_config_router_snat_rules: %s'), ri.router['id']) + + prefix = va_utils.get_snat_rule_name(ri) + self.rest.del_cfg_objs(va_utils.REST_URL_CONF_NAT_RULE, prefix) + + if not ri.enable_snat: + return + + for idx, p in enumerate(ri.internal_ports): + if p['admin_state_up']: + dev = self.get_internal_device_name(p['id']) + pif = self._va_get_port_name(plist, dev) + if pif: + net = netaddr.IPNetwork(p['ip_cidr']) + body = { + 'name': '%s_%d' % (prefix, idx), + 'ingress-context-type': 'interface', + 'ingress-index': self._va_pif_2_lif(pif), + 'source-address': [ + [str(netaddr.IPAddress(net.first + 2)), + str(netaddr.IPAddress(net.last - 1))] + ], + 'flag': 'interface translate-source' + } + self.rest.rest_api('POST', + va_utils.REST_URL_CONF_NAT_RULE, + body) + + if ri.internal_ports: + self.rest.commit() + + def _va_config_floating_ips(self, ri): + LOG.debug(_('_va_config_floating_ips: %s'), ri.router['id']) + + prefix = va_utils.get_dnat_rule_name(ri) + self.rest.del_cfg_objs(va_utils.REST_URL_CONF_NAT_RULE, prefix) + + # add new dnat rules + for idx, fip in enumerate(ri.floating_ips): + body = { + 'name': '%s_%d' % (prefix, idx), + 'ingress-context-type': 'zone', + 'ingress-index': va_utils.get_untrusted_zone_name(ri), + 'destination-address': [[fip['floating_ip_address'], + fip['floating_ip_address']]], + 'static': [fip['fixed_ip_address'], fip['fixed_ip_address']], + 'flag': 
'translate-destination' + } + self.rest.rest_api('POST', va_utils.REST_URL_CONF_NAT_RULE, body) + + if ri.floating_ips: + self.rest.commit() + + def process_router(self, ri): + LOG.debug(_("process_router: %s"), ri.router['id']) + super(vArmourL3NATAgent, self).process_router(ri) + + self.rest.auth() + + # read internal port name and configuration port name map + resp = self.rest.rest_api('GET', va_utils.REST_URL_INTF_MAP) + if resp and resp['status'] == 200: + try: + plist = resp['body']['response'] + except ValueError: + LOG.warn(_("Unable to parse interface mapping.")) + return + else: + LOG.warn(_("Unable to read interface mapping.")) + return + + if ri.ex_gw_port: + self._set_subnet_info(ri.ex_gw_port) + self._va_config_trusted_zone(ri, plist) + self._va_config_untrusted_zone(ri, plist) + self._va_config_router_snat_rules(ri, plist) + self._va_config_floating_ips(ri) + + def _handle_router_snat_rules(self, ri, ex_gw_port, internal_cidrs, + interface_name, action): + return + + def _send_gratuitous_arp_packet(self, ri, interface_name, ip_address): + return + + def external_gateway_added(self, ri, ex_gw_port, + interface_name, internal_cidrs): + LOG.debug(_("external_gateway_added: %s"), ri.router['id']) + + if not ip_lib.device_exists(interface_name, + root_helper=self.root_helper, + namespace=ri.ns_name): + self.driver.plug(ex_gw_port['network_id'], + ex_gw_port['id'], interface_name, + ex_gw_port['mac_address'], + bridge=self.conf.external_network_bridge, + namespace=ri.ns_name, + prefix=l3_agent.EXTERNAL_DEV_PREFIX) + self.driver.init_l3(interface_name, [ex_gw_port['ip_cidr']], + namespace=ri.ns_name) + + def _update_routing_table(self, ri, operation, route): + return + + +class vArmourL3NATAgentWithStateReport(vArmourL3NATAgent, + l3_agent.L3NATAgentWithStateReport): + pass + + +def main(): + conf = cfg.CONF + conf.register_opts(vArmourL3NATAgent.OPTS) + config.register_interface_driver_opts_helper(conf) + config.register_use_namespaces_opts_helper(conf) + 
config.register_agent_state_opts_helper(conf) + config.register_root_helper(conf) + conf.register_opts(interface.OPTS) + conf.register_opts(external_process.OPTS) + common_config.init(sys.argv[1:]) + config.setup_logging(conf) + server = neutron_service.Service.create( + binary='neutron-l3-agent', + topic=topics.L3_AGENT, + report_interval=cfg.CONF.AGENT.report_interval, + manager='neutron.services.firewall.agents.varmour.varmour_router.' + 'vArmourL3NATAgentWithStateReport') + service.launch(server).wait() diff --git a/neutron/services/firewall/agents/varmour/varmour_utils.py b/neutron/services/firewall/agents/varmour/varmour_utils.py new file mode 100755 index 000000000..7290cb6e6 --- /dev/null +++ b/neutron/services/firewall/agents/varmour/varmour_utils.py @@ -0,0 +1,74 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013 vArmour Networks Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# +# @author: Gary Duan, gduan@varmour.com, vArmour Networks + +ROUTER_OBJ_PREFIX = 'r-' +OBJ_PREFIX_LEN = 8 +TRUST_ZONE = '_z_trust' +UNTRUST_ZONE = '_z_untrust' +SNAT_RULE = '_snat' +DNAT_RULE = '_dnat' +ROUTER_POLICY = '_p' + +REST_URL_CONF = '/config' +REST_URL_AUTH = '/auth' +REST_URL_COMMIT = '/commit' +REST_URL_INTF_MAP = '/operation/interface/mapping' + +REST_URL_CONF_NAT_RULE = REST_URL_CONF + '/nat/rule' +REST_URL_CONF_ZONE = REST_URL_CONF + '/zone' +REST_URL_CONF_POLICY = REST_URL_CONF + '/policy' +REST_URL_CONF_ADDR = REST_URL_CONF + '/address' +REST_URL_CONF_SERVICE = REST_URL_CONF + '/service' + +REST_ZONE_NAME = '/zone/"name:%s"' +REST_INTF_NAME = '/interface/"name:%s"' +REST_LOGIC_NAME = '/logical/"name:%s"' +REST_SERVICE_NAME = '/service/"name:%s"/rule' + + +def get_router_object_prefix(ri): + return ROUTER_OBJ_PREFIX + ri.router['id'][:OBJ_PREFIX_LEN] + + +def get_firewall_object_prefix(ri, fw): + return get_router_object_prefix(ri) + '-' + fw['id'][:OBJ_PREFIX_LEN] + + +def get_trusted_zone_name(ri): + return get_router_object_prefix(ri) + TRUST_ZONE + + +def get_untrusted_zone_name(ri): + return get_router_object_prefix(ri) + UNTRUST_ZONE + + +def get_snat_rule_name(ri): + return get_router_object_prefix(ri) + SNAT_RULE + + +def get_dnat_rule_name(ri): + return get_router_object_prefix(ri) + DNAT_RULE + + +def get_router_policy_name(ri): + return get_router_object_prefix(ri) + ROUTER_POLICY + + +def get_firewall_policy_name(ri, fw, rule): + return get_firewall_object_prefix(ri, fw) + rule['id'][:OBJ_PREFIX_LEN] diff --git a/neutron/services/firewall/drivers/__init__.py b/neutron/services/firewall/drivers/__init__.py new file mode 100644 index 000000000..5e8da711f --- /dev/null +++ b/neutron/services/firewall/drivers/__init__.py @@ -0,0 +1,16 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 OpenStack Foundation. +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/neutron/services/firewall/drivers/fwaas_base.py b/neutron/services/firewall/drivers/fwaas_base.py new file mode 100644 index 000000000..0e3fb25a3 --- /dev/null +++ b/neutron/services/firewall/drivers/fwaas_base.py @@ -0,0 +1,100 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013 Dell Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Rajesh Mohan, Rajesh_Mohan3@Dell.com, DELL Inc. + +import abc + +import six + + +@six.add_metaclass(abc.ABCMeta) +class FwaasDriverBase(object): + """Firewall as a Service Driver base class. + + Using FwaasDriver Class, an instance of L3 perimeter Firewall + can be created. The firewall co-exists with the L3 agent. + + One instance is created for each tenant. One firewall policy + is associated with each tenant (in the Havana release). 
+ + The Firewall can be visualized as having two zones (in Havana + release), trusted and untrusted. + + All the 'internal' interfaces of Neutron Router is treated as trusted. The + interface connected to 'external network' is treated as untrusted. + + The policy is applied on traffic ingressing/egressing interfaces on + the trusted zone. This implies that policy will be applied for traffic + passing from + - trusted to untrusted zones + - untrusted to trusted zones + - trusted to trusted zones + + Policy WILL NOT be applied for traffic from untrusted to untrusted zones. + This is not a problem in Havana release as there is only one interface + connected to external network. + + Since the policy is applied on the internal interfaces, the traffic + will be not be NATed to floating IP. For incoming traffic, the + traffic will get NATed to internal IP address before it hits + the firewall rules. So, while writing the rules, care should be + taken if using rules based on floating IP. + + The firewall rule addition/deletion/insertion/update are done by the + management console. When the policy is sent to the driver, the complete + policy is sent and the whole policy has to be applied atomically. The + firewall rules will not get updated individually. This is to avoid problems + related to out-of-order notifications or inconsistent behaviour by partial + application of rules. + """ + + @abc.abstractmethod + def create_firewall(self, apply_list, firewall): + """Create the Firewall with default (drop all) policy. + + The default policy will be applied on all the interfaces of + trusted zone. + """ + pass + + @abc.abstractmethod + def delete_firewall(self, apply_list, firewall): + """Delete firewall. + + Removes all policies created by this instance and frees up + all the resources. + """ + pass + + @abc.abstractmethod + def update_firewall(self, apply_list, firewall): + """Apply the policy on all trusted interfaces. 
+ + Remove previous policy and apply the new policy on all trusted + interfaces. + """ + pass + + @abc.abstractmethod + def apply_default_policy(self, apply_list, firewall): + """Apply the default policy on all trusted interfaces. + + Remove current policy and apply the default policy on all trusted + interfaces. + """ + pass diff --git a/neutron/services/firewall/drivers/linux/__init__.py b/neutron/services/firewall/drivers/linux/__init__.py new file mode 100644 index 000000000..5e8da711f --- /dev/null +++ b/neutron/services/firewall/drivers/linux/__init__.py @@ -0,0 +1,16 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/neutron/services/firewall/drivers/linux/iptables_fwaas.py b/neutron/services/firewall/drivers/linux/iptables_fwaas.py new file mode 100644 index 000000000..00a3ed792 --- /dev/null +++ b/neutron/services/firewall/drivers/linux/iptables_fwaas.py @@ -0,0 +1,275 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013 Dell Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Rajesh Mohan, Rajesh_Mohan3@Dell.com, DELL Inc. + +from neutron.agent.linux import iptables_manager +from neutron.extensions import firewall as fw_ext +from neutron.openstack.common import log as logging +from neutron.services.firewall.drivers import fwaas_base + +LOG = logging.getLogger(__name__) +FWAAS_DRIVER_NAME = 'Fwaas iptables driver' +FWAAS_DEFAULT_CHAIN = 'fwaas-default-policy' +INGRESS_DIRECTION = 'ingress' +EGRESS_DIRECTION = 'egress' +CHAIN_NAME_PREFIX = {INGRESS_DIRECTION: 'i', + EGRESS_DIRECTION: 'o'} + +""" Firewall rules are applied on internal-interfaces of Neutron router. + The packets ingressing tenant's network will be on the output + direction on internal-interfaces. 
+""" +IPTABLES_DIR = {INGRESS_DIRECTION: '-o', + EGRESS_DIRECTION: '-i'} +IPV4 = 'ipv4' +IPV6 = 'ipv6' +IP_VER_TAG = {IPV4: 'v4', + IPV6: 'v6'} + + +class IptablesFwaasDriver(fwaas_base.FwaasDriverBase): + """IPTables driver for Firewall As A Service.""" + + def __init__(self): + LOG.debug(_("Initializing fwaas iptables driver")) + + def create_firewall(self, apply_list, firewall): + LOG.debug(_('Creating firewall %(fw_id)s for tenant %(tid)s)'), + {'fw_id': firewall['id'], 'tid': firewall['tenant_id']}) + try: + if firewall['admin_state_up']: + self._setup_firewall(apply_list, firewall) + else: + self.apply_default_policy(apply_list, firewall) + except (LookupError, RuntimeError): + # catch known library exceptions and raise Fwaas generic exception + LOG.exception(_("Failed to create firewall: %s"), firewall['id']) + raise fw_ext.FirewallInternalDriverError(driver=FWAAS_DRIVER_NAME) + + def delete_firewall(self, apply_list, firewall): + LOG.debug(_('Deleting firewall %(fw_id)s for tenant %(tid)s)'), + {'fw_id': firewall['id'], 'tid': firewall['tenant_id']}) + fwid = firewall['id'] + try: + for router_info in apply_list: + ipt_mgr = router_info.iptables_manager + self._remove_chains(fwid, ipt_mgr) + self._remove_default_chains(ipt_mgr) + # apply the changes immediately (no defer in firewall path) + ipt_mgr.defer_apply_off() + except (LookupError, RuntimeError): + # catch known library exceptions and raise Fwaas generic exception + LOG.exception(_("Failed to delete firewall: %s"), fwid) + raise fw_ext.FirewallInternalDriverError(driver=FWAAS_DRIVER_NAME) + + def update_firewall(self, apply_list, firewall): + LOG.debug(_('Updating firewall %(fw_id)s for tenant %(tid)s)'), + {'fw_id': firewall['id'], 'tid': firewall['tenant_id']}) + try: + if firewall['admin_state_up']: + self._setup_firewall(apply_list, firewall) + else: + self.apply_default_policy(apply_list, firewall) + except (LookupError, RuntimeError): + # catch known library exceptions and raise Fwaas generic 
exception + LOG.exception(_("Failed to update firewall: %s"), firewall['id']) + raise fw_ext.FirewallInternalDriverError(driver=FWAAS_DRIVER_NAME) + + def apply_default_policy(self, apply_list, firewall): + LOG.debug(_('Applying firewall %(fw_id)s for tenant %(tid)s)'), + {'fw_id': firewall['id'], 'tid': firewall['tenant_id']}) + fwid = firewall['id'] + try: + for router_info in apply_list: + ipt_mgr = router_info.iptables_manager + + # the following only updates local memory; no hole in FW + self._remove_chains(fwid, ipt_mgr) + self._remove_default_chains(ipt_mgr) + + # create default 'DROP ALL' policy chain + self._add_default_policy_chain_v4v6(ipt_mgr) + self._enable_policy_chain(fwid, ipt_mgr) + + # apply the changes immediately (no defer in firewall path) + ipt_mgr.defer_apply_off() + except (LookupError, RuntimeError): + # catch known library exceptions and raise Fwaas generic exception + LOG.exception(_("Failed to apply default policy on firewall: %s"), + fwid) + raise fw_ext.FirewallInternalDriverError(driver=FWAAS_DRIVER_NAME) + + def _setup_firewall(self, apply_list, firewall): + fwid = firewall['id'] + for router_info in apply_list: + ipt_mgr = router_info.iptables_manager + + # the following only updates local memory; no hole in FW + self._remove_chains(fwid, ipt_mgr) + self._remove_default_chains(ipt_mgr) + + # create default 'DROP ALL' policy chain + self._add_default_policy_chain_v4v6(ipt_mgr) + #create chain based on configured policy + self._setup_chains(firewall, ipt_mgr) + + # apply the changes immediately (no defer in firewall path) + ipt_mgr.defer_apply_off() + + def _get_chain_name(self, fwid, ver, direction): + return '%s%s%s' % (CHAIN_NAME_PREFIX[direction], + IP_VER_TAG[ver], + fwid) + + def _setup_chains(self, firewall, ipt_mgr): + """Create Fwaas chain using the rules in the policy + """ + fw_rules_list = firewall['firewall_rule_list'] + fwid = firewall['id'] + + #default rules for invalid packets and established sessions + invalid_rule = 
self._drop_invalid_packets_rule() + est_rule = self._allow_established_rule() + + for ver in [IPV4, IPV6]: + if ver == IPV4: + table = ipt_mgr.ipv4['filter'] + else: + table = ipt_mgr.ipv6['filter'] + ichain_name = self._get_chain_name(fwid, ver, INGRESS_DIRECTION) + ochain_name = self._get_chain_name(fwid, ver, EGRESS_DIRECTION) + for name in [ichain_name, ochain_name]: + table.add_chain(name) + table.add_rule(name, invalid_rule) + table.add_rule(name, est_rule) + + for rule in fw_rules_list: + if not rule['enabled']: + continue + iptbl_rule = self._convert_fwaas_to_iptables_rule(rule) + if rule['ip_version'] == 4: + ver = IPV4 + table = ipt_mgr.ipv4['filter'] + else: + ver = IPV6 + table = ipt_mgr.ipv6['filter'] + ichain_name = self._get_chain_name(fwid, ver, INGRESS_DIRECTION) + ochain_name = self._get_chain_name(fwid, ver, EGRESS_DIRECTION) + table.add_rule(ichain_name, iptbl_rule) + table.add_rule(ochain_name, iptbl_rule) + self._enable_policy_chain(fwid, ipt_mgr) + + def _remove_default_chains(self, nsid): + """Remove fwaas default policy chain.""" + self._remove_chain_by_name(IPV4, FWAAS_DEFAULT_CHAIN, nsid) + self._remove_chain_by_name(IPV6, FWAAS_DEFAULT_CHAIN, nsid) + + def _remove_chains(self, fwid, ipt_mgr): + """Remove fwaas policy chain.""" + for ver in [IPV4, IPV6]: + for direction in [INGRESS_DIRECTION, EGRESS_DIRECTION]: + chain_name = self._get_chain_name(fwid, ver, direction) + self._remove_chain_by_name(ver, chain_name, ipt_mgr) + + def _add_default_policy_chain_v4v6(self, ipt_mgr): + ipt_mgr.ipv4['filter'].add_chain(FWAAS_DEFAULT_CHAIN) + ipt_mgr.ipv4['filter'].add_rule(FWAAS_DEFAULT_CHAIN, '-j DROP') + ipt_mgr.ipv6['filter'].add_chain(FWAAS_DEFAULT_CHAIN) + ipt_mgr.ipv6['filter'].add_rule(FWAAS_DEFAULT_CHAIN, '-j DROP') + + def _remove_chain_by_name(self, ver, chain_name, ipt_mgr): + if ver == IPV4: + ipt_mgr.ipv4['filter'].ensure_remove_chain(chain_name) + else: + ipt_mgr.ipv6['filter'].ensure_remove_chain(chain_name) + + def 
_add_rules_to_chain(self, ipt_mgr, ver, chain_name, rules): + if ver == IPV4: + table = ipt_mgr.ipv4['filter'] + else: + table = ipt_mgr.ipv6['filter'] + for rule in rules: + table.add_rule(chain_name, rule) + + def _enable_policy_chain(self, fwid, ipt_mgr): + bname = iptables_manager.binary_name + + for (ver, tbl) in [(IPV4, ipt_mgr.ipv4['filter']), + (IPV6, ipt_mgr.ipv6['filter'])]: + for direction in [INGRESS_DIRECTION, EGRESS_DIRECTION]: + chain_name = self._get_chain_name(fwid, ver, direction) + chain_name = iptables_manager.get_chain_name(chain_name) + if chain_name in tbl.chains: + jump_rule = ['%s qr-+ -j %s-%s' % (IPTABLES_DIR[direction], + bname, chain_name)] + self._add_rules_to_chain(ipt_mgr, ver, 'FORWARD', + jump_rule) + + #jump to DROP_ALL policy + chain_name = iptables_manager.get_chain_name(FWAAS_DEFAULT_CHAIN) + jump_rule = ['-o qr-+ -j %s-%s' % (bname, chain_name)] + self._add_rules_to_chain(ipt_mgr, IPV4, 'FORWARD', jump_rule) + self._add_rules_to_chain(ipt_mgr, IPV6, 'FORWARD', jump_rule) + + #jump to DROP_ALL policy + chain_name = iptables_manager.get_chain_name(FWAAS_DEFAULT_CHAIN) + jump_rule = ['-i qr-+ -j %s-%s' % (bname, chain_name)] + self._add_rules_to_chain(ipt_mgr, IPV4, 'FORWARD', jump_rule) + self._add_rules_to_chain(ipt_mgr, IPV6, 'FORWARD', jump_rule) + + def _convert_fwaas_to_iptables_rule(self, rule): + action = rule.get('action') == 'allow' and 'ACCEPT' or 'DROP' + args = [self._protocol_arg(rule.get('protocol')), + self._port_arg('dport', + rule.get('protocol'), + rule.get('destination_port')), + self._port_arg('sport', + rule.get('protocol'), + rule.get('source_port')), + self._ip_prefix_arg('s', rule.get('source_ip_address')), + self._ip_prefix_arg('d', rule.get('destination_ip_address')), + self._action_arg(action)] + + iptables_rule = ' '.join(args) + return iptables_rule + + def _drop_invalid_packets_rule(self): + return '-m state --state INVALID -j DROP' + + def _allow_established_rule(self): + return '-m state --state 
ESTABLISHED,RELATED -j ACCEPT' + + def _action_arg(self, action): + if action: + return '-j %s' % action + return '' + + def _protocol_arg(self, protocol): + if protocol: + return '-p %s' % protocol + return '' + + def _port_arg(self, direction, protocol, port): + if not (protocol in ['udp', 'tcp'] and port): + return '' + return '--%s %s' % (direction, port) + + def _ip_prefix_arg(self, direction, ip_prefix): + if ip_prefix: + return '-%s %s' % (direction, ip_prefix) + return '' diff --git a/neutron/services/firewall/drivers/varmour/__init__.py b/neutron/services/firewall/drivers/varmour/__init__.py new file mode 100755 index 000000000..5e8da711f --- /dev/null +++ b/neutron/services/firewall/drivers/varmour/__init__.py @@ -0,0 +1,16 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/neutron/services/firewall/drivers/varmour/varmour_fwaas.py b/neutron/services/firewall/drivers/varmour/varmour_fwaas.py new file mode 100755 index 000000000..bcdf2f909 --- /dev/null +++ b/neutron/services/firewall/drivers/varmour/varmour_fwaas.py @@ -0,0 +1,207 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013 vArmour Networks Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
class vArmourFwaasDriver(fwaas_base.FwaasDriverBase):
    """FWaaS driver that renders firewall rules onto a vArmour appliance.

    All configuration is pushed over the appliance's REST API
    (varmour_api.vArmourRestAPI); object names are namespaced per
    router/firewall via varmour_utils helpers so they can be bulk-deleted.
    """

    def __init__(self):
        LOG.debug(_("Initializing fwaas vArmour driver"))

        # REST client used for every appliance operation.
        self.rest = varmour_api.vArmourRestAPI()

    def create_firewall(self, apply_list, firewall):
        """Create is implemented as a full update (idempotent push)."""
        LOG.debug(_('create_firewall (%s)'), firewall['id'])

        return self.update_firewall(apply_list, firewall)

    def update_firewall(self, apply_list, firewall):
        """Push the firewall config; admin-down falls back to default policy."""
        LOG.debug(_("update_firewall (%s)"), firewall['id'])

        if firewall['admin_state_up']:
            return self._update_firewall(apply_list, firewall)
        else:
            return self.apply_default_policy(apply_list, firewall)

    def delete_firewall(self, apply_list, firewall):
        """Delete removes all configured objects (default policy only)."""
        LOG.debug(_("delete_firewall (%s)"), firewall['id'])

        return self.apply_default_policy(apply_list, firewall)

    def apply_default_policy(self, apply_list, firewall):
        """Strip this firewall's policies/addresses/services from each router."""
        LOG.debug(_("apply_default_policy (%s)"), firewall['id'])

        self.rest.auth()

        for ri in apply_list:
            self._clear_policy(ri, firewall)

        return True

    def _update_firewall(self, apply_list, firewall):
        # Clear-then-setup per router: the old objects are removed before
        # the new rule set is rendered, so stale objects never linger.
        LOG.debug(_("Updating firewall (%s)"), firewall['id'])

        self.rest.auth()

        for ri in apply_list:
            self._clear_policy(ri, firewall)
            self._setup_policy(ri, firewall)

        return True

    def _setup_policy(self, ri, fw):
        # create zones no matter if they exist. Interfaces are added by router
        body = {
            'type': 'L3',
            'interface': []
        }

        body['name'] = va_utils.get_trusted_zone_name(ri)
        self.rest.rest_api('POST', va_utils.REST_URL_CONF_ZONE, body)
        body['name'] = va_utils.get_untrusted_zone_name(ri)
        self.rest.rest_api('POST', va_utils.REST_URL_CONF_ZONE, body)
        self.rest.commit()

        # servs/addrs act as dedup caches: identical protocol/port or
        # address values across rules reuse one appliance object.
        servs = dict()
        addrs = dict()
        for rule in fw['firewall_rule_list']:
            if not rule['enabled']:
                continue

            if rule['ip_version'] == 4:
                service = self._make_service(ri, fw, rule, servs)
                s_addr = self._make_address(ri, fw, rule, addrs, True)
                d_addr = self._make_address(ri, fw, rule, addrs, False)

                # Each FWaaS rule becomes three policies covering
                # trusted->trusted, trusted->untrusted and
                # untrusted->trusted zone pairs.
                policy = va_utils.get_firewall_policy_name(ri, fw, rule)
                z0 = va_utils.get_trusted_zone_name(ri)
                z1 = va_utils.get_untrusted_zone_name(ri)
                body = self._make_policy(policy + '_0', rule,
                                         z0, z0, s_addr, d_addr, service)
                self.rest.rest_api('POST', va_utils.REST_URL_CONF_POLICY, body)
                body = self._make_policy(policy + '_1', rule,
                                         z0, z1, s_addr, d_addr, service)
                self.rest.rest_api('POST', va_utils.REST_URL_CONF_POLICY, body)
                body = self._make_policy(policy + '_2', rule,
                                         z1, z0, s_addr, d_addr, service)
                self.rest.rest_api('POST', va_utils.REST_URL_CONF_POLICY, body)

                self.rest.commit()
            else:
                # IPv6 rules are silently skipped (logged only).
                LOG.warn(_("Unsupported IP version rule."))

    def _clear_policy(self, ri, fw):
        # Bulk-delete every object carrying this firewall's name prefix.
        prefix = va_utils.get_firewall_object_prefix(ri, fw)
        self.rest.del_cfg_objs(va_utils.REST_URL_CONF_POLICY, prefix)
        self.rest.del_cfg_objs(va_utils.REST_URL_CONF_ADDR, prefix)
        self.rest.del_cfg_objs(va_utils.REST_URL_CONF_SERVICE, prefix)

    def _make_service(self, ri, fw, rule, servs):
        """Create (or reuse) a service object for the rule's proto/ports.

        Returns the appliance object name, or None when the rule has no
        protocol (no service object is needed then).
        """
        prefix = va_utils.get_firewall_object_prefix(ri, fw)

        if rule.get('protocol'):
            # NOTE(review): '+' concatenation assumes the port values are
            # strings — TODO confirm against the FWaaS rule schema.
            key = rule.get('protocol')
            if rule.get('source_port'):
                key += '-' + rule.get('source_port')
            if rule.get('destination_port'):
                key += '-' + rule.get('destination_port')
        else:
            return

        if key in servs:
            name = '%s_%d' % (prefix, servs[key])
        else:
            # create new service object with index
            idx = len(servs)
            servs[key] = idx
            name = '%s_%d' % (prefix, idx)

            body = {'name': name}
            self.rest.rest_api('POST',
                               va_utils.REST_URL_CONF_SERVICE,
                               body)
            body = self._make_service_rule(rule)
            self.rest.rest_api('POST',
                               va_utils.REST_URL_CONF +
                               va_utils.REST_SERVICE_NAME % name,
                               body)
            self.rest.commit()

        return name

    def _make_service_rule(self, rule):
        # Single-entry service rule; start == end pins an exact port.
        body = {
            'name': '1',
            'protocol': rule.get('protocol')
        }
        if 'source_port' in rule:
            body['source-start'] = rule['source_port']
            body['source-end'] = rule['source_port']
        if 'destination_port' in rule:
            body['dest-start'] = rule['destination_port']
            body['dest-end'] = rule['destination_port']

        return body

    def _make_address(self, ri, fw, rule, addrs, is_src):
        """Create (or reuse) an IPv4 address object for one side of a rule.

        Returns the appliance object name, or None when the rule has no
        address on that side (policy falls back to 'Any').
        """
        prefix = va_utils.get_firewall_object_prefix(ri, fw)

        if is_src:
            key = rule.get('source_ip_address')
        else:
            key = rule.get('destination_ip_address')

        if not key:
            return

        if key in addrs:
            name = '%s_%d' % (prefix, addrs[key])
        else:
            # create new address object with idx
            idx = len(addrs)
            addrs[key] = idx
            name = '%s_%d' % (prefix, idx)

            body = {
                'name': name,
                'type': 'ipv4',
                'ipv4': key
            }
            self.rest.rest_api('POST', va_utils.REST_URL_CONF_ADDR, body)
            self.rest.commit()

        return name

    def _make_policy(self, name, rule, zone0, zone1, s_addr, d_addr, service):
        """Build the REST body for one zone-pair policy (pure, no I/O)."""
        body = {
            'name': name,
            'action': 'permit' if rule.get('action') == 'allow' else 'deny',
            'from': zone0,
            'to': zone1,
            # Missing address/service objects match anything.
            'match-source-address': [s_addr or 'Any'],
            'match-dest-address': [d_addr or 'Any'],
            'match-service': [service or 'Any']
        }

        return body
class FirewallCallbacks(rpc_compat.RpcCallback):
    """Server-side RPC endpoints invoked by the FWaaS agent.

    The agent reports status transitions and fetches firewall/rule data;
    all DB work is delegated to the owning plugin.
    """

    RPC_API_VERSION = '1.0'

    def __init__(self, plugin):
        super(FirewallCallbacks, self).__init__()
        # Owning FirewallPlugin instance; provides DB accessors.
        self.plugin = plugin

    def set_firewall_status(self, context, firewall_id, status, **kwargs):
        """Agent uses this to set a firewall's status.

        Returns True when the status was applied, False when the update
        was refused (pending delete) or mapped to ERROR.
        """
        LOG.debug(_("set_firewall_status() called"))
        with context.session.begin(subtransactions=True):
            fw_db = self.plugin._get_firewall(context, firewall_id)
            # ignore changing status if firewall expects to be deleted
            # That case means that while some pending operation has been
            # performed on the backend, neutron server received delete request
            # and changed firewall status to const.PENDING_DELETE
            if fw_db.status == const.PENDING_DELETE:
                LOG.debug(_("Firewall %(fw_id)s in PENDING_DELETE state, "
                            "not changing to %(status)s"),
                          {'fw_id': firewall_id, 'status': status})
                return False
            #TODO(xuhanp): Remove INACTIVE status and use DOWN to
            # be consistent with other network resources
            if status in (const.ACTIVE, const.INACTIVE, const.DOWN):
                fw_db.status = status
                return True
            else:
                # Any unrecognized status from the agent marks the firewall
                # as ERROR.
                fw_db.status = const.ERROR
                return False

    def firewall_deleted(self, context, firewall_id, **kwargs):
        """Agent uses this to indicate firewall is deleted.

        Returns True when the DB object was removed; an unexpected delete
        (firewall not pending delete / in error) flips it to ERROR instead.
        """
        LOG.debug(_("firewall_deleted() called"))
        with context.session.begin(subtransactions=True):
            fw_db = self.plugin._get_firewall(context, firewall_id)
            # allow to delete firewalls in ERROR state
            if fw_db.status in (const.PENDING_DELETE, const.ERROR):
                self.plugin.delete_db_firewall_object(context, firewall_id)
                return True
            else:
                LOG.warn(_('Firewall %(fw)s unexpectedly deleted by agent, '
                           'status was %(status)s'),
                         {'fw': firewall_id, 'status': fw_db.status})
                fw_db.status = const.ERROR
                return False

    def get_firewalls_for_tenant(self, context, **kwargs):
        """Agent uses this to get all firewalls and rules for a tenant."""
        LOG.debug(_("get_firewalls_for_tenant() called"))
        fw_list = [
            self.plugin._make_firewall_dict_with_rules(context, fw['id'])
            for fw in self.plugin.get_firewalls(context)
        ]
        return fw_list

    def get_firewalls_for_tenant_without_rules(self, context, **kwargs):
        """Agent uses this to get all firewalls for a tenant."""
        LOG.debug(_("get_firewalls_for_tenant_without_rules() called"))
        fw_list = [fw for fw in self.plugin.get_firewalls(context)]
        return fw_list

    def get_tenants_with_firewalls(self, context, **kwargs):
        """Agent uses this to get all tenants that have firewalls."""
        LOG.debug(_("get_tenants_with_firewalls() called"))
        # Admin context: the scan must span all tenants, not the caller's.
        ctx = neutron_context.get_admin_context()
        fw_list = self.plugin.get_firewalls(ctx)
        fw_tenant_list = list(set(fw['tenant_id'] for fw in fw_list))
        return fw_tenant_list
class FirewallAgentApi(rpc_compat.RpcProxy):
    """Plugin side of plugin to agent RPC API."""

    API_VERSION = '1.0'

    def __init__(self, topic, host):
        super(FirewallAgentApi, self).__init__(topic, self.API_VERSION)
        self.host = host

    def _fanout(self, context, method, firewall):
        # All three notifications share the same fan-out cast shape;
        # only the remote method name differs.
        msg = self.make_msg(method, firewall=firewall, host=self.host)
        return self.fanout_cast(context, msg, topic=self.topic)

    def create_firewall(self, context, firewall):
        """Broadcast a firewall-create to all listening agents."""
        return self._fanout(context, 'create_firewall', firewall)

    def update_firewall(self, context, firewall):
        """Broadcast a firewall-update to all listening agents."""
        return self._fanout(context, 'update_firewall', firewall)

    def delete_firewall(self, context, firewall):
        """Broadcast a firewall-delete to all listening agents."""
        return self._fanout(context, 'delete_firewall', firewall)
+ """ + supported_extension_aliases = ["fwaas"] + + def __init__(self): + """Do the initialization for the firewall service plugin here.""" + qdbapi.register_models() + + self.endpoints = [FirewallCallbacks(self)] + + self.conn = rpc_compat.create_connection(new=True) + self.conn.create_consumer( + topics.FIREWALL_PLUGIN, self.endpoints, fanout=False) + self.conn.consume_in_threads() + + self.agent_rpc = FirewallAgentApi( + topics.L3_AGENT, + cfg.CONF.host + ) + + def _make_firewall_dict_with_rules(self, context, firewall_id): + firewall = self.get_firewall(context, firewall_id) + fw_policy_id = firewall['firewall_policy_id'] + if fw_policy_id: + fw_policy = self.get_firewall_policy(context, fw_policy_id) + fw_rules_list = [self.get_firewall_rule( + context, rule_id) for rule_id in fw_policy['firewall_rules']] + firewall['firewall_rule_list'] = fw_rules_list + else: + firewall['firewall_rule_list'] = [] + # FIXME(Sumit): If the size of the firewall object we are creating + # here exceeds the largest message size supported by rabbit/qpid + # then we will have a problem. 
+ return firewall + + def _rpc_update_firewall(self, context, firewall_id): + status_update = {"firewall": {"status": const.PENDING_UPDATE}} + fw = super(FirewallPlugin, self).update_firewall(context, firewall_id, + status_update) + if fw: + fw_with_rules = ( + self._make_firewall_dict_with_rules(context, + firewall_id)) + self.agent_rpc.update_firewall(context, fw_with_rules) + + def _rpc_update_firewall_policy(self, context, firewall_policy_id): + firewall_policy = self.get_firewall_policy(context, firewall_policy_id) + if firewall_policy: + for firewall_id in firewall_policy['firewall_list']: + self._rpc_update_firewall(context, firewall_id) + + def _ensure_update_firewall(self, context, firewall_id): + fwall = self.get_firewall(context, firewall_id) + if fwall['status'] in [const.PENDING_CREATE, + const.PENDING_UPDATE, + const.PENDING_DELETE]: + raise fw_ext.FirewallInPendingState(firewall_id=firewall_id, + pending_state=fwall['status']) + + def _ensure_update_firewall_policy(self, context, firewall_policy_id): + firewall_policy = self.get_firewall_policy(context, firewall_policy_id) + if firewall_policy and 'firewall_list' in firewall_policy: + for firewall_id in firewall_policy['firewall_list']: + self._ensure_update_firewall(context, firewall_id) + + def _ensure_update_firewall_rule(self, context, firewall_rule_id): + fw_rule = self.get_firewall_rule(context, firewall_rule_id) + if 'firewall_policy_id' in fw_rule and fw_rule['firewall_policy_id']: + self._ensure_update_firewall_policy(context, + fw_rule['firewall_policy_id']) + + def create_firewall(self, context, firewall): + LOG.debug(_("create_firewall() called")) + tenant_id = self._get_tenant_id_for_create(context, + firewall['firewall']) + fw_count = self.get_firewalls_count(context, + filters={'tenant_id': [tenant_id]}) + if fw_count: + raise FirewallCountExceeded(tenant_id=tenant_id) + firewall['firewall']['status'] = const.PENDING_CREATE + fw = super(FirewallPlugin, self).create_firewall(context, 
firewall) + fw_with_rules = ( + self._make_firewall_dict_with_rules(context, fw['id'])) + self.agent_rpc.create_firewall(context, fw_with_rules) + return fw + + def update_firewall(self, context, id, firewall): + LOG.debug(_("update_firewall() called")) + self._ensure_update_firewall(context, id) + firewall['firewall']['status'] = const.PENDING_UPDATE + fw = super(FirewallPlugin, self).update_firewall(context, id, firewall) + fw_with_rules = ( + self._make_firewall_dict_with_rules(context, fw['id'])) + self.agent_rpc.update_firewall(context, fw_with_rules) + return fw + + def delete_db_firewall_object(self, context, id): + firewall = self.get_firewall(context, id) + if firewall['status'] in [const.PENDING_DELETE]: + super(FirewallPlugin, self).delete_firewall(context, id) + + def delete_firewall(self, context, id): + LOG.debug(_("delete_firewall() called")) + status_update = {"firewall": {"status": const.PENDING_DELETE}} + fw = super(FirewallPlugin, self).update_firewall(context, id, + status_update) + fw_with_rules = ( + self._make_firewall_dict_with_rules(context, fw['id'])) + self.agent_rpc.delete_firewall(context, fw_with_rules) + + def update_firewall_policy(self, context, id, firewall_policy): + LOG.debug(_("update_firewall_policy() called")) + self._ensure_update_firewall_policy(context, id) + fwp = super(FirewallPlugin, + self).update_firewall_policy(context, id, firewall_policy) + self._rpc_update_firewall_policy(context, id) + return fwp + + def update_firewall_rule(self, context, id, firewall_rule): + LOG.debug(_("update_firewall_rule() called")) + self._ensure_update_firewall_rule(context, id) + fwr = super(FirewallPlugin, + self).update_firewall_rule(context, id, firewall_rule) + firewall_policy_id = fwr['firewall_policy_id'] + if firewall_policy_id: + self._rpc_update_firewall_policy(context, firewall_policy_id) + return fwr + + def insert_rule(self, context, id, rule_info): + LOG.debug(_("insert_rule() called")) + 
self._ensure_update_firewall_policy(context, id) + fwp = super(FirewallPlugin, + self).insert_rule(context, id, rule_info) + self._rpc_update_firewall_policy(context, id) + return fwp + + def remove_rule(self, context, id, rule_info): + LOG.debug(_("remove_rule() called")) + self._ensure_update_firewall_policy(context, id) + fwp = super(FirewallPlugin, + self).remove_rule(context, id, rule_info) + self._rpc_update_firewall_policy(context, id) + return fwp diff --git a/neutron/services/l3_router/README b/neutron/services/l3_router/README new file mode 100644 index 000000000..f6ca35bed --- /dev/null +++ b/neutron/services/l3_router/README @@ -0,0 +1,30 @@ +This service plugin implements the L3 routing functionality (resources router +and floatingip) that in earlier releases before Havana was provided by core +plugins (openvswitch, linuxbridge, ... etc). + +Core plugins can now choose not to implement L3 routing functionality and +instead delegate that to the L3 routing service plugin. + +The required changes to a core plugin are in that case: +- Do not inherit 'l3_db.L3_NAT_db_mixin' (or its descendants like extraroute) + anymore. +- Remove "router" from 'supported_extension_aliases'. +- Modify any 'self' references to members in L3_NAT_db_mixin to instead use + 'manager.NeutronManager.get_service_plugins().get(constants.L3_ROUTER_NAT)' + For example, + self.prevent_l3_port_deletion(...) + becomes something like + plugin = manager.NeutronManager.get_service_plugins().get( + constants.L3_ROUTER_NAT) + if plugin: + plugin.prevent_l3_port_deletion(...) + +If the core plugin has relied on the L3Agent the following must also be changed: +- Do not inherit 'l3_rpc_base.L3RpcCallbackMixin' in any '*RpcCallbacks' class. +- Do not be a consumer of the topics.L3PLUGIN topic for RPC. + +To use the L3 routing service plugin, add +'neutron.services.l3_router.l3_router_plugin.L3RouterPlugin' +to 'service_plugins' in '/etc/neutron/neutron.conf'. 
+That is, +service_plugins = neutron.services.l3_router.l3_router_plugin.L3RouterPlugin diff --git a/neutron/services/l3_router/__init__.py b/neutron/services/l3_router/__init__.py new file mode 100644 index 000000000..7506a2914 --- /dev/null +++ b/neutron/services/l3_router/__init__.py @@ -0,0 +1,16 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2013 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/neutron/services/l3_router/l3_apic.py b/neutron/services/l3_router/l3_apic.py new file mode 100644 index 000000000..02198e8dc --- /dev/null +++ b/neutron/services/l3_router/l3_apic.py @@ -0,0 +1,135 @@ +# Copyright (c) 2014 Cisco Systems Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Arvind Somya (asomya@cisco.com), Cisco Systems Inc. 
class ApicL3ServicePlugin(db_base_plugin_v2.NeutronDbPluginV2,
                          db_base_plugin_v2.CommonDbMixin,
                          extraroute_db.ExtraRoute_db_mixin,
                          l3_gwmode_db.L3_NAT_db_mixin):
    """Implementation of the APIC L3 Router Service Plugin.

    This class implements a L3 service plugin that provides
    internal gateway functionality for the Cisco APIC (Application
    Policy Infrastructure Controller).
    """
    supported_extension_aliases = ["router", "ext-gw-mode", "extraroute"]

    def __init__(self):
        super(ApicL3ServicePlugin, self).__init__()
        qdbapi.register_models(base=model_base.BASEV2)
        # APIC manager handles all controller-side (EPG/contract) state.
        self.manager = apic_manager.APICManager()

    @staticmethod
    def get_plugin_type():
        return constants.L3_ROUTER_NAT

    @staticmethod
    def get_plugin_description():
        """Returns string description of the plugin."""
        return _("L3 Router Service Plugin for basic L3 using the APIC")

    def _add_epg_to_contract(self, tenant_id, epg, contract):
        """Add an End Point Group(EPG) to a contract as provider/consumer."""
        if self.manager.db.get_provider_contract():
            # Set this network's EPG as a consumer
            self.manager.set_contract_for_epg(tenant_id, epg.epg_id,
                                              contract.contract_id)
        else:
            # Set this network's EPG as a provider
            self.manager.set_contract_for_epg(tenant_id, epg.epg_id,
                                              contract.contract_id,
                                              provider=True)

    def add_router_interface(self, context, router_id, interface_info):
        """Attach a subnet to a router.

        APIC-side EPG/contract setup happens first; if the Neutron DB
        attach then fails, the contract association is rolled back.
        """
        tenant_id = context.tenant_id
        subnet_id = interface_info['subnet_id']
        # Pass the args dict lazily so interpolation only happens when
        # debug logging is enabled (consistent with oslo logging usage).
        LOG.debug("Attaching subnet %(subnet_id)s to "
                  "router %(router_id)s", {'subnet_id': subnet_id,
                                           'router_id': router_id})

        # Get network for this subnet
        subnet = self.get_subnet(context, subnet_id)
        network_id = subnet['network_id']
        net_name = self.get_network(context, network_id)['name']

        # Setup tenant filters and contracts
        contract = self.manager.create_tenant_contract(tenant_id)

        # Check for a provider EPG
        epg = self.manager.ensure_epg_created_for_network(tenant_id,
                                                          network_id,
                                                          net_name)
        self._add_epg_to_contract(tenant_id, epg, contract)

        # Create DB port
        try:
            return super(ApicL3ServicePlugin, self).add_router_interface(
                context, router_id, interface_info)
        except Exception:
            LOG.error(_("Error attaching subnet %(subnet_id)s to "
                        "router %(router_id)s"), {'subnet_id': subnet_id,
                                                  'router_id': router_id})
            # Undo the APIC contract association before re-raising.
            with excutils.save_and_reraise_exception():
                self.manager.delete_contract_for_epg(tenant_id, epg.epg_id,
                                                     contract.contract_id,
                                                     provider=epg.provider)

    def remove_router_interface(self, context, router_id, interface_info):
        """Detach a subnet from a router.

        APIC-side contract removal happens first; if the Neutron DB detach
        then fails, the contract association is restored.
        """
        tenant_id = context.tenant_id
        subnet_id = interface_info['subnet_id']
        LOG.debug("Detaching subnet %(subnet_id)s from "
                  "router %(router_id)s", {'subnet_id': subnet_id,
                                           'router_id': router_id})

        # Get network for this subnet
        subnet = self.get_subnet(context, subnet_id)
        network_id = subnet['network_id']
        network = self.get_network(context, network_id)

        contract = self.manager.create_tenant_contract(tenant_id)

        epg = self.manager.ensure_epg_created_for_network(tenant_id,
                                                          network_id,
                                                          network['name'])
        # Delete contract for this epg
        self.manager.delete_contract_for_epg(tenant_id, epg.epg_id,
                                             contract.contract_id,
                                             provider=epg.provider)

        try:
            return super(ApicL3ServicePlugin, self).remove_router_interface(
                context, router_id, interface_info)
        except Exception:
            LOG.error(_("Error detaching subnet %(subnet_id)s from "
                        "router %(router_id)s"), {'subnet_id': subnet_id,
                                                  'router_id': router_id})
            # Re-associate the contract before re-raising.
            with excutils.save_and_reraise_exception():
                self._add_epg_to_contract(tenant_id, epg, contract)
class L3RouterPluginRpcCallbacks(rpc_compat.RpcCallback,
                                 l3_rpc_base.L3RpcCallbackMixin):
    """RPC endpoints served to L3 agents; logic lives in the mixin."""

    RPC_API_VERSION = '1.1'


class L3RouterPlugin(db_base_plugin_v2.CommonDbMixin,
                     extraroute_db.ExtraRoute_db_mixin,
                     l3_gwmode_db.L3_NAT_db_mixin,
                     l3_agentschedulers_db.L3AgentSchedulerDbMixin):

    """Implementation of the Neutron L3 Router Service Plugin.

    This class implements a L3 service plugin that provides
    router and floatingip resources and manages associated
    request/response.
    All DB related work is implemented in classes
    l3_db.L3_NAT_db_mixin and extraroute_db.ExtraRoute_db_mixin.
    """
    supported_extension_aliases = ["router", "ext-gw-mode",
                                   "extraroute", "l3_agent_scheduler"]

    def __init__(self):
        qdbapi.register_models(base=model_base.BASEV2)
        self.setup_rpc()
        # Scheduler driver is configurable (router_scheduler_driver).
        self.router_scheduler = importutils.import_object(
            cfg.CONF.router_scheduler_driver)

    def setup_rpc(self):
        # RPC support
        self.topic = topics.L3PLUGIN
        self.conn = rpc_compat.create_connection(new=True)
        self.agent_notifiers.update(
            {q_const.AGENT_TYPE_L3: l3_rpc_agent_api.L3AgentNotifyAPI()})
        self.endpoints = [L3RouterPluginRpcCallbacks()]
        self.conn.create_consumer(self.topic, self.endpoints,
                                  fanout=False)
        self.conn.consume_in_threads()

    def get_plugin_type(self):
        return constants.L3_ROUTER_NAT

    def get_plugin_description(self):
        """Returns string description of the plugin."""
        return ("L3 Router Service Plugin for basic L3 forwarding"
                " between (L2) Neutron networks and access to external"
                " networks via a NAT gateway.")

    def create_floatingip(self, context, floatingip):
        """Create floating IP.

        :param context: Neutron request context
        :param floatingip: data for the floating IP being created
        :returns: A floating IP object on success

        As the L3 router plugin asynchronously creates floating IPs
        leveraging the L3 agent, the initial status for the floating
        IP object will be DOWN.
        """
        return super(L3RouterPlugin, self).create_floatingip(
            context, floatingip,
            initial_status=q_const.FLOATINGIP_STATUS_DOWN)
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/neutron/services/loadbalancer/agent/__init__.py b/neutron/services/loadbalancer/agent/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/neutron/services/loadbalancer/agent/agent.py b/neutron/services/loadbalancer/agent/agent.py new file mode 100644 index 000000000..84de61b13 --- /dev/null +++ b/neutron/services/loadbalancer/agent/agent.py @@ -0,0 +1,72 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013 New Dream Network, LLC (DreamHost) +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
class LbaasAgentService(rpc_compat.Service):
    # RPC service wrapper that also drives the manager's periodic tasks.
    def start(self):
        """Start the RPC service and schedule periodic task execution."""
        super(LbaasAgentService, self).start()
        # NOTE(review): self.tg and self.manager are assumed to be provided
        # by the rpc_compat.Service base class — confirm against its impl.
        self.tg.add_timer(
            cfg.CONF.periodic_interval,
            self.manager.run_periodic_tasks,
            None,
            None
        )


def main():
    """LBaaS agent entry point: register options, then launch the service."""
    cfg.CONF.register_opts(OPTS)
    cfg.CONF.register_opts(manager.OPTS)
    # import interface options just in case the driver uses namespaces
    cfg.CONF.register_opts(interface.OPTS)
    config.register_interface_driver_opts_helper(cfg.CONF)
    config.register_agent_state_opts_helper(cfg.CONF)
    config.register_root_helper(cfg.CONF)

    # Parse CLI/config files before any option is read.
    common_config.init(sys.argv[1:])
    config.setup_logging(cfg.CONF)

    mgr = manager.LbaasAgentManager(cfg.CONF)
    svc = LbaasAgentService(
        host=cfg.CONF.host,
        topic=topics.LOADBALANCER_AGENT,
        manager=mgr
    )
    # Blocks until the service is stopped.
    service.launch(svc).wait()
class LbaasAgentApi(rpc_compat.RpcProxy):
    """Agent side of the Agent to Plugin RPC API."""

    API_VERSION = '2.0'
    # history
    # 1.0 Initial version
    # 2.0 Generic API for agent based drivers
    #     - get_logical_device() handling changed on plugin side;
    #     - pool_deployed() and update_status() methods added;

    def __init__(self, topic, context, host):
        super(LbaasAgentApi, self).__init__(topic, self.API_VERSION)
        self.context = context
        self.host = host

    def _plugin_call(self, method, **kwargs):
        # Every RPC in this API is a synchronous call on self.topic using
        # the stored context; centralize that common shape here.
        return self.call(
            self.context,
            self.make_msg(method, **kwargs),
            topic=self.topic
        )

    def get_ready_devices(self):
        """Ask the plugin which pools this host should deploy."""
        return self._plugin_call('get_ready_devices', host=self.host)

    def pool_destroyed(self, pool_id):
        """Report that a pool's device has been destroyed."""
        return self._plugin_call('pool_destroyed', pool_id=pool_id)

    def pool_deployed(self, pool_id):
        """Report that a pool's device has been deployed."""
        return self._plugin_call('pool_deployed', pool_id=pool_id)

    def get_logical_device(self, pool_id):
        """Fetch the full logical device config for a pool."""
        return self._plugin_call('get_logical_device', pool_id=pool_id)

    def update_status(self, obj_type, obj_id, status):
        """Push a status change for a vip/pool/member/monitor object."""
        return self._plugin_call('update_status', obj_type=obj_type,
                                 obj_id=obj_id, status=status)

    def plug_vip_port(self, port_id):
        """Ask the plugin to plug the VIP port on this host."""
        return self._plugin_call('plug_vip_port', port_id=port_id,
                                 host=self.host)

    def unplug_vip_port(self, port_id):
        """Ask the plugin to unplug the VIP port on this host."""
        return self._plugin_call('unplug_vip_port', port_id=port_id,
                                 host=self.host)

    def update_pool_stats(self, pool_id, stats):
        """Push collected statistics for a pool."""
        return self._plugin_call('update_pool_stats', pool_id=pool_id,
                                 stats=stats, host=self.host)

import abc

import six


@six.add_metaclass(abc.ABCMeta)
class AgentDeviceDriver(object):
    """Abstract device driver that defines the API required by LBaaS agent."""

    # NOTE(review): declared with `cls` but not decorated with
    # @classmethod; the agent manager invokes it on driver *instances*
    # (driver_inst.get_name() in agent_manager._load_drivers), so adding
    # the decorator needs a check of every concrete driver first.
    @abc.abstractmethod
    def get_name(cls):
        """Returns unique name across all LBaaS device drivers."""
        pass

    @abc.abstractmethod
    def deploy_instance(self, logical_config):
        """Fully deploys a loadbalancer instance from a given config."""
        pass

    @abc.abstractmethod
    def undeploy_instance(self, pool_id):
        """Fully undeploys the loadbalancer instance."""
        pass

    @abc.abstractmethod
    def get_stats(self, pool_id):
        """Returns statistics collected for the given pool's instance."""
        pass

    def remove_orphans(self, known_pool_ids):
        """Remove deployed instances not listed in known_pool_ids.

        Optional operation: drivers that cannot enumerate their deployed
        instances leave this unimplemented.
        """
        # Not all drivers will support this
        raise NotImplementedError()

    @abc.abstractmethod
    def create_vip(self, vip):
        """Create the vip on the device of its pool."""
        pass

    @abc.abstractmethod
    def update_vip(self, old_vip, vip):
        """Apply the changes between old_vip and vip on the device."""
        pass

    @abc.abstractmethod
    def delete_vip(self, vip):
        """Remove the vip from the device of its pool."""
        pass

    @abc.abstractmethod
    def create_pool(self, pool):
        """Create the pool on the device."""
        pass

    @abc.abstractmethod
    def update_pool(self, old_pool, pool):
        """Apply the changes between old_pool and pool on the device."""
        pass

    @abc.abstractmethod
    def delete_pool(self, pool):
        """Remove the pool from the device."""
        pass

    @abc.abstractmethod
    def create_member(self, member):
        """Add the member to the device of its pool."""
        pass

    @abc.abstractmethod
    def update_member(self, old_member, member):
        """Apply the changes between old_member and member on the device."""
        pass

    @abc.abstractmethod
    def delete_member(self, member):
        """Remove the member from the device of its pool."""
        pass

    @abc.abstractmethod
    def create_pool_health_monitor(self, health_monitor, pool_id):
        """Associate the health monitor with the given pool on the device."""
        pass

    @abc.abstractmethod
    def update_pool_health_monitor(self,
                                   old_health_monitor,
                                   health_monitor,
                                   pool_id):
        """Apply health monitor changes for the given pool on the device."""
        pass

    @abc.abstractmethod
    def delete_pool_health_monitor(self, health_monitor, pool_id):
        """Dissociate the health monitor from the given pool on the device."""
        pass

# --- new file: neutron/services/loadbalancer/agent/agent_manager.py ---
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 New Dream Network, LLC
# (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Mark McClain, DreamHost

from oslo.config import cfg

from neutron.agent import rpc as agent_rpc
from neutron.common import constants as n_const
from neutron.common import exceptions as n_exc
from neutron.common import rpc_compat
from neutron.common import topics
from neutron import context
from neutron.openstack.common import importutils
from neutron.openstack.common import log as logging
from neutron.openstack.common import loopingcall
from neutron.openstack.common import periodic_task
from neutron.plugins.common import constants
from neutron.services.loadbalancer.agent import agent_api

LOG = logging.getLogger(__name__)

OPTS = [
    cfg.MultiStrOpt(
        'device_driver',
        default=['neutron.services.loadbalancer.drivers'
                 '.haproxy.namespace_driver.HaproxyNSDriver'],
        help=_('Drivers used to manage loadbalancing devices'),
    ),
]


class DeviceNotFoundOnAgent(n_exc.NotFound):
    # NOTE(review): DriverNotSpecified in agent_driver_base.py sets
    # `message`, not `msg` — confirm this attribute name is the one the
    # NotFound base class actually formats.
    msg = _('Unknown device with pool_id %(pool_id)s')


class LbaasAgentManager(rpc_compat.RpcCallback, periodic_task.PeriodicTasks):
    """RPC-driven manager running inside the LBaaS agent process.

    Receives per-object create/update/delete calls from the plugin,
    dispatches them to the configured device drivers, and reports status
    and statistics back through the plugin RPC API.
    """

    RPC_API_VERSION = '2.0'
    # history
    # 1.0 Initial version
    # 1.1 Support agent_updated call
    # 2.0 Generic API for agent based drivers
    #     - modify/reload/destroy_pool methods were removed;
    #     - added methods to handle create/update/delete for every lbaas
    #       object individually;

    def __init__(self, conf):
        super(LbaasAgentManager, self).__init__()
        self.conf = conf
        self.context = context.get_admin_context_without_session()
        self.plugin_rpc = agent_api.LbaasAgentApi(
            topics.LOADBALANCER_PLUGIN,
            self.context,
            self.conf.host
        )
        self._load_drivers()

        # Status payload periodically reported to the plugin; 'start_flag'
        # is dropped after the first successful report (see _report_state).
        self.agent_state = {
            'binary': 'neutron-lbaas-agent',
            'host': conf.host,
            'topic': topics.LOADBALANCER_AGENT,
            'configurations': {'device_drivers': self.device_drivers.keys()},
            'agent_type': n_const.AGENT_TYPE_LOADBALANCER,
            'start_flag': True}
        self.admin_state_up = True

        self._setup_state_rpc()
        self.needs_resync = False
        # pool_id->device_driver_name mapping used to store known instances
        self.instance_mapping = {}

    def _load_drivers(self):
        """Instantiate every configured device driver, keyed by its name.

        Exits the process on an unimportable driver or a duplicate
        driver name.
        """
        self.device_drivers = {}
        for driver in self.conf.device_driver:
            try:
                driver_inst = importutils.import_object(
                    driver,
                    self.conf,
                    self.plugin_rpc
                )
            except ImportError:
                msg = _('Error importing loadbalancer device driver: %s')
                raise SystemExit(msg % driver)

            driver_name = driver_inst.get_name()
            if driver_name not in self.device_drivers:
                self.device_drivers[driver_name] = driver_inst
            else:
                msg = _('Multiple device drivers with the same name found: %s')
                raise SystemExit(msg % driver_name)

    def _setup_state_rpc(self):
        """Start the periodic agent-state report loop, if enabled."""
        self.state_rpc = agent_rpc.PluginReportStateAPI(
            topics.LOADBALANCER_PLUGIN)
        report_interval = self.conf.AGENT.report_interval
        if report_interval:
            heartbeat = loopingcall.FixedIntervalLoopingCall(
                self._report_state)
            heartbeat.start(interval=report_interval)

    def _report_state(self):
        """Send the current agent state (plus instance count) to the plugin."""
        try:
            instance_count = len(self.instance_mapping)
            self.agent_state['configurations']['instances'] = instance_count
            self.state_rpc.report_state(self.context,
                                        self.agent_state)
            # Only the very first report should carry 'start_flag'.
            self.agent_state.pop('start_flag', None)
        except Exception:
            LOG.exception(_("Failed reporting state!"))

    def initialize_service_hook(self, started_by):
        """Hook called once the RPC service is up: do an initial sync."""
        self.sync_state()

    @periodic_task.periodic_task
    def periodic_resync(self, context):
        """Re-run sync_state when a previous operation flagged a resync."""
        if self.needs_resync:
            self.needs_resync = False
            self.sync_state()

    @periodic_task.periodic_task(spacing=6)
    def collect_stats(self, context):
        """Collect stats from each known instance and push them upstream."""
        for pool_id, driver_name in self.instance_mapping.items():
            driver = self.device_drivers[driver_name]
            try:
                stats = driver.get_stats(pool_id)
                if stats:
                    self.plugin_rpc.update_pool_stats(pool_id, stats)
            except Exception:
                LOG.exception(_('Error updating statistics on pool %s'),
                              pool_id)
                self.needs_resync = True

    def sync_state(self):
        """Reconcile local instances against the plugin's ready devices."""
        known_instances = set(self.instance_mapping.keys())
        try:
            ready_instances = set(self.plugin_rpc.get_ready_devices())

            # Tear down instances the plugin no longer considers ready,
            # then (re)deploy everything that is.
            for deleted_id in known_instances - ready_instances:
                self._destroy_pool(deleted_id)

            for pool_id in ready_instances:
                self._reload_pool(pool_id)

        except Exception:
            LOG.exception(_('Unable to retrieve ready devices'))
            self.needs_resync = True

        self.remove_orphans()

    def _get_driver(self, pool_id):
        """Return the driver managing pool_id or raise DeviceNotFoundOnAgent."""
        if pool_id not in self.instance_mapping:
            raise DeviceNotFoundOnAgent(pool_id=pool_id)

        driver_name = self.instance_mapping[pool_id]
        return self.device_drivers[driver_name]

    def _reload_pool(self, pool_id):
        """Fetch a pool's logical config and (re)deploy it via its driver."""
        try:
            logical_config = self.plugin_rpc.get_logical_device(pool_id)
            driver_name = logical_config['driver']
            if driver_name not in self.device_drivers:
                LOG.error(_('No device driver '
                            'on agent: %s.'), driver_name)
                self.plugin_rpc.update_status(
                    'pool', pool_id, constants.ERROR)
                return

            self.device_drivers[driver_name].deploy_instance(logical_config)
            self.instance_mapping[pool_id] = driver_name
            self.plugin_rpc.pool_deployed(pool_id)
        except Exception:
            LOG.exception(_('Unable to deploy instance for pool: %s'), pool_id)
            self.needs_resync = True

    def _destroy_pool(self, pool_id):
        """Undeploy a pool's instance and drop it from the local mapping."""
        driver = self._get_driver(pool_id)
        try:
            driver.undeploy_instance(pool_id)
            del self.instance_mapping[pool_id]
            self.plugin_rpc.pool_destroyed(pool_id)
        except Exception:
            LOG.exception(_('Unable to destroy device for pool: %s'), pool_id)
            self.needs_resync = True

    def remove_orphans(self):
        """Ask each driver to clean up instances it runs but we don't track."""
        for driver_name in self.device_drivers:
            pool_ids = [pool_id for pool_id in self.instance_mapping
                        if self.instance_mapping[pool_id] == driver_name]
            try:
                self.device_drivers[driver_name].remove_orphans(pool_ids)
            except NotImplementedError:
                pass  # Not all drivers will support this

    def _handle_failed_driver_call(self, operation, obj_type, obj_id, driver):
        """Log a failed driver call and mark the object as ERROR upstream."""
        LOG.exception(_('%(operation)s %(obj)s %(id)s failed on device driver '
                        '%(driver)s'),
                      {'operation': operation.capitalize(), 'obj': obj_type,
                       'id': obj_id, 'driver': driver})
        self.plugin_rpc.update_status(obj_type, obj_id, constants.ERROR)

    def create_vip(self, context, vip):
        """RPC entry point: create a vip on the device of its pool."""
        driver = self._get_driver(vip['pool_id'])
        try:
            driver.create_vip(vip)
        except Exception:
            self._handle_failed_driver_call('create', 'vip', vip['id'],
                                            driver.get_name())
        else:
            self.plugin_rpc.update_status('vip', vip['id'], constants.ACTIVE)

    def update_vip(self, context, old_vip, vip):
        """RPC entry point: apply vip changes on the device of its pool."""
        driver = self._get_driver(vip['pool_id'])
        try:
            driver.update_vip(old_vip, vip)
        except Exception:
            self._handle_failed_driver_call('update', 'vip', vip['id'],
                                            driver.get_name())
        else:
            self.plugin_rpc.update_status('vip', vip['id'], constants.ACTIVE)

    def delete_vip(self, context, vip):
        """RPC entry point: remove a vip from the device of its pool."""
        driver = self._get_driver(vip['pool_id'])
        driver.delete_vip(vip)

    def create_pool(self, context, pool, driver_name):
        """RPC entry point: create a pool with the named device driver."""
        if driver_name not in self.device_drivers:
            LOG.error(_('No device driver on agent: %s.'), driver_name)
            self.plugin_rpc.update_status('pool', pool['id'], constants.ERROR)
            return

        driver = self.device_drivers[driver_name]
        try:
            driver.create_pool(pool)
        except Exception:
            self._handle_failed_driver_call('create', 'pool', pool['id'],
                                            driver.get_name())
        else:
            # Only start tracking the pool once its creation succeeded.
            self.instance_mapping[pool['id']] = driver_name
            self.plugin_rpc.update_status('pool', pool['id'], constants.ACTIVE)

    def update_pool(self, context, old_pool, pool):
        """RPC entry point: apply pool changes via its driver."""
        driver = self._get_driver(pool['id'])
        try:
            driver.update_pool(old_pool, pool)
        except Exception:
            self._handle_failed_driver_call('update', 'pool', pool['id'],
                                            driver.get_name())
        else:
            self.plugin_rpc.update_status('pool', pool['id'], constants.ACTIVE)

    def delete_pool(self, context, pool):
        """RPC entry point: delete a pool and stop tracking it."""
        driver = self._get_driver(pool['id'])
        driver.delete_pool(pool)
        del self.instance_mapping[pool['id']]

    def create_member(self, context, member):
        """RPC entry point: add a member to the device of its pool."""
        driver = self._get_driver(member['pool_id'])
        try:
            driver.create_member(member)
        except Exception:
            self._handle_failed_driver_call('create', 'member', member['id'],
                                            driver.get_name())
        else:
            self.plugin_rpc.update_status('member', member['id'],
                                          constants.ACTIVE)

    def update_member(self, context, old_member, member):
        """RPC entry point: apply member changes on the device of its pool."""
        driver = self._get_driver(member['pool_id'])
        try:
            driver.update_member(old_member, member)
        except Exception:
            self._handle_failed_driver_call('update', 'member', member['id'],
                                            driver.get_name())
        else:
            self.plugin_rpc.update_status('member', member['id'],
                                          constants.ACTIVE)

    def delete_member(self, context, member):
        """RPC entry point: remove a member from the device of its pool."""
        driver = self._get_driver(member['pool_id'])
        driver.delete_member(member)

    def create_pool_health_monitor(self, context, health_monitor, pool_id):
        """RPC entry point: associate a health monitor with a pool."""
        driver = self._get_driver(pool_id)
        # Health monitor status is keyed by the (pool, monitor) pair.
        assoc_id = {'pool_id': pool_id, 'monitor_id': health_monitor['id']}
        try:
            driver.create_pool_health_monitor(health_monitor, pool_id)
        except Exception:
            self._handle_failed_driver_call(
                'create', 'health_monitor', assoc_id, driver.get_name())
        else:
            self.plugin_rpc.update_status(
                'health_monitor', assoc_id, constants.ACTIVE)

    def update_pool_health_monitor(self, context, old_health_monitor,
                                   health_monitor, pool_id):
        """RPC entry point: apply health monitor changes for a pool."""
        driver = self._get_driver(pool_id)
        assoc_id = {'pool_id': pool_id, 'monitor_id': health_monitor['id']}
        try:
            driver.update_pool_health_monitor(old_health_monitor,
                                              health_monitor,
                                              pool_id)
        except Exception:
            self._handle_failed_driver_call(
                'update', 'health_monitor', assoc_id, driver.get_name())
        else:
            self.plugin_rpc.update_status(
                'health_monitor', assoc_id, constants.ACTIVE)

    def delete_pool_health_monitor(self, context, health_monitor, pool_id):
        """RPC entry point: dissociate a health monitor from a pool."""
        driver = self._get_driver(pool_id)
        driver.delete_pool_health_monitor(health_monitor, pool_id)

    def agent_updated(self, context, payload):
        """Handle the agent_updated notification event."""
        if payload['admin_state_up'] != self.admin_state_up:
            self.admin_state_up = payload['admin_state_up']
            if self.admin_state_up:
                self.needs_resync = True
            else:
                # .keys() snapshot is required: _destroy_pool mutates
                # instance_mapping while we iterate.
                for pool_id in self.instance_mapping.keys():
                    LOG.info(_("Destroying pool %s due to agent disabling"),
                             pool_id)
                    self._destroy_pool(pool_id)
            LOG.info(_("Agent_updated by server side %s!"), payload)

# --- new file: neutron/services/loadbalancer/agent_scheduler.py ---
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright (c) 2013 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import random

import sqlalchemy as sa
from sqlalchemy import orm
from sqlalchemy.orm import joinedload

from neutron.common import constants
from neutron.db import agents_db
from neutron.db import agentschedulers_db
from neutron.db import model_base
from neutron.extensions import lbaas_agentscheduler
from neutron.openstack.common import log as logging

LOG = logging.getLogger(__name__)


class PoolLoadbalancerAgentBinding(model_base.BASEV2):
    """Represents binding between neutron loadbalancer pools and agents."""

    pool_id = sa.Column(sa.String(36),
                        sa.ForeignKey("pools.id", ondelete='CASCADE'),
                        primary_key=True)
    agent = orm.relation(agents_db.Agent)
    agent_id = sa.Column(sa.String(36), sa.ForeignKey("agents.id",
                                                      ondelete='CASCADE'),
                         nullable=False)


class LbaasAgentSchedulerDbMixin(agentschedulers_db.AgentSchedulerDbMixin,
                                 lbaas_agentscheduler
                                 .LbaasAgentSchedulerPluginBase):
    """DB helpers to look up lbaas agents and the pools they host."""

    def get_lbaas_agent_hosting_pool(self, context, pool_id, active=None):
        """Return {'agent': <agent dict>} for the agent hosting pool_id.

        Returns None when the pool is unbound, or when the bound agent is
        not eligible (see is_eligible_agent).
        """
        query = context.session.query(PoolLoadbalancerAgentBinding)
        query = query.options(joinedload('agent'))
        binding = query.get(pool_id)

        if (binding and self.is_eligible_agent(
                active, binding.agent)):
            return {'agent': self._make_agent_dict(binding.agent)}

    def get_lbaas_agents(self, context, active=None, filters=None):
        """List eligible loadbalancer agents, optionally column-filtered."""
        query = context.session.query(agents_db.Agent)
        query = query.filter_by(agent_type=constants.AGENT_TYPE_LOADBALANCER)
        if active is not None:
            query = query.filter_by(admin_state_up=active)
        if filters:
            for key, value in filters.iteritems():
                # Silently skip filter keys that are not Agent columns.
                column = getattr(agents_db.Agent, key, None)
                if column:
                    query = query.filter(column.in_(value))

        return [agent
                for agent in query
                if self.is_eligible_agent(active, agent)]

    def list_pools_on_lbaas_agent(self, context, id):
        """Return {'pools': [...]} for pools bound to the given agent id."""
        query = context.session.query(PoolLoadbalancerAgentBinding.pool_id)
        query = query.filter_by(agent_id=id)
        pool_ids = [item[0] for item in query]
        if pool_ids:
            return {'pools': self.get_pools(context, filters={'id': pool_ids})}
        else:
            return {'pools': []}

    def get_lbaas_agent_candidates(self, device_driver, active_agents):
        """Filter active_agents down to those exposing device_driver."""
        candidates = []
        for agent in active_agents:
            agent_conf = self.get_configuration_dict(agent)
            if device_driver in agent_conf['device_drivers']:
                candidates.append(agent)
        return candidates


class ChanceScheduler(object):
    """Allocate a loadbalancer agent for a vip in a random way."""

    def schedule(self, plugin, context, pool, device_driver):
        """Schedule the pool to an active loadbalancer agent if there
        is no enabled agent hosting it.

        Returns the chosen agent, or None when the pool is already
        hosted or no suitable agent exists.
        """
        with context.session.begin(subtransactions=True):
            lbaas_agent = plugin.get_lbaas_agent_hosting_pool(
                context, pool['id'])
            if lbaas_agent:
                # get_lbaas_agent_hosting_pool wraps the agent dict under
                # the 'agent' key; indexing lbaas_agent['id'] directly
                # raised KeyError on this (already-hosted) path.
                LOG.debug(_('Pool %(pool_id)s has already been hosted'
                            ' by lbaas agent %(agent_id)s'),
                          {'pool_id': pool['id'],
                           'agent_id': lbaas_agent['agent']['id']})
                return

            active_agents = plugin.get_lbaas_agents(context, active=True)
            if not active_agents:
                LOG.warn(_('No active lbaas agents for pool %s'), pool['id'])
                return

            candidates = plugin.get_lbaas_agent_candidates(device_driver,
                                                           active_agents)
            if not candidates:
                LOG.warn(_('No lbaas agent supporting device driver %s'),
                         device_driver)
                return

            chosen_agent = random.choice(candidates)
            binding = PoolLoadbalancerAgentBinding()
            binding.agent = chosen_agent
            binding.pool_id = pool['id']
            context.session.add(binding)
            LOG.debug(_('Pool %(pool_id)s is scheduled to '
                        'lbaas agent %(agent_id)s'),
                      {'pool_id': pool['id'],
                       'agent_id': chosen_agent['id']})
            return chosen_agent

# --- new file: neutron/services/loadbalancer/constants.py ---
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2013 Mirantis, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

# Supported load balancing algorithms.
LB_METHOD_ROUND_ROBIN = 'ROUND_ROBIN'
LB_METHOD_LEAST_CONNECTIONS = 'LEAST_CONNECTIONS'
LB_METHOD_SOURCE_IP = 'SOURCE_IP'

# Protocols a vip/pool may balance.
PROTOCOL_TCP = 'TCP'
PROTOCOL_HTTP = 'HTTP'
PROTOCOL_HTTPS = 'HTTPS'

# Health monitor probe types.
HEALTH_MONITOR_PING = 'PING'
HEALTH_MONITOR_TCP = 'TCP'
HEALTH_MONITOR_HTTP = 'HTTP'
HEALTH_MONITOR_HTTPS = 'HTTPS'

# Session persistence ("stickiness") modes.
SESSION_PERSISTENCE_SOURCE_IP = 'SOURCE_IP'
SESSION_PERSISTENCE_HTTP_COOKIE = 'HTTP_COOKIE'
SESSION_PERSISTENCE_APP_COOKIE = 'APP_COOKIE'

# Keys of the statistics dictionaries exchanged between the device
# drivers, the agent and the plugin.
STATS_ACTIVE_CONNECTIONS = 'active_connections'
STATS_MAX_CONNECTIONS = 'max_connections'
STATS_TOTAL_CONNECTIONS = 'total_connections'
STATS_CURRENT_SESSIONS = 'current_sessions'
STATS_MAX_SESSIONS = 'max_sessions'
STATS_TOTAL_SESSIONS = 'total_sessions'
STATS_IN_BYTES = 'bytes_in'
STATS_OUT_BYTES = 'bytes_out'
STATS_CONNECTION_ERRORS = 'connection_errors'
STATS_RESPONSE_ERRORS = 'response_errors'
STATS_STATUS = 'status'
STATS_HEALTH = 'health'
STATS_FAILED_CHECKS = 'failed_checks'

# --- new file: neutron/services/loadbalancer/drivers/__init__.py ---
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with
# the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Mark McClain, DreamHost

# --- new file: neutron/services/loadbalancer/drivers/abstract_driver.py ---
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 Radware LTD.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Avishay Balderman, Radware

import abc

import six


@six.add_metaclass(abc.ABCMeta)
class LoadBalancerAbstractDriver(object):
    """Abstract lbaas driver that expose ~same API as lbaas plugin.

    The configuration elements (Vip,Member,etc) are the dicts that
    are returned to the tenant.
    Get operations are not part of the API - it will be handled
    by the lbaas plugin.
    """

    @abc.abstractmethod
    def create_vip(self, context, vip):
        """A real driver would invoke a call to his backend
        and set the Vip status to ACTIVE/ERROR according
        to the backend call result
        self.plugin.update_status(context, Vip, vip["id"],
                                  constants.ACTIVE)
        """
        pass

    @abc.abstractmethod
    def update_vip(self, context, old_vip, vip):
        """Driver may call the code below in order to update the status.
        self.plugin.update_status(context, Vip, id, constants.ACTIVE)
        """
        pass

    @abc.abstractmethod
    def delete_vip(self, context, vip):
        """A real driver would invoke a call to his backend
        and try to delete the Vip.
        if the deletion was successful, delete the record from the database.
        if the deletion has failed, set the Vip status to ERROR.
        """
        pass

    @abc.abstractmethod
    def create_pool(self, context, pool):
        """Driver may call the code below in order to update the status.
        self.plugin.update_status(context, Pool, pool["id"],
                                  constants.ACTIVE)
        """
        pass

    @abc.abstractmethod
    def update_pool(self, context, old_pool, pool):
        """Driver may call the code below in order to update the status.
        self.plugin.update_status(context,
                                  Pool,
                                  pool["id"], constants.ACTIVE)
        """
        pass

    @abc.abstractmethod
    def delete_pool(self, context, pool):
        """Driver can call the code below in order to delete the pool.
        self.plugin._delete_db_pool(context, pool["id"])
        or set the status to ERROR if deletion failed
        """
        pass

    @abc.abstractmethod
    def stats(self, context, pool_id):
        """Return the statistics collected for the given pool."""
        pass

    @abc.abstractmethod
    def create_member(self, context, member):
        """Driver may call the code below in order to update the status.
        self.plugin.update_status(context, Member, member["id"],
                                  constants.ACTIVE)
        """
        pass

    @abc.abstractmethod
    def update_member(self, context, old_member, member):
        """Driver may call the code below in order to update the status.
        self.plugin.update_status(context, Member,
                                  member["id"], constants.ACTIVE)
        """
        pass

    @abc.abstractmethod
    def delete_member(self, context, member):
        """Remove the member from its backend pool."""
        pass

    @abc.abstractmethod
    def update_pool_health_monitor(self, context,
                                   old_health_monitor,
                                   health_monitor,
                                   pool_id):
        """Apply health monitor changes for the given pool on the backend."""
        pass

    @abc.abstractmethod
    def create_pool_health_monitor(self, context,
                                   health_monitor,
                                   pool_id):
        """Driver may call the code below in order to update the status.
        self.plugin.update_pool_health_monitor(context,
                                               health_monitor["id"],
                                               pool_id,
                                               constants.ACTIVE)
        """
        pass

    @abc.abstractmethod
    def delete_pool_health_monitor(self, context, health_monitor, pool_id):
        """Dissociate the health monitor from the given pool on the backend."""
        pass

# --- new file: neutron/services/loadbalancer/drivers/common/__init__.py ---
# (empty module)

# --- new file:
#     neutron/services/loadbalancer/drivers/common/agent_driver_base.py ---
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Mark McClain, DreamHost

import uuid

from oslo.config import cfg

from neutron.common import constants as q_const
from neutron.common import exceptions as n_exc
from neutron.common import rpc_compat
from neutron.common import topics
from neutron.db import agents_db
from neutron.db.loadbalancer import loadbalancer_db
from neutron.extensions import lbaas_agentscheduler
from neutron.extensions import portbindings
from neutron.openstack.common import importutils
from neutron.openstack.common import log as logging
from neutron.plugins.common import constants
from neutron.services.loadbalancer.drivers import abstract_driver

LOG = logging.getLogger(__name__)

AGENT_SCHEDULER_OPTS = [
    cfg.StrOpt('loadbalancer_pool_scheduler_driver',
               default='neutron.services.loadbalancer.agent_scheduler'
                       '.ChanceScheduler',
               help=_('Driver to use for scheduling '
                      'pool to a default loadbalancer agent')),
]

cfg.CONF.register_opts(AGENT_SCHEDULER_OPTS)


class DriverNotSpecified(n_exc.NeutronException):
    message = _("Device driver for agent should be specified "
                "in plugin driver.")


class LoadBalancerCallbacks(rpc_compat.RpcCallback):
    """Plugin-side handlers for the RPC calls issued by the LBaaS agent."""

    RPC_API_VERSION = '2.0'
    # history
    # 1.0 Initial version
    # 2.0 Generic API for agent based drivers
    #     - get_logical_device() handling changed;
    #     - pool_deployed() and update_status() methods added;

    def __init__(self, plugin):
        super(LoadBalancerCallbacks, self).__init__()
        self.plugin = plugin

    def get_ready_devices(self, context, host=None):
        """Return ids of active, admin-up pools hosted by the given host."""
        with context.session.begin(subtransactions=True):
            agents = self.plugin.get_lbaas_agents(context,
                                                  filters={'host': [host]})
            if not agents:
                return []
            elif len(agents) > 1:
                LOG.warning(_('Multiple lbaas agents found on host %s'), host)
            # Only the first agent's pools are considered (see warning above).
            pools = self.plugin.list_pools_on_lbaas_agent(context,
                                                          agents[0].id)
            pool_ids = [pool['id'] for pool in pools['pools']]

            qry = context.session.query(loadbalancer_db.Pool.id)
            qry = qry.filter(loadbalancer_db.Pool.id.in_(pool_ids))
            qry = qry.filter(
                loadbalancer_db.Pool.status.in_(
                    constants.ACTIVE_PENDING_STATUSES))
            up = True  # makes pep8 and sqlalchemy happy
            qry = qry.filter(loadbalancer_db.Pool.admin_state_up == up)
            return [id for id, in qry]

    def get_logical_device(self, context, pool_id=None):
        """Assemble the full logical config dict for a pool.

        The result bundles the pool, its vip (with port and subnets),
        active/inactive members, active health monitors, and the name of
        the device driver that should handle it.
        """
        with context.session.begin(subtransactions=True):
            qry = context.session.query(loadbalancer_db.Pool)
            qry = qry.filter_by(id=pool_id)
            pool = qry.one()
            retval = {}
            retval['pool'] = self.plugin._make_pool_dict(pool)

            if pool.vip:
                retval['vip'] = self.plugin._make_vip_dict(pool.vip)
                retval['vip']['port'] = (
                    self.plugin._core_plugin._make_port_dict(pool.vip.port)
                )
                # Embed the subnet details the agent needs to plug the port.
                for fixed_ip in retval['vip']['port']['fixed_ips']:
                    fixed_ip['subnet'] = (
                        self.plugin._core_plugin.get_subnet(
                            context,
                            fixed_ip['subnet_id']
                        )
                    )
            retval['members'] = [
                self.plugin._make_member_dict(m)
                for m in pool.members if (
                    m.status in constants.ACTIVE_PENDING_STATUSES or
                    m.status == constants.INACTIVE)
            ]
            retval['healthmonitors'] = [
                self.plugin._make_health_monitor_dict(hm.healthmonitor)
                for hm in pool.monitors
                if hm.status in constants.ACTIVE_PENDING_STATUSES
            ]
            retval['driver'] = (
                self.plugin.drivers[pool.provider.provider_name].device_driver)

            return retval

    def pool_deployed(self, context, pool_id):
        """Flip every pending resource of a deployed pool to ACTIVE."""
        with context.session.begin(subtransactions=True):
            qry = context.session.query(loadbalancer_db.Pool)
            qry = qry.filter_by(id=pool_id)
            pool = qry.one()

            # set all resources to active
            if pool.status in constants.ACTIVE_PENDING_STATUSES:
                pool.status = constants.ACTIVE

            if (pool.vip and pool.vip.status in
                    constants.ACTIVE_PENDING_STATUSES):
                pool.vip.status = constants.ACTIVE

            for m in pool.members:
                if m.status in constants.ACTIVE_PENDING_STATUSES:
                    m.status = constants.ACTIVE

            for hm in pool.monitors:
                if hm.status in constants.ACTIVE_PENDING_STATUSES:
                    hm.status = constants.ACTIVE

    def update_status(self, context, obj_type, obj_id, status):
        """Persist a status reported by the agent for a lbaas object.

        For 'health_monitor', obj_id is a {'monitor_id', 'pool_id'} dict
        identifying the association rather than a plain id.
        """
        model_mapping = {
            'pool': loadbalancer_db.Pool,
            'vip': loadbalancer_db.Vip,
            'member': loadbalancer_db.Member,
            'health_monitor': loadbalancer_db.PoolMonitorAssociation
        }
        if obj_type not in model_mapping:
            raise n_exc.Invalid(_('Unknown object type: %s') % obj_type)
        try:
            if obj_type == 'health_monitor':
                self.plugin.update_pool_health_monitor(
                    context, obj_id['monitor_id'], obj_id['pool_id'], status)
            else:
                self.plugin.update_status(
                    context, model_mapping[obj_type], obj_id, status)
        except n_exc.NotFound:
            # update_status may come from agent on an object which was
            # already deleted from db with other request
            LOG.warning(_('Cannot update status: %(obj_type)s %(obj_id)s '
                          'not found in the DB, it was probably deleted '
                          'concurrently'),
                        {'obj_type': obj_type, 'obj_id': obj_id})

    def pool_destroyed(self, context, pool_id=None):
        """Agent confirmation hook that a pool has been destroyed.

        This method exists for subclasses to change the deletion
        behavior.
        """
        pass

    def plug_vip_port(self, context, port_id=None, host=None):
        """Bind a vip port to the reporting host and bring it up."""
        if not port_id:
            return

        try:
            port = self.plugin._core_plugin.get_port(
                context,
                port_id
            )
        except n_exc.PortNotFound:
            msg = _('Unable to find port %s to plug.')
            LOG.debug(msg, port_id)
            return

        port['admin_state_up'] = True
        port['device_owner'] = 'neutron:' + constants.LOADBALANCER
        # Deterministic uuid derived from the host name.
        port['device_id'] = str(uuid.uuid5(uuid.NAMESPACE_DNS, str(host)))
        port[portbindings.HOST_ID] = host
        self.plugin._core_plugin.update_port(
            context,
            port_id,
            {'port': port}
        )

    def unplug_vip_port(self, context, port_id=None, host=None):
        """Detach a vip port from its host and bring it down."""
        if not port_id:
            return

        try:
            port = self.plugin._core_plugin.get_port(
                context,
                port_id
            )
        except n_exc.PortNotFound:
            msg = _('Unable to find port %s to unplug. This can occur when '
                    'the Vip has been deleted first.')
            LOG.debug(msg, port_id)
            return

        port['admin_state_up'] = False
        port['device_owner'] = ''
        port['device_id'] = ''

        try:
            self.plugin._core_plugin.update_port(
                context,
                port_id,
                {'port': port}
            )

        except n_exc.PortNotFound:
            # The port can also disappear between the get_port above and
            # this update — tolerate it for the same reason.
            msg = _('Unable to find port %s to unplug. This can occur when '
                    'the Vip has been deleted first.')
            LOG.debug(msg, port_id)

    def update_pool_stats(self, context, pool_id=None, stats=None, host=None):
        """Persist statistics the agent collected for a pool."""
        self.plugin.update_pool_stats(context, pool_id, data=stats)


class LoadBalancerAgentApi(rpc_compat.RpcProxy):
    """Plugin side of plugin to agent RPC API."""

    BASE_RPC_API_VERSION = '2.0'
    # history
    # 1.0 Initial version
    # 1.1 Support agent_updated call
    # 2.0 Generic API for agent based drivers
    #     - modify/reload/destroy_pool methods were removed;
    #     - added methods to handle create/update/delete for every lbaas
    #       object individually;

    def __init__(self, topic):
        super(LoadBalancerAgentApi, self).__init__(
            topic, default_version=self.BASE_RPC_API_VERSION)

    def _cast(self, context, method_name, method_args, host, version=None):
        """Fire-and-forget RPC to the agent on the host-scoped topic."""
        return self.cast(
            context,
            self.make_msg(method_name, **method_args),
            topic='%s.%s' % (self.topic, host),
            version=version
        )

    def create_vip(self, context, vip, host):
        return self._cast(context, 'create_vip', {'vip': vip}, host)

    def update_vip(self, context, old_vip, vip, host):
        return self._cast(context, 'update_vip',
                          {'old_vip': old_vip, 'vip': vip}, host)

    def delete_vip(self, context, vip, host):
        return self._cast(context, 'delete_vip', {'vip': vip}, host)

    def create_pool(self, context, pool, host, driver_name):
        return self._cast(context, 'create_pool',
                          {'pool': pool, 'driver_name': driver_name}, host)

    def update_pool(self, context, old_pool, pool, host):
        return self._cast(context, 'update_pool',
                          {'old_pool': old_pool, 'pool': pool}, host)

    def delete_pool(self, context, pool, host):
        return self._cast(context, 'delete_pool', {'pool': pool}, host)

    def create_member(self, context, member, host):
        return self._cast(context, 'create_member', {'member': member}, host)

    def update_member(self, context, old_member, member, host):
        return self._cast(context, 'update_member',
                          {'old_member': old_member, 'member': member}, host)

    def delete_member(self, context, member, host):
        return self._cast(context, 'delete_member', {'member': member}, host)

    def create_pool_health_monitor(self, context, health_monitor, pool_id,
                                   host):
        return self._cast(context, 'create_pool_health_monitor',
                          {'health_monitor': health_monitor,
                           'pool_id': pool_id}, host)

    def update_pool_health_monitor(self, context, old_health_monitor,
                                   health_monitor, pool_id, host):
        return self._cast(context, 'update_pool_health_monitor',
                          {'old_health_monitor': old_health_monitor,
                           'health_monitor': health_monitor,
                           'pool_id': pool_id}, host)

    def delete_pool_health_monitor(self, context, health_monitor, pool_id,
                                   host):
        return self._cast(context, 'delete_pool_health_monitor',
                          {'health_monitor': health_monitor,
                           'pool_id': pool_id}, host)

    def agent_updated(self, context, admin_state_up, host):
        return self._cast(context, 'agent_updated',
                          {'payload': {'admin_state_up': admin_state_up}},
                          host)


class AgentDriverBase(abstract_driver.LoadBalancerAbstractDriver):
    """Base for plugin drivers that delegate work to an LBaaS agent.

    NOTE: the rest of this class continues beyond this patch hunk.
    """

    # name of device driver that should be used by the agent;
    # vendor specific plugin drivers must override it;
    device_driver = None

    def __init__(self, plugin):
        if not self.device_driver:
            raise DriverNotSpecified()

        self.agent_rpc = LoadBalancerAgentApi(topics.LOADBALANCER_AGENT)

        self.plugin = plugin
        self._set_callbacks_on_plugin()
        # Register so the agent extension can notify lbaas agents too.
        self.plugin.agent_notifiers.update(
            {q_const.AGENT_TYPE_LOADBALANCER: self.agent_rpc})

        self.pool_scheduler = importutils.import_object(
            cfg.CONF.loadbalancer_pool_scheduler_driver)
def _set_callbacks_on_plugin(self): + # other agent based plugin driver might already set callbacks on plugin + if hasattr(self.plugin, 'agent_callbacks'): + return + + self.plugin.agent_endpoints = [ + LoadBalancerCallbacks(self.plugin), + agents_db.AgentExtRpcCallback(self.plugin) + ] + self.plugin.conn = rpc_compat.create_connection(new=True) + self.plugin.conn.create_consumer( + topics.LOADBALANCER_PLUGIN, + self.plugin.agent_endpoints, + fanout=False) + self.plugin.conn.consume_in_threads() + + def get_pool_agent(self, context, pool_id): + agent = self.plugin.get_lbaas_agent_hosting_pool(context, pool_id) + if not agent: + raise lbaas_agentscheduler.NoActiveLbaasAgent(pool_id=pool_id) + return agent['agent'] + + def create_vip(self, context, vip): + agent = self.get_pool_agent(context, vip['pool_id']) + self.agent_rpc.create_vip(context, vip, agent['host']) + + def update_vip(self, context, old_vip, vip): + agent = self.get_pool_agent(context, vip['pool_id']) + if vip['status'] in constants.ACTIVE_PENDING_STATUSES: + self.agent_rpc.update_vip(context, old_vip, vip, agent['host']) + else: + self.agent_rpc.delete_vip(context, vip, agent['host']) + + def delete_vip(self, context, vip): + self.plugin._delete_db_vip(context, vip['id']) + agent = self.get_pool_agent(context, vip['pool_id']) + self.agent_rpc.delete_vip(context, vip, agent['host']) + + def create_pool(self, context, pool): + agent = self.pool_scheduler.schedule(self.plugin, context, pool, + self.device_driver) + if not agent: + raise lbaas_agentscheduler.NoEligibleLbaasAgent(pool_id=pool['id']) + self.agent_rpc.create_pool(context, pool, agent['host'], + self.device_driver) + + def update_pool(self, context, old_pool, pool): + agent = self.get_pool_agent(context, pool['id']) + if pool['status'] in constants.ACTIVE_PENDING_STATUSES: + self.agent_rpc.update_pool(context, old_pool, pool, + agent['host']) + else: + self.agent_rpc.delete_pool(context, pool, agent['host']) + + def delete_pool(self, context, 
pool): + # get agent first to know host as binding will be deleted + # after pool is deleted from db + agent = self.plugin.get_lbaas_agent_hosting_pool(context, pool['id']) + self.plugin._delete_db_pool(context, pool['id']) + if agent: + self.agent_rpc.delete_pool(context, pool, agent['agent']['host']) + + def create_member(self, context, member): + agent = self.get_pool_agent(context, member['pool_id']) + self.agent_rpc.create_member(context, member, agent['host']) + + def update_member(self, context, old_member, member): + agent = self.get_pool_agent(context, member['pool_id']) + # member may change pool id + if member['pool_id'] != old_member['pool_id']: + old_pool_agent = self.plugin.get_lbaas_agent_hosting_pool( + context, old_member['pool_id']) + if old_pool_agent: + self.agent_rpc.delete_member(context, old_member, + old_pool_agent['agent']['host']) + self.agent_rpc.create_member(context, member, agent['host']) + else: + self.agent_rpc.update_member(context, old_member, member, + agent['host']) + + def delete_member(self, context, member): + self.plugin._delete_db_member(context, member['id']) + agent = self.get_pool_agent(context, member['pool_id']) + self.agent_rpc.delete_member(context, member, agent['host']) + + def create_pool_health_monitor(self, context, healthmon, pool_id): + # healthmon is not used here + agent = self.get_pool_agent(context, pool_id) + self.agent_rpc.create_pool_health_monitor(context, healthmon, + pool_id, agent['host']) + + def update_pool_health_monitor(self, context, old_health_monitor, + health_monitor, pool_id): + agent = self.get_pool_agent(context, pool_id) + self.agent_rpc.update_pool_health_monitor(context, old_health_monitor, + health_monitor, pool_id, + agent['host']) + + def delete_pool_health_monitor(self, context, health_monitor, pool_id): + self.plugin._delete_db_pool_health_monitor( + context, health_monitor['id'], pool_id + ) + + agent = self.get_pool_agent(context, pool_id) + 
self.agent_rpc.delete_pool_health_monitor(context, health_monitor, + pool_id, agent['host']) + + def stats(self, context, pool_id): + pass diff --git a/neutron/services/loadbalancer/drivers/embrane/README b/neutron/services/loadbalancer/drivers/embrane/README new file mode 100644 index 000000000..22326b0b9 --- /dev/null +++ b/neutron/services/loadbalancer/drivers/embrane/README @@ -0,0 +1,9 @@ +Embrane LBaaS Driver + +This DRIVER interfaces OpenStack Neutron with Embrane's heleos platform, +Load Balancing appliances for cloud environments. + +L2 connectivity is leveraged by one of the supported existing plugins. + +For more details on use, configuration and implementation please refer to: +https://wiki.openstack.org/wiki/Neutron/LBaaS/EmbraneDriver \ No newline at end of file diff --git a/neutron/services/loadbalancer/drivers/embrane/__init__.py b/neutron/services/loadbalancer/drivers/embrane/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/neutron/services/loadbalancer/drivers/embrane/agent/__init__.py b/neutron/services/loadbalancer/drivers/embrane/agent/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/neutron/services/loadbalancer/drivers/embrane/agent/dispatcher.py b/neutron/services/loadbalancer/drivers/embrane/agent/dispatcher.py new file mode 100644 index 000000000..7ba000599 --- /dev/null +++ b/neutron/services/loadbalancer/drivers/embrane/agent/dispatcher.py @@ -0,0 +1,108 @@ +# Copyright 2014 Embrane, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Ivar Lazzaro, Embrane, Inc. ivar@embrane.com + +from eventlet import greenthread +from eventlet import queue +from heleosapi import exceptions as h_exc + +from neutron.openstack.common import log as logging +from neutron.plugins.embrane.common import contexts as ctx +from neutron.services.loadbalancer.drivers.embrane.agent import lb_operations +from neutron.services.loadbalancer.drivers.embrane import constants as econ + +LOG = logging.getLogger(__name__) + + +class Dispatcher(object): + def __init__(self, driver, async=True): + self._async = async + self._driver = driver + self.sync_items = dict() + self.handlers = lb_operations.handlers + + def dispatch_lb(self, d_context, *args, **kwargs): + item = d_context.item + event = d_context.event + n_context = d_context.n_context + chain = d_context.chain + + item_id = item["id"] + if event in self.handlers: + for f in self.handlers[event]: + first_run = False + if item_id not in self.sync_items: + self.sync_items[item_id] = [queue.Queue()] + first_run = True + self.sync_items[item_id][0].put( + ctx.OperationContext(event, n_context, item, chain, f, + args, kwargs)) + if first_run: + t = greenthread.spawn(self._consume_lb, + item_id, + self.sync_items[item_id][0], + self._driver, + self._async) + self.sync_items[item_id].append(t) + if not self._async: + t = self.sync_items[item_id][1] + t.wait() + + def _consume_lb(self, sync_item, sync_queue, driver, a_sync): + current_state = None + while True: + try: + if current_state == econ.DELETED: + del self.sync_items[sync_item] + return + try: + operation_context = sync_queue.get( + block=a_sync, + timeout=econ.QUEUE_TIMEOUT) + except queue.Empty: + del self.sync_items[sync_item] + return + + (operation_context.chain and + operation_context.chain.execute_all()) + + transient_state = None + try: + transient_state = operation_context.function( + driver, 
operation_context.n_context, + operation_context.item, *operation_context.args, + **operation_context.kwargs) + except (h_exc.PendingDva, h_exc.DvaNotFound, + h_exc.BrokenInterface, h_exc.DvaCreationFailed, + h_exc.BrokenDva, h_exc.ConfigurationFailed) as ex: + LOG.warning(econ.error_map[type(ex)], ex.message) + except h_exc.DvaDeleteFailed as ex: + LOG.warning(econ.error_map[type(ex)], ex.message) + transient_state = econ.DELETED + finally: + # if the returned transient state is None, no operations + # are required on the DVA status + if transient_state == econ.DELETED: + current_state = driver._delete_vip( + operation_context.n_context, + operation_context.item) + # Error state cannot be reverted + else: + driver._update_vip_graph_state( + operation_context.n_context, + operation_context.item) + except Exception: + LOG.exception(_('Unhandled exception occurred')) diff --git a/neutron/services/loadbalancer/drivers/embrane/agent/lb_operations.py b/neutron/services/loadbalancer/drivers/embrane/agent/lb_operations.py new file mode 100644 index 000000000..f31caf777 --- /dev/null +++ b/neutron/services/loadbalancer/drivers/embrane/agent/lb_operations.py @@ -0,0 +1,179 @@ +# Copyright 2014 Embrane, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Ivar Lazzaro, Embrane, Inc. 
ivar@embrane.com + +import functools + +from heleosapi import exceptions as h_exc + +from neutron.openstack.common import log as logging +from neutron.services.loadbalancer import constants as lcon +from neutron.services.loadbalancer.drivers.embrane import constants as econ + +LOG = logging.getLogger(__name__) +handlers = {} + + +def handler(event, handler): + def wrap(f): + if event not in handler.keys(): + handler[event] = [f] + else: + handler[event].append(f) + + @functools.wraps(f) + def wrapped_f(*args, **kwargs): + return f(*args, **kwargs) + + return wrapped_f + + return wrap + + +@handler(econ.Events.CREATE_VIP, handlers) +def _provision_load_balancer(driver, context, vip, flavor, + vip_utif_info, vip_ip_allocation_info, + pool_utif_info=None, + pool_ip_allocation_info=None, + pool=None, members=None, + monitors=None): + api = driver._heleos_api + tenant_id = context.tenant_id + admin_state = vip["admin_state_up"] + # Architectural configuration + api.create_load_balancer(tenant_id=tenant_id, + router_id=vip["id"], + name=vip["name"], + flavor=flavor, + up=False) + api.grow_interface(vip_utif_info, False, tenant_id, vip["id"]) + if pool: + api.grow_interface(pool_utif_info, False, tenant_id, + vip["id"]) + + # Logical configuration + api.allocate_address(vip["id"], True, vip_ip_allocation_info) + if pool: + api.allocate_address(vip["id"], True, pool_ip_allocation_info) + dva = api.configure_load_balancer(vip["id"], admin_state, + vip, pool, + monitors, members) + return api.extract_dva_state(dva) + + +@handler(econ.Events.UPDATE_VIP, handlers) +def _update_load_balancer(driver, context, vip, + old_pool_id=None, old_port_id=None, + removed_ip=None, pool_utif_info=None, + pool_ip_allocation_info=None, + new_pool=None, members=None, + monitors=None): + api = driver._heleos_api + tenant_id = context.tenant_id + admin_state = vip["admin_state_up"] + + if old_pool_id: + # Architectural Changes + api.de_allocate_address(vip['id'], False, old_port_id, removed_ip) 
+ api.shrink_interface(tenant_id, vip["id"], False, old_port_id) + api.grow_interface(pool_utif_info, False, tenant_id, vip["id"]) + # Configuration Changes + api.allocate_address(vip["id"], True, pool_ip_allocation_info) + api.replace_pool(vip["id"], True, vip, old_pool_id, + new_pool, monitors, members) + + api.update_vservice(vip["id"], True, vip) + # Dva update + dva = api.update_dva(tenant_id, vip["id"], vip["name"], + admin_state, description=vip["description"]) + + return api.extract_dva_state(dva) + + +@handler(econ.Events.DELETE_VIP, handlers) +def _delete_load_balancer(driver, context, vip): + try: + driver._heleos_api.delete_dva(context.tenant_id, vip['id']) + except h_exc.DvaNotFound: + LOG.warning(_('The load balancer %s had no physical representation, ' + 'likely already deleted'), vip['id']) + return econ.DELETED + + +@handler(econ.Events.UPDATE_POOL, handlers) +def _update_server_pool(driver, context, vip, pool, + monitors=None): + api = driver._heleos_api + cookie = ((vip.get('session_persistence') or {}).get('type') == + lcon.SESSION_PERSISTENCE_HTTP_COOKIE) + return api.extract_dva_state(api.update_pool(vip['id'], + vip['admin_state_up'], + pool, cookie, monitors)) + + +@handler(econ.Events.ADD_OR_UPDATE_MEMBER, handlers) +def _add_or_update_pool_member(driver, context, vip, member, protocol): + api = driver._heleos_api + return api.extract_dva_state(api.update_backend_server( + vip['id'], vip['admin_state_up'], member, protocol)) + + +@handler(econ.Events.REMOVE_MEMBER, handlers) +def _remove_member_from_pool(driver, context, vip, member): + api = driver._heleos_api + return api.extract_dva_state(api.remove_pool_member(vip['id'], + vip['admin_state_up'], + member)) + + +@handler(econ.Events.DELETE_MEMBER, handlers) +def _delete_member(driver, context, vip, member): + with context.session.begin(subtransactions=True): + api = driver._heleos_api + dva = api.delete_backend_server(vip['id'], vip['admin_state_up'], + member) + 
driver._delete_member(context, member) + return api.extract_dva_state(dva) + + +@handler(econ.Events.ADD_POOL_HM, handlers) +def _create_pool_hm(driver, context, vip, hm, pool_id): + api = driver._heleos_api + return api.extract_dva_state(api.add_pool_monitor( + vip['id'], vip['admin_state_up'], hm, pool_id)) + + +@handler(econ.Events.UPDATE_POOL_HM, handlers) +def _update_pool_hm(driver, context, vip, hm, pool_id): + api = driver._heleos_api + return api.extract_dva_state(api.update_pool_monitor( + vip['id'], vip['admin_state_up'], hm, pool_id)) + + +@handler(econ.Events.DELETE_POOL_HM, handlers) +def _delete_pool_hm(driver, context, vip, hm, pool_id): + with context.session.begin(subtransactions=True): + api = driver._heleos_api + dva = api.add_pool_monitor(vip['id'], vip['admin_state_up'], + hm, pool_id) + driver._delete_pool_hm(context, hm, pool_id) + return api.extract_dva_state(dva) + + +@handler(econ.Events.POLL_GRAPH, handlers) +def _poll_graph(driver, context, vip): + api = driver._heleos_api + return api.extract_dva_state(api.get_dva(vip['id'])) diff --git a/neutron/services/loadbalancer/drivers/embrane/config.py b/neutron/services/loadbalancer/drivers/embrane/config.py new file mode 100644 index 000000000..cac9a63d5 --- /dev/null +++ b/neutron/services/loadbalancer/drivers/embrane/config.py @@ -0,0 +1,53 @@ +# Copyright 2014 Embrane, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Ivar Lazzaro, Embrane, Inc. 
ivar@embrane.com + +from oslo.config import cfg + +# User may want to use LB service together with the L3 plugin, but using +# different resources. The service will inherit the configuration from the +# L3 heleos plugin if present and not overridden. +heleos_opts = [ + cfg.StrOpt('esm_mgmt', + help=_('ESM management root address')), + cfg.StrOpt('admin_username', + help=_('ESM admin username.')), + cfg.StrOpt('admin_password', + secret=True, + help=_('ESM admin password.')), + cfg.StrOpt('lb_image', + help=_('Load Balancer image id (Embrane LB)')), + cfg.StrOpt('inband_id', + help=_('In band Security Zone id for LBs')), + cfg.StrOpt('oob_id', + help=_('Out of band Security Zone id for LBs')), + cfg.StrOpt('mgmt_id', + help=_('Management Security Zone id for LBs')), + cfg.StrOpt('dummy_utif_id', + help=_('Dummy user traffic Security Zone id for LBs')), + cfg.StrOpt('resource_pool_id', + help=_('Shared resource pool id')), + cfg.StrOpt('lb_flavor', default="small", + help=_('choose LB image flavor to use, accepted values: small, ' + 'medium')), + cfg.IntOpt('sync_interval', default=60, + help=_('resource synchronization interval in seconds')), + cfg.BoolOpt('async_requests', + help=_('Define if the requests have ' + 'run asynchronously or not')), +] + +cfg.CONF.register_opts(heleos_opts, 'heleoslb') diff --git a/neutron/services/loadbalancer/drivers/embrane/constants.py b/neutron/services/loadbalancer/drivers/embrane/constants.py new file mode 100644 index 000000000..61e609598 --- /dev/null +++ b/neutron/services/loadbalancer/drivers/embrane/constants.py @@ -0,0 +1,74 @@ +# Copyright 2014 Embrane, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Ivar Lazzaro, Embrane, Inc. ivar@embrane.com + +from heleosapi import constants as h_con +from heleosapi import exceptions as h_exc + +from neutron.plugins.common import constants as ccon + +DELETED = 'DELETED' # not visible status +QUEUE_TIMEOUT = 300 +BACK_SUB_LIMIT = 6 + + +class BackendActions: + UPDATE = 'update' + GROW = 'grow' + REMOVE = 'remove' + SHRINK = 'shrink' + + +class Events: + CREATE_VIP = 'create_vip' + UPDATE_VIP = 'update_vip' + DELETE_VIP = 'delete_vip' + UPDATE_POOL = 'update_pool' + UPDATE_MEMBER = 'update_member' + ADD_OR_UPDATE_MEMBER = 'add_or_update_member' + REMOVE_MEMBER = 'remove_member' + DELETE_MEMBER = 'delete_member' + POLL_GRAPH = 'poll_graph' + ADD_POOL_HM = "create_pool_hm" + UPDATE_POOL_HM = "update_pool_hm" + DELETE_POOL_HM = "delete_pool_hm" + + +_DVA_PENDING_ERROR_MSG = _('Dva is pending for the following reason: %s') +_DVA_NOT_FOUNT_ERROR_MSG = _('%s, ' + 'probably was cancelled through the heleos UI') +_DVA_BROKEN_ERROR_MSG = _('Dva seems to be broken for reason %s') +_DVA_CREATION_FAILED_ERROR_MSG = _('Dva creation failed reason %s') +_DVA_CREATION_PENDING_ERROR_MSG = _('Dva creation is in pending state ' + 'for reason %s') +_CFG_FAILED_ERROR_MSG = _('Dva configuration failed for reason %s') +_DVA_DEL_FAILED_ERROR_MSG = _('Failed to delete the backend ' + 'load balancer for reason %s. 
Please remove ' + 'it manually through the heleos UI') +NO_MEMBER_SUBNET_WARN = _('No subnet is associated to member %s (required ' + 'to identify the proper load balancer port)') + +error_map = {h_exc.PendingDva: _DVA_PENDING_ERROR_MSG, + h_exc.DvaNotFound: _DVA_NOT_FOUNT_ERROR_MSG, + h_exc.BrokenDva: _DVA_BROKEN_ERROR_MSG, + h_exc.DvaCreationFailed: _DVA_CREATION_FAILED_ERROR_MSG, + h_exc.DvaCreationPending: _DVA_CREATION_PENDING_ERROR_MSG, + h_exc.ConfigurationFailed: _CFG_FAILED_ERROR_MSG, + h_exc.DvaDeleteFailed: _DVA_DEL_FAILED_ERROR_MSG} + +state_map = {h_con.DvaState.POWER_ON: ccon.ACTIVE, + None: ccon.ERROR, + DELETED: DELETED} diff --git a/neutron/services/loadbalancer/drivers/embrane/db.py b/neutron/services/loadbalancer/drivers/embrane/db.py new file mode 100644 index 000000000..005fbe09a --- /dev/null +++ b/neutron/services/loadbalancer/drivers/embrane/db.py @@ -0,0 +1,56 @@ +# Copyright 2014 Embrane, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Ivar Lazzaro, Embrane, Inc. 
ivar@embrane.com + +import neutron.db.api as db +from neutron.db import models_v2 as nmodel +from neutron.services.loadbalancer.drivers.embrane import models + + +def initialize(): + db.configure_db() + + +def add_pool_port(context, pool_id, port_id): + session = context.session + with session.begin(subtransactions=True): + pool_port = models.PoolPort() + pool_port.pool_id = pool_id + pool_port.port_id = port_id + session.add(pool_port) + + +def get_pool_port(context, pool_id): + return (context.session.query(models.PoolPort).filter_by(pool_id=pool_id). + first()) + + +def delete_pool_backend(context, pool_id): + session = context.session + backend = (session.query(models.PoolPort).filter_by( + pool_id=pool_id)) + for b in backend: + delete_pool_port(context, b) + + +def delete_pool_port(context, backend_port): + session = context.session + with session.begin(subtransactions=True): + port = (session.query(nmodel.Port).filter_by( + id=backend_port['port_id'])).first() + if port: + session.delete(backend_port) + session.delete(port) diff --git a/neutron/services/loadbalancer/drivers/embrane/driver.py b/neutron/services/loadbalancer/drivers/embrane/driver.py new file mode 100644 index 000000000..83497b32c --- /dev/null +++ b/neutron/services/loadbalancer/drivers/embrane/driver.py @@ -0,0 +1,342 @@ +# Copyright 2014 Embrane, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Ivar Lazzaro, Embrane, Inc. 
ivar@embrane.com + +from heleosapi import backend_operations as h_op +from heleosapi import constants as h_con +from heleosapi import info as h_info +from oslo.config import cfg + +from neutron.api.v2 import attributes +from neutron.common import exceptions as n_exc +from neutron.db.loadbalancer import loadbalancer_db as ldb +from neutron.extensions import loadbalancer as lb_ext +from neutron.openstack.common import log as logging +from neutron.plugins.common import constants as pcon +from neutron.plugins.embrane.common import contexts as embrane_ctx +from neutron.plugins.embrane.common import exceptions as h_exc +from neutron.plugins.embrane.common import utils +from neutron.services.loadbalancer import constants as lbcon +from neutron.services.loadbalancer.drivers import abstract_driver +from neutron.services.loadbalancer.drivers.embrane.agent import dispatcher +from neutron.services.loadbalancer.drivers.embrane import config # noqa +from neutron.services.loadbalancer.drivers.embrane import constants as econ +from neutron.services.loadbalancer.drivers.embrane import db as edb +from neutron.services.loadbalancer.drivers.embrane import poller + +LOG = logging.getLogger(__name__) +conf = cfg.CONF.heleoslb +confh = {} + +try: + confh = cfg.CONF.heleos +except cfg.NoSuchOptError: + pass + + +def get_conf(x): + try: + return conf.get(x) or confh.get(x) + except cfg.NoSuchOptError: + return + + +class EmbraneLbaas(abstract_driver.LoadBalancerAbstractDriver): + def __init__(self, plugin): + edb.initialize() + config_esm_mgmt = get_conf('esm_mgmt') + config_admin_username = get_conf('admin_username') + config_admin_password = get_conf('admin_password') + config_lb_image_id = get_conf('lb_image') + config_security_zones = {h_con.SzType.IB: get_conf('inband_id'), + h_con.SzType.OOB: get_conf('oob_id'), + h_con.SzType.MGMT: get_conf('mgmt_id'), + h_con.SzType.DUMMY: get_conf('dummy_utif_id')} + config_resource_pool = get_conf('resource_pool_id') + self._heleos_api = 
h_op.BackendOperations( + esm_mgmt=config_esm_mgmt, + admin_username=config_admin_username, + admin_password=config_admin_password, + lb_image_id=config_lb_image_id, + security_zones=config_security_zones, + resource_pool=config_resource_pool) + self._dispatcher = dispatcher.Dispatcher( + self, get_conf("async_requests")) + self.plugin = plugin + poll_interval = conf.get('sync_interval') + if poll_interval > 0: + self._loop_call = poller.Poller(self) + self._loop_call.start_polling(conf.get('sync_interval')) + self._flavor = get_conf('lb_flavor') + + def _validate_vip(self, vip): + if vip.get('connection_limit') and vip['connection_limit'] != -1: + raise h_exc.UnsupportedException( + err_msg=_('Connection limit is not supported by Embrane LB')) + persistance = vip.get('session_persistence') + if (persistance and persistance.get('type') == + lbcon.SESSION_PERSISTENCE_APP_COOKIE): + p_type = vip['session_persistence']['type'] + raise h_exc.UnsupportedException( + err_msg=_('Session persistence %s ' + 'not supported by Embrane LBaaS') % p_type) + + def _delete_vip(self, context, vip): + with context.session.begin(subtransactions=True): + self.plugin._delete_db_vip(context, vip['id']) + return econ.DELETED + + def _delete_member(self, context, member): + self.plugin._delete_db_member(context, member['id']) + + def _delete_pool_hm(self, context, health_monitor, pool_id): + self.plugin._delete_db_pool_health_monitor(context, + health_monitor['id'], + pool_id) + + def _update_vip_graph_state(self, context, vip): + self._heleos_api.update_vip_status(vip) + self.plugin.update_status(context, ldb.Vip, vip['id'], + vip['status']) + if vip['status'] != pcon.ERROR: + pool = self.plugin.get_pool(context, vip['pool_id']) + pool_members = pool['members'] + # Manages possible manual changes and monitor actions + self._heleos_api.update_pool_status(vip['id'], pool) + self._heleos_api.update_members_status(vip['id'], pool['id'], + pool_members) + self.plugin.update_status(context, 
ldb.Pool, pool['id'], + pool['status']) + for member in pool_members: + self.plugin.update_status(context, ldb.Member, + member['id'], member['status']) + + def _create_backend_port(self, context, db_pool): + try: + subnet = self.plugin._core_plugin.get_subnet(context, + db_pool["subnet_id"]) + except n_exc.SubnetNotFound: + LOG.warning(_("Subnet assigned to pool %s doesn't exist, " + "backend port can't be created"), db_pool['id']) + return + + fixed_ip = {'subnet_id': subnet['id'], + 'fixed_ips': attributes.ATTR_NOT_SPECIFIED} + + port_data = { + 'tenant_id': db_pool['tenant_id'], + 'name': 'pool-' + db_pool['id'], + 'network_id': subnet['network_id'], + 'mac_address': attributes.ATTR_NOT_SPECIFIED, + 'admin_state_up': False, + 'device_id': '', + 'device_owner': '', + 'fixed_ips': [fixed_ip] + } + + port = self.plugin._core_plugin.create_port(context, + {'port': port_data}) + return edb.add_pool_port(context, db_pool['id'], port['id']) + + def _retrieve_utif_info(self, context, neutron_port): + network = self.plugin._core_plugin.get_network( + context, neutron_port['network_id']) + result = h_info.UtifInfo(network.get('provider:segmentation_id'), + network['name'], + network['id'], + False, + network['tenant_id'], + neutron_port['id'], + neutron_port['mac_address'], + network.get('provider:network_type')) + return result + + def create_vip(self, context, vip): + self._validate_vip(vip) + db_vip = self.plugin.populate_vip_graph(context, vip) + vip_port = self.plugin._core_plugin._get_port(context, + db_vip['port_id']) + vip_utif_info = self._retrieve_utif_info(context, vip_port) + vip_ip_allocation_info = utils.retrieve_ip_allocation_info( + context, vip_port) + vip_ip_allocation_info.is_gw = True + db_pool = pool_utif_info = pool_ip_allocation_info = None + members = monitors = [] + if db_vip['pool_id']: + db_pool = self.plugin.get_pool( + context, db_vip['pool_id']) + pool_port = edb.get_pool_port(context, db_pool["id"]) + if pool_port: + db_port = 
self.plugin._core_plugin._get_port( + context, pool_port["port_id"]) + pool_utif_info = self._retrieve_utif_info(context, db_port) + pool_ip_allocation_info = utils.retrieve_ip_allocation_info( + context, db_port) + members = self.plugin.get_members( + context, filters={'id': db_pool['members']}) + monitors = self.plugin.get_members( + context, filters={'id': db_pool['health_monitors']}) + self._dispatcher.dispatch_lb( + embrane_ctx.DispatcherContext(econ.Events.CREATE_VIP, + db_vip, context, None), + self._flavor, vip_utif_info, vip_ip_allocation_info, + pool_utif_info, pool_ip_allocation_info, db_pool, members, + monitors) + + def update_vip(self, context, old_vip, vip): + new_pool = old_port_id = removed_ip = None + new_pool_utif = new_pool_ip_allocation = None + old_pool = {} + members = monitors = [] + if old_vip['pool_id'] != vip['pool_id']: + new_pool = self.plugin.get_pool( + context, vip['pool_id']) + members = self.plugin.get_members( + context, filters={'id': new_pool['members']}) + monitors = self.plugin.get_members( + context, filters={'id': new_pool['health_monitors']}) + new_pool_port = edb.get_pool_port(context, new_pool["id"]) + if new_pool_port: + db_port = self.plugin._core_plugin._get_port( + context, new_pool_port["port_id"]) + new_pool_utif = self._retrieve_utif_info(context, db_port) + new_pool_ip_allocation = utils.retrieve_ip_allocation_info( + context, db_port) + old_pool = self.plugin.get_pool( + context, old_vip['pool_id']) + old_pool_port = edb.get_pool_port(context, old_pool["id"]) + if old_pool_port: + old_port = self.plugin._core_plugin._get_port( + context, old_pool_port['port_id']) + # remove that subnet ip + removed_ip = old_port['fixed_ips'][0]['ip_address'] + old_port_id = old_port['id'] + + self._dispatcher.dispatch_lb( + embrane_ctx.DispatcherContext(econ.Events.UPDATE_VIP, vip, + context, None), + old_pool.get('id'), old_port_id, removed_ip, new_pool_utif, + new_pool_ip_allocation, new_pool, members, monitors) + + def 
delete_vip(self, context, vip): + db_vip = self.plugin.populate_vip_graph(context, vip) + self._dispatcher.dispatch_lb( + embrane_ctx.DispatcherContext( + econ.Events.DELETE_VIP, db_vip, context, None)) + + def create_pool(self, context, pool): + if pool['subnet_id']: + self._create_backend_port(context, pool) + + def update_pool(self, context, old_pool, pool): + with context.session.begin(subtransactions=True): + if old_pool['vip_id']: + try: + db_vip = self.plugin._get_resource( + context, ldb.Vip, old_pool['vip_id']) + except lb_ext.VipNotFound: + return + monitors = self.plugin.get_members( + context, filters={'id': old_pool['health_monitors']}) + self._dispatcher.dispatch_lb( + embrane_ctx.DispatcherContext(econ.Events.UPDATE_POOL, + db_vip, context, None), + pool, monitors) + + def delete_pool(self, context, pool): + edb.delete_pool_backend(context, pool['id']) + self.plugin._delete_db_pool(context, pool['id']) + + def create_member(self, context, member): + db_pool = self.plugin.get_pool(context, member['pool_id']) + if db_pool['vip_id']: + db_vip = self.plugin._get_resource(context, ldb.Vip, + db_pool['vip_id']) + self._dispatcher.dispatch_lb( + embrane_ctx.DispatcherContext( + econ.Events.ADD_OR_UPDATE_MEMBER, db_vip, context, None), + member, db_pool['protocol']) + + def update_member(self, context, old_member, member): + db_pool = self.plugin.get_pool(context, member['pool_id']) + if member['pool_id'] != old_member['pool_id']: + old_pool = self.plugin.get_pool(context, old_member['pool_id']) + if old_pool['vip_id']: + db_vip = self.plugin._get_resource(context, ldb.Vip, + old_pool['vip_id']) + self._dispatcher.dispatch_lb( + embrane_ctx.DispatcherContext( + econ.Events.REMOVE_MEMBER, db_vip, context, None), + old_member) + if db_pool['vip_id']: + db_vip = self.plugin._get_resource( + context, ldb.Vip, db_pool['vip_id']) + self._dispatcher.dispatch_lb( + embrane_ctx.DispatcherContext( + econ.Events.ADD_OR_UPDATE_MEMBER, db_vip, context, None), + member, 
db_pool['protocol']) + + def delete_member(self, context, member): + db_pool = self.plugin.get_pool(context, member['pool_id']) + if db_pool['vip_id']: + db_vip = self.plugin._get_resource(context, ldb.Vip, + db_pool['vip_id']) + self._dispatcher.dispatch_lb( + embrane_ctx.DispatcherContext( + econ.Events.DELETE_MEMBER, db_vip, context, None), + member) + else: + self._delete_member(context, member) + + def stats(self, context, pool_id): + return {'bytes_in': 0, + 'bytes_out': 0, + 'active_connections': 0, + 'total_connections': 0} + + def create_pool_health_monitor(self, context, health_monitor, pool_id): + db_pool = self.plugin.get_pool(context, pool_id) + # API call only if vip exists + if db_pool['vip_id']: + db_vip = self.plugin._get_resource(context, ldb.Vip, + db_pool['vip_id']) + self._dispatcher.dispatch_lb( + embrane_ctx.DispatcherContext( + econ.Events.ADD_POOL_HM, db_vip, context, None), + health_monitor, pool_id) + + def update_pool_health_monitor(self, context, old_health_monitor, + health_monitor, pool_id): + db_pool = self.plugin.get_pool(context, pool_id) + if db_pool['vip_id']: + db_vip = self.plugin._get_resource(context, ldb.Vip, + db_pool['vip_id']) + self._dispatcher.dispatch_lb( + embrane_ctx.DispatcherContext( + econ.Events.UPDATE_POOL_HM, db_vip, context, None), + health_monitor, pool_id) + + def delete_pool_health_monitor(self, context, health_monitor, pool_id): + db_pool = self.plugin.get_pool(context, pool_id) + if db_pool['vip_id']: + db_vip = self.plugin._get_resource(context, ldb.Vip, + db_pool['vip_id']) + self._dispatcher.dispatch_lb( + embrane_ctx.DispatcherContext( + econ.Events.DELETE_POOL_HM, db_vip, context, None), + health_monitor, pool_id) + else: + self._delete_pool_hm(context, health_monitor, pool_id) diff --git a/neutron/services/loadbalancer/drivers/embrane/models.py b/neutron/services/loadbalancer/drivers/embrane/models.py new file mode 100644 index 000000000..51adfcf24 --- /dev/null +++ 
b/neutron/services/loadbalancer/drivers/embrane/models.py @@ -0,0 +1,30 @@ +# Copyright 2014 Embrane, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Ivar Lazzaro, Embrane, Inc. ivar@embrane.com + +import sqlalchemy as sql + +from neutron.db import model_base + + +class PoolPort(model_base.BASEV2): + """Represents the connection between pools and ports.""" + __tablename__ = 'embrane_pool_port' + + pool_id = sql.Column(sql.String(36), sql.ForeignKey('pools.id'), + primary_key=True) + port_id = sql.Column(sql.String(36), sql.ForeignKey('ports.id'), + nullable=False) diff --git a/neutron/services/loadbalancer/drivers/embrane/poller.py b/neutron/services/loadbalancer/drivers/embrane/poller.py new file mode 100644 index 000000000..bf36079cf --- /dev/null +++ b/neutron/services/loadbalancer/drivers/embrane/poller.py @@ -0,0 +1,71 @@ +# Copyright 2014 Embrane, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# +# @author: Ivar Lazzaro, Embrane, Inc. ivar@embrane.com + +from heleosapi import exceptions as h_exc + +from neutron import context +from neutron.db.loadbalancer import loadbalancer_db as ldb +from neutron.db import servicetype_db as sdb +from neutron.openstack.common import log as logging +from neutron.openstack.common import loopingcall +from neutron.plugins.common import constants as ccon +from neutron.plugins.embrane.common import contexts as embrane_ctx +from neutron.services.loadbalancer.drivers.embrane import constants as econ + +LOG = logging.getLogger(__name__) +skip_states = [ccon.PENDING_CREATE, + ccon.PENDING_DELETE, + ccon.PENDING_UPDATE, + ccon.ERROR] + + +class Poller(object): + def __init__(self, driver): + self.dispatcher = driver._dispatcher + service_type_manager = sdb.ServiceTypeManager.get_instance() + self.provider = (service_type_manager.get_service_providers( + None, filters={ + 'service_type': [ccon.LOADBALANCER], + 'driver': ['neutron.services.loadbalancer.drivers.' 
+ 'embrane.driver.EmbraneLbaas']}))[0]['name'] + + def start_polling(self, interval): + loop_call = loopingcall.FixedIntervalLoopingCall(self._run) + loop_call.start(interval=interval) + return loop_call + + def _run(self): + ctx = context.get_admin_context() + try: + self.synchronize_vips(ctx) + except h_exc.PollingException as e: + LOG.exception(_('Unhandled exception occurred'), e) + + def synchronize_vips(self, ctx): + session = ctx.session + vips = session.query(ldb.Vip).join( + sdb.ProviderResourceAssociation, + sdb.ProviderResourceAssociation.resource_id == + ldb.Vip.pool_id).filter( + sdb.ProviderResourceAssociation.provider_name == self.provider) + # No need to check pending states + for vip in vips: + if vip['status'] not in skip_states: + self.dispatcher.dispatch_lb( + d_context=embrane_ctx.DispatcherContext( + econ.Events.POLL_GRAPH, vip, ctx, None), + args=()) diff --git a/neutron/services/loadbalancer/drivers/haproxy/__init__.py b/neutron/services/loadbalancer/drivers/haproxy/__init__.py new file mode 100644 index 000000000..ce18bf6d6 --- /dev/null +++ b/neutron/services/loadbalancer/drivers/haproxy/__init__.py @@ -0,0 +1,17 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013 New Dream Network, LLC (DreamHost) +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# +# @author: Mark McClain, DreamHost diff --git a/neutron/services/loadbalancer/drivers/haproxy/cfg.py b/neutron/services/loadbalancer/drivers/haproxy/cfg.py new file mode 100644 index 000000000..815334f81 --- /dev/null +++ b/neutron/services/loadbalancer/drivers/haproxy/cfg.py @@ -0,0 +1,238 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013 New Dream Network, LLC (DreamHost) +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Mark McClain, DreamHost + +import itertools +from six import moves + +from neutron.agent.linux import utils +from neutron.plugins.common import constants as qconstants +from neutron.services.loadbalancer import constants + + +PROTOCOL_MAP = { + constants.PROTOCOL_TCP: 'tcp', + constants.PROTOCOL_HTTP: 'http', + constants.PROTOCOL_HTTPS: 'tcp', +} + +BALANCE_MAP = { + constants.LB_METHOD_ROUND_ROBIN: 'roundrobin', + constants.LB_METHOD_LEAST_CONNECTIONS: 'leastconn', + constants.LB_METHOD_SOURCE_IP: 'source' +} + +STATS_MAP = { + constants.STATS_ACTIVE_CONNECTIONS: 'scur', + constants.STATS_MAX_CONNECTIONS: 'smax', + constants.STATS_CURRENT_SESSIONS: 'scur', + constants.STATS_MAX_SESSIONS: 'smax', + constants.STATS_TOTAL_CONNECTIONS: 'stot', + constants.STATS_TOTAL_SESSIONS: 'stot', + constants.STATS_IN_BYTES: 'bin', + constants.STATS_OUT_BYTES: 'bout', + constants.STATS_CONNECTION_ERRORS: 'econ', + constants.STATS_RESPONSE_ERRORS: 'eresp' +} + +ACTIVE_PENDING_STATUSES = qconstants.ACTIVE_PENDING_STATUSES +INACTIVE 
= qconstants.INACTIVE + + +def save_config(conf_path, logical_config, socket_path=None, + user_group='nogroup'): + """Convert a logical configuration to the HAProxy version.""" + data = [] + data.extend(_build_global(logical_config, socket_path=socket_path, + user_group=user_group)) + data.extend(_build_defaults(logical_config)) + data.extend(_build_frontend(logical_config)) + data.extend(_build_backend(logical_config)) + utils.replace_file(conf_path, '\n'.join(data)) + + +def _build_global(config, socket_path=None, user_group='nogroup'): + opts = [ + 'daemon', + 'user nobody', + 'group %s' % user_group, + 'log /dev/log local0', + 'log /dev/log local1 notice' + ] + + if socket_path: + opts.append('stats socket %s mode 0666 level user' % socket_path) + + return itertools.chain(['global'], ('\t' + o for o in opts)) + + +def _build_defaults(config): + opts = [ + 'log global', + 'retries 3', + 'option redispatch', + 'timeout connect 5000', + 'timeout client 50000', + 'timeout server 50000', + ] + + return itertools.chain(['defaults'], ('\t' + o for o in opts)) + + +def _build_frontend(config): + protocol = config['vip']['protocol'] + + opts = [ + 'option tcplog', + 'bind %s:%d' % ( + _get_first_ip_from_port(config['vip']['port']), + config['vip']['protocol_port'] + ), + 'mode %s' % PROTOCOL_MAP[protocol], + 'default_backend %s' % config['pool']['id'], + ] + + if config['vip']['connection_limit'] >= 0: + opts.append('maxconn %s' % config['vip']['connection_limit']) + + if protocol == constants.PROTOCOL_HTTP: + opts.append('option forwardfor') + + return itertools.chain( + ['frontend %s' % config['vip']['id']], + ('\t' + o for o in opts) + ) + + +def _build_backend(config): + protocol = config['pool']['protocol'] + lb_method = config['pool']['lb_method'] + + opts = [ + 'mode %s' % PROTOCOL_MAP[protocol], + 'balance %s' % BALANCE_MAP.get(lb_method, 'roundrobin') + ] + + if protocol == constants.PROTOCOL_HTTP: + opts.append('option forwardfor') + + # add the first 
health_monitor (if available) + server_addon, health_opts = _get_server_health_option(config) + opts.extend(health_opts) + + # add session persistence (if available) + persist_opts = _get_session_persistence(config) + opts.extend(persist_opts) + + # add the members + for member in config['members']: + if ((member['status'] in ACTIVE_PENDING_STATUSES or + member['status'] == INACTIVE) + and member['admin_state_up']): + server = (('server %(id)s %(address)s:%(protocol_port)s ' + 'weight %(weight)s') % member) + server_addon + if _has_http_cookie_persistence(config): + server += ' cookie %d' % config['members'].index(member) + opts.append(server) + + return itertools.chain( + ['backend %s' % config['pool']['id']], + ('\t' + o for o in opts) + ) + + +def _get_first_ip_from_port(port): + for fixed_ip in port['fixed_ips']: + return fixed_ip['ip_address'] + + +def _get_server_health_option(config): + """return the first active health option.""" + for monitor in config['healthmonitors']: + # not checking the status of healthmonitor for two reasons: + # 1) status field is absent in HealthMonitor model + # 2) only active HealthMonitors are fetched with + # LoadBalancerCallbacks.get_logical_device + if monitor['admin_state_up']: + break + else: + return '', [] + + server_addon = ' check inter %(delay)ds fall %(max_retries)d' % monitor + opts = [ + 'timeout check %ds' % monitor['timeout'] + ] + + if monitor['type'] in (constants.HEALTH_MONITOR_HTTP, + constants.HEALTH_MONITOR_HTTPS): + opts.append('option httpchk %(http_method)s %(url_path)s' % monitor) + opts.append( + 'http-check expect rstatus %s' % + '|'.join(_expand_expected_codes(monitor['expected_codes'])) + ) + + if monitor['type'] == constants.HEALTH_MONITOR_HTTPS: + opts.append('option ssl-hello-chk') + + return server_addon, opts + + +def _get_session_persistence(config): + persistence = config['vip'].get('session_persistence') + if not persistence: + return [] + + opts = [] + if persistence['type'] == 
constants.SESSION_PERSISTENCE_SOURCE_IP: + opts.append('stick-table type ip size 10k') + opts.append('stick on src') + elif (persistence['type'] == constants.SESSION_PERSISTENCE_HTTP_COOKIE and + config.get('members')): + opts.append('cookie SRV insert indirect nocache') + elif (persistence['type'] == constants.SESSION_PERSISTENCE_APP_COOKIE and + persistence.get('cookie_name')): + opts.append('appsession %s len 56 timeout 3h' % + persistence['cookie_name']) + + return opts + + +def _has_http_cookie_persistence(config): + return (config['vip'].get('session_persistence') and + config['vip']['session_persistence']['type'] == + constants.SESSION_PERSISTENCE_HTTP_COOKIE) + + +def _expand_expected_codes(codes): + """Expand the expected code string in set of codes. + + 200-204 -> 200, 201, 202, 204 + 200, 203 -> 200, 203 + """ + + retval = set() + for code in codes.replace(',', ' ').split(' '): + code = code.strip() + + if not code: + continue + elif '-' in code: + low, hi = code.split('-')[:2] + retval.update(str(i) for i in moves.xrange(int(low), int(hi) + 1)) + else: + retval.add(code) + return retval diff --git a/neutron/services/loadbalancer/drivers/haproxy/namespace_driver.py b/neutron/services/loadbalancer/drivers/haproxy/namespace_driver.py new file mode 100644 index 000000000..b15f7d864 --- /dev/null +++ b/neutron/services/loadbalancer/drivers/haproxy/namespace_driver.py @@ -0,0 +1,396 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013 New Dream Network, LLC (DreamHost) +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Mark McClain, DreamHost +import os +import shutil +import socket + +import netaddr +from oslo.config import cfg + +from neutron.agent.common import config +from neutron.agent.linux import ip_lib +from neutron.agent.linux import utils +from neutron.common import exceptions +from neutron.common import utils as n_utils +from neutron.openstack.common import excutils +from neutron.openstack.common import importutils +from neutron.openstack.common import log as logging +from neutron.plugins.common import constants +from neutron.services.loadbalancer.agent import agent_device_driver +from neutron.services.loadbalancer import constants as lb_const +from neutron.services.loadbalancer.drivers.haproxy import cfg as hacfg + +LOG = logging.getLogger(__name__) +NS_PREFIX = 'qlbaas-' +DRIVER_NAME = 'haproxy_ns' + +STATE_PATH_DEFAULT = '$state_path/lbaas' +USER_GROUP_DEFAULT = 'nogroup' +OPTS = [ + cfg.StrOpt( + 'loadbalancer_state_path', + default=STATE_PATH_DEFAULT, + help=_('Location to store config and state files'), + deprecated_opts=[cfg.DeprecatedOpt('loadbalancer_state_path')], + ), + cfg.StrOpt( + 'user_group', + default=USER_GROUP_DEFAULT, + help=_('The user group'), + deprecated_opts=[cfg.DeprecatedOpt('user_group')], + ), + cfg.IntOpt( + 'send_gratuitous_arp', + default=3, + help=_('When delete and re-add the same vip, send this many ' + 'gratuitous ARPs to flush the ARP cache in the Router. 
' + 'Set it below or equal to 0 to disable this feature.'), + ) +] +cfg.CONF.register_opts(OPTS, 'haproxy') + + +class HaproxyNSDriver(agent_device_driver.AgentDeviceDriver): + def __init__(self, conf, plugin_rpc): + self.conf = conf + self.root_helper = config.get_root_helper(conf) + self.state_path = conf.haproxy.loadbalancer_state_path + try: + vif_driver = importutils.import_object(conf.interface_driver, conf) + except ImportError: + with excutils.save_and_reraise_exception(): + msg = (_('Error importing interface driver: %s') + % conf.haproxy.interface_driver) + LOG.error(msg) + + self.vif_driver = vif_driver + self.plugin_rpc = plugin_rpc + self.pool_to_port_id = {} + + @classmethod + def get_name(cls): + return DRIVER_NAME + + def create(self, logical_config): + pool_id = logical_config['pool']['id'] + namespace = get_ns_name(pool_id) + + self._plug(namespace, logical_config['vip']['port']) + self._spawn(logical_config) + + def update(self, logical_config): + pool_id = logical_config['pool']['id'] + pid_path = self._get_state_file_path(pool_id, 'pid') + + extra_args = ['-sf'] + extra_args.extend(p.strip() for p in open(pid_path, 'r')) + self._spawn(logical_config, extra_args) + + def _spawn(self, logical_config, extra_cmd_args=()): + pool_id = logical_config['pool']['id'] + namespace = get_ns_name(pool_id) + conf_path = self._get_state_file_path(pool_id, 'conf') + pid_path = self._get_state_file_path(pool_id, 'pid') + sock_path = self._get_state_file_path(pool_id, 'sock') + user_group = self.conf.haproxy.user_group + + hacfg.save_config(conf_path, logical_config, sock_path, user_group) + cmd = ['haproxy', '-f', conf_path, '-p', pid_path] + cmd.extend(extra_cmd_args) + + ns = ip_lib.IPWrapper(self.root_helper, namespace) + ns.netns.execute(cmd) + + # remember the pool<>port mapping + self.pool_to_port_id[pool_id] = logical_config['vip']['port']['id'] + + @n_utils.synchronized('haproxy-driver') + def undeploy_instance(self, pool_id, cleanup_namespace=False): + 
namespace = get_ns_name(pool_id) + ns = ip_lib.IPWrapper(self.root_helper, namespace) + pid_path = self._get_state_file_path(pool_id, 'pid') + + # kill the process + kill_pids_in_file(self.root_helper, pid_path) + + # unplug the ports + if pool_id in self.pool_to_port_id: + self._unplug(namespace, self.pool_to_port_id[pool_id]) + + # delete all devices from namespace; + # used when deleting orphans and port_id is not known for pool_id + if cleanup_namespace: + for device in ns.get_devices(exclude_loopback=True): + self.vif_driver.unplug(device.name, namespace=namespace) + + # remove the configuration directory + conf_dir = os.path.dirname(self._get_state_file_path(pool_id, '')) + if os.path.isdir(conf_dir): + shutil.rmtree(conf_dir) + ns.garbage_collect_namespace() + + def exists(self, pool_id): + namespace = get_ns_name(pool_id) + root_ns = ip_lib.IPWrapper(self.root_helper) + + socket_path = self._get_state_file_path(pool_id, 'sock') + if root_ns.netns.exists(namespace) and os.path.exists(socket_path): + try: + s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) + s.connect(socket_path) + return True + except socket.error: + pass + return False + + def get_stats(self, pool_id): + socket_path = self._get_state_file_path(pool_id, 'sock') + TYPE_BACKEND_REQUEST = 2 + TYPE_SERVER_REQUEST = 4 + if os.path.exists(socket_path): + parsed_stats = self._get_stats_from_socket( + socket_path, + entity_type=TYPE_BACKEND_REQUEST | TYPE_SERVER_REQUEST) + pool_stats = self._get_backend_stats(parsed_stats) + pool_stats['members'] = self._get_servers_stats(parsed_stats) + return pool_stats + else: + LOG.warn(_('Stats socket not found for pool %s'), pool_id) + return {} + + def _get_backend_stats(self, parsed_stats): + TYPE_BACKEND_RESPONSE = '1' + for stats in parsed_stats: + if stats.get('type') == TYPE_BACKEND_RESPONSE: + unified_stats = dict((k, stats.get(v, '')) + for k, v in hacfg.STATS_MAP.items()) + return unified_stats + + return {} + + def _get_servers_stats(self, 
parsed_stats): + TYPE_SERVER_RESPONSE = '2' + res = {} + for stats in parsed_stats: + if stats.get('type') == TYPE_SERVER_RESPONSE: + res[stats['svname']] = { + lb_const.STATS_STATUS: (constants.INACTIVE + if stats['status'] == 'DOWN' + else constants.ACTIVE), + lb_const.STATS_HEALTH: stats['check_status'], + lb_const.STATS_FAILED_CHECKS: stats['chkfail'] + } + return res + + def _get_stats_from_socket(self, socket_path, entity_type): + try: + s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) + s.connect(socket_path) + s.send('show stat -1 %s -1\n' % entity_type) + raw_stats = '' + chunk_size = 1024 + while True: + chunk = s.recv(chunk_size) + raw_stats += chunk + if len(chunk) < chunk_size: + break + + return self._parse_stats(raw_stats) + except socket.error as e: + LOG.warn(_('Error while connecting to stats socket: %s'), e) + return {} + + def _parse_stats(self, raw_stats): + stat_lines = raw_stats.splitlines() + if len(stat_lines) < 2: + return [] + stat_names = [name.strip('# ') for name in stat_lines[0].split(',')] + res_stats = [] + for raw_values in stat_lines[1:]: + if not raw_values: + continue + stat_values = [value.strip() for value in raw_values.split(',')] + res_stats.append(dict(zip(stat_names, stat_values))) + + return res_stats + + def _get_state_file_path(self, pool_id, kind, ensure_state_dir=True): + """Returns the file name for a given kind of config file.""" + confs_dir = os.path.abspath(os.path.normpath(self.state_path)) + conf_dir = os.path.join(confs_dir, pool_id) + if ensure_state_dir: + if not os.path.isdir(conf_dir): + os.makedirs(conf_dir, 0o755) + return os.path.join(conf_dir, kind) + + def _plug(self, namespace, port, reuse_existing=True): + self.plugin_rpc.plug_vip_port(port['id']) + interface_name = self.vif_driver.get_device_name(Wrap(port)) + + if ip_lib.device_exists(interface_name, self.root_helper, namespace): + if not reuse_existing: + raise exceptions.PreexistingDeviceFailure( + dev_name=interface_name + ) + else: + 
self.vif_driver.plug( + port['network_id'], + port['id'], + interface_name, + port['mac_address'], + namespace=namespace + ) + + cidrs = [ + '%s/%s' % (ip['ip_address'], + netaddr.IPNetwork(ip['subnet']['cidr']).prefixlen) + for ip in port['fixed_ips'] + ] + self.vif_driver.init_l3(interface_name, cidrs, namespace=namespace) + + gw_ip = port['fixed_ips'][0]['subnet'].get('gateway_ip') + + if not gw_ip: + host_routes = port['fixed_ips'][0]['subnet'].get('host_routes', []) + for host_route in host_routes: + if host_route['destination'] == "0.0.0.0/0": + gw_ip = host_route['nexthop'] + break + + if gw_ip: + cmd = ['route', 'add', 'default', 'gw', gw_ip] + ip_wrapper = ip_lib.IPWrapper(self.root_helper, + namespace=namespace) + ip_wrapper.netns.execute(cmd, check_exit_code=False) + # When delete and re-add the same vip, we need to + # send gratuitous ARP to flush the ARP cache in the Router. + gratuitous_arp = self.conf.haproxy.send_gratuitous_arp + if gratuitous_arp > 0: + for ip in port['fixed_ips']: + cmd_arping = ['arping', '-U', + '-I', interface_name, + '-c', gratuitous_arp, + ip['ip_address']] + ip_wrapper.netns.execute(cmd_arping, check_exit_code=False) + + def _unplug(self, namespace, port_id): + port_stub = {'id': port_id} + self.plugin_rpc.unplug_vip_port(port_id) + interface_name = self.vif_driver.get_device_name(Wrap(port_stub)) + self.vif_driver.unplug(interface_name, namespace=namespace) + + @n_utils.synchronized('haproxy-driver') + def deploy_instance(self, logical_config): + # do actual deploy only if vip and pool are configured and active + if (not logical_config or + 'vip' not in logical_config or + (logical_config['vip']['status'] not in + constants.ACTIVE_PENDING_STATUSES) or + not logical_config['vip']['admin_state_up'] or + (logical_config['pool']['status'] not in + constants.ACTIVE_PENDING_STATUSES) or + not logical_config['pool']['admin_state_up']): + return + + if self.exists(logical_config['pool']['id']): + self.update(logical_config) + else: 
+ self.create(logical_config) + + def _refresh_device(self, pool_id): + logical_config = self.plugin_rpc.get_logical_device(pool_id) + self.deploy_instance(logical_config) + + def create_vip(self, vip): + self._refresh_device(vip['pool_id']) + + def update_vip(self, old_vip, vip): + self._refresh_device(vip['pool_id']) + + def delete_vip(self, vip): + self.undeploy_instance(vip['pool_id']) + + def create_pool(self, pool): + # nothing to do here because a pool needs a vip to be useful + pass + + def update_pool(self, old_pool, pool): + self._refresh_device(pool['id']) + + def delete_pool(self, pool): + # delete_pool may be called before vip deletion in case + # pool's admin state set to down + if self.exists(pool['id']): + self.undeploy_instance(pool['id']) + + def create_member(self, member): + self._refresh_device(member['pool_id']) + + def update_member(self, old_member, member): + self._refresh_device(member['pool_id']) + + def delete_member(self, member): + self._refresh_device(member['pool_id']) + + def create_pool_health_monitor(self, health_monitor, pool_id): + self._refresh_device(pool_id) + + def update_pool_health_monitor(self, old_health_monitor, health_monitor, + pool_id): + self._refresh_device(pool_id) + + def delete_pool_health_monitor(self, health_monitor, pool_id): + self._refresh_device(pool_id) + + def remove_orphans(self, known_pool_ids): + if not os.path.exists(self.state_path): + return + + orphans = (pool_id for pool_id in os.listdir(self.state_path) + if pool_id not in known_pool_ids) + for pool_id in orphans: + if self.exists(pool_id): + self.undeploy_instance(pool_id, cleanup_namespace=True) + + +# NOTE (markmcclain) For compliance with interface.py which expects objects +class Wrap(object): + """A light attribute wrapper for compatibility with the interface lib.""" + def __init__(self, d): + self.__dict__.update(d) + + def __getitem__(self, key): + return self.__dict__[key] + + +def get_ns_name(namespace_id): + return NS_PREFIX + 
namespace_id + + +def kill_pids_in_file(root_helper, pid_path): + if os.path.exists(pid_path): + with open(pid_path, 'r') as pids: + for pid in pids: + pid = pid.strip() + try: + utils.execute(['kill', '-9', pid], root_helper) + except RuntimeError: + LOG.exception( + _('Unable to kill haproxy process: %s'), + pid + ) diff --git a/neutron/services/loadbalancer/drivers/haproxy/plugin_driver.py b/neutron/services/loadbalancer/drivers/haproxy/plugin_driver.py new file mode 100644 index 000000000..7dccaa3ac --- /dev/null +++ b/neutron/services/loadbalancer/drivers/haproxy/plugin_driver.py @@ -0,0 +1,23 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2013 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from neutron.services.loadbalancer.drivers.common import agent_driver_base +from neutron.services.loadbalancer.drivers.haproxy import namespace_driver + + +class HaproxyOnHostPluginDriver(agent_driver_base.AgentDriverBase): + device_driver = namespace_driver.DRIVER_NAME diff --git a/neutron/services/loadbalancer/drivers/netscaler/__init__.py b/neutron/services/loadbalancer/drivers/netscaler/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/neutron/services/loadbalancer/drivers/netscaler/ncc_client.py b/neutron/services/loadbalancer/drivers/netscaler/ncc_client.py new file mode 100644 index 000000000..98c8a35c2 --- /dev/null +++ b/neutron/services/loadbalancer/drivers/netscaler/ncc_client.py @@ -0,0 +1,182 @@ +# Copyright 2014 Citrix Systems +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
import base64

import requests

from neutron.common import exceptions as n_exc
from neutron.openstack.common import jsonutils
from neutron.openstack.common import log as logging

LOG = logging.getLogger(__name__)

CONTENT_TYPE_HEADER = 'Content-type'
ACCEPT_HEADER = 'Accept'
AUTH_HEADER = 'Authorization'
DRIVER_HEADER = 'X-OpenStack-LBaaS'
TENANT_HEADER = 'X-Tenant-ID'
JSON_CONTENT_TYPE = 'application/json'
DRIVER_HEADER_VALUE = 'netscaler-openstack-lbaas'


class NCCException(n_exc.NeutronException):

    """Represents exceptions thrown by NSClient."""

    # Error codes exposed through the ``error`` attribute.
    CONNECTION_ERROR = 1
    REQUEST_ERROR = 2
    RESPONSE_ERROR = 3
    UNKNOWN_ERROR = 4

    def __init__(self, error):
        self.message = _("NCC Error %d") % error
        super(NCCException, self).__init__()
        self.error = error


class NSClient(object):

    """Client to operate on REST resources of NetScaler Control Center."""

    def __init__(self, service_uri, username, password):
        """Store the service URI and pre-compute the Basic auth header.

        :param service_uri: base URI of the NetScaler Control Center.
        :param username: login user; auth header is omitted if empty.
        :param password: login password; auth header is omitted if empty.
        :raises NCCException: CONNECTION_ERROR when no URI is configured.
        """
        if not service_uri:
            msg = _("No NetScaler Control Center URI specified. "
                    "Cannot connect.")
            LOG.exception(msg)
            raise NCCException(NCCException.CONNECTION_ERROR)
        self.service_uri = service_uri.strip('/')
        self.auth = None
        if username and password:
            base64string = base64.encodestring("%s:%s" % (username, password))
            # encodestring appends a trailing newline; drop it.
            base64string = base64string[:-1]
            self.auth = 'Basic %s' % base64string

    def create_resource(self, tenant_id, resource_path, object_name,
                        object_data):
        """Create a resource of NetScaler Control Center."""
        return self._resource_operation('POST', tenant_id,
                                        resource_path,
                                        object_name=object_name,
                                        object_data=object_data)

    def retrieve_resource(self, tenant_id, resource_path, parse_response=True):
        """Retrieve a resource of NetScaler Control Center."""
        return self._resource_operation('GET', tenant_id, resource_path)

    def update_resource(self, tenant_id, resource_path, object_name,
                        object_data):
        """Update a resource of the NetScaler Control Center."""
        return self._resource_operation('PUT', tenant_id,
                                        resource_path,
                                        object_name=object_name,
                                        object_data=object_data)

    def remove_resource(self, tenant_id, resource_path, parse_response=True):
        """Remove a resource of NetScaler Control Center."""
        return self._resource_operation('DELETE', tenant_id, resource_path)

    def _resource_operation(self, method, tenant_id, resource_path,
                            object_name=None, object_data=None):
        """Serialize *object_data* (unless it is already a string) and
        dispatch the HTTP request; returns (status, response dict).
        """
        resource_uri = "%s/%s" % (self.service_uri, resource_path)
        headers = self._setup_req_headers(tenant_id)
        request_body = None
        if object_data:
            if isinstance(object_data, str):
                request_body = object_data
            else:
                obj_dict = {object_name: object_data}
                request_body = jsonutils.dumps(obj_dict)

        response_status, resp_dict = self._execute_request(method,
                                                           resource_uri,
                                                           headers,
                                                           body=request_body)
        return response_status, resp_dict

    def _is_valid_response(self, response_status):
        # when status is less than 400, the response is fine
        return response_status < requests.codes.bad_request

    def _setup_req_headers(self, tenant_id):
        """Build the common request headers (content type, driver tag,
        tenant scoping and optional Basic auth)."""
        headers = {ACCEPT_HEADER: JSON_CONTENT_TYPE,
                   CONTENT_TYPE_HEADER: JSON_CONTENT_TYPE,
                   DRIVER_HEADER: DRIVER_HEADER_VALUE,
                   TENANT_HEADER: tenant_id,
                   AUTH_HEADER: self.auth}
        return headers

    def _get_response_dict(self, response):
        """Convert a requests response into a plain dict; successful
        responses with a body also get the parsed JSON under 'dict'."""
        response_dict = {'status': response.status_code,
                         'body': response.text,
                         'headers': response.headers}
        if self._is_valid_response(response.status_code):
            if response.text:
                response_dict['dict'] = response.json()
        return response_dict

    def _execute_request(self, method, resource_uri, headers, body=None):
        """Perform the HTTP request, mapping transport failures and error
        statuses to NCCException codes.

        :raises NCCException: CONNECTION_ERROR for connectivity/SSL/timeout
            failures, REQUEST_ERROR for malformed requests, RESPONSE_ERROR
            for auth failures and >=400 statuses, UNKNOWN_ERROR otherwise.
        """
        try:
            response = requests.request(method, url=resource_uri,
                                        headers=headers, data=body)
        except requests.exceptions.ConnectionError:
            msg = (_("Connection error occurred while connecting to %s") %
                   self.service_uri)
            LOG.exception(msg)
            raise NCCException(NCCException.CONNECTION_ERROR)
        except requests.exceptions.SSLError:
            # NOTE(review): SSLError subclasses ConnectionError in newer
            # requests releases, which would make this branch unreachable
            # there — confirm against the pinned requests version.
            msg = (_("SSL error occurred while connecting to %s") %
                   self.service_uri)
            LOG.exception(msg)
            raise NCCException(NCCException.CONNECTION_ERROR)
        except requests.exceptions.Timeout:
            msg = _("Request to %s timed out") % self.service_uri
            LOG.exception(msg)
            raise NCCException(NCCException.CONNECTION_ERROR)
        except (requests.exceptions.URLRequired,
                requests.exceptions.InvalidURL,
                requests.exceptions.MissingSchema,
                requests.exceptions.InvalidSchema):
            msg = _("Request did not specify a valid URL")
            LOG.exception(msg)
            raise NCCException(NCCException.REQUEST_ERROR)
        except requests.exceptions.TooManyRedirects:
            # Bug fix: the URI was never interpolated into the message,
            # so a literal "%s" was logged.
            msg = (_("Too many redirects occurred for request to %s") %
                   self.service_uri)
            LOG.exception(msg)
            raise NCCException(NCCException.REQUEST_ERROR)
        except requests.exceptions.RequestException:
            msg = (_("A request error while connecting to %s") %
                   self.service_uri)
            LOG.exception(msg)
            raise NCCException(NCCException.REQUEST_ERROR)
        except Exception:
            # Bug fix: message read "A unknown error".
            msg = (_("An unknown error occurred during request to %s") %
                   self.service_uri)
            LOG.exception(msg)
            raise NCCException(NCCException.UNKNOWN_ERROR)
        resp_dict = self._get_response_dict(response)
        LOG.debug(_("Response: %s"), resp_dict['body'])
        response_status = resp_dict['status']
        if response_status == requests.codes.unauthorized:
            # Bug fix: the concatenated literals were missing a space and
            # produced "passed.for:".
            LOG.exception(_("Unable to login. Invalid credentials "
                            "passed for: %s"), self.service_uri)
            raise NCCException(NCCException.RESPONSE_ERROR)
        if not self._is_valid_response(response_status):
            msg = (_("Failed %(method)s operation on %(url)s "
                     "status code: %(response_status)s") %
                   {"method": method,
                    "url": resource_uri,
                    "response_status": response_status})
            LOG.exception(msg)
            raise NCCException(NCCException.RESPONSE_ERROR)
        return response_status, resp_dict
from oslo.config import cfg

from neutron.api.v2 import attributes
from neutron.db.loadbalancer import loadbalancer_db
from neutron.openstack.common import log as logging
from neutron.plugins.common import constants
from neutron.services.loadbalancer.drivers import abstract_driver
from neutron.services.loadbalancer.drivers.netscaler import ncc_client

LOG = logging.getLogger(__name__)

NETSCALER_CC_OPTS = [
    cfg.StrOpt(
        'netscaler_ncc_uri',
        help=_('The URL to reach the NetScaler Control Center Server.'),
    ),
    cfg.StrOpt(
        'netscaler_ncc_username',
        help=_('Username to login to the NetScaler Control Center Server.'),
    ),
    cfg.StrOpt(
        'netscaler_ncc_password',
        help=_('Password to login to the NetScaler Control Center Server.'),
    )
]

cfg.CONF.register_opts(NETSCALER_CC_OPTS, 'netscaler_driver')

# REST resource path components on the Control Center.
VIPS_RESOURCE = 'vips'
VIP_RESOURCE = 'vip'
POOLS_RESOURCE = 'pools'
POOL_RESOURCE = 'pool'
POOLMEMBERS_RESOURCE = 'members'
POOLMEMBER_RESOURCE = 'member'
MONITORS_RESOURCE = 'healthmonitors'
MONITOR_RESOURCE = 'healthmonitor'
POOLSTATS_RESOURCE = 'statistics'
PROV_SEGMT_ID = 'provider:segmentation_id'
PROV_NET_TYPE = 'provider:network_type'
DRIVER_NAME = 'netscaler_driver'


class NetScalerPluginDriver(abstract_driver.LoadBalancerAbstractDriver):

    """NetScaler LBaaS Plugin driver class.

    Forwards LBaaS object lifecycle operations to the NetScaler Control
    Center over its REST API and reflects the outcome back into the
    Neutron DB via the plugin's status-update helpers.
    """

    def __init__(self, plugin):
        self.plugin = plugin
        ncc_uri = cfg.CONF.netscaler_driver.netscaler_ncc_uri
        ncc_username = cfg.CONF.netscaler_driver.netscaler_ncc_username
        ncc_password = cfg.CONF.netscaler_driver.netscaler_ncc_password
        self.client = ncc_client.NSClient(ncc_uri,
                                          ncc_username,
                                          ncc_password)

    def create_vip(self, context, vip):
        """Create a vip on a NetScaler device."""
        network_info = self._get_vip_network_info(context, vip)
        ncc_vip = self._prepare_vip_for_creation(vip)
        ncc_vip = dict(ncc_vip.items() + network_info.items())
        msg = _("NetScaler driver vip creation: %s") % repr(ncc_vip)
        LOG.debug(msg)
        status = constants.ACTIVE
        try:
            self.client.create_resource(context.tenant_id, VIPS_RESOURCE,
                                        VIP_RESOURCE, ncc_vip)
        except ncc_client.NCCException:
            status = constants.ERROR
        self.plugin.update_status(context, loadbalancer_db.Vip, vip["id"],
                                  status)

    def update_vip(self, context, old_vip, vip):
        """Update a vip on a NetScaler device."""
        update_vip = self._prepare_vip_for_update(vip)
        resource_path = "%s/%s" % (VIPS_RESOURCE, vip["id"])
        msg = (_("NetScaler driver vip %(vip_id)s update: %(vip_obj)s") %
               {"vip_id": vip["id"], "vip_obj": repr(vip)})
        LOG.debug(msg)
        status = constants.ACTIVE
        try:
            self.client.update_resource(context.tenant_id, resource_path,
                                        VIP_RESOURCE, update_vip)
        except ncc_client.NCCException:
            status = constants.ERROR
        self.plugin.update_status(context, loadbalancer_db.Vip, old_vip["id"],
                                  status)

    def delete_vip(self, context, vip):
        """Delete a vip on a NetScaler device."""
        resource_path = "%s/%s" % (VIPS_RESOURCE, vip["id"])
        msg = _("NetScaler driver vip removal: %s") % vip["id"]
        LOG.debug(msg)
        try:
            self.client.remove_resource(context.tenant_id, resource_path)
        except ncc_client.NCCException:
            self.plugin.update_status(context, loadbalancer_db.Vip,
                                      vip["id"],
                                      constants.ERROR)
        else:
            # Only purge the DB row once the device removal succeeded.
            self.plugin._delete_db_vip(context, vip['id'])

    def create_pool(self, context, pool):
        """Create a pool on a NetScaler device."""
        network_info = self._get_pool_network_info(context, pool)
        #allocate a snat port/ipaddress on the subnet if one doesn't exist
        self._create_snatport_for_subnet_if_not_exists(context,
                                                       pool['tenant_id'],
                                                       pool['subnet_id'],
                                                       network_info)
        ncc_pool = self._prepare_pool_for_creation(pool)
        ncc_pool = dict(ncc_pool.items() + network_info.items())
        msg = _("NetScaler driver pool creation: %s") % repr(ncc_pool)
        LOG.debug(msg)
        status = constants.ACTIVE
        try:
            self.client.create_resource(context.tenant_id, POOLS_RESOURCE,
                                        POOL_RESOURCE, ncc_pool)
        except ncc_client.NCCException:
            status = constants.ERROR
        self.plugin.update_status(context, loadbalancer_db.Pool,
                                  ncc_pool["id"], status)

    def update_pool(self, context, old_pool, pool):
        """Update a pool on a NetScaler device."""
        ncc_pool = self._prepare_pool_for_update(pool)
        resource_path = "%s/%s" % (POOLS_RESOURCE, old_pool["id"])
        msg = (_("NetScaler driver pool %(pool_id)s update: %(pool_obj)s") %
               {"pool_id": old_pool["id"], "pool_obj": repr(ncc_pool)})
        LOG.debug(msg)
        status = constants.ACTIVE
        try:
            self.client.update_resource(context.tenant_id, resource_path,
                                        POOL_RESOURCE, ncc_pool)
        except ncc_client.NCCException:
            status = constants.ERROR
        self.plugin.update_status(context, loadbalancer_db.Pool,
                                  old_pool["id"], status)

    def delete_pool(self, context, pool):
        """Delete a pool on a NetScaler device."""
        resource_path = "%s/%s" % (POOLS_RESOURCE, pool['id'])
        msg = _("NetScaler driver pool removal: %s") % pool["id"]
        LOG.debug(msg)
        try:
            self.client.remove_resource(context.tenant_id, resource_path)
        except ncc_client.NCCException:
            self.plugin.update_status(context, loadbalancer_db.Pool,
                                      pool["id"],
                                      constants.ERROR)
        else:
            self.plugin._delete_db_pool(context, pool['id'])
            # Release the SNAT port if no other pool on the subnet needs it.
            self._remove_snatport_for_subnet_if_not_used(context,
                                                         pool['tenant_id'],
                                                         pool['subnet_id'])

    def create_member(self, context, member):
        """Create a pool member on a NetScaler device."""
        ncc_member = self._prepare_member_for_creation(member)
        msg = (_("NetScaler driver poolmember creation: %s") %
               repr(ncc_member))
        LOG.info(msg)
        status = constants.ACTIVE
        try:
            self.client.create_resource(context.tenant_id,
                                        POOLMEMBERS_RESOURCE,
                                        POOLMEMBER_RESOURCE,
                                        ncc_member)
        except ncc_client.NCCException:
            status = constants.ERROR
        self.plugin.update_status(context, loadbalancer_db.Member,
                                  member["id"], status)

    def update_member(self, context, old_member, member):
        """Update a pool member on a NetScaler device."""
        ncc_member = self._prepare_member_for_update(member)
        resource_path = "%s/%s" % (POOLMEMBERS_RESOURCE, old_member["id"])
        msg = (_("NetScaler driver poolmember %(member_id)s update:"
                 " %(member_obj)s") %
               {"member_id": old_member["id"],
                "member_obj": repr(ncc_member)})
        LOG.debug(msg)
        status = constants.ACTIVE
        try:
            self.client.update_resource(context.tenant_id, resource_path,
                                        POOLMEMBER_RESOURCE, ncc_member)
        except ncc_client.NCCException:
            status = constants.ERROR
        self.plugin.update_status(context, loadbalancer_db.Member,
                                  old_member["id"], status)

    def delete_member(self, context, member):
        """Delete a pool member on a NetScaler device."""
        resource_path = "%s/%s" % (POOLMEMBERS_RESOURCE, member['id'])
        msg = (_("NetScaler driver poolmember removal: %s") %
               member["id"])
        LOG.debug(msg)
        try:
            self.client.remove_resource(context.tenant_id, resource_path)
        except ncc_client.NCCException:
            self.plugin.update_status(context, loadbalancer_db.Member,
                                      member["id"],
                                      constants.ERROR)
        else:
            self.plugin._delete_db_member(context, member['id'])

    def create_pool_health_monitor(self, context, health_monitor, pool_id):
        """Create a pool health monitor on a NetScaler device."""
        ncc_hm = self._prepare_healthmonitor_for_creation(health_monitor,
                                                          pool_id)
        resource_path = "%s/%s/%s" % (POOLS_RESOURCE, pool_id,
                                      MONITORS_RESOURCE)
        msg = (_("NetScaler driver healthmonitor creation for pool %(pool_id)s"
                 ": %(monitor_obj)s") %
               {"pool_id": pool_id,
                "monitor_obj": repr(ncc_hm)})
        LOG.debug(msg)
        status = constants.ACTIVE
        try:
            self.client.create_resource(context.tenant_id, resource_path,
                                        MONITOR_RESOURCE,
                                        ncc_hm)
        except ncc_client.NCCException:
            status = constants.ERROR
        self.plugin.update_pool_health_monitor(context,
                                               health_monitor['id'],
                                               pool_id,
                                               status, "")

    def update_pool_health_monitor(self, context, old_health_monitor,
                                   health_monitor, pool_id):
        """Update a pool health monitor on a NetScaler device."""
        ncc_hm = self._prepare_healthmonitor_for_update(health_monitor)
        resource_path = "%s/%s" % (MONITORS_RESOURCE,
                                   old_health_monitor["id"])
        msg = (_("NetScaler driver healthmonitor %(monitor_id)s update: "
                 "%(monitor_obj)s") %
               {"monitor_id": old_health_monitor["id"],
                "monitor_obj": repr(ncc_hm)})
        LOG.debug(msg)
        status = constants.ACTIVE
        try:
            self.client.update_resource(context.tenant_id, resource_path,
                                        MONITOR_RESOURCE, ncc_hm)
        except ncc_client.NCCException:
            status = constants.ERROR
        self.plugin.update_pool_health_monitor(context,
                                               old_health_monitor['id'],
                                               pool_id,
                                               status, "")

    def delete_pool_health_monitor(self, context, health_monitor, pool_id):
        """Delete a pool health monitor on a NetScaler device."""
        resource_path = "%s/%s/%s/%s" % (POOLS_RESOURCE, pool_id,
                                         MONITORS_RESOURCE,
                                         health_monitor["id"])
        # Bug fix: the concatenated literals were missing a space and
        # logged "...%(monitor_id)sremoval...".
        msg = (_("NetScaler driver healthmonitor %(monitor_id)s "
                 "removal for pool %(pool_id)s") %
               {"monitor_id": health_monitor["id"],
                "pool_id": pool_id})
        LOG.debug(msg)
        try:
            self.client.remove_resource(context.tenant_id, resource_path)
        except ncc_client.NCCException:
            self.plugin.update_pool_health_monitor(context,
                                                   health_monitor['id'],
                                                   pool_id,
                                                   constants.ERROR, "")
        else:
            self.plugin._delete_db_pool_health_monitor(context,
                                                       health_monitor['id'],
                                                       pool_id)

    def stats(self, context, pool_id):
        """Retrieve pool statistics from the NetScaler device."""
        resource_path = "%s/%s" % (POOLSTATS_RESOURCE, pool_id)
        msg = _("NetScaler driver pool stats retrieval: %s") % pool_id
        LOG.debug(msg)
        try:
            stats = self.client.retrieve_resource(context.tenant_id,
                                                  resource_path)[1]
        except ncc_client.NCCException:
            self.plugin.update_status(context, loadbalancer_db.Pool,
                                      pool_id, constants.ERROR)
        else:
            return stats

    def _prepare_vip_for_creation(self, vip):
        """Build the creation payload for a vip (immutable + mutable
        attributes)."""
        creation_attrs = {
            'id': vip['id'],
            'tenant_id': vip['tenant_id'],
            'protocol': vip['protocol'],
            'address': vip['address'],
            'protocol_port': vip['protocol_port'],
        }
        if 'session_persistence' in vip:
            creation_attrs['session_persistence'] = vip['session_persistence']
        update_attrs = self._prepare_vip_for_update(vip)
        creation_attrs.update(update_attrs)
        return creation_attrs

    def _prepare_vip_for_update(self, vip):
        """Build the payload of vip attributes that may change."""
        return {
            'name': vip['name'],
            'description': vip['description'],
            'pool_id': vip['pool_id'],
            'connection_limit': vip['connection_limit'],
            'admin_state_up': vip['admin_state_up']
        }

    def _prepare_pool_for_creation(self, pool):
        """Build the creation payload for a pool."""
        creation_attrs = {
            'id': pool['id'],
            'tenant_id': pool['tenant_id'],
            'vip_id': pool['vip_id'],
            'protocol': pool['protocol'],
            'subnet_id': pool['subnet_id'],
        }
        update_attrs = self._prepare_pool_for_update(pool)
        creation_attrs.update(update_attrs)
        return creation_attrs

    def _prepare_pool_for_update(self, pool):
        """Build the payload of pool attributes that may change."""
        return {
            'name': pool['name'],
            'description': pool['description'],
            'lb_method': pool['lb_method'],
            'admin_state_up': pool['admin_state_up']
        }

    def _prepare_member_for_creation(self, member):
        """Build the creation payload for a pool member."""
        creation_attrs = {
            'id': member['id'],
            'tenant_id': member['tenant_id'],
            'address': member['address'],
            'protocol_port': member['protocol_port'],
        }
        update_attrs = self._prepare_member_for_update(member)
        creation_attrs.update(update_attrs)
        return creation_attrs

    def _prepare_member_for_update(self, member):
        """Build the payload of member attributes that may change."""
        return {
            'pool_id': member['pool_id'],
            'weight': member['weight'],
            'admin_state_up': member['admin_state_up']
        }

    def _prepare_healthmonitor_for_creation(self, health_monitor, pool_id):
        """Build the creation payload for a health monitor."""
        creation_attrs = {
            'id': health_monitor['id'],
            'tenant_id': health_monitor['tenant_id'],
            'type': health_monitor['type'],
        }
        update_attrs = self._prepare_healthmonitor_for_update(health_monitor)
        creation_attrs.update(update_attrs)
        return creation_attrs

    def _prepare_healthmonitor_for_update(self, health_monitor):
        """Build the payload of monitor attributes that may change;
        HTTP(S) monitors additionally carry method/path/codes."""
        ncc_hm = {
            'delay': health_monitor['delay'],
            'timeout': health_monitor['timeout'],
            'max_retries': health_monitor['max_retries'],
            'admin_state_up': health_monitor['admin_state_up']
        }
        if health_monitor['type'] in ['HTTP', 'HTTPS']:
            ncc_hm['http_method'] = health_monitor['http_method']
            ncc_hm['url_path'] = health_monitor['url_path']
            ncc_hm['expected_codes'] = health_monitor['expected_codes']
        return ncc_hm

    def _get_network_info(self, context, entity):
        """Collect network/subnet info (and provider attrs, if exposed)
        for an entity carrying a 'subnet_id'."""
        network_info = {}
        subnet_id = entity['subnet_id']
        subnet = self.plugin._core_plugin.get_subnet(context, subnet_id)
        network_id = subnet['network_id']
        network = self.plugin._core_plugin.get_network(context, network_id)
        network_info['network_id'] = network_id
        network_info['subnet_id'] = subnet_id
        if PROV_NET_TYPE in network:
            network_info['network_type'] = network[PROV_NET_TYPE]
        if PROV_SEGMT_ID in network:
            network_info['segmentation_id'] = network[PROV_SEGMT_ID]
        return network_info

    def _get_vip_network_info(self, context, vip):
        network_info = self._get_network_info(context, vip)
        network_info['port_id'] = vip['port_id']
        return network_info

    def _get_pool_network_info(self, context, pool):
        return self._get_network_info(context, pool)

    def _get_pools_on_subnet(self, context, tenant_id, subnet_id):
        filter_dict = {'subnet_id': [subnet_id], 'tenant_id': [tenant_id]}
        return self.plugin.get_pools(context, filters=filter_dict)

    def _get_snatport_for_subnet(self, context, tenant_id, subnet_id):
        """Return the driver's existing SNAT port on the subnet, or None."""
        device_id = '_lb-snatport-' + subnet_id
        subnet = self.plugin._core_plugin.get_subnet(context, subnet_id)
        network_id = subnet['network_id']
        msg = (_("Filtering ports based on network_id=%(network_id)s, "
                 "tenant_id=%(tenant_id)s, device_id=%(device_id)s") %
               {'network_id': network_id,
                'tenant_id': tenant_id,
                'device_id': device_id})
        LOG.debug(msg)
        # Bug fix: the filter key was 'device-owner' (hyphen) while ports
        # are created with 'device_owner' (underscore, see
        # _create_snatport_for_subnet), so existing SNAT ports were never
        # matched and a new port was allocated on every pool creation.
        filter_dict = {
            'network_id': [network_id],
            'tenant_id': [tenant_id],
            'device_id': [device_id],
            'device_owner': [DRIVER_NAME]
        }
        ports = self.plugin._core_plugin.get_ports(context,
                                                   filters=filter_dict)
        if ports:
            msg = _("Found an existing SNAT port for subnet %s") % subnet_id
            LOG.info(msg)
            return ports[0]
        msg = _("Found no SNAT ports for subnet %s") % subnet_id
        LOG.info(msg)

    def _create_snatport_for_subnet(self, context, tenant_id, subnet_id,
                                    ip_address):
        """Allocate the driver-owned SNAT port on the subnet."""
        subnet = self.plugin._core_plugin.get_subnet(context, subnet_id)
        fixed_ip = {'subnet_id': subnet['id']}
        if ip_address and ip_address != attributes.ATTR_NOT_SPECIFIED:
            fixed_ip['ip_address'] = ip_address
        port_data = {
            'tenant_id': tenant_id,
            'name': '_lb-snatport-' + subnet_id,
            'network_id': subnet['network_id'],
            'mac_address': attributes.ATTR_NOT_SPECIFIED,
            'admin_state_up': False,
            'device_id': '_lb-snatport-' + subnet_id,
            'device_owner': DRIVER_NAME,
            'fixed_ips': [fixed_ip],
        }
        port = self.plugin._core_plugin.create_port(context,
                                                    {'port': port_data})
        msg = _("Created SNAT port: %s") % repr(port)
        LOG.info(msg)
        return port

    def _remove_snatport_for_subnet(self, context, tenant_id, subnet_id):
        port = self._get_snatport_for_subnet(context, tenant_id, subnet_id)
        if port:
            self.plugin._core_plugin.delete_port(context, port['id'])
            msg = _("Removed SNAT port: %s") % repr(port)
            LOG.info(msg)

    def _create_snatport_for_subnet_if_not_exists(self, context, tenant_id,
                                                  subnet_id, network_info):
        """Ensure a SNAT port exists and record its id/ip in network_info."""
        port = self._get_snatport_for_subnet(context, tenant_id, subnet_id)
        if not port:
            msg = _("No SNAT port found for subnet %s."
                    " Creating one...") % subnet_id
            LOG.info(msg)
            port = self._create_snatport_for_subnet(context, tenant_id,
                                                    subnet_id,
                                                    ip_address=None)
        network_info['port_id'] = port['id']
        network_info['snat_ip'] = port['fixed_ips'][0]['ip_address']
        msg = _("SNAT port: %s") % repr(port)
        LOG.info(msg)

    def _remove_snatport_for_subnet_if_not_used(self, context, tenant_id,
                                                subnet_id):
        pools = self._get_pools_on_subnet(context, tenant_id, subnet_id)
        if not pools:
            #No pools left on the old subnet.
            #We can remove the SNAT port/ipaddress
            self._remove_snatport_for_subnet(context, tenant_id, subnet_id)
            msg = _("Removing SNAT port for subnet %s "
                    "as this is the last pool using it...") % subnet_id
            LOG.info(msg)
+# +# @author: Avishay Balderman, Radware diff --git a/neutron/services/loadbalancer/drivers/radware/driver.py b/neutron/services/loadbalancer/drivers/radware/driver.py new file mode 100644 index 000000000..7f198a564 --- /dev/null +++ b/neutron/services/loadbalancer/drivers/radware/driver.py @@ -0,0 +1,1097 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013 Radware LTD. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Avishay Balderman, Radware + +import base64 +import copy +import httplib +import netaddr +import threading +import time + + +import eventlet +eventlet.monkey_patch(thread=True) + +from oslo.config import cfg +from six.moves import queue as Queue + +from neutron.api.v2 import attributes +from neutron.common import log as call_log +from neutron import context +from neutron.db.loadbalancer import loadbalancer_db as lb_db +from neutron.extensions import loadbalancer +from neutron.openstack.common import excutils +from neutron.openstack.common import jsonutils as json +from neutron.openstack.common import log as logging +from neutron.plugins.common import constants +from neutron.services.loadbalancer.drivers import abstract_driver +from neutron.services.loadbalancer.drivers.radware import exceptions as r_exc + +LOG = logging.getLogger(__name__) + +RESP_STATUS = 0 +RESP_REASON = 1 +RESP_STR = 2 +RESP_DATA = 3 + +TEMPLATE_HEADER = {'Content-Type': + 'application/vnd.com.radware.vdirect.' 
+ 'template-parameters+json'} +PROVISION_HEADER = {'Content-Type': + 'application/vnd.com.radware.' + 'vdirect.status+json'} +CREATE_SERVICE_HEADER = {'Content-Type': + 'application/vnd.com.radware.' + 'vdirect.adc-service-specification+json'} + +driver_opts = [ + cfg.StrOpt('vdirect_address', + help=_('IP address of vDirect server.')), + cfg.StrOpt('ha_secondary_address', + help=_('IP address of secondary vDirect server.')), + cfg.StrOpt('vdirect_user', + default='vDirect', + help=_('vDirect user name.')), + cfg.StrOpt('vdirect_password', + default='radware', + help=_('vDirect user password.')), + cfg.StrOpt('service_adc_type', + default="VA", + help=_('Service ADC type. Default: VA.')), + cfg.StrOpt('service_adc_version', + default="", + help=_('Service ADC version.')), + cfg.BoolOpt('service_ha_pair', + default=False, + help=_('Enables or disables the Service HA pair. ' + 'Default: False.')), + cfg.IntOpt('service_throughput', + default=1000, + help=_('Service throughput. Default: 1000.')), + cfg.IntOpt('service_ssl_throughput', + default=100, + help=_('Service SSL throughput. Default: 100.')), + cfg.IntOpt('service_compression_throughput', + default=100, + help=_('Service compression throughput. Default: 100.')), + cfg.IntOpt('service_cache', + default=20, + help=_('Size of service cache. Default: 20.')), + cfg.StrOpt('l2_l3_workflow_name', + default='openstack_l2_l3', + help=_('Name of l2_l3 workflow. Default: ' + 'openstack_l2_l3.')), + cfg.StrOpt('l4_workflow_name', + default='openstack_l4', + help=_('Name of l4 workflow. 
Default: openstack_l4.')), + cfg.DictOpt('l2_l3_ctor_params', + default={"service": "_REPLACE_", + "ha_network_name": "HA-Network", + "ha_ip_pool_name": "default", + "allocate_ha_vrrp": True, + "allocate_ha_ips": True, + "twoleg_enabled": "_REPLACE_"}, + help=_('Parameter for l2_l3 workflow constructor.')), + cfg.DictOpt('l2_l3_setup_params', + default={"data_port": 1, + "data_ip_address": "192.168.200.99", + "data_ip_mask": "255.255.255.0", + "gateway": "192.168.200.1", + "ha_port": 2}, + help=_('Parameter for l2_l3 workflow setup.')), + cfg.ListOpt('actions_to_skip', + default=['setup_l2_l3'], + help=_('List of actions that are not pushed to ' + 'the completion queue.')), + cfg.StrOpt('l4_action_name', + default='BaseCreate', + help=_('Name of the l4 workflow action. ' + 'Default: BaseCreate.')), + cfg.ListOpt('service_resource_pool_ids', + default=[], + help=_('Resource pool IDs.')), + cfg.IntOpt('service_isl_vlan', + default=-1, + help=_('A required VLAN for the interswitch link to use.')), + cfg.BoolOpt('service_session_mirroring_enabled', + default=False, + help=_('Enable or disable Alteon interswitch link for ' + 'stateful session failover. 
Default: False.')) +] + +cfg.CONF.register_opts(driver_opts, "radware") + + +class LoadBalancerDriver(abstract_driver.LoadBalancerAbstractDriver): + + """Radware lbaas driver.""" + + def __init__(self, plugin): + rad = cfg.CONF.radware + self.plugin = plugin + self.service = { + "haPair": rad.service_ha_pair, + "sessionMirroringEnabled": rad.service_session_mirroring_enabled, + "primary": { + "capacity": { + "throughput": rad.service_throughput, + "sslThroughput": rad.service_ssl_throughput, + "compressionThroughput": + rad.service_compression_throughput, + "cache": rad.service_cache + }, + "network": { + "type": "portgroup", + "portgroups": ['DATA_NETWORK'] + }, + "adcType": rad.service_adc_type, + "acceptableAdc": "Exact" + } + } + if rad.service_resource_pool_ids: + ids = rad.service_resource_pool_ids + self.service['resourcePoolIds'] = [ + {'name': id} for id in ids + ] + if rad.service_isl_vlan: + self.service['islVlan'] = rad.service_isl_vlan + self.l2_l3_wf_name = rad.l2_l3_workflow_name + self.l4_wf_name = rad.l4_workflow_name + self.l2_l3_ctor_params = rad.l2_l3_ctor_params + self.l2_l3_setup_params = rad.l2_l3_setup_params + self.l4_action_name = rad.l4_action_name + self.actions_to_skip = rad.actions_to_skip + vdirect_address = rad.vdirect_address + sec_server = rad.ha_secondary_address + self.rest_client = vDirectRESTClient(server=vdirect_address, + secondary_server=sec_server, + user=rad.vdirect_user, + password=rad.vdirect_password) + self.queue = Queue.Queue() + self.completion_handler = OperationCompletionHandler(self.queue, + self.rest_client, + plugin) + self.workflow_templates_exists = False + self.completion_handler.setDaemon(True) + self.completion_handler_started = False + + def _populate_vip_graph(self, context, vip): + ext_vip = self.plugin.populate_vip_graph(context, vip) + vip_network_id = self._get_vip_network_id(context, ext_vip) + pool_network_id = self._get_pool_network_id(context, ext_vip) + + # if VIP and PIP are different, we need 
an IP address for the PIP + # so create port on PIP's network and use its IP address + if vip_network_id != pool_network_id: + pip_address = self._create_port_for_pip( + context, + vip['tenant_id'], + _make_pip_name_from_vip(vip), + pool_network_id) + ext_vip['pip_address'] = pip_address + else: + ext_vip['pip_address'] = vip['address'] + + ext_vip['vip_network_id'] = vip_network_id + ext_vip['pool_network_id'] = pool_network_id + return ext_vip + + def create_vip(self, context, vip): + log_info = {'vip': vip, + 'extended_vip': 'NOT_ASSIGNED', + 'service_name': 'NOT_ASSIGNED'} + try: + ext_vip = self._populate_vip_graph(context, vip) + + service_name = self._get_service(ext_vip) + log_info['extended_vip'] = ext_vip + log_info['service_name'] = service_name + + self._create_workflow( + vip['pool_id'], self.l4_wf_name, + {"service": service_name}) + self._update_workflow( + vip['pool_id'], + self.l4_action_name, ext_vip, context) + + finally: + LOG.debug(_('vip: %(vip)s, ' + 'extended_vip: %(extended_vip)s, ' + 'service_name: %(service_name)s, '), + log_info) + + def update_vip(self, context, old_vip, vip): + ext_vip = self._populate_vip_graph(context, vip) + self._update_workflow( + vip['pool_id'], self.l4_action_name, + ext_vip, context, False, lb_db.Vip, vip['id']) + + def delete_vip(self, context, vip): + """Delete a Vip + + First delete it from the device. If deletion ended OK + - remove data from DB as well. 
+ If the deletion failed - mark vip with error status in DB + + """ + + ext_vip = self._populate_vip_graph(context, vip) + params = _translate_vip_object_graph(ext_vip, + self.plugin, context) + ids = params.pop('__ids__') + + try: + # get neutron port id associated with the vip (present if vip and + # pip are different) and release it after workflow removed + port_filter = { + 'name': [_make_pip_name_from_vip(vip)], + } + ports = self.plugin._core_plugin.get_ports(context, + filters=port_filter) + if ports: + LOG.debug(_('Retrieved pip nport: %(port)r for ' + 'vip: %(vip)s'), {'port': ports[0], + 'vip': vip['id']}) + + delete_pip_nport_function = self._get_delete_pip_nports( + context, ports) + else: + delete_pip_nport_function = None + LOG.debug(_('Found no pip nports associated with ' + 'vip: %s'), vip['id']) + + # removing the WF will cause deletion of the configuration from the + # device + self._remove_workflow(ids, context, delete_pip_nport_function) + + except r_exc.RESTRequestFailure: + pool_id = ext_vip['pool_id'] + LOG.exception(_('Failed to remove workflow %s. 
' + 'Going to set vip to ERROR status'), + pool_id) + + self.plugin.update_status(context, lb_db.Vip, ids['vip'], + constants.ERROR) + + def _get_delete_pip_nports(self, context, ports): + def _delete_pip_nports(success): + if success: + for port in ports: + try: + self.plugin._core_plugin.delete_port( + context, port['id']) + LOG.debug(_('pip nport id: %s'), port['id']) + except Exception as exception: + # stop exception propagation, nport may have + # been deleted by other means + LOG.warning(_('pip nport delete failed: %r'), + exception) + return _delete_pip_nports + + def create_pool(self, context, pool): + # nothing to do + pass + + def update_pool(self, context, old_pool, pool): + self._handle_pool(context, pool) + + def delete_pool(self, context, pool,): + self._handle_pool(context, pool, delete=True) + + def _handle_pool(self, context, pool, delete=False): + vip_id = self.plugin.get_pool(context, pool['id']).get('vip_id', None) + if vip_id: + if delete: + raise loadbalancer.PoolInUse(pool_id=pool['id']) + else: + vip = self.plugin.get_vip(context, vip_id) + ext_vip = self._populate_vip_graph(context, vip) + self._update_workflow( + pool['id'], self.l4_action_name, + ext_vip, context, delete, lb_db.Pool, pool['id']) + else: + if delete: + self.plugin._delete_db_pool(context, pool['id']) + else: + # we keep the pool in PENDING_UPDATE + # no point to modify it since it is not connected to vip yet + pass + + def create_member(self, context, member): + self._handle_member(context, member) + + def update_member(self, context, old_member, member): + self._handle_member(context, member) + + def delete_member(self, context, member): + self._handle_member(context, member, delete=True) + + def _handle_member(self, context, member, delete=False): + """Navigate the model. If a Vip is found - activate a bulk WF action. 
+ """ + vip_id = self.plugin.get_pool( + context, member['pool_id']).get('vip_id') + if vip_id: + vip = self.plugin.get_vip(context, vip_id) + ext_vip = self._populate_vip_graph(context, vip) + self._update_workflow( + member['pool_id'], self.l4_action_name, + ext_vip, context, + delete, lb_db.Member, member['id']) + # We have to delete this member but it is not connected to a vip yet + elif delete: + self.plugin._delete_db_member(context, member['id']) + + def create_health_monitor(self, context, health_monitor): + # Anything to do here? the hm is not connected to the graph yet + pass + + def update_pool_health_monitor(self, context, old_health_monitor, + health_monitor, + pool_id): + self._handle_pool_health_monitor(context, health_monitor, pool_id) + + def create_pool_health_monitor(self, context, + health_monitor, pool_id): + self._handle_pool_health_monitor(context, health_monitor, pool_id) + + def delete_pool_health_monitor(self, context, health_monitor, pool_id): + self._handle_pool_health_monitor(context, health_monitor, pool_id, + True) + + def _handle_pool_health_monitor(self, context, health_monitor, + pool_id, delete=False): + """Push a graph to vDirect + + Navigate the model. Check if a pool is associated to the vip + and push the graph to vDirect + + """ + + vip_id = self.plugin.get_pool(context, pool_id).get('vip_id', None) + + debug_params = {"hm_id": health_monitor['id'], "pool_id": pool_id, + "delete": delete, "vip_id": vip_id} + LOG.debug(_('_handle_pool_health_monitor. 
health_monitor = %(hm_id)s ' + 'pool_id = %(pool_id)s delete = %(delete)s ' + 'vip_id = %(vip_id)s'), + debug_params) + + if vip_id: + vip = self.plugin.get_vip(context, vip_id) + ext_vip = self._populate_vip_graph(context, vip) + self._update_workflow(pool_id, self.l4_action_name, + ext_vip, context, + delete, lb_db.PoolMonitorAssociation, + health_monitor['id']) + elif delete: + self.plugin._delete_db_pool_health_monitor(context, + health_monitor['id'], + pool_id) + + def stats(self, context, pool_id): + # TODO(avishayb) implement + return {"bytes_in": 0, + "bytes_out": 0, + "active_connections": 0, + "total_connections": 0} + + def _get_vip_network_id(self, context, extended_vip): + subnet = self.plugin._core_plugin.get_subnet( + context, extended_vip['subnet_id']) + return subnet['network_id'] + + def _start_completion_handling_thread(self): + if not self.completion_handler_started: + LOG.info(_('Starting operation completion handling thread')) + self.completion_handler.start() + self.completion_handler_started = True + + def _get_pool_network_id(self, context, extended_vip): + subnet = self.plugin._core_plugin.get_subnet( + context, extended_vip['pool']['subnet_id']) + return subnet['network_id'] + + @call_log.log + def _update_workflow(self, wf_name, action, + wf_params, context, + delete=False, + lbaas_entity=None, entity_id=None): + """Update the WF state. 
Push the result to a queue for processing.""" + + if not self.workflow_templates_exists: + self._verify_workflow_templates() + + if action not in self.actions_to_skip: + params = _translate_vip_object_graph(wf_params, + self.plugin, + context) + else: + params = wf_params + + resource = '/api/workflow/%s/action/%s' % (wf_name, action) + response = _rest_wrapper(self.rest_client.call('POST', resource, + {'parameters': params}, + TEMPLATE_HEADER)) + LOG.debug(_('_update_workflow response: %s '), response) + + if action not in self.actions_to_skip: + ids = params.pop('__ids__', None) + oper = OperationAttributes(response['uri'], + ids, + lbaas_entity, + entity_id, + delete=delete) + LOG.debug(_('Pushing operation %s to the queue'), oper) + + self._start_completion_handling_thread() + self.queue.put_nowait(oper) + + def _remove_workflow(self, ids, context, post_remove_function): + + wf_name = ids['pool'] + LOG.debug(_('Remove the workflow %s') % wf_name) + resource = '/api/workflow/%s' % (wf_name) + rest_return = self.rest_client.call('DELETE', resource, None, None) + response = _rest_wrapper(rest_return, [204, 202, 404]) + if rest_return[RESP_STATUS] in [404]: + if post_remove_function: + try: + post_remove_function(True) + LOG.debug(_('Post-remove workflow function ' + '%r completed'), post_remove_function) + except Exception: + with excutils.save_and_reraise_exception(): + LOG.exception(_('Post-remove workflow function ' + '%r failed'), post_remove_function) + self.plugin._delete_db_vip(context, ids['vip']) + else: + oper = OperationAttributes( + response['uri'], + ids, + lb_db.Vip, + ids['vip'], + delete=True, + post_op_function=post_remove_function) + LOG.debug(_('Pushing operation %s to the queue'), oper) + + self._start_completion_handling_thread() + self.queue.put_nowait(oper) + + def _remove_service(self, service_name): + resource = '/api/service/%s' % (service_name) + _rest_wrapper(self.rest_client.call('DELETE', + resource, None, None), + [202]) + + def 
_get_service(self, ext_vip): + """Get a service name. + + if you can't find one, + create a service and create l2_l3 WF. + + """ + if not self.workflow_templates_exists: + self._verify_workflow_templates() + if ext_vip['vip_network_id'] != ext_vip['pool_network_id']: + networks_name = '%s_%s' % (ext_vip['vip_network_id'], + ext_vip['pool_network_id']) + self.l2_l3_ctor_params["twoleg_enabled"] = True + else: + networks_name = ext_vip['vip_network_id'] + self.l2_l3_ctor_params["twoleg_enabled"] = False + incoming_service_name = 'srv_%s' % (networks_name,) + service_name = self._get_available_service(incoming_service_name) + if not service_name: + LOG.debug( + 'Could not find a service named ' + incoming_service_name) + service_name = self._create_service(ext_vip['vip_network_id'], + ext_vip['pool_network_id'], + ext_vip['tenant_id']) + self.l2_l3_ctor_params["service"] = incoming_service_name + wf_name = 'l2_l3_' + networks_name + self._create_workflow( + wf_name, self.l2_l3_wf_name, self.l2_l3_ctor_params) + self._update_workflow( + wf_name, "setup_l2_l3", self.l2_l3_setup_params, None) + else: + LOG.debug('A service named ' + service_name + ' was found.') + return service_name + + def _create_service(self, vip_network_id, pool_network_id, tenant_id): + """create the service and provision it (async).""" + # 1) create the service + service = copy.deepcopy(self.service) + if vip_network_id != pool_network_id: + service_name = 'srv_%s_%s' % (vip_network_id, pool_network_id) + service['primary']['network']['portgroups'] = [vip_network_id, + pool_network_id] + else: + service_name = 'srv_' + vip_network_id + service['primary']['network']['portgroups'] = [vip_network_id] + resource = '/api/service?name=%s&tenant=%s' % (service_name, tenant_id) + + response = _rest_wrapper(self.rest_client.call('POST', resource, + service, + CREATE_SERVICE_HEADER), [201]) + + # 2) provision the service + provision_uri = response['links']['actions']['provision'] + 
_rest_wrapper(self.rest_client.call('POST', provision_uri, + None, PROVISION_HEADER)) + return service_name + + def _get_available_service(self, service_name): + """Check if service exsists and return its name if it does.""" + resource = '/api/service/' + service_name + try: + _rest_wrapper(self.rest_client.call('GET', + resource, + None, None), [200]) + except Exception: + return + return service_name + + def _workflow_exists(self, pool_id): + """Check if a WF having the name of the pool_id exists.""" + resource = '/api/workflow/' + pool_id + try: + _rest_wrapper(self.rest_client.call('GET', + resource, + None, + None), [200]) + except Exception: + return False + return True + + def _create_workflow(self, wf_name, wf_template_name, + create_workflow_params=None): + """Create a WF if it doesn't exists yet.""" + if not self.workflow_templates_exists: + self._verify_workflow_templates() + if not self._workflow_exists(wf_name): + if not create_workflow_params: + create_workflow_params = {} + resource = '/api/workflowTemplate/%s?name=%s' % ( + wf_template_name, wf_name) + params = {'parameters': create_workflow_params} + response = _rest_wrapper(self.rest_client.call('POST', + resource, + params, + TEMPLATE_HEADER)) + LOG.debug(_('create_workflow response: %s'), str(response)) + + def _verify_workflow_templates(self): + """Verify the existence of workflows on vDirect server.""" + workflows = {self.l2_l3_wf_name: + False, self.l4_wf_name: False} + resource = '/api/workflowTemplate' + response = _rest_wrapper(self.rest_client.call('GET', + resource, + None, + None), [200]) + for wf in workflows.keys(): + for wf_template in response: + if wf == wf_template['name']: + workflows[wf] = True + break + for wf, found in workflows.items(): + if not found: + raise r_exc.WorkflowMissing(workflow=wf) + self.workflow_templates_exists = True + + def _create_port_for_pip(self, context, tenant_id, port_name, subnet): + """Creates port on subnet, returns that port's IP.""" + + # create 
port, we just want any IP allocated to the port based on the + # network id, so setting 'fixed_ips' to ATTR_NOT_SPECIFIED + port_data = { + 'tenant_id': tenant_id, + 'name': port_name, + 'network_id': subnet, + 'mac_address': attributes.ATTR_NOT_SPECIFIED, + 'admin_state_up': False, + 'device_id': '', + 'device_owner': 'neutron:' + constants.LOADBALANCER, + 'fixed_ips': attributes.ATTR_NOT_SPECIFIED + } + port = self.plugin._core_plugin.create_port(context, + {'port': port_data}) + return port['fixed_ips'][0]['ip_address'] + + +class vDirectRESTClient: + """REST server proxy to Radware vDirect.""" + + def __init__(self, + server='localhost', + secondary_server=None, + user=None, + password=None, + port=2189, + ssl=True, + timeout=5000, + base_uri=''): + self.server = server + self.secondary_server = secondary_server + self.port = port + self.ssl = ssl + self.base_uri = base_uri + self.timeout = timeout + if user and password: + self.auth = base64.encodestring('%s:%s' % (user, password)) + self.auth = self.auth.replace('\n', '') + else: + raise r_exc.AuthenticationMissing() + + debug_params = {'server': self.server, + 'sec_server': self.secondary_server, + 'port': self.port, + 'ssl': self.ssl} + LOG.debug(_('vDirectRESTClient:init server=%(server)s, ' + 'secondary server=%(sec_server)s, ' + 'port=%(port)d, ' + 'ssl=%(ssl)r'), debug_params) + + def _flip_servers(self): + LOG.warning(_('Fliping servers. 
Current is: %(server)s, ' + 'switching to %(secondary)s'), + {'server': self.server, + 'secondary': self.secondary_server}) + self.server, self.secondary_server = self.secondary_server, self.server + + def _recover(self, action, resource, data, headers, binary=False): + if self.server and self.secondary_server: + self._flip_servers() + resp = self._call(action, resource, data, + headers, binary) + return resp + else: + LOG.exception(_('REST client is not able to recover ' + 'since only one vDirect server is ' + 'configured.')) + return -1, None, None, None + + def call(self, action, resource, data, headers, binary=False): + resp = self._call(action, resource, data, headers, binary) + if resp[RESP_STATUS] == -1: + LOG.warning(_('vDirect server is not responding (%s).'), + self.server) + return self._recover(action, resource, data, headers, binary) + elif resp[RESP_STATUS] in (301, 307): + LOG.warning(_('vDirect server is not active (%s).'), + self.server) + return self._recover(action, resource, data, headers, binary) + else: + return resp + + @call_log.log + def _call(self, action, resource, data, headers, binary=False): + if resource.startswith('http'): + uri = resource + else: + uri = self.base_uri + resource + if binary: + body = data + else: + body = json.dumps(data) + + debug_data = 'binary' if binary else body + debug_data = debug_data if debug_data else 'EMPTY' + if not headers: + headers = {'Authorization': 'Basic %s' % self.auth} + else: + headers['Authorization'] = 'Basic %s' % self.auth + conn = None + if self.ssl: + conn = httplib.HTTPSConnection( + self.server, self.port, timeout=self.timeout) + if conn is None: + LOG.error(_('vdirectRESTClient: Could not establish HTTPS ' + 'connection')) + return 0, None, None, None + else: + conn = httplib.HTTPConnection( + self.server, self.port, timeout=self.timeout) + if conn is None: + LOG.error(_('vdirectRESTClient: Could not establish HTTP ' + 'connection')) + return 0, None, None, None + + try: + 
conn.request(action, uri, body, headers) + response = conn.getresponse() + respstr = response.read() + respdata = respstr + try: + respdata = json.loads(respstr) + except ValueError: + # response was not JSON, ignore the exception + pass + ret = (response.status, response.reason, respstr, respdata) + except Exception as e: + log_dict = {'action': action, 'e': e} + LOG.error(_('vdirectRESTClient: %(action)s failure, %(e)r'), + log_dict) + ret = -1, None, None, None + conn.close() + return ret + + +class OperationAttributes: + + """Holds operation attributes. + + The parameter 'post_op_function' (if supplied) is a function that takes + one boolean argument, specifying the success of the operation + + """ + + def __init__(self, + operation_url, + object_graph, + lbaas_entity=None, + entity_id=None, + delete=False, + post_op_function=None): + self.operation_url = operation_url + self.object_graph = object_graph + self.delete = delete + self.lbaas_entity = lbaas_entity + self.entity_id = entity_id + self.creation_time = time.time() + self.post_op_function = post_op_function + + def __repr__(self): + items = ("%s = %r" % (k, v) for k, v in self.__dict__.items()) + return "<%s: {%s}>" % (self.__class__.__name__, ', '.join(items)) + + +class OperationCompletionHandler(threading.Thread): + + """Update DB with operation status or delete the entity from DB.""" + + def __init__(self, queue, rest_client, plugin): + threading.Thread.__init__(self) + self.queue = queue + self.rest_client = rest_client + self.plugin = plugin + self.stoprequest = threading.Event() + self.opers_to_handle_before_rest = 0 + + def join(self, timeout=None): + self.stoprequest.set() + super(OperationCompletionHandler, self).join(timeout) + + def handle_operation_completion(self, oper): + result = self.rest_client.call('GET', + oper.operation_url, + None, + None) + completed = result[RESP_DATA]['complete'] + reason = result[RESP_REASON], + description = result[RESP_STR] + if completed: + # operation is 
done - update the DB with the status + # or delete the entire graph from DB + success = result[RESP_DATA]['success'] + sec_to_completion = time.time() - oper.creation_time + debug_data = {'oper': oper, + 'sec_to_completion': sec_to_completion, + 'success': success} + LOG.debug(_('Operation %(oper)s is completed after ' + '%(sec_to_completion)d sec ' + 'with success status: %(success)s :'), + debug_data) + db_status = None + if not success: + # failure - log it and set the return ERROR as DB state + if reason or description: + msg = 'Reason:%s. Description:%s' % (reason, description) + else: + msg = "unknown" + error_params = {"operation": oper, "msg": msg} + LOG.error(_('Operation %(operation)s failed. Reason: %(msg)s'), + error_params) + db_status = constants.ERROR + else: + if oper.delete: + _remove_object_from_db(self.plugin, oper) + else: + db_status = constants.ACTIVE + + if db_status: + _update_vip_graph_status(self.plugin, oper, db_status) + + OperationCompletionHandler._run_post_op_function(success, oper) + + return completed + + def run(self): + while not self.stoprequest.isSet(): + try: + oper = self.queue.get(timeout=1) + + # Get the current queue size (N) and set the counter with it. + # Handle N operations with no intermission. + # Once N operations handles, get the size again and repeat. 
+ if self.opers_to_handle_before_rest <= 0: + self.opers_to_handle_before_rest = self.queue.qsize() + 1 + + LOG.debug('Operation consumed from the queue: ' + + str(oper)) + # check the status - if oper is done: update the db , + # else push the oper again to the queue + if not self.handle_operation_completion(oper): + LOG.debug(_('Operation %s is not completed yet..') % oper) + # Not completed - push to the queue again + self.queue.put_nowait(oper) + + self.queue.task_done() + self.opers_to_handle_before_rest -= 1 + + # Take one second rest before start handling + # new operations or operations handled before + if self.opers_to_handle_before_rest <= 0: + time.sleep(1) + + except Queue.Empty: + continue + except Exception: + m = _("Exception was thrown inside OperationCompletionHandler") + LOG.exception(m) + + @staticmethod + def _run_post_op_function(success, oper): + if oper.post_op_function: + log_data = {'func': oper.post_op_function, 'oper': oper} + try: + oper.post_op_function(success) + LOG.debug(_('Post-operation function ' + '%(func)r completed ' + 'after operation %(oper)r'), + log_data) + except Exception: + with excutils.save_and_reraise_exception(): + LOG.exception(_('Post-operation function ' + '%(func)r failed ' + 'after operation %(oper)r'), + log_data) + + +def _rest_wrapper(response, success_codes=[202]): + """Wrap a REST call and make sure a valid status is returned.""" + if not response: + raise r_exc.RESTRequestFailure( + status=-1, + reason="Unknown", + description="Unknown", + success_codes=success_codes + ) + elif response[RESP_STATUS] not in success_codes: + raise r_exc.RESTRequestFailure( + status=response[RESP_STATUS], + reason=response[RESP_REASON], + description=response[RESP_STR], + success_codes=success_codes + ) + else: + return response[RESP_DATA] + + +def _make_pip_name_from_vip(vip): + """Standard way of making PIP name based on VIP ID.""" + return 'pip_' + vip['id'] + + +def _update_vip_graph_status(plugin, oper, status): + 
"""Update the status + + Of all the Vip object graph + or a specific entity in the graph. + + """ + + ctx = context.get_admin_context(load_admin_roles=False) + + LOG.debug(_('_update: %s '), oper) + if oper.lbaas_entity == lb_db.PoolMonitorAssociation: + plugin.update_pool_health_monitor(ctx, + oper.entity_id, + oper.object_graph['pool'], + status) + elif oper.entity_id: + plugin.update_status(ctx, + oper.lbaas_entity, + oper.entity_id, + status) + else: + _update_vip_graph_status_cascade(plugin, + oper.object_graph, + ctx, status) + + +def _update_vip_graph_status_cascade(plugin, ids, ctx, status): + plugin.update_status(ctx, + lb_db.Vip, + ids['vip'], + status) + plugin.update_status(ctx, + lb_db.Pool, + ids['pool'], + status) + for member_id in ids['members']: + plugin.update_status(ctx, + lb_db.Member, + member_id, + status) + for hm_id in ids['health_monitors']: + plugin.update_pool_health_monitor(ctx, + hm_id, + ids['pool'], + status) + + +def _remove_object_from_db(plugin, oper): + """Remove a specific entity from db.""" + LOG.debug(_('_remove_object_from_db %s'), str(oper)) + + ctx = context.get_admin_context(load_admin_roles=False) + + if oper.lbaas_entity == lb_db.PoolMonitorAssociation: + plugin._delete_db_pool_health_monitor(ctx, + oper.entity_id, + oper.object_graph['pool']) + elif oper.lbaas_entity == lb_db.Member: + plugin._delete_db_member(ctx, oper.entity_id) + elif oper.lbaas_entity == lb_db.Vip: + plugin._delete_db_vip(ctx, oper.entity_id) + elif oper.lbaas_entity == lb_db.Pool: + plugin._delete_db_pool(ctx, oper.entity_id) + else: + raise r_exc.UnsupportedEntityOperation( + operation='Remove from DB', entity=oper.lbaas_entity + ) + +TRANSLATION_DEFAULTS = {'session_persistence_type': 'none', + 'session_persistence_cookie_name': 'none', + 'url_path': '/', + 'http_method': 'GET', + 'expected_codes': '200', + 'subnet': '255.255.255.255', + 'mask': '255.255.255.255', + 'gw': '255.255.255.255', + } +VIP_PROPERTIES = ['address', 'protocol_port', 
'protocol', 'connection_limit', + 'admin_state_up', 'session_persistence_type', + 'session_persistence_cookie_name'] +POOL_PROPERTIES = ['protocol', 'lb_method', 'admin_state_up'] +MEMBER_PROPERTIES = ['address', 'protocol_port', 'weight', 'admin_state_up', + 'subnet', 'mask', 'gw'] +HEALTH_MONITOR_PROPERTIES = ['type', 'delay', 'timeout', 'max_retries', + 'admin_state_up', 'url_path', 'http_method', + 'expected_codes', 'id'] + + +def _translate_vip_object_graph(extended_vip, plugin, context): + """Translate the extended vip + + translate to a structure that can be + understood by the workflow. + + """ + def _create_key(prefix, property_name): + return prefix + '_' + property_name + '_array' + + def _trans_prop_name(prop_name): + if prop_name == 'id': + return 'uuid' + else: + return prop_name + + def get_ids(extended_vip): + ids = {} + ids['vip'] = extended_vip['id'] + ids['pool'] = extended_vip['pool']['id'] + ids['members'] = [m['id'] for m in extended_vip['members']] + ids['health_monitors'] = [ + hm['id'] for hm in extended_vip['health_monitors'] + ] + return ids + + trans_vip = {} + LOG.debug('Vip graph to be translated: ' + str(extended_vip)) + for vip_property in VIP_PROPERTIES: + trans_vip['vip_' + vip_property] = extended_vip.get( + vip_property, TRANSLATION_DEFAULTS.get(vip_property)) + for pool_property in POOL_PROPERTIES: + trans_vip['pool_' + pool_property] = extended_vip[ + 'pool'][pool_property] + for member_property in MEMBER_PROPERTIES: + trans_vip[_create_key('member', member_property)] = [] + + two_leg = (extended_vip['pip_address'] != extended_vip['address']) + if two_leg: + pool_subnet = plugin._core_plugin.get_subnet( + context, extended_vip['pool']['subnet_id']) + + for member in extended_vip['members']: + if member['status'] != constants.PENDING_DELETE: + if (two_leg and netaddr.IPAddress(member['address']) + not in netaddr.IPNetwork(pool_subnet['cidr'])): + member_ports = plugin._core_plugin.get_ports( + context, + filters={'fixed_ips': 
{'ip_address': [member['address']]}, + 'tenant_id': [extended_vip['tenant_id']]}) + if len(member_ports) == 1: + member_subnet = plugin._core_plugin.get_subnet( + context, + member_ports[0]['fixed_ips'][0]['subnet_id']) + member_network = netaddr.IPNetwork(member_subnet['cidr']) + member['subnet'] = str(member_network.network) + member['mask'] = str(member_network.netmask) + else: + member['subnet'] = member['address'] + + member['gw'] = pool_subnet['gateway_ip'] + + for member_property in MEMBER_PROPERTIES: + trans_vip[_create_key('member', member_property)].append( + member.get(member_property, + TRANSLATION_DEFAULTS.get(member_property))) + + for hm_property in HEALTH_MONITOR_PROPERTIES: + trans_vip[ + _create_key('hm', _trans_prop_name(hm_property))] = [] + for hm in extended_vip['health_monitors']: + hm_pool = plugin.get_pool_health_monitor(context, + hm['id'], + extended_vip['pool']['id']) + if hm_pool['status'] != constants.PENDING_DELETE: + for hm_property in HEALTH_MONITOR_PROPERTIES: + value = hm.get(hm_property, + TRANSLATION_DEFAULTS.get(hm_property)) + trans_vip[_create_key('hm', + _trans_prop_name(hm_property))].append(value) + ids = get_ids(extended_vip) + trans_vip['__ids__'] = ids + for key in ['pip_address']: + if key in extended_vip: + trans_vip[key] = extended_vip[key] + LOG.debug('Translated Vip graph: ' + str(trans_vip)) + return trans_vip diff --git a/neutron/services/loadbalancer/drivers/radware/exceptions.py b/neutron/services/loadbalancer/drivers/radware/exceptions.py new file mode 100644 index 000000000..eec22a252 --- /dev/null +++ b/neutron/services/loadbalancer/drivers/radware/exceptions.py @@ -0,0 +1,44 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013 Radware LTD. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Evgeny Fedoruk, Radware + + +from neutron.common import exceptions + + +class RadwareLBaasException(exceptions.NeutronException): + message = _('An unknown exception occurred in Radware LBaaS provider.') + + +class AuthenticationMissing(RadwareLBaasException): + message = _('vDirect user/password missing. ' + 'Specify in configuration file, under [radware] section') + + +class WorkflowMissing(RadwareLBaasException): + message = _('Workflow %(workflow)s is missing on vDirect server. ' + 'Upload missing workflow') + + +class RESTRequestFailure(RadwareLBaasException): + message = _('REST request failed with status %(status)s. ' + 'Reason: %(reason)s, Description: %(description)s. ' + 'Success status codes are %(success_codes)s') + + +class UnsupportedEntityOperation(RadwareLBaasException): + message = _('%(operation)s operation is not supported for %(entity)s.') diff --git a/neutron/services/loadbalancer/plugin.py b/neutron/services/loadbalancer/plugin.py new file mode 100644 index 000000000..4e992e085 --- /dev/null +++ b/neutron/services/loadbalancer/plugin.py @@ -0,0 +1,326 @@ +# +# Copyright 2013 Radware LTD. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Avishay Balderman, Radware + +from neutron.api.v2 import attributes as attrs +from neutron.common import exceptions as n_exc +from neutron import context +from neutron.db import api as qdbapi +from neutron.db.loadbalancer import loadbalancer_db as ldb +from neutron.db import servicetype_db as st_db +from neutron.extensions import loadbalancer +from neutron.openstack.common import excutils +from neutron.openstack.common import log as logging +from neutron.plugins.common import constants +from neutron.services.loadbalancer import agent_scheduler +from neutron.services import provider_configuration as pconf +from neutron.services import service_base + +LOG = logging.getLogger(__name__) + + +class LoadBalancerPlugin(ldb.LoadBalancerPluginDb, + agent_scheduler.LbaasAgentSchedulerDbMixin): + """Implementation of the Neutron Loadbalancer Service Plugin. + + This class manages the workflow of LBaaS request/response. + Most DB related works are implemented in class + loadbalancer_db.LoadBalancerPluginDb. 
+ """ + supported_extension_aliases = ["lbaas", + "lbaas_agent_scheduler", + "service-type"] + + # lbaas agent notifiers to handle agent update operations; + # can be updated by plugin drivers while loading; + # will be extracted by neutron manager when loading service plugins; + agent_notifiers = {} + + def __init__(self): + """Initialization for the loadbalancer service plugin.""" + + qdbapi.register_models() + self.service_type_manager = st_db.ServiceTypeManager.get_instance() + self._load_drivers() + + def _load_drivers(self): + """Loads plugin-drivers specified in configuration.""" + self.drivers, self.default_provider = service_base.load_drivers( + constants.LOADBALANCER, self) + + # we're at the point when extensions are not loaded yet + # so prevent policy from being loaded + ctx = context.get_admin_context(load_admin_roles=False) + # stop service in case provider was removed, but resources were not + self._check_orphan_pool_associations(ctx, self.drivers.keys()) + + def _check_orphan_pool_associations(self, context, provider_names): + """Checks remaining associations between pools and providers. + + If admin has not undeployed resources with provider that was deleted + from configuration, neutron service is stopped. Admin must delete + resources prior to removing providers from configuration. 
+ """ + pools = self.get_pools(context) + lost_providers = set([pool['provider'] for pool in pools + if pool['provider'] not in provider_names]) + # resources are left without provider - stop the service + if lost_providers: + msg = _("Delete associated loadbalancer pools before " + "removing providers %s") % list(lost_providers) + LOG.exception(msg) + raise SystemExit(1) + + def _get_driver_for_provider(self, provider): + if provider in self.drivers: + return self.drivers[provider] + # raise if not associated (should never be reached) + raise n_exc.Invalid(_("Error retrieving driver for provider %s") % + provider) + + def _get_driver_for_pool(self, context, pool_id): + pool = self.get_pool(context, pool_id) + try: + return self.drivers[pool['provider']] + except KeyError: + raise n_exc.Invalid(_("Error retrieving provider for pool %s") % + pool_id) + + def get_plugin_type(self): + return constants.LOADBALANCER + + def get_plugin_description(self): + return "Neutron LoadBalancer Service Plugin" + + def create_vip(self, context, vip): + v = super(LoadBalancerPlugin, self).create_vip(context, vip) + driver = self._get_driver_for_pool(context, v['pool_id']) + driver.create_vip(context, v) + return v + + def update_vip(self, context, id, vip): + if 'status' not in vip['vip']: + vip['vip']['status'] = constants.PENDING_UPDATE + old_vip = self.get_vip(context, id) + v = super(LoadBalancerPlugin, self).update_vip(context, id, vip) + driver = self._get_driver_for_pool(context, v['pool_id']) + driver.update_vip(context, old_vip, v) + return v + + def _delete_db_vip(self, context, id): + # proxy the call until plugin inherits from DBPlugin + super(LoadBalancerPlugin, self).delete_vip(context, id) + + def delete_vip(self, context, id): + self.update_status(context, ldb.Vip, + id, constants.PENDING_DELETE) + v = self.get_vip(context, id) + driver = self._get_driver_for_pool(context, v['pool_id']) + driver.delete_vip(context, v) + + def _get_provider_name(self, context, pool): 
+ if ('provider' in pool and + pool['provider'] != attrs.ATTR_NOT_SPECIFIED): + provider_name = pconf.normalize_provider_name(pool['provider']) + self.validate_provider(provider_name) + return provider_name + else: + if not self.default_provider: + raise pconf.DefaultServiceProviderNotFound( + service_type=constants.LOADBALANCER) + return self.default_provider + + def create_pool(self, context, pool): + provider_name = self._get_provider_name(context, pool['pool']) + p = super(LoadBalancerPlugin, self).create_pool(context, pool) + + self.service_type_manager.add_resource_association( + context, + constants.LOADBALANCER, + provider_name, p['id']) + #need to add provider name to pool dict, + #because provider was not known to db plugin at pool creation + p['provider'] = provider_name + driver = self.drivers[provider_name] + try: + driver.create_pool(context, p) + except loadbalancer.NoEligibleBackend: + # that should catch cases when backend of any kind + # is not available (agent, appliance, etc) + self.update_status(context, ldb.Pool, + p['id'], constants.ERROR, + "No eligible backend") + raise loadbalancer.NoEligibleBackend(pool_id=p['id']) + return p + + def update_pool(self, context, id, pool): + if 'status' not in pool['pool']: + pool['pool']['status'] = constants.PENDING_UPDATE + old_pool = self.get_pool(context, id) + p = super(LoadBalancerPlugin, self).update_pool(context, id, pool) + driver = self._get_driver_for_provider(p['provider']) + driver.update_pool(context, old_pool, p) + return p + + def _delete_db_pool(self, context, id): + # proxy the call until plugin inherits from DBPlugin + # rely on uuid uniqueness: + try: + with context.session.begin(subtransactions=True): + self.service_type_manager.del_resource_associations( + context, [id]) + super(LoadBalancerPlugin, self).delete_pool(context, id) + except Exception: + # that should not happen + # if it's still a case - something goes wrong + # log the error and mark the pool as ERROR + 
LOG.error(_('Failed to delete pool %s, putting it in ERROR state'), + id) + with excutils.save_and_reraise_exception(): + self.update_status(context, ldb.Pool, + id, constants.ERROR) + + def delete_pool(self, context, id): + # check for delete conditions and update the status + # within a transaction to avoid a race + with context.session.begin(subtransactions=True): + self.update_status(context, ldb.Pool, + id, constants.PENDING_DELETE) + self._ensure_pool_delete_conditions(context, id) + p = self.get_pool(context, id) + driver = self._get_driver_for_provider(p['provider']) + driver.delete_pool(context, p) + + def create_member(self, context, member): + m = super(LoadBalancerPlugin, self).create_member(context, member) + driver = self._get_driver_for_pool(context, m['pool_id']) + driver.create_member(context, m) + return m + + def update_member(self, context, id, member): + if 'status' not in member['member']: + member['member']['status'] = constants.PENDING_UPDATE + old_member = self.get_member(context, id) + m = super(LoadBalancerPlugin, self).update_member(context, id, member) + driver = self._get_driver_for_pool(context, m['pool_id']) + driver.update_member(context, old_member, m) + return m + + def _delete_db_member(self, context, id): + # proxy the call until plugin inherits from DBPlugin + super(LoadBalancerPlugin, self).delete_member(context, id) + + def delete_member(self, context, id): + self.update_status(context, ldb.Member, + id, constants.PENDING_DELETE) + m = self.get_member(context, id) + driver = self._get_driver_for_pool(context, m['pool_id']) + driver.delete_member(context, m) + + def _validate_hm_parameters(self, delay, timeout): + if delay < timeout: + raise loadbalancer.DelayOrTimeoutInvalid() + + def create_health_monitor(self, context, health_monitor): + new_hm = health_monitor['health_monitor'] + self._validate_hm_parameters(new_hm['delay'], new_hm['timeout']) + + hm = super(LoadBalancerPlugin, self).create_health_monitor( + context, + 
health_monitor + ) + return hm + + def update_health_monitor(self, context, id, health_monitor): + new_hm = health_monitor['health_monitor'] + old_hm = self.get_health_monitor(context, id) + delay = new_hm.get('delay', old_hm.get('delay')) + timeout = new_hm.get('timeout', old_hm.get('timeout')) + self._validate_hm_parameters(delay, timeout) + + hm = super(LoadBalancerPlugin, self).update_health_monitor( + context, + id, + health_monitor + ) + + with context.session.begin(subtransactions=True): + qry = context.session.query( + ldb.PoolMonitorAssociation + ).filter_by(monitor_id=hm['id']).join(ldb.Pool) + for assoc in qry: + driver = self._get_driver_for_pool(context, assoc['pool_id']) + driver.update_pool_health_monitor(context, old_hm, + hm, assoc['pool_id']) + return hm + + def _delete_db_pool_health_monitor(self, context, hm_id, pool_id): + super(LoadBalancerPlugin, self).delete_pool_health_monitor(context, + hm_id, + pool_id) + + def _delete_db_health_monitor(self, context, id): + super(LoadBalancerPlugin, self).delete_health_monitor(context, id) + + def create_pool_health_monitor(self, context, health_monitor, pool_id): + retval = super(LoadBalancerPlugin, self).create_pool_health_monitor( + context, + health_monitor, + pool_id + ) + monitor_id = health_monitor['health_monitor']['id'] + hm = self.get_health_monitor(context, monitor_id) + driver = self._get_driver_for_pool(context, pool_id) + driver.create_pool_health_monitor(context, hm, pool_id) + return retval + + def delete_pool_health_monitor(self, context, id, pool_id): + self.update_pool_health_monitor(context, id, pool_id, + constants.PENDING_DELETE) + hm = self.get_health_monitor(context, id) + driver = self._get_driver_for_pool(context, pool_id) + driver.delete_pool_health_monitor(context, hm, pool_id) + + def stats(self, context, pool_id): + driver = self._get_driver_for_pool(context, pool_id) + stats_data = driver.stats(context, pool_id) + # if we get something from the driver - + # update the db 
and return the value from db + # else - return what we have in db + if stats_data: + super(LoadBalancerPlugin, self).update_pool_stats( + context, + pool_id, + stats_data + ) + return super(LoadBalancerPlugin, self).stats(context, + pool_id) + + def populate_vip_graph(self, context, vip): + """Populate the vip with: pool, members, healthmonitors.""" + + pool = self.get_pool(context, vip['pool_id']) + vip['pool'] = pool + vip['members'] = [self.get_member(context, member_id) + for member_id in pool['members']] + vip['health_monitors'] = [self.get_health_monitor(context, hm_id) + for hm_id in pool['health_monitors']] + return vip + + def validate_provider(self, provider): + if provider not in self.drivers: + raise pconf.ServiceProviderNotFound( + provider=provider, service_type=constants.LOADBALANCER) diff --git a/neutron/services/metering/__init__.py b/neutron/services/metering/__init__.py new file mode 100644 index 000000000..82a447213 --- /dev/null +++ b/neutron/services/metering/__init__.py @@ -0,0 +1,15 @@ +# Copyright (C) 2013 eNovance SAS +# +# Author: Sylvain Afchain +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
diff --git a/neutron/services/metering/agents/__init__.py b/neutron/services/metering/agents/__init__.py new file mode 100644 index 000000000..82a447213 --- /dev/null +++ b/neutron/services/metering/agents/__init__.py @@ -0,0 +1,15 @@ +# Copyright (C) 2013 eNovance SAS +# +# Author: Sylvain Afchain +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/neutron/services/metering/agents/metering_agent.py b/neutron/services/metering/agents/metering_agent.py new file mode 100644 index 000000000..80883f41b --- /dev/null +++ b/neutron/services/metering/agents/metering_agent.py @@ -0,0 +1,297 @@ +# Copyright (C) 2013 eNovance SAS +# +# Author: Sylvain Afchain +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 

import sys
import time

import eventlet
eventlet.monkey_patch()

from oslo.config import cfg

from neutron.agent.common import config
from neutron.agent import rpc as agent_rpc
from neutron.common import config as common_config
from neutron.common import constants as constants
from neutron.common import rpc as n_rpc
from neutron.common import rpc_compat
from neutron.common import topics
from neutron.common import utils
from neutron import context
from neutron import manager
from neutron.openstack.common import importutils
from neutron.openstack.common import log as logging
from neutron.openstack.common import loopingcall
from neutron.openstack.common import periodic_task
from neutron.openstack.common import service
from neutron import service as neutron_service


LOG = logging.getLogger(__name__)


class MeteringPluginRpc(rpc_compat.RpcProxy):
    """RPC client side the agent uses to talk to the metering plugin."""

    BASE_RPC_API_VERSION = '1.0'

    def __init__(self, host):
        # NOTE(review): 'host' is not stored here; subclasses must set
        # self.host before any RPC call is made (MeteringAgent does).
        super(MeteringPluginRpc,
              self).__init__(topic=topics.METERING_AGENT,
                             default_version=self.BASE_RPC_API_VERSION)

    def _get_sync_data_metering(self, context):
        """Ask the plugin for all routers and their metering labels.

        Returns the plugin's reply, or None when the RPC call fails;
        the failure is logged and treated as "nothing to sync".
        """
        try:
            return self.call(context,
                             self.make_msg('get_sync_data_metering',
                                           host=self.host),
                             topic=topics.METERING_PLUGIN)
        except Exception:
            LOG.exception(_("Failed synchronizing routers"))


class MeteringAgent(MeteringPluginRpc, manager.Manager):
    """Agent-side manager that measures and reports per-label traffic."""

    Opts = [
        cfg.StrOpt('driver',
                   default='neutron.services.metering.drivers.noop.'
                   'noop_driver.NoopMeteringDriver',
                   help=_("Metering driver")),
        cfg.IntOpt('measure_interval', default=30,
                   help=_("Interval between two metering measures")),
        cfg.IntOpt('report_interval', default=300,
                   help=_("Interval between two metering reports")),
    ]

    def __init__(self, host, conf=None):
        self.conf = conf or cfg.CONF
        self._load_drivers()
        self.root_helper = config.get_root_helper(self.conf)
        self.context = context.get_admin_context_without_session()
        # label_id -> accumulated {'pkts', 'bytes', 'time', 'first_update',
        # 'last_update'}.  One dict is shared by the measuring loop and the
        # report/purge logic; a second, never-updated 'metering_info' dict
        # previously made purging a no-op.
        self.metering_infos = {}
        self.metering_loop = loopingcall.FixedIntervalLoopingCall(
            self._metering_loop
        )
        measure_interval = self.conf.measure_interval
        self.last_report = 0
        self.metering_loop.start(interval=measure_interval)
        self.host = host

        # label_id -> tenant_id, rebuilt on every measure pass
        self.label_tenant_id = {}
        # router_id -> router dict, as last synced from the plugin
        self.routers = {}
        super(MeteringAgent, self).__init__(host=host)

    def _load_drivers(self):
        """Load the metering driver named in the configuration."""
        LOG.info(_("Loading Metering driver %s"), self.conf.driver)
        if not self.conf.driver:
            raise SystemExit(_('A metering driver must be specified'))
        self.metering_driver = importutils.import_object(
            self.conf.driver, self, self.conf)

    def _metering_notification(self):
        """Emit one 'l3.meter' notification per label, then zero counters."""
        # the notifier is loop-invariant; fetch it once
        notifier = n_rpc.get_notifier('metering')
        for label_id, info in self.metering_infos.items():
            data = {'label_id': label_id,
                    'tenant_id': self.label_tenant_id.get(label_id),
                    'pkts': info['pkts'],
                    'bytes': info['bytes'],
                    'time': info['time'],
                    'first_update': info['first_update'],
                    'last_update': info['last_update'],
                    'host': self.host}

            LOG.debug(_("Send metering report: %s"), data)
            notifier.info(self.context, 'l3.meter', data)
            info['pkts'] = 0
            info['bytes'] = 0
            info['time'] = 0

    def _purge_metering_info(self):
        """Forget labels that saw no update for a whole report interval."""
        deadline = int(time.time()) - self.conf.report_interval
        # iterate over a snapshot: entries are removed while scanning.
        # (The previous condition 'last_update > now + interval' could never
        # hold, and it scanned the wrong dict, so nothing was ever purged.)
        for label_id, info in list(self.metering_infos.items()):
            if info['last_update'] < deadline:
                del self.metering_infos[label_id]

    def _add_metering_info(self, label_id, pkts, num_bytes):
        """Accumulate one traffic sample for a label and return its entry."""
        ts = int(time.time())
        info = self.metering_infos.get(label_id, {'bytes': 0,
                                                  'pkts': 0,
                                                  'time': 0,
                                                  'first_update': ts,
                                                  'last_update': ts})
        info['bytes'] += num_bytes
        info['pkts'] += pkts
        # 'time' tracks the measured span covered by the counters
        info['time'] += ts - info['last_update']
        info['last_update'] = ts

        self.metering_infos[label_id] = info

        return info

    def _add_metering_infos(self):
        """Take one traffic measurement for every known router/label."""
        self.label_tenant_id = {}
        for router in self.routers.values():
            tenant_id = router['tenant_id']
            labels = router.get(constants.METERING_LABEL_KEY, [])
            for label in labels:
                self.label_tenant_id[label['id']] = tenant_id

        accs = self._get_traffic_counters(self.context, self.routers.values())
        if not accs:
            return

        for label_id, acc in accs.items():
            self._add_metering_info(label_id, acc['pkts'], acc['bytes'])

    def _metering_loop(self):
        """Periodic task: measure; report and purge when the interval is up."""
        self._add_metering_infos()

        ts = int(time.time())
        delta = ts - self.last_report

        report_interval = self.conf.report_interval
        if delta > report_interval:
            self._metering_notification()
            self._purge_metering_info()
            self.last_report = ts

    @utils.synchronized('metering-agent')
    def _invoke_driver(self, context, meterings, func_name):
        """Call func_name on the loaded driver, logging driver failures."""
        try:
            return getattr(self.metering_driver, func_name)(context, meterings)
        except AttributeError:
            LOG.exception(_("Driver %(driver)s does not implement %(func)s"),
                          {'driver': self.conf.driver,
                           'func': func_name})
        except RuntimeError:
            LOG.exception(_("Driver %(driver)s:%(func)s runtime error"),
                          {'driver': self.conf.driver,
                           'func': func_name})

    @periodic_task.periodic_task(run_immediately=True)
    def _sync_routers_task(self, context):
        """Periodically resync the router cache from the plugin."""
        routers = self._get_sync_data_metering(self.context)
        if not routers:
            return
        self._update_routers(context, routers)

    def router_deleted(self, context, router_id):
        """RPC callback: take a last measurement, then drop the router."""
        self._add_metering_infos()

        if router_id in self.routers:
            del self.routers[router_id]

        return self._invoke_driver(context, router_id,
                                   'remove_router')

    def routers_updated(self, context, routers=None):
        """RPC callback: refresh the local router cache.

        When no routers are supplied, a full sync is requested from
        the plugin.
        """
        if not routers:
            routers = self._get_sync_data_metering(self.context)
            if not routers:
                return
        self._update_routers(context, routers)

    def _update_routers(self, context, routers):
        for router in routers:
            self.routers[router['id']] = router

        return self._invoke_driver(context, routers,
                                   'update_routers')

    def _get_traffic_counters(self, context, routers):
        LOG.debug(_("Get router traffic counters"))
        return self._invoke_driver(context, routers, 'get_traffic_counters')

    def update_metering_label_rules(self, context, routers):
        LOG.debug(_("Update metering rules from agent"))
        return self._invoke_driver(context, routers,
                                   'update_metering_label_rules')

    def add_metering_label(self, context, routers):
        LOG.debug(_("Creating a metering label from agent"))
        return self._invoke_driver(context, routers,
                                   'add_metering_label')

    def remove_metering_label(self, context, routers):
        # measure once more so traffic seen up to now is not lost
        self._add_metering_infos()

        LOG.debug(_("Delete a metering label from agent"))
        return self._invoke_driver(context, routers,
                                   'remove_metering_label')


class MeteringAgentWithStateReport(MeteringAgent):
    """MeteringAgent that periodically reports its state to the server."""

    def __init__(self, host, conf=None):
        super(MeteringAgentWithStateReport, self).__init__(host=host,
                                                           conf=conf)
        self.state_rpc = agent_rpc.PluginReportStateAPI(topics.PLUGIN)
        self.agent_state = {
            'binary': 'neutron-metering-agent',
            'host': host,
            'topic': topics.METERING_AGENT,
            'configurations': {
                'metering_driver': self.conf.driver,
                'measure_interval':
                self.conf.measure_interval,
                'report_interval': self.conf.report_interval
            },
            'start_flag': True,
            'agent_type': constants.AGENT_TYPE_METERING}
        report_interval = cfg.CONF.AGENT.report_interval
        self.use_call = True
        if report_interval:
            self.heartbeat = loopingcall.FixedIntervalLoopingCall(
                self._report_state)
            self.heartbeat.start(interval=report_interval)

    def _report_state(self):
        """Heartbeat: push agent_state; disable itself on older servers."""
        try:
            self.state_rpc.report_state(self.context, self.agent_state,
                                        self.use_call)
            # 'start_flag' is only meaningful on the first report
            self.agent_state.pop('start_flag', None)
            self.use_call = False
        except AttributeError:
            # This means the server does not support report_state
            LOG.warn(_("Neutron server does not support state report."
                       " State report for this agent will be disabled."))
            self.heartbeat.stop()
            return
        except Exception:
            LOG.exception(_("Failed reporting state!"))

    def agent_updated(self, context, payload):
        LOG.info(_("agent_updated by server side %s!"), payload)


def main():
    """Entry point for the neutron-metering-agent service."""
    conf = cfg.CONF
    conf.register_opts(MeteringAgent.Opts)
    config.register_agent_state_opts_helper(conf)
    config.register_root_helper(conf)
    common_config.init(sys.argv[1:])
    config.setup_logging(conf)
    server = neutron_service.Service.create(
        binary='neutron-metering-agent',
        topic=topics.METERING_AGENT,
        report_interval=cfg.CONF.AGENT.report_interval,
        manager='neutron.services.metering.agents.'
                'metering_agent.MeteringAgentWithStateReport')
    service.launch(server).wait()
# Copyright (C) 2013 eNovance SAS
#
# Author: Sylvain Afchain
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import abc

import six


@six.add_metaclass(abc.ABCMeta)
class MeteringAbstractDriver(object):
    """Interface every metering driver must implement.

    A driver receives router dicts (carrying their metering labels) and
    is responsible for wiring up the mechanism that actually counts
    traffic, and for reporting those counters back.
    """

    def __init__(self, plugin, conf):
        pass

    @abc.abstractmethod
    def update_routers(self, context, routers):
        """Synchronize the driver's state with the given routers."""

    @abc.abstractmethod
    def remove_router(self, context, router_id):
        """Forget a router that has been deleted."""

    @abc.abstractmethod
    def update_metering_label_rules(self, context, routers):
        """Re-apply the metering label rules for the given routers."""

    @abc.abstractmethod
    def add_metering_label(self, context, routers):
        """Start metering the labels attached to the given routers."""

    @abc.abstractmethod
    def remove_metering_label(self, context, routers):
        """Stop metering the labels attached to the given routers."""

    @abc.abstractmethod
    def get_traffic_counters(self, context, routers):
        """Return accumulated traffic counters keyed by metering label."""
compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/neutron/services/metering/drivers/iptables/iptables_driver.py b/neutron/services/metering/drivers/iptables/iptables_driver.py new file mode 100644 index 000000000..3eb17c246 --- /dev/null +++ b/neutron/services/metering/drivers/iptables/iptables_driver.py @@ -0,0 +1,284 @@ +# Copyright (C) 2013 eNovance SAS +# +# Author: Sylvain Afchain +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 

from oslo.config import cfg

from neutron.agent.common import config
from neutron.agent.linux import interface
from neutron.agent.linux import iptables_manager
from neutron.common import constants as constants
from neutron.common import log
from neutron.openstack.common import importutils
from neutron.openstack.common import log as logging
from neutron.services.metering.drivers import abstract_driver


LOG = logging.getLogger(__name__)
NS_PREFIX = 'qrouter-'
WRAP_NAME = 'neutron-meter'
EXTERNAL_DEV_PREFIX = 'qg-'
TOP_CHAIN = WRAP_NAME + "-FORWARD"
RULE = '-r-'
LABEL = '-l-'

config.register_interface_driver_opts_helper(cfg.CONF)
config.register_use_namespaces_opts_helper(cfg.CONF)
config.register_root_helper(cfg.CONF)
cfg.CONF.register_opts(interface.OPTS)


class IptablesManagerTransaction(object):
    """Reentrant context manager that batches iptables changes.

    Nested transactions on the same IptablesManager are reference
    counted; the accumulated rules are applied only when the outermost
    transaction exits.
    """

    __transactions = {}

    def __init__(self, im):
        self.im = im

        transaction = self.__transactions.get(im, 0)
        transaction += 1
        self.__transactions[im] = transaction

    def __enter__(self):
        return self.im

    def __exit__(self, exc_type, exc_value, exc_tb):
        transaction = self.__transactions.get(self.im)
        if transaction == 1:
            # outermost level: flush everything queued so far
            self.im.apply()
            del self.__transactions[self.im]
        else:
            transaction -= 1
            self.__transactions[self.im] = transaction


class RouterWithMetering(object):
    """Per-router state: namespace, iptables manager and wired labels."""

    def __init__(self, conf, router):
        self.conf = conf
        self.id = router['id']
        self.router = router
        self.root_helper = config.get_root_helper(self.conf)
        self.ns_name = NS_PREFIX + self.id if conf.use_namespaces else None
        self.iptables_manager = iptables_manager.IptablesManager(
            root_helper=self.root_helper,
            namespace=self.ns_name,
            binary_name=WRAP_NAME)
        # label_id -> label dict, for labels currently wired into iptables
        self.metering_labels = {}


class IptablesMeteringDriver(abstract_driver.MeteringAbstractDriver):
    """Metering driver counting traffic with per-label iptables chains."""

    def __init__(self, plugin, conf):
        self.plugin = plugin
        self.conf = conf or cfg.CONF
        # router_id -> RouterWithMetering
        self.routers = {}

        if not self.conf.interface_driver:
            raise SystemExit(_('An interface driver must be specified'))
        LOG.info(_("Loading interface driver %s"), self.conf.interface_driver)
        self.driver = importutils.import_object(self.conf.interface_driver,
                                                self.conf)

    def _update_router(self, router):
        """Create or refresh the cached RouterWithMetering for a router."""
        rm = self.routers.get(router['id'])
        if rm is None:
            # Only build the per-router state (and its IptablesManager)
            # when the router is new; the original code constructed one on
            # every call because dict.get evaluates its default eagerly.
            rm = RouterWithMetering(self.conf, router)
        rm.router = router
        self.routers[rm.id] = rm

        return rm

    @log.log
    def update_routers(self, context, routers):
        """Sync driver state: drop vanished routers, rewire changed ones."""
        # Disassociate routers the plugin no longer reports.  The cached
        # router dict is used here: 'routers' does not contain it.  (The
        # original loop referenced a leaked comprehension variable and so
        # disassociated an arbitrary router instead of the missing one.)
        router_ids = set(router['id'] for router in routers)
        for rm in list(self.routers.values()):
            if rm.id not in router_ids:
                self._process_disassociate_metering_label(rm.router)

        for router in routers:
            old_gw_port_id = None
            old_rm = self.routers.get(router['id'])
            if old_rm:
                old_gw_port_id = old_rm.router['gw_port_id']
            gw_port_id = router['gw_port_id']

            # a gateway change invalidates the device-based rules, so the
            # labels are rewired from scratch
            if gw_port_id != old_gw_port_id:
                if old_rm:
                    with IptablesManagerTransaction(old_rm.iptables_manager):
                        self._process_disassociate_metering_label(router)
                        if gw_port_id:
                            self._process_associate_metering_label(router)
                elif gw_port_id:
                    self._process_associate_metering_label(router)

    @log.log
    def remove_router(self, context, router_id):
        # NOTE(review): only the local cache entry is dropped here; the
        # router's iptables chains are assumed to vanish with the router's
        # namespace — confirm for the non-namespace configuration.
        if router_id in self.routers:
            del self.routers[router_id]

    def get_external_device_name(self, port_id):
        """Return the gateway device name for a port, length-truncated."""
        return (EXTERNAL_DEV_PREFIX + port_id)[:self.driver.DEV_NAME_LEN]

    def _process_metering_label_rules(self, rm, rules, label_chain,
                                      rules_chain):
        """Translate metering label rules into iptables rules."""
        im = rm.iptables_manager
        ext_dev = self.get_external_device_name(rm.router['gw_port_id'])
        if not ext_dev:
            return

        for rule in rules:
            remote_ip = rule['remote_ip_prefix']

            # match on the gateway device: input unless the rule is egress
            if rule['direction'] == 'egress':
                ipt_dir = '-o ' + ext_dev
            else:
                ipt_dir = '-i ' + ext_dev

            if rule['excluded']:
                # excluded traffic short-circuits out of the rules chain
                ipt_rule = ipt_dir + ' -d ' + remote_ip + ' -j RETURN'
                im.ipv4['filter'].add_rule(rules_chain, ipt_rule, wrap=False,
                                           top=True)
            else:
                ipt_rule = ipt_dir + ' -d ' + remote_ip + ' -j ' + label_chain
                im.ipv4['filter'].add_rule(rules_chain, ipt_rule,
                                           wrap=False, top=False)

    def _process_associate_metering_label(self, router):
        """Create the label/rules chains for every label on the router."""
        rm = self._update_router(router)

        with IptablesManagerTransaction(rm.iptables_manager):
            labels = router.get(constants.METERING_LABEL_KEY, [])
            for label in labels:
                label_id = label['id']

                # counting chain: traffic jumped here is what gets metered
                label_chain = iptables_manager.get_chain_name(WRAP_NAME +
                                                              LABEL + label_id,
                                                              wrap=False)
                rm.iptables_manager.ipv4['filter'].add_chain(label_chain,
                                                             wrap=False)

                # matching chain: holds the per-rule filters
                rules_chain = iptables_manager.get_chain_name(WRAP_NAME +
                                                              RULE + label_id,
                                                              wrap=False)
                rm.iptables_manager.ipv4['filter'].add_chain(rules_chain,
                                                             wrap=False)
                rm.iptables_manager.ipv4['filter'].add_rule(TOP_CHAIN, '-j ' +
                                                            rules_chain,
                                                            wrap=False)

                # empty rule so the label chain accumulates packet counters
                rm.iptables_manager.ipv4['filter'].add_rule(label_chain,
                                                            '',
                                                            wrap=False)

                rules = label.get('rules')
                if rules:
                    self._process_metering_label_rules(rm, rules,
                                                       label_chain,
                                                       rules_chain)

                rm.metering_labels[label_id] = label

    def _process_disassociate_metering_label(self, router):
        """Remove the chains of every label wired for this router."""
        rm = self.routers.get(router['id'])
        if not rm:
            return

        with IptablesManagerTransaction(rm.iptables_manager):
            labels = router.get(constants.METERING_LABEL_KEY, [])
            for label in labels:
                label_id = label['id']
                if label_id not in rm.metering_labels:
                    continue

                label_chain = iptables_manager.get_chain_name(WRAP_NAME +
                                                              LABEL + label_id,
                                                              wrap=False)
                rules_chain = iptables_manager.get_chain_name(WRAP_NAME +
                                                              RULE + label_id,
                                                              wrap=False)

                rm.iptables_manager.ipv4['filter'].remove_chain(label_chain,
                                                                wrap=False)
                rm.iptables_manager.ipv4['filter'].remove_chain(rules_chain,
                                                                wrap=False)

                del rm.metering_labels[label_id]

    @log.log
    def add_metering_label(self, context, routers):
        for router in routers:
            self._process_associate_metering_label(router)

    @log.log
    def update_metering_label_rules(self, context, routers):
        for router in routers:
            self._update_metering_label_rules(router)

    def _update_metering_label_rules(self, router):
        """Rebuild the rules chains of one router's labels from scratch."""
        rm = self.routers.get(router['id'])
        if not rm:
            return

        with IptablesManagerTransaction(rm.iptables_manager):
            labels = router.get(constants.METERING_LABEL_KEY, [])
            for label in labels:
                label_id = label['id']

                label_chain = iptables_manager.get_chain_name(WRAP_NAME +
                                                              LABEL + label_id,
                                                              wrap=False)
                rules_chain = iptables_manager.get_chain_name(WRAP_NAME +
                                                              RULE + label_id,
                                                              wrap=False)
                # flush and re-add rather than diffing the rule sets
                rm.iptables_manager.ipv4['filter'].empty_chain(rules_chain,
                                                               wrap=False)

                rules = label.get('rules')
                if rules:
                    self._process_metering_label_rules(rm, rules,
                                                       label_chain,
                                                       rules_chain)

    @log.log
    def remove_metering_label(self, context, routers):
        for router in routers:
            self._process_disassociate_metering_label(router)

    @log.log
    def get_traffic_counters(self, context, routers):
        """Read (and zero) the label chain counters; sum them per label."""
        accs = {}
        for router in routers:
            rm = self.routers.get(router['id'])
            if not rm:
                continue

            for label_id, label in rm.metering_labels.items():
                chain = iptables_manager.get_chain_name(WRAP_NAME + LABEL +
                                                        label_id, wrap=False)

                chain_acc = rm.iptables_manager.get_traffic_counters(
                    chain, wrap=False, zero=True)

                if not chain_acc:
                    continue

                acc = accs.get(label_id, {'pkts': 0, 'bytes': 0})

                acc['pkts'] += chain_acc['pkts']
                acc['bytes'] += chain_acc['bytes']

                accs[label_id] = acc

        return accs
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/neutron/services/metering/drivers/noop/noop_driver.py b/neutron/services/metering/drivers/noop/noop_driver.py new file mode 100644 index 000000000..d3f5e7df4 --- /dev/null +++ b/neutron/services/metering/drivers/noop/noop_driver.py @@ -0,0 +1,45 @@ +# Copyright (C) 2013 eNovance SAS +# +# Author: Sylvain Afchain +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 

from neutron.common import log
from neutron.services.metering.drivers import abstract_driver


class NoopMeteringDriver(abstract_driver.MeteringAbstractDriver):
    """Driver that satisfies the metering interface without doing anything.

    Used as the default when no real metering backend is configured;
    every operation is a no-op (the @log.log decorator still records
    each call).
    """

    @log.log
    def update_routers(self, context, routers):
        pass

    @log.log
    def remove_router(self, context, router_id):
        pass

    @log.log
    def update_metering_label_rules(self, context, routers):
        pass

    @log.log
    def add_metering_label(self, context, routers):
        pass

    @log.log
    def remove_metering_label(self, context, routers):
        pass

    @log.log
    def get_traffic_counters(self, context, routers):
        pass

from neutron.api.rpc.agentnotifiers import metering_rpc_agent_api
from neutron.common import rpc_compat
from neutron.common import topics
from neutron.db.metering import metering_db
from neutron.db.metering import metering_rpc


class MeteringPlugin(metering_db.MeteringDbMixin):
    """Implementation of the Neutron Metering Service Plugin."""

    supported_extension_aliases = ["metering"]

    def __init__(self):
        super(MeteringPlugin, self).__init__()

        # server side: answer metering sync requests from the agents
        self.endpoints = [metering_rpc.MeteringRpcCallbacks(self)]

        self.conn = rpc_compat.create_connection(new=True)
        self.conn.create_consumer(
            topics.METERING_PLUGIN, self.endpoints, fanout=False)
        self.conn.consume_in_threads()

        # client side: push label/rule changes out to the agents
        self.meter_rpc = metering_rpc_agent_api.MeteringAgentNotifyAPI()

    def _notify_rules_updated(self, context):
        """Push the current metering state to the agents after a rule change."""
        data = self.get_sync_data_metering(context)
        self.meter_rpc.update_metering_label_rules(context, data)

    def create_metering_label(self, context, metering_label):
        """Create a label in the DB, then announce it to the agents."""
        label = super(MeteringPlugin, self).create_metering_label(
            context, metering_label)

        data = self.get_sync_data_metering(context)
        self.meter_rpc.add_metering_label(context, data)

        return label

    def delete_metering_label(self, context, label_id):
        """Delete a label; sync data is captured before the DB delete."""
        data = self.get_sync_data_metering(context, label_id)
        label = super(MeteringPlugin, self).delete_metering_label(
            context, label_id)

        self.meter_rpc.remove_metering_label(context, data)

        return label

    def create_metering_label_rule(self, context, metering_label_rule):
        """Create a rule in the DB, then refresh the rules on the agents."""
        rule = super(MeteringPlugin, self).create_metering_label_rule(
            context, metering_label_rule)

        self._notify_rules_updated(context)

        return rule

    def delete_metering_label_rule(self, context, rule_id):
        """Delete a rule from the DB, then refresh the rules on the agents."""
        rule = super(MeteringPlugin, self).delete_metering_label_rule(
            context, rule_id)

        self._notify_rules_updated(context)

        return rule
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo.config import cfg

from neutron.common import exceptions as n_exc
from neutron.openstack.common import log as logging
from neutron.plugins.common import constants

LOG = logging.getLogger(__name__)


serviceprovider_opts = [
    cfg.MultiStrOpt('service_provider', default=[],
                    help=_('Defines providers for advanced services '
                           'using the format: '
                           '::[:default]'))
]

cfg.CONF.register_opts(serviceprovider_opts, 'service_providers')


# Global-scope helper that should be used in service APIs.
def normalize_provider_name(name):
    """Return the canonical (lower-case) form of a provider name."""
    return name.lower()


def parse_service_provider_opt():
    """Parse service definition opts and return the result.

    Each ``service_provider`` config entry has the format
    ``<service_type>:<name>:<driver>[:default]``.

    :returns: list of dicts with keys 'service_type', 'name', 'driver'
        and 'default' (bool).
    :raises n_exc.Invalid: on any malformed or disallowed entry.
    """
    def validate_name(name):
        if len(name) > 255:
            raise n_exc.Invalid(
                _("Provider name is limited by 255 characters: %s") % name)

    svc_providers_opt = cfg.CONF.service_providers.service_provider
    res = []
    for prov_def in svc_providers_opt:
        split = prov_def.split(':')
        try:
            # Fewer than three fields raises ValueError on unpacking.
            svc_type, name, driver = split[:3]
        except ValueError:
            raise n_exc.Invalid(_("Invalid service provider format"))
        validate_name(name)
        name = normalize_provider_name(name)
        default = False
        if len(split) == 4 and split[3]:
            if split[3] == 'default':
                default = True
            else:
                msg = (_("Invalid provider format. "
                         "Last part should be 'default' or empty: %s") %
                       prov_def)
                LOG.error(msg)
                raise n_exc.Invalid(msg)
        if svc_type not in constants.ALLOWED_SERVICES:
            msg = (_("Service type '%(svc_type)s' is not allowed, "
                     "allowed types: %(allowed)s") %
                   {'svc_type': svc_type,
                    'allowed': constants.ALLOWED_SERVICES})
            LOG.error(msg)
            raise n_exc.Invalid(msg)
        res.append({'service_type': svc_type,
                    'name': name,
                    'driver': driver,
                    'default': default})
    return res


class ServiceProviderNotFound(n_exc.InvalidInput):
    message = _("Service provider '%(provider)s' could not be found "
                "for service type %(service_type)s")


class DefaultServiceProviderNotFound(n_exc.InvalidInput):
    message = _("Service type %(service_type)s does not have a default "
                "service provider")


class ServiceProviderAlreadyAssociated(n_exc.Conflict):
    message = _("Resource '%(resource_id)s' is already associated with "
                "provider '%(provider)s' for service type '%(service_type)s'")


class ProviderConfiguration(object):
    """In-memory registry of service providers.

    Providers are keyed by the ``(service_type, name)`` tuple; values hold
    the driver class path and the default flag.
    """

    def __init__(self, prov_data):
        """Build the registry from parsed provider dicts.

        :param prov_data: iterable of dicts as produced by
            parse_service_provider_opt().
        :raises n_exc.Invalid: on duplicate drivers, duplicate defaults,
            or duplicate (service_type, name) pairs.
        """
        self.providers = {}
        for prov in prov_data:
            self.add_provider(prov)

    def _ensure_driver_unique(self, driver):
        """Raise if ``driver`` is already used by a registered provider."""
        # Only the values matter here; keys are irrelevant to this check.
        for v in self.providers.values():
            if v['driver'] == driver:
                msg = (_("Driver %s is not unique across providers") %
                       driver)
                # LOG.error, not LOG.exception: we are not inside an
                # exception handler, so there is no traceback to attach.
                LOG.error(msg)
                raise n_exc.Invalid(msg)

    def _ensure_default_unique(self, service_type, default):
        """Raise if ``service_type`` already has a default provider.

        Renamed parameter from ``type`` to avoid shadowing the builtin.
        """
        if not default:
            return
        for k, v in self.providers.items():
            if k[0] == service_type and v['default']:
                msg = _("Multiple default providers "
                        "for service %s") % service_type
                LOG.error(msg)
                raise n_exc.Invalid(msg)

    def add_provider(self, provider):
        """Register one provider dict, enforcing uniqueness invariants."""
        self._ensure_driver_unique(provider['driver'])
        self._ensure_default_unique(provider['service_type'],
                                    provider['default'])
        provider_type = (provider['service_type'], provider['name'])
        if provider_type in self.providers:
            msg = (_("Multiple providers specified for service "
                     "%s") % provider['service_type'])
            LOG.error(msg)
            raise n_exc.Invalid(msg)
        self.providers[provider_type] = {'driver': provider['driver'],
                                         'default': provider['default']}

    def _check_entry(self, k, v, filters):
        """Return True if entry (k, v) matches the query ``filters``."""
        # Small helper to deal with query filters.
        if not filters:
            return True
        for index, key in enumerate(['service_type', 'name']):
            if key in filters:
                if k[index] not in filters[key]:
                    return False

        for key in ['driver', 'default']:
            if key in filters:
                if v[key] not in filters[key]:
                    return False
        return True

    def _fields(self, resource, fields):
        """Project ``resource`` onto the requested ``fields`` (if any)."""
        if fields:
            return {key: item for key, item in resource.items()
                    if key in fields}
        return resource

    def get_service_providers(self, filters=None, fields=None):
        """Return provider dicts matching ``filters``, trimmed to ``fields``."""
        return [self._fields({'service_type': k[0],
                              'name': k[1],
                              'driver': v['driver'],
                              'default': v['default']},
                             fields)
                for k, v in self.providers.items()
                if self._check_entry(k, v, filters)]
import abc

import six

from neutron.api import extensions
from neutron.db import servicetype_db as sdb
from neutron.openstack.common import excutils
from neutron.openstack.common import importutils
from neutron.openstack.common import log as logging
from neutron.services import provider_configuration as pconf

LOG = logging.getLogger(__name__)


@six.add_metaclass(abc.ABCMeta)
class ServicePluginBase(extensions.PluginInterface):
    """Define base interface for any Advanced Service plugin."""
    # Subclasses list the API extension aliases they implement.
    supported_extension_aliases = []

    @abc.abstractmethod
    def get_plugin_type(self):
        """Return one of predefined service types.

        See neutron/plugins/common/constants.py
        """
        pass

    @abc.abstractmethod
    def get_plugin_name(self):
        """Return a symbolic name for the plugin.

        Each service plugin should have a symbolic name. This name
        will be used, for instance, by service definitions in service types
        """
        pass

    @abc.abstractmethod
    def get_plugin_description(self):
        """Return string description of the plugin."""
        pass


def load_drivers(service_type, plugin):
    """Loads drivers for specific service.

    Passes plugin instance to driver's constructor.

    :param service_type: one of the service type constants; used to look
        up configured providers.
    :param plugin: plugin instance handed to each driver's constructor.
    :returns: tuple ``(drivers, default_provider)`` where ``drivers`` maps
        provider name to the instantiated driver object, and
        ``default_provider`` is the default provider's name or None.
    :raises SystemExit: when no providers are configured for the service.
    """
    service_type_manager = sdb.ServiceTypeManager.get_instance()
    providers = (service_type_manager.
                 get_service_providers(
                     None,
                     filters={'service_type': [service_type]})
                 )
    if not providers:
        # Fatal: a service plugin without any provider cannot operate.
        msg = (_("No providers specified for '%s' service, exiting") %
               service_type)
        LOG.error(msg)
        raise SystemExit(1)

    drivers = {}
    for provider in providers:
        try:
            drivers[provider['name']] = importutils.import_object(
                provider['driver'], plugin
            )
            LOG.debug(_("Loaded '%(provider)s' provider for service "
                        "%(service_type)s"),
                      {'provider': provider['driver'],
                       'service_type': service_type})
        except ImportError:
            # Log with traceback, then re-raise the original ImportError.
            with excutils.save_and_reraise_exception():
                LOG.exception(_("Error loading provider '%(provider)s' for "
                                "service %(service_type)s"),
                              {'provider': provider['driver'],
                               'service_type': service_type})

    # A missing default provider is a legitimate configuration; report it
    # at info level only.
    default_provider = None
    try:
        provider = service_type_manager.get_default_service_provider(
            None, service_type)
        default_provider = provider['name']
    except pconf.DefaultServiceProviderNotFound:
        LOG.info(_("Default provider is not specified for service type %s"),
                 service_type)

    return drivers, default_provider
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013, Nachi Ueno, NTT I3, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg

from neutron.agent import l3_agent
from neutron.extensions import vpnaas
from neutron.openstack.common import importutils

vpn_agent_opts = [
    cfg.MultiStrOpt(
        'vpn_device_driver',
        default=['neutron.services.vpn.device_drivers.'
                 'ipsec.OpenSwanDriver'],
        help=_("The vpn device drivers Neutron will use")),
]
cfg.CONF.register_opts(vpn_agent_opts, 'vpnagent')


class VPNAgent(l3_agent.L3NATAgentWithStateReport):
    """VPNAgent class which can handle vpn service drivers.

    Extends the L3 NAT agent: every router lifecycle event is forwarded
    to each configured VPN device driver so drivers can manage their
    per-router VPN processes.
    """
    def __init__(self, host, conf=None):
        super(VPNAgent, self).__init__(host=host, conf=conf)
        self.setup_device_drivers(host)

    def setup_device_drivers(self, host):
        """Instantiate the configured VPN device drivers.

        :param host: hostname; needed by the drivers for RPC.
            Each driver manages its own device processes and communicates
            with the server-side service plugin over RPC on a
            device-specific RPC topic.
        :returns: None
        :raises vpnaas.DeviceDriverImportError: if a configured driver
            class cannot be imported.
        """
        device_drivers = cfg.CONF.vpnagent.vpn_device_driver
        self.devices = []
        for device_driver in device_drivers:
            try:
                self.devices.append(
                    importutils.import_object(device_driver, self, host))
            except ImportError:
                raise vpnaas.DeviceDriverImportError(
                    device_driver=device_driver)

    def get_namespace(self, router_id):
        """Get namespace of router.

        :param router_id: router id
        :returns: namespace string, or None if the router does not exist.
        """
        router_info = self.router_info.get(router_id)
        if not router_info:
            return
        return router_info.ns_name

    def add_nat_rule(self, router_id, chain, rule, top=False):
        """Add nat rule in namespace.

        :param router_id: router id
        :param chain: a string of chain name
        :param rule: a string of rule
        :param top: if top is true, the rule
            will be placed on the top of chain
            Note: if the router does not exist, this method does nothing.
        """
        router_info = self.router_info.get(router_id)
        if not router_info:
            return
        router_info.iptables_manager.ipv4['nat'].add_rule(
            chain, rule, top=top)

    def remove_nat_rule(self, router_id, chain, rule, top=False):
        """Remove nat rule in namespace.

        :param router_id: router id
        :param chain: a string of chain name
        :param rule: a string of rule
        :param top: unused; kept to match add_nat_rule's signature.
        """
        router_info = self.router_info.get(router_id)
        if not router_info:
            return
        router_info.iptables_manager.ipv4['nat'].remove_rule(
            chain, rule, top=top)

    def iptables_apply(self, router_id):
        """Apply iptables rules for the router.

        :param router_id: router id
            This method does nothing if the router does not exist.
        """
        router_info = self.router_info.get(router_id)
        if not router_info:
            return
        router_info.iptables_manager.apply()

    def _router_added(self, router_id, router):
        """Router added event.

        This method overrides the parent class method and notifies
        every device driver of the new router.
        :param router_id: id of added router
        :param router: dict of router
        """
        super(VPNAgent, self)._router_added(router_id, router)
        for device in self.devices:
            device.create_router(router_id)

    def _router_removed(self, router_id):
        """Router removed event.

        This method overrides the parent class method and notifies
        every device driver of the removal.
        :param router_id: id of removed router
        """
        super(VPNAgent, self)._router_removed(router_id)
        for device in self.devices:
            device.destroy_router(router_id)

    def _process_routers(self, routers, all_routers=False):
        """Router sync event.

        This method overrides the parent class method and asks every
        device driver to re-sync against the given routers.
        :param routers: list of routers
        """
        super(VPNAgent, self)._process_routers(routers, all_routers)
        for device in self.devices:
            device.sync(self.context, routers)


def main():
    # Entry point: run the standard L3 agent loop with VPNAgent as manager.
    l3_agent.main(
        manager='neutron.services.vpn.agent.VPNAgent')
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013, Nachi Ueno, NTT I3, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

# RPC topic names shared between the VPN service-side drivers (running in
# the Neutron server) and the VPN device drivers (running in the agent).

# Generic IPSec (e.g. OpenSwan) driver/agent topics.
IPSEC_DRIVER_TOPIC = 'ipsec_driver'
IPSEC_AGENT_TOPIC = 'ipsec_agent'
# Cisco CSR-specific IPSec driver/agent topics.
CISCO_IPSEC_DRIVER_TOPIC = 'cisco_csr_ipsec_driver'
CISCO_IPSEC_AGENT_TOPIC = 'cisco_csr_ipsec_agent'
import abc

import six


@six.add_metaclass(abc.ABCMeta)
class DeviceDriver(object):
    """Abstract base class for VPN device drivers.

    Concrete drivers are instantiated by the VPN agent and receive router
    lifecycle events plus periodic sync requests.
    """

    def __init__(self, agent, host):
        # Base class keeps no state; concrete drivers receive the owning
        # agent and the hostname (used for RPC topics).
        pass

    @abc.abstractmethod
    def sync(self, context, processes):
        """Reconcile driver state against the given processes/routers."""
        pass

    @abc.abstractmethod
    def create_router(self, process_id):
        """Handle creation of the router identified by process_id."""
        pass

    @abc.abstractmethod
    def destroy_router(self, process_id):
        """Handle removal of the router identified by process_id."""
        pass
import time

import netaddr
import requests
from requests import exceptions as r_exc

from neutron.openstack.common import jsonutils
from neutron.openstack.common import log as logging


# Default REST request timeout (seconds) when the caller supplies none.
TIMEOUT = 20.0

LOG = logging.getLogger(__name__)
HEADER_CONTENT_TYPE_JSON = {'content-type': 'application/json'}
URL_BASE = 'https://%(host)s/api/v1/%(resource)s'


def make_route_id(cidr, interface):
    """Build ID that will be used to identify route for later deletion."""
    net = netaddr.IPNetwork(cidr)
    return '%(network)s_%(prefix)s_%(interface)s' % {
        'network': net.network,
        'prefix': net.prefixlen,
        'interface': interface}


class CsrRestClient(object):

    """REST CsrRestClient for accessing the Cisco Cloud Services Router."""

    def __init__(self, host, tunnel_ip, username, password, timeout=None):
        self.host = host
        self.tunnel_ip = tunnel_ip
        self.auth = (username, password)
        # Auth token for the CSR; obtained lazily in authenticate().
        self.token = None
        # Status code of the most recent request.
        self.status = requests.codes.OK
        self.timeout = timeout
        # NOTE(review): max_tries is set but not referenced anywhere in
        # this module — confirm whether callers use it or it is dead.
        self.max_tries = 5
        self.session = requests.Session()

    def _response_info_for(self, response, method):
        """Return contents or location from response.

        For a POST or GET with a 200 response, the response content
        is returned.

        For a POST with a 201 response, return the header's location,
        which contains the identifier for the created resource.

        If there is an error, return the response content, so that
        it can be used in error processing ('error-code', 'error-message',
        and 'detail' fields).

        Returns None when no case applies (e.g. DELETE with 204).
        """
        if method in ('POST', 'GET') and self.status == requests.codes.OK:
            LOG.debug(_('RESPONSE: %s'), response.json())
            return response.json()
        if method == 'POST' and self.status == requests.codes.CREATED:
            return response.headers.get('location', '')
        if self.status >= requests.codes.BAD_REQUEST and response.content:
            # NOTE(review): substring test against response.content assumes
            # str (Python 2); on Python 3 content is bytes — verify before
            # porting.
            if 'error-code' in response.content:
                content = jsonutils.loads(response.content)
                LOG.debug("Error response content %s", content)
                return content

    def _request(self, method, url, **kwargs):
        """Perform REST request and save response info.

        Maps transport-level failures onto synthetic HTTP status codes in
        self.status (timeout -> 408, connect failure -> 404, other -> 500).
        """
        try:
            LOG.debug(_("%(method)s: Request for %(resource)s payload: "
                        "%(payload)s"),
                      {'method': method.upper(), 'resource': url,
                       'payload': kwargs.get('data')})
            start_time = time.time()
            # SECURITY NOTE: verify=False disables TLS certificate
            # verification for the CSR's self-signed cert; consider making
            # this configurable.
            response = self.session.request(method, url, verify=False,
                                            timeout=self.timeout, **kwargs)
            LOG.debug(_("%(method)s Took %(time).2f seconds to process"),
                      {'method': method.upper(),
                       'time': time.time() - start_time})
        except (r_exc.Timeout, r_exc.SSLError) as te:
            # Should never see SSLError, unless requests package is old (<2.0)
            timeout_val = 0.0 if self.timeout is None else self.timeout
            LOG.warning(_("%(method)s: Request timeout%(ssl)s "
                          "(%(timeout).3f sec) for CSR(%(host)s)"),
                        {'method': method,
                         'timeout': timeout_val,
                         'ssl': '(SSLError)'
                         if isinstance(te, r_exc.SSLError) else '',
                         'host': self.host})
            self.status = requests.codes.REQUEST_TIMEOUT
        except r_exc.ConnectionError:
            LOG.exception(_("%(method)s: Unable to connect to CSR(%(host)s)"),
                          {'method': method, 'host': self.host})
            self.status = requests.codes.NOT_FOUND
        except Exception as e:
            LOG.error(_("%(method)s: Unexpected error for CSR (%(host)s): "
                        "%(error)s"),
                      {'method': method, 'host': self.host, 'error': e})
            self.status = requests.codes.INTERNAL_SERVER_ERROR
        else:
            self.status = response.status_code
            LOG.debug(_("%(method)s: Completed [%(status)s]"),
                      {'method': method, 'status': self.status})
            return self._response_info_for(response, method)

    def authenticate(self):
        """Obtain a token to use for subsequent CSR REST requests.

        This is called when there is no token yet, or if the token has expired
        and attempts to use it resulted in an UNAUTHORIZED REST response.

        :returns: True on success; None (falsy) on failure.
        """

        url = URL_BASE % {'host': self.host, 'resource': 'auth/token-services'}
        headers = {'Content-Length': '0',
                   'Accept': 'application/json'}
        headers.update(HEADER_CONTENT_TYPE_JSON)
        LOG.debug(_("%(auth)s with CSR %(host)s"),
                  {'auth': 'Authenticating' if self.token is None
                   else 'Reauthenticating', 'host': self.host})
        self.token = None
        response = self._request("POST", url, headers=headers, auth=self.auth)
        if response:
            self.token = response['token-id']
            LOG.debug(_("Successfully authenticated with CSR %s"), self.host)
            return True
        LOG.error(_("Failed authentication with CSR %(host)s [%(status)s]"),
                  {'host': self.host, 'status': self.status})

    def _do_request(self, method, resource, payload=None, more_headers=None,
                    full_url=False):
        """Perform a REST request to a CSR resource.

        If this is the first time interacting with the CSR, a token will
        be obtained. If the request fails, due to an expired token, the
        token will be obtained and the request will be retried once more.
        """

        if self.token is None:
            if not self.authenticate():
                return

        if full_url:
            url = resource
        else:
            url = ('https://%(host)s/api/v1/%(resource)s' %
                   {'host': self.host, 'resource': resource})
        headers = {'Accept': 'application/json', 'X-auth-token': self.token}
        if more_headers:
            headers.update(more_headers)
        if payload:
            payload = jsonutils.dumps(payload)
        response = self._request(method, url, data=payload, headers=headers)
        if self.status == requests.codes.UNAUTHORIZED:
            # Token likely expired: re-authenticate and retry exactly once.
            if not self.authenticate():
                return
            headers['X-auth-token'] = self.token
            response = self._request(method, url, data=payload,
                                     headers=headers)
        if self.status != requests.codes.REQUEST_TIMEOUT:
            return response
        LOG.error(_("%(method)s: Request timeout for CSR(%(host)s)"),
                  {'method': method, 'host': self.host})

    def get_request(self, resource, full_url=False):
        """Perform a REST GET requests for a CSR resource."""
        return self._do_request('GET', resource, full_url=full_url)

    def post_request(self, resource, payload=None):
        """Perform a POST request to a CSR resource."""
        return self._do_request('POST', resource, payload=payload,
                                more_headers=HEADER_CONTENT_TYPE_JSON)

    def put_request(self, resource, payload=None):
        """Perform a PUT request to a CSR resource."""
        return self._do_request('PUT', resource, payload=payload,
                                more_headers=HEADER_CONTENT_TYPE_JSON)

    def delete_request(self, resource):
        """Perform a DELETE request on a CSR resource."""
        return self._do_request('DELETE', resource,
                                more_headers=HEADER_CONTENT_TYPE_JSON)

    def create_ike_policy(self, policy_info):
        """Create an IKE policy, defaulting to v1 with pre-shared keys."""
        base_ike_policy_info = {u'version': u'v1',
                                u'local-auth-method': u'pre-share'}
        base_ike_policy_info.update(policy_info)
        return self.post_request('vpn-svc/ike/policies',
                                 payload=base_ike_policy_info)

    def create_ipsec_policy(self, policy_info):
        """Create an IPSec policy, defaulting to tunnel mode."""
        base_ipsec_policy_info = {u'mode': u'tunnel'}
        base_ipsec_policy_info.update(policy_info)
        return self.post_request('vpn-svc/ipsec/policies',
                                 payload=base_ipsec_policy_info)

    def create_pre_shared_key(self, psk_info):
        return self.post_request('vpn-svc/ike/keyrings', payload=psk_info)

    def create_ipsec_connection(self, connection_info):
        # Note: base settings intentionally override the caller's values
        # (update direction) to force site-to-site IPv4 connections.
        base_conn_info = {u'vpn-type': u'site-to-site',
                          u'ip-version': u'ipv4'}
        connection_info.update(base_conn_info)
        return self.post_request('vpn-svc/site-to-site',
                                 payload=connection_info)

    def configure_ike_keepalive(self, keepalive_info):
        # Forces periodic keepalives regardless of caller's setting.
        base_keepalive_info = {u'periodic': True}
        keepalive_info.update(base_keepalive_info)
        return self.put_request('vpn-svc/ike/keepalive', keepalive_info)

    def create_static_route(self, route_info):
        return self.post_request('routing-svc/static-routes',
                                 payload=route_info)

    def delete_static_route(self, route_id):
        return self.delete_request('routing-svc/static-routes/%s' % route_id)

    def set_ipsec_connection_state(self, tunnel, admin_up=True):
        """Set the IPSec site-to-site connection (tunnel) admin state.

        Note: When a tunnel is created, it will be admin up.
        """
        info = {u'vpn-interface-name': tunnel, u'enabled': admin_up}
        return self.put_request('vpn-svc/site-to-site/%s/state' % tunnel, info)

    def delete_ipsec_connection(self, conn_id):
        return self.delete_request('vpn-svc/site-to-site/%s' % conn_id)

    def delete_ipsec_policy(self, policy_id):
        return self.delete_request('vpn-svc/ipsec/policies/%s' % policy_id)

    def delete_ike_policy(self, policy_id):
        return self.delete_request('vpn-svc/ike/policies/%s' % policy_id)

    def delete_pre_shared_key(self, key_id):
        return self.delete_request('vpn-svc/ike/keyrings/%s' % key_id)

    def read_tunnel_statuses(self):
        """Return [(tunnel_name, status), ...] for active sessions."""
        results = self.get_request('vpn-svc/site-to-site/active/sessions')
        if self.status != requests.codes.OK or not results:
            return []
        tunnels = [(t[u'vpn-interface-name'], t[u'status'])
                   for t in results['items']]
        return tunnels
import abc
import collections
import requests

import netaddr
from oslo.config import cfg
from oslo import messaging
import six

from neutron.common import exceptions
from neutron.common import rpc_compat
from neutron import context as ctx
from neutron.openstack.common import lockutils
from neutron.openstack.common import log as logging
from neutron.openstack.common import loopingcall
from neutron.plugins.common import constants
from neutron.plugins.common import utils as plugin_utils
from neutron.services.vpn.common import topics
from neutron.services.vpn import device_drivers
from neutron.services.vpn.device_drivers import (
    cisco_csr_rest_client as csr_client)


ipsec_opts = [
    cfg.IntOpt('status_check_interval',
               default=60,
               help=_("Status check interval for Cisco CSR IPSec connections"))
]
cfg.CONF.register_opts(ipsec_opts, 'cisco_csr_ipsec')

LOG = logging.getLogger(__name__)

# One undo step for rolling back partially created CSR resources.
RollbackStep = collections.namedtuple('RollbackStep',
                                      ['action', 'resource_id', 'title'])


class CsrResourceCreateFailure(exceptions.NeutronException):
    message = _("Cisco CSR failed to create %(resource)s (%(which)s)")


class CsrAdminStateChangeFailure(exceptions.NeutronException):
    message = _("Cisco CSR failed to change %(tunnel)s admin state to "
                "%(state)s")


class CsrDriverMismatchError(exceptions.NeutronException):
    message = _("Required %(resource)s attribute %(attr)s mapping for Cisco "
                "CSR is missing in device driver")


class CsrUnknownMappingError(exceptions.NeutronException):
    message = _("Device driver does not have a mapping of '%(value)s for "
                "attribute %(attr)s of %(resource)s")


def find_available_csrs_from_config(config_files):
    """Read INI for available Cisco CSRs that driver can use.

    Loads management port, tunnel IP, user, and password information for
    available CSRs from configuration file. Driver will use this info to
    configure VPN connections. The CSR is associated 1:1 with a Neutron
    router. To identify which CSR to use for a VPN service, the public
    (GW) IP of the Neutron router will be used as an index into the CSR
    config info.

    Entries with invalid or missing fields are logged and skipped rather
    than treated as fatal.

    :returns: dict mapping router IP -> CSR connection info dict; empty
        dict if the files could not be parsed.
    :raises cfg.Error: if not all config files could be read.
    """
    multi_parser = cfg.MultiConfigParser()
    LOG.info(_("Scanning config files %s for Cisco CSR configurations"),
             config_files)
    try:
        read_ok = multi_parser.read(config_files)
    except cfg.ParseError as pe:
        LOG.error(_("Config file parse error: %s"), pe)
        return {}

    if len(read_ok) != len(config_files):
        raise cfg.Error(_("Unable to parse config files %s for Cisco CSR "
                          "info") % config_files)
    csrs_found = {}
    for parsed_file in multi_parser.parsed:
        for parsed_item in parsed_file.keys():
            # Sections look like "cisco_csr_rest:<router IP>".
            device_type, sep, for_router = parsed_item.partition(':')
            if device_type.lower() == 'cisco_csr_rest':
                try:
                    netaddr.IPNetwork(for_router)
                except netaddr.core.AddrFormatError:
                    LOG.error(_("Ignoring Cisco CSR configuration entry - "
                                "router IP %s is not valid"), for_router)
                    continue
                entry = parsed_file[parsed_item]
                # Check for missing fields
                try:
                    rest_mgmt_ip = entry['rest_mgmt'][0]
                    tunnel_ip = entry['tunnel_ip'][0]
                    username = entry['username'][0]
                    password = entry['password'][0]
                except KeyError as ke:
                    LOG.error(_("Ignoring Cisco CSR for router %(router)s "
                                "- missing %(field)s setting"),
                              {'router': for_router, 'field': str(ke)})
                    continue
                # Validate fields
                try:
                    timeout = float(entry['timeout'][0])
                except ValueError:
                    LOG.error(_("Ignoring Cisco CSR for router %s - "
                                "timeout is not a floating point number"),
                              for_router)
                    continue
                except KeyError:
                    # Timeout is optional; fall back to the client default.
                    timeout = csr_client.TIMEOUT
                try:
                    netaddr.IPAddress(rest_mgmt_ip)
                except netaddr.core.AddrFormatError:
                    LOG.error(_("Ignoring Cisco CSR for subnet %s - "
                                "REST management is not an IP address"),
                              for_router)
                    continue
                try:
                    netaddr.IPAddress(tunnel_ip)
                except netaddr.core.AddrFormatError:
                    LOG.error(_("Ignoring Cisco CSR for router %s - "
                                "local tunnel is not an IP address"),
                              for_router)
                    continue
                csrs_found[for_router] = {'rest_mgmt': rest_mgmt_ip,
                                          'tunnel_ip': tunnel_ip,
                                          'username': username,
                                          'password': password,
                                          'timeout': timeout}

                LOG.debug(_("Found CSR for router %(router)s: %(info)s"),
                          {'router': for_router,
                           'info': csrs_found[for_router]})
    return csrs_found


class CiscoCsrIPsecVpnDriverApi(rpc_compat.RpcProxy):
    """RPC API for agent to plugin messaging."""

    def get_vpn_services_on_host(self, context, host):
        """Get list of vpnservices on this host.

        The vpnservices including related ipsec_site_connection,
        ikepolicy, ipsecpolicy, and Cisco info on this host.
        """
        return self.call(context,
                         self.make_msg('get_vpn_services_on_host',
                                       host=host),
                         topic=self.topic)

    def update_status(self, context, status):
        """Update status for all VPN services and connections."""
        # cast (fire-and-forget), unlike the blocking call above.
        return self.cast(context,
                         self.make_msg('update_status',
                                       status=status),
                         topic=self.topic)


@six.add_metaclass(abc.ABCMeta)
class CiscoCsrIPsecDriver(device_drivers.DeviceDriver):
    """Cisco CSR VPN Device Driver for IPSec.

    This class is designed for use with L3-agent now.
    However this driver will be used with another agent in future.
    so the use of "Router" is kept minimal now.
    Instead of router_id, we are using process_id in this code.
+ """ + + # history + # 1.0 Initial version + RPC_API_VERSION = '1.0' + + # TODO(ihrachys): we can't use RpcCallback here due to inheritance + # issues + target = messaging.Target(version=RPC_API_VERSION) + + def __init__(self, agent, host): + self.host = host + self.conn = rpc_compat.create_connection(new=True) + context = ctx.get_admin_context_without_session() + node_topic = '%s.%s' % (topics.CISCO_IPSEC_AGENT_TOPIC, self.host) + + self.service_state = {} + + self.endpoints = [self] + self.conn.create_consumer(node_topic, self.endpoints, fanout=False) + self.conn.consume_in_threads() + self.agent_rpc = ( + CiscoCsrIPsecVpnDriverApi(topics.CISCO_IPSEC_DRIVER_TOPIC, '1.0')) + self.periodic_report = loopingcall.FixedIntervalLoopingCall( + self.report_status, context) + self.periodic_report.start( + interval=agent.conf.cisco_csr_ipsec.status_check_interval) + + csrs_found = find_available_csrs_from_config(cfg.CONF.config_file) + if csrs_found: + LOG.info(_("Loaded %(num)d Cisco CSR configuration%(plural)s"), + {'num': len(csrs_found), + 'plural': 's'[len(csrs_found) == 1:]}) + else: + raise SystemExit(_('No Cisco CSR configurations found in: %s') % + cfg.CONF.config_file) + self.csrs = dict([(k, csr_client.CsrRestClient(v['rest_mgmt'], + v['tunnel_ip'], + v['username'], + v['password'], + v['timeout'])) + for k, v in csrs_found.items()]) + + def vpnservice_updated(self, context, **kwargs): + """Handle VPNaaS service driver change notifications.""" + LOG.debug(_("Handling VPN service update notification '%s'"), + kwargs.get('reason', '')) + self.sync(context, []) + + def create_vpn_service(self, service_data): + """Create new entry to track VPN service and its connections.""" + vpn_service_id = service_data['id'] + vpn_service_router = service_data['external_ip'] + self.service_state[vpn_service_id] = CiscoCsrVpnService( + service_data, self.csrs.get(vpn_service_router)) + return self.service_state[vpn_service_id] + + def update_connection(self, context, 
vpn_service_id, conn_data): + """Handle notification for a single IPSec connection.""" + vpn_service = self.service_state[vpn_service_id] + conn_id = conn_data['id'] + conn_is_admin_up = conn_data[u'admin_state_up'] + + if conn_id in vpn_service.conn_state: # Existing connection... + ipsec_conn = vpn_service.conn_state[conn_id] + config_changed = ipsec_conn.check_for_changes(conn_data) + if config_changed: + LOG.debug(_("Update: Existing connection %s changed"), conn_id) + ipsec_conn.delete_ipsec_site_connection(context, conn_id) + ipsec_conn.create_ipsec_site_connection(context, conn_data) + ipsec_conn.conn_info = conn_data + + if ipsec_conn.forced_down: + if vpn_service.is_admin_up and conn_is_admin_up: + LOG.debug(_("Update: Connection %s no longer admin down"), + conn_id) + ipsec_conn.set_admin_state(is_up=True) + ipsec_conn.forced_down = False + else: + if not vpn_service.is_admin_up or not conn_is_admin_up: + LOG.debug(_("Update: Connection %s forced to admin down"), + conn_id) + ipsec_conn.set_admin_state(is_up=False) + ipsec_conn.forced_down = True + else: # New connection... 
def update_service(self, context, service_data):
    """Handle notification for a single VPN Service and its connections.

    Creates or refreshes the tracking entry for the service, snapshots
    the plugin-reported status/admin state, and processes each of the
    service's IPSec connections. Services whose router is not one of
    the configured Cisco CSRs are skipped with an error log.
    """
    vpn_service_id = service_data['id']
    csr_id = service_data['external_ip']
    if csr_id not in self.csrs:
        # Message fix: "it's" -> "its", and balanced the parenthesis
        # around the router ID.
        LOG.error(_("Update: Skipping VPN service %(service)s as its "
                    "router (%(csr_id)s) is not associated with a Cisco "
                    "CSR"), {'service': vpn_service_id, 'csr_id': csr_id})
        return

    if vpn_service_id in self.service_state:
        LOG.debug(_("Update: Existing VPN service %s detected"),
                  vpn_service_id)
        vpn_service = self.service_state[vpn_service_id]
    else:
        LOG.debug(_("Update: New VPN service %s detected"), vpn_service_id)
        vpn_service = self.create_vpn_service(service_data)

    # Clear the dirty flag so the sweep phase keeps this service, and
    # record the plugin's view of status/admin state for reporting.
    vpn_service.is_dirty = False
    vpn_service.connections_removed = False
    vpn_service.last_status = service_data['status']
    vpn_service.is_admin_up = service_data[u'admin_state_up']
    for conn_data in service_data['ipsec_conns']:
        self.update_connection(context, vpn_service_id, conn_data)
    LOG.debug(_("Update: Completed update processing"))
    return vpn_service
+ """ + services_data = self.agent_rpc.get_vpn_services_on_host(context, + self.host) + LOG.debug("Sync updating for %d VPN services", len(services_data)) + vpn_services = [] + for service_data in services_data: + vpn_service = self.update_service(context, service_data) + if vpn_service: + vpn_services.append(vpn_service) + return vpn_services + + def mark_existing_connections_as_dirty(self): + """Mark all existing connections as "dirty" for sync.""" + service_count = 0 + connection_count = 0 + for service_state in self.service_state.values(): + service_state.is_dirty = True + service_count += 1 + for conn_id in service_state.conn_state: + service_state.conn_state[conn_id].is_dirty = True + connection_count += 1 + LOG.debug(_("Mark: %(service)d VPN services and %(conn)d IPSec " + "connections marked dirty"), {'service': service_count, + 'conn': connection_count}) + + def remove_unknown_connections(self, context): + """Remove connections that are not known by service driver.""" + service_count = 0 + connection_count = 0 + for vpn_service_id, vpn_service in self.service_state.items(): + dirty = [c_id for c_id, c in vpn_service.conn_state.items() + if c.is_dirty] + vpn_service.connections_removed = len(dirty) > 0 + for conn_id in dirty: + conn_state = vpn_service.conn_state[conn_id] + conn_state.delete_ipsec_site_connection(context, conn_id) + connection_count += 1 + del vpn_service.conn_state[conn_id] + if vpn_service.is_dirty: + service_count += 1 + del self.service_state[vpn_service_id] + elif dirty: + self.connections_removed = True + LOG.debug(_("Sweep: Removed %(service)d dirty VPN service%(splural)s " + "and %(conn)d dirty IPSec connection%(cplural)s"), + {'service': service_count, 'conn': connection_count, + 'splural': 's'[service_count == 1:], + 'cplural': 's'[connection_count == 1:]}) + + def build_report_for_connections_on(self, vpn_service): + """Create the report fragment for IPSec connections on a service. 
def build_report_for_service(self, vpn_service):
    """Create the report info for a VPN service and its IPSec connections.

    Gather the report fragments for the service's connections; when
    there is nothing to report (no connection changes and no removals),
    return an empty dict so the caller skips this service. Otherwise
    refresh the service status and return the combined report entry.
    """
    conn_report = self.build_report_for_connections_on(vpn_service)
    if not conn_report and not vpn_service.connections_removed:
        # No change on this service - nothing to report.
        return {}
    # Remember whether we are resolving a PENDING_* state before the
    # status is recomputed from the connection states.
    pending_handled = plugin_utils.in_pending_status(
        vpn_service.last_status)
    vpn_service.update_last_status()
    LOG.debug(_("Report: Adding info for VPN service %s"),
              vpn_service.service_id)
    return {u'id': vpn_service.service_id,
            u'status': vpn_service.last_status,
            u'updated_pending_status': pending_handled,
            u'ipsec_site_connections': conn_report}
def conn_status(self, conn_id):
    """Return the last reported status for a connection, or None."""
    connection = self.get_connection(conn_id)
    return connection.last_status if connection else None
def check_for_changes(self, curr_conn):
    """Report whether any watched attribute differs from the saved info."""
    watched = ('mtu', 'psk', 'peer_address', 'peer_cidrs',
               'ike_policy', 'ipsec_policy', 'cisco')
    return any(self.conn_info[attr] != curr_conn[attr]
               for attr in watched)
def create_psk_info(self, psk_id, conn_info):
    """Build the REST payload describing the pre-shared key."""
    key_entry = {u'key': conn_info['psk'],
                 u'encrypted': False,
                 u'peer-address': conn_info['peer_address']}
    return {u'keyring-name': psk_id,
            u'pre-shared-key-list': [key_entry]}
def create_ipsec_policy_info(self, ipsec_policy_id, info):
    """Map IPSec policy attributes into the CSR REST representation.

    Note: OpenStack supplies a default encryption algorithm when none
    is given, so an authentication-only (ah, sha1) configuration -
    which maps to the ah-sha-hmac transform protocol - cannot be
    selected. Hence the encryption algorithm is always configured, and
    ah-sha-hmac is added only when a transform protocol is mapped.
    """
    resource = 'ipsec_policy'
    policy = info[resource]
    ah_protocol = self.translate_dialect(resource,
                                         'transform_protocol',
                                         policy)
    authentication = self.translate_dialect(resource,
                                            'auth_algorithm',
                                            policy)
    encryption = self.translate_dialect(resource,
                                        'encryption_algorithm',
                                        policy)
    pfs_group = self.translate_dialect(resource, 'pfs', policy)
    protection = {u'esp-encryption': encryption,
                  u'esp-authentication': authentication}
    if ah_protocol:
        protection[u'ah'] = ah_protocol
    return {u'policy-id': ipsec_policy_id,
            u'protection-suite': protection,
            u'lifetime-sec': policy['lifetime_value'],
            u'pfs': pfs_group,
            u'anti-replay-window-size': u'disable'}
def create_routes_info(self, site_conn_id, conn_info):
    """Build (route_id, route) pairs for each peer CIDR's static route."""
    return [(csr_client.make_route_id(cidr, site_conn_id),
             {u'destination-network': cidr,
              u'outgoing-interface': site_conn_id})
            for cidr in conn_info.get('peer_cidrs', [])]
was successful.""" + if status in (requests.codes.NO_CONTENT, requests.codes.NOT_FOUND): + LOG.debug("%(resource)s configuration %(which)s was removed", + {'resource': resource, 'which': which}) + else: + LOG.warning(_("Unable to delete %(resource)s %(which)s: " + "%(status)d"), {'resource': resource, + 'which': which, + 'status': status}) + + def do_rollback(self): + """Undo create steps that were completed successfully.""" + for step in reversed(self.steps): + delete_action = 'delete_%s' % step.action + LOG.debug(_("Performing rollback action %(action)s for " + "resource %(resource)s"), {'action': delete_action, + 'resource': step.title}) + try: + getattr(self.csr, delete_action)(step.resource_id) + except AttributeError: + LOG.exception(_("Internal error - '%s' is not defined"), + delete_action) + raise CsrResourceCreateFailure(resource=step.title, + which=step.resource_id) + self._verify_deleted(self.csr.status, step.title, step.resource_id) + self.steps = [] + + def create_ipsec_site_connection(self, context, conn_info): + """Creates an IPSec site-to-site connection on CSR. + + Create the PSK, IKE policy, IPSec policy, connection, static route, + and (future) DPD. 
+ """ + # Get all the IDs + conn_id = conn_info['id'] + psk_id = conn_id + site_conn_id = conn_info['cisco']['site_conn_id'] + ike_policy_id = conn_info['cisco']['ike_policy_id'] + ipsec_policy_id = conn_info['cisco']['ipsec_policy_id'] + + LOG.debug(_('Creating IPSec connection %s'), conn_id) + # Get all the attributes needed to create + try: + psk_info = self.create_psk_info(psk_id, conn_info) + ike_policy_info = self.create_ike_policy_info(ike_policy_id, + conn_info) + ipsec_policy_info = self.create_ipsec_policy_info(ipsec_policy_id, + conn_info) + connection_info = self.create_site_connection_info(site_conn_id, + ipsec_policy_id, + conn_info) + routes_info = self.create_routes_info(site_conn_id, conn_info) + except (CsrUnknownMappingError, CsrDriverMismatchError) as e: + LOG.exception(e) + return + + try: + self.do_create_action('pre_shared_key', psk_info, + conn_id, 'Pre-Shared Key') + self.do_create_action('ike_policy', ike_policy_info, + ike_policy_id, 'IKE Policy') + self.do_create_action('ipsec_policy', ipsec_policy_info, + ipsec_policy_id, 'IPSec Policy') + self.do_create_action('ipsec_connection', connection_info, + site_conn_id, 'IPSec Connection') + + # TODO(pcm): FUTURE - Do DPD for v1 and handle if >1 connection + # and different DPD settings + for route_id, route_info in routes_info: + self.do_create_action('static_route', route_info, + route_id, 'Static Route') + except CsrResourceCreateFailure: + self.do_rollback() + LOG.info(_("FAILED: Create of IPSec site-to-site connection %s"), + conn_id) + else: + LOG.info(_("SUCCESS: Created IPSec site-to-site connection %s"), + conn_id) + + def delete_ipsec_site_connection(self, context, conn_id): + """Delete the site-to-site IPSec connection. + + This will be best effort and will continue, if there are any + failures. 
+ """ + LOG.debug(_('Deleting IPSec connection %s'), conn_id) + if not self.steps: + LOG.warning(_('Unable to find connection %s'), conn_id) + else: + self.do_rollback() + + LOG.info(_("SUCCESS: Deleted IPSec site-to-site connection %s"), + conn_id) + + def set_admin_state(self, is_up): + """Change the admin state for the IPSec connection.""" + self.csr.set_ipsec_connection_state(self.tunnel, admin_up=is_up) + if self.csr.status != requests.codes.NO_CONTENT: + state = "UP" if is_up else "DOWN" + LOG.error(_("Unable to change %(tunnel)s admin state to " + "%(state)s"), {'tunnel': self.tunnel, 'state': state}) + raise CsrAdminStateChangeFailure(tunnel=self.tunnel, state=state) diff --git a/neutron/services/vpn/device_drivers/ipsec.py b/neutron/services/vpn/device_drivers/ipsec.py new file mode 100644 index 000000000..aef47919c --- /dev/null +++ b/neutron/services/vpn/device_drivers/ipsec.py @@ -0,0 +1,713 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013, Nachi Ueno, NTT I3, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
def _get_template(template_file):
    """Load a Jinja2 template, creating the shared environment lazily."""
    global JINJA_ENV
    if not JINJA_ENV:
        # Root the loader at "/" so absolute template paths resolve
        # as-is; the environment is built once and reused.
        JINJA_ENV = jinja2.Environment(
            loader=jinja2.FileSystemLoader(searchpath="/"))
    return JINJA_ENV.get_template(template_file)
def __init__(self, conf, root_helper, process_id,
             vpnservice, namespace):
    """Set up process state and per-process config directory paths.

    :param conf: agent configuration object
    :param root_helper: helper used to run commands with root privileges
    :param process_id: identifier for this ipsec process instance
    :param vpnservice: vpnservice dict from the plugin; its algorithm
        names are translated in place via update_vpnservice()
    :param namespace: network namespace for ipsec commands; may be
        falsy, in which case the process is treated as inactive
    """
    self.conf = conf
    self.id = process_id
    self.root_helper = root_helper
    # Set True by update() when a service/connection in a PENDING_*
    # state is refreshed, so the change is reported back to the plugin.
    self.updated_pending_status = False
    self.namespace = namespace
    # connection_id -> {'status': ..., 'updated_pending_status': ...}
    self.connection_status = {}
    # All generated config lives under <config_base_dir>/<process_id>.
    self.config_dir = os.path.join(
        cfg.CONF.ipsec.config_base_dir, self.id)
    self.etc_dir = os.path.join(self.config_dir, 'etc')
    self.update_vpnservice(vpnservice)
self._get_config_filename(kind) + utils.replace_file(config_file_name, config_str) + + def remove_config(self): + """Remove whole config file.""" + shutil.rmtree(self.config_dir, ignore_errors=True) + + def _get_config_filename(self, kind): + config_dir = self.etc_dir + return os.path.join(config_dir, kind) + + def _ensure_dir(self, dir_path): + if not os.path.isdir(dir_path): + os.makedirs(dir_path, 0o755) + + def ensure_config_dir(self, vpnservice): + """Create config directory if it does not exist.""" + self._ensure_dir(self.config_dir) + for subdir in self.CONFIG_DIRS: + dir_path = os.path.join(self.config_dir, subdir) + self._ensure_dir(dir_path) + + def _gen_config_content(self, template_file, vpnservice): + template = _get_template(template_file) + return template.render( + {'vpnservice': vpnservice, + 'state_path': cfg.CONF.state_path}) + + @abc.abstractmethod + def get_status(self): + pass + + @property + def status(self): + if self.active: + return constants.ACTIVE + return constants.DOWN + + @property + def active(self): + """Check if the process is active or not.""" + if not self.namespace: + return False + try: + status = self.get_status() + self._update_connection_status(status) + except RuntimeError: + return False + return True + + def update(self): + """Update Status based on vpnservice configuration.""" + if self.vpnservice and not self.vpnservice['admin_state_up']: + self.disable() + else: + self.enable() + + if plugin_utils.in_pending_status(self.vpnservice['status']): + self.updated_pending_status = True + + self.vpnservice['status'] = self.status + for ipsec_site_conn in self.vpnservice['ipsec_site_connections']: + if plugin_utils.in_pending_status(ipsec_site_conn['status']): + conn_id = ipsec_site_conn['id'] + conn_status = self.connection_status.get(conn_id) + if not conn_status: + continue + conn_status['updated_pending_status'] = True + ipsec_site_conn['status'] = conn_status['status'] + + def enable(self): + """Enabling the process.""" + 
def _update_connection_status(self, status_output):
    """Parse `ipsec whack --status` output and record connection states.

    Each matching line carries a connection UUID and a routing state
    ('unrouted' or 'erouted'), which is mapped to an OpenStack status
    via STATUS_MAP. Entries are created on first sight with
    updated_pending_status False.
    """
    # Raw string: '\d' and '\-' are regex escapes, not string escapes
    # (the original non-raw pattern relied on Python passing unknown
    # escapes through, which is deprecated). Compile once, outside the
    # per-line loop.
    conn_re = re.compile(r'\d\d\d "([a-f0-9\-]+).* (unrouted|erouted);')
    for line in status_output.split('\n'):
        m = conn_re.search(line)
        if not m:
            continue
        connection_id = m.group(1)
        status = m.group(2)
        if not self.connection_status.get(connection_id):
            self.connection_status[connection_id] = {
                'status': None,
                'updated_pending_status': False
            }
        self.connection_status[
            connection_id]['status'] = STATUS_MAP[status]
+ + This process class uses three commands + (1) ipsec pluto: IPsec IKE keying daemon + (2) ipsec addconn: Adds new ipsec addconn + (3) ipsec whack: control interface for IPSEC keying daemon + """ + def __init__(self, conf, root_helper, process_id, + vpnservice, namespace): + super(OpenSwanProcess, self).__init__( + conf, root_helper, process_id, + vpnservice, namespace) + self.secrets_file = os.path.join( + self.etc_dir, 'ipsec.secrets') + self.config_file = os.path.join( + self.etc_dir, 'ipsec.conf') + self.pid_path = os.path.join( + self.config_dir, 'var', 'run', 'pluto') + + def _execute(self, cmd, check_exit_code=True): + """Execute command on namespace.""" + ip_wrapper = ip_lib.IPWrapper(self.root_helper, self.namespace) + return ip_wrapper.netns.execute( + cmd, + check_exit_code=check_exit_code) + + def ensure_configs(self): + """Generate config files which are needed for OpenSwan. + + If there is no directory, this function will create + dirs. + """ + self.ensure_config_dir(self.vpnservice) + self.ensure_config_file( + 'ipsec.conf', + self.conf.openswan.ipsec_config_template, + self.vpnservice) + self.ensure_config_file( + 'ipsec.secrets', + self.conf.openswan.ipsec_secret_template, + self.vpnservice) + + def get_status(self): + return self._execute([self.binary, + 'whack', + '--ctlbase', + self.pid_path, + '--status']) + + def restart(self): + """Restart the process.""" + self.stop() + self.start() + return + + def _get_nexthop(self, address): + routes = self._execute( + ['ip', 'route', 'get', address]) + if routes.find('via') >= 0: + return routes.split(' ')[2] + return address + + def _virtual_privates(self): + """Returns line of virtual_privates. + + virtual_private contains the networks + that are allowed as subnet for the remote client. 
+ """ + virtual_privates = [] + nets = [self.vpnservice['subnet']['cidr']] + for ipsec_site_conn in self.vpnservice['ipsec_site_connections']: + nets += ipsec_site_conn['peer_cidrs'] + for net in nets: + version = netaddr.IPNetwork(net).version + virtual_privates.append('%%v%s:%s' % (version, net)) + return ','.join(virtual_privates) + + def start(self): + """Start the process. + + Note: if there is not namespace yet, + just do nothing, and wait next event. + """ + if not self.namespace: + return + virtual_private = self._virtual_privates() + #start pluto IKE keying daemon + self._execute([self.binary, + 'pluto', + '--ctlbase', self.pid_path, + '--ipsecdir', self.etc_dir, + '--use-netkey', + '--uniqueids', + '--nat_traversal', + '--secretsfile', self.secrets_file, + '--virtual_private', virtual_private + ]) + #add connections + for ipsec_site_conn in self.vpnservice['ipsec_site_connections']: + nexthop = self._get_nexthop(ipsec_site_conn['peer_address']) + self._execute([self.binary, + 'addconn', + '--ctlbase', '%s.ctl' % self.pid_path, + '--defaultroutenexthop', nexthop, + '--config', self.config_file, + ipsec_site_conn['id'] + ]) + #TODO(nati) fix this when openswan is fixed + #Due to openswan bug, this command always exit with 3 + #start whack ipsec keying daemon + self._execute([self.binary, + 'whack', + '--ctlbase', self.pid_path, + '--listen', + ], check_exit_code=False) + + for ipsec_site_conn in self.vpnservice['ipsec_site_connections']: + if not ipsec_site_conn['initiator'] == 'start': + continue + #initiate ipsec connection + self._execute([self.binary, + 'whack', + '--ctlbase', self.pid_path, + '--name', ipsec_site_conn['id'], + '--asynchronous', + '--initiate' + ]) + + def disconnect(self): + if not self.namespace: + return + if not self.vpnservice: + return + for conn_id in self.connection_status: + self._execute([self.binary, + 'whack', + '--ctlbase', self.pid_path, + '--name', '%s/0x1' % conn_id, + '--terminate' + ]) + + def stop(self): + #Stop 
process using whack + #Note this will also stop pluto + self.disconnect() + self._execute([self.binary, + 'whack', + '--ctlbase', self.pid_path, + '--shutdown', + ]) + #clean connection_status info + self.connection_status = {} + + +class IPsecVpnDriverApi(rpc_compat.RpcProxy): + """IPSecVpnDriver RPC api.""" + IPSEC_PLUGIN_VERSION = '1.0' + + def get_vpn_services_on_host(self, context, host): + """Get list of vpnservices. + + The vpnservices including related ipsec_site_connection, + ikepolicy and ipsecpolicy on this host + """ + return self.call(context, + self.make_msg('get_vpn_services_on_host', + host=host), + version=self.IPSEC_PLUGIN_VERSION, + topic=self.topic) + + def update_status(self, context, status): + """Update local status. + + This method call updates status attribute of + VPNServices. + """ + return self.cast(context, + self.make_msg('update_status', + status=status), + version=self.IPSEC_PLUGIN_VERSION, + topic=self.topic) + + +@six.add_metaclass(abc.ABCMeta) +class IPsecDriver(device_drivers.DeviceDriver): + """VPN Device Driver for IPSec. + + This class is designed for use with L3-agent now. + However this driver will be used with another agent in future. + so the use of "Router" is kept minimul now. + Insted of router_id, we are using process_id in this code. 
+ """ + + # history + # 1.0 Initial version + + RPC_API_VERSION = '1.0' + + # TODO(ihrachys): we can't use RpcCallback here due to inheritance + # issues + target = messaging.Target(version=RPC_API_VERSION) + + def __init__(self, agent, host): + self.agent = agent + self.conf = self.agent.conf + self.root_helper = self.agent.root_helper + self.host = host + self.conn = rpc_compat.create_connection(new=True) + self.context = context.get_admin_context_without_session() + self.topic = topics.IPSEC_AGENT_TOPIC + node_topic = '%s.%s' % (self.topic, self.host) + + self.processes = {} + self.process_status_cache = {} + + self.endpoints = [self] + self.conn.create_consumer(node_topic, self.endpoints, fanout=False) + self.conn.consume_in_threads() + self.agent_rpc = IPsecVpnDriverApi(topics.IPSEC_DRIVER_TOPIC, '1.0') + self.process_status_cache_check = loopingcall.FixedIntervalLoopingCall( + self.report_status, self.context) + self.process_status_cache_check.start( + interval=self.conf.ipsec.ipsec_status_check_interval) + + def _update_nat(self, vpnservice, func): + """Setting up nat rule in iptables. + + We need to setup nat rule for ipsec packet. + :param vpnservice: vpnservices + :param func: self.add_nat_rule or self.remove_nat_rule + """ + local_cidr = vpnservice['subnet']['cidr'] + router_id = vpnservice['router_id'] + for ipsec_site_connection in vpnservice['ipsec_site_connections']: + for peer_cidr in ipsec_site_connection['peer_cidrs']: + func( + router_id, + 'POSTROUTING', + '-s %s -d %s -m policy ' + '--dir out --pol ipsec ' + '-j ACCEPT ' % (local_cidr, peer_cidr), + top=True) + self.agent.iptables_apply(router_id) + + def vpnservice_updated(self, context, **kwargs): + """Vpnservice updated rpc handler + + VPN Service Driver will call this method + when vpnservices updated. + Then this method start sync with server. 
+ """ + self.sync(context, []) + + @abc.abstractmethod + def create_process(self, process_id, vpnservice, namespace): + pass + + def ensure_process(self, process_id, vpnservice=None): + """Ensuring process. + + If the process doesn't exist, it will create process + and store it in self.processs + """ + process = self.processes.get(process_id) + if not process or not process.namespace: + namespace = self.agent.get_namespace(process_id) + process = self.create_process( + process_id, + vpnservice, + namespace) + self.processes[process_id] = process + elif vpnservice: + process.update_vpnservice(vpnservice) + return process + + def create_router(self, process_id): + """Handling create router event. + + Agent calls this method, when the process namespace + is ready. + """ + if process_id in self.processes: + # In case of vpnservice is created + # before router's namespace + process = self.processes[process_id] + self._update_nat(process.vpnservice, self.agent.add_nat_rule) + process.enable() + + def destroy_router(self, process_id): + """Handling destroy_router event. + + Agent calls this method, when the process namespace + is deleted. 
+ """ + if process_id in self.processes: + process = self.processes[process_id] + process.disable() + vpnservice = process.vpnservice + if vpnservice: + self._update_nat(vpnservice, self.agent.remove_nat_rule) + del self.processes[process_id] + + def get_process_status_cache(self, process): + if not self.process_status_cache.get(process.id): + self.process_status_cache[process.id] = { + 'status': None, + 'id': process.vpnservice['id'], + 'updated_pending_status': False, + 'ipsec_site_connections': {}} + return self.process_status_cache[process.id] + + def is_status_updated(self, process, previous_status): + if process.updated_pending_status: + return True + if process.status != previous_status['status']: + return True + if (process.connection_status != + previous_status['ipsec_site_connections']): + return True + + def unset_updated_pending_status(self, process): + process.updated_pending_status = False + for connection_status in process.connection_status.values(): + connection_status['updated_pending_status'] = False + + def copy_process_status(self, process): + return { + 'id': process.vpnservice['id'], + 'status': process.status, + 'updated_pending_status': process.updated_pending_status, + 'ipsec_site_connections': copy.deepcopy(process.connection_status) + } + + def update_downed_connections(self, process_id, new_status): + """Update info to be reported, if connections just went down. + + If there is no longer any information for a connection, because it + has been removed (e.g. due to an admin down of VPN service or IPSec + connection), but there was previous status information for the + connection, mark the connection as down for reporting purposes. 
+ """ + if process_id in self.process_status_cache: + for conn in self.process_status_cache[process_id][IPSEC_CONNS]: + if conn not in new_status[IPSEC_CONNS]: + new_status[IPSEC_CONNS][conn] = { + 'status': constants.DOWN, + 'updated_pending_status': True + } + + def report_status(self, context): + status_changed_vpn_services = [] + for process in self.processes.values(): + previous_status = self.get_process_status_cache(process) + if self.is_status_updated(process, previous_status): + new_status = self.copy_process_status(process) + self.update_downed_connections(process.id, new_status) + status_changed_vpn_services.append(new_status) + self.process_status_cache[process.id] = ( + self.copy_process_status(process)) + # We need unset updated_pending status after it + # is reported to the server side + self.unset_updated_pending_status(process) + + if status_changed_vpn_services: + self.agent_rpc.update_status( + context, + status_changed_vpn_services) + + @lockutils.synchronized('vpn-agent', 'neutron-') + def sync(self, context, routers): + """Sync status with server side. + + :param context: context object for RPC call + :param routers: Router objects which is created in this sync event + + There could be many failure cases should be + considered including the followings. + 1) Agent class restarted + 2) Failure on process creation + 3) VpnService is deleted during agent down + 4) RPC failure + + In order to handle, these failure cases, + This driver takes simple sync strategies. + """ + vpnservices = self.agent_rpc.get_vpn_services_on_host( + context, self.host) + router_ids = [vpnservice['router_id'] for vpnservice in vpnservices] + # Ensure the ipsec process is enabled + for vpnservice in vpnservices: + process = self.ensure_process(vpnservice['router_id'], + vpnservice=vpnservice) + self._update_nat(vpnservice, self.agent.add_nat_rule) + process.update() + + # Delete any IPSec processes that are + # associated with routers, but are not running the VPN service. 
+ for router in routers: + #We are using router id as process_id + process_id = router['id'] + if process_id not in router_ids: + process = self.ensure_process(process_id) + self.destroy_router(process_id) + + # Delete any IPSec processes running + # VPN that do not have an associated router. + process_ids = [process_id + for process_id in self.processes + if process_id not in router_ids] + for process_id in process_ids: + self.destroy_router(process_id) + self.report_status(context) + + +class OpenSwanDriver(IPsecDriver): + def create_process(self, process_id, vpnservice, namespace): + return OpenSwanProcess( + self.conf, + self.root_helper, + process_id, + vpnservice, + namespace) diff --git a/neutron/services/vpn/device_drivers/template/openswan/ipsec.conf.template b/neutron/services/vpn/device_drivers/template/openswan/ipsec.conf.template new file mode 100644 index 000000000..546e27ec6 --- /dev/null +++ b/neutron/services/vpn/device_drivers/template/openswan/ipsec.conf.template @@ -0,0 +1,64 @@ +# Configuration for {{vpnservice.name}} +config setup + nat_traversal=yes + listen={{vpnservice.external_ip}} +conn %default + ikelifetime=480m + keylife=60m + keyingtries=%forever +{% for ipsec_site_connection in vpnservice.ipsec_site_connections if ipsec_site_connection.admin_state_up +%}conn {{ipsec_site_connection.id}} + # NOTE: a default route is required for %defaultroute to work... 
+ left={{vpnservice.external_ip}} + leftid={{vpnservice.external_ip}} + auto={{ipsec_site_connection.initiator}} + # NOTE:REQUIRED + # [subnet] + leftsubnet={{vpnservice.subnet.cidr}} + # leftsubnet=networkA/netmaskA, networkB/netmaskB (IKEv2 only) + leftnexthop=%defaultroute + ###################### + # ipsec_site_connections + ###################### + # [peer_address] + right={{ipsec_site_connection.peer_address}} + # [peer_id] + rightid={{ipsec_site_connection.peer_id}} + # [peer_cidrs] + rightsubnets={ {{ipsec_site_connection['peer_cidrs']|join(' ')}} } + # rightsubnet=networkA/netmaskA, networkB/netmaskB (IKEv2 only) + rightnexthop=%defaultroute + # [mtu] + # Note It looks like not supported in the strongswan driver + # ignore it now + # [dpd_action] + dpdaction={{ipsec_site_connection.dpd_action}} + # [dpd_interval] + dpddelay={{ipsec_site_connection.dpd_interval}} + # [dpd_timeout] + dpdtimeout={{ipsec_site_connection.dpd_timeout}} + # [auth_mode] + authby=secret + ###################### + # IKEPolicy params + ###################### + #ike version + ikev2={{ipsec_site_connection.ikepolicy.ike_version}} + # [encryption_algorithm]-[auth_algorithm]-[pfs] + ike={{ipsec_site_connection.ikepolicy.encryption_algorithm}}-{{ipsec_site_connection.ikepolicy.auth_algorithm}};{{ipsec_site_connection.ikepolicy.pfs}} + # [lifetime_value] + ikelifetime={{ipsec_site_connection.ikepolicy.lifetime_value}}s + # NOTE: it looks lifetime_units=kilobytes can't be enforced (could be seconds, hours, days...) 
+ ########################## + # IPsecPolicys params + ########################## + # [transform_protocol] + auth={{ipsec_site_connection.ipsecpolicy.transform_protocol}} + # [encryption_algorithm]-[auth_algorithm]-[pfs] + phase2alg={{ipsec_site_connection.ipsecpolicy.encryption_algorithm}}-{{ipsec_site_connection.ipsecpolicy.auth_algorithm}};{{ipsec_site_connection.ipsecpolicy.pfs}} + # [encapsulation_mode] + type={{ipsec_site_connection.ipsecpolicy.encapsulation_mode}} + # [lifetime_value] + lifetime={{ipsec_site_connection.ipsecpolicy.lifetime_value}}s + # lifebytes=100000 if lifetime_units=kilobytes (IKEv2 only) +{% endfor %} diff --git a/neutron/services/vpn/device_drivers/template/openswan/ipsec.secret.template b/neutron/services/vpn/device_drivers/template/openswan/ipsec.secret.template new file mode 100644 index 000000000..8302e859f --- /dev/null +++ b/neutron/services/vpn/device_drivers/template/openswan/ipsec.secret.template @@ -0,0 +1,3 @@ +# Configuration for {{vpnservice.name}} {% for ipsec_site_connection in vpnservice.ipsec_site_connections %} +{{vpnservice.external_ip}} {{ipsec_site_connection.peer_id}} : PSK "{{ipsec_site_connection.psk}}" +{% endfor %} diff --git a/neutron/services/vpn/plugin.py b/neutron/services/vpn/plugin.py new file mode 100644 index 000000000..771188d65 --- /dev/null +++ b/neutron/services/vpn/plugin.py @@ -0,0 +1,107 @@ + +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# (c) Copyright 2013 Hewlett-Packard Development Company, L.P. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Swaminathan Vasudevan, Hewlett-Packard + +from neutron.db.vpn import vpn_db +from neutron.openstack.common import log as logging +from neutron.plugins.common import constants +from neutron.services import service_base + +LOG = logging.getLogger(__name__) + + +class VPNPlugin(vpn_db.VPNPluginDb): + + """Implementation of the VPN Service Plugin. + + This class manages the workflow of VPNaaS request/response. + Most DB related works are implemented in class + vpn_db.VPNPluginDb. + """ + supported_extension_aliases = ["vpnaas", "service-type"] + + +class VPNDriverPlugin(VPNPlugin, vpn_db.VPNPluginRpcDbMixin): + """VpnPlugin which supports VPN Service Drivers.""" + #TODO(nati) handle ikepolicy and ipsecpolicy update usecase + def __init__(self): + super(VPNDriverPlugin, self).__init__() + # Load the service driver from neutron.conf. + drivers, default_provider = service_base.load_drivers( + constants.VPN, self) + LOG.info(_("VPN plugin using service driver: %s"), default_provider) + self.ipsec_driver = drivers[default_provider] + + def _get_driver_for_vpnservice(self, vpnservice): + return self.ipsec_driver + + def _get_driver_for_ipsec_site_connection(self, context, + ipsec_site_connection): + #TODO(nati) get vpnservice when we support service type framework + vpnservice = None + return self._get_driver_for_vpnservice(vpnservice) + + def create_ipsec_site_connection(self, context, ipsec_site_connection): + ipsec_site_connection = super( + VPNDriverPlugin, self).create_ipsec_site_connection( + context, ipsec_site_connection) + driver = self._get_driver_for_ipsec_site_connection( + context, ipsec_site_connection) + driver.create_ipsec_site_connection(context, ipsec_site_connection) + return ipsec_site_connection + + def delete_ipsec_site_connection(self, context, ipsec_conn_id): + ipsec_site_connection = self.get_ipsec_site_connection( + context, 
ipsec_conn_id) + super(VPNDriverPlugin, self).delete_ipsec_site_connection( + context, ipsec_conn_id) + driver = self._get_driver_for_ipsec_site_connection( + context, ipsec_site_connection) + driver.delete_ipsec_site_connection(context, ipsec_site_connection) + + def update_ipsec_site_connection( + self, context, + ipsec_conn_id, ipsec_site_connection): + old_ipsec_site_connection = self.get_ipsec_site_connection( + context, ipsec_conn_id) + ipsec_site_connection = super( + VPNDriverPlugin, self).update_ipsec_site_connection( + context, + ipsec_conn_id, + ipsec_site_connection) + driver = self._get_driver_for_ipsec_site_connection( + context, ipsec_site_connection) + driver.update_ipsec_site_connection( + context, old_ipsec_site_connection, ipsec_site_connection) + return ipsec_site_connection + + def update_vpnservice(self, context, vpnservice_id, vpnservice): + old_vpn_service = self.get_vpnservice(context, vpnservice_id) + new_vpn_service = super( + VPNDriverPlugin, self).update_vpnservice(context, vpnservice_id, + vpnservice) + driver = self._get_driver_for_vpnservice(old_vpn_service) + driver.update_vpnservice(context, old_vpn_service, new_vpn_service) + return new_vpn_service + + def delete_vpnservice(self, context, vpnservice_id): + vpnservice = self._get_vpnservice(context, vpnservice_id) + super(VPNDriverPlugin, self).delete_vpnservice(context, vpnservice_id) + driver = self._get_driver_for_vpnservice(vpnservice) + driver.delete_vpnservice(context, vpnservice) diff --git a/neutron/services/vpn/service_drivers/__init__.py b/neutron/services/vpn/service_drivers/__init__.py new file mode 100644 index 000000000..e95637e9b --- /dev/null +++ b/neutron/services/vpn/service_drivers/__init__.py @@ -0,0 +1,92 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013, Nachi Ueno, NTT I3, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import abc + +import six + +from neutron.common import rpc_compat +from neutron import manager +from neutron.openstack.common import log as logging +from neutron.plugins.common import constants + +LOG = logging.getLogger(__name__) + + +@six.add_metaclass(abc.ABCMeta) +class VpnDriver(object): + + def __init__(self, service_plugin): + self.service_plugin = service_plugin + + @property + def service_type(self): + pass + + @abc.abstractmethod + def create_vpnservice(self, context, vpnservice): + pass + + @abc.abstractmethod + def update_vpnservice( + self, context, old_vpnservice, vpnservice): + pass + + @abc.abstractmethod + def delete_vpnservice(self, context, vpnservice): + pass + + +class BaseIPsecVpnAgentApi(rpc_compat.RpcProxy): + """Base class for IPSec API to agent.""" + + def __init__(self, to_agent_topic, topic, default_version): + self.to_agent_topic = to_agent_topic + super(BaseIPsecVpnAgentApi, self).__init__(topic, default_version) + + def _agent_notification(self, context, method, router_id, + version=None, **kwargs): + """Notify update for the agent. + + This method will find where is the router, and + dispatch notification for the agent. 
+ """ + admin_context = context.is_admin and context or context.elevated() + plugin = manager.NeutronManager.get_service_plugins().get( + constants.L3_ROUTER_NAT) + if not version: + version = self.RPC_API_VERSION + l3_agents = plugin.get_l3_agents_hosting_routers( + admin_context, [router_id], + admin_state_up=True, + active=True) + for l3_agent in l3_agents: + LOG.debug(_('Notify agent at %(topic)s.%(host)s the message ' + '%(method)s %(args)s'), + {'topic': self.to_agent_topic, + 'host': l3_agent.host, + 'method': method, + 'args': kwargs}) + self.cast( + context, self.make_msg(method, **kwargs), + version=version, + topic='%s.%s' % (self.to_agent_topic, l3_agent.host)) + + def vpnservice_updated(self, context, router_id, **kwargs): + """Send update event of vpnservices.""" + self._agent_notification(context, 'vpnservice_updated', router_id, + **kwargs) diff --git a/neutron/services/vpn/service_drivers/cisco_csr_db.py b/neutron/services/vpn/service_drivers/cisco_csr_db.py new file mode 100644 index 000000000..e1f0760cd --- /dev/null +++ b/neutron/services/vpn/service_drivers/cisco_csr_db.py @@ -0,0 +1,239 @@ +# Copyright 2014 Cisco Systems, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Paul Michali, Cisco Systems, Inc. 
+ +import sqlalchemy as sa +from sqlalchemy.orm import exc as sql_exc + +from neutron.common import exceptions +from neutron.db import model_base +from neutron.db import models_v2 +from neutron.db.vpn import vpn_db +from neutron.openstack.common.db import exception as db_exc +from neutron.openstack.common import log as logging + +LOG = logging.getLogger(__name__) + +# Note: Artificially limit these to reduce mapping table size and performance +# Tunnel can be 0..7FFFFFFF, IKE policy can be 1..10000, IPSec policy can be +# 1..31 characters long. +MAX_CSR_TUNNELS = 10000 +MAX_CSR_IKE_POLICIES = 2000 +MAX_CSR_IPSEC_POLICIES = 2000 + +TUNNEL = 'Tunnel' +IKE_POLICY = 'IKE Policy' +IPSEC_POLICY = 'IPSec Policy' + +MAPPING_LIMITS = {TUNNEL: (0, MAX_CSR_TUNNELS), + IKE_POLICY: (1, MAX_CSR_IKE_POLICIES), + IPSEC_POLICY: (1, MAX_CSR_IPSEC_POLICIES)} + + +class CsrInternalError(exceptions.NeutronException): + message = _("Fatal - %(reason)s") + + +class IdentifierMap(model_base.BASEV2, models_v2.HasTenant): + + """Maps OpenStack IDs to compatible numbers for Cisco CSR.""" + + __tablename__ = 'cisco_csr_identifier_map' + + ipsec_site_conn_id = sa.Column(sa.String(64), + sa.ForeignKey('ipsec_site_connections.id', + ondelete="CASCADE"), + primary_key=True) + csr_tunnel_id = sa.Column(sa.Integer, nullable=False) + csr_ike_policy_id = sa.Column(sa.Integer, nullable=False) + csr_ipsec_policy_id = sa.Column(sa.Integer, nullable=False) + + +def get_next_available_id(session, table_field, id_type): + """Find first unused id for the specified field in IdentifierMap table. + + As entries are removed, find the first "hole" and return that as the + next available ID. To improve performance, artificially limit + the number of entries to a smaller range. Currently, these IDs are + globally unique. Could enhance in the future to be unique per router + (CSR). 
+ """ + min_value = MAPPING_LIMITS[id_type][0] + max_value = MAPPING_LIMITS[id_type][1] + rows = session.query(table_field).order_by(table_field) + used_ids = set([row[0] for row in rows]) + all_ids = set(range(min_value, max_value + min_value)) + available_ids = all_ids - used_ids + if not available_ids: + msg = _("No available Cisco CSR %(type)s IDs from " + "%(min)d..%(max)d") % {'type': id_type, + 'min': min_value, + 'max': max_value} + LOG.error(msg) + raise IndexError(msg) + return available_ids.pop() + + +def get_next_available_tunnel_id(session): + """Find first available tunnel ID from 0..MAX_CSR_TUNNELS-1.""" + return get_next_available_id(session, IdentifierMap.csr_tunnel_id, + TUNNEL) + + +def get_next_available_ike_policy_id(session): + """Find first available IKE Policy ID from 1..MAX_CSR_IKE_POLICIES.""" + return get_next_available_id(session, IdentifierMap.csr_ike_policy_id, + IKE_POLICY) + + +def get_next_available_ipsec_policy_id(session): + """Find first available IPSec Policy ID from 1..MAX_CSR_IKE_POLICIES.""" + return get_next_available_id(session, IdentifierMap.csr_ipsec_policy_id, + IPSEC_POLICY) + + +def find_conn_with_policy(policy_field, policy_id, conn_id, session): + """Return ID of another conneciton (if any) that uses same policy ID.""" + qry = session.query(vpn_db.IPsecSiteConnection.id) + match = qry.filter_request( + policy_field == policy_id, + vpn_db.IPsecSiteConnection.id != conn_id).first() + if match: + return match[0] + + +def find_connection_using_ike_policy(ike_policy_id, conn_id, session): + """Return ID of another connection that uses same IKE policy ID.""" + return find_conn_with_policy(vpn_db.IPsecSiteConnection.ikepolicy_id, + ike_policy_id, conn_id, session) + + +def find_connection_using_ipsec_policy(ipsec_policy_id, conn_id, session): + """Return ID of another connection that uses same IPSec policy ID.""" + return find_conn_with_policy(vpn_db.IPsecSiteConnection.ipsecpolicy_id, + ipsec_policy_id, conn_id, session) + 
+ +def lookup_policy(policy_type, policy_field, conn_id, session): + """Obtain specified policy's mapping from other connection.""" + try: + return session.query(policy_field).filter_by( + ipsec_site_conn_id=conn_id).one()[0] + except sql_exc.NoResultFound: + msg = _("Database inconsistency between IPSec connection and " + "Cisco CSR mapping table (%s)") % policy_type + raise CsrInternalError(reason=msg) + + +def lookup_ike_policy_id_for(conn_id, session): + """Obtain existing Cisco CSR IKE policy ID from another connection.""" + return lookup_policy(IKE_POLICY, IdentifierMap.csr_ike_policy_id, + conn_id, session) + + +def lookup_ipsec_policy_id_for(conn_id, session): + """Obtain existing Cisco CSR IPSec policy ID from another connection.""" + return lookup_policy(IPSEC_POLICY, IdentifierMap.csr_ipsec_policy_id, + conn_id, session) + + +def determine_csr_policy_id(policy_type, conn_policy_field, map_policy_field, + policy_id, conn_id, session): + """Use existing or reserve a new policy ID for Cisco CSR use. + + TODO(pcm) FUTURE: Once device driver adds support for IKE/IPSec policy + ID sharing, add call to find_conn_with_policy() to find used ID and + then call lookup_policy() to find the current mapping for that ID. 
+ """ + csr_id = get_next_available_id(session, map_policy_field, policy_type) + LOG.debug(_("Reserved new CSR ID %(csr_id)d for %(policy)s " + "ID %(policy_id)s"), {'csr_id': csr_id, + 'policy': policy_type, + 'policy_id': policy_id}) + return csr_id + + +def determine_csr_ike_policy_id(ike_policy_id, conn_id, session): + """Use existing, or reserve a new IKE policy ID for Cisco CSR.""" + return determine_csr_policy_id(IKE_POLICY, + vpn_db.IPsecSiteConnection.ikepolicy_id, + IdentifierMap.csr_ike_policy_id, + ike_policy_id, conn_id, session) + + +def determine_csr_ipsec_policy_id(ipsec_policy_id, conn_id, session): + """Use existing, or reserve a new IPSec policy ID for Cisco CSR.""" + return determine_csr_policy_id(IPSEC_POLICY, + vpn_db.IPsecSiteConnection.ipsecpolicy_id, + IdentifierMap.csr_ipsec_policy_id, + ipsec_policy_id, conn_id, session) + + +def get_tunnel_mapping_for(conn_id, session): + try: + entry = session.query(IdentifierMap).filter_by( + ipsec_site_conn_id=conn_id).one() + LOG.debug(_("Mappings for IPSec connection %(conn)s - " + "tunnel=%(tunnel)s ike_policy=%(csr_ike)d " + "ipsec_policy=%(csr_ipsec)d"), + {'conn': conn_id, 'tunnel': entry.csr_tunnel_id, + 'csr_ike': entry.csr_ike_policy_id, + 'csr_ipsec': entry.csr_ipsec_policy_id}) + return (entry.csr_tunnel_id, entry.csr_ike_policy_id, + entry.csr_ipsec_policy_id) + except sql_exc.NoResultFound: + msg = _("Existing entry for IPSec connection %s not found in Cisco " + "CSR mapping table") % conn_id + raise CsrInternalError(reason=msg) + + +def create_tunnel_mapping(context, conn_info): + """Create Cisco CSR IDs, using mapping table and OpenStack UUIDs.""" + conn_id = conn_info['id'] + ike_policy_id = conn_info['ikepolicy_id'] + ipsec_policy_id = conn_info['ipsecpolicy_id'] + tenant_id = conn_info['tenant_id'] + with context.session.begin(): + csr_tunnel_id = get_next_available_tunnel_id(context.session) + csr_ike_id = determine_csr_ike_policy_id(ike_policy_id, conn_id, + context.session) + 
csr_ipsec_id = determine_csr_ipsec_policy_id(ipsec_policy_id, conn_id, + context.session) + map_entry = IdentifierMap(tenant_id=tenant_id, + ipsec_site_conn_id=conn_id, + csr_tunnel_id=csr_tunnel_id, + csr_ike_policy_id=csr_ike_id, + csr_ipsec_policy_id=csr_ipsec_id) + try: + context.session.add(map_entry) + # Force committing to database + context.session.flush() + except db_exc.DBDuplicateEntry: + msg = _("Attempt to create duplicate entry in Cisco CSR " + "mapping table for connection %s") % conn_id + raise CsrInternalError(reason=msg) + LOG.info(_("Mapped connection %(conn_id)s to Tunnel%(tunnel_id)d " + "using IKE policy ID %(ike_id)d and IPSec policy " + "ID %(ipsec_id)d"), + {'conn_id': conn_id, 'tunnel_id': csr_tunnel_id, + 'ike_id': csr_ike_id, 'ipsec_id': csr_ipsec_id}) + + +def delete_tunnel_mapping(context, conn_info): + conn_id = conn_info['id'] + with context.session.begin(): + sess_qry = context.session.query(IdentifierMap) + sess_qry.filter_by(ipsec_site_conn_id=conn_id).delete() + LOG.info(_("Removed mapping for connection %s"), conn_id) diff --git a/neutron/services/vpn/service_drivers/cisco_ipsec.py b/neutron/services/vpn/service_drivers/cisco_ipsec.py new file mode 100644 index 000000000..ed34f41ff --- /dev/null +++ b/neutron/services/vpn/service_drivers/cisco_ipsec.py @@ -0,0 +1,245 @@ +# Copyright 2014 Cisco Systems, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+
+import netaddr
+from netaddr import core as net_exc
+
+from neutron.common import exceptions
+from neutron.common import rpc_compat
+from neutron.openstack.common import excutils
+from neutron.openstack.common import log as logging
+from neutron.plugins.common import constants
+from neutron.services.vpn.common import topics
+from neutron.services.vpn import service_drivers
+from neutron.services.vpn.service_drivers import cisco_csr_db as csr_id_map
+
+
+LOG = logging.getLogger(__name__)
+
+IPSEC = 'ipsec'
+BASE_IPSEC_VERSION = '1.0'
+LIFETIME_LIMITS = {'IKE Policy': {'min': 60, 'max': 86400},
+                   'IPSec Policy': {'min': 120, 'max': 2592000}}
+MIN_CSR_MTU = 1500
+MAX_CSR_MTU = 9192
+
+
+class CsrValidationFailure(exceptions.BadRequest):
+    message = _("Cisco CSR does not support %(resource)s attribute %(key)s "
+                "with value '%(value)s'")
+
+
+class CiscoCsrIPsecVpnDriverCallBack(rpc_compat.RpcCallback):
+
+    """Handler for agent to plugin RPC messaging."""
+
+    # history
+    # 1.0 Initial version
+
+    RPC_API_VERSION = BASE_IPSEC_VERSION
+
+    def __init__(self, driver):
+        super(CiscoCsrIPsecVpnDriverCallBack, self).__init__()
+        self.driver = driver
+
+    def get_vpn_services_on_host(self, context, host=None):
+        """Returns info on the vpnservices on the host."""
+        plugin = self.driver.service_plugin
+        vpnservices = plugin._get_agent_hosting_vpn_services(
+            context, host)
+        return [self.driver._make_vpnservice_dict(vpnservice, context)
+                for vpnservice in vpnservices]
+
+    def update_status(self, context, status):
+        """Update status of all vpnservices."""
+        plugin = self.driver.service_plugin
+        plugin.update_status_by_agent(context, status)
+
+
+class CiscoCsrIPsecVpnAgentApi(service_drivers.BaseIPsecVpnAgentApi,
+                               rpc_compat.RpcCallback):
+
+    """API and handler for Cisco IPSec plugin to agent RPC messaging."""
+
+    RPC_API_VERSION = BASE_IPSEC_VERSION
+
+    def __init__(self, topic, default_version):
+        super(CiscoCsrIPsecVpnAgentApi, self).__init__(
+
topics.CISCO_IPSEC_AGENT_TOPIC, topic, default_version) + + +class CiscoCsrIPsecVPNDriver(service_drivers.VpnDriver): + + """Cisco CSR VPN Service Driver class for IPsec.""" + + def __init__(self, service_plugin): + super(CiscoCsrIPsecVPNDriver, self).__init__(service_plugin) + self.endpoints = [CiscoCsrIPsecVpnDriverCallBack(self)] + self.conn = rpc_compat.create_connection(new=True) + self.conn.create_consumer( + topics.CISCO_IPSEC_DRIVER_TOPIC, self.endpoints, fanout=False) + self.conn.consume_in_threads() + self.agent_rpc = CiscoCsrIPsecVpnAgentApi( + topics.CISCO_IPSEC_AGENT_TOPIC, BASE_IPSEC_VERSION) + + @property + def service_type(self): + return IPSEC + + def validate_lifetime(self, for_policy, policy_info): + """Ensure lifetime in secs and value is supported, based on policy.""" + units = policy_info['lifetime']['units'] + if units != 'seconds': + raise CsrValidationFailure(resource=for_policy, + key='lifetime:units', + value=units) + value = policy_info['lifetime']['value'] + if (value < LIFETIME_LIMITS[for_policy]['min'] or + value > LIFETIME_LIMITS[for_policy]['max']): + raise CsrValidationFailure(resource=for_policy, + key='lifetime:value', + value=value) + + def validate_ike_version(self, policy_info): + """Ensure IKE policy is v1 for current REST API.""" + version = policy_info['ike_version'] + if version != 'v1': + raise CsrValidationFailure(resource='IKE Policy', + key='ike_version', + value=version) + + def validate_mtu(self, conn_info): + """Ensure the MTU value is supported.""" + mtu = conn_info['mtu'] + if mtu < MIN_CSR_MTU or mtu > MAX_CSR_MTU: + raise CsrValidationFailure(resource='IPSec Connection', + key='mtu', + value=mtu) + + def validate_public_ip_present(self, vpn_service): + """Ensure there is one gateway IP specified for the router used.""" + gw_port = vpn_service.router.gw_port + if not gw_port or len(gw_port.fixed_ips) != 1: + raise CsrValidationFailure(resource='IPSec Connection', + key='router:gw_port:ip_address', + 
value='missing') + + def validate_peer_id(self, ipsec_conn): + """Ensure that an IP address is specified for peer ID.""" + # TODO(pcm) Should we check peer_address too? + peer_id = ipsec_conn['peer_id'] + try: + netaddr.IPAddress(peer_id) + except net_exc.AddrFormatError: + raise CsrValidationFailure(resource='IPSec Connection', + key='peer_id', value=peer_id) + + def validate_ipsec_connection(self, context, ipsec_conn, vpn_service): + """Validate attributes w.r.t. Cisco CSR capabilities.""" + ike_policy = self.service_plugin.get_ikepolicy( + context, ipsec_conn['ikepolicy_id']) + ipsec_policy = self.service_plugin.get_ipsecpolicy( + context, ipsec_conn['ipsecpolicy_id']) + self.validate_lifetime('IKE Policy', ike_policy) + self.validate_lifetime('IPSec Policy', ipsec_policy) + self.validate_ike_version(ike_policy) + self.validate_mtu(ipsec_conn) + self.validate_public_ip_present(vpn_service) + self.validate_peer_id(ipsec_conn) + LOG.debug(_("IPSec connection %s validated for Cisco CSR"), + ipsec_conn['id']) + + def create_ipsec_site_connection(self, context, ipsec_site_connection): + vpnservice = self.service_plugin._get_vpnservice( + context, ipsec_site_connection['vpnservice_id']) + try: + self.validate_ipsec_connection(context, ipsec_site_connection, + vpnservice) + except CsrValidationFailure: + with excutils.save_and_reraise_exception(): + self.service_plugin.update_ipsec_site_conn_status( + context, ipsec_site_connection['id'], constants.ERROR) + csr_id_map.create_tunnel_mapping(context, ipsec_site_connection) + self.agent_rpc.vpnservice_updated(context, vpnservice['router_id'], + reason='ipsec-conn-create') + + def update_ipsec_site_connection( + self, context, old_ipsec_site_connection, ipsec_site_connection): + vpnservice = self.service_plugin._get_vpnservice( + context, ipsec_site_connection['vpnservice_id']) + self.agent_rpc.vpnservice_updated( + context, vpnservice['router_id'], + reason='ipsec-conn-update') + + def delete_ipsec_site_connection(self, 
context, ipsec_site_connection): + vpnservice = self.service_plugin._get_vpnservice( + context, ipsec_site_connection['vpnservice_id']) + self.agent_rpc.vpnservice_updated(context, vpnservice['router_id'], + reason='ipsec-conn-delete') + + def create_ikepolicy(self, context, ikepolicy): + pass + + def delete_ikepolicy(self, context, ikepolicy): + pass + + def update_ikepolicy(self, context, old_ikepolicy, ikepolicy): + pass + + def create_ipsecpolicy(self, context, ipsecpolicy): + pass + + def delete_ipsecpolicy(self, context, ipsecpolicy): + pass + + def update_ipsecpolicy(self, context, old_ipsec_policy, ipsecpolicy): + pass + + def create_vpnservice(self, context, vpnservice): + pass + + def update_vpnservice(self, context, old_vpnservice, vpnservice): + self.agent_rpc.vpnservice_updated(context, vpnservice['router_id'], + reason='vpn-service-update') + + def delete_vpnservice(self, context, vpnservice): + self.agent_rpc.vpnservice_updated(context, vpnservice['router_id'], + reason='vpn-service-delete') + + def get_cisco_connection_mappings(self, conn_id, context): + """Obtain persisted mappings for IDs related to connection.""" + tunnel_id, ike_id, ipsec_id = csr_id_map.get_tunnel_mapping_for( + conn_id, context.session) + return {'site_conn_id': u'Tunnel%d' % tunnel_id, + 'ike_policy_id': u'%d' % ike_id, + 'ipsec_policy_id': u'%s' % ipsec_id} + + def _make_vpnservice_dict(self, vpnservice, context): + """Collect all info on service, including Cisco info per IPSec conn.""" + vpnservice_dict = dict(vpnservice) + vpnservice_dict['ipsec_conns'] = [] + vpnservice_dict['subnet'] = dict( + vpnservice.subnet) + vpnservice_dict['external_ip'] = vpnservice.router.gw_port[ + 'fixed_ips'][0]['ip_address'] + for ipsec_conn in vpnservice.ipsec_site_connections: + ipsec_conn_dict = dict(ipsec_conn) + ipsec_conn_dict['ike_policy'] = dict(ipsec_conn.ikepolicy) + ipsec_conn_dict['ipsec_policy'] = dict(ipsec_conn.ipsecpolicy) + ipsec_conn_dict['peer_cidrs'] = [ + peer_cidr.cidr 
for peer_cidr in ipsec_conn.peer_cidrs] + ipsec_conn_dict['cisco'] = self.get_cisco_connection_mappings( + ipsec_conn['id'], context) + vpnservice_dict['ipsec_conns'].append(ipsec_conn_dict) + return vpnservice_dict diff --git a/neutron/services/vpn/service_drivers/ipsec.py b/neutron/services/vpn/service_drivers/ipsec.py new file mode 100644 index 000000000..be6aa9e26 --- /dev/null +++ b/neutron/services/vpn/service_drivers/ipsec.py @@ -0,0 +1,156 @@ +# vim: tabstop=10 shiftwidth=4 softtabstop=4 +# +# Copyright 2013, Nachi Ueno, NTT I3, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+import netaddr + +from neutron.common import rpc_compat +from neutron.openstack.common import log as logging +from neutron.services.vpn.common import topics +from neutron.services.vpn import service_drivers + + +LOG = logging.getLogger(__name__) + +IPSEC = 'ipsec' +BASE_IPSEC_VERSION = '1.0' + + +class IPsecVpnDriverCallBack(rpc_compat.RpcCallback): + """Callback for IPSecVpnDriver rpc.""" + + # history + # 1.0 Initial version + + RPC_API_VERSION = BASE_IPSEC_VERSION + + def __init__(self, driver): + super(IPsecVpnDriverCallBack, self).__init__() + self.driver = driver + + def get_vpn_services_on_host(self, context, host=None): + """Returns the vpnservices on the host.""" + plugin = self.driver.service_plugin + vpnservices = plugin._get_agent_hosting_vpn_services( + context, host) + return [self.driver._make_vpnservice_dict(vpnservice) + for vpnservice in vpnservices] + + def update_status(self, context, status): + """Update status of vpnservices.""" + plugin = self.driver.service_plugin + plugin.update_status_by_agent(context, status) + + +class IPsecVpnAgentApi(service_drivers.BaseIPsecVpnAgentApi, + rpc_compat.RpcCallback): + """Agent RPC API for IPsecVPNAgent.""" + + RPC_API_VERSION = BASE_IPSEC_VERSION + + def __init__(self, topic, default_version): + super(IPsecVpnAgentApi, self).__init__( + topics.IPSEC_AGENT_TOPIC, topic, default_version) + + +class IPsecVPNDriver(service_drivers.VpnDriver): + """VPN Service Driver class for IPsec.""" + + def __init__(self, service_plugin): + super(IPsecVPNDriver, self).__init__(service_plugin) + self.endpoints = [IPsecVpnDriverCallBack(self)] + self.conn = rpc_compat.create_connection(new=True) + self.conn.create_consumer( + topics.IPSEC_DRIVER_TOPIC, self.endpoints, fanout=False) + self.conn.consume_in_threads() + self.agent_rpc = IPsecVpnAgentApi( + topics.IPSEC_AGENT_TOPIC, BASE_IPSEC_VERSION) + + @property + def service_type(self): + return IPSEC + + def create_ipsec_site_connection(self, context, 
ipsec_site_connection): + vpnservice = self.service_plugin._get_vpnservice( + context, ipsec_site_connection['vpnservice_id']) + self.agent_rpc.vpnservice_updated(context, vpnservice['router_id']) + + def update_ipsec_site_connection( + self, context, old_ipsec_site_connection, ipsec_site_connection): + vpnservice = self.service_plugin._get_vpnservice( + context, ipsec_site_connection['vpnservice_id']) + self.agent_rpc.vpnservice_updated(context, vpnservice['router_id']) + + def delete_ipsec_site_connection(self, context, ipsec_site_connection): + vpnservice = self.service_plugin._get_vpnservice( + context, ipsec_site_connection['vpnservice_id']) + self.agent_rpc.vpnservice_updated(context, vpnservice['router_id']) + + def create_ikepolicy(self, context, ikepolicy): + pass + + def delete_ikepolicy(self, context, ikepolicy): + pass + + def update_ikepolicy(self, context, old_ikepolicy, ikepolicy): + pass + + def create_ipsecpolicy(self, context, ipsecpolicy): + pass + + def delete_ipsecpolicy(self, context, ipsecpolicy): + pass + + def update_ipsecpolicy(self, context, old_ipsec_policy, ipsecpolicy): + pass + + def create_vpnservice(self, context, vpnservice): + pass + + def update_vpnservice(self, context, old_vpnservice, vpnservice): + self.agent_rpc.vpnservice_updated(context, vpnservice['router_id']) + + def delete_vpnservice(self, context, vpnservice): + self.agent_rpc.vpnservice_updated(context, vpnservice['router_id']) + + def _make_vpnservice_dict(self, vpnservice): + """Convert vpnservice information for vpn agent. 
+ + also converting parameter name for vpn agent driver + """ + vpnservice_dict = dict(vpnservice) + vpnservice_dict['ipsec_site_connections'] = [] + vpnservice_dict['subnet'] = dict( + vpnservice.subnet) + vpnservice_dict['external_ip'] = vpnservice.router.gw_port[ + 'fixed_ips'][0]['ip_address'] + for ipsec_site_connection in vpnservice.ipsec_site_connections: + ipsec_site_connection_dict = dict(ipsec_site_connection) + try: + netaddr.IPAddress(ipsec_site_connection['peer_id']) + except netaddr.core.AddrFormatError: + ipsec_site_connection['peer_id'] = ( + '@' + ipsec_site_connection['peer_id']) + ipsec_site_connection_dict['ikepolicy'] = dict( + ipsec_site_connection.ikepolicy) + ipsec_site_connection_dict['ipsecpolicy'] = dict( + ipsec_site_connection.ipsecpolicy) + vpnservice_dict['ipsec_site_connections'].append( + ipsec_site_connection_dict) + peer_cidrs = [ + peer_cidr.cidr + for peer_cidr in ipsec_site_connection.peer_cidrs] + ipsec_site_connection_dict['peer_cidrs'] = peer_cidrs + return vpnservice_dict diff --git a/neutron/tests/__init__.py b/neutron/tests/__init__.py new file mode 100644 index 000000000..2d32e4ef3 --- /dev/null +++ b/neutron/tests/__init__.py @@ -0,0 +1,16 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
diff --git a/neutron/tests/base.py b/neutron/tests/base.py new file mode 100644 index 000000000..dfbbe1386 --- /dev/null +++ b/neutron/tests/base.py @@ -0,0 +1,216 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010-2011 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Base Test Case for all Unit Tests""" + +import contextlib +import gc +import logging +import os +import os.path +import sys +import weakref + +import eventlet.timeout +import fixtures +import mock +from oslo.config import cfg +from oslo.messaging import conffixture as messaging_conffixture +import testtools + +from neutron.common import config +from neutron.common import rpc as n_rpc +from neutron.db import agentschedulers_db +from neutron import manager +from neutron.tests import fake_notifier +from neutron.tests import post_mortem_debug + + +CONF = cfg.CONF +CONF.import_opt('state_path', 'neutron.common.config') +TRUE_STRING = ['True', '1'] +LOG_FORMAT = "%(asctime)s %(levelname)8s [%(name)s] %(message)s" + +ROOTDIR = os.path.dirname(__file__) +ETCDIR = os.path.join(ROOTDIR, 'etc') + + +def etcdir(*p): + return os.path.join(ETCDIR, *p) + + +def fake_use_fatal_exceptions(*args): + return True + + +def fake_consume_in_threads(self): + return [] + + +class BaseTestCase(testtools.TestCase): + + def cleanup_core_plugin(self): + """Ensure that the core plugin is deallocated.""" + nm = manager.NeutronManager + if not nm.has_instance(): + return 
+ + #TODO(marun) Fix plugins that do not properly initialize notifiers + agentschedulers_db.AgentSchedulerDbMixin.agent_notifiers = {} + + # Perform a check for deallocation only if explicitly + # configured to do so since calling gc.collect() after every + # test increases test suite execution time by ~50%. + check_plugin_deallocation = ( + os.environ.get('OS_CHECK_PLUGIN_DEALLOCATION') in TRUE_STRING) + if check_plugin_deallocation: + plugin = weakref.ref(nm._instance.plugin) + + nm.clear_instance() + + if check_plugin_deallocation: + gc.collect() + + #TODO(marun) Ensure that mocks are deallocated? + if plugin() and not isinstance(plugin(), mock.Base): + self.fail('The plugin for this test was not deallocated.') + + def setup_coreplugin(self, core_plugin=None): + if core_plugin is not None: + cfg.CONF.set_override('core_plugin', core_plugin) + + def setup_notification_driver(self, notification_driver=None): + self.addCleanup(fake_notifier.reset) + if notification_driver is None: + notification_driver = [fake_notifier.__name__] + cfg.CONF.set_override("notification_driver", notification_driver) + + @staticmethod + def config_parse(conf=None, args=None): + """Create the default configurations.""" + # neutron.conf.test includes rpc_backend which needs to be cleaned up + if args is None: + args = ['--config-file', etcdir('neutron.conf.test')] + if conf is None: + config.init(args=args) + else: + conf(args) + + def setUp(self): + super(BaseTestCase, self).setUp() + + # Ensure plugin cleanup is triggered last so that + # test-specific cleanup has a chance to release references. 
+ self.addCleanup(self.cleanup_core_plugin) + + # Configure this first to ensure pm debugging support for setUp() + if os.environ.get('OS_POST_MORTEM_DEBUG') in TRUE_STRING: + self.addOnException(post_mortem_debug.exception_handler) + + if os.environ.get('OS_DEBUG') in TRUE_STRING: + _level = logging.DEBUG + else: + _level = logging.INFO + capture_logs = os.environ.get('OS_LOG_CAPTURE') in TRUE_STRING + if not capture_logs: + logging.basicConfig(format=LOG_FORMAT, level=_level) + self.log_fixture = self.useFixture( + fixtures.FakeLogger( + format=LOG_FORMAT, + level=_level, + nuke_handlers=capture_logs, + )) + + # suppress all but errors here + self.useFixture( + fixtures.FakeLogger( + name='neutron.api.extensions', + format=LOG_FORMAT, + level=logging.ERROR, + nuke_handlers=capture_logs, + )) + + test_timeout = int(os.environ.get('OS_TEST_TIMEOUT', 0)) + if test_timeout == -1: + test_timeout = 0 + if test_timeout > 0: + self.useFixture(fixtures.Timeout(test_timeout, gentle=True)) + + # If someone does use tempfile directly, ensure that it's cleaned up + self.useFixture(fixtures.NestedTempfile()) + self.useFixture(fixtures.TempHomeDir()) + + self.temp_dir = self.useFixture(fixtures.TempDir()).path + cfg.CONF.set_override('state_path', self.temp_dir) + + self.addCleanup(mock.patch.stopall) + self.addCleanup(CONF.reset) + + if os.environ.get('OS_STDOUT_CAPTURE') in TRUE_STRING: + stdout = self.useFixture(fixtures.StringStream('stdout')).stream + self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout)) + if os.environ.get('OS_STDERR_CAPTURE') in TRUE_STRING: + stderr = self.useFixture(fixtures.StringStream('stderr')).stream + self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr)) + self.useFixture(fixtures.MonkeyPatch( + 'neutron.common.exceptions.NeutronException.use_fatal_exceptions', + fake_use_fatal_exceptions)) + + # don't actually start RPC listeners when testing + self.useFixture(fixtures.MonkeyPatch( + 
'neutron.common.rpc_compat.Connection.consume_in_threads', + fake_consume_in_threads)) + + self.useFixture(fixtures.MonkeyPatch( + 'oslo.messaging.Notifier', fake_notifier.FakeNotifier)) + + self.messaging_conf = messaging_conffixture.ConfFixture(CONF) + self.messaging_conf.transport_driver = 'fake' + self.messaging_conf.response_timeout = 15 + self.useFixture(self.messaging_conf) + + self.addCleanup(n_rpc.clear_extra_exmods) + n_rpc.add_extra_exmods('neutron.test') + + self.addCleanup(n_rpc.cleanup) + n_rpc.init(CONF) + + if sys.version_info < (2, 7) and getattr(self, 'fmt', '') == 'xml': + raise self.skipException('XML Testing Skipped in Py26') + + def config(self, **kw): + """Override some configuration values. + + The keyword arguments are the names of configuration options to + override and their values. + + If a group argument is supplied, the overrides are applied to + the specified configuration option group. + + All overrides are automatically cleared at the end of the current + test by the fixtures cleanup process. 
+ """ + group = kw.pop('group', None) + for k, v in kw.iteritems(): + CONF.set_override(k, v, group) + + @contextlib.contextmanager + def assert_max_execution_time(self, max_execution_time=5): + with eventlet.timeout.Timeout(max_execution_time, False): + yield + return + self.fail('Execution of this test timed out') diff --git a/neutron/tests/etc/api-paste.ini.test b/neutron/tests/etc/api-paste.ini.test new file mode 100644 index 000000000..664c30949 --- /dev/null +++ b/neutron/tests/etc/api-paste.ini.test @@ -0,0 +1,8 @@ +[pipeline:extensions_app_with_filter] +pipeline = extensions extensions_test_app + +[filter:extensions] +paste.filter_factory = neutron.common.extensions:plugin_aware_extension_middleware_factory + +[app:extensions_test_app] +paste.app_factory = neutron.tests.unit.test_extensions:app_factory diff --git a/neutron/tests/etc/neutron.conf.test b/neutron/tests/etc/neutron.conf.test new file mode 100644 index 000000000..45f0e778f --- /dev/null +++ b/neutron/tests/etc/neutron.conf.test @@ -0,0 +1,27 @@ +[DEFAULT] +# Show more verbose log output (sets INFO log level output) +verbose = True + +# Show debugging output in logs (sets DEBUG log level output) +debug = False + +# Address to bind the API server +bind_host = 0.0.0.0 + +# Port the bind the API server to +bind_port = 9696 + +# Path to the extensions +api_extensions_path = unit/extensions + +# Paste configuration file +api_paste_config = api-paste.ini.test + +# The messaging module to use, defaults to kombu. +rpc_backend = neutron.openstack.common.rpc.impl_fake + +lock_path = $state_path/lock + +[database] +connection = 'sqlite://' + diff --git a/neutron/tests/etc/rootwrap.d/neutron.test.filters b/neutron/tests/etc/rootwrap.d/neutron.test.filters new file mode 100644 index 000000000..48fa6da89 --- /dev/null +++ b/neutron/tests/etc/rootwrap.d/neutron.test.filters @@ -0,0 +1,12 @@ +# neutron-rootwrap command filters for the unit test + +# this file goes with neutron/tests/unit/_test_rootwrap_exec.py. 
+# See the comments there about how to run that unit tests + +# format seems to be +# cmd-name: filter-name, raw-command, user, args + +[Filters] + +# a test filter for the RootwrapTest unit test +bash: CommandFilter, /usr/bin/bash, root diff --git a/neutron/tests/fake_notifier.py b/neutron/tests/fake_notifier.py new file mode 100644 index 000000000..012f3351e --- /dev/null +++ b/neutron/tests/fake_notifier.py @@ -0,0 +1,50 @@ +# Copyright 2014 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import collections +import functools + + +NOTIFICATIONS = [] + + +def reset(): + del NOTIFICATIONS[:] + + +FakeMessage = collections.namedtuple('Message', + ['publisher_id', 'priority', + 'event_type', 'payload']) + + +class FakeNotifier(object): + + def __init__(self, transport, publisher_id=None): + self.transport = transport + self.publisher_id = publisher_id + for priority in ('debug', 'info', 'warn', 'error', 'critical'): + setattr(self, priority, + functools.partial(self._notify, priority=priority.upper())) + + def prepare(self, publisher_id=None): + if publisher_id is None: + publisher_id = self.publisher_id + return self.__class__(self.transport, publisher_id) + + def _notify(self, ctxt, event_type, payload, priority): + msg = dict(publisher_id=self.publisher_id, + priority=priority, + event_type=event_type, + payload=payload) + NOTIFICATIONS.append(msg) diff --git a/neutron/tests/functional/__init__.py b/neutron/tests/functional/__init__.py new file mode 100644 index 000000000..ac4d6cbf6 --- /dev/null +++ b/neutron/tests/functional/__init__.py @@ -0,0 +1,15 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
diff --git a/neutron/tests/functional/agent/__init__.py b/neutron/tests/functional/agent/__init__.py new file mode 100644 index 000000000..ac4d6cbf6 --- /dev/null +++ b/neutron/tests/functional/agent/__init__.py @@ -0,0 +1,15 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/neutron/tests/functional/agent/linux/__init__.py b/neutron/tests/functional/agent/linux/__init__.py new file mode 100644 index 000000000..ac4d6cbf6 --- /dev/null +++ b/neutron/tests/functional/agent/linux/__init__.py @@ -0,0 +1,15 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
diff --git a/neutron/tests/functional/agent/linux/base.py b/neutron/tests/functional/agent/linux/base.py
new file mode 100644
index 000000000..c5ea717f7
--- /dev/null
+++ b/neutron/tests/functional/agent/linux/base.py
@@ -0,0 +1,74 @@
+# Copyright 2014 Cisco Systems, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+import random
+
+from neutron.agent.linux import ovs_lib
+from neutron.agent.linux import utils
+from neutron.plugins.common import constants as q_const
+from neutron.tests import base
+
+
+BR_PREFIX = 'test-br'
+
+
+class BaseLinuxTestCase(base.BaseTestCase):
+    def setUp(self, root_helper='sudo'):
+        super(BaseLinuxTestCase, self).setUp()
+
+        self.root_helper = root_helper
+
+    def check_command(self, cmd, error_text, skip_msg):
+        try:
+            utils.execute(cmd)
+        except RuntimeError as e:
+            if error_text in str(e):
+                self.skipTest(skip_msg)
+            raise
+
+    def check_sudo_enabled(self):
+        if os.environ.get('OS_SUDO_TESTING') not in base.TRUE_STRING:
+            self.skipTest('testing with sudo is not enabled')
+
+    def get_rand_name(self, max_length, prefix='test'):
+        name = prefix + str(random.randint(1, 0x7fffffff))
+        return name[:max_length]
+
+    def create_resource(self, name_prefix, creation_func, *args, **kwargs):
+        """Create a new resource that does not already exist.
+
+        :param name_prefix: The prefix for a randomly generated name
+        :param creation_func: A function taking the name of the resource
+            to be created as its first argument.
An error is assumed + to indicate a name collision. + :param *args *kwargs: These will be passed to the create function. + """ + while True: + name = self.get_rand_name(q_const.MAX_DEV_NAME_LEN, name_prefix) + try: + return creation_func(name, *args, **kwargs) + except RuntimeError: + continue + + +class BaseOVSLinuxTestCase(BaseLinuxTestCase): + def setUp(self, root_helper='sudo'): + super(BaseOVSLinuxTestCase, self).setUp(root_helper) + self.ovs = ovs_lib.BaseOVS(self.root_helper) + + def create_ovs_bridge(self, br_prefix=BR_PREFIX): + br = self.create_resource(br_prefix, self.ovs.add_bridge) + self.addCleanup(br.destroy) + return br diff --git a/neutron/tests/functional/agent/linux/test_async_process.py b/neutron/tests/functional/agent/linux/test_async_process.py new file mode 100644 index 000000000..dd2ebb889 --- /dev/null +++ b/neutron/tests/functional/agent/linux/test_async_process.py @@ -0,0 +1,71 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import eventlet +import fixtures + +from six import moves + +from neutron.agent.linux import async_process +from neutron.tests import base + + +class TestAsyncProcess(base.BaseTestCase): + + def setUp(self): + super(TestAsyncProcess, self).setUp() + self.test_file_path = self.useFixture( + fixtures.TempDir()).join("test_async_process.tmp") + self.data = [str(x) for x in moves.xrange(4)] + with file(self.test_file_path, 'w') as f: + f.writelines('%s\n' % item for item in self.data) + + def _check_stdout(self, proc): + # Ensure that all the output from the file is read + output = [] + while output != self.data: + new_output = list(proc.iter_stdout()) + if new_output: + output += new_output + eventlet.sleep(0.01) + + def test_stopping_async_process_lifecycle(self): + with self.assert_max_execution_time(): + proc = async_process.AsyncProcess(['tail', '-f', + self.test_file_path]) + proc.start() + self._check_stdout(proc) + proc.stop() + + # Ensure that the process and greenthreads have stopped + proc._process.wait() + self.assertEqual(proc._process.returncode, -9) + for watcher in proc._watchers: + watcher.wait() + + def test_async_process_respawns(self): + with self.assert_max_execution_time(): + proc = async_process.AsyncProcess(['tail', '-f', + self.test_file_path], + respawn_interval=0) + proc.start() + + # Ensure that the same output is read twice + self._check_stdout(proc) + pid = proc._get_pid_to_kill() + proc._kill_process(pid) + self._check_stdout(proc) + proc.stop() diff --git a/neutron/tests/functional/agent/linux/test_ovsdb_monitor.py b/neutron/tests/functional/agent/linux/test_ovsdb_monitor.py new file mode 100644 index 000000000..3ef5f9411 --- /dev/null +++ b/neutron/tests/functional/agent/linux/test_ovsdb_monitor.py @@ -0,0 +1,108 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Tests in this module will be skipped unless: + + - ovsdb-client is installed + + - ovsdb-client can be invoked via password-less sudo + + - OS_SUDO_TESTING is set to '1' or 'True' in the test execution + environment + + +The jenkins gate does not allow direct sudo invocation during test +runs, but configuring OS_SUDO_TESTING ensures that developers are +still able to execute tests that require the capability. +""" + +import eventlet + +from neutron.agent.linux import ovsdb_monitor +from neutron.tests.functional.agent.linux import base as base_agent + + +class BaseMonitorTest(base_agent.BaseOVSLinuxTestCase): + + def setUp(self): + # Emulate using a rootwrap script with sudo + super(BaseMonitorTest, self).setUp(root_helper='sudo sudo') + + self._check_test_requirements() + self.bridge = self.create_ovs_bridge() + + def _check_test_requirements(self): + self.check_sudo_enabled() + self.check_command(['which', 'ovsdb-client'], + 'Exit code: 1', 'ovsdb-client is not installed') + self.check_command(['sudo', '-n', 'ovsdb-client', 'list-dbs'], + 'Exit code: 1', + 'password-less sudo not granted for ovsdb-client') + + +class TestOvsdbMonitor(BaseMonitorTest): + + def setUp(self): + super(TestOvsdbMonitor, self).setUp() + + self.monitor = ovsdb_monitor.OvsdbMonitor('Bridge', + root_helper=self.root_helper) + self.addCleanup(self.monitor.stop) + self.monitor.start() + + def collect_initial_output(self): + while True: + output = list(self.monitor.iter_stdout()) + if output: + return output[0] + eventlet.sleep(0.01) + + def test_killed_monitor_respawns(self): 
+ with self.assert_max_execution_time(): + self.monitor.respawn_interval = 0 + old_pid = self.monitor._process.pid + output1 = self.collect_initial_output() + pid = self.monitor._get_pid_to_kill() + self.monitor._kill_process(pid) + self.monitor._reset_queues() + while (self.monitor._process.pid == old_pid): + eventlet.sleep(0.01) + output2 = self.collect_initial_output() + # Initial output should appear twice + self.assertEqual(output1, output2) + + +class TestSimpleInterfaceMonitor(BaseMonitorTest): + + def setUp(self): + super(TestSimpleInterfaceMonitor, self).setUp() + + self.monitor = ovsdb_monitor.SimpleInterfaceMonitor( + root_helper=self.root_helper) + self.addCleanup(self.monitor.stop) + self.monitor.start(block=True) + + def test_has_updates(self): + self.assertTrue(self.monitor.has_updates, + 'Initial call should always be true') + self.assertFalse(self.monitor.has_updates, + 'has_updates without port addition should be False') + self.create_resource('test-port-', self.bridge.add_port) + with self.assert_max_execution_time(): + # has_updates after port addition should become True + while not self.monitor.has_updates: + eventlet.sleep(0.01) diff --git a/neutron/tests/functional/sanity/__init__.py b/neutron/tests/functional/sanity/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/neutron/tests/functional/sanity/test_ovs_sanity.py b/neutron/tests/functional/sanity/test_ovs_sanity.py new file mode 100644 index 000000000..fa63300fe --- /dev/null +++ b/neutron/tests/functional/sanity/test_ovs_sanity.py @@ -0,0 +1,46 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2014 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import os + +from neutron.cmd.sanity import checks +from neutron.tests import base + + +class OVSSanityTestCase(base.BaseTestCase): + def setUp(self): + super(OVSSanityTestCase, self).setUp() + + self.root_helper = 'sudo' + + def check_sudo_enabled(self): + if os.environ.get('OS_SUDO_TESTING') not in base.TRUE_STRING: + self.skipTest('testing with sudo is not enabled') + + def test_ovs_vxlan_support_runs(self): + """This test just ensures that the test in neutron-sanity-check + can run through without error, without mocking anything out + """ + self.check_sudo_enabled() + checks.vxlan_supported(self.root_helper) + + def test_ovs_patch_support_runs(self): + """This test just ensures that the test in neutron-sanity-check + can run through without error, without mocking anything out + """ + self.check_sudo_enabled() + checks.patch_supported(self.root_helper) diff --git a/neutron/tests/post_mortem_debug.py b/neutron/tests/post_mortem_debug.py new file mode 100644 index 000000000..1208505c3 --- /dev/null +++ b/neutron/tests/post_mortem_debug.py @@ -0,0 +1,106 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import pdb +import traceback + + +def exception_handler(exc_info): + """Exception handler enabling post-mortem debugging. + + A class extending testtools.TestCase can add this handler in setUp(): + + self.addOnException(post_mortem_debug.exception_handler) + + When an exception occurs, the user will be dropped into a pdb + session in the execution environment of the failure. + + Frames associated with the testing framework are excluded so that + the post-mortem session for an assertion failure will start at the + assertion call (e.g. self.assertTrue) rather than the framework code + that raises the failure exception (e.g. the assertTrue method). + """ + tb = exc_info[2] + ignored_traceback = get_ignored_traceback(tb) + if ignored_traceback: + tb = FilteredTraceback(tb, ignored_traceback) + traceback.print_exception(exc_info[0], exc_info[1], tb) + pdb.post_mortem(tb) + + +def get_ignored_traceback(tb): + """Retrieve the first traceback of an ignored trailing chain. + + Given an initial traceback, find the first traceback of a trailing + chain of tracebacks that should be ignored. The criteria for + whether a traceback should be ignored is whether its frame's + globals include the __unittest marker variable. This criteria is + culled from: + + unittest.TestResult._is_relevant_tb_level + + For example: + + tb.tb_next => tb0.tb_next => tb1.tb_next + + - If no tracebacks were to be ignored, None would be returned. + - If only tb1 was to be ignored, tb1 would be returned. + - If tb0 and tb1 were to be ignored, tb0 would be returned. 
+ - If either of only tb or only tb0 was to be ignored, None would + be returned because neither tb or tb0 would be part of a + trailing chain of ignored tracebacks. + """ + # Turn the traceback chain into a list + tb_list = [] + while tb: + tb_list.append(tb) + tb = tb.tb_next + + # Find all members of an ignored trailing chain + ignored_tracebacks = [] + for tb in reversed(tb_list): + if '__unittest' in tb.tb_frame.f_globals: + ignored_tracebacks.append(tb) + else: + break + + # Return the first member of the ignored trailing chain + if ignored_tracebacks: + return ignored_tracebacks[-1] + + +class FilteredTraceback(object): + """Wraps a traceback to filter unwanted frames.""" + + def __init__(self, tb, filtered_traceback): + """Constructor. + + :param tb: The start of the traceback chain to filter. + :param filtered_traceback: The first traceback of a trailing + chain that is to be filtered. + """ + self._tb = tb + self.tb_lasti = self._tb.tb_lasti + self.tb_lineno = self._tb.tb_lineno + self.tb_frame = self._tb.tb_frame + self._filtered_traceback = filtered_traceback + + @property + def tb_next(self): + tb_next = self._tb.tb_next + if tb_next and tb_next != self._filtered_traceback: + return FilteredTraceback(tb_next, self._filtered_traceback) diff --git a/neutron/tests/tools.py b/neutron/tests/tools.py new file mode 100644 index 000000000..f80b30508 --- /dev/null +++ b/neutron/tests/tools.py @@ -0,0 +1,47 @@ +# Copyright (c) 2013 NEC Corporation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Akihiro Motoki, NEC Corporation + + +"""setup_mock_calls and verify_mock_calls are convenient methods +to setup a sequence of mock calls. + +expected_calls_and_values is a list of (expected_call, return_value): + + expected_calls_and_values = [ + (mock.call(["ovs-vsctl", self.TO, '--', "--may-exist", "add-port", + self.BR_NAME, pname], root_helper=self.root_helper), + None), + (mock.call(["ovs-vsctl", self.TO, "set", "Interface", + pname, "type=gre"], root_helper=self.root_helper), + None), + .... + ] + +* expected_call should be mock.call(expected_arg, ....) +* return_value is passed to side_effect of a mocked call. + A return value or an exception can be specified. +""" + + +def setup_mock_calls(mocked_call, expected_calls_and_values): + return_values = [call[1] for call in expected_calls_and_values] + mocked_call.side_effect = return_values + + +def verify_mock_calls(mocked_call, expected_calls_and_values): + expected_calls = [call[0] for call in expected_calls_and_values] + mocked_call.assert_has_calls(expected_calls) diff --git a/neutron/tests/unit/__init__.py b/neutron/tests/unit/__init__.py new file mode 100644 index 000000000..96b119faf --- /dev/null +++ b/neutron/tests/unit/__init__.py @@ -0,0 +1,26 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +import os + +from oslo.config import cfg + + +reldir = os.path.join(os.path.dirname(__file__), '..', '..', '..') +absdir = os.path.abspath(reldir) +cfg.CONF.state_path = absdir +cfg.CONF.use_stderr = False diff --git a/neutron/tests/unit/_test_extension_portbindings.py b/neutron/tests/unit/_test_extension_portbindings.py new file mode 100644 index 000000000..3a78b8d8f --- /dev/null +++ b/neutron/tests/unit/_test_extension_portbindings.py @@ -0,0 +1,377 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 NEC Corporation +# All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Akihiro Motoki, NEC Corporation +# + +import contextlib +import httplib + +from oslo.config import cfg +from webob import exc + +from neutron import context +from neutron.extensions import portbindings +from neutron import manager +from neutron.tests.unit import test_db_plugin + + +class PortBindingsTestCase(test_db_plugin.NeutronDbPluginV2TestCase): + + # VIF_TYPE must be overridden according to plugin vif_type + VIF_TYPE = portbindings.VIF_TYPE_OTHER + # The plugin supports the port security feature such as + # security groups and anti spoofing. 
+ HAS_PORT_FILTER = False + + def _check_response_portbindings(self, port): + self.assertEqual(port[portbindings.VIF_TYPE], self.VIF_TYPE) + vif_details = port[portbindings.VIF_DETAILS] + # REVISIT(rkukura): Consider reworking tests to enable ML2 to bind + if self.VIF_TYPE not in [portbindings.VIF_TYPE_UNBOUND, + portbindings.VIF_TYPE_BINDING_FAILED]: + # TODO(rkukura): Replace with new VIF security details + self.assertEqual(vif_details[portbindings.CAP_PORT_FILTER], + self.HAS_PORT_FILTER) + + def _check_response_no_portbindings(self, port): + self.assertIn('status', port) + self.assertNotIn(portbindings.VIF_TYPE, port) + self.assertNotIn(portbindings.VIF_DETAILS, port) + + def _get_non_admin_context(self): + return context.Context(user_id=None, + tenant_id=self._tenant_id, + is_admin=False, + read_deleted="no") + + def test_port_vif_details(self): + with self.port(name='name') as port: + port_id = port['port']['id'] + # Check a response of create_port + self._check_response_portbindings(port['port']) + # Check a response of get_port + ctx = context.get_admin_context() + port = self._show('ports', port_id, neutron_context=ctx)['port'] + self._check_response_portbindings(port) + # By default user is admin - now test non admin user + ctx = self._get_non_admin_context() + non_admin_port = self._show( + 'ports', port_id, neutron_context=ctx)['port'] + self._check_response_no_portbindings(non_admin_port) + + def test_ports_vif_details(self): + plugin = manager.NeutronManager.get_plugin() + cfg.CONF.set_default('allow_overlapping_ips', True) + with contextlib.nested(self.port(), self.port()): + ctx = context.get_admin_context() + ports = plugin.get_ports(ctx) + self.assertEqual(len(ports), 2) + for port in ports: + self._check_response_portbindings(port) + # By default user is admin - now test non admin user + ctx = self._get_non_admin_context() + ports = self._list('ports', neutron_context=ctx)['ports'] + self.assertEqual(len(ports), 2) + for non_admin_port in ports: 
+ self._check_response_no_portbindings(non_admin_port) + + def _check_port_binding_profile(self, port, profile=None): + # For plugins which does not use binding:profile attr + # we just check an operation for the port succeed. + self.assertIn('id', port) + + def _test_create_port_binding_profile(self, profile): + profile_arg = {portbindings.PROFILE: profile} + with self.port(arg_list=(portbindings.PROFILE,), + **profile_arg) as port: + port_id = port['port']['id'] + self._check_port_binding_profile(port['port'], profile) + port = self._show('ports', port_id) + self._check_port_binding_profile(port['port'], profile) + + def test_create_port_binding_profile_none(self): + self._test_create_port_binding_profile(None) + + def test_create_port_binding_profile_with_empty_dict(self): + self._test_create_port_binding_profile({}) + + def _test_update_port_binding_profile(self, profile): + profile_arg = {portbindings.PROFILE: profile} + with self.port() as port: + # print "(1) %s" % port + self._check_port_binding_profile(port['port']) + port_id = port['port']['id'] + ctx = context.get_admin_context() + port = self._update('ports', port_id, {'port': profile_arg}, + neutron_context=ctx)['port'] + self._check_port_binding_profile(port, profile) + port = self._show('ports', port_id)['port'] + self._check_port_binding_profile(port, profile) + + def test_update_port_binding_profile_none(self): + self._test_update_port_binding_profile(None) + + def test_update_port_binding_profile_with_empty_dict(self): + self._test_update_port_binding_profile({}) + + def test_port_create_portinfo_non_admin(self): + profile_arg = {portbindings.PROFILE: {'dummy': 'dummy'}} + with self.network(set_context=True, tenant_id='test') as net1: + with self.subnet(network=net1) as subnet1: + # succeed without binding:profile + with self.port(subnet=subnet1, + set_context=True, tenant_id='test'): + pass + # fail with binding:profile + try: + with self.port(subnet=subnet1, + expected_res_status=403, + 
arg_list=(portbindings.PROFILE,), + set_context=True, tenant_id='test', + **profile_arg): + pass + except exc.HTTPClientError: + pass + + def test_port_update_portinfo_non_admin(self): + profile_arg = {portbindings.PROFILE: {'dummy': 'dummy'}} + with self.network() as net1: + with self.subnet(network=net1) as subnet1: + with self.port(subnet=subnet1) as port: + # By default user is admin - now test non admin user + # Note that 404 is returned when prohibit by policy. + # See comment for PolicyNotAuthorized except clause + # in update() in neutron.api.v2.base.Controller. + port_id = port['port']['id'] + ctx = self._get_non_admin_context() + port = self._update('ports', port_id, + {'port': profile_arg}, + expected_code=404, + neutron_context=ctx) + + +class PortBindingsHostTestCaseMixin(object): + fmt = 'json' + hostname = 'testhost' + + def _check_response_portbindings_host(self, port): + self.assertEqual(port[portbindings.HOST_ID], self.hostname) + + def _check_response_no_portbindings_host(self, port): + self.assertIn('status', port) + self.assertNotIn(portbindings.HOST_ID, port) + + def test_port_vif_non_admin(self): + with self.network(set_context=True, + tenant_id='test') as net1: + with self.subnet(network=net1) as subnet1: + host_arg = {portbindings.HOST_ID: self.hostname} + try: + with self.port(subnet=subnet1, + expected_res_status=403, + arg_list=(portbindings.HOST_ID,), + set_context=True, + tenant_id='test', + **host_arg): + pass + except exc.HTTPClientError: + pass + + def test_port_vif_host(self): + host_arg = {portbindings.HOST_ID: self.hostname} + with self.port(name='name', arg_list=(portbindings.HOST_ID,), + **host_arg) as port: + port_id = port['port']['id'] + # Check a response of create_port + self._check_response_portbindings_host(port['port']) + # Check a response of get_port + ctx = context.get_admin_context() + port = self._show('ports', port_id, neutron_context=ctx)['port'] + self._check_response_portbindings_host(port) + # By default user 
is admin - now test non admin user + ctx = context.Context(user_id=None, + tenant_id=self._tenant_id, + is_admin=False, + read_deleted="no") + non_admin_port = self._show( + 'ports', port_id, neutron_context=ctx)['port'] + self._check_response_no_portbindings_host(non_admin_port) + + def test_ports_vif_host(self): + cfg.CONF.set_default('allow_overlapping_ips', True) + host_arg = {portbindings.HOST_ID: self.hostname} + with contextlib.nested( + self.port(name='name1', + arg_list=(portbindings.HOST_ID,), + **host_arg), + self.port(name='name2')): + ctx = context.get_admin_context() + ports = self._list('ports', neutron_context=ctx)['ports'] + self.assertEqual(2, len(ports)) + for port in ports: + if port['name'] == 'name1': + self._check_response_portbindings_host(port) + else: + self.assertFalse(port[portbindings.HOST_ID]) + # By default user is admin - now test non admin user + ctx = context.Context(user_id=None, + tenant_id=self._tenant_id, + is_admin=False, + read_deleted="no") + ports = self._list('ports', neutron_context=ctx)['ports'] + self.assertEqual(2, len(ports)) + for non_admin_port in ports: + self._check_response_no_portbindings_host(non_admin_port) + + def test_ports_vif_host_update(self): + cfg.CONF.set_default('allow_overlapping_ips', True) + host_arg = {portbindings.HOST_ID: self.hostname} + with contextlib.nested( + self.port(name='name1', + arg_list=(portbindings.HOST_ID,), + **host_arg), + self.port(name='name2')) as (port1, port2): + data = {'port': {portbindings.HOST_ID: 'testhosttemp'}} + req = self.new_update_request('ports', data, port1['port']['id']) + req.get_response(self.api) + req = self.new_update_request('ports', data, port2['port']['id']) + ctx = context.get_admin_context() + req.get_response(self.api) + ports = self._list('ports', neutron_context=ctx)['ports'] + self.assertEqual(2, len(ports)) + for port in ports: + self.assertEqual('testhosttemp', port[portbindings.HOST_ID]) + + def test_ports_vif_non_host_update(self): + host_arg 
= {portbindings.HOST_ID: self.hostname} + with self.port(name='name', arg_list=(portbindings.HOST_ID,), + **host_arg) as port: + data = {'port': {'admin_state_up': False}} + req = self.new_update_request('ports', data, port['port']['id']) + res = self.deserialize(self.fmt, req.get_response(self.api)) + self.assertEqual(port['port'][portbindings.HOST_ID], + res['port'][portbindings.HOST_ID]) + + def test_ports_vif_non_host_update_when_host_null(self): + with self.port() as port: + data = {'port': {'admin_state_up': False}} + req = self.new_update_request('ports', data, port['port']['id']) + res = self.deserialize(self.fmt, req.get_response(self.api)) + self.assertEqual(port['port'][portbindings.HOST_ID], + res['port'][portbindings.HOST_ID]) + + def test_ports_vif_host_list(self): + cfg.CONF.set_default('allow_overlapping_ips', True) + host_arg = {portbindings.HOST_ID: self.hostname} + with contextlib.nested( + self.port(name='name1', + arg_list=(portbindings.HOST_ID,), + **host_arg), + self.port(name='name2'), + self.port(name='name3', + arg_list=(portbindings.HOST_ID,), + **host_arg),) as (port1, _port2, port3): + self._test_list_resources( + 'port', (port1, port3), + query_params='%s=%s' % (portbindings.HOST_ID, self.hostname)) + + +class PortBindingsVnicTestCaseMixin(object): + fmt = 'json' + vnic_type = portbindings.VNIC_NORMAL + + def _check_response_portbindings_vnic_type(self, port): + self.assertIn('status', port) + self.assertEqual(port[portbindings.VNIC_TYPE], self.vnic_type) + + def test_port_vnic_type_non_admin(self): + with self.network(set_context=True, + tenant_id='test') as net1: + with self.subnet(network=net1) as subnet1: + vnic_arg = {portbindings.VNIC_TYPE: self.vnic_type} + with self.port(subnet=subnet1, + expected_res_status=httplib.CREATED, + arg_list=(portbindings.VNIC_TYPE,), + set_context=True, + tenant_id='test', + **vnic_arg) as port: + # Check a response of create_port + self._check_response_portbindings_vnic_type(port['port']) + + def 
test_port_vnic_type(self): + vnic_arg = {portbindings.VNIC_TYPE: self.vnic_type} + with self.port(name='name', arg_list=(portbindings.VNIC_TYPE,), + **vnic_arg) as port: + port_id = port['port']['id'] + # Check a response of create_port + self._check_response_portbindings_vnic_type(port['port']) + # Check a response of get_port + ctx = context.get_admin_context() + port = self._show('ports', port_id, neutron_context=ctx)['port'] + self._check_response_portbindings_vnic_type(port) + # By default user is admin - now test non admin user + ctx = context.Context(user_id=None, + tenant_id=self._tenant_id, + is_admin=False, + read_deleted="no") + non_admin_port = self._show( + 'ports', port_id, neutron_context=ctx)['port'] + self._check_response_portbindings_vnic_type(non_admin_port) + + def test_ports_vnic_type(self): + cfg.CONF.set_default('allow_overlapping_ips', True) + vnic_arg = {portbindings.VNIC_TYPE: self.vnic_type} + with contextlib.nested( + self.port(name='name1', + arg_list=(portbindings.VNIC_TYPE,), + **vnic_arg), + self.port(name='name2')): + ctx = context.get_admin_context() + ports = self._list('ports', neutron_context=ctx)['ports'] + self.assertEqual(2, len(ports)) + for port in ports: + if port['name'] == 'name1': + self._check_response_portbindings_vnic_type(port) + else: + self.assertEqual(portbindings.VNIC_NORMAL, + port[portbindings.VNIC_TYPE]) + # By default user is admin - now test non admin user + ctx = context.Context(user_id=None, + tenant_id=self._tenant_id, + is_admin=False, + read_deleted="no") + ports = self._list('ports', neutron_context=ctx)['ports'] + self.assertEqual(2, len(ports)) + for non_admin_port in ports: + self._check_response_portbindings_vnic_type(non_admin_port) + + def test_ports_vnic_type_list(self): + cfg.CONF.set_default('allow_overlapping_ips', True) + vnic_arg = {portbindings.VNIC_TYPE: self.vnic_type} + with contextlib.nested( + self.port(name='name1', + arg_list=(portbindings.VNIC_TYPE,), + **vnic_arg), + 
self.port(name='name2'), + self.port(name='name3', + arg_list=(portbindings.VNIC_TYPE,), + **vnic_arg),) as (port1, port2, port3): + self._test_list_resources( + 'port', (port1, port2, port3), + query_params='%s=%s' % (portbindings.VNIC_TYPE, + self.vnic_type)) diff --git a/neutron/tests/unit/_test_rootwrap_exec.py b/neutron/tests/unit/_test_rootwrap_exec.py new file mode 100644 index 000000000..a26104e40 --- /dev/null +++ b/neutron/tests/unit/_test_rootwrap_exec.py @@ -0,0 +1,85 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2012 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import os + +import fixtures + +from neutron.agent.linux import utils +from neutron.openstack.common import log as logging +from neutron.tests import base + + +LOG = logging.getLogger(__name__) + + +class RootwrapTestExec(base.BaseTestCase): + """Simple unit test to test the basic rootwrap mechanism + + Essentially hello-world. Just run a command as root and check that + it actually *did* run as root, and generated the right output. + + NB that this is named _test_rootwrap so as not to get run by default + from scripts like tox. That's because it actually executes a sudo'ed + command, and that won't work in the automated test environment, at + least as it stands today. To run this, rename it to + test_rootwrap.py, or run it by hand. 
+ """ + + def setUp(self): + super(RootwrapTestExec, self).setUp() + self.cwd = os.getcwd() + "/../../.." + # stuff a stupid bash script into /tmp, so that the next + # method can execute it. + self.test_file = self.useFixture( + fixtures.TempDir()).join("rootwrap-test.sh") + with open(self.test_file, 'w') as f: + f.write('#!/bin/bash\n') + f.write('ID=`id | sed \'s/uid=//\' | sed \'s/(.*//\' `\n') + f.write("echo $ID $1\ +\" Now is the time for all good men to come \ +to the aid of their party.\"\n") + # we need a temporary conf file, pointing into pwd for the filter + # specs. there's probably a better way to do this, but I couldn't + # figure it out. 08/15/12 -- jrd + self.conf_file = self.useFixture( + fixtures.TempDir()).join("rootwrap.conf") + with open(self.conf_file, 'w') as f: + f.write("# temporary conf file for rootwrap-test, " + + "generated by test_rootwrap.py\n") + f.write("[DEFAULT]\n") + f.write("filters_path=" + self.cwd + + "/neutron/tests/etc/rootwrap.d/") + # now set the root helper to sudo our rootwrap script, + # with the new conf + self.root_helper = "sudo " + self.cwd + "/bin/neutron-rootwrap " + self.root_helper += self.conf_file + + def runTest(self): + try: + result = utils.execute(["bash", self.test_file, 'arg'], + self.root_helper) + self.assertEqual(result, + "0 arg Now is the time for all good men to \ +come to the aid of their party.") + except Exception: + LOG.exception("Losing in rootwrap test") + + def tearDown(self): + os.remove(self.test_file) + os.remove(self.conf_file) + super(RootwrapTestExec, self).tearDown() diff --git a/neutron/tests/unit/agent/__init__.py b/neutron/tests/unit/agent/__init__.py new file mode 100644 index 000000000..ac4d6cbf6 --- /dev/null +++ b/neutron/tests/unit/agent/__init__.py @@ -0,0 +1,15 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 Red Hat, Inc. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/neutron/tests/unit/agent/linux/__init__.py b/neutron/tests/unit/agent/linux/__init__.py new file mode 100644 index 000000000..ac4d6cbf6 --- /dev/null +++ b/neutron/tests/unit/agent/linux/__init__.py @@ -0,0 +1,15 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/neutron/tests/unit/agent/linux/test_async_process.py b/neutron/tests/unit/agent/linux/test_async_process.py new file mode 100644 index 000000000..ea4243030 --- /dev/null +++ b/neutron/tests/unit/agent/linux/test_async_process.py @@ -0,0 +1,251 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import eventlet.event +import eventlet.queue +import eventlet.timeout +import mock +import testtools + +from neutron.agent.linux import async_process +from neutron.agent.linux import utils +from neutron.tests import base + + +_marker = () + + +class TestAsyncProcess(base.BaseTestCase): + + def setUp(self): + super(TestAsyncProcess, self).setUp() + self.proc = async_process.AsyncProcess(['fake']) + + def test_construtor_raises_exception_for_negative_respawn_interval(self): + with testtools.ExpectedException(ValueError): + async_process.AsyncProcess(['fake'], respawn_interval=-1) + + def test__spawn(self): + expected_process = 'Foo' + proc = self.proc + with mock.patch.object(utils, 'create_process') as mock_create_process: + mock_create_process.return_value = [expected_process, None] + with mock.patch('eventlet.spawn') as mock_spawn: + proc._spawn() + + self.assertIsInstance(proc._kill_event, eventlet.event.Event) + self.assertEqual(proc._process, expected_process) + mock_spawn.assert_has_calls([ + mock.call(proc._watch_process, + proc._read_stdout, + proc._kill_event), + mock.call(proc._watch_process, + proc._read_stderr, + proc._kill_event), + ]) + self.assertEqual(len(proc._watchers), 2) + + def test__handle_process_error_kills_with_respawn(self): + with mock.patch.object(self.proc, '_kill') as kill: + self.proc._handle_process_error() + + kill.assert_has_calls(mock.call(respawning=False)) + + def test__handle_process_error_kills_without_respawn(self): + self.proc.respawn_interval = 1 + with mock.patch.object(self.proc, '_kill') as kill: + with 
mock.patch.object(self.proc, '_spawn') as spawn: + with mock.patch('eventlet.sleep') as sleep: + self.proc._handle_process_error() + + kill.assert_has_calls(mock.call(respawning=True)) + sleep.assert_has_calls(mock.call(self.proc.respawn_interval)) + spawn.assert_called_once_with() + + def _test__watch_process(self, callback, kill_event): + self.proc._kill_event = kill_event + # Ensure the test times out eventually if the watcher loops endlessly + with eventlet.timeout.Timeout(5): + with mock.patch.object(self.proc, + '_handle_process_error') as func: + self.proc._watch_process(callback, kill_event) + + if not kill_event.ready(): + func.assert_called_once_with() + + def test__watch_process_exits_on_callback_failure(self): + self._test__watch_process(lambda: False, eventlet.event.Event()) + + def test__watch_process_exits_on_exception(self): + def foo(): + raise Exception('Error!') + self._test__watch_process(foo, eventlet.event.Event()) + + def test__watch_process_exits_on_sent_kill_event(self): + kill_event = eventlet.event.Event() + kill_event.send() + self._test__watch_process(None, kill_event) + + def _test_read_output_queues_and_returns_result(self, output): + queue = eventlet.queue.LightQueue() + mock_stream = mock.Mock() + with mock.patch.object(mock_stream, 'readline') as mock_readline: + mock_readline.return_value = output + result = self.proc._read(mock_stream, queue) + + if output: + self.assertEqual(output, result) + self.assertEqual(output, queue.get_nowait()) + else: + self.assertFalse(result) + self.assertTrue(queue.empty()) + + def test__read_queues_and_returns_output(self): + self._test_read_output_queues_and_returns_result('foo') + + def test__read_returns_none_for_missing_output(self): + self._test_read_output_queues_and_returns_result('') + + def test_start_raises_exception_if_process_already_started(self): + self.proc._kill_event = True + with testtools.ExpectedException(async_process.AsyncProcessException): + self.proc.start() + + def 
test_start_invokes__spawn(self): + with mock.patch.object(self.proc, '_spawn') as mock_start: + self.proc.start() + + mock_start.assert_called_once_with() + + def test__iter_queue_returns_empty_list_for_empty_queue(self): + result = list(self.proc._iter_queue(eventlet.queue.LightQueue())) + self.assertEqual(result, []) + + def test__iter_queue_returns_queued_data(self): + queue = eventlet.queue.LightQueue() + queue.put('foo') + result = list(self.proc._iter_queue(queue)) + self.assertEqual(result, ['foo']) + + def _test_iter_output_calls_iter_queue_on_output_queue(self, output_type): + expected_value = 'foo' + with mock.patch.object(self.proc, '_iter_queue') as mock_iter_queue: + mock_iter_queue.return_value = expected_value + target_func = getattr(self.proc, 'iter_%s' % output_type, None) + value = target_func() + + self.assertEqual(value, expected_value) + queue = getattr(self.proc, '_%s_lines' % output_type, None) + mock_iter_queue.assert_called_with(queue) + + def test_iter_stdout(self): + self._test_iter_output_calls_iter_queue_on_output_queue('stdout') + + def test_iter_stderr(self): + self._test_iter_output_calls_iter_queue_on_output_queue('stderr') + + def _test__kill(self, respawning, pid=None): + with mock.patch.object(self.proc, '_kill_event') as mock_kill_event: + with mock.patch.object(self.proc, '_get_pid_to_kill', + return_value=pid): + with mock.patch.object(self.proc, + '_kill_process') as mock_kill_process: + self.proc._kill(respawning) + + if respawning: + self.assertIsNotNone(self.proc._kill_event) + else: + self.assertIsNone(self.proc._kill_event) + + mock_kill_event.send.assert_called_once_with() + if pid: + mock_kill_process.assert_called_once_with(pid) + + def test__kill_when_respawning_does_not_clear_kill_event(self): + self._test__kill(True) + + def test__kill_when_not_respawning_clears_kill_event(self): + self._test__kill(False) + + def test__kill_targets_process_for_pid(self): + self._test__kill(False, pid='1') + + def 
_test__get_pid_to_kill(self, expected=_marker, + root_helper=None, pids=None): + def _find_child_pids(x): + if not pids: + return [] + pids.pop(0) + return pids + + if root_helper: + self.proc.root_helper = root_helper + + with mock.patch.object(self.proc, '_process') as mock_process: + with mock.patch.object(mock_process, 'pid') as mock_pid: + with mock.patch.object(utils, 'find_child_pids', + side_effect=_find_child_pids): + actual = self.proc._get_pid_to_kill() + if expected is _marker: + expected = mock_pid + self.assertEqual(expected, actual) + + def test__get_pid_to_kill_returns_process_pid_without_root_helper(self): + self._test__get_pid_to_kill() + + def test__get_pid_to_kill_returns_child_pid_with_root_helper(self): + self._test__get_pid_to_kill(expected='2', pids=['1', '2'], + root_helper='a') + + def test__get_pid_to_kill_returns_last_child_pid_with_root_Helper(self): + self._test__get_pid_to_kill(expected='3', pids=['1', '2', '3'], + root_helper='a') + + def test__get_pid_to_kill_returns_none_with_root_helper(self): + self._test__get_pid_to_kill(expected=None, root_helper='a') + + def _test__kill_process(self, pid, expected, exception_message=None): + self.proc.root_helper = 'foo' + if exception_message: + exc = RuntimeError(exception_message) + else: + exc = None + with mock.patch.object(utils, 'execute', + side_effect=exc) as mock_execute: + actual = self.proc._kill_process(pid) + + self.assertEqual(expected, actual) + mock_execute.assert_called_with(['kill', '-9', pid], + root_helper=self.proc.root_helper) + + def test__kill_process_returns_true_for_valid_pid(self): + self._test__kill_process('1', True) + + def test__kill_process_returns_true_for_stale_pid(self): + self._test__kill_process('1', True, 'No such process') + + def test__kill_process_returns_false_for_execute_exception(self): + self._test__kill_process('1', False, 'Invalid') + + def test_stop_calls_kill(self): + self.proc._kill_event = True + with mock.patch.object(self.proc, '_kill') as 
mock_kill: + self.proc.stop() + mock_kill.assert_called_once_with() + + def test_stop_raises_exception_if_already_started(self): + with testtools.ExpectedException(async_process.AsyncProcessException): + self.proc.stop() diff --git a/neutron/tests/unit/agent/linux/test_ovs_lib.py b/neutron/tests/unit/agent/linux/test_ovs_lib.py new file mode 100644 index 000000000..8a19ed39a --- /dev/null +++ b/neutron/tests/unit/agent/linux/test_ovs_lib.py @@ -0,0 +1,967 @@ +# Copyright 2012, VMware, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import collections +import mock +from oslo.config import cfg +import testtools + +from neutron.agent.linux import ovs_lib +from neutron.agent.linux import utils +from neutron.common import exceptions +from neutron.openstack.common import jsonutils +from neutron.openstack.common import uuidutils +from neutron.plugins.common import constants as p_const +from neutron.plugins.openvswitch.common import constants as const +from neutron.tests import base +from neutron.tests import tools + +try: + OrderedDict = collections.OrderedDict +except AttributeError: + import ordereddict + OrderedDict = ordereddict.OrderedDict + +OVS_LINUX_KERN_VERS_WITHOUT_VXLAN = "3.12.0" + + +class TestBaseOVS(base.BaseTestCase): + + def setUp(self): + super(TestBaseOVS, self).setUp() + self.root_helper = 'sudo' + self.ovs = ovs_lib.BaseOVS(self.root_helper) + self.br_name = 'bridge1' + + def test_add_bridge(self): + with mock.patch.object(self.ovs, 'run_vsctl') as mock_vsctl: + bridge = self.ovs.add_bridge(self.br_name) + + mock_vsctl.assert_called_with(["--", "--may-exist", + "add-br", self.br_name]) + self.assertEqual(bridge.br_name, self.br_name) + self.assertEqual(bridge.root_helper, self.ovs.root_helper) + + def test_delete_bridge(self): + with mock.patch.object(self.ovs, 'run_vsctl') as mock_vsctl: + self.ovs.delete_bridge(self.br_name) + mock_vsctl.assert_called_with(["--", "--if-exists", "del-br", + self.br_name]) + + def test_bridge_exists_returns_true(self): + with mock.patch.object(self.ovs, 'run_vsctl') as mock_vsctl: + self.assertTrue(self.ovs.bridge_exists(self.br_name)) + mock_vsctl.assert_called_with(['br-exists', self.br_name], + check_error=True) + + def test_bridge_exists_returns_false_for_exit_code_2(self): + with mock.patch.object(self.ovs, 'run_vsctl', + side_effect=RuntimeError('Exit code: 2\n')): + self.assertFalse(self.ovs.bridge_exists('bridge1')) + + def test_bridge_exists_raises_unknown_exception(self): + with mock.patch.object(self.ovs, 'run_vsctl', + 
side_effect=RuntimeError()): + with testtools.ExpectedException(RuntimeError): + self.ovs.bridge_exists('bridge1') + + def test_get_bridge_name_for_port_name_returns_bridge_for_valid_port(self): + port_name = 'bar' + with mock.patch.object(self.ovs, 'run_vsctl', + return_value=self.br_name) as mock_vsctl: + bridge = self.ovs.get_bridge_name_for_port_name(port_name) + self.assertEqual(bridge, self.br_name) + mock_vsctl.assert_called_with(['port-to-br', port_name], + check_error=True) + + def test_get_bridge_name_for_port_name_returns_none_for_exit_code_1(self): + with mock.patch.object(self.ovs, 'run_vsctl', + side_effect=RuntimeError('Exit code: 1\n')): + self.assertFalse(self.ovs.get_bridge_name_for_port_name('bridge1')) + + def test_get_bridge_name_for_port_name_raises_unknown_exception(self): + with mock.patch.object(self.ovs, 'run_vsctl', + side_effect=RuntimeError()): + with testtools.ExpectedException(RuntimeError): + self.ovs.get_bridge_name_for_port_name('bridge1') + + def _test_port_exists(self, br_name, result): + with mock.patch.object(self.ovs, + 'get_bridge_name_for_port_name', + return_value=br_name): + self.assertEqual(self.ovs.port_exists('bar'), result) + + def test_port_exists_returns_true_for_bridge_name(self): + self._test_port_exists(self.br_name, True) + + def test_port_exists_returns_false_for_none(self): + self._test_port_exists(None, False) + + +class OVS_Lib_Test(base.BaseTestCase): + """A test suite to exercise the OVS libraries shared by Neutron agents. + + Note: these tests do not actually execute ovs-* utilities, and thus + can run on any system. That does, however, limit their scope. 
+ """ + + def setUp(self): + super(OVS_Lib_Test, self).setUp() + self.BR_NAME = "br-int" + self.TO = "--timeout=10" + + self.root_helper = 'sudo' + self.br = ovs_lib.OVSBridge(self.BR_NAME, self.root_helper) + self.execute = mock.patch.object( + utils, "execute", spec=utils.execute).start() + + def test_vifport(self): + """Create and stringify vif port, confirm no exceptions.""" + + pname = "vif1.0" + ofport = 5 + vif_id = uuidutils.generate_uuid() + mac = "ca:fe:de:ad:be:ef" + + # test __init__ + port = ovs_lib.VifPort(pname, ofport, vif_id, mac, self.br) + self.assertEqual(port.port_name, pname) + self.assertEqual(port.ofport, ofport) + self.assertEqual(port.vif_id, vif_id) + self.assertEqual(port.vif_mac, mac) + self.assertEqual(port.switch.br_name, self.BR_NAME) + + # test __str__ + str(port) + + def test_set_controller(self): + controller_names = ['tcp:127.0.0.1:6633', 'tcp:172.17.16.10:5555'] + self.br.set_controller(controller_names) + self.execute.assert_called_once_with( + ['ovs-vsctl', self.TO, '--', 'set-controller', self.BR_NAME, + 'tcp:127.0.0.1:6633', 'tcp:172.17.16.10:5555'], + root_helper=self.root_helper) + + def test_del_controller(self): + self.br.del_controller() + self.execute.assert_called_once_with( + ['ovs-vsctl', self.TO, '--', 'del-controller', self.BR_NAME], + root_helper=self.root_helper) + + def test_get_controller(self): + self.execute.return_value = 'tcp:127.0.0.1:6633\ntcp:172.17.16.10:5555' + names = self.br.get_controller() + self.assertEqual(names, + ['tcp:127.0.0.1:6633', 'tcp:172.17.16.10:5555']) + self.execute.assert_called_once_with( + ['ovs-vsctl', self.TO, '--', 'get-controller', self.BR_NAME], + root_helper=self.root_helper) + + def test_set_secure_mode(self): + self.br.set_secure_mode() + self.execute.assert_called_once_with( + ['ovs-vsctl', self.TO, '--', 'set-fail-mode', self.BR_NAME, + 'secure'], root_helper=self.root_helper) + + def test_set_protocols(self): + protocols = 'OpenFlow13' + self.br.set_protocols(protocols) 
+ self.execute.assert_called_once_with( + ['ovs-vsctl', self.TO, '--', 'set', 'bridge', self.BR_NAME, + "protocols=%s" % protocols], + root_helper=self.root_helper) + + def test_create(self): + self.br.add_bridge(self.BR_NAME) + + self.br.create() + + def test_destroy(self): + self.br.delete_bridge(self.BR_NAME) + + self.br.destroy() + + def test_reset_bridge(self): + self.br.destroy() + self.br.create() + + self.br.reset_bridge() + + def _build_timeout_opt(self, exp_timeout): + return "--timeout=%d" % exp_timeout if exp_timeout else self.TO + + def _test_delete_port(self, exp_timeout=None): + exp_timeout_str = self._build_timeout_opt(exp_timeout) + pname = "tap5" + self.br.delete_port(pname) + self.execute.assert_called_once_with( + ["ovs-vsctl", exp_timeout_str, "--", "--if-exists", + "del-port", self.BR_NAME, pname], + root_helper=self.root_helper) + + def test_delete_port(self): + self._test_delete_port() + + def test_call_command_non_default_timeput(self): + # This test is only for verifying a non-default timeout + # is correctly applied. 
Does not need to be repeated for + # every ovs_lib method + new_timeout = 5 + self.br.vsctl_timeout = new_timeout + self._test_delete_port(new_timeout) + + def test_add_flow(self): + ofport = "99" + vid = 4000 + lsw_id = 18 + cidr = '192.168.1.0/24' + + flow_dict_1 = OrderedDict([('priority', 2), + ('dl_src', 'ca:fe:de:ad:be:ef'), + ('actions', 'strip_vlan,output:0')]) + flow_dict_2 = OrderedDict([('priority', 1), + ('actions', 'normal')]) + flow_dict_3 = OrderedDict([('priority', 2), + ('actions', 'drop')]) + flow_dict_4 = OrderedDict([('priority', 2), + ('in_port', ofport), + ('actions', 'drop')]) + flow_dict_5 = OrderedDict([ + ('priority', 4), + ('in_port', ofport), + ('dl_vlan', vid), + ('actions', "strip_vlan,set_tunnel:%s,normal" % (lsw_id))]) + flow_dict_6 = OrderedDict([ + ('priority', 3), + ('tun_id', lsw_id), + ('actions', "mod_vlan_vid:%s,output:%s" % (vid, ofport))]) + flow_dict_7 = OrderedDict([ + ('priority', 4), + ('nw_src', cidr), + ('proto', 'arp'), + ('actions', 'drop')]) + + self.br.add_flow(**flow_dict_1) + self.br.add_flow(**flow_dict_2) + self.br.add_flow(**flow_dict_3) + self.br.add_flow(**flow_dict_4) + self.br.add_flow(**flow_dict_5) + self.br.add_flow(**flow_dict_6) + self.br.add_flow(**flow_dict_7) + expected_calls = [ + mock.call(["ovs-ofctl", "add-flow", self.BR_NAME, + "hard_timeout=0,idle_timeout=0," + "priority=2,dl_src=ca:fe:de:ad:be:ef" + ",actions=strip_vlan,output:0"], + process_input=None, root_helper=self.root_helper), + mock.call(["ovs-ofctl", "add-flow", self.BR_NAME, + "hard_timeout=0,idle_timeout=0," + "priority=1,actions=normal"], + process_input=None, root_helper=self.root_helper), + mock.call(["ovs-ofctl", "add-flow", self.BR_NAME, + "hard_timeout=0,idle_timeout=0," + "priority=2,actions=drop"], + process_input=None, root_helper=self.root_helper), + mock.call(["ovs-ofctl", "add-flow", self.BR_NAME, + "hard_timeout=0,idle_timeout=0," + "priority=2,in_port=%s,actions=drop" % ofport], + process_input=None, 
root_helper=self.root_helper), + mock.call(["ovs-ofctl", "add-flow", self.BR_NAME, + "hard_timeout=0,idle_timeout=0," + "priority=4,dl_vlan=%s,in_port=%s," + "actions=strip_vlan,set_tunnel:%s,normal" + % (vid, ofport, lsw_id)], + process_input=None, root_helper=self.root_helper), + mock.call(["ovs-ofctl", "add-flow", self.BR_NAME, + "hard_timeout=0,idle_timeout=0," + "priority=3,tun_id=%s,actions=" + "mod_vlan_vid:%s,output:%s" + % (lsw_id, vid, ofport)], + process_input=None, root_helper=self.root_helper), + mock.call(["ovs-ofctl", "add-flow", self.BR_NAME, + "hard_timeout=0,idle_timeout=0," + "priority=4,nw_src=%s,arp,actions=drop" % cidr], + process_input=None, root_helper=self.root_helper), + ] + self.execute.assert_has_calls(expected_calls) + + def test_add_flow_timeout_set(self): + flow_dict = OrderedDict([('priority', 1), + ('hard_timeout', 1000), + ('idle_timeout', 2000), + ('actions', 'normal')]) + + self.br.add_flow(**flow_dict) + self.execute.assert_called_once_with( + ["ovs-ofctl", "add-flow", self.BR_NAME, + "hard_timeout=1000,idle_timeout=2000,priority=1,actions=normal"], + process_input=None, + root_helper=self.root_helper) + + def test_add_flow_default_priority(self): + flow_dict = OrderedDict([('actions', 'normal')]) + + self.br.add_flow(**flow_dict) + self.execute.assert_called_once_with( + ["ovs-ofctl", "add-flow", self.BR_NAME, + "hard_timeout=0,idle_timeout=0,priority=1,actions=normal"], + process_input=None, + root_helper=self.root_helper) + + def test_get_port_ofport(self): + pname = "tap99" + ofport = "6" + self.execute.return_value = ofport + self.assertEqual(self.br.get_port_ofport(pname), ofport) + self.execute.assert_called_once_with( + ["ovs-vsctl", self.TO, "get", "Interface", pname, "ofport"], + root_helper=self.root_helper) + + def test_get_port_ofport_non_int(self): + pname = "tap99" + ofport = "[]" + self.execute.return_value = ofport + self.assertEqual(self.br.get_port_ofport(pname), const.INVALID_OFPORT) + 
self.execute.assert_called_once_with( + ["ovs-vsctl", self.TO, "get", "Interface", pname, "ofport"], + root_helper=self.root_helper) + + def test_get_datapath_id(self): + datapath_id = '"0000b67f4fbcc149"' + self.execute.return_value = datapath_id + self.assertEqual(self.br.get_datapath_id(), datapath_id.strip('"')) + self.execute.assert_called_once_with( + ["ovs-vsctl", self.TO, "get", + "Bridge", self.BR_NAME, "datapath_id"], + root_helper=self.root_helper) + + def test_count_flows(self): + self.execute.return_value = 'ignore\nflow-1\n' + # counts the number of flows as total lines of output - 2 + self.assertEqual(self.br.count_flows(), 1) + self.execute.assert_called_once_with( + ["ovs-ofctl", "dump-flows", self.BR_NAME], + root_helper=self.root_helper, + process_input=None) + + def test_delete_flow(self): + ofport = "5" + lsw_id = 40 + vid = 39 + self.br.delete_flows(in_port=ofport) + self.br.delete_flows(tun_id=lsw_id) + self.br.delete_flows(dl_vlan=vid) + expected_calls = [ + mock.call(["ovs-ofctl", "del-flows", self.BR_NAME, + "in_port=" + ofport], + process_input=None, root_helper=self.root_helper), + mock.call(["ovs-ofctl", "del-flows", self.BR_NAME, + "tun_id=%s" % lsw_id], + process_input=None, root_helper=self.root_helper), + mock.call(["ovs-ofctl", "del-flows", self.BR_NAME, + "dl_vlan=%s" % vid], + process_input=None, root_helper=self.root_helper), + ] + self.execute.assert_has_calls(expected_calls) + + def test_delete_flow_with_priority_set(self): + params = {'in_port': '1', + 'priority': '1'} + + self.assertRaises(exceptions.InvalidInput, + self.br.delete_flows, + **params) + + def test_dump_flows(self): + table = 23 + nxst_flow = "NXST_FLOW reply (xid=0x4):" + flows = "\n".join([" cookie=0x0, duration=18042.514s, table=0, " + "n_packets=6, n_bytes=468, " + "priority=2,in_port=1 actions=drop", + " cookie=0x0, duration=18027.562s, table=0, " + "n_packets=0, n_bytes=0, " + "priority=3,in_port=1,dl_vlan=100 " + "actions=mod_vlan_vid:1,NORMAL", + " 
cookie=0x0, duration=18044.351s, table=0, " + "n_packets=9, n_bytes=594, priority=1 " + "actions=NORMAL", " cookie=0x0, " + "duration=18044.211s, table=23, n_packets=0, " + "n_bytes=0, priority=0 actions=drop"]) + flow_args = '\n'.join([nxst_flow, flows]) + run_ofctl = mock.patch.object(self.br, 'run_ofctl').start() + run_ofctl.side_effect = [flow_args] + retflows = self.br.dump_flows_for_table(table) + self.assertEqual(flows, retflows) + + def test_dump_flows_ovs_dead(self): + table = 23 + run_ofctl = mock.patch.object(self.br, 'run_ofctl').start() + run_ofctl.side_effect = [''] + retflows = self.br.dump_flows_for_table(table) + self.assertEqual(None, retflows) + + def test_mod_flow_with_priority_set(self): + params = {'in_port': '1', + 'priority': '1'} + + self.assertRaises(exceptions.InvalidInput, + self.br.mod_flow, + **params) + + def test_mod_flow_no_actions_set(self): + params = {'in_port': '1'} + + self.assertRaises(exceptions.InvalidInput, + self.br.mod_flow, + **params) + + def test_defer_apply_flows(self): + + flow_expr = mock.patch.object(ovs_lib, '_build_flow_expr_str').start() + flow_expr.side_effect = ['added_flow_1', 'added_flow_2', + 'deleted_flow_1'] + run_ofctl = mock.patch.object(self.br, 'run_ofctl').start() + + self.br.defer_apply_on() + self.br.add_flow(flow='add_flow_1') + self.br.defer_apply_on() + self.br.add_flow(flow='add_flow_2') + self.br.delete_flows(flow='delete_flow_1') + self.br.defer_apply_off() + + flow_expr.assert_has_calls([ + mock.call({'flow': 'add_flow_1'}, 'add'), + mock.call({'flow': 'add_flow_2'}, 'add'), + mock.call({'flow': 'delete_flow_1'}, 'del') + ]) + + run_ofctl.assert_has_calls([ + mock.call('add-flows', ['-'], 'added_flow_1\nadded_flow_2\n'), + mock.call('del-flows', ['-'], 'deleted_flow_1\n') + ]) + + def test_defer_apply_flows_concurrently(self): + flow_expr = mock.patch.object(ovs_lib, '_build_flow_expr_str').start() + flow_expr.side_effect = ['added_flow_1', 'deleted_flow_1', + 'modified_flow_1', 
'added_flow_2', + 'deleted_flow_2', 'modified_flow_2'] + + run_ofctl = mock.patch.object(self.br, 'run_ofctl').start() + + def run_ofctl_fake(cmd, args, process_input=None): + self.br.defer_apply_on() + if cmd == 'add-flows': + self.br.add_flow(flow='added_flow_2') + elif cmd == 'del-flows': + self.br.delete_flows(flow='deleted_flow_2') + elif cmd == 'mod-flows': + self.br.mod_flow(flow='modified_flow_2') + run_ofctl.side_effect = run_ofctl_fake + + self.br.defer_apply_on() + self.br.add_flow(flow='added_flow_1') + self.br.delete_flows(flow='deleted_flow_1') + self.br.mod_flow(flow='modified_flow_1') + self.br.defer_apply_off() + + run_ofctl.side_effect = None + self.br.defer_apply_off() + + flow_expr.assert_has_calls([ + mock.call({'flow': 'added_flow_1'}, 'add'), + mock.call({'flow': 'deleted_flow_1'}, 'del'), + mock.call({'flow': 'modified_flow_1'}, 'mod'), + mock.call({'flow': 'added_flow_2'}, 'add'), + mock.call({'flow': 'deleted_flow_2'}, 'del'), + mock.call({'flow': 'modified_flow_2'}, 'mod') + ]) + run_ofctl.assert_has_calls([ + mock.call('add-flows', ['-'], 'added_flow_1\n'), + mock.call('del-flows', ['-'], 'deleted_flow_1\n'), + mock.call('mod-flows', ['-'], 'modified_flow_1\n'), + mock.call('add-flows', ['-'], 'added_flow_2\n'), + mock.call('del-flows', ['-'], 'deleted_flow_2\n'), + mock.call('mod-flows', ['-'], 'modified_flow_2\n') + ]) + + def test_add_tunnel_port(self): + pname = "tap99" + local_ip = "1.1.1.1" + remote_ip = "9.9.9.9" + ofport = "6" + command = ["ovs-vsctl", self.TO, '--', "--may-exist", "add-port", + self.BR_NAME, pname] + command.extend(["--", "set", "Interface", pname]) + command.extend(["type=gre", "options:df_default=true", + "options:remote_ip=" + remote_ip, + "options:local_ip=" + local_ip, + "options:in_key=flow", + "options:out_key=flow"]) + # Each element is a tuple of (expected mock call, return_value) + expected_calls_and_values = [ + (mock.call(command, root_helper=self.root_helper), None), + (mock.call(["ovs-vsctl", 
self.TO, "get", + "Interface", pname, "ofport"], + root_helper=self.root_helper), + ofport), + ] + tools.setup_mock_calls(self.execute, expected_calls_and_values) + + self.assertEqual( + self.br.add_tunnel_port(pname, remote_ip, local_ip), + ofport) + + tools.verify_mock_calls(self.execute, expected_calls_and_values) + + def test_add_vxlan_fragmented_tunnel_port(self): + pname = "tap99" + local_ip = "1.1.1.1" + remote_ip = "9.9.9.9" + ofport = "6" + vxlan_udp_port = "9999" + dont_fragment = False + command = ["ovs-vsctl", self.TO, '--', "--may-exist", "add-port", + self.BR_NAME, pname] + command.extend(["--", "set", "Interface", pname]) + command.extend(["type=" + p_const.TYPE_VXLAN, + "options:dst_port=" + vxlan_udp_port, + "options:df_default=false", + "options:remote_ip=" + remote_ip, + "options:local_ip=" + local_ip, + "options:in_key=flow", + "options:out_key=flow"]) + # Each element is a tuple of (expected mock call, return_value) + expected_calls_and_values = [ + (mock.call(command, root_helper=self.root_helper), None), + (mock.call(["ovs-vsctl", self.TO, "get", + "Interface", pname, "ofport"], + root_helper=self.root_helper), + ofport), + ] + tools.setup_mock_calls(self.execute, expected_calls_and_values) + + self.assertEqual( + self.br.add_tunnel_port(pname, remote_ip, local_ip, + p_const.TYPE_VXLAN, vxlan_udp_port, + dont_fragment), + ofport) + + tools.verify_mock_calls(self.execute, expected_calls_and_values) + + def test_add_patch_port(self): + pname = "tap99" + peer = "bar10" + ofport = "6" + + # Each element is a tuple of (expected mock call, return_value) + command = ["ovs-vsctl", self.TO, "add-port", self.BR_NAME, pname] + command.extend(["--", "set", "Interface", pname]) + command.extend(["type=patch", "options:peer=" + peer]) + expected_calls_and_values = [ + (mock.call(command, root_helper=self.root_helper), + None), + (mock.call(["ovs-vsctl", self.TO, "get", + "Interface", pname, "ofport"], + root_helper=self.root_helper), + ofport) + ] + 
tools.setup_mock_calls(self.execute, expected_calls_and_values) + + self.assertEqual(self.br.add_patch_port(pname, peer), ofport) + tools.verify_mock_calls(self.execute, expected_calls_and_values) + + def _test_get_vif_ports(self, is_xen=False): + pname = "tap99" + ofport = "6" + vif_id = uuidutils.generate_uuid() + mac = "ca:fe:de:ad:be:ef" + + if is_xen: + external_ids = ('{xs-vif-uuid="%s", attached-mac="%s"}' + % (vif_id, mac)) + else: + external_ids = ('{iface-id="%s", attached-mac="%s"}' + % (vif_id, mac)) + + # Each element is a tuple of (expected mock call, return_value) + expected_calls_and_values = [ + (mock.call(["ovs-vsctl", self.TO, "list-ports", self.BR_NAME], + root_helper=self.root_helper), + "%s\n" % pname), + (mock.call(["ovs-vsctl", self.TO, "get", + "Interface", pname, "external_ids"], + root_helper=self.root_helper), + external_ids), + (mock.call(["ovs-vsctl", self.TO, "get", + "Interface", pname, "ofport"], + root_helper=self.root_helper), + ofport), + ] + if is_xen: + expected_calls_and_values.append( + (mock.call(["xe", "vif-param-get", "param-name=other-config", + "param-key=nicira-iface-id", "uuid=" + vif_id], + root_helper=self.root_helper), + vif_id) + ) + tools.setup_mock_calls(self.execute, expected_calls_and_values) + + ports = self.br.get_vif_ports() + self.assertEqual(1, len(ports)) + self.assertEqual(ports[0].port_name, pname) + self.assertEqual(ports[0].ofport, ofport) + self.assertEqual(ports[0].vif_id, vif_id) + self.assertEqual(ports[0].vif_mac, mac) + self.assertEqual(ports[0].switch.br_name, self.BR_NAME) + tools.verify_mock_calls(self.execute, expected_calls_and_values) + + def _encode_ovs_json(self, headings, data): + # See man ovs-vsctl(8) for the encoding details. 
+ r = {"data": [], + "headings": headings} + for row in data: + ovs_row = [] + r["data"].append(ovs_row) + for cell in row: + if isinstance(cell, (str, int, list)): + ovs_row.append(cell) + elif isinstance(cell, dict): + ovs_row.append(["map", cell.items()]) + elif isinstance(cell, set): + ovs_row.append(["set", cell]) + else: + raise TypeError('%r not int, str, list, set or dict' % + type(cell)) + return jsonutils.dumps(r) + + def _test_get_vif_port_set(self, is_xen): + if is_xen: + id_key = 'xs-vif-uuid' + else: + id_key = 'iface-id' + + headings = ['name', 'external_ids'] + data = [ + # A vif port on this bridge: + ['tap99', {id_key: 'tap99id', 'attached-mac': 'tap99mac'}, 1], + # A vif port on this bridge not yet configured + ['tap98', {id_key: 'tap98id', 'attached-mac': 'tap98mac'}, []], + # Another vif port on this bridge not yet configured + ['tap97', {id_key: 'tap97id', 'attached-mac': 'tap97mac'}, + ['set', []]], + + # A vif port on another bridge: + ['tap88', {id_key: 'tap88id', 'attached-mac': 'tap88id'}, 1], + # Non-vif port on this bridge: + ['tun22', {}, 2], + ] + + # Each element is a tuple of (expected mock call, return_value) + expected_calls_and_values = [ + (mock.call(["ovs-vsctl", self.TO, "list-ports", self.BR_NAME], + root_helper=self.root_helper), + 'tap99\ntun22'), + (mock.call(["ovs-vsctl", self.TO, "--format=json", + "--", "--columns=name,external_ids,ofport", + "list", "Interface"], + root_helper=self.root_helper), + self._encode_ovs_json(headings, data)), + ] + tools.setup_mock_calls(self.execute, expected_calls_and_values) + + if is_xen: + get_xapi_iface_id = mock.patch.object(self.br, + 'get_xapi_iface_id').start() + get_xapi_iface_id.return_value = 'tap99id' + + port_set = self.br.get_vif_port_set() + self.assertEqual(set(['tap99id']), port_set) + tools.verify_mock_calls(self.execute, expected_calls_and_values) + if is_xen: + get_xapi_iface_id.assert_called_once_with('tap99id') + + def test_get_vif_ports_nonxen(self): + 
self._test_get_vif_ports(is_xen=False) + + def test_get_vif_ports_xen(self): + self._test_get_vif_ports(is_xen=True) + + def test_get_vif_port_set_nonxen(self): + self._test_get_vif_port_set(False) + + def test_get_vif_port_set_xen(self): + self._test_get_vif_port_set(True) + + def test_get_vif_ports_list_ports_error(self): + expected_calls_and_values = [ + (mock.call(["ovs-vsctl", self.TO, "list-ports", self.BR_NAME], + root_helper=self.root_helper), + RuntimeError()), + ] + tools.setup_mock_calls(self.execute, expected_calls_and_values) + self.assertRaises(RuntimeError, self.br.get_vif_ports) + tools.verify_mock_calls(self.execute, expected_calls_and_values) + + def test_get_vif_port_set_list_ports_error(self): + expected_calls_and_values = [ + (mock.call(["ovs-vsctl", self.TO, "list-ports", self.BR_NAME], + root_helper=self.root_helper), + RuntimeError()), + ] + tools.setup_mock_calls(self.execute, expected_calls_and_values) + self.assertRaises(RuntimeError, self.br.get_vif_port_set) + tools.verify_mock_calls(self.execute, expected_calls_and_values) + + def test_get_vif_port_set_list_interface_error(self): + expected_calls_and_values = [ + (mock.call(["ovs-vsctl", self.TO, "list-ports", self.BR_NAME], + root_helper=self.root_helper), + 'tap99\n'), + (mock.call(["ovs-vsctl", self.TO, "--format=json", + "--", "--columns=name,external_ids,ofport", + "list", "Interface"], + root_helper=self.root_helper), + RuntimeError()), + ] + tools.setup_mock_calls(self.execute, expected_calls_and_values) + self.assertRaises(RuntimeError, self.br.get_vif_port_set) + tools.verify_mock_calls(self.execute, expected_calls_and_values) + + def test_get_port_tag_dict(self): + headings = ['name', 'tag'] + data = [ + ['int-br-eth2', set()], + ['patch-tun', set()], + ['qr-76d9e6b6-21', 1], + ['tapce5318ff-78', 1], + ['tape1400310-e6', 1], + ] + + # Each element is a tuple of (expected mock call, return_value) + expected_calls_and_values = [ + (mock.call(["ovs-vsctl", self.TO, "list-ports", 
self.BR_NAME], + root_helper=self.root_helper), + '\n'.join((iface for iface, tag in data))), + (mock.call(["ovs-vsctl", self.TO, "--format=json", + "--", "--columns=name,tag", + "list", "Port"], + root_helper=self.root_helper), + self._encode_ovs_json(headings, data)), + ] + tools.setup_mock_calls(self.execute, expected_calls_and_values) + + port_tags = self.br.get_port_tag_dict() + self.assertEqual( + port_tags, + {u'int-br-eth2': [], + u'patch-tun': [], + u'qr-76d9e6b6-21': 1, + u'tapce5318ff-78': 1, + u'tape1400310-e6': 1} + ) + + def test_clear_db_attribute(self): + pname = "tap77" + self.br.clear_db_attribute("Port", pname, "tag") + self.execute.assert_called_once_with( + ["ovs-vsctl", self.TO, "clear", "Port", pname, "tag"], + root_helper=self.root_helper) + + def _test_iface_to_br(self, exp_timeout=None): + iface = 'tap0' + br = 'br-int' + root_helper = 'sudo' + self.execute.return_value = 'br-int' + exp_timeout_str = self._build_timeout_opt(exp_timeout) + self.assertEqual(ovs_lib.get_bridge_for_iface(root_helper, iface), br) + self.execute.assert_called_once_with( + ["ovs-vsctl", exp_timeout_str, "iface-to-br", iface], + root_helper=root_helper) + + def test_iface_to_br(self): + self._test_iface_to_br() + + def test_iface_to_br_non_default_timeout(self): + new_timeout = 5 + cfg.CONF.set_override('ovs_vsctl_timeout', new_timeout) + self._test_iface_to_br(new_timeout) + + def test_iface_to_br_handles_ovs_vsctl_exception(self): + iface = 'tap0' + root_helper = 'sudo' + self.execute.side_effect = Exception + + self.assertIsNone(ovs_lib.get_bridge_for_iface(root_helper, iface)) + self.execute.assert_called_once_with( + ["ovs-vsctl", self.TO, "iface-to-br", iface], + root_helper=root_helper) + + def test_delete_all_ports(self): + with mock.patch.object(self.br, 'get_port_name_list', + return_value=['port1']) as get_port: + with mock.patch.object(self.br, 'delete_port') as delete_port: + self.br.delete_ports(all_ports=True) + get_port.assert_called_once_with() + 
delete_port.assert_called_once_with('port1') + + def test_delete_neutron_ports(self): + port1 = ovs_lib.VifPort('tap1234', 1, uuidutils.generate_uuid(), + 'ca:fe:de:ad:be:ef', 'br') + port2 = ovs_lib.VifPort('tap5678', 2, uuidutils.generate_uuid(), + 'ca:ee:de:ad:be:ef', 'br') + with mock.patch.object(self.br, 'get_vif_ports', + return_value=[port1, port2]) as get_ports: + with mock.patch.object(self.br, 'delete_port') as delete_port: + self.br.delete_ports(all_ports=False) + get_ports.assert_called_once_with() + delete_port.assert_has_calls([ + mock.call('tap1234'), + mock.call('tap5678') + ]) + + def test_delete_neutron_ports_list_error(self): + expected_calls_and_values = [ + (mock.call(["ovs-vsctl", self.TO, "list-ports", self.BR_NAME], + root_helper=self.root_helper), + RuntimeError()), + ] + tools.setup_mock_calls(self.execute, expected_calls_and_values) + self.assertRaises(RuntimeError, self.br.delete_ports, all_ports=False) + tools.verify_mock_calls(self.execute, expected_calls_and_values) + + def _test_get_bridges(self, exp_timeout=None): + bridges = ['br-int', 'br-ex'] + root_helper = 'sudo' + self.execute.return_value = 'br-int\nbr-ex\n' + timeout_str = self._build_timeout_opt(exp_timeout) + self.assertEqual(ovs_lib.get_bridges(root_helper), bridges) + self.execute.assert_called_once_with( + ["ovs-vsctl", timeout_str, "list-br"], + root_helper=root_helper) + + def test_get_bridges(self): + self._test_get_bridges() + + def test_get_bridges_not_default_timeout(self): + new_timeout = 5 + cfg.CONF.set_override('ovs_vsctl_timeout', new_timeout) + self._test_get_bridges(new_timeout) + + def test_get_local_port_mac_succeeds(self): + with mock.patch('neutron.agent.linux.ip_lib.IpLinkCommand', + return_value=mock.Mock(address='foo')): + self.assertEqual('foo', self.br.get_local_port_mac()) + + def test_get_local_port_mac_raises_exception_for_missing_mac(self): + with mock.patch('neutron.agent.linux.ip_lib.IpLinkCommand', + return_value=mock.Mock(address=None)): + 
with testtools.ExpectedException(Exception): + self.br.get_local_port_mac() + + def _test_get_vif_port_by_id(self, iface_id, data, br_name=None): + headings = ['external_ids', 'name', 'ofport'] + # Each element is a tuple of (expected mock call, return_value) + expected_calls_and_values = [ + (mock.call(["ovs-vsctl", self.TO, "--format=json", + "--", "--columns=external_ids,name,ofport", + "find", "Interface", + 'external_ids:iface-id="%s"' % iface_id], + root_helper=self.root_helper), + self._encode_ovs_json(headings, data))] + if data: + if not br_name: + br_name = self.BR_NAME + + expected_calls_and_values.append( + (mock.call(["ovs-vsctl", self.TO, + "iface-to-br", data[0][headings.index('name')]], + root_helper=self.root_helper), + br_name)) + tools.setup_mock_calls(self.execute, expected_calls_and_values) + vif_port = self.br.get_vif_port_by_id(iface_id) + + tools.verify_mock_calls(self.execute, expected_calls_and_values) + return vif_port + + def _test_get_vif_port_by_id_with_data(self, ofport=None, mac=None): + external_ids = [["iface-id", "tap99id"], + ["iface-status", "active"]] + if mac: + external_ids.append(["attached-mac", mac]) + data = [[["map", external_ids], "tap99", + ofport if ofport else '["set",[]]']] + vif_port = self._test_get_vif_port_by_id('tap99id', data) + if not ofport or ofport == -1 or not mac: + self.assertIsNone(vif_port) + return + self.assertEqual(vif_port.vif_id, 'tap99id') + self.assertEqual(vif_port.vif_mac, 'aa:bb:cc:dd:ee:ff') + self.assertEqual(vif_port.port_name, 'tap99') + self.assertEqual(vif_port.ofport, ofport) + + def test_get_vif_by_port_id_with_ofport(self): + self._test_get_vif_port_by_id_with_data( + ofport=1, mac="aa:bb:cc:dd:ee:ff") + + def test_get_vif_by_port_id_without_ofport(self): + self._test_get_vif_port_by_id_with_data(mac="aa:bb:cc:dd:ee:ff") + + def test_get_vif_by_port_id_with_invalid_ofport(self): + self._test_get_vif_port_by_id_with_data( + ofport=-1, mac="aa:bb:cc:dd:ee:ff") + + def 
test_get_vif_by_port_id_without_mac(self): + self._test_get_vif_port_by_id_with_data(ofport=1) + + def test_get_vif_by_port_id_with_no_data(self): + self.assertIsNone(self._test_get_vif_port_by_id('whatever', [])) + + def test_get_vif_by_port_id_different_bridge(self): + external_ids = [["iface-id", "tap99id"], + ["iface-status", "active"]] + data = [[["map", external_ids], "tap99", 1]] + self.assertIsNone(self._test_get_vif_port_by_id('tap99id', data, + "br-ext")) + + def test_ofctl_arg_supported(self): + with mock.patch('neutron.common.utils.get_random_string') as utils: + utils.return_value = 'test' + supported = ovs_lib.ofctl_arg_supported(self.root_helper, 'cmd', + ['args']) + self.execute.assert_has_calls([ + mock.call(['ovs-vsctl', self.TO, '--', '--if-exists', 'del-br', + 'br-test-test'], root_helper=self.root_helper), + mock.call(['ovs-vsctl', self.TO, '--', '--may-exist', 'add-br', + 'br-test-test'], root_helper=self.root_helper), + mock.call(['ovs-ofctl', 'cmd', 'br-test-test', 'args'], + root_helper=self.root_helper), + mock.call(['ovs-vsctl', self.TO, '--', '--if-exists', 'del-br', + 'br-test-test'], root_helper=self.root_helper) + ]) + self.assertTrue(supported) + + self.execute.side_effect = Exception + supported = ovs_lib.ofctl_arg_supported(self.root_helper, 'cmd', + ['args']) + self.execute.assert_has_calls([ + mock.call(['ovs-vsctl', self.TO, '--', '--if-exists', 'del-br', + 'br-test-test'], root_helper=self.root_helper), + mock.call(['ovs-vsctl', self.TO, '--', '--may-exist', 'add-br', + 'br-test-test'], root_helper=self.root_helper), + mock.call(['ovs-ofctl', 'cmd', 'br-test-test', 'args'], + root_helper=self.root_helper), + mock.call(['ovs-vsctl', self.TO, '--', '--if-exists', 'del-br', + 'br-test-test'], root_helper=self.root_helper) + ]) + self.assertFalse(supported) diff --git a/neutron/tests/unit/agent/linux/test_ovsdb_monitor.py b/neutron/tests/unit/agent/linux/test_ovsdb_monitor.py new file mode 100644 index 000000000..ec37f83aa --- 
/dev/null +++ b/neutron/tests/unit/agent/linux/test_ovsdb_monitor.py @@ -0,0 +1,105 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import eventlet.event +import mock + +from neutron.agent.linux import ovsdb_monitor +from neutron.tests import base + + +class TestOvsdbMonitor(base.BaseTestCase): + + def setUp(self): + super(TestOvsdbMonitor, self).setUp() + self.root_helper = 'sudo' + self.monitor = ovsdb_monitor.OvsdbMonitor('Interface', + root_helper=self.root_helper) + + def read_output_queues_and_returns_result(self, output_type, output): + with mock.patch.object(self.monitor, '_process') as mock_process: + with mock.patch.object(mock_process, output_type) as mock_file: + with mock.patch.object(mock_file, 'readline') as mock_readline: + mock_readline.return_value = output + func = getattr(self.monitor, + '_read_%s' % output_type, + None) + return func() + + def test__read_stdout_returns_none_for_empty_read(self): + result = self.read_output_queues_and_returns_result('stdout', '') + self.assertIsNone(result) + + def test__read_stdout_queues_normal_output_to_stdout_queue(self): + output = 'foo' + result = self.read_output_queues_and_returns_result('stdout', output) + self.assertEqual(result, output) + self.assertEqual(self.monitor._stdout_lines.get_nowait(), output) + + def test__read_stderr_returns_none(self): + result = self.read_output_queues_and_returns_result('stderr', '') + 
self.assertIsNone(result) + + +class TestSimpleInterfaceMonitor(base.BaseTestCase): + + def setUp(self): + super(TestSimpleInterfaceMonitor, self).setUp() + self.root_helper = 'sudo' + self.monitor = ovsdb_monitor.SimpleInterfaceMonitor( + root_helper=self.root_helper) + + def test_is_active_is_false_by_default(self): + self.assertFalse(self.monitor.is_active) + + def test_is_active_can_be_true(self): + self.monitor.data_received = True + self.monitor._kill_event = eventlet.event.Event() + self.assertTrue(self.monitor.is_active) + + def test_has_updates_is_true_by_default(self): + self.assertTrue(self.monitor.has_updates) + + def test_has_updates_is_false_if_active_with_no_output(self): + target = ('neutron.agent.linux.ovsdb_monitor.SimpleInterfaceMonitor' + '.is_active') + with mock.patch(target, + new_callable=mock.PropertyMock(return_value=True)): + self.assertFalse(self.monitor.has_updates) + + def test__kill_sets_data_received_to_false(self): + self.monitor.data_received = True + with mock.patch( + 'neutron.agent.linux.ovsdb_monitor.OvsdbMonitor._kill'): + self.monitor._kill() + self.assertFalse(self.monitor.data_received) + + def test__read_stdout_sets_data_received_and_returns_output(self): + output = 'foo' + with mock.patch( + 'neutron.agent.linux.ovsdb_monitor.OvsdbMonitor._read_stdout', + return_value=output): + result = self.monitor._read_stdout() + self.assertTrue(self.monitor.data_received) + self.assertEqual(result, output) + + def test__read_stdout_does_not_set_data_received_for_empty_ouput(self): + output = None + with mock.patch( + 'neutron.agent.linux.ovsdb_monitor.OvsdbMonitor._read_stdout', + return_value=output): + self.monitor._read_stdout() + self.assertFalse(self.monitor.data_received) diff --git a/neutron/tests/unit/agent/linux/test_polling.py b/neutron/tests/unit/agent/linux/test_polling.py new file mode 100644 index 000000000..e288654c2 --- /dev/null +++ b/neutron/tests/unit/agent/linux/test_polling.py @@ -0,0 +1,116 @@ +# vim: tabstop=4 
shiftwidth=4 softtabstop=4 + +# Copyright 2013 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import mock + +from neutron.agent.linux import polling +from neutron.tests import base + + +class TestGetPollingManager(base.BaseTestCase): + + def test_return_always_poll_by_default(self): + with polling.get_polling_manager() as pm: + self.assertEqual(pm.__class__, polling.AlwaysPoll) + + def test_manage_polling_minimizer(self): + mock_target = 'neutron.agent.linux.polling.InterfacePollingMinimizer' + with mock.patch('%s.start' % mock_target) as mock_start: + with mock.patch('%s.stop' % mock_target) as mock_stop: + with polling.get_polling_manager(minimize_polling=True, + root_helper='test') as pm: + self.assertEqual(pm._monitor.root_helper, 'test') + self.assertEqual(pm.__class__, + polling.InterfacePollingMinimizer) + mock_stop.assert_has_calls(mock.call()) + mock_start.assert_has_calls(mock.call()) + + +class TestBasePollingManager(base.BaseTestCase): + + def setUp(self): + super(TestBasePollingManager, self).setUp() + self.pm = polling.BasePollingManager() + + def test_force_polling_sets_interval_attribute(self): + self.assertFalse(self.pm._force_polling) + self.pm.force_polling() + self.assertTrue(self.pm._force_polling) + + def test_polling_completed_sets_interval_attribute(self): + self.pm._polling_completed = False + self.pm.polling_completed() + self.assertTrue(self.pm._polling_completed) + + def mock_is_polling_required(self, return_value): + 
return mock.patch.object(self.pm, '_is_polling_required', + return_value=return_value) + + def test_is_polling_required_returns_true_when_forced(self): + with self.mock_is_polling_required(False): + self.pm.force_polling() + self.assertTrue(self.pm.is_polling_required) + self.assertFalse(self.pm._force_polling) + + def test_is_polling_required_returns_true_when_polling_not_completed(self): + with self.mock_is_polling_required(False): + self.pm._polling_completed = False + self.assertTrue(self.pm.is_polling_required) + + def test_is_polling_required_returns_true_when_updates_are_present(self): + with self.mock_is_polling_required(True): + self.assertTrue(self.pm.is_polling_required) + self.assertFalse(self.pm._polling_completed) + + def test_is_polling_required_returns_false_for_no_updates(self): + with self.mock_is_polling_required(False): + self.assertFalse(self.pm.is_polling_required) + + +class TestAlwaysPoll(base.BaseTestCase): + + def test_is_polling_required_always_returns_true(self): + pm = polling.AlwaysPoll() + self.assertTrue(pm.is_polling_required) + + +class TestInterfacePollingMinimizer(base.BaseTestCase): + + def setUp(self): + super(TestInterfacePollingMinimizer, self).setUp() + self.pm = polling.InterfacePollingMinimizer() + + def test_start_calls_monitor_start(self): + with mock.patch.object(self.pm._monitor, 'start') as mock_start: + self.pm.start() + mock_start.assert_called_with() + + def test_stop_calls_monitor_stop(self): + with mock.patch.object(self.pm._monitor, 'stop') as mock_stop: + self.pm.stop() + mock_stop.assert_called_with() + + def mock_has_updates(self, return_value): + target = ('neutron.agent.linux.ovsdb_monitor.SimpleInterfaceMonitor' + '.has_updates') + return mock.patch( + target, + new_callable=mock.PropertyMock(return_value=return_value), + ) + + def test__is_polling_required_returns_when_updates_are_present(self): + with self.mock_has_updates(True): + self.assertTrue(self.pm._is_polling_required()) diff --git 
a/neutron/tests/unit/api/__init__.py b/neutron/tests/unit/api/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/neutron/tests/unit/api/rpc/__init__.py b/neutron/tests/unit/api/rpc/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/neutron/tests/unit/api/rpc/agentnotifiers/__init__.py b/neutron/tests/unit/api/rpc/agentnotifiers/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/neutron/tests/unit/api/rpc/agentnotifiers/test_dhcp_rpc_agent_api.py b/neutron/tests/unit/api/rpc/agentnotifiers/test_dhcp_rpc_agent_api.py new file mode 100644 index 000000000..5d29f6cbd --- /dev/null +++ b/neutron/tests/unit/api/rpc/agentnotifiers/test_dhcp_rpc_agent_api.py @@ -0,0 +1,154 @@ +# Copyright (c) 2013 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import datetime +import mock + +from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api +from neutron.common import utils +from neutron.db import agents_db +from neutron.openstack.common import timeutils +from neutron.tests import base + + +class TestDhcpAgentNotifyAPI(base.BaseTestCase): + + def setUp(self): + super(TestDhcpAgentNotifyAPI, self).setUp() + self.notifier = ( + dhcp_rpc_agent_api.DhcpAgentNotifyAPI(plugin=mock.Mock())) + + mock_util_p = mock.patch.object(utils, 'is_extension_supported') + mock_log_p = mock.patch.object(dhcp_rpc_agent_api, 'LOG') + mock_fanout_p = mock.patch.object(self.notifier, '_fanout_message') + mock_cast_p = mock.patch.object(self.notifier, '_cast_message') + self.mock_util = mock_util_p.start() + self.mock_log = mock_log_p.start() + self.mock_fanout = mock_fanout_p.start() + self.mock_cast = mock_cast_p.start() + + def _test__schedule_network(self, network, + new_agents=None, existing_agents=None, + expected_casts=0, expected_warnings=0): + self.notifier.plugin.schedule_network.return_value = new_agents + agents = self.notifier._schedule_network( + mock.ANY, network, existing_agents) + if new_agents is None: + new_agents = [] + self.assertEqual(new_agents + existing_agents, agents) + self.assertEqual(expected_casts, self.mock_cast.call_count) + self.assertEqual(expected_warnings, self.mock_log.warn.call_count) + + def test__schedule_network(self): + agent = agents_db.Agent() + agent.admin_state_up = True + agent.heartbeat_timestamp = timeutils.utcnow() + network = {'id': 'foo_net_id'} + self._test__schedule_network(network, + new_agents=[agent], existing_agents=[], + expected_casts=1, expected_warnings=0) + + def test__schedule_network_no_existing_agents(self): + agent = agents_db.Agent() + agent.admin_state_up = True + agent.heartbeat_timestamp = timeutils.utcnow() + network = {'id': 'foo_net_id'} + self._test__schedule_network(network, + new_agents=None, existing_agents=[agent], + expected_casts=0, 
expected_warnings=0) + + def test__schedule_network_no_new_agents(self): + network = {'id': 'foo_net_id'} + self._test__schedule_network(network, + new_agents=None, existing_agents=[], + expected_casts=0, expected_warnings=1) + + def _test__get_enabled_agents(self, network, + agents=None, port_count=0, + expected_warnings=0, expected_errors=0): + self.notifier.plugin.get_ports_count.return_value = port_count + enabled_agents = self.notifier._get_enabled_agents( + mock.ANY, network, agents, mock.ANY, mock.ANY) + self.assertEqual(agents, enabled_agents) + self.assertEqual(expected_warnings, self.mock_log.warn.call_count) + self.assertEqual(expected_errors, self.mock_log.error.call_count) + + def test__get_enabled_agents(self): + agent = agents_db.Agent() + agent.admin_state_up = True + agent.heartbeat_timestamp = timeutils.utcnow() + network = {'id': 'foo_network_id'} + self._test__get_enabled_agents(network, agents=[agent]) + + def test__get_enabled_agents_with_inactive_ones(self): + agent1 = agents_db.Agent() + agent1.admin_state_up = True + agent1.heartbeat_timestamp = timeutils.utcnow() + agent2 = agents_db.Agent() + agent2.admin_state_up = True + # This is effectively an inactive agent + agent2.heartbeat_timestamp = datetime.datetime(2000, 1, 1, 0, 0) + network = {'id': 'foo_network_id'} + self._test__get_enabled_agents(network, + agents=[agent1, agent2], + expected_warnings=1, expected_errors=0) + + def test__get_enabled_agents_with_notification_required(self): + network = {'id': 'foo_network_id', 'subnets': ['foo_subnet_id']} + self._test__get_enabled_agents(network, [], port_count=20, + expected_warnings=0, expected_errors=1) + + def test__notify_agents_fanout_required(self): + self.notifier._notify_agents(mock.ANY, + 'network_delete_end', + mock.ANY, 'foo_network_id') + self.assertEqual(1, self.mock_fanout.call_count) + + def _test__notify_agents(self, method, + expected_scheduling=0, expected_casts=0): + with mock.patch.object(self.notifier, 
'_schedule_network') as f: + with mock.patch.object(self.notifier, '_get_enabled_agents') as g: + agent = agents_db.Agent() + agent.admin_state_up = True + agent.heartbeat_timestamp = timeutils.utcnow() + g.return_value = [agent] + self.notifier._notify_agents(mock.Mock(), method, + mock.ANY, 'foo_network_id') + self.assertEqual(expected_scheduling, f.call_count) + self.assertEqual(expected_casts, self.mock_cast.call_count) + + def test__notify_agents_cast_required_with_scheduling(self): + self._test__notify_agents('port_create_end', + expected_scheduling=1, expected_casts=1) + + def test__notify_agents_cast_required_wo_scheduling_on_port_update(self): + self._test__notify_agents('port_update_end', + expected_scheduling=0, expected_casts=1) + + def test__notify_agents_cast_required_wo_scheduling_on_subnet_create(self): + self._test__notify_agents('subnet_create_end', + expected_scheduling=0, expected_casts=1) + + def test__notify_agents_no_action(self): + self._test__notify_agents('network_create_end', + expected_scheduling=0, expected_casts=0) + + def test__fanout_message(self): + self.notifier._fanout_message(mock.ANY, mock.ANY, mock.ANY) + self.assertEqual(1, self.mock_fanout.call_count) + + def test__cast_message(self): + self.notifier._cast_message(mock.ANY, mock.ANY, mock.ANY) + self.assertEqual(1, self.mock_cast.call_count) diff --git a/neutron/tests/unit/bigswitch/__init__.py b/neutron/tests/unit/bigswitch/__init__.py new file mode 100644 index 000000000..7e503debd --- /dev/null +++ b/neutron/tests/unit/bigswitch/__init__.py @@ -0,0 +1,16 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/neutron/tests/unit/bigswitch/etc/restproxy.ini.test b/neutron/tests/unit/bigswitch/etc/restproxy.ini.test new file mode 100644 index 000000000..8df78a6eb --- /dev/null +++ b/neutron/tests/unit/bigswitch/etc/restproxy.ini.test @@ -0,0 +1,44 @@ +# Test config file for quantum-proxy-plugin. + +[database] +# This line MUST be changed to actually run the plugin. +# Example: +# connection = mysql://root:pass@127.0.0.1:3306/restproxy_quantum +# Replace 127.0.0.1 above with the IP address of the database used by the +# main quantum server. (Leave it as is if the database runs on this host.) 
+connection = sqlite:// +# Database reconnection retry times - in event connectivity is lost +# set to -1 implies an infinite retry count +# max_retries = 10 +# Database reconnection interval in seconds - in event connectivity is lost +retry_interval = 2 + +[restproxy] +# All configuration for this plugin is in section '[restproxy]' +# +# The following parameters are supported: +# servers : [,]* (Error if not set) +# serverauth : (default: no auth) +# serverssl : True | False (default: False) +# +servers=localhost:9000,localhost:8899 +serverssl=False +#serverauth=username:password + +[nova] +# Specify the VIF_TYPE that will be controlled on the Nova compute instances +# options: ivs or ovs +# default: ovs +vif_type = ovs +# Overrides for vif types based on nova compute node host IDs +# Comma separated list of host IDs to fix to a specific VIF type +node_override_vif_ivs = ivshost + +[router] +# Specify the default router rules installed in newly created tenant routers +# Specify multiple times for multiple rules +# Use an * to specify default for all tenants +# Default is any any allow for all tenants +#tenant_default_router_rule=*:any:any:permit +# Maximum number of rules that a single router may have +max_router_rules=200 diff --git a/neutron/tests/unit/bigswitch/etc/ssl/ca_certs/README b/neutron/tests/unit/bigswitch/etc/ssl/ca_certs/README new file mode 100644 index 000000000..91779e39d --- /dev/null +++ b/neutron/tests/unit/bigswitch/etc/ssl/ca_certs/README @@ -0,0 +1,2 @@ +ca_certs directory for SSL unit tests +No files will be generated here, but it should exist for the tests diff --git a/neutron/tests/unit/bigswitch/etc/ssl/combined/README b/neutron/tests/unit/bigswitch/etc/ssl/combined/README new file mode 100644 index 000000000..9f9922fd5 --- /dev/null +++ b/neutron/tests/unit/bigswitch/etc/ssl/combined/README @@ -0,0 +1,2 @@ +combined certificates directory for SSL unit tests +No files will be created here, but it should exist for the tests diff --git 
a/neutron/tests/unit/bigswitch/etc/ssl/host_certs/README b/neutron/tests/unit/bigswitch/etc/ssl/host_certs/README new file mode 100644 index 000000000..0eaec67ce --- /dev/null +++ b/neutron/tests/unit/bigswitch/etc/ssl/host_certs/README @@ -0,0 +1,2 @@ +host_certs directory for SSL unit tests +No files will be created here, but it should exist for the tests diff --git a/neutron/tests/unit/bigswitch/fake_server.py b/neutron/tests/unit/bigswitch/fake_server.py new file mode 100644 index 000000000..3db3cc3ed --- /dev/null +++ b/neutron/tests/unit/bigswitch/fake_server.py @@ -0,0 +1,185 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 Big Switch Networks, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# +# @author: Kevin Benton, +# + +from neutron.openstack.common import jsonutils as json +from neutron.openstack.common import log as logging +from neutron.plugins.bigswitch import servermanager + +LOG = logging.getLogger(__name__) + + +class HTTPResponseMock(): + status = 200 + reason = 'OK' + + def __init__(self, sock, debuglevel=0, strict=0, method=None, + buffering=False): + pass + + def read(self): + return "{'status': '200 OK'}" + + def getheader(self, header): + return None + + +class HTTPResponseMock404(HTTPResponseMock): + status = 404 + reason = 'Not Found' + + def read(self): + return "{'status': '%s 404 Not Found'}" % servermanager.NXNETWORK + + +class HTTPResponseMock500(HTTPResponseMock): + status = 500 + reason = 'Internal Server Error' + + def __init__(self, sock, debuglevel=0, strict=0, method=None, + buffering=False, errmsg='500 Internal Server Error'): + self.errmsg = errmsg + + def read(self): + return "{'status': '%s'}" % self.errmsg + + +class HTTPConnectionMock(object): + + def __init__(self, server, port, timeout): + self.response = None + self.broken = False + # Port 9000 is the broken server + if port == 9000: + self.broken = True + errmsg = "This server is broken, please try another" + self.response = HTTPResponseMock500(None, errmsg=errmsg) + + def request(self, action, uri, body, headers): + LOG.debug(_("Request: action=%(action)s, uri=%(uri)r, " + "body=%(body)s, headers=%(headers)s"), + {'action': action, 'uri': uri, + 'body': body, 'headers': headers}) + if self.broken and "ExceptOnBadServer" in uri: + raise Exception("Broken server got an unexpected request") + if self.response: + return + + # detachment may return 404 and plugin shouldn't die + if uri.endswith('attachment') and action == 'DELETE': + self.response = HTTPResponseMock404(None) + else: + self.response = HTTPResponseMock(None) + + # Port creations/updates must contain binding information + if ('port' in uri and 'attachment' not in uri + and 'binding' not in body and 
action in ('POST', 'PUT')): + errmsg = "Port binding info missing in port request '%s'" % body + self.response = HTTPResponseMock500(None, errmsg=errmsg) + return + + return + + def getresponse(self): + return self.response + + def close(self): + pass + + +class HTTPConnectionMock404(HTTPConnectionMock): + + def __init__(self, server, port, timeout): + self.response = HTTPResponseMock404(None) + self.broken = True + + +class HTTPConnectionMock500(HTTPConnectionMock): + + def __init__(self, server, port, timeout): + self.response = HTTPResponseMock500(None) + self.broken = True + + +class VerifyMultiTenantFloatingIP(HTTPConnectionMock): + + def request(self, action, uri, body, headers): + # Only handle network update requests + if 'network' in uri and 'tenant' in uri and 'ports' not in uri: + req = json.loads(body) + if 'network' not in req or 'floatingips' not in req['network']: + msg = _("No floating IPs in request" + "uri=%(uri)s, body=%(body)s") % {'uri': uri, + 'body': body} + raise Exception(msg) + distinct_tenants = [] + for flip in req['network']['floatingips']: + if flip['tenant_id'] not in distinct_tenants: + distinct_tenants.append(flip['tenant_id']) + if len(distinct_tenants) < 2: + msg = _("Expected floating IPs from multiple tenants." 
+ "uri=%(uri)s, body=%(body)s") % {'uri': uri, + 'body': body} + raise Exception(msg) + super(VerifyMultiTenantFloatingIP, + self).request(action, uri, body, headers) + + +class HTTPSMockBase(HTTPConnectionMock): + expected_cert = '' + combined_cert = None + + def __init__(self, host, port=None, key_file=None, cert_file=None, + strict=None, timeout=None, source_address=None): + self.host = host + super(HTTPSMockBase, self).__init__(host, port, timeout) + + def request(self, method, url, body=None, headers={}): + self.connect() + super(HTTPSMockBase, self).request(method, url, body, headers) + + +class HTTPSNoValidation(HTTPSMockBase): + + def connect(self): + if self.combined_cert: + raise Exception('combined_cert set on NoValidation') + + +class HTTPSCAValidation(HTTPSMockBase): + expected_cert = 'DUMMYCERTIFICATEAUTHORITY' + + def connect(self): + contents = get_cert_contents(self.combined_cert) + if self.expected_cert not in contents: + raise Exception('No dummy CA cert in cert_file') + + +class HTTPSHostValidation(HTTPSMockBase): + expected_cert = 'DUMMYCERTFORHOST%s' + + def connect(self): + contents = get_cert_contents(self.combined_cert) + expected = self.expected_cert % self.host + if expected not in contents: + raise Exception(_('No host cert for %(server)s in cert %(cert)s'), + {'server': self.host, 'cert': contents}) + + +def get_cert_contents(path): + raise Exception('METHOD MUST BE MOCKED FOR TEST') diff --git a/neutron/tests/unit/bigswitch/test_agent_scheduler.py b/neutron/tests/unit/bigswitch/test_agent_scheduler.py new file mode 100644 index 000000000..b8d5e3aae --- /dev/null +++ b/neutron/tests/unit/bigswitch/test_agent_scheduler.py @@ -0,0 +1,33 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# Copyright 2013 Big Switch Networks, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +from neutron.tests.unit.bigswitch import test_base +from neutron.tests.unit.openvswitch import test_agent_scheduler + + +class BigSwitchDhcpAgentNotifierTestCase( + test_agent_scheduler.OvsDhcpAgentNotifierTestCase, + test_base.BigSwitchTestBase): + + plugin_str = ('%s.NeutronRestProxyV2' % + test_base.RESTPROXY_PKG_PATH) + + def setUp(self): + self.setup_config_files() + self.setup_patches() + super(BigSwitchDhcpAgentNotifierTestCase, self).setUp() + self.startHttpPatch() diff --git a/neutron/tests/unit/bigswitch/test_base.py b/neutron/tests/unit/bigswitch/test_base.py new file mode 100644 index 000000000..6fc5580eb --- /dev/null +++ b/neutron/tests/unit/bigswitch/test_base.py @@ -0,0 +1,74 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# Copyright 2013 Big Switch Networks, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import os + +import mock +from oslo.config import cfg + +import neutron.common.test_lib as test_lib +from neutron.db import api as db +from neutron.plugins.bigswitch import config +from neutron.tests.unit.bigswitch import fake_server + +# REVISIT(kevinbenton): This needs to be imported here to create the +# portbindings table since it's not imported until function call time +# in the porttracker_db module, which will cause unit test failures when +# the unit tests are being run by testtools +from neutron.db import portbindings_db # noqa + +RESTPROXY_PKG_PATH = 'neutron.plugins.bigswitch.plugin' +NOTIFIER = 'neutron.plugins.bigswitch.plugin.AgentNotifierApi' +CERTFETCH = 'neutron.plugins.bigswitch.servermanager.ServerPool._fetch_cert' +SERVER_MANAGER = 'neutron.plugins.bigswitch.servermanager' +HTTPCON = 'neutron.plugins.bigswitch.servermanager.httplib.HTTPConnection' +SPAWN = 'neutron.plugins.bigswitch.plugin.eventlet.GreenPool.spawn_n' +CWATCH = SERVER_MANAGER + '.ServerPool._consistency_watchdog' + + +class BigSwitchTestBase(object): + + _plugin_name = ('%s.NeutronRestProxyV2' % RESTPROXY_PKG_PATH) + + def setup_config_files(self): + etc_path = os.path.join(os.path.dirname(__file__), 'etc') + test_lib.test_config['config_files'] = [os.path.join(etc_path, + 'restproxy.ini.test')] + self.addCleanup(cfg.CONF.reset) + config.register_config() + # Only try SSL on SSL tests + cfg.CONF.set_override('server_ssl', False, 'RESTPROXY') + cfg.CONF.set_override('ssl_cert_directory', + os.path.join(etc_path, 'ssl'), 'RESTPROXY') + # The mock interferes with HTTP(S) connection caching + cfg.CONF.set_override('cache_connections', False, 'RESTPROXY') + + def setup_patches(self): + self.plugin_notifier_p = mock.patch(NOTIFIER) + # prevent any greenthreads from spawning + self.spawn_p = mock.patch(SPAWN, new=lambda *args, **kwargs: None) + # prevent the consistency watchdog from starting + self.watch_p = mock.patch(CWATCH, new=lambda *args, **kwargs: None) + 
self.addCleanup(db.clear_db) + self.plugin_notifier_p.start() + self.spawn_p.start() + self.watch_p.start() + + def startHttpPatch(self): + self.httpPatch = mock.patch(HTTPCON, + new=fake_server.HTTPConnectionMock) + self.httpPatch.start() diff --git a/neutron/tests/unit/bigswitch/test_capabilities.py b/neutron/tests/unit/bigswitch/test_capabilities.py new file mode 100644 index 000000000..89e4c5b72 --- /dev/null +++ b/neutron/tests/unit/bigswitch/test_capabilities.py @@ -0,0 +1,84 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# Copyright 2014 Big Switch Networks, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# @author Kevin Benton + +import contextlib +import mock + +from neutron.tests.unit.bigswitch import test_router_db + +PLUGIN = 'neutron.plugins.bigswitch.plugin' +SERVERMANAGER = PLUGIN + '.servermanager' +SERVERPOOL = SERVERMANAGER + '.ServerPool' +SERVERRESTCALL = SERVERMANAGER + '.ServerProxy.rest_call' +HTTPCON = SERVERMANAGER + '.httplib.HTTPConnection' + + +class CapabilitiesTests(test_router_db.RouterDBTestBase): + + def test_floating_ip_capability(self): + with contextlib.nested( + mock.patch(SERVERRESTCALL, + return_value=(200, None, '["floatingip"]', None)), + mock.patch(SERVERPOOL + '.rest_create_floatingip', + return_value=(200, None, None, None)), + mock.patch(SERVERPOOL + '.rest_delete_floatingip', + return_value=(200, None, None, None)) + ) as (mock_rest, mock_create, mock_delete): + with self.floatingip_with_assoc() as fip: + pass + mock_create.assert_has_calls( + [mock.call(fip['floatingip']['tenant_id'], fip['floatingip'])] + ) + mock_delete.assert_has_calls( + [mock.call(fip['floatingip']['tenant_id'], + fip['floatingip']['id'])] + ) + + def test_floating_ip_capability_neg(self): + with contextlib.nested( + mock.patch(SERVERRESTCALL, + return_value=(200, None, '[""]', None)), + mock.patch(SERVERPOOL + '.rest_update_network', + return_value=(200, None, None, None)) + ) as (mock_rest, mock_netupdate): + with self.floatingip_with_assoc() as fip: + pass + updates = [call[0][2]['floatingips'] + for call in mock_netupdate.call_args_list] + all_floats = [f['floating_ip_address'] + for floats in updates for f in floats] + self.assertIn(fip['floatingip']['floating_ip_address'], all_floats) + + def test_keep_alive_capability(self): + with mock.patch( + SERVERRESTCALL, return_value=(200, None, '["keep-alive"]', None) + ): + # perform a task to cause capabilities to be retrieved + with self.floatingip_with_assoc(): + pass + # stop default HTTP patch since we need a magicmock + self.httpPatch.stop() + # now mock HTTP class instead of REST so we can see 
headers + conmock = mock.patch(HTTPCON).start() + instance = conmock.return_value + instance.getresponse.return_value.getheader.return_value = 'HASHHEADER' + with self.network(): + callheaders = instance.request.mock_calls[0][1][3] + self.assertIn('Connection', callheaders) + self.assertEqual(callheaders['Connection'], 'keep-alive') diff --git a/neutron/tests/unit/bigswitch/test_restproxy_agent.py b/neutron/tests/unit/bigswitch/test_restproxy_agent.py new file mode 100644 index 000000000..4c961df7d --- /dev/null +++ b/neutron/tests/unit/bigswitch/test_restproxy_agent.py @@ -0,0 +1,188 @@ +# Copyright 2014 Big Switch Networks, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# +# @author: Kevin Benton, Big Switch Networks + +import contextlib + +import mock + +from neutron.openstack.common import importutils +from neutron.tests import base + +OVSBRIDGE = 'neutron.agent.linux.ovs_lib.OVSBridge' +PLUGINAPI = 'neutron.plugins.bigswitch.agent.restproxy_agent.PluginApi' +CONTEXT = 'neutron.context' +CONSUMERCREATE = 'neutron.agent.rpc.create_consumers' +SGRPC = 'neutron.agent.securitygroups_rpc' +SGAGENT = 'neutron.plugins.bigswitch.agent.restproxy_agent.SecurityGroupAgent' +AGENTMOD = 'neutron.plugins.bigswitch.agent.restproxy_agent' +NEUTRONCFG = 'neutron.common.config' +PLCONFIG = 'neutron.plugins.bigswitch.config' + + +class BaseAgentTestCase(base.BaseTestCase): + + def setUp(self): + super(BaseAgentTestCase, self).setUp() + self.mod_agent = importutils.import_module(AGENTMOD) + + +class TestRestProxyAgentOVS(BaseAgentTestCase): + def setUp(self): + super(TestRestProxyAgentOVS, self).setUp() + self.plapi = mock.patch(PLUGINAPI).start() + self.ovsbridge = mock.patch(OVSBRIDGE).start() + self.context = mock.patch(CONTEXT).start() + self.rpc = mock.patch(CONSUMERCREATE).start() + self.sg_rpc = mock.patch(SGRPC).start() + self.sg_agent = mock.patch(SGAGENT).start() + + def mock_agent(self): + mock_context = mock.Mock(return_value='abc') + self.context.get_admin_context_without_session = mock_context + return self.mod_agent.RestProxyAgent('int-br', 2, 'helper') + + def mock_port_update(self, **kwargs): + agent = self.mock_agent() + agent.port_update(mock.Mock(), **kwargs) + + def test_port_update(self): + port = {'id': 1, 'security_groups': 'default'} + + with mock.patch.object(self.ovsbridge.return_value, + 'get_vif_port_by_id', + return_value=1) as get_vif: + self.mock_port_update(port=port) + + get_vif.assert_called_once_with(1) + self.sg_agent.assert_has_calls([ + mock.call().refresh_firewall() + ]) + + def test_port_update_not_vifport(self): + port = {'id': 1, 'security_groups': 'default'} + + with 
mock.patch.object(self.ovsbridge.return_value, + 'get_vif_port_by_id', + return_value=0) as get_vif: + self.mock_port_update(port=port) + + get_vif.assert_called_once_with(1) + self.assertFalse(self.sg_agent.return_value.refresh_firewall.called) + + def test_port_update_without_secgroup(self): + port = {'id': 1} + + with mock.patch.object(self.ovsbridge.return_value, + 'get_vif_port_by_id', + return_value=1) as get_vif: + self.mock_port_update(port=port) + + get_vif.assert_called_once_with(1) + self.assertFalse(self.sg_agent.return_value.refresh_firewall.called) + + def mock_update_ports(self, vif_port_set=None, registered_ports=None): + with mock.patch.object(self.ovsbridge.return_value, + 'get_vif_port_set', + return_value=vif_port_set): + agent = self.mock_agent() + return agent._update_ports(registered_ports) + + def test_update_ports_unchanged(self): + self.assertIsNone(self.mock_update_ports()) + + def test_update_ports_changed(self): + vif_port_set = set([1, 3]) + registered_ports = set([1, 2]) + expected = dict(current=vif_port_set, + added=set([3]), + removed=set([2])) + + actual = self.mock_update_ports(vif_port_set, registered_ports) + + self.assertEqual(expected, actual) + + def mock_process_devices_filter(self, port_info): + agent = self.mock_agent() + agent._process_devices_filter(port_info) + + def test_process_devices_filter_add(self): + port_info = {'added': 1} + + self.mock_process_devices_filter(port_info) + + self.sg_agent.assert_has_calls([ + mock.call().prepare_devices_filter(1) + ]) + + def test_process_devices_filter_remove(self): + port_info = {'removed': 2} + + self.mock_process_devices_filter(port_info) + + self.sg_agent.assert_has_calls([ + mock.call().remove_devices_filter(2) + ]) + + def test_process_devices_filter_both(self): + port_info = {'added': 1, 'removed': 2} + + self.mock_process_devices_filter(port_info) + + self.sg_agent.assert_has_calls([ + mock.call().prepare_devices_filter(1), + mock.call().remove_devices_filter(2) + ]) + 
+ def test_process_devices_filter_none(self): + port_info = {} + + self.mock_process_devices_filter(port_info) + + self.assertFalse( + self.sg_agent.return_value.prepare_devices_filter.called) + self.assertFalse( + self.sg_agent.return_value.remove_devices_filter.called) + + +class TestRestProxyAgent(BaseAgentTestCase): + def mock_main(self): + cfg_attrs = {'CONF.RESTPROXYAGENT.integration_bridge': 'integ_br', + 'CONF.RESTPROXYAGENT.polling_interval': 5, + 'CONF.RESTPROXYAGENT.virtual_switch_type': 'ovs', + 'CONF.AGENT.root_helper': 'helper'} + with contextlib.nested( + mock.patch(AGENTMOD + '.cfg', **cfg_attrs), + mock.patch(AGENTMOD + '.config.init'), + mock.patch(NEUTRONCFG), + mock.patch(PLCONFIG), + ) as (mock_conf, mock_init, mock_log_conf, mock_pluginconf): + self.mod_agent.main() + + mock_log_conf.assert_has_calls([ + mock.call(mock_conf), + ]) + + def test_main(self): + agent_attrs = {'daemon_loop.side_effect': SystemExit(0)} + with mock.patch(AGENTMOD + '.RestProxyAgent', + **agent_attrs) as mock_agent: + self.assertRaises(SystemExit, self.mock_main) + + mock_agent.assert_has_calls([ + mock.call('integ_br', 5, 'helper', 'ovs'), + mock.call().daemon_loop() + ]) diff --git a/neutron/tests/unit/bigswitch/test_restproxy_plugin.py b/neutron/tests/unit/bigswitch/test_restproxy_plugin.py new file mode 100644 index 000000000..df3aeb030 --- /dev/null +++ b/neutron/tests/unit/bigswitch/test_restproxy_plugin.py @@ -0,0 +1,316 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# Copyright 2012 Big Switch Networks, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import contextlib +import mock +from oslo.config import cfg +import webob.exc + +from neutron.common import constants +from neutron import context +from neutron.extensions import portbindings +from neutron import manager +from neutron.tests.unit import _test_extension_portbindings as test_bindings +from neutron.tests.unit.bigswitch import fake_server +from neutron.tests.unit.bigswitch import test_base +from neutron.tests.unit import test_api_v2 +import neutron.tests.unit.test_db_plugin as test_plugin +import neutron.tests.unit.test_extension_allowedaddresspairs as test_addr_pair + +patch = mock.patch +HTTPCON = 'neutron.plugins.bigswitch.servermanager.httplib.HTTPConnection' + + +class BigSwitchProxyPluginV2TestCase(test_base.BigSwitchTestBase, + test_plugin.NeutronDbPluginV2TestCase): + + def setUp(self, plugin_name=None): + if hasattr(self, 'HAS_PORT_FILTER'): + cfg.CONF.set_override( + 'enable_security_group', self.HAS_PORT_FILTER, 'SECURITYGROUP') + self.setup_config_files() + self.setup_patches() + if plugin_name: + self._plugin_name = plugin_name + super(BigSwitchProxyPluginV2TestCase, + self).setUp(self._plugin_name) + self.port_create_status = 'BUILD' + self.startHttpPatch() + + +class TestBigSwitchProxyBasicGet(test_plugin.TestBasicGet, + BigSwitchProxyPluginV2TestCase): + + pass + + +class TestBigSwitchProxyV2HTTPResponse(test_plugin.TestV2HTTPResponse, + BigSwitchProxyPluginV2TestCase): + + def test_failover_memory(self): + # first request causes failover so next shouldn't hit bad server + with self.network() as net: + kwargs = 
{'tenant_id': 'ExceptOnBadServer'} + with self.network(**kwargs) as net: + req = self.new_show_request('networks', net['network']['id']) + res = req.get_response(self.api) + self.assertEqual(res.status_int, 200) + + +class TestBigSwitchProxyPortsV2(test_plugin.TestPortsV2, + BigSwitchProxyPluginV2TestCase, + test_bindings.PortBindingsTestCase): + + VIF_TYPE = portbindings.VIF_TYPE_OVS + HAS_PORT_FILTER = False + + def setUp(self, plugin_name=None): + super(TestBigSwitchProxyPortsV2, + self).setUp(self._plugin_name) + + def test_router_port_status_active(self): + # router ports screw up port auto-deletion so it has to be + # disabled for this test + with self.network(do_delete=False) as net: + with self.subnet(network=net, do_delete=False) as sub: + with self.port( + subnet=sub, + no_delete=True, + device_owner=constants.DEVICE_OWNER_ROUTER_INTF + ) as port: + # router ports should be immediately active + self.assertEqual(port['port']['status'], 'ACTIVE') + + def test_update_port_status_build(self): + # normal ports go into the pending build state for async creation + with self.port() as port: + self.assertEqual(port['port']['status'], 'BUILD') + self.assertEqual(self.port_create_status, 'BUILD') + + def _get_ports(self, netid): + return self.deserialize('json', + self._list_ports('json', netid=netid))['ports'] + + def test_rollback_for_port_create(self): + plugin = manager.NeutronManager.get_plugin() + with self.subnet() as s: + # stop normal patch + self.httpPatch.stop() + # allow thread spawns for this test + self.spawn_p.stop() + kwargs = {'device_id': 'somedevid'} + # put in a broken 'server' + httpPatch = patch(HTTPCON, new=fake_server.HTTPConnectionMock500) + httpPatch.start() + with self.port(subnet=s, **kwargs): + # wait for async port create request to finish + plugin.evpool.waitall() + # put good 'server' back in + httpPatch.stop() + self.httpPatch.start() + ports = self._get_ports(s['subnet']['network_id']) + #failure to create should result in port in 
error state + self.assertEqual(ports[0]['status'], 'ERROR') + + def test_rollback_for_port_update(self): + with self.network() as n: + with self.port(network_id=n['network']['id'], + device_id='66') as port: + port = self._get_ports(n['network']['id'])[0] + data = {'port': {'name': 'aNewName', 'device_id': '99'}} + # stop normal patch + self.httpPatch.stop() + with patch(HTTPCON, new=fake_server.HTTPConnectionMock500): + self.new_update_request( + 'ports', data, port['id']).get_response(self.api) + self.httpPatch.start() + uport = self._get_ports(n['network']['id'])[0] + # name should have stayed the same + self.assertEqual(port['name'], uport['name']) + + def test_rollback_for_port_delete(self): + with self.network() as n: + with self.port(network_id=n['network']['id'], + device_id='somedevid') as port: + # stop normal patch + self.httpPatch.stop() + with patch(HTTPCON, new=fake_server.HTTPConnectionMock500): + self._delete('ports', port['port']['id'], + expected_code= + webob.exc.HTTPInternalServerError.code) + self.httpPatch.start() + port = self._get_ports(n['network']['id'])[0] + self.assertEqual('BUILD', port['status']) + + def test_correct_shared_net_tenant_id(self): + # tenant_id in port requests should match network tenant_id instead + # of port tenant_id + def rest_port_op(self, ten_id, netid, port): + if ten_id != 'SHARED': + raise Exception('expecting tenant_id SHARED. 
got %s' % ten_id) + with self.network(tenant_id='SHARED', shared=True) as net: + with self.subnet(network=net) as sub: + pref = 'neutron.plugins.bigswitch.servermanager.ServerPool.%s' + tomock = [pref % 'rest_create_port', + pref % 'rest_update_port', + pref % 'rest_delete_port'] + patches = [patch(f, create=True, new=rest_port_op) + for f in tomock] + for restp in patches: + restp.start() + with self.port(subnet=sub, tenant_id='port-owner') as port: + data = {'port': {'binding:host_id': 'someotherhost', + 'device_id': 'override_dev'}} + req = self.new_update_request('ports', data, + port['port']['id']) + res = req.get_response(self.api) + self.assertEqual(res.status_int, 200) + + def test_create404_triggers_sync(self): + # allow async port thread for this patch + self.spawn_p.stop() + with contextlib.nested( + self.subnet(), + patch(HTTPCON, create=True, + new=fake_server.HTTPConnectionMock404), + patch(test_base.RESTPROXY_PKG_PATH + + '.NeutronRestProxyV2._send_all_data') + ) as (s, mock_http, mock_send_all): + with self.port(subnet=s, device_id='somedevid') as p: + # wait for the async port thread to finish + plugin = manager.NeutronManager.get_plugin() + plugin.evpool.waitall() + call = mock.call( + send_routers=True, send_ports=True, send_floating_ips=True, + triggered_by_tenant=p['port']['tenant_id'] + ) + mock_send_all.assert_has_calls([call]) + self.spawn_p.start() + + def test_port_vif_details_default(self): + kwargs = {'name': 'name', 'device_id': 'override_dev'} + with self.port(**kwargs) as port: + self.assertEqual(port['port']['binding:vif_type'], + portbindings.VIF_TYPE_OVS) + + def test_port_vif_details_override(self): + # ivshost is in the test config to override to IVS + kwargs = {'name': 'name', 'binding:host_id': 'ivshost', + 'device_id': 'override_dev'} + with self.port(**kwargs) as port: + self.assertEqual(port['port']['binding:vif_type'], + portbindings.VIF_TYPE_IVS) + kwargs = {'name': 'name2', 'binding:host_id': 'someotherhost', + 
'device_id': 'other_dev'} + with self.port(**kwargs) as port: + self.assertEqual(port['port']['binding:vif_type'], self.VIF_TYPE) + + def test_port_move(self): + # ivshost is in the test config to override to IVS + kwargs = {'name': 'name', 'binding:host_id': 'ivshost', + 'device_id': 'override_dev'} + with self.port(**kwargs) as port: + data = {'port': {'binding:host_id': 'someotherhost', + 'device_id': 'override_dev'}} + req = self.new_update_request('ports', data, port['port']['id']) + res = self.deserialize(self.fmt, req.get_response(self.api)) + self.assertEqual(res['port']['binding:vif_type'], self.VIF_TYPE) + + def _make_port(self, fmt, net_id, expected_res_status=None, arg_list=None, + **kwargs): + arg_list = arg_list or () + arg_list += ('binding:host_id', ) + res = self._create_port(fmt, net_id, expected_res_status, + arg_list, **kwargs) + # Things can go wrong - raise HTTP exc with res code only + # so it can be caught by unit tests + if res.status_int >= 400: + raise webob.exc.HTTPClientError(code=res.status_int) + return self.deserialize(fmt, res) + + +class TestVifDifferentDefault(BigSwitchProxyPluginV2TestCase): + + def setup_config_files(self): + super(TestVifDifferentDefault, self).setup_config_files() + cfg.CONF.set_override('vif_type', 'ivs', 'NOVA') + + def test_default_viftype(self): + with self.port() as port: + self.assertEqual(port['port']['binding:vif_type'], 'ivs') + + +class TestBigSwitchProxyNetworksV2(test_plugin.TestNetworksV2, + BigSwitchProxyPluginV2TestCase): + + def _get_networks(self, tenant_id): + ctx = context.Context('', tenant_id) + return manager.NeutronManager.get_plugin().get_networks(ctx) + + def test_rollback_on_network_create(self): + tid = test_api_v2._uuid() + kwargs = {'tenant_id': tid} + self.httpPatch.stop() + with patch(HTTPCON, new=fake_server.HTTPConnectionMock500): + self._create_network('json', 'netname', True, **kwargs) + self.httpPatch.start() + self.assertFalse(self._get_networks(tid)) + + def 
test_rollback_on_network_update(self): + with self.network() as n: + data = {'network': {'name': 'aNewName'}} + self.httpPatch.stop() + with patch(HTTPCON, new=fake_server.HTTPConnectionMock500): + self.new_update_request( + 'networks', data, n['network']['id'] + ).get_response(self.api) + self.httpPatch.start() + updatedn = self._get_networks(n['network']['tenant_id'])[0] + # name should have stayed the same due to failure + self.assertEqual(n['network']['name'], updatedn['name']) + + def test_rollback_on_network_delete(self): + with self.network() as n: + self.httpPatch.stop() + with patch(HTTPCON, new=fake_server.HTTPConnectionMock500): + self._delete( + 'networks', n['network']['id'], + expected_code=webob.exc.HTTPInternalServerError.code) + self.httpPatch.start() + # network should still exist in db + self.assertEqual(n['network']['id'], + self._get_networks(n['network']['tenant_id'] + )[0]['id']) + + +class TestBigSwitchProxySubnetsV2(test_plugin.TestSubnetsV2, + BigSwitchProxyPluginV2TestCase): + + pass + + +class TestBigSwitchProxySync(BigSwitchProxyPluginV2TestCase): + + def test_send_data(self): + plugin_obj = manager.NeutronManager.get_plugin() + result = plugin_obj._send_all_data() + self.assertEqual(result[0], 200) + + +class TestBigSwitchAddressPairs(BigSwitchProxyPluginV2TestCase, + test_addr_pair.TestAllowedAddressPairs): + pass diff --git a/neutron/tests/unit/bigswitch/test_router_db.py b/neutron/tests/unit/bigswitch/test_router_db.py new file mode 100644 index 000000000..fc82b727c --- /dev/null +++ b/neutron/tests/unit/bigswitch/test_router_db.py @@ -0,0 +1,554 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 Big Switch Networks, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# Adapted from neutron.tests.unit.test_l3_plugin +# @author: Sumit Naiksatam, sumitnaiksatam@gmail.com +# + +import contextlib +import copy + +import mock +from oslo.config import cfg +from six import moves +from webob import exc + +from neutron.common import test_lib +from neutron import context +from neutron.extensions import l3 +from neutron import manager +from neutron.openstack.common import uuidutils +from neutron.plugins.bigswitch.extensions import routerrule +from neutron.tests.unit.bigswitch import fake_server +from neutron.tests.unit.bigswitch import test_base +from neutron.tests.unit import test_api_v2 +from neutron.tests.unit import test_extension_extradhcpopts as test_extradhcp +from neutron.tests.unit import test_l3_plugin + + +HTTPCON = 'neutron.plugins.bigswitch.servermanager.httplib.HTTPConnection' +_uuid = uuidutils.generate_uuid + + +class RouterRulesTestExtensionManager(object): + + def get_resources(self): + l3.RESOURCE_ATTRIBUTE_MAP['routers'].update( + routerrule.EXTENDED_ATTRIBUTES_2_0['routers']) + return l3.L3.get_resources() + + def get_actions(self): + return [] + + def get_request_extensions(self): + return [] + + +class DHCPOptsTestCase(test_base.BigSwitchTestBase, + test_extradhcp.TestExtraDhcpOpt): + + def setUp(self, plugin=None): + self.setup_patches() + self.setup_config_files() + super(test_extradhcp.ExtraDhcpOptDBTestCase, + self).setUp(plugin=self._plugin_name) + self.startHttpPatch() + + +class RouterDBTestBase(test_base.BigSwitchTestBase, + test_l3_plugin.L3BaseForIntTests, + test_l3_plugin.L3NatTestCaseMixin): + 
+ def setUp(self): + self.setup_patches() + self.setup_config_files() + ext_mgr = RouterRulesTestExtensionManager() + super(RouterDBTestBase, self).setUp(plugin=self._plugin_name, + ext_mgr=ext_mgr) + cfg.CONF.set_default('allow_overlapping_ips', False) + self.plugin_obj = manager.NeutronManager.get_plugin() + self.startHttpPatch() + + def tearDown(self): + super(RouterDBTestBase, self).tearDown() + del test_lib.test_config['config_files'] + + +class RouterDBTestCase(RouterDBTestBase, + test_l3_plugin.L3NatDBIntTestCase): + + def test_router_remove_router_interface_wrong_subnet_returns_400(self): + with self.router() as r: + with self.subnet() as s: + with self.subnet(cidr='10.0.10.0/24') as s1: + with self.port(subnet=s1, no_delete=True) as p: + self._router_interface_action('add', + r['router']['id'], + None, + p['port']['id']) + self._router_interface_action('remove', + r['router']['id'], + s['subnet']['id'], + p['port']['id'], + exc.HTTPBadRequest.code) + #remove properly to clean-up + self._router_interface_action('remove', + r['router']['id'], + None, + p['port']['id']) + + def test_router_remove_router_interface_wrong_port_returns_404(self): + with self.router() as r: + with self.subnet() as s: + with self.port(subnet=s, no_delete=True) as p: + self._router_interface_action('add', + r['router']['id'], + None, + p['port']['id']) + # create another port for testing failure case + res = self._create_port('json', p['port']['network_id']) + p2 = self.deserialize('json', res) + self._router_interface_action('remove', + r['router']['id'], + None, + p2['port']['id'], + exc.HTTPNotFound.code) + # remove correct interface to cleanup + self._router_interface_action('remove', + r['router']['id'], + None, + p['port']['id']) + # remove extra port created + self._delete('ports', p2['port']['id']) + + def test_multi_tenant_flip_alllocation(self): + tenant1_id = _uuid() + tenant2_id = _uuid() + with contextlib.nested( + self.network(tenant_id=tenant1_id), + 
self.network(tenant_id=tenant2_id)) as (n1, n2): + with contextlib.nested( + self.subnet(network=n1, cidr='11.0.0.0/24'), + self.subnet(network=n2, cidr='12.0.0.0/24'), + self.subnet(cidr='13.0.0.0/24')) as (s1, s2, psub): + with contextlib.nested( + self.router(tenant_id=tenant1_id), + self.router(tenant_id=tenant2_id), + self.port(subnet=s1, tenant_id=tenant1_id), + self.port(subnet=s2, tenant_id=tenant2_id)) as (r1, r2, + p1, p2): + self._set_net_external(psub['subnet']['network_id']) + s1id = p1['port']['fixed_ips'][0]['subnet_id'] + s2id = p2['port']['fixed_ips'][0]['subnet_id'] + s1 = {'subnet': {'id': s1id}} + s2 = {'subnet': {'id': s2id}} + self._add_external_gateway_to_router( + r1['router']['id'], + psub['subnet']['network_id']) + self._add_external_gateway_to_router( + r2['router']['id'], + psub['subnet']['network_id']) + self._router_interface_action( + 'add', r1['router']['id'], + s1['subnet']['id'], None) + self._router_interface_action( + 'add', r2['router']['id'], + s2['subnet']['id'], None) + fl1 = self._make_floatingip_for_tenant_port( + net_id=psub['subnet']['network_id'], + port_id=p1['port']['id'], + tenant_id=tenant1_id) + self.httpPatch.stop() + multiFloatPatch = mock.patch( + HTTPCON, + new=fake_server.VerifyMultiTenantFloatingIP) + multiFloatPatch.start() + fl2 = self._make_floatingip_for_tenant_port( + net_id=psub['subnet']['network_id'], + port_id=p2['port']['id'], + tenant_id=tenant2_id) + multiFloatPatch.stop() + self.httpPatch.start() + self._delete('floatingips', fl1['floatingip']['id']) + self._delete('floatingips', fl2['floatingip']['id']) + self._router_interface_action( + 'remove', r1['router']['id'], + s1['subnet']['id'], None) + self._router_interface_action( + 'remove', r2['router']['id'], + s2['subnet']['id'], None) + + def _make_floatingip_for_tenant_port(self, net_id, port_id, tenant_id): + data = {'floatingip': {'floating_network_id': net_id, + 'tenant_id': tenant_id, + 'port_id': port_id}} + floatingip_req = 
self.new_create_request('floatingips', data, self.fmt) + res = floatingip_req.get_response(self.ext_api) + return self.deserialize(self.fmt, res) + + def test_floatingip_with_invalid_create_port(self): + self._test_floatingip_with_invalid_create_port( + 'neutron.plugins.bigswitch.plugin.NeutronRestProxyV2') + + def test_create_floatingip_no_ext_gateway_return_404(self): + with self.subnet(cidr='10.0.10.0/24') as public_sub: + self._set_net_external(public_sub['subnet']['network_id']) + with self.port() as private_port: + with self.router(): + res = self._create_floatingip( + 'json', + public_sub['subnet']['network_id'], + port_id=private_port['port']['id']) + self.assertEqual(res.status_int, exc.HTTPNotFound.code) + + def test_router_update_gateway(self): + with self.router() as r: + with self.subnet() as s1: + with self.subnet(cidr='10.0.10.0/24') as s2: + self._set_net_external(s1['subnet']['network_id']) + self._add_external_gateway_to_router( + r['router']['id'], + s1['subnet']['network_id']) + body = self._show('routers', r['router']['id']) + net_id = (body['router'] + ['external_gateway_info']['network_id']) + self.assertEqual(net_id, s1['subnet']['network_id']) + self._set_net_external(s2['subnet']['network_id']) + self._add_external_gateway_to_router( + r['router']['id'], + s2['subnet']['network_id']) + body = self._show('routers', r['router']['id']) + net_id = (body['router'] + ['external_gateway_info']['network_id']) + self.assertEqual(net_id, s2['subnet']['network_id']) + self._remove_external_gateway_from_router( + r['router']['id'], + s2['subnet']['network_id']) + + def test_router_add_interface_overlapped_cidr(self): + self.skipTest("Plugin does not support") + + def test_router_add_interface_overlapped_cidr_returns_400(self): + self.skipTest("Plugin does not support") + + def test_list_nets_external(self): + self.skipTest("Plugin does not support") + + def test_router_update_gateway_with_existed_floatingip(self): + with 
self.subnet(cidr='10.0.10.0/24') as subnet: + self._set_net_external(subnet['subnet']['network_id']) + with self.floatingip_with_assoc() as fip: + self._add_external_gateway_to_router( + fip['floatingip']['router_id'], + subnet['subnet']['network_id'], + expected_code=exc.HTTPConflict.code) + + def test_router_remove_interface_wrong_subnet_returns_400(self): + with self.router() as r: + with self.subnet(cidr='10.0.10.0/24') as s: + with self.port(no_delete=True) as p: + self._router_interface_action('add', + r['router']['id'], + None, + p['port']['id']) + self._router_interface_action('remove', + r['router']['id'], + s['subnet']['id'], + p['port']['id'], + exc.HTTPBadRequest.code) + #remove properly to clean-up + self._router_interface_action('remove', + r['router']['id'], + None, + p['port']['id']) + + def test_router_remove_interface_wrong_port_returns_404(self): + with self.router() as r: + with self.subnet(cidr='10.0.10.0/24'): + with self.port(no_delete=True) as p: + self._router_interface_action('add', + r['router']['id'], + None, + p['port']['id']) + # create another port for testing failure case + res = self._create_port('json', p['port']['network_id']) + p2 = self.deserialize('json', res) + self._router_interface_action('remove', + r['router']['id'], + None, + p2['port']['id'], + exc.HTTPNotFound.code) + # remove correct interface to cleanup + self._router_interface_action('remove', + r['router']['id'], + None, + p['port']['id']) + # remove extra port created + self._delete('ports', p2['port']['id']) + + def test_send_data(self): + fmt = 'json' + plugin_obj = manager.NeutronManager.get_plugin() + + with self.router() as r: + r_id = r['router']['id'] + + with self.subnet(cidr='10.0.10.0/24') as s: + s_id = s['subnet']['id'] + + with self.router() as r1: + r1_id = r1['router']['id'] + body = self._router_interface_action('add', r_id, s_id, + None) + self.assertIn('port_id', body) + r_port_id = body['port_id'] + body = self._show('ports', r_port_id) + 
self.assertEqual(body['port']['device_id'], r_id) + + with self.subnet(cidr='10.0.20.0/24') as s1: + s1_id = s1['subnet']['id'] + body = self._router_interface_action('add', r1_id, + s1_id, None) + self.assertIn('port_id', body) + r1_port_id = body['port_id'] + body = self._show('ports', r1_port_id) + self.assertEqual(body['port']['device_id'], r1_id) + + with self.subnet(cidr='11.0.0.0/24') as public_sub: + public_net_id = public_sub['subnet']['network_id'] + self._set_net_external(public_net_id) + + with self.port() as prv_port: + prv_fixed_ip = prv_port['port']['fixed_ips'][0] + priv_sub_id = prv_fixed_ip['subnet_id'] + self._add_external_gateway_to_router( + r_id, public_net_id) + self._router_interface_action('add', r_id, + priv_sub_id, + None) + + priv_port_id = prv_port['port']['id'] + res = self._create_floatingip( + fmt, public_net_id, + port_id=priv_port_id) + self.assertEqual(res.status_int, + exc.HTTPCreated.code) + floatingip = self.deserialize(fmt, res) + + result = plugin_obj._send_all_data() + self.assertEqual(result[0], 200) + + self._delete('floatingips', + floatingip['floatingip']['id']) + self._remove_external_gateway_from_router( + r_id, public_net_id) + self._router_interface_action('remove', r_id, + priv_sub_id, + None) + self._router_interface_action('remove', r_id, s_id, + None) + self._show('ports', r_port_id, + expected_code=exc.HTTPNotFound.code) + self._router_interface_action('remove', r1_id, s1_id, + None) + self._show('ports', r1_port_id, + expected_code=exc.HTTPNotFound.code) + + def test_router_rules_update(self): + with self.router() as r: + r_id = r['router']['id'] + router_rules = [{'destination': '1.2.3.4/32', + 'source': '4.3.2.1/32', + 'action': 'permit', + 'nexthops': ['4.4.4.4', '4.4.4.5']}] + body = self._update('routers', r_id, + {'router': {'router_rules': router_rules}}) + + body = self._show('routers', r['router']['id']) + self.assertIn('router_rules', body['router']) + rules = body['router']['router_rules'] + 
self.assertEqual(_strip_rule_ids(rules), router_rules) + # Try after adding another rule + router_rules.append({'source': 'external', + 'destination': '8.8.8.8/32', + 'action': 'permit', 'nexthops': []}) + body = self._update('routers', r['router']['id'], + {'router': {'router_rules': router_rules}}) + + body = self._show('routers', r['router']['id']) + self.assertIn('router_rules', body['router']) + rules = body['router']['router_rules'] + self.assertEqual(_strip_rule_ids(rules), router_rules) + + def test_router_rules_separation(self): + with self.router() as r1: + with self.router() as r2: + r1_id = r1['router']['id'] + r2_id = r2['router']['id'] + router1_rules = [{'destination': '5.6.7.8/32', + 'source': '8.7.6.5/32', + 'action': 'permit', + 'nexthops': ['8.8.8.8', '9.9.9.9']}] + router2_rules = [{'destination': '1.2.3.4/32', + 'source': '4.3.2.1/32', + 'action': 'permit', + 'nexthops': ['4.4.4.4', '4.4.4.5']}] + body1 = self._update('routers', r1_id, + {'router': + {'router_rules': router1_rules}}) + body2 = self._update('routers', r2_id, + {'router': + {'router_rules': router2_rules}}) + + body1 = self._show('routers', r1_id) + body2 = self._show('routers', r2_id) + rules1 = body1['router']['router_rules'] + rules2 = body2['router']['router_rules'] + self.assertEqual(_strip_rule_ids(rules1), router1_rules) + self.assertEqual(_strip_rule_ids(rules2), router2_rules) + + def test_router_rules_validation(self): + with self.router() as r: + r_id = r['router']['id'] + good_rules = [{'destination': '1.2.3.4/32', + 'source': '4.3.2.1/32', + 'action': 'permit', + 'nexthops': ['4.4.4.4', '4.4.4.5']}] + + body = self._update('routers', r_id, + {'router': {'router_rules': good_rules}}) + body = self._show('routers', r_id) + self.assertIn('router_rules', body['router']) + self.assertEqual(good_rules, + _strip_rule_ids(body['router']['router_rules'])) + + # Missing nexthops should be populated with an empty list + light_rules = copy.deepcopy(good_rules) + del 
light_rules[0]['nexthops'] + body = self._update('routers', r_id, + {'router': {'router_rules': light_rules}}) + body = self._show('routers', r_id) + self.assertIn('router_rules', body['router']) + light_rules[0]['nexthops'] = [] + self.assertEqual(light_rules, + _strip_rule_ids(body['router']['router_rules'])) + # bad CIDR + bad_rules = copy.deepcopy(good_rules) + bad_rules[0]['destination'] = '1.1.1.1' + body = self._update('routers', r_id, + {'router': {'router_rules': bad_rules}}, + expected_code=exc.HTTPBadRequest.code) + # bad next hop + bad_rules = copy.deepcopy(good_rules) + bad_rules[0]['nexthops'] = ['1.1.1.1', 'f2'] + body = self._update('routers', r_id, + {'router': {'router_rules': bad_rules}}, + expected_code=exc.HTTPBadRequest.code) + # bad action + bad_rules = copy.deepcopy(good_rules) + bad_rules[0]['action'] = 'dance' + body = self._update('routers', r_id, + {'router': {'router_rules': bad_rules}}, + expected_code=exc.HTTPBadRequest.code) + # duplicate rule with opposite action + bad_rules = copy.deepcopy(good_rules) + bad_rules.append(copy.deepcopy(bad_rules[0])) + bad_rules.append(copy.deepcopy(bad_rules[0])) + bad_rules[1]['source'] = 'any' + bad_rules[2]['action'] = 'deny' + body = self._update('routers', r_id, + {'router': {'router_rules': bad_rules}}, + expected_code=exc.HTTPBadRequest.code) + # duplicate nexthop + bad_rules = copy.deepcopy(good_rules) + bad_rules[0]['nexthops'] = ['1.1.1.1', '1.1.1.1'] + body = self._update('routers', r_id, + {'router': {'router_rules': bad_rules}}, + expected_code=exc.HTTPBadRequest.code) + # make sure light rules persisted during bad updates + body = self._show('routers', r_id) + self.assertIn('router_rules', body['router']) + self.assertEqual(light_rules, + _strip_rule_ids(body['router']['router_rules'])) + + def test_router_rules_config_change(self): + cfg.CONF.set_override('tenant_default_router_rule', + ['*:any:any:deny', + '*:8.8.8.8/32:any:permit:1.2.3.4'], + 'ROUTER') + with self.router() as r: + 
body = self._show('routers', r['router']['id']) + expected_rules = [{'source': 'any', 'destination': 'any', + 'nexthops': [], 'action': 'deny'}, + {'source': '8.8.8.8/32', 'destination': 'any', + 'nexthops': ['1.2.3.4'], 'action': 'permit'}] + self.assertEqual(expected_rules, + _strip_rule_ids(body['router']['router_rules'])) + + def test_rule_exhaustion(self): + cfg.CONF.set_override('max_router_rules', 10, 'ROUTER') + with self.router() as r: + rules = [] + for i in moves.xrange(1, 12): + rule = {'source': 'any', 'nexthops': [], + 'destination': '1.1.1.' + str(i) + '/32', + 'action': 'permit'} + rules.append(rule) + self._update('routers', r['router']['id'], + {'router': {'router_rules': rules}}, + expected_code=exc.HTTPBadRequest.code) + + def test_rollback_on_router_create(self): + tid = test_api_v2._uuid() + self.httpPatch.stop() + with mock.patch(HTTPCON, new=fake_server.HTTPConnectionMock500): + self._create_router('json', tid) + self.assertTrue(len(self._get_routers(tid)) == 0) + + def test_rollback_on_router_update(self): + with self.router() as r: + data = {'router': {'name': 'aNewName'}} + self.httpPatch.stop() + with mock.patch(HTTPCON, new=fake_server.HTTPConnectionMock500): + self.new_update_request( + 'routers', data, r['router']['id']).get_response(self.api) + self.httpPatch.start() + updatedr = self._get_routers(r['router']['tenant_id'])[0] + # name should have stayed the same due to failure + self.assertEqual(r['router']['name'], updatedr['name']) + + def test_rollback_on_router_delete(self): + with self.router() as r: + self.httpPatch.stop() + with mock.patch(HTTPCON, new=fake_server.HTTPConnectionMock500): + self._delete('routers', r['router']['id'], + expected_code=exc.HTTPInternalServerError.code) + self.httpPatch.start() + self.assertEqual(r['router']['id'], + self._get_routers(r['router']['tenant_id'] + )[0]['id']) + + def _get_routers(self, tenant_id): + ctx = context.Context('', tenant_id) + return self.plugin_obj.get_routers(ctx) + + +def 
_strip_rule_ids(rules): + cleaned = [] + for rule in rules: + del rule['id'] + cleaned.append(rule) + return cleaned diff --git a/neutron/tests/unit/bigswitch/test_security_groups.py b/neutron/tests/unit/bigswitch/test_security_groups.py new file mode 100644 index 000000000..1e3a7aa56 --- /dev/null +++ b/neutron/tests/unit/bigswitch/test_security_groups.py @@ -0,0 +1,47 @@ +# Copyright 2014, Big Switch Networks +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from neutron import manager +from neutron.tests.unit.bigswitch import test_base +from neutron.tests.unit import test_extension_security_group as test_sg +from neutron.tests.unit import test_security_groups_rpc as test_sg_rpc + + +class RestProxySecurityGroupsTestCase(test_sg.SecurityGroupDBTestCase, + test_base.BigSwitchTestBase): + plugin_str = ('%s.NeutronRestProxyV2' % + test_base.RESTPROXY_PKG_PATH) + + def setUp(self, plugin=None): + test_sg_rpc.set_firewall_driver(test_sg_rpc.FIREWALL_HYBRID_DRIVER) + self.setup_config_files() + self.setup_patches() + self._attribute_map_bk_ = {} + super(RestProxySecurityGroupsTestCase, self).setUp(self.plugin_str) + plugin = manager.NeutronManager.get_plugin() + self.notifier = plugin.notifier + self.rpc = plugin.endpoints[0] + self.startHttpPatch() + + +class TestSecServerRpcCallBack(test_sg_rpc.SGServerRpcCallBackMixinTestCase, + RestProxySecurityGroupsTestCase): + pass + + +class TestSecurityGroupsMixin(test_sg.TestSecurityGroups, + 
test_sg_rpc.SGNotificationTestMixin, + RestProxySecurityGroupsTestCase): + pass diff --git a/neutron/tests/unit/bigswitch/test_servermanager.py b/neutron/tests/unit/bigswitch/test_servermanager.py new file mode 100644 index 000000000..7523b9a4d --- /dev/null +++ b/neutron/tests/unit/bigswitch/test_servermanager.py @@ -0,0 +1,467 @@ +# Copyright 2014 Big Switch Networks, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Kevin Benton, kevin.benton@bigswitch.com +# +import contextlib +import httplib +import socket +import ssl + +import mock +from oslo.config import cfg + +from neutron import manager +from neutron.openstack.common import importutils +from neutron.plugins.bigswitch import servermanager +from neutron.tests.unit.bigswitch import test_restproxy_plugin as test_rp + +SERVERMANAGER = 'neutron.plugins.bigswitch.servermanager' +HTTPCON = SERVERMANAGER + '.httplib.HTTPConnection' +HTTPSCON = SERVERMANAGER + '.HTTPSConnectionWithValidation' + + +class ServerManagerTests(test_rp.BigSwitchProxyPluginV2TestCase): + + def setUp(self): + self.socket_mock = mock.patch( + SERVERMANAGER + '.socket.create_connection').start() + self.wrap_mock = mock.patch(SERVERMANAGER + '.ssl.wrap_socket').start() + super(ServerManagerTests, self).setUp() + # http patch must not be running or it will mangle the servermanager + # import where the https connection classes are defined + self.httpPatch.stop() + self.sm = 
importutils.import_module(SERVERMANAGER) + + def test_no_servers(self): + cfg.CONF.set_override('servers', [], 'RESTPROXY') + self.assertRaises(cfg.Error, servermanager.ServerPool) + + def test_malformed_servers(self): + cfg.CONF.set_override('servers', ['1.2.3.4', '1.1.1.1:a'], 'RESTPROXY') + self.assertRaises(cfg.Error, servermanager.ServerPool) + + def test_ipv6_server_address(self): + cfg.CONF.set_override( + 'servers', ['[ABCD:EF01:2345:6789:ABCD:EF01:2345:6789]:80'], + 'RESTPROXY') + s = servermanager.ServerPool() + self.assertEqual(s.servers[0].server, + '[ABCD:EF01:2345:6789:ABCD:EF01:2345:6789]') + + def test_sticky_cert_fetch_fail(self): + pl = manager.NeutronManager.get_plugin() + pl.servers.ssl = True + with mock.patch( + 'ssl.get_server_certificate', + side_effect=Exception('There is no more entropy in the universe') + ) as sslgetmock: + self.assertRaises( + cfg.Error, + pl.servers._get_combined_cert_for_server, + *('example.org', 443) + ) + sslgetmock.assert_has_calls([mock.call(('example.org', 443))]) + + def test_consistency_watchdog_stops_with_0_polling_interval(self): + pl = manager.NeutronManager.get_plugin() + pl.servers.capabilities = ['consistency'] + self.watch_p.stop() + with mock.patch('eventlet.sleep') as smock: + # should return immediately a polling interval of 0 + pl.servers._consistency_watchdog(0) + self.assertFalse(smock.called) + + def test_consistency_watchdog(self): + pl = manager.NeutronManager.get_plugin() + pl.servers.capabilities = [] + self.watch_p.stop() + with contextlib.nested( + mock.patch('eventlet.sleep'), + mock.patch( + SERVERMANAGER + '.ServerPool.rest_call', + side_effect=servermanager.RemoteRestError( + reason='Failure to trigger except clause.' 
+ ) + ), + mock.patch( + SERVERMANAGER + '.LOG.exception', + side_effect=KeyError('Failure to break loop') + ) + ) as (smock, rmock, lmock): + # should return immediately without consistency capability + pl.servers._consistency_watchdog() + self.assertFalse(smock.called) + pl.servers.capabilities = ['consistency'] + self.assertRaises(KeyError, + pl.servers._consistency_watchdog) + rmock.assert_called_with('GET', '/health', '', {}, [], False) + self.assertEqual(1, len(lmock.mock_calls)) + + def test_consistency_hash_header(self): + # mock HTTP class instead of rest_call so we can see headers + with mock.patch(HTTPCON) as conmock: + rv = conmock.return_value + rv.getresponse.return_value.getheader.return_value = 'HASHHEADER' + with self.network(): + callheaders = rv.request.mock_calls[0][1][3] + self.assertIn('X-BSN-BVS-HASH-MATCH', callheaders) + # first call will be empty to indicate no previous state hash + self.assertEqual(callheaders['X-BSN-BVS-HASH-MATCH'], '') + # change the header that will be received on delete call + rv.getresponse.return_value.getheader.return_value = 'HASH2' + + # net delete should have used header received on create + callheaders = rv.request.mock_calls[1][1][3] + self.assertEqual(callheaders['X-BSN-BVS-HASH-MATCH'], 'HASHHEADER') + + # create again should now use header received from prev delete + with self.network(): + callheaders = rv.request.mock_calls[2][1][3] + self.assertIn('X-BSN-BVS-HASH-MATCH', callheaders) + self.assertEqual(callheaders['X-BSN-BVS-HASH-MATCH'], + 'HASH2') + + def test_file_put_contents(self): + pl = manager.NeutronManager.get_plugin() + with mock.patch(SERVERMANAGER + '.open', create=True) as omock: + pl.servers._file_put_contents('somepath', 'contents') + omock.assert_has_calls([mock.call('somepath', 'w')]) + omock.return_value.__enter__.return_value.assert_has_calls([ + mock.call.write('contents') + ]) + + def test_combine_certs_to_file(self): + pl = manager.NeutronManager.get_plugin() + with 
mock.patch(SERVERMANAGER + '.open', create=True) as omock: + omock.return_value.__enter__().read.return_value = 'certdata' + pl.servers._combine_certs_to_file(['cert1.pem', 'cert2.pem'], + 'combined.pem') + # mock shared between read and write file handles so the calls + # are mixed together + omock.assert_has_calls([ + mock.call('combined.pem', 'w'), + mock.call('cert1.pem', 'r'), + mock.call('cert2.pem', 'r'), + ], any_order=True) + omock.return_value.__enter__.return_value.assert_has_calls([ + mock.call.read(), + mock.call.write('certdata'), + mock.call.read(), + mock.call.write('certdata') + ]) + + def test_auth_header(self): + cfg.CONF.set_override('server_auth', 'username:pass', 'RESTPROXY') + sp = servermanager.ServerPool() + with mock.patch(HTTPCON) as conmock: + rv = conmock.return_value + rv.getresponse.return_value.getheader.return_value = 'HASHHEADER' + sp.rest_create_network('tenant', 'network') + callheaders = rv.request.mock_calls[0][1][3] + self.assertIn('Authorization', callheaders) + self.assertEqual(callheaders['Authorization'], + 'Basic dXNlcm5hbWU6cGFzcw==') + + def test_header_add(self): + sp = servermanager.ServerPool() + with mock.patch(HTTPCON) as conmock: + rv = conmock.return_value + rv.getresponse.return_value.getheader.return_value = 'HASHHEADER' + sp.servers[0].rest_call('GET', '/', headers={'EXTRA-HEADER': 'HI'}) + callheaders = rv.request.mock_calls[0][1][3] + # verify normal headers weren't mangled + self.assertIn('Content-type', callheaders) + self.assertEqual(callheaders['Content-type'], + 'application/json') + # verify new header made it in + self.assertIn('EXTRA-HEADER', callheaders) + self.assertEqual(callheaders['EXTRA-HEADER'], 'HI') + + def test_capabilities_retrieval(self): + sp = servermanager.ServerPool() + with mock.patch(HTTPCON) as conmock: + rv = conmock.return_value.getresponse.return_value + rv.getheader.return_value = 'HASHHEADER' + + # each server will get different capabilities + rv.read.side_effect = 
['["a","b","c"]', '["b","c","d"]'] + # pool capabilities is intersection between both + self.assertEqual(set(['b', 'c']), sp.get_capabilities()) + self.assertEqual(2, rv.read.call_count) + + # the pool should cache after the first call so no more + # HTTP calls should be made + rv.read.side_effect = ['["w","x","y"]', '["x","y","z"]'] + self.assertEqual(set(['b', 'c']), sp.get_capabilities()) + self.assertEqual(2, rv.read.call_count) + + def test_capabilities_retrieval_failure(self): + sp = servermanager.ServerPool() + with mock.patch(HTTPCON) as conmock: + rv = conmock.return_value.getresponse.return_value + rv.getheader.return_value = 'HASHHEADER' + # a failure to parse should result in an empty capability set + rv.read.return_value = 'XXXXX' + self.assertEqual([], sp.servers[0].get_capabilities()) + + # One broken server should affect all capabilities + rv.read.side_effect = ['{"a": "b"}', '["b","c","d"]'] + self.assertEqual(set(), sp.get_capabilities()) + + def test_reconnect_on_timeout_change(self): + sp = servermanager.ServerPool() + with mock.patch(HTTPCON) as conmock: + rv = conmock.return_value + rv.getresponse.return_value.getheader.return_value = 'HASHHEADER' + sp.servers[0].capabilities = ['keep-alive'] + sp.servers[0].rest_call('GET', '/', timeout=10) + # even with keep-alive enabled, a change in timeout will trigger + # a reconnect + sp.servers[0].rest_call('GET', '/', timeout=75) + conmock.assert_has_calls([ + mock.call('localhost', 9000, timeout=10), + mock.call('localhost', 9000, timeout=75), + ], any_order=True) + + def test_connect_failures(self): + sp = servermanager.ServerPool() + with mock.patch(HTTPCON, return_value=None): + resp = sp.servers[0].rest_call('GET', '/') + self.assertEqual(resp, (0, None, None, None)) + # verify same behavior on ssl class + sp.servers[0].currentcon = False + sp.servers[0].ssl = True + with mock.patch(HTTPSCON, return_value=None): + resp = sp.servers[0].rest_call('GET', '/') + self.assertEqual(resp, (0, None, None, 
None)) + + def test_reconnect_cached_connection(self): + sp = servermanager.ServerPool() + with mock.patch(HTTPCON) as conmock: + rv = conmock.return_value + rv.getresponse.return_value.getheader.return_value = 'HASH' + sp.servers[0].capabilities = ['keep-alive'] + sp.servers[0].rest_call('GET', '/first') + # raise an error on re-use to verify reconnect + # return okay the second time so the reconnect works + rv.request.side_effect = [httplib.ImproperConnectionState(), + mock.MagicMock()] + sp.servers[0].rest_call('GET', '/second') + uris = [c[1][1] for c in rv.request.mock_calls] + expected = [ + sp.base_uri + '/first', + sp.base_uri + '/second', + sp.base_uri + '/second', + ] + self.assertEqual(uris, expected) + + def test_no_reconnect_recurse_to_infinity(self): + # retry uses recursion when a reconnect is necessary + # this test makes sure it stops after 1 recursive call + sp = servermanager.ServerPool() + with mock.patch(HTTPCON) as conmock: + rv = conmock.return_value + # hash header must be string instead of mock object + rv.getresponse.return_value.getheader.return_value = 'HASH' + sp.servers[0].capabilities = ['keep-alive'] + sp.servers[0].rest_call('GET', '/first') + # after retrying once, the rest call should raise the + # exception up + rv.request.side_effect = httplib.ImproperConnectionState() + self.assertRaises(httplib.ImproperConnectionState, + sp.servers[0].rest_call, + *('GET', '/second')) + # 1 for the first call, 2 for the second with retry + self.assertEqual(rv.request.call_count, 3) + + def test_socket_error(self): + sp = servermanager.ServerPool() + with mock.patch(HTTPCON) as conmock: + conmock.return_value.request.side_effect = socket.timeout() + resp = sp.servers[0].rest_call('GET', '/') + self.assertEqual(resp, (0, None, None, None)) + + def test_cert_get_fail(self): + pl = manager.NeutronManager.get_plugin() + pl.servers.ssl = True + with mock.patch('os.path.exists', return_value=False): + self.assertRaises(cfg.Error, + 
pl.servers._get_combined_cert_for_server, + *('example.org', 443)) + + def test_cert_make_dirs(self): + pl = manager.NeutronManager.get_plugin() + pl.servers.ssl = True + cfg.CONF.set_override('ssl_sticky', False, 'RESTPROXY') + # pretend base dir exists, 3 children don't, and host cert does + with contextlib.nested( + mock.patch('os.path.exists', side_effect=[True, False, False, + False, True]), + mock.patch('os.makedirs'), + mock.patch(SERVERMANAGER + '.ServerPool._combine_certs_to_file') + ) as (exmock, makemock, combmock): + # will raise error because no certs found + self.assertIn( + 'example.org', + pl.servers._get_combined_cert_for_server('example.org', 443) + ) + base = cfg.CONF.RESTPROXY.ssl_cert_directory + hpath = base + '/host_certs/example.org.pem' + combpath = base + '/combined/example.org.pem' + combmock.assert_has_calls([mock.call([hpath], combpath)]) + self.assertEqual(exmock.call_count, 5) + self.assertEqual(makemock.call_count, 3) + + def test_no_cert_error(self): + pl = manager.NeutronManager.get_plugin() + pl.servers.ssl = True + cfg.CONF.set_override('ssl_sticky', False, 'RESTPROXY') + # pretend base dir exists and 3 children do, but host cert doesn't + with mock.patch( + 'os.path.exists', + side_effect=[True, True, True, True, False] + ) as exmock: + # will raise error because no certs found + self.assertRaises( + cfg.Error, + pl.servers._get_combined_cert_for_server, + *('example.org', 443) + ) + self.assertEqual(exmock.call_count, 5) + + def test_action_success(self): + pl = manager.NeutronManager.get_plugin() + self.assertTrue(pl.servers.action_success((200,))) + + def test_server_failure(self): + pl = manager.NeutronManager.get_plugin() + self.assertTrue(pl.servers.server_failure((404,))) + # server failure has an ignore codes option + self.assertFalse(pl.servers.server_failure((404,), + ignore_codes=[404])) + + def test_conflict_triggers_sync(self): + pl = manager.NeutronManager.get_plugin() + with mock.patch( + SERVERMANAGER + 
'.ServerProxy.rest_call', + return_value=(httplib.CONFLICT, 0, 0, 0) + ) as srestmock: + # making a call should trigger a conflict sync + pl.servers.rest_call('GET', '/', '', None, []) + srestmock.assert_has_calls([ + mock.call('GET', '/', '', None, False, reconnect=True), + mock.call('PUT', '/topology', + {'routers': [], 'networks': []}, + timeout=None) + ]) + + def test_conflict_sync_raises_error_without_topology(self): + pl = manager.NeutronManager.get_plugin() + pl.servers.get_topo_function = None + with mock.patch( + SERVERMANAGER + '.ServerProxy.rest_call', + return_value=(httplib.CONFLICT, 0, 0, 0) + ): + # making a call should trigger a conflict sync that will + # error without the topology function set + self.assertRaises( + cfg.Error, + pl.servers.rest_call, + *('GET', '/', '', None, []) + ) + + def test_floating_calls(self): + pl = manager.NeutronManager.get_plugin() + with mock.patch(SERVERMANAGER + '.ServerPool.rest_action') as ramock: + pl.servers.rest_create_floatingip('tenant', {'id': 'somefloat'}) + pl.servers.rest_update_floatingip('tenant', {'name': 'myfl'}, 'id') + pl.servers.rest_delete_floatingip('tenant', 'oldid') + ramock.assert_has_calls([ + mock.call('PUT', '/tenants/tenant/floatingips/somefloat', + errstr=u'Unable to create floating IP: %s'), + mock.call('PUT', '/tenants/tenant/floatingips/id', + errstr=u'Unable to update floating IP: %s'), + mock.call('DELETE', '/tenants/tenant/floatingips/oldid', + errstr=u'Unable to delete floating IP: %s') + ]) + + def test_HTTPSConnectionWithValidation_without_cert(self): + con = self.sm.HTTPSConnectionWithValidation( + 'www.example.org', 443, timeout=90) + con.source_address = '127.0.0.1' + con.request("GET", "/") + self.socket_mock.assert_has_calls([mock.call( + ('www.example.org', 443), 90, '127.0.0.1' + )]) + self.wrap_mock.assert_has_calls([mock.call( + self.socket_mock(), None, None, cert_reqs=ssl.CERT_NONE + )]) + self.assertEqual(con.sock, self.wrap_mock()) + + def 
test_HTTPSConnectionWithValidation_with_cert(self): + con = self.sm.HTTPSConnectionWithValidation( + 'www.example.org', 443, timeout=90) + con.combined_cert = 'SOMECERTS.pem' + con.source_address = '127.0.0.1' + con.request("GET", "/") + self.socket_mock.assert_has_calls([mock.call( + ('www.example.org', 443), 90, '127.0.0.1' + )]) + self.wrap_mock.assert_has_calls([mock.call( + self.socket_mock(), None, None, ca_certs='SOMECERTS.pem', + cert_reqs=ssl.CERT_REQUIRED + )]) + self.assertEqual(con.sock, self.wrap_mock()) + + def test_HTTPSConnectionWithValidation_tunnel(self): + tunnel_mock = mock.patch.object( + self.sm.HTTPSConnectionWithValidation, + '_tunnel').start() + con = self.sm.HTTPSConnectionWithValidation( + 'www.example.org', 443, timeout=90) + con.source_address = '127.0.0.1' + if not hasattr(con, 'set_tunnel'): + # no tunnel support in py26 + return + con.set_tunnel('myproxy.local', 3128) + con.request("GET", "/") + self.socket_mock.assert_has_calls([mock.call( + ('www.example.org', 443), 90, '127.0.0.1' + )]) + self.wrap_mock.assert_has_calls([mock.call( + self.socket_mock(), None, None, cert_reqs=ssl.CERT_NONE + )]) + # _tunnel() doesn't take any args + tunnel_mock.assert_has_calls([mock.call()]) + self.assertEqual(con._tunnel_host, 'myproxy.local') + self.assertEqual(con._tunnel_port, 3128) + self.assertEqual(con.sock, self.wrap_mock()) + + +class TestSockets(test_rp.BigSwitchProxyPluginV2TestCase): + + def setUp(self): + super(TestSockets, self).setUp() + # http patch must not be running or it will mangle the servermanager + # import where the https connection classes are defined + self.httpPatch.stop() + self.sm = importutils.import_module(SERVERMANAGER) + + def test_socket_create_attempt(self): + # exercise the socket creation to make sure it works on both python + # versions + con = self.sm.HTTPSConnectionWithValidation('127.0.0.1', 0, timeout=1) + # if httpcon was created, a connect attempt should raise a socket error + 
self.assertRaises(socket.error, con.connect) diff --git a/neutron/tests/unit/bigswitch/test_ssl.py b/neutron/tests/unit/bigswitch/test_ssl.py new file mode 100644 index 000000000..551f9cc53 --- /dev/null +++ b/neutron/tests/unit/bigswitch/test_ssl.py @@ -0,0 +1,250 @@ +# Copyright 2014 Big Switch Networks, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Kevin Benton, kevin.benton@bigswitch.com +# +import contextlib +import os + +import mock +from oslo.config import cfg +import webob.exc + +from neutron.openstack.common import log as logging +from neutron.tests.unit.bigswitch import fake_server +from neutron.tests.unit.bigswitch import test_base +from neutron.tests.unit import test_api_v2 +from neutron.tests.unit import test_db_plugin as test_plugin + +LOG = logging.getLogger(__name__) + +SERVERMANAGER = 'neutron.plugins.bigswitch.servermanager' +HTTPS = SERVERMANAGER + '.HTTPSConnectionWithValidation' +CERTCOMBINER = SERVERMANAGER + '.ServerPool._combine_certs_to_file' +FILEPUT = SERVERMANAGER + '.ServerPool._file_put_contents' +GETCACERTS = SERVERMANAGER + '.ServerPool._get_ca_cert_paths' +GETHOSTCERT = SERVERMANAGER + '.ServerPool._get_host_cert_path' +SSLGETCERT = SERVERMANAGER + '.ssl.get_server_certificate' +FAKECERTGET = 'neutron.tests.unit.bigswitch.fake_server.get_cert_contents' + + +class test_ssl_certificate_base(test_plugin.NeutronDbPluginV2TestCase, + test_base.BigSwitchTestBase): + + plugin_str = 
('%s.NeutronRestProxyV2' % + test_base.RESTPROXY_PKG_PATH) + servername = None + cert_base = None + + def _setUp(self): + self.servername = test_api_v2._uuid() + self.cert_base = cfg.CONF.RESTPROXY.ssl_cert_directory + self.host_cert_val = 'DUMMYCERTFORHOST%s' % self.servername + self.host_cert_path = os.path.join( + self.cert_base, + 'host_certs', + '%s.pem' % self.servername + ) + self.comb_cert_path = os.path.join( + self.cert_base, + 'combined', + '%s.pem' % self.servername + ) + self.ca_certs_path = os.path.join( + self.cert_base, + 'ca_certs' + ) + cfg.CONF.set_override('servers', ["%s:443" % self.servername], + 'RESTPROXY') + self.setup_patches() + + # Mock method SSL lib uses to grab cert from server + self.sslgetcert_m = mock.patch(SSLGETCERT, create=True).start() + self.sslgetcert_m.return_value = self.host_cert_val + + # Mock methods that write and read certs from the file-system + self.fileput_m = mock.patch(FILEPUT, create=True).start() + self.certcomb_m = mock.patch(CERTCOMBINER, create=True).start() + self.getcacerts_m = mock.patch(GETCACERTS, create=True).start() + + # this is used to configure what certificate contents the fake HTTPS + # lib should expect to receive + self.fake_certget_m = mock.patch(FAKECERTGET, create=True).start() + + def setUp(self): + super(test_ssl_certificate_base, self).setUp(self.plugin_str) + + +class TestSslSticky(test_ssl_certificate_base): + + def setUp(self): + self.setup_config_files() + cfg.CONF.set_override('server_ssl', True, 'RESTPROXY') + cfg.CONF.set_override('ssl_sticky', True, 'RESTPROXY') + self._setUp() + # Set fake HTTPS connection's expectation + self.fake_certget_m.return_value = self.host_cert_val + # No CA certs for this test + self.getcacerts_m.return_value = [] + super(TestSslSticky, self).setUp() + + def test_sticky_cert(self): + # SSL connection should be successful and cert should be cached + with contextlib.nested( + mock.patch(HTTPS, new=fake_server.HTTPSHostValidation), + self.network() + ): + 
# CA certs should have been checked for + self.getcacerts_m.assert_has_calls([mock.call(self.ca_certs_path)]) + # cert should have been fetched via SSL lib + self.sslgetcert_m.assert_has_calls( + [mock.call((self.servername, 443))] + ) + + # cert should have been recorded + self.fileput_m.assert_has_calls([mock.call(self.host_cert_path, + self.host_cert_val)]) + # no ca certs, so host cert only for this combined cert + self.certcomb_m.assert_has_calls([mock.call([self.host_cert_path], + self.comb_cert_path)]) + + +class TestSslHostCert(test_ssl_certificate_base): + + def setUp(self): + self.setup_config_files() + cfg.CONF.set_override('server_ssl', True, 'RESTPROXY') + cfg.CONF.set_override('ssl_sticky', False, 'RESTPROXY') + self.httpsPatch = mock.patch(HTTPS, create=True, + new=fake_server.HTTPSHostValidation) + self.httpsPatch.start() + self._setUp() + # Set fake HTTPS connection's expectation + self.fake_certget_m.return_value = self.host_cert_val + # No CA certs for this test + self.getcacerts_m.return_value = [] + # Pretend host cert exists + self.hcertpath_p = mock.patch(GETHOSTCERT, + return_value=(self.host_cert_path, True), + create=True).start() + super(TestSslHostCert, self).setUp() + + def test_host_cert(self): + # SSL connection should be successful because of pre-configured cert + with self.network(): + self.hcertpath_p.assert_has_calls([ + mock.call(os.path.join(self.cert_base, 'host_certs'), + self.servername) + ]) + # sticky is disabled, no fetching allowed + self.assertFalse(self.sslgetcert_m.call_count) + # no ca certs, so host cert is only for this combined cert + self.certcomb_m.assert_has_calls([mock.call([self.host_cert_path], + self.comb_cert_path)]) + + +class TestSslCaCert(test_ssl_certificate_base): + + def setUp(self): + self.setup_config_files() + cfg.CONF.set_override('server_ssl', True, 'RESTPROXY') + cfg.CONF.set_override('ssl_sticky', False, 'RESTPROXY') + self.httpsPatch = mock.patch(HTTPS, create=True, + 
new=fake_server.HTTPSCAValidation) + self.httpsPatch.start() + self._setUp() + + # pretend to have a few ca certs + self.getcacerts_m.return_value = ['ca1.pem', 'ca2.pem'] + + # Set fake HTTPS connection's expectation + self.fake_certget_m.return_value = 'DUMMYCERTIFICATEAUTHORITY' + + super(TestSslCaCert, self).setUp() + + def test_ca_cert(self): + # SSL connection should be successful because CA cert was present + # If not, attempting to create a network would raise an exception + with self.network(): + # sticky is disabled, no fetching allowed + self.assertFalse(self.sslgetcert_m.call_count) + # 2 CAs and no host cert so combined should only contain both CAs + self.certcomb_m.assert_has_calls([mock.call(['ca1.pem', 'ca2.pem'], + self.comb_cert_path)]) + + +class TestSslWrongHostCert(test_ssl_certificate_base): + + def setUp(self): + self.setup_config_files() + cfg.CONF.set_override('server_ssl', True, 'RESTPROXY') + cfg.CONF.set_override('ssl_sticky', True, 'RESTPROXY') + self._setUp() + + # Set fake HTTPS connection's expectation to something wrong + self.fake_certget_m.return_value = 'OTHERCERT' + + # No CA certs for this test + self.getcacerts_m.return_value = [] + + # Pretend host cert exists + self.hcertpath_p = mock.patch(GETHOSTCERT, + return_value=(self.host_cert_path, True), + create=True).start() + super(TestSslWrongHostCert, self).setUp() + + def test_error_no_cert(self): + # since there will already be a host cert, sticky should not take + # effect and there will be an error because the host cert's contents + # will be incorrect + tid = test_api_v2._uuid() + data = {} + data['network'] = {'tenant_id': tid, 'name': 'name', + 'admin_state_up': True} + with mock.patch(HTTPS, new=fake_server.HTTPSHostValidation): + req = self.new_create_request('networks', data, 'json') + res = req.get_response(self.api) + self.assertEqual(res.status_int, + webob.exc.HTTPInternalServerError.code) + self.hcertpath_p.assert_has_calls([ + 
mock.call(os.path.join(self.cert_base, 'host_certs'), + self.servername) + ]) + # sticky is enabled, but a host cert already exists so it shant fetch + self.assertFalse(self.sslgetcert_m.call_count) + # no ca certs, so host cert only for this combined cert + self.certcomb_m.assert_has_calls([mock.call([self.host_cert_path], + self.comb_cert_path)]) + + +class TestSslNoValidation(test_ssl_certificate_base): + + def setUp(self): + self.setup_config_files() + cfg.CONF.set_override('server_ssl', True, 'RESTPROXY') + cfg.CONF.set_override('ssl_sticky', False, 'RESTPROXY') + cfg.CONF.set_override('no_ssl_validation', True, 'RESTPROXY') + self._setUp() + super(TestSslNoValidation, self).setUp() + + def test_validation_disabled(self): + # SSL connection should be successful without any certificates + # If not, attempting to create a network will raise an exception + with contextlib.nested( + mock.patch(HTTPS, new=fake_server.HTTPSNoValidation), + self.network() + ): + # no sticky grabbing and no cert combining with no enforcement + self.assertFalse(self.sslgetcert_m.call_count) + self.assertFalse(self.certcomb_m.call_count) diff --git a/neutron/tests/unit/brocade/__init__.py b/neutron/tests/unit/brocade/__init__.py new file mode 100644 index 000000000..d1af8c59e --- /dev/null +++ b/neutron/tests/unit/brocade/__init__.py @@ -0,0 +1,17 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013 OpenStack Foundation. +# +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/neutron/tests/unit/brocade/test_brocade_db.py b/neutron/tests/unit/brocade/test_brocade_db.py new file mode 100644 index 000000000..127b516be --- /dev/null +++ b/neutron/tests/unit/brocade/test_brocade_db.py @@ -0,0 +1,100 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright (c) 2013 OpenStack Foundation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +""" +Unit test brocade db. 
+""" +import uuid + +from neutron import context +from neutron.plugins.brocade.db import models as brocade_db +from neutron.tests.unit import test_db_plugin as test_plugin + +TEST_VLAN = 1000 + + +class TestBrocadeDb(test_plugin.NeutronDbPluginV2TestCase): + """Test brocade db functionality.""" + + def test_create_network(self): + """Test brocade specific network db.""" + + net_id = str(uuid.uuid4()) + + # Create a network + self.context = context.get_admin_context() + brocade_db.create_network(self.context, net_id, TEST_VLAN) + + # Get the network and verify + net = brocade_db.get_network(self.context, net_id) + self.assertEqual(net['id'], net_id) + self.assertEqual(int(net['vlan']), TEST_VLAN) + + # Delete the network + brocade_db.delete_network(self.context, net['id']) + self.assertFalse(brocade_db.get_networks(self.context)) + + def test_create_port(self): + """Test brocade specific port db.""" + + net_id = str(uuid.uuid4()) + port_id = str(uuid.uuid4()) + # port_id is truncated: since the linux-bridge tap device names are + # based on truncated port id, this enables port lookups using + # tap devices + port_id = port_id[0:11] + tenant_id = str(uuid.uuid4()) + admin_state_up = True + + # Create Port + + # To create a port a network must exists, Create a network + self.context = context.get_admin_context() + brocade_db.create_network(self.context, net_id, TEST_VLAN) + + physical_interface = "em1" + brocade_db.create_port(self.context, port_id, net_id, + physical_interface, + TEST_VLAN, tenant_id, admin_state_up) + + port = brocade_db.get_port(self.context, port_id) + self.assertEqual(port['port_id'], port_id) + self.assertEqual(port['network_id'], net_id) + self.assertEqual(port['physical_interface'], physical_interface) + self.assertEqual(int(port['vlan_id']), TEST_VLAN) + self.assertEqual(port['tenant_id'], tenant_id) + self.assertEqual(port['admin_state_up'], admin_state_up) + + admin_state_up = True + brocade_db.update_port_state(self.context, port_id, 
admin_state_up) + port = brocade_db.get_port(self.context, port_id) + self.assertEqual(port['admin_state_up'], admin_state_up) + + admin_state_up = False + brocade_db.update_port_state(self.context, port_id, admin_state_up) + port = brocade_db.get_port(self.context, port_id) + self.assertEqual(port['admin_state_up'], admin_state_up) + + admin_state_up = True + brocade_db.update_port_state(self.context, port_id, admin_state_up) + port = brocade_db.get_port(self.context, port_id) + self.assertEqual(port['admin_state_up'], admin_state_up) + + # Delete Port + brocade_db.delete_port(self.context, port_id) + self.assertFalse(brocade_db.get_ports(self.context)) diff --git a/neutron/tests/unit/brocade/test_brocade_plugin.py b/neutron/tests/unit/brocade/test_brocade_plugin.py new file mode 100644 index 000000000..0e3a6ef69 --- /dev/null +++ b/neutron/tests/unit/brocade/test_brocade_plugin.py @@ -0,0 +1,74 @@ +# Copyright (c) 2012 OpenStack Foundation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import mock + +from neutron.extensions import portbindings +from neutron.openstack.common import importutils +from neutron.plugins.brocade import NeutronPlugin as brocade_plugin +from neutron.tests.unit import _test_extension_portbindings as test_bindings +from neutron.tests.unit import test_db_plugin as test_plugin + + +PLUGIN_NAME = ('neutron.plugins.brocade.' + 'NeutronPlugin.BrocadePluginV2') +NOS_DRIVER = ('neutron.plugins.brocade.' 
+ 'nos.fake_nosdriver.NOSdriver') +FAKE_IPADDRESS = '2.2.2.2' +FAKE_USERNAME = 'user' +FAKE_PASSWORD = 'password' +FAKE_PHYSICAL_INTERFACE = 'em1' + + +class BrocadePluginV2TestCase(test_plugin.NeutronDbPluginV2TestCase): + _plugin_name = PLUGIN_NAME + + def setUp(self): + + def mocked_brocade_init(self): + + self._switch = {'address': FAKE_IPADDRESS, + 'username': FAKE_USERNAME, + 'password': FAKE_PASSWORD + } + self._driver = importutils.import_object(NOS_DRIVER) + + with mock.patch.object(brocade_plugin.BrocadePluginV2, + 'brocade_init', new=mocked_brocade_init): + super(BrocadePluginV2TestCase, self).setUp(self._plugin_name) + + +class TestBrocadeBasicGet(test_plugin.TestBasicGet, + BrocadePluginV2TestCase): + pass + + +class TestBrocadeV2HTTPResponse(test_plugin.TestV2HTTPResponse, + BrocadePluginV2TestCase): + pass + + +class TestBrocadePortsV2(test_plugin.TestPortsV2, + BrocadePluginV2TestCase, + test_bindings.PortBindingsTestCase): + + VIF_TYPE = portbindings.VIF_TYPE_BRIDGE + HAS_PORT_FILTER = True + + +class TestBrocadeNetworksV2(test_plugin.TestNetworksV2, + BrocadePluginV2TestCase): + pass diff --git a/neutron/tests/unit/brocade/test_brocade_vlan.py b/neutron/tests/unit/brocade/test_brocade_vlan.py new file mode 100644 index 000000000..b5a0c33f6 --- /dev/null +++ b/neutron/tests/unit/brocade/test_brocade_vlan.py @@ -0,0 +1,73 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright (c) 2013 OpenStack Foundation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + + +""" +Test vlans alloc/dealloc. +""" + +from neutron.db import api as db +from neutron.openstack.common import context +from neutron.plugins.brocade import vlanbm as vlan_bitmap +from neutron.tests import base + + +class TestVlanBitmap(base.BaseTestCase): + """exercise Vlan bitmap .""" + + def setUp(self): + super(TestVlanBitmap, self).setUp() + db.configure_db() + self.addCleanup(db.clear_db) + self.context = context.get_admin_context() + self.context.session = db.get_session() + + def test_vlan(self): + """test vlan allocation/de-alloc.""" + + self.vbm_ = vlan_bitmap.VlanBitmap(self.context) + vlan_id = self.vbm_.get_next_vlan(None) + + # First vlan is always 2 + self.assertEqual(vlan_id, 2) + + # next vlan is always 3 + vlan_id = self.vbm_.get_next_vlan(None) + self.assertEqual(vlan_id, 3) + + # get a specific vlan i.e. 4 + vlan_id = self.vbm_.get_next_vlan(4) + self.assertEqual(vlan_id, 4) + + # get a specific vlan i.e. 5 + vlan_id = self.vbm_.get_next_vlan(5) + self.assertEqual(vlan_id, 5) + + # Skip 6 + + # get a specific vlan i.e. 7 + vlan_id = self.vbm_.get_next_vlan(7) + self.assertEqual(vlan_id, 7) + + # get a specific vlan i.e. 1900 + vlan_id = self.vbm_.get_next_vlan(1900) + self.assertEqual(vlan_id, 1900) + + # Release 4 and get next again + self.vbm_.release_vlan(4) + vlan_id = self.vbm_.get_next_vlan(None) + self.assertEqual(vlan_id, 4) diff --git a/neutron/tests/unit/cisco/__init__.py b/neutron/tests/unit/cisco/__init__.py new file mode 100644 index 000000000..7e503debd --- /dev/null +++ b/neutron/tests/unit/cisco/__init__.py @@ -0,0 +1,16 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/neutron/tests/unit/cisco/n1kv/__init__.py b/neutron/tests/unit/cisco/n1kv/__init__.py new file mode 100644 index 000000000..59a411933 --- /dev/null +++ b/neutron/tests/unit/cisco/n1kv/__init__.py @@ -0,0 +1,18 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 Cisco Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Abhishek Raut, Cisco Systems, Inc. +# diff --git a/neutron/tests/unit/cisco/n1kv/fake_client.py b/neutron/tests/unit/cisco/n1kv/fake_client.py new file mode 100755 index 000000000..2d1f0780e --- /dev/null +++ b/neutron/tests/unit/cisco/n1kv/fake_client.py @@ -0,0 +1,119 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2014 Cisco Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Abhishek Raut, Cisco Systems Inc. +# @author: Sourabh Patwardhan, Cisco Systems Inc. + +from neutron.openstack.common import log as logging +from neutron.plugins.cisco.common import cisco_exceptions as c_exc +from neutron.plugins.cisco.n1kv import n1kv_client + +LOG = logging.getLogger(__name__) + +_resource_metadata = {'port': ['id', 'macAddress', 'ipAddress', 'subnetId'], + 'vmnetwork': ['name', 'networkSegmentId', + 'networkSegment', 'portProfile', + 'portProfileId', 'tenantId', + 'portId', 'macAddress', + 'ipAddress', 'subnetId']} + + +class TestClient(n1kv_client.Client): + + def __init__(self, **kwargs): + self.broken = False + self.inject_params = False + self.total_profiles = 2 + super(TestClient, self).__init__() + + def _get_total_profiles(self): + return self.total_profiles + + def _do_request(self, method, action, body=None, headers=None): + if self.broken: + raise c_exc.VSMError(reason='VSM:Internal Server Error') + if self.inject_params and body: + body['invalidKey'] = 'catchMeIfYouCan' + if method == 'POST': + return _validate_resource(action, body) + elif method == 'GET': + if 'virtual-port-profile' in action: + return _policy_profile_generator( + self._get_total_profiles()) + else: + raise c_exc.VSMError(reason='VSM:Internal Server Error') + + +class TestClientInvalidRequest(TestClient): + + def __init__(self, **kwargs): + super(TestClientInvalidRequest, self).__init__() + self.inject_params = True + + +def _validate_resource(action, body=None): + if body: + body_set = set(body.keys()) + else: + return + if 'vm-network' in 
action and 'port' not in action: + vmnetwork_set = set(_resource_metadata['vmnetwork']) + if body_set - vmnetwork_set: + raise c_exc.VSMError(reason='Invalid Request') + elif 'port' in action: + port_set = set(_resource_metadata['port']) + if body_set - port_set: + raise c_exc.VSMError(reason='Invalid Request') + else: + return + + +def _policy_profile_generator(total_profiles): + """ + Generate policy profile response and return a dictionary. + + :param total_profiles: integer representing total number of profiles to + return + """ + profiles = {} + for num in range(1, total_profiles + 1): + name = "pp-%s" % num + profile_id = "00000000-0000-0000-0000-00000000000%s" % num + profiles[name] = {"properties": {"name": name, "id": profile_id}} + return profiles + + +def _policy_profile_generator_xml(total_profiles): + """ + Generate policy profile response in XML format. + + :param total_profiles: integer representing total number of profiles to + return + """ + xml = [""" + """] + template = ( + '' + '' + '00000000-0000-0000-0000-00000000000%(num)s' + 'pp-%(num)s' + '' + '' + ) + xml.extend(template % {'num': n} for n in range(1, total_profiles + 1)) + xml.append("") + return ''.join(xml) diff --git a/neutron/tests/unit/cisco/n1kv/test_n1kv_db.py b/neutron/tests/unit/cisco/n1kv/test_n1kv_db.py new file mode 100644 index 000000000..e806944e0 --- /dev/null +++ b/neutron/tests/unit/cisco/n1kv/test_n1kv_db.py @@ -0,0 +1,870 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 Cisco Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Juergen Brendel, Cisco Systems Inc. +# @author: Abhishek Raut, Cisco Systems Inc. +# @author: Rudrajit Tapadar, Cisco Systems Inc. + +from six import moves +from sqlalchemy.orm import exc as s_exc +from testtools import matchers + +from neutron.common import exceptions as n_exc +from neutron import context +from neutron.db import api as db +from neutron.db import db_base_plugin_v2 +from neutron.plugins.cisco.common import cisco_constants +from neutron.plugins.cisco.common import cisco_exceptions as c_exc +from neutron.plugins.cisco.db import n1kv_db_v2 +from neutron.plugins.cisco.db import n1kv_models_v2 +from neutron.tests import base +from neutron.tests.unit import test_db_plugin as test_plugin + + +PHYS_NET = 'physnet1' +PHYS_NET_2 = 'physnet2' +VLAN_MIN = 10 +VLAN_MAX = 19 +VXLAN_MIN = 5000 +VXLAN_MAX = 5009 +SEGMENT_RANGE = '200-220' +SEGMENT_RANGE_MIN_OVERLAP = '210-230' +SEGMENT_RANGE_MAX_OVERLAP = '190-209' +SEGMENT_RANGE_OVERLAP = '190-230' +TEST_NETWORK_ID = 'abcdefghijklmnopqrstuvwxyz' +TEST_NETWORK_ID2 = 'abcdefghijklmnopqrstuvwxy2' +TEST_NETWORK_ID3 = 'abcdefghijklmnopqrstuvwxy3' +TEST_NETWORK_PROFILE = {'name': 'test_profile', + 'segment_type': 'vlan', + 'physical_network': 'physnet1', + 'segment_range': '10-19'} +TEST_NETWORK_PROFILE_2 = {'name': 'test_profile_2', + 'segment_type': 'vlan', + 'physical_network': 'physnet1', + 'segment_range': SEGMENT_RANGE} +TEST_NETWORK_PROFILE_VXLAN = {'name': 'test_profile', + 'segment_type': 'overlay', + 'sub_type': 'native_vxlan', + 'segment_range': '5000-5009', + 'multicast_ip_range': '239.0.0.70-239.0.0.80'} +TEST_POLICY_PROFILE = {'id': '4a417990-76fb-11e2-bcfd-0800200c9a66', + 'name': 'test_policy_profile'} +TEST_NETWORK_PROFILE_MULTI_SEGMENT = {'name': 'test_profile', + 'segment_type': 'multi-segment'} +TEST_NETWORK_PROFILE_VLAN_TRUNK = {'name': 'test_profile', + 'segment_type': 'trunk', + 
'sub_type': 'vlan'} +TEST_NETWORK_PROFILE_VXLAN_TRUNK = {'name': 'test_profile', + 'segment_type': 'trunk', + 'sub_type': 'overlay'} + + +def _create_test_network_profile_if_not_there(session, + profile=TEST_NETWORK_PROFILE): + try: + _profile = session.query(n1kv_models_v2.NetworkProfile).filter_by( + name=profile['name']).one() + except s_exc.NoResultFound: + _profile = n1kv_db_v2.create_network_profile(session, profile) + return _profile + + +def _create_test_policy_profile_if_not_there(session, + profile=TEST_POLICY_PROFILE): + try: + _profile = session.query(n1kv_models_v2.PolicyProfile).filter_by( + name=profile['name']).one() + except s_exc.NoResultFound: + _profile = n1kv_db_v2.create_policy_profile(profile) + return _profile + + +class VlanAllocationsTest(base.BaseTestCase): + + def setUp(self): + super(VlanAllocationsTest, self).setUp() + db.configure_db() + self.session = db.get_session() + self.net_p = _create_test_network_profile_if_not_there(self.session) + n1kv_db_v2.sync_vlan_allocations(self.session, self.net_p) + self.addCleanup(db.clear_db) + + def test_sync_vlan_allocations_outside_segment_range(self): + self.assertRaises(c_exc.VlanIDNotFound, + n1kv_db_v2.get_vlan_allocation, + self.session, + PHYS_NET, + VLAN_MIN - 1) + self.assertRaises(c_exc.VlanIDNotFound, + n1kv_db_v2.get_vlan_allocation, + self.session, + PHYS_NET, + VLAN_MAX + 1) + self.assertRaises(c_exc.VlanIDNotFound, + n1kv_db_v2.get_vlan_allocation, + self.session, + PHYS_NET_2, + VLAN_MIN + 20) + self.assertRaises(c_exc.VlanIDNotFound, + n1kv_db_v2.get_vlan_allocation, + self.session, + PHYS_NET_2, + VLAN_MIN + 20) + self.assertRaises(c_exc.VlanIDNotFound, + n1kv_db_v2.get_vlan_allocation, + self.session, + PHYS_NET_2, + VLAN_MAX + 20) + + def test_sync_vlan_allocations_unallocated_vlans(self): + self.assertFalse(n1kv_db_v2.get_vlan_allocation(self.session, + PHYS_NET, + VLAN_MIN).allocated) + self.assertFalse(n1kv_db_v2.get_vlan_allocation(self.session, + PHYS_NET, + VLAN_MIN + 
1). + allocated) + self.assertFalse(n1kv_db_v2.get_vlan_allocation(self.session, + PHYS_NET, + VLAN_MAX - 1). + allocated) + self.assertFalse(n1kv_db_v2.get_vlan_allocation(self.session, + PHYS_NET, + VLAN_MAX).allocated) + + def test_vlan_pool(self): + vlan_ids = set() + for x in moves.xrange(VLAN_MIN, VLAN_MAX + 1): + (physical_network, seg_type, + vlan_id, m_ip) = n1kv_db_v2.reserve_vlan(self.session, self.net_p) + self.assertEqual(physical_network, PHYS_NET) + self.assertThat(vlan_id, matchers.GreaterThan(VLAN_MIN - 1)) + self.assertThat(vlan_id, matchers.LessThan(VLAN_MAX + 1)) + vlan_ids.add(vlan_id) + + self.assertRaises(n_exc.NoNetworkAvailable, + n1kv_db_v2.reserve_vlan, + self.session, + self.net_p) + + n1kv_db_v2.release_vlan(self.session, PHYS_NET, vlan_ids.pop()) + physical_network, seg_type, vlan_id, m_ip = (n1kv_db_v2.reserve_vlan( + self.session, self.net_p)) + self.assertEqual(physical_network, PHYS_NET) + self.assertThat(vlan_id, matchers.GreaterThan(VLAN_MIN - 1)) + self.assertThat(vlan_id, matchers.LessThan(VLAN_MAX + 1)) + vlan_ids.add(vlan_id) + + for vlan_id in vlan_ids: + n1kv_db_v2.release_vlan(self.session, PHYS_NET, vlan_id) + + def test_specific_vlan_inside_pool(self): + vlan_id = VLAN_MIN + 5 + self.assertFalse(n1kv_db_v2.get_vlan_allocation(self.session, + PHYS_NET, + vlan_id).allocated) + n1kv_db_v2.reserve_specific_vlan(self.session, PHYS_NET, vlan_id) + self.assertTrue(n1kv_db_v2.get_vlan_allocation(self.session, + PHYS_NET, + vlan_id).allocated) + + self.assertRaises(n_exc.VlanIdInUse, + n1kv_db_v2.reserve_specific_vlan, + self.session, + PHYS_NET, + vlan_id) + + n1kv_db_v2.release_vlan(self.session, PHYS_NET, vlan_id) + self.assertFalse(n1kv_db_v2.get_vlan_allocation(self.session, + PHYS_NET, + vlan_id).allocated) + + def test_specific_vlan_outside_pool(self): + vlan_id = VLAN_MAX + 5 + self.assertRaises(c_exc.VlanIDNotFound, + n1kv_db_v2.get_vlan_allocation, + self.session, + PHYS_NET, + vlan_id) + 
self.assertRaises(c_exc.VlanIDOutsidePool, + n1kv_db_v2.reserve_specific_vlan, + self.session, + PHYS_NET, + vlan_id) + + +class VxlanAllocationsTest(base.BaseTestCase, + n1kv_db_v2.NetworkProfile_db_mixin): + + def setUp(self): + super(VxlanAllocationsTest, self).setUp() + db.configure_db() + self.session = db.get_session() + self.net_p = _create_test_network_profile_if_not_there( + self.session, TEST_NETWORK_PROFILE_VXLAN) + n1kv_db_v2.sync_vxlan_allocations(self.session, self.net_p) + self.addCleanup(db.clear_db) + + def test_sync_vxlan_allocations_outside_segment_range(self): + self.assertRaises(c_exc.VxlanIDNotFound, + n1kv_db_v2.get_vxlan_allocation, + self.session, + VXLAN_MIN - 1) + self.assertRaises(c_exc.VxlanIDNotFound, + n1kv_db_v2.get_vxlan_allocation, + self.session, + VXLAN_MAX + 1) + + def test_sync_vxlan_allocations_unallocated_vxlans(self): + self.assertFalse(n1kv_db_v2.get_vxlan_allocation(self.session, + VXLAN_MIN).allocated) + self.assertFalse(n1kv_db_v2.get_vxlan_allocation(self.session, + VXLAN_MIN + 1). + allocated) + self.assertFalse(n1kv_db_v2.get_vxlan_allocation(self.session, + VXLAN_MAX - 1). 
+ allocated) + self.assertFalse(n1kv_db_v2.get_vxlan_allocation(self.session, + VXLAN_MAX).allocated) + + def test_vxlan_pool(self): + vxlan_ids = set() + for x in moves.xrange(VXLAN_MIN, VXLAN_MAX + 1): + vxlan = n1kv_db_v2.reserve_vxlan(self.session, self.net_p) + vxlan_id = vxlan[2] + self.assertThat(vxlan_id, matchers.GreaterThan(VXLAN_MIN - 1)) + self.assertThat(vxlan_id, matchers.LessThan(VXLAN_MAX + 1)) + vxlan_ids.add(vxlan_id) + + self.assertRaises(n_exc.NoNetworkAvailable, + n1kv_db_v2.reserve_vxlan, + self.session, + self.net_p) + n1kv_db_v2.release_vxlan(self.session, vxlan_ids.pop()) + vxlan = n1kv_db_v2.reserve_vxlan(self.session, self.net_p) + vxlan_id = vxlan[2] + self.assertThat(vxlan_id, matchers.GreaterThan(VXLAN_MIN - 1)) + self.assertThat(vxlan_id, matchers.LessThan(VXLAN_MAX + 1)) + vxlan_ids.add(vxlan_id) + + for vxlan_id in vxlan_ids: + n1kv_db_v2.release_vxlan(self.session, vxlan_id) + n1kv_db_v2.delete_network_profile(self.session, self.net_p.id) + + def test_specific_vxlan_inside_pool(self): + vxlan_id = VXLAN_MIN + 5 + self.assertFalse(n1kv_db_v2.get_vxlan_allocation(self.session, + vxlan_id).allocated) + n1kv_db_v2.reserve_specific_vxlan(self.session, vxlan_id) + self.assertTrue(n1kv_db_v2.get_vxlan_allocation(self.session, + vxlan_id).allocated) + + self.assertRaises(c_exc.VxlanIDInUse, + n1kv_db_v2.reserve_specific_vxlan, + self.session, + vxlan_id) + + n1kv_db_v2.release_vxlan(self.session, vxlan_id) + self.assertFalse(n1kv_db_v2.get_vxlan_allocation(self.session, + vxlan_id).allocated) + + def test_specific_vxlan_outside_pool(self): + vxlan_id = VXLAN_MAX + 5 + self.assertRaises(c_exc.VxlanIDNotFound, + n1kv_db_v2.get_vxlan_allocation, + self.session, + vxlan_id) + self.assertRaises(c_exc.VxlanIDOutsidePool, + n1kv_db_v2.reserve_specific_vxlan, + self.session, + vxlan_id) + + +class NetworkBindingsTest(test_plugin.NeutronDbPluginV2TestCase): + + def setUp(self): + super(NetworkBindingsTest, self).setUp() + db.configure_db() + 
self.session = db.get_session() + self.addCleanup(db.clear_db) + + def test_add_network_binding(self): + with self.network() as network: + TEST_NETWORK_ID = network['network']['id'] + + self.assertRaises(c_exc.NetworkBindingNotFound, + n1kv_db_v2.get_network_binding, + self.session, + TEST_NETWORK_ID) + + p = _create_test_network_profile_if_not_there(self.session) + n1kv_db_v2.add_network_binding( + self.session, TEST_NETWORK_ID, 'vlan', + PHYS_NET, 1234, '0.0.0.0', p.id, None) + binding = n1kv_db_v2.get_network_binding( + self.session, TEST_NETWORK_ID) + self.assertIsNotNone(binding) + self.assertEqual(binding.network_id, TEST_NETWORK_ID) + self.assertEqual(binding.network_type, 'vlan') + self.assertEqual(binding.physical_network, PHYS_NET) + self.assertEqual(binding.segmentation_id, 1234) + + def test_create_multi_segment_network(self): + with self.network() as network: + TEST_NETWORK_ID = network['network']['id'] + + self.assertRaises(c_exc.NetworkBindingNotFound, + n1kv_db_v2.get_network_binding, + self.session, + TEST_NETWORK_ID) + + p = _create_test_network_profile_if_not_there( + self.session, + TEST_NETWORK_PROFILE_MULTI_SEGMENT) + n1kv_db_v2.add_network_binding( + self.session, TEST_NETWORK_ID, 'multi-segment', + None, 0, '0.0.0.0', p.id, None) + binding = n1kv_db_v2.get_network_binding( + self.session, TEST_NETWORK_ID) + self.assertIsNotNone(binding) + self.assertEqual(binding.network_id, TEST_NETWORK_ID) + self.assertEqual(binding.network_type, 'multi-segment') + self.assertIsNone(binding.physical_network) + self.assertEqual(binding.segmentation_id, 0) + + def test_add_multi_segment_binding(self): + with self.network() as network: + TEST_NETWORK_ID = network['network']['id'] + + self.assertRaises(c_exc.NetworkBindingNotFound, + n1kv_db_v2.get_network_binding, + self.session, + TEST_NETWORK_ID) + + p = _create_test_network_profile_if_not_there( + self.session, + TEST_NETWORK_PROFILE_MULTI_SEGMENT) + n1kv_db_v2.add_network_binding( + self.session, 
TEST_NETWORK_ID, 'multi-segment', + None, 0, '0.0.0.0', p.id, + [(TEST_NETWORK_ID2, TEST_NETWORK_ID3)]) + binding = n1kv_db_v2.get_network_binding( + self.session, TEST_NETWORK_ID) + self.assertIsNotNone(binding) + self.assertEqual(binding.network_id, TEST_NETWORK_ID) + self.assertEqual(binding.network_type, 'multi-segment') + self.assertIsNone(binding.physical_network) + self.assertEqual(binding.segmentation_id, 0) + ms_binding = (n1kv_db_v2.get_multi_segment_network_binding( + self.session, TEST_NETWORK_ID, + (TEST_NETWORK_ID2, TEST_NETWORK_ID3))) + self.assertIsNotNone(ms_binding) + self.assertEqual(ms_binding.multi_segment_id, TEST_NETWORK_ID) + self.assertEqual(ms_binding.segment1_id, TEST_NETWORK_ID2) + self.assertEqual(ms_binding.segment2_id, TEST_NETWORK_ID3) + ms_members = (n1kv_db_v2.get_multi_segment_members( + self.session, TEST_NETWORK_ID)) + self.assertEqual(ms_members, + [(TEST_NETWORK_ID2, TEST_NETWORK_ID3)]) + self.assertTrue(n1kv_db_v2.is_multi_segment_member( + self.session, TEST_NETWORK_ID2)) + self.assertTrue(n1kv_db_v2.is_multi_segment_member( + self.session, TEST_NETWORK_ID3)) + n1kv_db_v2.del_multi_segment_binding( + self.session, TEST_NETWORK_ID, + [(TEST_NETWORK_ID2, TEST_NETWORK_ID3)]) + ms_members = (n1kv_db_v2.get_multi_segment_members( + self.session, TEST_NETWORK_ID)) + self.assertEqual(ms_members, []) + + def test_create_vlan_trunk_network(self): + with self.network() as network: + TEST_NETWORK_ID = network['network']['id'] + + self.assertRaises(c_exc.NetworkBindingNotFound, + n1kv_db_v2.get_network_binding, + self.session, + TEST_NETWORK_ID) + + p = _create_test_network_profile_if_not_there( + self.session, + TEST_NETWORK_PROFILE_VLAN_TRUNK) + n1kv_db_v2.add_network_binding( + self.session, TEST_NETWORK_ID, 'trunk', + None, 0, '0.0.0.0', p.id, None) + binding = n1kv_db_v2.get_network_binding( + self.session, TEST_NETWORK_ID) + self.assertIsNotNone(binding) + self.assertEqual(binding.network_id, TEST_NETWORK_ID) + 
self.assertEqual(binding.network_type, 'trunk') + self.assertIsNone(binding.physical_network) + self.assertEqual(binding.segmentation_id, 0) + + def test_create_vxlan_trunk_network(self): + with self.network() as network: + TEST_NETWORK_ID = network['network']['id'] + + self.assertRaises(c_exc.NetworkBindingNotFound, + n1kv_db_v2.get_network_binding, + self.session, + TEST_NETWORK_ID) + + p = _create_test_network_profile_if_not_there( + self.session, + TEST_NETWORK_PROFILE_VXLAN_TRUNK) + n1kv_db_v2.add_network_binding( + self.session, TEST_NETWORK_ID, 'trunk', + None, 0, '0.0.0.0', p.id, None) + binding = n1kv_db_v2.get_network_binding( + self.session, TEST_NETWORK_ID) + self.assertIsNotNone(binding) + self.assertEqual(binding.network_id, TEST_NETWORK_ID) + self.assertEqual(binding.network_type, 'trunk') + self.assertIsNone(binding.physical_network) + self.assertEqual(binding.segmentation_id, 0) + + def test_add_vlan_trunk_binding(self): + with self.network() as network1: + with self.network() as network2: + TEST_NETWORK_ID = network1['network']['id'] + + self.assertRaises(c_exc.NetworkBindingNotFound, + n1kv_db_v2.get_network_binding, + self.session, + TEST_NETWORK_ID) + TEST_NETWORK_ID2 = network2['network']['id'] + self.assertRaises(c_exc.NetworkBindingNotFound, + n1kv_db_v2.get_network_binding, + self.session, + TEST_NETWORK_ID2) + p_v = _create_test_network_profile_if_not_there(self.session) + n1kv_db_v2.add_network_binding( + self.session, TEST_NETWORK_ID2, 'vlan', + PHYS_NET, 1234, '0.0.0.0', p_v.id, None) + p = _create_test_network_profile_if_not_there( + self.session, + TEST_NETWORK_PROFILE_VLAN_TRUNK) + n1kv_db_v2.add_network_binding( + self.session, TEST_NETWORK_ID, 'trunk', + None, 0, '0.0.0.0', p.id, [(TEST_NETWORK_ID2, 0)]) + binding = n1kv_db_v2.get_network_binding( + self.session, TEST_NETWORK_ID) + self.assertIsNotNone(binding) + self.assertEqual(binding.network_id, TEST_NETWORK_ID) + self.assertEqual(binding.network_type, 'trunk') + 
self.assertEqual(binding.physical_network, PHYS_NET) + self.assertEqual(binding.segmentation_id, 0) + t_binding = (n1kv_db_v2.get_trunk_network_binding( + self.session, TEST_NETWORK_ID, + (TEST_NETWORK_ID2, 0))) + self.assertIsNotNone(t_binding) + self.assertEqual(t_binding.trunk_segment_id, TEST_NETWORK_ID) + self.assertEqual(t_binding.segment_id, TEST_NETWORK_ID2) + self.assertEqual(t_binding.dot1qtag, '0') + t_members = (n1kv_db_v2.get_trunk_members( + self.session, TEST_NETWORK_ID)) + self.assertEqual(t_members, + [(TEST_NETWORK_ID2, '0')]) + self.assertTrue(n1kv_db_v2.is_trunk_member( + self.session, TEST_NETWORK_ID2)) + n1kv_db_v2.del_trunk_segment_binding( + self.session, TEST_NETWORK_ID, + [(TEST_NETWORK_ID2, '0')]) + t_members = (n1kv_db_v2.get_multi_segment_members( + self.session, TEST_NETWORK_ID)) + self.assertEqual(t_members, []) + + def test_add_vxlan_trunk_binding(self): + with self.network() as network1: + with self.network() as network2: + TEST_NETWORK_ID = network1['network']['id'] + + self.assertRaises(c_exc.NetworkBindingNotFound, + n1kv_db_v2.get_network_binding, + self.session, + TEST_NETWORK_ID) + TEST_NETWORK_ID2 = network2['network']['id'] + self.assertRaises(c_exc.NetworkBindingNotFound, + n1kv_db_v2.get_network_binding, + self.session, + TEST_NETWORK_ID2) + p_v = _create_test_network_profile_if_not_there( + self.session, TEST_NETWORK_PROFILE_VXLAN_TRUNK) + n1kv_db_v2.add_network_binding( + self.session, TEST_NETWORK_ID2, 'overlay', + None, 5100, '224.10.10.10', p_v.id, None) + p = _create_test_network_profile_if_not_there( + self.session, + TEST_NETWORK_PROFILE_VXLAN_TRUNK) + n1kv_db_v2.add_network_binding( + self.session, TEST_NETWORK_ID, 'trunk', + None, 0, '0.0.0.0', p.id, + [(TEST_NETWORK_ID2, 5)]) + binding = n1kv_db_v2.get_network_binding( + self.session, TEST_NETWORK_ID) + self.assertIsNotNone(binding) + self.assertEqual(binding.network_id, TEST_NETWORK_ID) + self.assertEqual(binding.network_type, 'trunk') + 
self.assertIsNone(binding.physical_network) + self.assertEqual(binding.segmentation_id, 0) + t_binding = (n1kv_db_v2.get_trunk_network_binding( + self.session, TEST_NETWORK_ID, + (TEST_NETWORK_ID2, '5'))) + self.assertIsNotNone(t_binding) + self.assertEqual(t_binding.trunk_segment_id, TEST_NETWORK_ID) + self.assertEqual(t_binding.segment_id, TEST_NETWORK_ID2) + self.assertEqual(t_binding.dot1qtag, '5') + t_members = (n1kv_db_v2.get_trunk_members( + self.session, TEST_NETWORK_ID)) + self.assertEqual(t_members, + [(TEST_NETWORK_ID2, '5')]) + self.assertTrue(n1kv_db_v2.is_trunk_member( + self.session, TEST_NETWORK_ID2)) + n1kv_db_v2.del_trunk_segment_binding( + self.session, TEST_NETWORK_ID, + [(TEST_NETWORK_ID2, '5')]) + t_members = (n1kv_db_v2.get_multi_segment_members( + self.session, TEST_NETWORK_ID)) + self.assertEqual(t_members, []) + + +class NetworkProfileTests(base.BaseTestCase, + n1kv_db_v2.NetworkProfile_db_mixin): + + def setUp(self): + super(NetworkProfileTests, self).setUp() + db.configure_db() + self.session = db.get_session() + self.addCleanup(db.clear_db) + + def test_create_network_profile(self): + _db_profile = n1kv_db_v2.create_network_profile(self.session, + TEST_NETWORK_PROFILE) + self.assertIsNotNone(_db_profile) + db_profile = (self.session.query(n1kv_models_v2.NetworkProfile). 
+ filter_by(name=TEST_NETWORK_PROFILE['name']).one()) + self.assertIsNotNone(db_profile) + self.assertEqual(_db_profile.id, db_profile.id) + self.assertEqual(_db_profile.name, db_profile.name) + self.assertEqual(_db_profile.segment_type, db_profile.segment_type) + self.assertEqual(_db_profile.segment_range, db_profile.segment_range) + self.assertEqual(_db_profile.multicast_ip_index, + db_profile.multicast_ip_index) + self.assertEqual(_db_profile.multicast_ip_range, + db_profile.multicast_ip_range) + n1kv_db_v2.delete_network_profile(self.session, _db_profile.id) + + def test_create_multi_segment_network_profile(self): + _db_profile = (n1kv_db_v2.create_network_profile( + self.session, TEST_NETWORK_PROFILE_MULTI_SEGMENT)) + self.assertIsNotNone(_db_profile) + db_profile = ( + self.session.query( + n1kv_models_v2.NetworkProfile).filter_by( + name=TEST_NETWORK_PROFILE_MULTI_SEGMENT['name']) + .one()) + self.assertIsNotNone(db_profile) + self.assertEqual(_db_profile.id, db_profile.id) + self.assertEqual(_db_profile.name, db_profile.name) + self.assertEqual(_db_profile.segment_type, db_profile.segment_type) + self.assertEqual(_db_profile.segment_range, db_profile.segment_range) + self.assertEqual(_db_profile.multicast_ip_index, + db_profile.multicast_ip_index) + self.assertEqual(_db_profile.multicast_ip_range, + db_profile.multicast_ip_range) + n1kv_db_v2.delete_network_profile(self.session, _db_profile.id) + + def test_create_vlan_trunk_network_profile(self): + _db_profile = (n1kv_db_v2.create_network_profile( + self.session, TEST_NETWORK_PROFILE_VLAN_TRUNK)) + self.assertIsNotNone(_db_profile) + db_profile = (self.session.query(n1kv_models_v2.NetworkProfile). + filter_by(name=TEST_NETWORK_PROFILE_VLAN_TRUNK['name']). 
+ one()) + self.assertIsNotNone(db_profile) + self.assertEqual(_db_profile.id, db_profile.id) + self.assertEqual(_db_profile.name, db_profile.name) + self.assertEqual(_db_profile.segment_type, db_profile.segment_type) + self.assertEqual(_db_profile.segment_range, db_profile.segment_range) + self.assertEqual(_db_profile.multicast_ip_index, + db_profile.multicast_ip_index) + self.assertEqual(_db_profile.multicast_ip_range, + db_profile.multicast_ip_range) + self.assertEqual(_db_profile.sub_type, db_profile.sub_type) + n1kv_db_v2.delete_network_profile(self.session, _db_profile.id) + + def test_create_vxlan_trunk_network_profile(self): + _db_profile = (n1kv_db_v2.create_network_profile( + self.session, TEST_NETWORK_PROFILE_VXLAN_TRUNK)) + self.assertIsNotNone(_db_profile) + db_profile = (self.session.query(n1kv_models_v2.NetworkProfile). + filter_by(name=TEST_NETWORK_PROFILE_VXLAN_TRUNK['name']). + one()) + self.assertIsNotNone(db_profile) + self.assertEqual(_db_profile.id, db_profile.id) + self.assertEqual(_db_profile.name, db_profile.name) + self.assertEqual(_db_profile.segment_type, db_profile.segment_type) + self.assertEqual(_db_profile.segment_range, db_profile.segment_range) + self.assertEqual(_db_profile.multicast_ip_index, + db_profile.multicast_ip_index) + self.assertEqual(_db_profile.multicast_ip_range, + db_profile.multicast_ip_range) + self.assertEqual(_db_profile.sub_type, db_profile.sub_type) + n1kv_db_v2.delete_network_profile(self.session, _db_profile.id) + + def test_create_network_profile_overlap(self): + _db_profile = n1kv_db_v2.create_network_profile(self.session, + TEST_NETWORK_PROFILE_2) + ctx = context.get_admin_context() + TEST_NETWORK_PROFILE_2['name'] = 'net-profile-min-overlap' + TEST_NETWORK_PROFILE_2['segment_range'] = SEGMENT_RANGE_MIN_OVERLAP + test_net_profile = {'network_profile': TEST_NETWORK_PROFILE_2} + self.assertRaises(n_exc.InvalidInput, + self.create_network_profile, + ctx, + test_net_profile) + + TEST_NETWORK_PROFILE_2['name'] 
= 'net-profile-max-overlap' + TEST_NETWORK_PROFILE_2['segment_range'] = SEGMENT_RANGE_MAX_OVERLAP + test_net_profile = {'network_profile': TEST_NETWORK_PROFILE_2} + self.assertRaises(n_exc.InvalidInput, + self.create_network_profile, + ctx, + test_net_profile) + + TEST_NETWORK_PROFILE_2['name'] = 'net-profile-overlap' + TEST_NETWORK_PROFILE_2['segment_range'] = SEGMENT_RANGE_OVERLAP + test_net_profile = {'network_profile': TEST_NETWORK_PROFILE_2} + self.assertRaises(n_exc.InvalidInput, + self.create_network_profile, + ctx, + test_net_profile) + n1kv_db_v2.delete_network_profile(self.session, _db_profile.id) + + def test_delete_network_profile(self): + try: + profile = (self.session.query(n1kv_models_v2.NetworkProfile). + filter_by(name=TEST_NETWORK_PROFILE['name']).one()) + except s_exc.NoResultFound: + profile = n1kv_db_v2.create_network_profile(self.session, + TEST_NETWORK_PROFILE) + + n1kv_db_v2.delete_network_profile(self.session, profile.id) + try: + self.session.query(n1kv_models_v2.NetworkProfile).filter_by( + name=TEST_NETWORK_PROFILE['name']).one() + except s_exc.NoResultFound: + pass + else: + self.fail("Network Profile (%s) was not deleted" % + TEST_NETWORK_PROFILE['name']) + + def test_update_network_profile(self): + TEST_PROFILE_1 = {'name': 'test_profile_1'} + profile = _create_test_network_profile_if_not_there(self.session) + updated_profile = n1kv_db_v2.update_network_profile(self.session, + profile.id, + TEST_PROFILE_1) + self.assertEqual(updated_profile.name, TEST_PROFILE_1['name']) + n1kv_db_v2.delete_network_profile(self.session, profile.id) + + def test_get_network_profile(self): + profile = n1kv_db_v2.create_network_profile(self.session, + TEST_NETWORK_PROFILE) + got_profile = n1kv_db_v2.get_network_profile(self.session, profile.id) + self.assertEqual(profile.id, got_profile.id) + self.assertEqual(profile.name, got_profile.name) + n1kv_db_v2.delete_network_profile(self.session, profile.id) + + def test_get_network_profiles(self): + 
test_profiles = [{'name': 'test_profile1', + 'segment_type': 'vlan', + 'physical_network': 'phys1', + 'segment_range': '200-210'}, + {'name': 'test_profile2', + 'segment_type': 'vlan', + 'physical_network': 'phys1', + 'segment_range': '211-220'}, + {'name': 'test_profile3', + 'segment_type': 'vlan', + 'physical_network': 'phys1', + 'segment_range': '221-230'}, + {'name': 'test_profile4', + 'segment_type': 'vlan', + 'physical_network': 'phys1', + 'segment_range': '231-240'}, + {'name': 'test_profile5', + 'segment_type': 'vlan', + 'physical_network': 'phys1', + 'segment_range': '241-250'}, + {'name': 'test_profile6', + 'segment_type': 'vlan', + 'physical_network': 'phys1', + 'segment_range': '251-260'}, + {'name': 'test_profile7', + 'segment_type': 'vlan', + 'physical_network': 'phys1', + 'segment_range': '261-270'}] + [n1kv_db_v2.create_network_profile(self.session, p) + for p in test_profiles] + # TODO(abhraut): Fix this test to work with real tenant_td + profiles = n1kv_db_v2._get_network_profiles(db_session=self.session) + self.assertEqual(len(test_profiles), len(list(profiles))) + + +class PolicyProfileTests(base.BaseTestCase): + + def setUp(self): + super(PolicyProfileTests, self).setUp() + db.configure_db() + self.session = db.get_session() + self.addCleanup(db.clear_db) + + def test_create_policy_profile(self): + _db_profile = n1kv_db_v2.create_policy_profile(TEST_POLICY_PROFILE) + self.assertIsNotNone(_db_profile) + db_profile = (self.session.query(n1kv_models_v2.PolicyProfile). 
+ filter_by(name=TEST_POLICY_PROFILE['name']).one)() + self.assertIsNotNone(db_profile) + self.assertTrue(_db_profile.id == db_profile.id) + self.assertTrue(_db_profile.name == db_profile.name) + + def test_delete_policy_profile(self): + profile = _create_test_policy_profile_if_not_there(self.session) + n1kv_db_v2.delete_policy_profile(profile.id) + try: + self.session.query(n1kv_models_v2.PolicyProfile).filter_by( + name=TEST_POLICY_PROFILE['name']).one() + except s_exc.NoResultFound: + pass + else: + self.fail("Policy Profile (%s) was not deleted" % + TEST_POLICY_PROFILE['name']) + + def test_update_policy_profile(self): + TEST_PROFILE_1 = {'name': 'test_profile_1'} + profile = _create_test_policy_profile_if_not_there(self.session) + updated_profile = n1kv_db_v2.update_policy_profile(self.session, + profile.id, + TEST_PROFILE_1) + self.assertEqual(updated_profile.name, TEST_PROFILE_1['name']) + + def test_get_policy_profile(self): + profile = _create_test_policy_profile_if_not_there(self.session) + got_profile = n1kv_db_v2.get_policy_profile(self.session, profile.id) + self.assertEqual(profile.id, got_profile.id) + self.assertEqual(profile.name, got_profile.name) + + +class ProfileBindingTests(base.BaseTestCase, + n1kv_db_v2.NetworkProfile_db_mixin, + db_base_plugin_v2.CommonDbMixin): + + def setUp(self): + super(ProfileBindingTests, self).setUp() + db.configure_db() + self.session = db.get_session() + self.addCleanup(db.clear_db) + + def _create_test_binding_if_not_there(self, tenant_id, profile_id, + profile_type): + try: + _binding = (self.session.query(n1kv_models_v2.ProfileBinding). 
+ filter_by(profile_type=profile_type, + tenant_id=tenant_id, + profile_id=profile_id).one()) + except s_exc.NoResultFound: + _binding = n1kv_db_v2.create_profile_binding(self.session, + tenant_id, + profile_id, + profile_type) + return _binding + + def test_create_profile_binding(self): + test_tenant_id = "d434dd90-76ec-11e2-bcfd-0800200c9a66" + test_profile_id = "dd7b9741-76ec-11e2-bcfd-0800200c9a66" + test_profile_type = "network" + n1kv_db_v2.create_profile_binding(self.session, + test_tenant_id, + test_profile_id, + test_profile_type) + try: + self.session.query(n1kv_models_v2.ProfileBinding).filter_by( + profile_type=test_profile_type, + tenant_id=test_tenant_id, + profile_id=test_profile_id).one() + except s_exc.MultipleResultsFound: + self.fail("Bindings must be unique") + except s_exc.NoResultFound: + self.fail("Could not create Profile Binding") + + def test_get_profile_binding(self): + test_tenant_id = "d434dd90-76ec-11e2-bcfd-0800200c9a66" + test_profile_id = "dd7b9741-76ec-11e2-bcfd-0800200c9a66" + test_profile_type = "network" + self._create_test_binding_if_not_there(test_tenant_id, + test_profile_id, + test_profile_type) + binding = n1kv_db_v2.get_profile_binding(self.session, + test_tenant_id, + test_profile_id) + self.assertEqual(binding.tenant_id, test_tenant_id) + self.assertEqual(binding.profile_id, test_profile_id) + self.assertEqual(binding.profile_type, test_profile_type) + + def test_get_profile_binding_not_found(self): + self.assertRaises( + c_exc.ProfileTenantBindingNotFound, + n1kv_db_v2.get_profile_binding, self.session, "123", "456") + + def test_delete_profile_binding(self): + test_tenant_id = "d434dd90-76ec-11e2-bcfd-0800200c9a66" + test_profile_id = "dd7b9741-76ec-11e2-bcfd-0800200c9a66" + test_profile_type = "network" + self._create_test_binding_if_not_there(test_tenant_id, + test_profile_id, + test_profile_type) + n1kv_db_v2.delete_profile_binding(self.session, + test_tenant_id, + test_profile_id) + q = 
(self.session.query(n1kv_models_v2.ProfileBinding).filter_by( + profile_type=test_profile_type, + tenant_id=test_tenant_id, + profile_id=test_profile_id)) + self.assertFalse(q.count()) + + def test_default_tenant_replace(self): + ctx = context.get_admin_context() + ctx.tenant_id = "d434dd90-76ec-11e2-bcfd-0800200c9a66" + test_profile_id = "AAAAAAAA-76ec-11e2-bcfd-0800200c9a66" + test_profile_type = "policy" + n1kv_db_v2.create_profile_binding(self.session, + cisco_constants.TENANT_ID_NOT_SET, + test_profile_id, + test_profile_type) + network_profile = {"network_profile": TEST_NETWORK_PROFILE} + self.create_network_profile(ctx, network_profile) + binding = n1kv_db_v2.get_profile_binding(self.session, + ctx.tenant_id, + test_profile_id) + self.assertRaises( + c_exc.ProfileTenantBindingNotFound, + n1kv_db_v2.get_profile_binding, + self.session, + cisco_constants.TENANT_ID_NOT_SET, + test_profile_id) + self.assertNotEqual(binding.tenant_id, + cisco_constants.TENANT_ID_NOT_SET) diff --git a/neutron/tests/unit/cisco/n1kv/test_n1kv_plugin.py b/neutron/tests/unit/cisco/n1kv/test_n1kv_plugin.py new file mode 100644 index 000000000..b2d29de61 --- /dev/null +++ b/neutron/tests/unit/cisco/n1kv/test_n1kv_plugin.py @@ -0,0 +1,709 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 Cisco Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Juergen Brendel, Cisco Systems Inc. +# @author: Abhishek Raut, Cisco Systems Inc. 
+# @author: Sourabh Patwardhan, Cisco Systems Inc. + +import mock + +from neutron.api import extensions as neutron_extensions +from neutron.api.v2 import attributes +from neutron import context +import neutron.db.api as db +from neutron.extensions import portbindings +from neutron import manager +from neutron.plugins.cisco.common import cisco_exceptions as c_exc +from neutron.plugins.cisco.db import n1kv_db_v2 +from neutron.plugins.cisco.db import network_db_v2 as cdb +from neutron.plugins.cisco import extensions +from neutron.plugins.cisco.extensions import n1kv +from neutron.plugins.cisco.extensions import network_profile +from neutron.plugins.cisco.n1kv import n1kv_client +from neutron.plugins.cisco.n1kv import n1kv_neutron_plugin +from neutron.tests.unit import _test_extension_portbindings as test_bindings +from neutron.tests.unit.cisco.n1kv import fake_client +from neutron.tests.unit import test_api_v2 +from neutron.tests.unit import test_db_plugin as test_plugin +from neutron.tests.unit import test_l3_plugin +from neutron.tests.unit import test_l3_schedulers + + +PHYS_NET = 'some-phys-net' +VLAN_MIN = 100 +VLAN_MAX = 110 + + +class FakeResponse(object): + + """ + This object is returned by mocked requests lib instead of normal response. + + Initialize it with the status code, header and buffer contents you wish to + return. 
+ + """ + def __init__(self, status, response_text, headers): + self.buffer = response_text + self.status_code = status + self.headers = headers + + def json(self, *args, **kwargs): + return self.buffer + + +def _fake_setup_vsm(self): + """Fake establish Communication with Cisco Nexus1000V VSM.""" + self.agent_vsm = True + self._populate_policy_profiles() + + +class NetworkProfileTestExtensionManager(object): + + def get_resources(self): + # Add the resources to the global attribute map + # This is done here as the setup process won't + # initialize the main API router which extends + # the global attribute map + attributes.RESOURCE_ATTRIBUTE_MAP.update( + network_profile.RESOURCE_ATTRIBUTE_MAP) + return network_profile.Network_profile.get_resources() + + def get_actions(self): + return [] + + def get_request_extensions(self): + return [] + + +class N1kvPluginTestCase(test_plugin.NeutronDbPluginV2TestCase): + + _plugin_name = ('neutron.plugins.cisco.n1kv.' + 'n1kv_neutron_plugin.N1kvNeutronPluginV2') + + tenant_id = "some_tenant" + + DEFAULT_RESP_BODY = "" + DEFAULT_RESP_CODE = 200 + DEFAULT_CONTENT_TYPE = "" + fmt = "json" + + def _make_test_policy_profile(self, name='service_profile'): + """ + Create a policy profile record for testing purpose. + + :param name: string representing the name of the policy profile to + create. Default argument value chosen to correspond to the + default name specified in config.py file. + """ + uuid = test_api_v2._uuid() + profile = {'id': uuid, + 'name': name} + return n1kv_db_v2.create_policy_profile(profile) + + def _make_test_profile(self, + name='default_network_profile', + segment_range='386-400'): + """ + Create a profile record for testing purposes. + + :param name: string representing the name of the network profile to + create. Default argument value chosen to correspond to the + default name specified in config.py file. + :param segment_range: string representing the segment range for network + profile. 
+ """ + db_session = db.get_session() + profile = {'name': name, + 'segment_type': 'vlan', + 'physical_network': PHYS_NET, + 'tenant_id': self.tenant_id, + 'segment_range': segment_range} + net_p = n1kv_db_v2.create_network_profile(db_session, profile) + n1kv_db_v2.sync_vlan_allocations(db_session, net_p) + return net_p + + def setUp(self): + """ + Setup method for n1kv plugin tests. + + First step is to define an acceptable response from the VSM to + our requests. This needs to be done BEFORE the setUp() function + of the super-class is called. + + This default here works for many cases. If you need something + extra, please define your own setUp() function in your test class, + and set your DEFAULT_RESPONSE value also BEFORE calling the + setUp() of the super-function (this one here). If you have set + a value already, it will not be overwritten by this code. + + """ + if not self.DEFAULT_RESP_BODY: + self.DEFAULT_RESP_BODY = { + "icehouse-pp": {"properties": {"name": "icehouse-pp", + "id": "some-uuid-1"}}, + "havana_pp": {"properties": {"name": "havana_pp", + "id": "some-uuid-2"}}, + "dhcp_pp": {"properties": {"name": "dhcp_pp", + "id": "some-uuid-3"}}, + } + # Creating a mock HTTP connection object for requests lib. The N1KV + # client interacts with the VSM via HTTP. Since we don't have a VSM + # running in the unit tests, we need to 'fake' it by patching the HTTP + # library itself. We install a patch for a fake HTTP connection class. + # Using __name__ to avoid having to enter the full module path. + http_patcher = mock.patch(n1kv_client.requests.__name__ + ".request") + FakeHttpConnection = http_patcher.start() + # Now define the return values for a few functions that may be called + # on any instance of the fake HTTP connection class. 
+ self.resp_headers = {"content-type": "application/json"} + FakeHttpConnection.return_value = (FakeResponse( + self.DEFAULT_RESP_CODE, + self.DEFAULT_RESP_BODY, + self.resp_headers)) + + # Patch some internal functions in a few other parts of the system. + # These help us move along, without having to mock up even more systems + # in the background. + + # Return a dummy VSM IP address + mock.patch(n1kv_client.__name__ + ".Client._get_vsm_hosts", + new=lambda self: "127.0.0.1").start() + + # Return dummy user profiles + mock.patch(cdb.__name__ + ".get_credential_name", + new=lambda self: {"user_name": "admin", + "password": "admin_password"}).start() + + n1kv_neutron_plugin.N1kvNeutronPluginV2._setup_vsm = _fake_setup_vsm + + neutron_extensions.append_api_extensions_path(extensions.__path__) + ext_mgr = NetworkProfileTestExtensionManager() + + # Save the original RESOURCE_ATTRIBUTE_MAP + self.saved_attr_map = {} + for resource, attrs in attributes.RESOURCE_ATTRIBUTE_MAP.items(): + self.saved_attr_map[resource] = attrs.copy() + # Update the RESOURCE_ATTRIBUTE_MAP with n1kv specific extended attrs. + attributes.RESOURCE_ATTRIBUTE_MAP["networks"].update( + n1kv.EXTENDED_ATTRIBUTES_2_0["networks"]) + attributes.RESOURCE_ATTRIBUTE_MAP["ports"].update( + n1kv.EXTENDED_ATTRIBUTES_2_0["ports"]) + self.addCleanup(self.restore_resource_attribute_map) + self.addCleanup(db.clear_db) + super(N1kvPluginTestCase, self).setUp(self._plugin_name, + ext_mgr=ext_mgr) + # Create some of the database entries that we require. 
+ self._make_test_profile() + self._make_test_policy_profile() + + def restore_resource_attribute_map(self): + # Restore the original RESOURCE_ATTRIBUTE_MAP + attributes.RESOURCE_ATTRIBUTE_MAP = self.saved_attr_map + + def test_plugin(self): + self._make_network('json', + 'some_net', + True, + tenant_id=self.tenant_id, + set_context=True) + + req = self.new_list_request('networks', params="fields=tenant_id") + req.environ['neutron.context'] = context.Context('', self.tenant_id) + res = req.get_response(self.api) + self.assertEqual(res.status_int, 200) + body = self.deserialize('json', res) + self.assertIn('tenant_id', body['networks'][0]) + + +class TestN1kvNetworkProfiles(N1kvPluginTestCase): + def _prepare_net_profile_data(self, segment_type): + netp = {'network_profile': {'name': 'netp1', + 'segment_type': segment_type, + 'tenant_id': self.tenant_id}} + if segment_type == 'vlan': + netp['network_profile']['segment_range'] = '100-110' + netp['network_profile']['physical_network'] = PHYS_NET + elif segment_type == 'overlay': + netp['network_profile']['segment_range'] = '10000-10010' + netp['network_profile']['sub_type'] = 'enhanced' or 'native_vxlan' + netp['network_profile']['multicast_ip_range'] = ("224.1.1.1-" + "224.1.1.10") + elif segment_type == 'trunk': + netp['network_profile']['sub_type'] = 'vlan' + return netp + + def test_create_network_profile_vlan(self): + data = self._prepare_net_profile_data('vlan') + net_p_req = self.new_create_request('network_profiles', data) + res = net_p_req.get_response(self.ext_api) + self.assertEqual(res.status_int, 201) + + def test_create_network_profile_overlay(self): + data = self._prepare_net_profile_data('overlay') + net_p_req = self.new_create_request('network_profiles', data) + res = net_p_req.get_response(self.ext_api) + self.assertEqual(res.status_int, 201) + + def test_create_network_profile_trunk(self): + data = self._prepare_net_profile_data('trunk') + net_p_req = self.new_create_request('network_profiles', 
data) + res = net_p_req.get_response(self.ext_api) + self.assertEqual(res.status_int, 201) + + def test_create_network_profile_trunk_missing_subtype(self): + data = self._prepare_net_profile_data('trunk') + data['network_profile'].pop('sub_type') + net_p_req = self.new_create_request('network_profiles', data) + res = net_p_req.get_response(self.ext_api) + self.assertEqual(res.status_int, 400) + + def test_create_network_profile_overlay_unreasonable_seg_range(self): + data = self._prepare_net_profile_data('overlay') + data['network_profile']['segment_range'] = '10000-100000000001' + net_p_req = self.new_create_request('network_profiles', data) + res = net_p_req.get_response(self.ext_api) + self.assertEqual(res.status_int, 400) + + def test_update_network_profile_plugin(self): + net_p_dict = self._prepare_net_profile_data('overlay') + net_p_req = self.new_create_request('network_profiles', net_p_dict) + net_p = self.deserialize(self.fmt, + net_p_req.get_response(self.ext_api)) + data = {'network_profile': {'name': 'netp2'}} + update_req = self.new_update_request('network_profiles', + data, + net_p['network_profile']['id']) + update_res = update_req.get_response(self.ext_api) + self.assertEqual(update_res.status_int, 200) + + def test_update_network_profile_physical_network_fail(self): + net_p = self._make_test_profile(name='netp1') + data = {'network_profile': {'physical_network': PHYS_NET}} + net_p_req = self.new_update_request('network_profiles', + data, + net_p['id']) + res = net_p_req.get_response(self.ext_api) + self.assertEqual(res.status_int, 400) + + def test_update_network_profile_segment_type_fail(self): + net_p = self._make_test_profile(name='netp1') + data = {'network_profile': {'segment_type': 'overlay'}} + net_p_req = self.new_update_request('network_profiles', + data, + net_p['id']) + res = net_p_req.get_response(self.ext_api) + self.assertEqual(res.status_int, 400) + + def test_update_network_profile_sub_type_fail(self): + net_p_dict = 
self._prepare_net_profile_data('overlay') + net_p_req = self.new_create_request('network_profiles', net_p_dict) + net_p = self.deserialize(self.fmt, + net_p_req.get_response(self.ext_api)) + data = {'network_profile': {'sub_type': 'vlan'}} + update_req = self.new_update_request('network_profiles', + data, + net_p['network_profile']['id']) + update_res = update_req.get_response(self.ext_api) + self.assertEqual(update_res.status_int, 400) + + def test_update_network_profiles_with_networks_fail(self): + net_p = self._make_test_profile(name='netp1') + data = {'network_profile': {'segment_range': '200-210'}} + update_req = self.new_update_request('network_profiles', + data, + net_p['id']) + update_res = update_req.get_response(self.ext_api) + self.assertEqual(update_res.status_int, 200) + net_data = {'network': {'name': 'net1', + n1kv.PROFILE_ID: net_p['id'], + 'tenant_id': 'some_tenant'}} + network_req = self.new_create_request('networks', net_data) + network_res = network_req.get_response(self.api) + self.assertEqual(network_res.status_int, 201) + data = {'network_profile': {'segment_range': '300-310'}} + update_req = self.new_update_request('network_profiles', + data, + net_p['id']) + update_res = update_req.get_response(self.ext_api) + self.assertEqual(update_res.status_int, 409) + + def test_create_overlay_network_profile_invalid_multicast_fail(self): + net_p_dict = self._prepare_net_profile_data('overlay') + data = {'network_profile': {'sub_type': 'native_vxlan', + 'multicast_ip_range': '1.1.1.1'}} + net_p_req = self.new_create_request('network_profiles', data, + net_p_dict) + res = net_p_req.get_response(self.ext_api) + self.assertEqual(res.status_int, 400) + + def test_create_overlay_network_profile_no_multicast_fail(self): + net_p_dict = self._prepare_net_profile_data('overlay') + data = {'network_profile': {'sub_type': 'native_vxlan', + 'multicast_ip_range': ''}} + net_p_req = self.new_create_request('network_profiles', data, + net_p_dict) + res = 
net_p_req.get_response(self.ext_api) + self.assertEqual(res.status_int, 400) + + def test_create_overlay_network_profile_wrong_split_multicast_fail(self): + net_p_dict = self._prepare_net_profile_data('overlay') + data = {'network_profile': { + 'sub_type': 'native_vxlan', + 'multicast_ip_range': '224.1.1.1.224.1.1.3'}} + net_p_req = self.new_create_request('network_profiles', data, + net_p_dict) + res = net_p_req.get_response(self.ext_api) + self.assertEqual(res.status_int, 400) + + def test_create_overlay_network_profile_invalid_minip_multicast_fail(self): + net_p_dict = self._prepare_net_profile_data('overlay') + data = {'network_profile': { + 'sub_type': 'native_vxlan', + 'multicast_ip_range': '10.0.0.1-224.1.1.3'}} + net_p_req = self.new_create_request('network_profiles', data, + net_p_dict) + res = net_p_req.get_response(self.ext_api) + self.assertEqual(res.status_int, 400) + + def test_create_overlay_network_profile_invalid_maxip_multicast_fail(self): + net_p_dict = self._prepare_net_profile_data('overlay') + data = {'network_profile': { + 'sub_type': 'native_vxlan', + 'multicast_ip_range': '224.1.1.1-20.0.0.1'}} + net_p_req = self.new_create_request('network_profiles', data, + net_p_dict) + res = net_p_req.get_response(self.ext_api) + self.assertEqual(res.status_int, 400) + + def test_create_overlay_network_profile_correct_multicast_pass(self): + data = self._prepare_net_profile_data('overlay') + net_p_req = self.new_create_request('network_profiles', data) + res = net_p_req.get_response(self.ext_api) + self.assertEqual(res.status_int, 201) + + def test_update_overlay_network_profile_correct_multicast_pass(self): + data = self._prepare_net_profile_data('overlay') + net_p_req = self.new_create_request('network_profiles', data) + res = net_p_req.get_response(self.ext_api) + self.assertEqual(res.status_int, 201) + net_p = self.deserialize(self.fmt, res) + data = {'network_profile': {'multicast_ip_range': + '224.0.1.0-224.0.1.100'}} + update_req = 
self.new_update_request('network_profiles', + data, + net_p['network_profile']['id']) + update_res = update_req.get_response(self.ext_api) + self.assertEqual(update_res.status_int, 200) + + def test_create_overlay_network_profile_reservedip_multicast_fail(self): + net_p_dict = self._prepare_net_profile_data('overlay') + data = {'network_profile': {'multicast_ip_range': + '224.0.0.100-224.0.1.100'}} + net_p_req = self.new_create_request('network_profiles', data, + net_p_dict) + res = net_p_req.get_response(self.ext_api) + self.assertEqual(res.status_int, 400) + + def test_update_overlay_network_profile_reservedip_multicast_fail(self): + data = self._prepare_net_profile_data('overlay') + net_p_req = self.new_create_request('network_profiles', data) + res = net_p_req.get_response(self.ext_api) + self.assertEqual(res.status_int, 201) + net_p = self.deserialize(self.fmt, res) + data = {'network_profile': {'multicast_ip_range': + '224.0.0.11-224.0.0.111'}} + update_req = self.new_update_request('network_profiles', + data, + net_p['network_profile']['id']) + update_res = update_req.get_response(self.ext_api) + self.assertEqual(update_res.status_int, 400) + + def test_update_vlan_network_profile_multicast_fail(self): + net_p = self._make_test_profile(name='netp1') + data = {'network_profile': {'multicast_ip_range': + '224.0.1.0-224.0.1.100'}} + update_req = self.new_update_request('network_profiles', + data, + net_p['id']) + update_res = update_req.get_response(self.ext_api) + self.assertEqual(update_res.status_int, 400) + + def test_update_trunk_network_profile_segment_range_fail(self): + data = self._prepare_net_profile_data('trunk') + net_p_req = self.new_create_request('network_profiles', data) + res = net_p_req.get_response(self.ext_api) + self.assertEqual(res.status_int, 201) + net_p = self.deserialize(self.fmt, res) + data = {'network_profile': {'segment_range': + '100-200'}} + update_req = self.new_update_request('network_profiles', + data, + 
net_p['network_profile']['id']) + update_res = update_req.get_response(self.ext_api) + self.assertEqual(update_res.status_int, 400) + + def test_update_trunk_network_profile_multicast_fail(self): + data = self._prepare_net_profile_data('trunk') + net_p_req = self.new_create_request('network_profiles', data) + res = net_p_req.get_response(self.ext_api) + self.assertEqual(res.status_int, 201) + net_p = self.deserialize(self.fmt, res) + data = {'network_profile': {'multicast_ip_range': + '224.0.1.0-224.0.1.100'}} + update_req = self.new_update_request('network_profiles', + data, + net_p['network_profile']['id']) + update_res = update_req.get_response(self.ext_api) + self.assertEqual(update_res.status_int, 400) + + def test_create_network_profile_populate_vlan_segment_pool(self): + db_session = db.get_session() + net_p_dict = self._prepare_net_profile_data('vlan') + net_p_req = self.new_create_request('network_profiles', net_p_dict) + self.deserialize(self.fmt, + net_p_req.get_response(self.ext_api)) + for vlan in range(VLAN_MIN, VLAN_MAX + 1): + self.assertIsNotNone(n1kv_db_v2.get_vlan_allocation(db_session, + PHYS_NET, + vlan)) + self.assertFalse(n1kv_db_v2.get_vlan_allocation(db_session, + PHYS_NET, + vlan).allocated) + self.assertRaises(c_exc.VlanIDNotFound, + n1kv_db_v2.get_vlan_allocation, + db_session, + PHYS_NET, + VLAN_MIN - 1) + self.assertRaises(c_exc.VlanIDNotFound, + n1kv_db_v2.get_vlan_allocation, + db_session, + PHYS_NET, + VLAN_MAX + 1) + + def test_delete_network_profile_with_network_fail(self): + net_p = self._make_test_profile(name='netp1') + net_data = {'network': {'name': 'net1', + n1kv.PROFILE_ID: net_p['id'], + 'tenant_id': 'some_tenant'}} + network_req = self.new_create_request('networks', net_data) + network_res = network_req.get_response(self.api) + self.assertEqual(network_res.status_int, 201) + self._delete('network_profiles', net_p['id'], + expected_code=409) + + def test_delete_network_profile_deallocate_vlan_segment_pool(self): + 
db_session = db.get_session() + net_p_dict = self._prepare_net_profile_data('vlan') + net_p_req = self.new_create_request('network_profiles', net_p_dict) + net_p = self.deserialize(self.fmt, + net_p_req.get_response(self.ext_api)) + self.assertIsNotNone(n1kv_db_v2.get_vlan_allocation(db_session, + PHYS_NET, + VLAN_MIN)) + self._delete('network_profiles', net_p['network_profile']['id']) + for vlan in range(VLAN_MIN, VLAN_MAX + 1): + self.assertRaises(c_exc.VlanIDNotFound, + n1kv_db_v2.get_vlan_allocation, + db_session, + PHYS_NET, + vlan) + + +class TestN1kvBasicGet(test_plugin.TestBasicGet, + N1kvPluginTestCase): + + pass + + +class TestN1kvHTTPResponse(test_plugin.TestV2HTTPResponse, + N1kvPluginTestCase): + + pass + + +class TestN1kvPorts(test_plugin.TestPortsV2, + N1kvPluginTestCase, + test_bindings.PortBindingsTestCase): + VIF_TYPE = portbindings.VIF_TYPE_OVS + HAS_PORT_FILTER = False + + def test_create_port_with_default_n1kv_policy_profile_id(self): + """Test port create without passing policy profile id.""" + with self.port() as port: + db_session = db.get_session() + pp = n1kv_db_v2.get_policy_profile( + db_session, port['port'][n1kv.PROFILE_ID]) + self.assertEqual(pp['name'], 'service_profile') + + def test_create_port_with_n1kv_policy_profile_id(self): + """Test port create with policy profile id.""" + profile_obj = self._make_test_policy_profile(name='test_profile') + with self.network() as network: + data = {'port': {n1kv.PROFILE_ID: profile_obj.id, + 'tenant_id': self.tenant_id, + 'network_id': network['network']['id']}} + port_req = self.new_create_request('ports', data) + port = self.deserialize(self.fmt, + port_req.get_response(self.api)) + self.assertEqual(port['port'][n1kv.PROFILE_ID], + profile_obj.id) + self._delete('ports', port['port']['id']) + + def test_update_port_with_n1kv_policy_profile_id(self): + """Test port update failure while updating policy profile id.""" + with self.port() as port: + data = {'port': {n1kv.PROFILE_ID: 
'some-profile-uuid'}} + port_req = self.new_update_request('ports', + data, + port['port']['id']) + res = port_req.get_response(self.api) + # Port update should fail to update policy profile id. + self.assertEqual(res.status_int, 400) + + def test_create_first_port_invalid_parameters_fail(self): + """Test parameters for first port create sent to the VSM.""" + profile_obj = self._make_test_policy_profile(name='test_profile') + with self.network() as network: + client_patch = mock.patch(n1kv_client.__name__ + ".Client", + new=fake_client.TestClientInvalidRequest) + client_patch.start() + data = {'port': {n1kv.PROFILE_ID: profile_obj.id, + 'tenant_id': self.tenant_id, + 'network_id': network['network']['id'], + }} + port_req = self.new_create_request('ports', data) + res = port_req.get_response(self.api) + self.assertEqual(res.status_int, 500) + client_patch.stop() + + def test_create_next_port_invalid_parameters_fail(self): + """Test parameters for subsequent port create sent to the VSM.""" + with self.port() as port: + client_patch = mock.patch(n1kv_client.__name__ + ".Client", + new=fake_client.TestClientInvalidRequest) + client_patch.start() + data = {'port': {n1kv.PROFILE_ID: port['port']['n1kv:profile_id'], + 'tenant_id': port['port']['tenant_id'], + 'network_id': port['port']['network_id']}} + port_req = self.new_create_request('ports', data) + res = port_req.get_response(self.api) + self.assertEqual(res.status_int, 500) + client_patch.stop() + + +class TestN1kvPolicyProfiles(N1kvPluginTestCase): + def test_populate_policy_profile(self): + client_patch = mock.patch(n1kv_client.__name__ + ".Client", + new=fake_client.TestClient) + client_patch.start() + instance = n1kv_neutron_plugin.N1kvNeutronPluginV2() + instance._populate_policy_profiles() + db_session = db.get_session() + profile = n1kv_db_v2.get_policy_profile( + db_session, '00000000-0000-0000-0000-000000000001') + self.assertEqual('pp-1', profile['name']) + client_patch.stop() + + def 
test_populate_policy_profile_delete(self): + # Patch the Client class with the TestClient class + with mock.patch(n1kv_client.__name__ + ".Client", + new=fake_client.TestClient): + # Patch the _get_total_profiles() method to return a custom value + with mock.patch(fake_client.__name__ + + '.TestClient._get_total_profiles') as obj_inst: + # Return 3 policy profiles + obj_inst.return_value = 3 + plugin = manager.NeutronManager.get_plugin() + plugin._populate_policy_profiles() + db_session = db.get_session() + profile = n1kv_db_v2.get_policy_profile( + db_session, '00000000-0000-0000-0000-000000000001') + # Verify that DB contains only 3 policy profiles + self.assertEqual('pp-1', profile['name']) + profile = n1kv_db_v2.get_policy_profile( + db_session, '00000000-0000-0000-0000-000000000002') + self.assertEqual('pp-2', profile['name']) + profile = n1kv_db_v2.get_policy_profile( + db_session, '00000000-0000-0000-0000-000000000003') + self.assertEqual('pp-3', profile['name']) + self.assertRaises(c_exc.PolicyProfileIdNotFound, + n1kv_db_v2.get_policy_profile, + db_session, + '00000000-0000-0000-0000-000000000004') + # Return 2 policy profiles + obj_inst.return_value = 2 + plugin._populate_policy_profiles() + # Verify that the third policy profile is deleted + self.assertRaises(c_exc.PolicyProfileIdNotFound, + n1kv_db_v2.get_policy_profile, + db_session, + '00000000-0000-0000-0000-000000000003') + + +class TestN1kvNetworks(test_plugin.TestNetworksV2, + N1kvPluginTestCase): + + def _prepare_net_data(self, net_profile_id): + return {'network': {'name': 'net1', + n1kv.PROFILE_ID: net_profile_id, + 'tenant_id': self.tenant_id}} + + def test_create_network_with_default_n1kv_network_profile_id(self): + """Test network create without passing network profile id.""" + with self.network() as network: + db_session = db.get_session() + np = n1kv_db_v2.get_network_profile( + db_session, network['network'][n1kv.PROFILE_ID]) + self.assertEqual(np['name'], 'default_network_profile') + + 
def test_create_network_with_n1kv_network_profile_id(self): + """Test network create with network profile id.""" + profile_obj = self._make_test_profile(name='test_profile') + data = self._prepare_net_data(profile_obj.id) + network_req = self.new_create_request('networks', data) + network = self.deserialize(self.fmt, + network_req.get_response(self.api)) + self.assertEqual(network['network'][n1kv.PROFILE_ID], + profile_obj.id) + + def test_update_network_with_n1kv_network_profile_id(self): + """Test network update failure while updating network profile id.""" + with self.network() as network: + data = {'network': {n1kv.PROFILE_ID: 'some-profile-uuid'}} + network_req = self.new_update_request('networks', + data, + network['network']['id']) + res = network_req.get_response(self.api) + # Network update should fail to update network profile id. + self.assertEqual(res.status_int, 400) + + +class TestN1kvSubnets(test_plugin.TestSubnetsV2, + N1kvPluginTestCase): + + def setUp(self): + super(TestN1kvSubnets, self).setUp() + + +class TestN1kvL3Test(test_l3_plugin.L3NatExtensionTestCase): + + pass + + +class TestN1kvL3SchedulersTest(test_l3_schedulers.L3SchedulerTestCase): + + pass diff --git a/neutron/tests/unit/cisco/test_config.py b/neutron/tests/unit/cisco/test_config.py new file mode 100644 index 000000000..7104ed06a --- /dev/null +++ b/neutron/tests/unit/cisco/test_config.py @@ -0,0 +1,72 @@ +# Copyright (c) 2013 Cisco Systems Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +import mock +from oslo.config import cfg + +from neutron.plugins.cisco.common import config as cisco_config +from neutron.tests import base + + +class TestCiscoNexusPluginConfig(base.BaseTestCase): + + def setUp(self): + # Point neutron config file to: neutron/tests/etc/neutron.conf.test + self.config_parse() + + super(TestCiscoNexusPluginConfig, self).setUp() + + def test_config_parse_error(self): + """Check that config error is raised upon config parser failure.""" + with mock.patch.object(cfg, 'MultiConfigParser') as parser: + parser.return_value.read.return_value = [] + self.assertRaises(cfg.Error, cisco_config.CiscoConfigOptions) + + def test_create_device_dictionary(self): + """Test creation of the device dictionary based on nexus config.""" + test_config = { + 'NEXUS_SWITCH:1.1.1.1': { + 'username': ['admin'], + 'password': ['mySecretPassword'], + 'ssh_port': [22], + 'compute1': ['1/1'], + 'compute2': ['1/2'], + }, + 'NEXUS_SWITCH:2.2.2.2': { + 'username': ['admin'], + 'password': ['mySecretPassword'], + 'ssh_port': [22], + 'compute3': ['1/1'], + 'compute4': ['1/2'], + }, + } + expected_dev_dict = { + ('NEXUS_SWITCH', '1.1.1.1', 'username'): 'admin', + ('NEXUS_SWITCH', '1.1.1.1', 'password'): 'mySecretPassword', + ('NEXUS_SWITCH', '1.1.1.1', 'ssh_port'): 22, + ('NEXUS_SWITCH', '1.1.1.1', 'compute1'): '1/1', + ('NEXUS_SWITCH', '1.1.1.1', 'compute2'): '1/2', + ('NEXUS_SWITCH', '2.2.2.2', 'username'): 'admin', + ('NEXUS_SWITCH', '2.2.2.2', 'password'): 'mySecretPassword', + ('NEXUS_SWITCH', '2.2.2.2', 'ssh_port'): 22, + ('NEXUS_SWITCH', '2.2.2.2', 'compute3'): '1/1', + ('NEXUS_SWITCH', '2.2.2.2', 'compute4'): '1/2', + } + with mock.patch.object(cfg, 'MultiConfigParser') as parser: + parser.return_value.read.return_value = cfg.CONF.config_file + parser.return_value.parsed = [test_config] + cisco_config.CiscoConfigOptions() + 
self.assertEqual(cisco_config.device_dictionary, + expected_dev_dict) diff --git a/neutron/tests/unit/cisco/test_network_db.py b/neutron/tests/unit/cisco/test_network_db.py new file mode 100644 index 000000000..ef09c81c2 --- /dev/null +++ b/neutron/tests/unit/cisco/test_network_db.py @@ -0,0 +1,291 @@ +# Copyright (c) 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import collections +import mock +import testtools + +from neutron.db import api as db +from neutron.plugins.cisco.common import cisco_constants +from neutron.plugins.cisco.common import cisco_credentials_v2 +from neutron.plugins.cisco.common import cisco_exceptions as c_exc +from neutron.plugins.cisco.common import config as config +from neutron.plugins.cisco.db import network_db_v2 as cdb +from neutron.plugins.cisco import network_plugin +from neutron.tests import base + + +class CiscoNetworkDbTest(base.BaseTestCase): + + """Base class for Cisco network database unit tests.""" + + def setUp(self): + super(CiscoNetworkDbTest, self).setUp() + db.configure_db() + + # The Cisco network plugin includes a thin layer of QoS and + # credential API methods which indirectly call Cisco QoS and + # credential database access methods. For better code coverage, + # this test suite will make calls to the QoS and credential database + # access methods indirectly through the network plugin. The network + # plugin's init function can be mocked out for this purpose. 
+ def new_network_plugin_init(instance): + pass + with mock.patch.object(network_plugin.PluginV2, + '__init__', new=new_network_plugin_init): + self._network_plugin = network_plugin.PluginV2() + + self.addCleanup(db.clear_db) + + +class CiscoNetworkQosDbTest(CiscoNetworkDbTest): + + """Unit tests for Cisco network QoS database model.""" + + QosObj = collections.namedtuple('QosObj', 'tenant qname desc') + + def _qos_test_obj(self, tnum, qnum, desc=None): + """Create a Qos test object from a pair of numbers.""" + if desc is None: + desc = 'test qos %s-%s' % (str(tnum), str(qnum)) + tenant = 'tenant_%s' % str(tnum) + qname = 'qos_%s' % str(qnum) + return self.QosObj(tenant, qname, desc) + + def _assert_equal(self, qos, qos_obj): + self.assertEqual(qos.tenant_id, qos_obj.tenant) + self.assertEqual(qos.qos_name, qos_obj.qname) + self.assertEqual(qos.qos_desc, qos_obj.desc) + + def test_qos_add_remove(self): + qos11 = self._qos_test_obj(1, 1) + qos = self._network_plugin.create_qos(qos11.tenant, qos11.qname, + qos11.desc) + self._assert_equal(qos, qos11) + qos_id = qos.qos_id + qos = self._network_plugin.delete_qos(qos11.tenant, qos_id) + self._assert_equal(qos, qos11) + qos = self._network_plugin.delete_qos(qos11.tenant, qos_id) + self.assertIsNone(qos) + + def test_qos_add_dup(self): + qos22 = self._qos_test_obj(2, 2) + qos = self._network_plugin.create_qos(qos22.tenant, qos22.qname, + qos22.desc) + self._assert_equal(qos, qos22) + qos_id = qos.qos_id + with testtools.ExpectedException(c_exc.QosNameAlreadyExists): + self._network_plugin.create_qos(qos22.tenant, qos22.qname, + "duplicate 22") + qos = self._network_plugin.delete_qos(qos22.tenant, qos_id) + self._assert_equal(qos, qos22) + qos = self._network_plugin.delete_qos(qos22.tenant, qos_id) + self.assertIsNone(qos) + + def test_qos_get(self): + qos11 = self._qos_test_obj(1, 1) + qos11_id = self._network_plugin.create_qos(qos11.tenant, qos11.qname, + qos11.desc).qos_id + qos21 = self._qos_test_obj(2, 1) + qos21_id 
= self._network_plugin.create_qos(qos21.tenant, qos21.qname, + qos21.desc).qos_id + qos22 = self._qos_test_obj(2, 2) + qos22_id = self._network_plugin.create_qos(qos22.tenant, qos22.qname, + qos22.desc).qos_id + + qos = self._network_plugin.get_qos_details(qos11.tenant, qos11_id) + self._assert_equal(qos, qos11) + qos = self._network_plugin.get_qos_details(qos21.tenant, qos21_id) + self._assert_equal(qos, qos21) + qos = self._network_plugin.get_qos_details(qos21.tenant, qos22_id) + self._assert_equal(qos, qos22) + + with testtools.ExpectedException(c_exc.QosNotFound): + self._network_plugin.get_qos_details(qos11.tenant, "dummyQosId") + with testtools.ExpectedException(c_exc.QosNotFound): + self._network_plugin.get_qos_details(qos11.tenant, qos21_id) + with testtools.ExpectedException(c_exc.QosNotFound): + self._network_plugin.get_qos_details(qos21.tenant, qos11_id) + + qos_all_t1 = self._network_plugin.get_all_qoss(qos11.tenant) + self.assertEqual(len(qos_all_t1), 1) + qos_all_t2 = self._network_plugin.get_all_qoss(qos21.tenant) + self.assertEqual(len(qos_all_t2), 2) + qos_all_t3 = self._network_plugin.get_all_qoss("tenant3") + self.assertEqual(len(qos_all_t3), 0) + + def test_qos_update(self): + qos11 = self._qos_test_obj(1, 1) + qos11_id = self._network_plugin.create_qos(qos11.tenant, qos11.qname, + qos11.desc).qos_id + self._network_plugin.rename_qos(qos11.tenant, qos11_id, + new_name=None) + new_qname = "new qos name" + new_qos = self._network_plugin.rename_qos(qos11.tenant, qos11_id, + new_qname) + expected_qobj = self.QosObj(qos11.tenant, new_qname, qos11.desc) + self._assert_equal(new_qos, expected_qobj) + new_qos = self._network_plugin.get_qos_details(qos11.tenant, qos11_id) + self._assert_equal(new_qos, expected_qobj) + with testtools.ExpectedException(c_exc.QosNotFound): + self._network_plugin.rename_qos(qos11.tenant, "dummyQosId", + new_name=None) + + +class CiscoNetworkCredentialDbTest(CiscoNetworkDbTest): + + """Unit tests for Cisco network credentials 
database model.""" + + CredObj = collections.namedtuple('CredObj', 'cname usr pwd ctype') + + def _cred_test_obj(self, tnum, cnum): + """Create a Credential test object from a pair of numbers.""" + cname = 'credential_%s_%s' % (str(tnum), str(cnum)) + usr = 'User_%s_%s' % (str(tnum), str(cnum)) + pwd = 'Password_%s_%s' % (str(tnum), str(cnum)) + ctype = 'ctype_%s' % str(tnum) + return self.CredObj(cname, usr, pwd, ctype) + + def _assert_equal(self, credential, cred_obj): + self.assertEqual(credential.type, cred_obj.ctype) + self.assertEqual(credential.credential_name, cred_obj.cname) + self.assertEqual(credential.user_name, cred_obj.usr) + self.assertEqual(credential.password, cred_obj.pwd) + + def test_credential_add_remove(self): + cred11 = self._cred_test_obj(1, 1) + cred = cdb.add_credential( + cred11.cname, cred11.usr, cred11.pwd, cred11.ctype) + self._assert_equal(cred, cred11) + cred_id = cred.credential_id + cred = cdb.remove_credential(cred_id) + self._assert_equal(cred, cred11) + cred = cdb.remove_credential(cred_id) + self.assertIsNone(cred) + + def test_credential_add_dup(self): + cred22 = self._cred_test_obj(2, 2) + cred = cdb.add_credential( + cred22.cname, cred22.usr, cred22.pwd, cred22.ctype) + self._assert_equal(cred, cred22) + cred_id = cred.credential_id + with testtools.ExpectedException(c_exc.CredentialAlreadyExists): + cdb.add_credential( + cred22.cname, cred22.usr, cred22.pwd, cred22.ctype) + cred = cdb.remove_credential(cred_id) + self._assert_equal(cred, cred22) + cred = cdb.remove_credential(cred_id) + self.assertIsNone(cred) + + def test_credential_get_id(self): + cred11 = self._cred_test_obj(1, 1) + cred11_id = cdb.add_credential( + cred11.cname, cred11.usr, cred11.pwd, cred11.ctype).credential_id + cred21 = self._cred_test_obj(2, 1) + cred21_id = cdb.add_credential( + cred21.cname, cred21.usr, cred21.pwd, cred21.ctype).credential_id + cred22 = self._cred_test_obj(2, 2) + cred22_id = cdb.add_credential( + cred22.cname, cred22.usr, 
cred22.pwd, cred22.ctype).credential_id + + cred = self._network_plugin.get_credential_details(cred11_id) + self._assert_equal(cred, cred11) + cred = self._network_plugin.get_credential_details(cred21_id) + self._assert_equal(cred, cred21) + cred = self._network_plugin.get_credential_details(cred22_id) + self._assert_equal(cred, cred22) + + with testtools.ExpectedException(c_exc.CredentialNotFound): + self._network_plugin.get_credential_details("dummyCredentialId") + + cred_all_t1 = self._network_plugin.get_all_credentials() + self.assertEqual(len(cred_all_t1), 3) + + def test_credential_get_name(self): + cred11 = self._cred_test_obj(1, 1) + cred11_id = cdb.add_credential( + cred11.cname, cred11.usr, cred11.pwd, cred11.ctype).credential_id + cred21 = self._cred_test_obj(2, 1) + cred21_id = cdb.add_credential( + cred21.cname, cred21.usr, cred21.pwd, cred21.ctype).credential_id + cred22 = self._cred_test_obj(2, 2) + cred22_id = cdb.add_credential( + cred22.cname, cred22.usr, cred22.pwd, cred22.ctype).credential_id + self.assertNotEqual(cred11_id, cred21_id) + self.assertNotEqual(cred11_id, cred22_id) + self.assertNotEqual(cred21_id, cred22_id) + + cred = cdb.get_credential_name(cred11.cname) + self._assert_equal(cred, cred11) + cred = cdb.get_credential_name(cred21.cname) + self._assert_equal(cred, cred21) + cred = cdb.get_credential_name(cred22.cname) + self._assert_equal(cred, cred22) + + with testtools.ExpectedException(c_exc.CredentialNameNotFound): + cdb.get_credential_name("dummyCredentialName") + + def test_credential_update(self): + cred11 = self._cred_test_obj(1, 1) + cred11_id = cdb.add_credential( + cred11.cname, cred11.usr, cred11.pwd, cred11.ctype).credential_id + self._network_plugin.rename_credential(cred11_id, new_name=None, + new_password=None) + new_usr = "new user name" + new_pwd = "new password" + new_credential = self._network_plugin.rename_credential( + cred11_id, new_usr, new_pwd) + expected_cred = self.CredObj( + cred11.cname, new_usr, 
new_pwd, cred11.ctype) + self._assert_equal(new_credential, expected_cred) + new_credential = self._network_plugin.get_credential_details( + cred11_id) + self._assert_equal(new_credential, expected_cred) + with testtools.ExpectedException(c_exc.CredentialNotFound): + self._network_plugin.rename_credential( + "dummyCredentialId", new_usr, new_pwd) + + def test_get_credential_not_found_exception(self): + self.assertRaises(c_exc.CredentialNotFound, + self._network_plugin.get_credential_details, + "dummyCredentialId") + + +class CiscoCredentialStoreTest(base.BaseTestCase): + + """Cisco Credential Store unit tests.""" + + def setUp(self): + super(CiscoCredentialStoreTest, self).setUp() + db.configure_db() + self.addCleanup(db.clear_db) + + def test_cred_store_init_duplicate_creds_ignored(self): + """Check that with multi store instances, dup creds are ignored.""" + # Create a device dictionary containing credentials for 1 switch. + dev_dict = { + ('dev_id', '1.1.1.1', cisco_constants.USERNAME): 'user_1', + ('dev_id', '1.1.1.1', cisco_constants.PASSWORD): 'password_1', + ('dev_id', '1.1.1.1', 'host_a'): '1/1', + ('dev_id', '1.1.1.1', 'host_b'): '1/2', + ('dev_id', '1.1.1.1', 'host_c'): '1/3', + } + with mock.patch.object(config, 'get_device_dictionary', + return_value=dev_dict): + # Create and initialize 2 instances of credential store. + cisco_credentials_v2.Store().initialize() + cisco_credentials_v2.Store().initialize() + # There should be only 1 switch credential in the database. + self.assertEqual(len(cdb.get_all_credentials()), 1) diff --git a/neutron/tests/unit/cisco/test_network_plugin.py b/neutron/tests/unit/cisco/test_network_plugin.py new file mode 100644 index 000000000..4e7be3e87 --- /dev/null +++ b/neutron/tests/unit/cisco/test_network_plugin.py @@ -0,0 +1,1186 @@ +# Copyright (c) 2012 OpenStack Foundation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import contextlib +import copy +import inspect +import logging +import mock + +import six +import webob.exc as wexc + +from neutron.api import extensions +from neutron.api.v2 import attributes +from neutron.api.v2 import base +from neutron.common import exceptions as n_exc +from neutron import context +from neutron.db import db_base_plugin_v2 as base_plugin +from neutron.db import l3_db +from neutron.extensions import portbindings +from neutron.extensions import providernet as provider +from neutron import manager +from neutron.openstack.common import gettextutils +from neutron.plugins.cisco.common import cisco_constants as const +from neutron.plugins.cisco.common import cisco_exceptions as c_exc +from neutron.plugins.cisco.common import config as cisco_config +from neutron.plugins.cisco.db import network_db_v2 +from neutron.plugins.cisco.db import nexus_db_v2 +from neutron.plugins.cisco.models import virt_phy_sw_v2 +from neutron.plugins.openvswitch.common import config as ovs_config +from neutron.plugins.openvswitch import ovs_db_v2 +from neutron.tests.unit import _test_extension_portbindings as test_bindings +from neutron.tests.unit import test_db_plugin +from neutron.tests.unit import test_extensions + +LOG = logging.getLogger(__name__) +CORE_PLUGIN = 'neutron.plugins.cisco.network_plugin.PluginV2' +NEXUS_PLUGIN = 'neutron.plugins.cisco.nexus.cisco_nexus_plugin_v2.NexusPlugin' +NEXUS_DRIVER = ('neutron.plugins.cisco.nexus.' 
+ 'cisco_nexus_network_driver_v2.CiscoNEXUSDriver') +PHYS_NET = 'physnet1' +BRIDGE_NAME = 'br-eth1' +VLAN_START = 1000 +VLAN_END = 1100 +COMP_HOST_NAME = 'testhost' +COMP_HOST_NAME_2 = 'testhost_2' +NEXUS_IP_ADDR = '1.1.1.1' +NEXUS_DEV_ID = 'NEXUS_SWITCH' +NEXUS_USERNAME = 'admin' +NEXUS_PASSWORD = 'mySecretPassword' +NEXUS_SSH_PORT = 22 +NEXUS_INTERFACE = '1/1' +NEXUS_INTERFACE_2 = '1/2' +NEXUS_PORT_1 = 'ethernet:1/1' +NEXUS_PORT_2 = 'ethernet:1/2' +NETWORK_NAME = 'test_network' +CIDR_1 = '10.0.0.0/24' +CIDR_2 = '10.0.1.0/24' +DEVICE_ID_1 = '11111111-1111-1111-1111-111111111111' +DEVICE_ID_2 = '22222222-2222-2222-2222-222222222222' +DEVICE_OWNER = 'compute:None' + + +class CiscoNetworkPluginV2TestCase(test_db_plugin.NeutronDbPluginV2TestCase): + + def setUp(self): + """Configure for end-to-end neutron testing using a mock ncclient. + + This setup includes: + - Configure the OVS plugin to use VLANs in the range of + VLAN_START-VLAN_END. + - Configure the Cisco plugin model to use the Nexus driver. + - Configure the Nexus driver to use an imaginary switch + at NEXUS_IP_ADDR. 
+ + """ + # Configure the OVS and Cisco plugins + phys_bridge = ':'.join([PHYS_NET, BRIDGE_NAME]) + phys_vlan_range = ':'.join([PHYS_NET, str(VLAN_START), str(VLAN_END)]) + config = { + ovs_config: { + 'OVS': {'bridge_mappings': phys_bridge, + 'network_vlan_ranges': [phys_vlan_range], + 'tenant_network_type': 'vlan'} + }, + cisco_config: { + 'CISCO': {'nexus_driver': NEXUS_DRIVER}, + 'CISCO_PLUGINS': {'nexus_plugin': NEXUS_PLUGIN}, + } + } + for module in config: + for group in config[module]: + for opt, val in config[module][group].items(): + module.cfg.CONF.set_override(opt, val, group) + + # Configure the Nexus switch dictionary + # TODO(Henry): add tests for other devices + nexus_config = { + (NEXUS_DEV_ID, NEXUS_IP_ADDR, 'username'): NEXUS_USERNAME, + (NEXUS_DEV_ID, NEXUS_IP_ADDR, 'password'): NEXUS_PASSWORD, + (NEXUS_DEV_ID, NEXUS_IP_ADDR, 'ssh_port'): NEXUS_SSH_PORT, + (NEXUS_DEV_ID, NEXUS_IP_ADDR, COMP_HOST_NAME): NEXUS_INTERFACE, + (NEXUS_DEV_ID, NEXUS_IP_ADDR, COMP_HOST_NAME_2): NEXUS_INTERFACE_2, + } + nexus_patch = mock.patch.dict(cisco_config.device_dictionary, + nexus_config) + nexus_patch.start() + self.addCleanup(nexus_patch.stop) + + # Use a mock netconf client + self.mock_ncclient = mock.Mock() + ncclient_patch = mock.patch.dict('sys.modules', + {'ncclient': self.mock_ncclient}) + ncclient_patch.start() + self.addCleanup(ncclient_patch.stop) + + # Call the parent setUp, start the core plugin + super(CiscoNetworkPluginV2TestCase, self).setUp(CORE_PLUGIN) + self.port_create_status = 'DOWN' + + # Set Cisco config module's first configured Nexus IP address. + # Used for SVI placement when round-robin placement is disabled. 
+ mock.patch.object(cisco_config, 'first_device_ip', + new=NEXUS_IP_ADDR).start() + + def _get_plugin_ref(self): + return getattr(manager.NeutronManager.get_plugin(), + "_model")._plugins[const.VSWITCH_PLUGIN] + + @contextlib.contextmanager + def _patch_ncclient(self, attr, value): + """Configure an attribute on the mock ncclient module. + + This method can be used to inject errors by setting a side effect + or a return value for an ncclient method. + + :param attr: ncclient attribute (typically method) to be configured. + :param value: Value to be configured on the attribute. + + """ + # Configure attribute. + config = {attr: value} + self.mock_ncclient.configure_mock(**config) + # Continue testing + yield + # Unconfigure attribute + config = {attr: None} + self.mock_ncclient.configure_mock(**config) + + @staticmethod + def _config_dependent_side_effect(match_config, exc): + """Generates a config-dependent side effect for ncclient edit_config. + + This method generates a mock side-effect function which can be + configured on the mock ncclient module for the edit_config method. + This side effect will cause a given exception to be raised whenever + the XML config string that is passed to edit_config contains all + words in a given match config string. + + :param match_config: String containing keywords to be matched + :param exc: Exception to be raised when match is found + :return: Side effect function for the mock ncclient module's + edit_config method. + + """ + keywords = match_config.split() + + def _side_effect_function(target, config): + if all(word in config for word in keywords): + raise exc + return _side_effect_function + + def _is_in_nexus_cfg(self, words): + """Check if any config sent to Nexus contains all words in a list.""" + for call in (self.mock_ncclient.manager.connect.return_value. 
+ edit_config.mock_calls): + configlet = call[2]['config'] + if all(word in configlet for word in words): + return True + return False + + def _is_in_last_nexus_cfg(self, words): + """Check if last config sent to Nexus contains all words in a list.""" + last_cfg = (self.mock_ncclient.manager.connect.return_value. + edit_config.mock_calls[-1][2]['config']) + return all(word in last_cfg for word in words) + + def _is_vlan_configured(self, vlan_creation_expected=True, + add_keyword_expected=False): + vlan_created = self._is_in_nexus_cfg(['vlan', 'vlan-name']) + add_appears = self._is_in_last_nexus_cfg(['add']) + return (self._is_in_last_nexus_cfg(['allowed', 'vlan']) and + vlan_created == vlan_creation_expected and + add_appears == add_keyword_expected) + + def _is_vlan_unconfigured(self, vlan_deletion_expected=True, + vlan_untrunk_expected=True): + vlan_deleted = self._is_in_nexus_cfg( + ['no', 'vlan', 'vlan-id-create-delete']) + vlan_untrunked = self._is_in_nexus_cfg(['allowed', 'vlan', 'remove']) + return (vlan_deleted == vlan_deletion_expected and + vlan_untrunked == vlan_untrunk_expected) + + def _assertExpectedHTTP(self, status, exc): + """Confirm that an HTTP status corresponds to an expected exception. + + Confirm that an HTTP status which has been returned for an + neutron API request matches the HTTP status corresponding + to an expected exception. + + :param status: HTTP status + :param exc: Expected exception + + """ + if exc in base.FAULT_MAP: + expected_http = base.FAULT_MAP[exc].code + else: + expected_http = wexc.HTTPInternalServerError.code + self.assertEqual(status, expected_http) + + +class TestCiscoGetAttribute(CiscoNetworkPluginV2TestCase): + + def test_get_unsupported_attr_in_lazy_gettext_mode(self): + """Test get of unsupported attribute in lazy gettext mode. + + This test also checks that this operation does not cause + excessive nesting of calls to deepcopy. 
+ """ + plugin = manager.NeutronManager.get_plugin() + + def _lazy_gettext(msg): + return gettextutils.Message(msg, domain='neutron') + + with mock.patch.dict(six.moves.builtins.__dict__, + {'_': _lazy_gettext}): + self.nesting_count = 0 + + def _count_nesting(*args, **kwargs): + self.nesting_count += 1 + + with mock.patch.object(copy, 'deepcopy', + side_effect=_count_nesting, + wraps=copy.deepcopy): + self.assertRaises(AttributeError, getattr, plugin, + 'an_unsupported_attribute') + # If there were no nested calls to deepcopy, then the total + # number of calls to deepcopy should be 2 (1 call for + # each mod'd field in the AttributeError message raised + # by the plugin). + self.assertEqual(self.nesting_count, 2) + + +class TestCiscoBasicGet(CiscoNetworkPluginV2TestCase, + test_db_plugin.TestBasicGet): + pass + + +class TestCiscoV2HTTPResponse(CiscoNetworkPluginV2TestCase, + test_db_plugin.TestV2HTTPResponse): + pass + + +class TestCiscoPortsV2(CiscoNetworkPluginV2TestCase, + test_db_plugin.TestPortsV2, + test_bindings.PortBindingsHostTestCaseMixin): + + @contextlib.contextmanager + def _create_port_res(self, name=NETWORK_NAME, cidr=CIDR_1, + do_delete=True, host_id=COMP_HOST_NAME): + """Create a network, subnet, and port and yield the result. + + Create a network, subnet, and port, yield the result, + then delete the port, subnet, and network. 
+ + :param name: Name of network to be created + :param cidr: cidr address of subnetwork to be created + :param do_delete: If set to True, delete the port at the + end of testing + :param host_id: Name of compute host to use for testing + + """ + ctx = context.get_admin_context() + with self.network(name=name) as network: + with self.subnet(network=network, cidr=cidr) as subnet: + net_id = subnet['subnet']['network_id'] + args = (portbindings.HOST_ID, 'device_id', 'device_owner') + port_dict = {portbindings.HOST_ID: host_id, + 'device_id': DEVICE_ID_1, + 'device_owner': DEVICE_OWNER} + res = self._create_port(self.fmt, net_id, arg_list=args, + context=ctx, **port_dict) + port = self.deserialize(self.fmt, res) + yield res + if do_delete: + self._delete('ports', port['port']['id']) + + def test_create_ports_bulk_emulated_plugin_failure(self): + real_has_attr = hasattr + + #ensures the API choose the emulation code path + def fakehasattr(item, attr): + if attr.endswith('__native_bulk_support'): + return False + return real_has_attr(item, attr) + + with mock.patch('__builtin__.hasattr', + new=fakehasattr): + plugin_ref = self._get_plugin_ref() + orig = plugin_ref.create_port + with mock.patch.object(plugin_ref, + 'create_port') as patched_plugin: + + def side_effect(*args, **kwargs): + return self._do_side_effect(patched_plugin, orig, + *args, **kwargs) + + patched_plugin.side_effect = side_effect + with self.network() as net: + res = self._create_port_bulk(self.fmt, 2, + net['network']['id'], + 'test', + True) + # Expect an internal server error as we injected a fault + self._validate_behavior_on_bulk_failure( + res, + 'ports', + wexc.HTTPInternalServerError.code) + + def test_create_ports_bulk_native(self): + if self._skip_native_bulk: + self.skipTest("Plugin does not support native bulk port create") + + def test_create_ports_bulk_emulated(self): + if self._skip_native_bulk: + self.skipTest("Plugin does not support native bulk port create") + + def 
test_create_ports_bulk_native_plugin_failure(self): + if self._skip_native_bulk: + self.skipTest("Plugin does not support native bulk port create") + ctx = context.get_admin_context() + with self.network() as net: + plugin_ref = self._get_plugin_ref() + orig = plugin_ref.create_port + with mock.patch.object(plugin_ref, + 'create_port') as patched_plugin: + + def side_effect(*args, **kwargs): + return self._do_side_effect(patched_plugin, orig, + *args, **kwargs) + + patched_plugin.side_effect = side_effect + res = self._create_port_bulk(self.fmt, 2, + net['network']['id'], + 'test', True, context=ctx) + # We expect an internal server error as we injected a fault + self._validate_behavior_on_bulk_failure( + res, + 'ports', + wexc.HTTPInternalServerError.code) + + def test_nexus_enable_vlan_cmd(self): + """Verify the syntax of the command to enable a vlan on an intf.""" + + # First vlan should be configured without 'add' keyword + with self._create_port_res(name='net1', cidr=CIDR_1): + self.assertTrue(self._is_vlan_configured( + vlan_creation_expected=True, + add_keyword_expected=False)) + self.mock_ncclient.reset_mock() + + # Second vlan should be configured with 'add' keyword + with self._create_port_res(name='net2', cidr=CIDR_2): + self.assertTrue(self._is_vlan_configured( + vlan_creation_expected=True, + add_keyword_expected=True)) + + def test_nexus_vlan_config_two_hosts(self): + """Verify config/unconfig of vlan on two compute hosts.""" + + @contextlib.contextmanager + def _create_port_check_vlan(comp_host_name, device_id, + vlan_creation_expected=True): + arg_list = (portbindings.HOST_ID,) + port_dict = {portbindings.HOST_ID: comp_host_name, + 'device_id': device_id, + 'device_owner': DEVICE_OWNER} + with self.port(subnet=subnet, fmt=self.fmt, + arg_list=arg_list, **port_dict): + self.assertTrue(self._is_vlan_configured( + vlan_creation_expected=vlan_creation_expected, + add_keyword_expected=False)) + self.mock_ncclient.reset_mock() + yield + + # Create network 
and subnet + with self.network(name=NETWORK_NAME) as network: + with self.subnet(network=network, cidr=CIDR_1) as subnet: + + # Create an instance on first compute host + with _create_port_check_vlan( + COMP_HOST_NAME, DEVICE_ID_1, vlan_creation_expected=True): + + # Create an instance on second compute host + with _create_port_check_vlan( + COMP_HOST_NAME_2, DEVICE_ID_2, + vlan_creation_expected=False): + pass + + # Instance on second host is now terminated. + # Vlan should be untrunked from port, but vlan should + # still exist on the switch. + self.assertTrue(self._is_vlan_unconfigured( + vlan_deletion_expected=False)) + self.mock_ncclient.reset_mock() + + # Instance on first host is now terminated. + # Vlan should be untrunked from port and vlan should have + # been deleted from the switch. + self.assertTrue(self._is_vlan_unconfigured( + vlan_deletion_expected=True)) + + def test_nexus_connect_fail(self): + """Test failure to connect to a Nexus switch. + + While creating a network, subnet, and port, simulate a connection + failure to a nexus switch. Confirm that the expected HTTP code + is returned for the create port operation. + + """ + with self._patch_ncclient('manager.connect.side_effect', + AttributeError): + with self._create_port_res(do_delete=False) as res: + self._assertExpectedHTTP(res.status_int, + c_exc.NexusConnectFailed) + + def test_nexus_config_fail(self): + """Test a Nexus switch configuration failure. + + While creating a network, subnet, and port, simulate a nexus + switch configuration error. Confirm that the expected HTTP code + is returned for the create port operation. + + """ + with self._patch_ncclient( + 'manager.connect.return_value.edit_config.side_effect', + AttributeError): + with self._create_port_res(do_delete=False) as res: + self._assertExpectedHTTP(res.status_int, + c_exc.NexusConfigFailed) + + def test_nexus_extended_vlan_range_failure(self): + """Test that extended VLAN range config errors are ignored. 
+ + Some versions of Nexus switch do not allow state changes for + the extended VLAN range (1006-4094), but these errors can be + ignored (default values are appropriate). Test that such errors + are ignored by the Nexus plugin. + + """ + config_err_strings = { + "state active": "Can't modify state for extended", + "no shutdown": "Command is only allowed on VLAN", + } + for config, err_string in config_err_strings.items(): + with self._patch_ncclient( + 'manager.connect.return_value.edit_config.side_effect', + self._config_dependent_side_effect(config, + Exception(err_string))): + with self._create_port_res() as res: + self.assertEqual(res.status_int, wexc.HTTPCreated.code) + + def test_nexus_vlan_config_rollback(self): + """Test rollback following Nexus VLAN state config failure. + + Test that the Cisco Nexus plugin correctly deletes the VLAN + on the Nexus switch when the 'state active' command fails (for + a reason other than state configuration change is rejected + for the extended VLAN range). + + """ + vlan_state_configs = ['state active', 'no shutdown'] + for config in vlan_state_configs: + with self._patch_ncclient( + 'manager.connect.return_value.edit_config.side_effect', + self._config_dependent_side_effect(config, ValueError)): + with self._create_port_res(do_delete=False) as res: + # Confirm that the last configuration sent to the Nexus + # switch was deletion of the VLAN. + self.assertTrue( + self._is_in_last_nexus_cfg(['', '']) + ) + self._assertExpectedHTTP(res.status_int, + c_exc.NexusConfigFailed) + + def test_get_seg_id_fail(self): + """Test handling of a NetworkSegmentIDNotFound exception. + + Test the Cisco NetworkSegmentIDNotFound exception by simulating + a return of None by the OVS DB get_network_binding method + during port creation. 
+ + """ + orig = ovs_db_v2.get_network_binding + + def _return_none_if_nexus_caller(self, *args, **kwargs): + def _calling_func_name(offset=0): + """Get name of the calling function 'offset' frames back.""" + return inspect.stack()[1 + offset][3] + if (_calling_func_name(1) == '_get_segmentation_id' and + _calling_func_name(2) == '_invoke_nexus_for_net_create'): + return None + else: + return orig(self, *args, **kwargs) + + with mock.patch.object(ovs_db_v2, 'get_network_binding', + new=_return_none_if_nexus_caller): + with self._create_port_res(do_delete=False) as res: + self._assertExpectedHTTP(res.status_int, + c_exc.NetworkSegmentIDNotFound) + + def test_nexus_host_non_configured(self): + """Test handling of a NexusComputeHostNotConfigured exception. + + Test the Cisco NexusComputeHostNotConfigured exception by using + a fictitious host name during port creation. + + """ + with self._create_port_res(do_delete=False, + host_id='fakehost') as res: + self._assertExpectedHTTP(res.status_int, + c_exc.NexusComputeHostNotConfigured) + + def _check_rollback_on_bind_failure(self, + vlan_deletion_expected, + vlan_untrunk_expected): + """Test for proper rollback following add Nexus DB binding failure. + + Test that the Cisco Nexus plugin correctly rolls back the vlan + configuration on the Nexus switch when add_nexusport_binding fails + within the plugin's create_port() method. + + """ + inserted_exc = KeyError + with mock.patch.object(nexus_db_v2, 'add_nexusport_binding', + side_effect=inserted_exc): + with self._create_port_res(do_delete=False) as res: + # Confirm that the configuration sent to the Nexus + # switch includes deletion of the vlan (if expected) + # and untrunking of the vlan from the ethernet interface + # (if expected). 
+ self.assertTrue(self._is_vlan_unconfigured( + vlan_deletion_expected=vlan_deletion_expected, + vlan_untrunk_expected=vlan_untrunk_expected)) + self._assertExpectedHTTP(res.status_int, inserted_exc) + + def test_nexus_rollback_on_bind_failure_non_provider_vlan(self): + """Test rollback upon DB binding failure for non-provider vlan.""" + self._check_rollback_on_bind_failure(vlan_deletion_expected=True, + vlan_untrunk_expected=True) + + def test_nexus_rollback_on_bind_failure_prov_vlan_no_auto_create(self): + """Test rollback on bind fail for prov vlan w auto-create disabled.""" + with mock.patch.object(network_db_v2, 'is_provider_vlan', + return_value=True): + # Disable auto-create. This config change will be cleared based + # on cleanup scheduled in the CiscoNetworkPluginV2TestCase + # class' setUp() method. + cisco_config.CONF.set_override('provider_vlan_auto_create', + False, 'CISCO') + self._check_rollback_on_bind_failure(vlan_deletion_expected=False, + vlan_untrunk_expected=True) + + def test_nexus_rollback_on_bind_failure_prov_vlan_no_auto_trunk(self): + """Test rollback on bind fail for prov vlan w auto-trunk disabled.""" + with mock.patch.object(network_db_v2, 'is_provider_vlan', + return_value=True): + # Disable auto-trunk. This config change will be cleared + # based on post-test cleanup scheduled in the + # CiscoNetworkPluginV2TestCase class' setUp() method. + cisco_config.CONF.set_override('provider_vlan_auto_trunk', + False, 'CISCO') + self._check_rollback_on_bind_failure(vlan_deletion_expected=True, + vlan_untrunk_expected=False) + + def test_model_update_port_rollback(self): + """Test for proper rollback for Cisco model layer update port failure. + + Test that the vSwitch plugin port configuration is rolled back + (restored) by the Cisco plugin model layer when there is a + failure in the Nexus sub-plugin for an update port operation. 
+ + The update port operation simulates a port attachment scenario: + first a port is created with no instance (null device_id), + and then a port update is requested with a non-null device_id + to simulate the port attachment. + + """ + with self.port(fmt=self.fmt, device_id='', + device_owner=DEVICE_OWNER) as orig_port: + + inserted_exc = ValueError + with mock.patch.object( + virt_phy_sw_v2.VirtualPhysicalSwitchModelV2, + '_invoke_nexus_for_net_create', + side_effect=inserted_exc): + + # Send an update port request including a non-null device ID + data = {'port': {'device_id': DEVICE_ID_2, + 'device_owner': DEVICE_OWNER, + portbindings.HOST_ID: COMP_HOST_NAME}} + port_id = orig_port['port']['id'] + req = self.new_update_request('ports', data, port_id) + res = req.get_response(self.api) + + # Sanity check failure result code + self._assertExpectedHTTP(res.status_int, inserted_exc) + + # Check that the port still has the original device ID + plugin = base_plugin.NeutronDbPluginV2() + ctx = context.get_admin_context() + db_port = plugin._get_port(ctx, port_id) + self.assertEqual(db_port['device_id'], + orig_port['port']['device_id']) + + def test_model_delete_port_rollback(self): + """Test for proper rollback for OVS plugin delete port failure. + + Test that the nexus port configuration is rolled back (restored) + by the Cisco model plugin when there is a failure in the OVS + plugin for a delete port operation. + + """ + with self._create_port_res() as res: + + # After port is created, we should have one binding for this + # vlan/nexus switch. + port = self.deserialize(self.fmt, res) + start_rows = nexus_db_v2.get_nexusvlan_binding(VLAN_START, + NEXUS_IP_ADDR) + self.assertEqual(len(start_rows), 1) + + # Inject an exception in the OVS plugin delete_port + # processing, and attempt a port deletion. 
+ inserted_exc = n_exc.Conflict + expected_http = base.FAULT_MAP[inserted_exc].code + with mock.patch.object(l3_db.L3_NAT_db_mixin, + 'disassociate_floatingips', + side_effect=inserted_exc): + self._delete('ports', port['port']['id'], + expected_code=expected_http) + + # Confirm that the Cisco model plugin has restored + # the nexus configuration for this port after deletion failure. + end_rows = nexus_db_v2.get_nexusvlan_binding(VLAN_START, + NEXUS_IP_ADDR) + self.assertEqual(start_rows, end_rows) + + def test_nexus_delete_port_rollback(self): + """Test for proper rollback for nexus plugin delete port failure. + + Test for rollback (i.e. restoration) of a VLAN entry in the + nexus database whenever the nexus plugin fails to reconfigure the + nexus switch during a delete_port operation. + + """ + with self._create_port_res() as res: + + port = self.deserialize(self.fmt, res) + + # Check that there is only one binding in the nexus database + # for this VLAN/nexus switch. + start_rows = nexus_db_v2.get_nexusvlan_binding(VLAN_START, + NEXUS_IP_ADDR) + self.assertEqual(len(start_rows), 1) + + # Simulate a Nexus switch configuration error during + # port deletion. + with self._patch_ncclient( + 'manager.connect.return_value.edit_config.side_effect', + AttributeError): + self._delete('ports', port['port']['id'], + base.FAULT_MAP[c_exc.NexusConfigFailed].code) + + # Confirm that the binding has been restored (rolled back). + end_rows = nexus_db_v2.get_nexusvlan_binding(VLAN_START, + NEXUS_IP_ADDR) + self.assertEqual(start_rows, end_rows) + + def test_model_update_port_attach(self): + """Test the model for update_port in attaching to an instance. + + Mock the routines that call into the plugin code, and make sure they + are called with correct arguments. 
+ + """ + with contextlib.nested( + self.port(), + mock.patch.object(virt_phy_sw_v2.VirtualPhysicalSwitchModelV2, + '_invoke_plugin_per_device'), + mock.patch.object(virt_phy_sw_v2.VirtualPhysicalSwitchModelV2, + '_invoke_nexus_for_net_create') + ) as (port, invoke_plugin_per_device, invoke_nexus_for_net_create): + data = {'port': {portbindings.HOST_ID: COMP_HOST_NAME, + 'device_id': DEVICE_ID_1, + 'device_owner': DEVICE_OWNER}} + + req = self.new_update_request('ports', data, port['port']['id']) + # Note, due to mocking out the two model routines, response won't + # contain any useful data + req.get_response(self.api) + + # Note that call_args_list is used instead of + # assert_called_once_with which requires exact match of arguments. + # This is because the mocked routines contain variable number of + # arguments and/or dynamic objects. + self.assertEqual(invoke_plugin_per_device.call_count, 1) + self.assertEqual( + invoke_plugin_per_device.call_args_list[0][0][0:2], + (const.VSWITCH_PLUGIN, 'update_port')) + self.assertEqual(invoke_nexus_for_net_create.call_count, 1) + self.assertEqual( + invoke_nexus_for_net_create.call_args_list[0][0][1:], + (port['port']['tenant_id'], port['port']['network_id'], + data['port']['device_id'], + data['port'][portbindings.HOST_ID],)) + + def test_model_update_port_migrate(self): + """Test the model for update_port in migrating an instance. + + Mock the routines that call into the plugin code, and make sure they + are called with correct arguments. 
+ + """ + arg_list = (portbindings.HOST_ID,) + data = {portbindings.HOST_ID: COMP_HOST_NAME, + 'device_id': DEVICE_ID_1, + 'device_owner': DEVICE_OWNER} + + with contextlib.nested( + self.port(arg_list=arg_list, **data), + mock.patch.object(virt_phy_sw_v2.VirtualPhysicalSwitchModelV2, + '_invoke_plugin_per_device'), + mock.patch.object(virt_phy_sw_v2.VirtualPhysicalSwitchModelV2, + '_invoke_nexus_for_net_create') + ) as (port, invoke_plugin_per_device, invoke_nexus_for_net_create): + data = {'port': {portbindings.HOST_ID: COMP_HOST_NAME_2}} + req = self.new_update_request('ports', data, port['port']['id']) + # Note, due to mocking out the two model routines, response won't + # contain any useful data + req.get_response(self.api) + + # Note that call_args_list is used instead of + # assert_called_once_with which requires exact match of arguments. + # This is because the mocked routines contain variable number of + # arguments and/or dynamic objects. + self.assertEqual(invoke_plugin_per_device.call_count, 2) + self.assertEqual( + invoke_plugin_per_device.call_args_list[0][0][0:2], + (const.VSWITCH_PLUGIN, 'update_port')) + self.assertEqual( + invoke_plugin_per_device.call_args_list[1][0][0:2], + (const.NEXUS_PLUGIN, 'delete_port')) + self.assertEqual(invoke_nexus_for_net_create.call_count, 1) + self.assertEqual( + invoke_nexus_for_net_create.call_args_list[0][0][1:], + (port['port']['tenant_id'], port['port']['network_id'], + port['port']['device_id'], + data['port'][portbindings.HOST_ID],)) + + def test_model_update_port_net_create_not_needed(self): + """Test the model for update_port when no action is needed. + + Mock the routines that call into the plugin code, and make sure that + VSWITCH plugin is called with correct arguments, while NEXUS plugin is + not called at all. 
+ + """ + arg_list = (portbindings.HOST_ID,) + data = {portbindings.HOST_ID: COMP_HOST_NAME, + 'device_id': DEVICE_ID_1, + 'device_owner': DEVICE_OWNER} + + with contextlib.nested( + self.port(arg_list=arg_list, **data), + mock.patch.object(virt_phy_sw_v2.VirtualPhysicalSwitchModelV2, + '_invoke_plugin_per_device'), + mock.patch.object(virt_phy_sw_v2.VirtualPhysicalSwitchModelV2, + '_invoke_nexus_for_net_create') + ) as (port, invoke_plugin_per_device, invoke_nexus_for_net_create): + data = {'port': {portbindings.HOST_ID: COMP_HOST_NAME, + 'device_id': DEVICE_ID_1, + 'device_owner': DEVICE_OWNER}} + req = self.new_update_request('ports', data, port['port']['id']) + # Note, due to mocking out the two model routines, response won't + # contain any useful data + req.get_response(self.api) + + # Note that call_args_list is used instead of + # assert_called_once_with which requires exact match of arguments. + # This is because the mocked routines contain variable number of + # arguments and/or dynamic objects. + self.assertEqual(invoke_plugin_per_device.call_count, 1) + self.assertEqual( + invoke_plugin_per_device.call_args_list[0][0][0:2], + (const.VSWITCH_PLUGIN, 'update_port')) + self.assertFalse(invoke_nexus_for_net_create.called) + + def verify_portbinding(self, host_id1, host_id2, + vlan, device_id, binding_port): + """Verify a port binding entry in the DB is correct.""" + self.assertEqual(host_id1, host_id2) + pb = nexus_db_v2.get_nexusvm_bindings(vlan, device_id) + self.assertEqual(len(pb), 1) + self.assertEqual(pb[0].port_id, binding_port) + self.assertEqual(pb[0].switch_ip, NEXUS_IP_ADDR) + + def test_db_update_port_attach(self): + """Test DB for update_port in attaching to an instance. 
+ + Query DB for the port binding entry corresponding to the search key + (vlan, device_id), and make sure that it's bound to correct switch port + + """ + with self.port() as port: + data = {'port': {portbindings.HOST_ID: COMP_HOST_NAME, + 'device_id': DEVICE_ID_1, + 'device_owner': DEVICE_OWNER}} + + req = self.new_update_request('ports', data, port['port']['id']) + res = self.deserialize(self.fmt, req.get_response(self.api)) + ctx = context.get_admin_context() + net = self._show('networks', res['port']['network_id'], + neutron_context=ctx)['network'] + self.assertTrue(attributes.is_attr_set( + net.get(provider.SEGMENTATION_ID))) + vlan = net[provider.SEGMENTATION_ID] + self.assertEqual(vlan, VLAN_START) + self.verify_portbinding(res['port'][portbindings.HOST_ID], + data['port'][portbindings.HOST_ID], + vlan, + data['port']['device_id'], + NEXUS_PORT_1) + + def test_db_update_port_migrate(self): + """Test DB for update_port in migrating an instance. + + Query DB for the port binding entry corresponding to the search key + (vlan, device_id), and make sure that it's bound to correct switch port + before and after the migration. 
+ + """ + arg_list = (portbindings.HOST_ID,) + data = {portbindings.HOST_ID: COMP_HOST_NAME, + 'device_id': DEVICE_ID_1, + 'device_owner': DEVICE_OWNER} + + with self.port(arg_list=arg_list, **data) as port: + ctx = context.get_admin_context() + net = self._show('networks', port['port']['network_id'], + neutron_context=ctx)['network'] + self.assertTrue(attributes.is_attr_set( + net.get(provider.SEGMENTATION_ID))) + vlan = net[provider.SEGMENTATION_ID] + self.assertEqual(vlan, VLAN_START) + self.verify_portbinding(port['port'][portbindings.HOST_ID], + data[portbindings.HOST_ID], + vlan, + data['device_id'], + NEXUS_PORT_1) + + new_data = {'port': {portbindings.HOST_ID: COMP_HOST_NAME_2}} + req = self.new_update_request('ports', + new_data, port['port']['id']) + res = self.deserialize(self.fmt, req.get_response(self.api)) + self.verify_portbinding(res['port'][portbindings.HOST_ID], + new_data['port'][portbindings.HOST_ID], + vlan, + data['device_id'], + NEXUS_PORT_2) + + def test_delete_ports_by_device_id_second_call_failure(self): + plugin_ref = self._get_plugin_ref() + self._test_delete_ports_by_device_id_second_call_failure(plugin_ref) + + def test_delete_ports_ignores_port_not_found(self): + plugin_ref = self._get_plugin_ref() + self._test_delete_ports_ignores_port_not_found(plugin_ref) + + +class TestCiscoNetworksV2(CiscoNetworkPluginV2TestCase, + test_db_plugin.TestNetworksV2): + + def test_create_networks_bulk_emulated_plugin_failure(self): + real_has_attr = hasattr + + def fakehasattr(item, attr): + if attr.endswith('__native_bulk_support'): + return False + return real_has_attr(item, attr) + + plugin_ref = self._get_plugin_ref() + orig = plugin_ref.create_network + #ensures the API choose the emulation code path + with mock.patch('__builtin__.hasattr', + new=fakehasattr): + with mock.patch.object(plugin_ref, + 'create_network') as patched_plugin: + def side_effect(*args, **kwargs): + return self._do_side_effect(patched_plugin, orig, + *args, **kwargs) + 
patched_plugin.side_effect = side_effect + res = self._create_network_bulk(self.fmt, 2, 'test', True) + LOG.debug("response is %s" % res) + # We expect an internal server error as we injected a fault + self._validate_behavior_on_bulk_failure( + res, + 'networks', + wexc.HTTPInternalServerError.code) + + def test_create_networks_bulk_native_plugin_failure(self): + if self._skip_native_bulk: + self.skipTest("Plugin does not support native bulk network create") + plugin_ref = self._get_plugin_ref() + orig = plugin_ref.create_network + with mock.patch.object(plugin_ref, + 'create_network') as patched_plugin: + + def side_effect(*args, **kwargs): + return self._do_side_effect(patched_plugin, orig, + *args, **kwargs) + + patched_plugin.side_effect = side_effect + res = self._create_network_bulk(self.fmt, 2, 'test', True) + # We expect an internal server error as we injected a fault + self._validate_behavior_on_bulk_failure( + res, + 'networks', + wexc.HTTPInternalServerError.code) + + @contextlib.contextmanager + def _provider_vlan_network(self, phys_net, segment_id, net_name): + provider_attrs = {provider.NETWORK_TYPE: 'vlan', + provider.PHYSICAL_NETWORK: phys_net, + provider.SEGMENTATION_ID: segment_id} + arg_list = tuple(provider_attrs.keys()) + res = self._create_network(self.fmt, net_name, True, + arg_list=arg_list, **provider_attrs) + network = self.deserialize(self.fmt, res)['network'] + yield network + req = self.new_delete_request('networks', network['id']) + req.get_response(self.api) + + def test_create_provider_vlan_network(self): + with self._provider_vlan_network(PHYS_NET, '1234', + 'pvnet1') as network: + expected = [('name', 'pvnet1'), + ('admin_state_up', True), + ('status', 'ACTIVE'), + ('shared', False), + (provider.NETWORK_TYPE, 'vlan'), + (provider.PHYSICAL_NETWORK, PHYS_NET), + (provider.SEGMENTATION_ID, 1234)] + for k, v in expected: + self.assertEqual(network[k], v) + self.assertTrue(network_db_v2.is_provider_network(network['id'])) + + def 
test_delete_provider_vlan_network(self): + with self._provider_vlan_network(PHYS_NET, '1234', + 'pvnet1') as network: + network_id = network['id'] + # Provider network should now be deleted + self.assertFalse(network_db_v2.is_provider_network(network_id)) + + +class TestCiscoSubnetsV2(CiscoNetworkPluginV2TestCase, + test_db_plugin.TestSubnetsV2): + + def test_create_subnets_bulk_emulated_plugin_failure(self): + real_has_attr = hasattr + + #ensures the API choose the emulation code path + def fakehasattr(item, attr): + if attr.endswith('__native_bulk_support'): + return False + return real_has_attr(item, attr) + + with mock.patch('__builtin__.hasattr', + new=fakehasattr): + plugin_ref = self._get_plugin_ref() + orig = plugin_ref.create_subnet + with mock.patch.object(plugin_ref, + 'create_subnet') as patched_plugin: + + def side_effect(*args, **kwargs): + self._do_side_effect(patched_plugin, orig, + *args, **kwargs) + + patched_plugin.side_effect = side_effect + with self.network() as net: + res = self._create_subnet_bulk(self.fmt, 2, + net['network']['id'], + 'test') + # We expect an internal server error as we injected a fault + self._validate_behavior_on_bulk_failure( + res, + 'subnets', + wexc.HTTPInternalServerError.code) + + def test_create_subnets_bulk_native_plugin_failure(self): + if self._skip_native_bulk: + self.skipTest("Plugin does not support native bulk subnet create") + plugin_ref = self._get_plugin_ref() + orig = plugin_ref.create_subnet + with mock.patch.object(plugin_ref, + 'create_subnet') as patched_plugin: + def side_effect(*args, **kwargs): + return self._do_side_effect(patched_plugin, orig, + *args, **kwargs) + + patched_plugin.side_effect = side_effect + with self.network() as net: + res = self._create_subnet_bulk(self.fmt, 2, + net['network']['id'], + 'test') + + # We expect an internal server error as we injected a fault + self._validate_behavior_on_bulk_failure( + res, + 'subnets', + wexc.HTTPInternalServerError.code) + + +class 
TestCiscoRouterInterfacesV2(CiscoNetworkPluginV2TestCase): + + def setUp(self): + """Configure a log exception counter and an API extension manager.""" + self.log_exc_count = 0 + + def _count_exception_logs(*args, **kwargs): + self.log_exc_count += 1 + + mock.patch.object(logging.LoggerAdapter, 'exception', + autospec=True, + side_effect=_count_exception_logs, + wraps=logging.LoggerAdapter.exception).start() + super(TestCiscoRouterInterfacesV2, self).setUp() + ext_mgr = extensions.PluginAwareExtensionManager.get_instance() + self.ext_api = test_extensions.setup_extensions_middleware(ext_mgr) + + @contextlib.contextmanager + def _network_subnet_router(self): + """Context mgr for creating/deleting a net, subnet, and router.""" + with self.network() as network: + with self.subnet(network=network) as subnet: + data = {'router': {'tenant_id': 'test_tenant_id'}} + request = self.new_create_request('routers', data, self.fmt) + response = request.get_response(self.ext_api) + router = self.deserialize(self.fmt, response) + yield network, subnet, router + self._delete('routers', router['router']['id']) + + @contextlib.contextmanager + def _router_interface(self, router, subnet, **kwargs): + """Create a router interface, yield the response, then delete it.""" + interface_data = {} + if subnet: + interface_data['subnet_id'] = subnet['subnet']['id'] + interface_data.update(kwargs) + request = self.new_action_request('routers', interface_data, + router['router']['id'], + 'add_router_interface') + response = request.get_response(self.ext_api) + + yield response + + # If router interface was created successfully, delete it now. 
+ if response.status_int == wexc.HTTPOk.code: + request = self.new_action_request('routers', interface_data, + router['router']['id'], + 'remove_router_interface') + request.get_response(self.ext_api) + + @contextlib.contextmanager + def _network_subnet_router_interface(self, **kwargs): + """Context mgr for create/deleting a net, subnet, router and intf.""" + with self._network_subnet_router() as (network, subnet, router): + with self._router_interface(router, subnet, + **kwargs) as response: + yield response + + def test_port_list_filtered_by_router_id(self): + """Test port list command filtered by router ID.""" + with self._network_subnet_router() as (network, subnet, router): + with self._router_interface(router, subnet): + query_params = "device_id=%s" % router['router']['id'] + req = self.new_list_request('ports', self.fmt, query_params) + res = self.deserialize(self.fmt, req.get_response(self.api)) + self.assertEqual(len(res['ports']), 1) + self.assertEqual(res['ports'][0]['device_id'], + router['router']['id']) + self.assertFalse(self.log_exc_count) + + def test_add_remove_router_intf_with_nexus_l3_enabled(self): + """Verifies proper add/remove intf operation with Nexus L3 enabled. + + With 'nexus_l3_enable' configured to True, confirm that a switched + virtual interface (SVI) is created/deleted on the Nexus switch when + a virtual router interface is created/deleted. + """ + cisco_config.CONF.set_override('nexus_l3_enable', True, 'CISCO') + with self._network_subnet_router_interface(): + self.assertTrue(self._is_in_last_nexus_cfg( + ['interface', 'vlan', 'ip', 'address'])) + # Clear list of calls made to mock ncclient + self.mock_ncclient.reset() + # Router interface is now deleted. Confirm that SVI + # has been deleted from the Nexus switch. 
+ self.assertTrue(self._is_in_nexus_cfg(['no', 'interface', 'vlan'])) + self.assertTrue(self._is_in_last_nexus_cfg(['no', 'vlan'])) + + def test_add_remove_router_intf_with_nexus_l3_disabled(self): + """Verifies proper add/remove intf operation with Nexus L3 disabled. + + With 'nexus_l3_enable' configured to False, confirm that no changes + are made to the Nexus switch running configuration when a virtual + router interface is created and then deleted. + """ + cisco_config.CONF.set_override('nexus_l3_enable', False, 'CISCO') + with self._network_subnet_router_interface(): + self.assertFalse(self.mock_ncclient.manager.connect. + return_value.edit_config.called) + + def test_create_svi_but_subnet_not_specified_exception(self): + """Tests raising of SubnetNotSpecified exception. + + Tests that a SubnetNotSpecified exception is raised when an + add_router_interface request is made for creating a switch virtual + interface (SVI), but the request does not specify a subnet. + """ + cisco_config.CONF.set_override('nexus_l3_enable', True, 'CISCO') + with self._network_subnet_router() as (network, subnet, router): + with self._router_interface(router, subnet=None) as response: + self._assertExpectedHTTP(response.status_int, + c_exc.SubnetNotSpecified) + + def test_create_svi_but_port_id_included_exception(self): + """Tests raising of PortIdForNexusSvi exception. + + Tests that a PortIdForNexusSvi exception is raised when an + add_router_interface request is made for creating a switch virtual + interface (SVI), but the request includes a virtual port ID. 
+ """ + cisco_config.CONF.set_override('nexus_l3_enable', True, 'CISCO') + with self._network_subnet_router_interface( + port_id='my_port_id') as response: + self._assertExpectedHTTP(response.status_int, + c_exc.PortIdForNexusSvi) + + +class TestCiscoPortsV2XML(TestCiscoPortsV2): + fmt = 'xml' + + +class TestCiscoNetworksV2XML(TestCiscoNetworksV2): + fmt = 'xml' + + +class TestCiscoSubnetsV2XML(TestCiscoSubnetsV2): + fmt = 'xml' + + +class TestCiscoRouterInterfacesV2XML(TestCiscoRouterInterfacesV2): + fmt = 'xml' diff --git a/neutron/tests/unit/cisco/test_nexus_db.py b/neutron/tests/unit/cisco/test_nexus_db.py new file mode 100644 index 000000000..49b3dce50 --- /dev/null +++ b/neutron/tests/unit/cisco/test_nexus_db.py @@ -0,0 +1,239 @@ +# Copyright (c) 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+
+import collections
+import mock
+import testtools
+
+from neutron.db import api as db
+from neutron.plugins.cisco.common import cisco_exceptions as c_exc
+from neutron.plugins.cisco.common import config
+from neutron.plugins.cisco.db import nexus_db_v2 as nxdb
+from neutron.plugins.cisco.nexus import cisco_nexus_plugin_v2
+from neutron.tests import base
+
+
+class CiscoNexusDbTest(base.BaseTestCase):
+
+    """Unit tests for cisco.db.nexus_models_v2.NexusPortBinding model."""
+
+    NpbObj = collections.namedtuple('NpbObj', 'port vlan switch instance')
+
+    def setUp(self):
+        super(CiscoNexusDbTest, self).setUp()
+        db.configure_db()
+        self.session = db.get_session()
+        self.addCleanup(db.clear_db)
+
+    def _npb_test_obj(self, pnum, vnum, switch=None, instance=None):
+        """Create a Nexus port binding test object from a pair of numbers."""
+        # NOTE: compare with ==, not 'is'. Identity comparison against a
+        # string literal only works by CPython interning accident and is
+        # not guaranteed by the language.
+        if pnum == 'router':
+            port = pnum
+        else:
+            port = '1/%s' % str(pnum)
+        vlan = str(vnum)
+        if switch is None:
+            switch = '10.9.8.7'
+        if instance is None:
+            instance = 'instance_%s_%s' % (str(pnum), str(vnum))
+        return self.NpbObj(port, vlan, switch, instance)
+
+    def _assert_equal(self, npb, npb_obj):
+        self.assertEqual(npb.port_id, npb_obj.port)
+        self.assertEqual(int(npb.vlan_id), int(npb_obj.vlan))
+        self.assertEqual(npb.switch_ip, npb_obj.switch)
+        self.assertEqual(npb.instance_id, npb_obj.instance)
+
+    def _add_to_db(self, npbs):
+        for npb in npbs:
+            nxdb.add_nexusport_binding(
+                npb.port, npb.vlan, npb.switch, npb.instance)
+
+    def test_nexusportbinding_add_remove(self):
+        npb11 = self._npb_test_obj(10, 100)
+        npb = nxdb.add_nexusport_binding(
+            npb11.port, npb11.vlan, npb11.switch, npb11.instance)
+        self._assert_equal(npb, npb11)
+        npb = nxdb.remove_nexusport_binding(
+            npb11.port, npb11.vlan, npb11.switch, npb11.instance)
+        self.assertEqual(len(npb), 1)
+        self._assert_equal(npb[0], npb11)
+        with testtools.ExpectedException(c_exc.NexusPortBindingNotFound):
+            nxdb.remove_nexusport_binding(
+                npb11.port, npb11.vlan, npb11.switch,
npb11.instance) + + def test_nexusportbinding_get(self): + npb11 = self._npb_test_obj(10, 100) + npb21 = self._npb_test_obj(20, 100) + npb22 = self._npb_test_obj(20, 200) + self._add_to_db([npb11, npb21, npb22]) + + npb = nxdb.get_nexusport_binding( + npb11.port, npb11.vlan, npb11.switch, npb11.instance) + self.assertEqual(len(npb), 1) + self._assert_equal(npb[0], npb11) + npb = nxdb.get_nexusport_binding( + npb21.port, npb21.vlan, npb21.switch, npb21.instance) + self.assertEqual(len(npb), 1) + self._assert_equal(npb[0], npb21) + npb = nxdb.get_nexusport_binding( + npb22.port, npb22.vlan, npb22.switch, npb22.instance) + self.assertEqual(len(npb), 1) + self._assert_equal(npb[0], npb22) + + with testtools.ExpectedException(c_exc.NexusPortBindingNotFound): + nxdb.get_nexusport_binding( + npb21.port, npb21.vlan, npb21.switch, "dummyInstance") + + def test_nexusvlanbinding_get(self): + npb11 = self._npb_test_obj(10, 100) + npb21 = self._npb_test_obj(20, 100) + npb22 = self._npb_test_obj(20, 200) + self._add_to_db([npb11, npb21, npb22]) + + npb_all_v100 = nxdb.get_nexusvlan_binding(npb11.vlan, npb11.switch) + self.assertEqual(len(npb_all_v100), 2) + npb_v200 = nxdb.get_nexusvlan_binding(npb22.vlan, npb22.switch) + self.assertEqual(len(npb_v200), 1) + self._assert_equal(npb_v200[0], npb22) + + with testtools.ExpectedException(c_exc.NexusPortBindingNotFound): + nxdb.get_nexusvlan_binding(npb21.vlan, "dummySwitch") + + def test_nexusvmbinding_get(self): + npb11 = self._npb_test_obj(10, 100) + npb21 = self._npb_test_obj(20, 100) + npb22 = self._npb_test_obj(20, 200) + self._add_to_db([npb11, npb21, npb22]) + + npb = nxdb.get_nexusvm_bindings(npb21.vlan, npb21.instance)[0] + self._assert_equal(npb, npb21) + npb = nxdb.get_nexusvm_bindings(npb22.vlan, npb22.instance)[0] + self._assert_equal(npb, npb22) + + with testtools.ExpectedException(c_exc.NexusPortBindingNotFound): + nxdb.get_nexusvm_bindings(npb21.vlan, "dummyInstance") + + def test_nexusportvlanswitchbinding_get(self): 
+ npb11 = self._npb_test_obj(10, 100) + npb21 = self._npb_test_obj(20, 100) + self._add_to_db([npb11, npb21]) + + npb = nxdb.get_port_vlan_switch_binding( + npb11.port, npb11.vlan, npb11.switch) + self.assertEqual(len(npb), 1) + self._assert_equal(npb[0], npb11) + + with testtools.ExpectedException(c_exc.NexusPortBindingNotFound): + nxdb.get_port_vlan_switch_binding( + npb21.port, npb21.vlan, "dummySwitch") + + def test_nexusportswitchbinding_get(self): + npb11 = self._npb_test_obj(10, 100) + npb21 = self._npb_test_obj(20, 100, switch='2.2.2.2') + npb22 = self._npb_test_obj(20, 200, switch='2.2.2.2') + self._add_to_db([npb11, npb21, npb22]) + + npb = nxdb.get_port_switch_bindings(npb11.port, npb11.switch) + self.assertEqual(len(npb), 1) + self._assert_equal(npb[0], npb11) + npb_all_p20 = nxdb.get_port_switch_bindings(npb21.port, npb21.switch) + self.assertEqual(len(npb_all_p20), 2) + + npb = nxdb.get_port_switch_bindings(npb21.port, "dummySwitch") + self.assertIsNone(npb) + + def test_nexussvibinding_get(self): + npbr1 = self._npb_test_obj('router', 100) + npb21 = self._npb_test_obj(20, 100) + self._add_to_db([npbr1, npb21]) + + npb_svi = nxdb.get_nexussvi_bindings() + self.assertEqual(len(npb_svi), 1) + self._assert_equal(npb_svi[0], npbr1) + + npbr2 = self._npb_test_obj('router', 200) + self._add_to_db([npbr2]) + npb_svi = nxdb.get_nexussvi_bindings() + self.assertEqual(len(npb_svi), 2) + + def test_nexussviswitch_find(self): + """Test Nexus switch selection for SVI placement.""" + # Configure 2 Nexus switches + nexus_switches = { + ('1.1.1.1', 'username'): 'admin', + ('1.1.1.1', 'password'): 'password1', + ('1.1.1.1', 'host1'): '1/1', + ('2.2.2.2', 'username'): 'admin', + ('2.2.2.2', 'password'): 'password2', + ('2.2.2.2', 'host2'): '1/1', + } + nexus_plugin = cisco_nexus_plugin_v2.NexusPlugin() + nexus_plugin._client = mock.Mock() + nexus_plugin._client.nexus_switches = nexus_switches + + # Set the Cisco config module's first configured device IP address + # 
according to the preceding switch config + with mock.patch.object(config, 'first_device_ip', new='1.1.1.1'): + + # Enable round-robin mode with no SVIs configured on any of the + # Nexus switches (i.e. no entries in the SVI database). The + # plugin should select the first switch in the configuration. + config.CONF.set_override('svi_round_robin', True, 'CISCO') + switch_ip = nexus_plugin._find_switch_for_svi() + self.assertEqual(switch_ip, '1.1.1.1') + + # Keep round-robin mode enabled, and add entries to the SVI + # database. The plugin should select the switch with the least + # number of entries in the SVI database. + vlan = 100 + npbr11 = self._npb_test_obj('router', vlan, switch='1.1.1.1', + instance='instance11') + npbr12 = self._npb_test_obj('router', vlan, switch='1.1.1.1', + instance='instance12') + npbr21 = self._npb_test_obj('router', vlan, switch='2.2.2.2', + instance='instance21') + self._add_to_db([npbr11, npbr12, npbr21]) + switch_ip = nexus_plugin._find_switch_for_svi() + self.assertEqual(switch_ip, '2.2.2.2') + + # Disable round-robin mode. The plugin should select the + # first switch in the configuration. 
+ config.CONF.clear_override('svi_round_robin', 'CISCO') + switch_ip = nexus_plugin._find_switch_for_svi() + self.assertEqual(switch_ip, '1.1.1.1') + + def test_nexusbinding_update(self): + npb11 = self._npb_test_obj(10, 100, switch='1.1.1.1', instance='test') + npb21 = self._npb_test_obj(20, 100, switch='1.1.1.1', instance='test') + self._add_to_db([npb11, npb21]) + + npb_all_v100 = nxdb.get_nexusvlan_binding(npb11.vlan, '1.1.1.1') + self.assertEqual(len(npb_all_v100), 2) + + npb22 = self._npb_test_obj(20, 200, switch='1.1.1.1', instance='test') + npb = nxdb.update_nexusport_binding(npb21.port, 200) + self._assert_equal(npb, npb22) + + npb_all_v100 = nxdb.get_nexusvlan_binding(npb11.vlan, '1.1.1.1') + self.assertEqual(len(npb_all_v100), 1) + self._assert_equal(npb_all_v100[0], npb11) + + npb = nxdb.update_nexusport_binding(npb21.port, 0) + self.assertIsNone(npb) + + npb33 = self._npb_test_obj(30, 300, switch='1.1.1.1', instance='test') + with testtools.ExpectedException(c_exc.NexusPortBindingNotFound): + nxdb.update_nexusport_binding(npb33.port, 200) diff --git a/neutron/tests/unit/cisco/test_nexus_plugin.py b/neutron/tests/unit/cisco/test_nexus_plugin.py new file mode 100644 index 000000000..6cf54ab84 --- /dev/null +++ b/neutron/tests/unit/cisco/test_nexus_plugin.py @@ -0,0 +1,301 @@ +# Copyright (c) 2012 OpenStack Foundation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import contextlib +import mock + +from oslo.config import cfg + +from neutron.db import api as db +from neutron.extensions import providernet as provider +from neutron.openstack.common import importutils +from neutron.plugins.cisco.common import cisco_constants as const +from neutron.plugins.cisco.common import cisco_exceptions as cisco_exc +from neutron.plugins.cisco.common import config as cisco_config +from neutron.plugins.cisco.db import network_db_v2 as cdb +from neutron.plugins.cisco.nexus import cisco_nexus_plugin_v2 +from neutron.tests import base + + +NEXUS_IP_ADDRESS = '1.1.1.1' +HOSTNAME1 = 'testhost1' +HOSTNAME2 = 'testhost2' +HOSTNAME3 = 'testhost3' +INSTANCE1 = 'testvm1' +INSTANCE2 = 'testvm2' +INSTANCE3 = 'testvm3' +NEXUS_PORT1 = '1/10' +NEXUS_PORT2 = '1/20' +NEXUS_PC_IP_ADDRESS = '2.2.2.2' +NEXUS_PORTCHANNELS = 'portchannel:2' +PC_HOSTNAME = 'testpchost' +NEXUS_SSH_PORT = '22' +NEXUS_DRIVER = ('neutron.plugins.cisco.nexus.' + 'cisco_nexus_network_driver_v2.CiscoNEXUSDriver') +NET_ATTRS = [const.NET_ID, + const.NET_NAME, + const.NET_VLAN_NAME, + const.NET_VLAN_ID] + + +class TestCiscoNexusPlugin(base.BaseTestCase): + + def setUp(self): + """Set up function.""" + super(TestCiscoNexusPlugin, self).setUp() + self.tenant_id = "test_tenant_cisco1" + self.net_name = "test_network_cisco1" + self.net_id = 7 + self.vlan_name = "q-" + str(self.net_id) + "vlan" + self.vlan_id = 267 + self.second_tenant_id = "test_tenant_2" + self.second_net_name = "test_network_cisco2" + self.second_net_id = 5 + self.second_vlan_name = "q-" + str(self.second_net_id) + "vlan" + self.second_vlan_id = 265 + self._pchostname = PC_HOSTNAME + + self.attachment1 = { + const.TENANT_ID: self.tenant_id, + const.INSTANCE_ID: INSTANCE1, + const.HOST_NAME: HOSTNAME1, + } + self.attachment2 = { + const.TENANT_ID: self.second_tenant_id, + const.INSTANCE_ID: INSTANCE2, + const.HOST_NAME: HOSTNAME2, + } + self.attachment3 = { + const.TENANT_ID: self.second_tenant_id, + const.INSTANCE_ID: 
INSTANCE3, + const.HOST_NAME: HOSTNAME3, + } + self.network1 = { + const.NET_ID: self.net_id, + const.NET_NAME: self.net_name, + const.NET_VLAN_NAME: self.vlan_name, + const.NET_VLAN_ID: self.vlan_id, + } + self.network2 = { + const.NET_ID: self.second_net_id, + const.NET_NAME: self.second_net_name, + const.NET_VLAN_NAME: self.second_vlan_name, + const.NET_VLAN_ID: self.second_vlan_id, + } + self.network3 = { + const.NET_ID: 8, + const.NET_NAME: 'vpc_net', + const.NET_VLAN_NAME: 'q-268', + const.NET_VLAN_ID: '268', + } + self.delete_port_args_1 = [ + self.attachment1[const.INSTANCE_ID], + self.network1[const.NET_VLAN_ID], + ] + self.providernet = { + const.NET_ID: 9, + const.NET_NAME: 'pnet1', + const.NET_VLAN_NAME: 'p-300', + const.NET_VLAN_ID: 300, + provider.NETWORK_TYPE: 'vlan', + provider.PHYSICAL_NETWORK: self.net_name + '200:299', + provider.SEGMENTATION_ID: 300, + } + + def new_nexus_init(self): + self._client = importutils.import_object(NEXUS_DRIVER) + self._client.nexus_switches = { + (NEXUS_IP_ADDRESS, HOSTNAME1): NEXUS_PORT1, + (NEXUS_IP_ADDRESS, 'ssh_port'): NEXUS_SSH_PORT, + (NEXUS_IP_ADDRESS, HOSTNAME2): NEXUS_PORT2, + (NEXUS_IP_ADDRESS, 'ssh_port'): NEXUS_SSH_PORT, + (NEXUS_PC_IP_ADDRESS, 'ssh_port'): NEXUS_SSH_PORT, + } + self._nexus_switches = { + ('NEXUS_SWITCH', NEXUS_IP_ADDRESS, HOSTNAME1): NEXUS_PORT1, + ('NEXUS_SWITCH', NEXUS_IP_ADDRESS, HOSTNAME2): NEXUS_PORT2, + ('NEXUS_SWITCH', NEXUS_PC_IP_ADDRESS, HOSTNAME3): + NEXUS_PORTCHANNELS, + ('NEXUS_SWITCH', NEXUS_PC_IP_ADDRESS, 'ssh_port'): + NEXUS_SSH_PORT, + ('NEXUS_SWITCH', NEXUS_IP_ADDRESS, HOSTNAME3): + NEXUS_PORTCHANNELS, + ('NEXUS_SWITCH', NEXUS_IP_ADDRESS, 'ssh_port'): NEXUS_SSH_PORT, + } + self._client.credentials = { + NEXUS_IP_ADDRESS: { + 'username': 'admin', + 'password': 'pass1234' + }, + NEXUS_PC_IP_ADDRESS: { + 'username': 'admin', + 'password': 'password' + }, + } + db.configure_db() + + self.addCleanup(db.clear_db) + # Use a mock netconf client + self.mock_ncclient = mock.Mock() 
+ + with contextlib.nested( + mock.patch.dict('sys.modules', {'ncclient': self.mock_ncclient}), + mock.patch.object(cisco_nexus_plugin_v2.NexusPlugin, + '__init__', new=new_nexus_init) + ): + self._cisco_nexus_plugin = cisco_nexus_plugin_v2.NexusPlugin() + + # Set the Cisco config module's first configured device IP address + # according to the preceding switch config. + mock.patch.object(cisco_config, 'first_device_ip', + new=NEXUS_IP_ADDRESS).start() + + def test_create_delete_networks(self): + """Tests creation of two new Virtual Networks.""" + new_net_dict = self._cisco_nexus_plugin.create_network( + self.network1, self.attachment1) + for attr in NET_ATTRS: + self.assertEqual(new_net_dict[attr], self.network1[attr]) + + expected_instance_id = self._cisco_nexus_plugin.delete_port( + INSTANCE1, self.vlan_id) + + self.assertEqual(expected_instance_id, INSTANCE1) + + new_net_dict = self._cisco_nexus_plugin.create_network( + self.network2, self.attachment1) + for attr in NET_ATTRS: + self.assertEqual(new_net_dict[attr], self.network2[attr]) + + expected_instance_id = self._cisco_nexus_plugin.delete_port( + INSTANCE1, self.second_vlan_id) + + self.assertEqual(expected_instance_id, INSTANCE1) + + def _create_delete_providernet(self, auto_create, auto_trunk): + cfg.CONF.set_override( + 'provider_vlan_auto_create', auto_create, 'CISCO') + cfg.CONF.set_override( + 'provider_vlan_auto_trunk', auto_trunk, 'CISCO') + with mock.patch.object(cdb, 'is_provider_vlan', + return_value=True) as mock_db: + # Create a provider network + new_net_dict = self._cisco_nexus_plugin.create_network( + self.providernet, self.attachment1) + self.assertEqual(mock_db.call_count, 1) + for attr in NET_ATTRS: + self.assertEqual(new_net_dict[attr], self.providernet[attr]) + # Delete the provider network + instance_id = self._cisco_nexus_plugin.delete_port( + self.attachment1[const.INSTANCE_ID], + self.providernet[const.NET_VLAN_ID]) + self.assertEqual(instance_id, + 
self.attachment1[const.INSTANCE_ID]) + + def test_create_delete_providernet(self): + self._create_delete_providernet(auto_create=True, auto_trunk=True) + + def test_create_delete_provider_vlan_network_cfg_auto_man(self): + self._create_delete_providernet(auto_create=True, auto_trunk=False) + + def test_create_delete_provider_vlan_network_cfg_man_auto(self): + self._create_delete_providernet(auto_create=False, auto_trunk=True) + + def test_create_delete_provider_vlan_network_cfg_man_man(self): + self._create_delete_providernet(auto_create=False, auto_trunk=False) + + def test_create_delete_network_portchannel(self): + """Tests creation of a network over a portchannel.""" + new_net_dict = self._cisco_nexus_plugin.create_network( + self.network3, self.attachment3) + self.assertEqual(new_net_dict[const.NET_ID], + self.network3[const.NET_ID]) + self.assertEqual(new_net_dict[const.NET_NAME], + self.network3[const.NET_NAME]) + self.assertEqual(new_net_dict[const.NET_VLAN_NAME], + self.network3[const.NET_VLAN_NAME]) + self.assertEqual(new_net_dict[const.NET_VLAN_ID], + self.network3[const.NET_VLAN_ID]) + + self._cisco_nexus_plugin.delete_port( + INSTANCE3, self.network3[const.NET_VLAN_ID] + ) + + def _add_router_interface(self): + """Add a router interface using fixed (canned) parameters.""" + vlan_name = self.vlan_name + vlan_id = self.vlan_id + gateway_ip = '10.0.0.1/24' + router_id = '00000R1' + subnet_id = '00001' + return self._cisco_nexus_plugin.add_router_interface( + vlan_name, vlan_id, subnet_id, gateway_ip, router_id) + + def _remove_router_interface(self): + """Remove a router interface created with _add_router_interface.""" + vlan_id = self.vlan_id + router_id = '00000R1' + return self._cisco_nexus_plugin.remove_router_interface(vlan_id, + router_id) + + def test_nexus_add_remove_router_interface(self): + """Tests addition of a router interface.""" + self.assertTrue(self._add_router_interface()) + self.assertEqual(self._remove_router_interface(), '00000R1') + + 
def test_nexus_dup_add_router_interface(self): + """Tests a duplicate add of a router interface.""" + self._add_router_interface() + try: + self.assertRaises( + cisco_exc.SubnetInterfacePresent, + self._add_router_interface) + finally: + self._remove_router_interface() + + def test_nexus_no_svi_switch_exception(self): + """Tests failure to find a Nexus switch for SVI placement.""" + # Clear the Nexus switches dictionary. + with mock.patch.dict(self._cisco_nexus_plugin._client.nexus_switches, + {}, clear=True): + # Clear the first Nexus IP address discovered in config + with mock.patch.object(cisco_config, 'first_device_ip', + new=None): + self.assertRaises(cisco_exc.NoNexusSviSwitch, + self._add_router_interface) + + def test_nexus_add_port_after_router_interface(self): + """Tests creating a port after a router interface. + + Test creating a port after an SVI router interface has + been created. Only a trunk call should be invoked and the + plugin should not attempt to recreate the vlan. + """ + self._add_router_interface() + # Create a network on the switch + self._cisco_nexus_plugin.create_network( + self.network1, self.attachment1) + + # Grab a list of all mock calls from ncclient + last_cfgs = (self.mock_ncclient.manager.connect.return_value. + edit_config.mock_calls) + + # The last ncclient call should be for trunking and the second + # to last call should be creating the SVI interface + last_cfg = last_cfgs[-1][2]['config'] + self.assertIn('allowed', last_cfg) + + slast_cfg = last_cfgs[-2][2]['config'] + self.assertIn('10.0.0.1/24', slast_cfg) diff --git a/neutron/tests/unit/cisco/test_plugin_model.py b/neutron/tests/unit/cisco/test_plugin_model.py new file mode 100755 index 000000000..fa87a5010 --- /dev/null +++ b/neutron/tests/unit/cisco/test_plugin_model.py @@ -0,0 +1,63 @@ +# Copyright 2014 Cisco Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import sys + +import mock + +from neutron import context +from neutron.plugins.cisco.common import cisco_constants as const +from neutron.plugins.cisco.common import config as cisco_config +from neutron.plugins.cisco.models import virt_phy_sw_v2 +from neutron.plugins.cisco.nexus import cisco_nexus_plugin_v2 +from neutron.tests import base + + +class TestCiscoPluginModel(base.BaseTestCase): + + def setUp(self): + # Point config file to: neutron/tests/etc/neutron.conf.test + self.config_parse() + + super(TestCiscoPluginModel, self).setUp() + + def test_non_nexus_device_driver(self): + """Tests handling of an non-Nexus device driver being configured.""" + with mock.patch.dict(sys.modules, {'mock_driver': mock.Mock()}): + cisco_config.CONF.set_override('nexus_driver', + 'mock_driver.Non_Nexus_Driver', + 'CISCO') + # Plugin model instance should have is_nexus_plugin set to False + model = virt_phy_sw_v2.VirtualPhysicalSwitchModelV2() + self.assertFalse(model.is_nexus_plugin) + + # Model's _invoke_nexus_for_net_create should just return False + user_id = 'user_id' + tenant_id = 'tenant_id' + ctx = context.Context(user_id, tenant_id) + self.assertFalse(model._invoke_nexus_for_net_create( + ctx, tenant_id, net_id='net_id', + instance_id='instance_id', host_id='host_id')) + + def test_nexus_plugin_calls_ignored_if_plugin_not_loaded(self): + """Verifies Nexus plugin calls are ignored if plugin is not loaded.""" + cisco_config.CONF.set_override(const.NEXUS_PLUGIN, + None, 'CISCO_PLUGINS') + with mock.patch.object(cisco_nexus_plugin_v2.NexusPlugin, + 
'create_network') as mock_create_network: + model = virt_phy_sw_v2.VirtualPhysicalSwitchModelV2() + model._invoke_plugin_per_device(model, const.NEXUS_PLUGIN, + 'create_network') + self.assertFalse(mock_create_network.called) diff --git a/neutron/tests/unit/database_stubs.py b/neutron/tests/unit/database_stubs.py new file mode 100644 index 000000000..5d388cfa2 --- /dev/null +++ b/neutron/tests/unit/database_stubs.py @@ -0,0 +1,188 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011, Cisco Systems, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Rohit Agarwalla, Cisco Systems, Inc. 
+
+"""stubs.py provides interface methods for the database test cases"""
+
+from neutron.db import api as db
+from neutron.openstack.common import log as logging
+
+
+LOG = logging.getLogger(__name__)
+
+
+class NeutronDB(object):
+    """Class consisting of methods to call Neutron db methods."""
+    def get_all_networks(self, tenant_id):
+        """Get all networks."""
+        nets = []
+        try:
+            for net in db.network_list(tenant_id):
+                LOG.debug("Getting network: %s", net.uuid)
+                net_dict = {}
+                net_dict["tenant_id"] = net.tenant_id
+                net_dict["id"] = str(net.uuid)
+                net_dict["name"] = net.name
+                nets.append(net_dict)
+        except Exception as exc:
+            LOG.error("Failed to get all networks: %s", str(exc))
+        return nets
+
+    def get_network(self, network_id):
+        """Get a network."""
+        # NOTE: the accumulator must not share a name with the loop
+        # variable; the original code rebound 'net' inside the loop, so
+        # .append() was called on a DB row (the AttributeError was then
+        # silently swallowed by the broad except) and a row object, not a
+        # list of dicts, was returned.
+        net_list = []
+        try:
+            for net in db.network_get(network_id):
+                LOG.debug("Getting network: %s", net.uuid)
+                net_dict = {}
+                net_dict["tenant_id"] = net.tenant_id
+                net_dict["id"] = str(net.uuid)
+                net_dict["name"] = net.name
+                net_list.append(net_dict)
+        except Exception as exc:
+            LOG.error("Failed to get network: %s", str(exc))
+        return net_list
+
+    def create_network(self, tenant_id, net_name):
+        """Create a network."""
+        net_dict = {}
+        try:
+            res = db.network_create(tenant_id, net_name)
+            LOG.debug("Created network: %s", res.uuid)
+            net_dict["tenant_id"] = res.tenant_id
+            net_dict["id"] = str(res.uuid)
+            net_dict["name"] = res.name
+            return net_dict
+        except Exception as exc:
+            LOG.error("Failed to create network: %s", str(exc))
+
+    def delete_network(self, net_id):
+        """Delete a network."""
+        try:
+            net = db.network_destroy(net_id)
+            LOG.debug("Deleted network: %s", net.uuid)
+            net_dict = {}
+            net_dict["id"] = str(net.uuid)
+            return net_dict
+        except Exception as exc:
+            LOG.error("Failed to delete network: %s", str(exc))
+
+    def update_network(self, tenant_id, net_id, param_data):
+        """Rename a network."""
+        try:
+            net = db.network_update(net_id, tenant_id, **param_data)
+            LOG.debug("Updated network: %s", net.uuid)
+
net_dict = {} + net_dict["id"] = str(net.uuid) + net_dict["name"] = net.name + return net_dict + except Exception as exc: + LOG.error("Failed to update network: %s", str(exc)) + + def get_all_ports(self, net_id): + """Get all ports.""" + ports = [] + try: + for port in db.port_list(net_id): + LOG.debug("Getting port: %s", port.uuid) + port_dict = {} + port_dict["id"] = str(port.uuid) + port_dict["net-id"] = str(port.network_id) + port_dict["attachment"] = port.interface_id + port_dict["state"] = port.state + ports.append(port_dict) + return ports + except Exception as exc: + LOG.error("Failed to get all ports: %s", str(exc)) + + def get_port(self, net_id, port_id): + """Get a port.""" + port_list = [] + port = db.port_get(port_id, net_id) + try: + LOG.debug("Getting port: %s", port.uuid) + port_dict = {} + port_dict["id"] = str(port.uuid) + port_dict["net-id"] = str(port.network_id) + port_dict["attachment"] = port.interface_id + port_dict["state"] = port.state + port_list.append(port_dict) + return port_list + except Exception as exc: + LOG.error("Failed to get port: %s", str(exc)) + + def create_port(self, net_id): + """Add a port.""" + port_dict = {} + try: + port = db.port_create(net_id) + LOG.debug("Creating port %s", port.uuid) + port_dict["id"] = str(port.uuid) + port_dict["net-id"] = str(port.network_id) + port_dict["attachment"] = port.interface_id + port_dict["state"] = port.state + return port_dict + except Exception as exc: + LOG.error("Failed to create port: %s", str(exc)) + + def delete_port(self, net_id, port_id): + """Delete a port.""" + try: + port = db.port_destroy(port_id, net_id) + LOG.debug("Deleted port %s", port.uuid) + port_dict = {} + port_dict["id"] = str(port.uuid) + return port_dict + except Exception as exc: + LOG.error("Failed to delete port: %s", str(exc)) + + def update_port(self, net_id, port_id, **kwargs): + """Update a port.""" + try: + port = db.port_update(port_id, net_id, **kwargs) + LOG.debug("Updated port %s", port.uuid) + 
port_dict = {} + port_dict["id"] = str(port.uuid) + port_dict["net-id"] = str(port.network_id) + port_dict["attachment"] = port.interface_id + port_dict["state"] = port.state + return port_dict + except Exception as exc: + LOG.error("Failed to update port state: %s", str(exc)) + + def plug_interface(self, net_id, port_id, int_id): + """Plug interface to a port.""" + try: + port = db.port_set_attachment(port_id, net_id, int_id) + LOG.debug("Attached interface to port %s", port.uuid) + port_dict = {} + port_dict["id"] = str(port.uuid) + port_dict["net-id"] = str(port.network_id) + port_dict["attachment"] = port.interface_id + port_dict["state"] = port.state + return port_dict + except Exception as exc: + LOG.error("Failed to plug interface: %s", str(exc)) + + def unplug_interface(self, net_id, port_id): + """Unplug interface to a port.""" + try: + db.port_unset_attachment(port_id, net_id) + LOG.debug("Detached interface from port %s", port_id) + except Exception as exc: + LOG.error("Failed to unplug interface: %s", str(exc)) diff --git a/neutron/tests/unit/db/__init__.py b/neutron/tests/unit/db/__init__.py new file mode 100644 index 000000000..cae279d0a --- /dev/null +++ b/neutron/tests/unit/db/__init__.py @@ -0,0 +1,15 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
diff --git a/neutron/tests/unit/db/firewall/__init__.py b/neutron/tests/unit/db/firewall/__init__.py new file mode 100644 index 000000000..cae279d0a --- /dev/null +++ b/neutron/tests/unit/db/firewall/__init__.py @@ -0,0 +1,15 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/neutron/tests/unit/db/firewall/test_db_firewall.py b/neutron/tests/unit/db/firewall/test_db_firewall.py new file mode 100644 index 000000000..69cf36483 --- /dev/null +++ b/neutron/tests/unit/db/firewall/test_db_firewall.py @@ -0,0 +1,1055 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013 Big Switch Networks, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Sumit Naiksatam, sumitnaiksatam@gmail.com, Big Switch Networks, Inc. 
+ +import contextlib +import logging + +import mock +import webob.exc + +from neutron.api import extensions as api_ext +from neutron.common import config +from neutron import context +from neutron.db.firewall import firewall_db as fdb +import neutron.extensions +from neutron.extensions import firewall +from neutron import manager +from neutron.openstack.common import importutils +from neutron.openstack.common import uuidutils +from neutron.plugins.common import constants +from neutron.services.firewall import fwaas_plugin +from neutron.tests.unit import test_db_plugin + + +LOG = logging.getLogger(__name__) +DB_FW_PLUGIN_KLASS = ( + "neutron.db.firewall.firewall_db.Firewall_db_mixin" +) +FWAAS_PLUGIN = 'neutron.services.firewall.fwaas_plugin' +DELETEFW_PATH = FWAAS_PLUGIN + '.FirewallAgentApi.delete_firewall' +extensions_path = ':'.join(neutron.extensions.__path__) +DESCRIPTION = 'default description' +SHARED = True +PROTOCOL = 'tcp' +IP_VERSION = 4 +SOURCE_IP_ADDRESS_RAW = '1.1.1.1' +DESTINATION_IP_ADDRESS_RAW = '2.2.2.2' +SOURCE_PORT = '55000:56000' +DESTINATION_PORT = '56000:57000' +ACTION = 'allow' +AUDITED = True +ENABLED = True +ADMIN_STATE_UP = True + + +class FakeAgentApi(fwaas_plugin.FirewallCallbacks): + """ + This class used to mock the AgentAPI delete method inherits from + FirewallCallbacks because it needs access to the firewall_deleted method. + The delete_firewall method belongs to the FirewallAgentApi, which has + no access to the firewall_deleted method normally because it's not + responsible for deleting the firewall from the DB. However, it needs + to in the unit tests since there is no agent to call back. 
+ """ + def __init__(self): + pass + + def delete_firewall(self, context, firewall, **kwargs): + self.plugin = manager.NeutronManager.get_service_plugins()['FIREWALL'] + self.firewall_deleted(context, firewall['id'], **kwargs) + + +class FirewallPluginDbTestCase(test_db_plugin.NeutronDbPluginV2TestCase): + resource_prefix_map = dict( + (k, constants.COMMON_PREFIXES[constants.FIREWALL]) + for k in firewall.RESOURCE_ATTRIBUTE_MAP.keys() + ) + + def setUp(self, core_plugin=None, fw_plugin=None, ext_mgr=None): + self.agentapi_delf_p = mock.patch(DELETEFW_PATH, create=True, + new=FakeAgentApi().delete_firewall) + self.agentapi_delf_p.start() + if not fw_plugin: + fw_plugin = DB_FW_PLUGIN_KLASS + service_plugins = {'fw_plugin_name': fw_plugin} + + fdb.Firewall_db_mixin.supported_extension_aliases = ["fwaas"] + super(FirewallPluginDbTestCase, self).setUp( + ext_mgr=ext_mgr, + service_plugins=service_plugins + ) + + if not ext_mgr: + self.plugin = importutils.import_object(fw_plugin) + ext_mgr = api_ext.PluginAwareExtensionManager( + extensions_path, + {constants.FIREWALL: self.plugin} + ) + app = config.load_paste_app('extensions_test_app') + self.ext_api = api_ext.ExtensionMiddleware(app, ext_mgr=ext_mgr) + + def _test_list_resources(self, resource, items, + neutron_context=None, + query_params=None): + if resource.endswith('y'): + resource_plural = resource.replace('y', 'ies') + else: + resource_plural = resource + 's' + + res = self._list(resource_plural, + neutron_context=neutron_context, + query_params=query_params) + resource = resource.replace('-', '_') + self.assertEqual(sorted([i['id'] for i in res[resource_plural]]), + sorted([i[resource]['id'] for i in items])) + + def _get_test_firewall_rule_attrs(self, name='firewall_rule1'): + attrs = {'name': name, + 'tenant_id': self._tenant_id, + 'shared': SHARED, + 'protocol': PROTOCOL, + 'ip_version': IP_VERSION, + 'source_ip_address': SOURCE_IP_ADDRESS_RAW, + 'destination_ip_address': DESTINATION_IP_ADDRESS_RAW, + 
'source_port': SOURCE_PORT, + 'destination_port': DESTINATION_PORT, + 'action': ACTION, + 'enabled': ENABLED} + return attrs + + def _get_test_firewall_policy_attrs(self, name='firewall_policy1'): + attrs = {'name': name, + 'description': DESCRIPTION, + 'tenant_id': self._tenant_id, + 'shared': SHARED, + 'firewall_rules': [], + 'audited': AUDITED} + return attrs + + def _get_test_firewall_attrs(self, name='firewall_1'): + attrs = {'name': name, + 'tenant_id': self._tenant_id, + 'admin_state_up': ADMIN_STATE_UP, + 'status': 'PENDING_CREATE'} + + return attrs + + def _create_firewall_policy(self, fmt, name, description, shared, + firewall_rules, audited, + expected_res_status=None, **kwargs): + tenant_id = kwargs.get('tenant_id', self._tenant_id) + data = {'firewall_policy': {'name': name, + 'description': description, + 'tenant_id': tenant_id, + 'shared': shared, + 'firewall_rules': firewall_rules, + 'audited': audited}} + + fw_policy_req = self.new_create_request('firewall_policies', data, fmt) + fw_policy_res = fw_policy_req.get_response(self.ext_api) + if expected_res_status: + self.assertEqual(fw_policy_res.status_int, expected_res_status) + + return fw_policy_res + + def _replace_firewall_status(self, attrs, old_status, new_status): + if attrs['status'] is old_status: + attrs['status'] = new_status + return attrs + + @contextlib.contextmanager + def firewall_policy(self, fmt=None, name='firewall_policy1', + description=DESCRIPTION, shared=True, + firewall_rules=None, audited=True, + no_delete=False, **kwargs): + if firewall_rules is None: + firewall_rules = [] + if not fmt: + fmt = self.fmt + res = self._create_firewall_policy(fmt, name, description, shared, + firewall_rules, audited, + **kwargs) + if res.status_int >= 400: + raise webob.exc.HTTPClientError(code=res.status_int) + firewall_policy = self.deserialize(fmt or self.fmt, res) + yield firewall_policy + if not no_delete: + self._delete('firewall_policies', + firewall_policy['firewall_policy']['id']) + + 
def _create_firewall_rule(self, fmt, name, shared, protocol, + ip_version, source_ip_address, + destination_ip_address, source_port, + destination_port, action, enabled, + expected_res_status=None, **kwargs): + tenant_id = kwargs.get('tenant_id', self._tenant_id) + data = {'firewall_rule': {'name': name, + 'tenant_id': tenant_id, + 'shared': shared, + 'protocol': protocol, + 'ip_version': ip_version, + 'source_ip_address': source_ip_address, + 'destination_ip_address': + destination_ip_address, + 'source_port': source_port, + 'destination_port': destination_port, + 'action': action, + 'enabled': enabled}} + + fw_rule_req = self.new_create_request('firewall_rules', data, fmt) + fw_rule_res = fw_rule_req.get_response(self.ext_api) + if expected_res_status: + self.assertEqual(fw_rule_res.status_int, expected_res_status) + + return fw_rule_res + + @contextlib.contextmanager + def firewall_rule(self, fmt=None, name='firewall_rule1', + shared=SHARED, protocol=PROTOCOL, ip_version=IP_VERSION, + source_ip_address=SOURCE_IP_ADDRESS_RAW, + destination_ip_address=DESTINATION_IP_ADDRESS_RAW, + source_port=SOURCE_PORT, + destination_port=DESTINATION_PORT, + action=ACTION, enabled=ENABLED, + no_delete=False, **kwargs): + if not fmt: + fmt = self.fmt + res = self._create_firewall_rule(fmt, name, shared, protocol, + ip_version, source_ip_address, + destination_ip_address, + source_port, destination_port, + action, enabled, **kwargs) + if res.status_int >= 400: + raise webob.exc.HTTPClientError(code=res.status_int) + firewall_rule = self.deserialize(fmt or self.fmt, res) + yield firewall_rule + if not no_delete: + self._delete('firewall_rules', + firewall_rule['firewall_rule']['id']) + + def _create_firewall(self, fmt, name, description, firewall_policy_id, + admin_state_up=True, expected_res_status=None, + **kwargs): + tenant_id = kwargs.get('tenant_id', self._tenant_id) + data = {'firewall': {'name': name, + 'description': description, + 'firewall_policy_id': firewall_policy_id, 
+ 'admin_state_up': admin_state_up, + 'tenant_id': tenant_id}} + + firewall_req = self.new_create_request('firewalls', data, fmt) + firewall_res = firewall_req.get_response(self.ext_api) + if expected_res_status: + self.assertEqual(firewall_res.status_int, expected_res_status) + + return firewall_res + + @contextlib.contextmanager + def firewall(self, fmt=None, name='firewall_1', description=DESCRIPTION, + firewall_policy_id=None, admin_state_up=True, + no_delete=False, **kwargs): + if not fmt: + fmt = self.fmt + res = self._create_firewall(fmt, name, description, firewall_policy_id, + admin_state_up, **kwargs) + if res.status_int >= 400: + raise webob.exc.HTTPClientError(code=res.status_int) + firewall = self.deserialize(fmt or self.fmt, res) + yield firewall + if not no_delete: + self._delete('firewalls', firewall['firewall']['id']) + + def _rule_action(self, action, id, firewall_rule_id, insert_before=None, + insert_after=None, expected_code=webob.exc.HTTPOk.code, + expected_body=None, body_data=None): + # We intentionally do this check for None since we want to distinguish + # from empty dictionary + if body_data is None: + if action == 'insert': + body_data = {'firewall_rule_id': firewall_rule_id, + 'insert_before': insert_before, + 'insert_after': insert_after} + else: + body_data = {'firewall_rule_id': firewall_rule_id} + + req = self.new_action_request('firewall_policies', + body_data, id, + "%s_rule" % action) + res = req.get_response(self.ext_api) + self.assertEqual(res.status_int, expected_code) + response = self.deserialize(self.fmt, res) + if expected_body: + self.assertEqual(response, expected_body) + return response + + def _compare_firewall_rule_lists(self, firewall_policy_id, + list1, list2): + position = 0 + for r1, r2 in zip(list1, list2): + rule = r1['firewall_rule'] + rule['firewall_policy_id'] = firewall_policy_id + position += 1 + rule['position'] = position + for k in rule: + self.assertEqual(rule[k], r2[k]) + + +class 
TestFirewallDBPlugin(FirewallPluginDbTestCase): + + def test_create_firewall_policy(self): + name = "firewall_policy1" + attrs = self._get_test_firewall_policy_attrs(name) + + with self.firewall_policy(name=name, shared=SHARED, + firewall_rules=None, + audited=AUDITED) as firewall_policy: + for k, v in attrs.iteritems(): + self.assertEqual(firewall_policy['firewall_policy'][k], v) + + def test_create_firewall_policy_with_rules(self): + name = "firewall_policy1" + attrs = self._get_test_firewall_policy_attrs(name) + + with contextlib.nested(self.firewall_rule(name='fwr1'), + self.firewall_rule(name='fwr2'), + self.firewall_rule(name='fwr3')) as fr: + fw_rule_ids = [r['firewall_rule']['id'] for r in fr] + attrs['firewall_rules'] = fw_rule_ids + with self.firewall_policy(name=name, shared=SHARED, + firewall_rules=fw_rule_ids, + audited=AUDITED) as fwp: + for k, v in attrs.iteritems(): + self.assertEqual(fwp['firewall_policy'][k], v) + + def test_create_firewall_policy_with_previously_associated_rule(self): + with self.firewall_rule() as fwr: + fw_rule_ids = [fwr['firewall_rule']['id']] + with self.firewall_policy(firewall_rules=fw_rule_ids): + res = self._create_firewall_policy( + None, 'firewall_policy2', description=DESCRIPTION, + shared=SHARED, firewall_rules=fw_rule_ids, + audited=AUDITED) + self.assertEqual(res.status_int, 409) + + def test_show_firewall_policy(self): + name = "firewall_policy1" + attrs = self._get_test_firewall_policy_attrs(name) + + with self.firewall_policy(name=name, shared=SHARED, + firewall_rules=None, + audited=AUDITED) as fwp: + req = self.new_show_request('firewall_policies', + fwp['firewall_policy']['id'], + fmt=self.fmt) + res = self.deserialize(self.fmt, req.get_response(self.ext_api)) + for k, v in attrs.iteritems(): + self.assertEqual(res['firewall_policy'][k], v) + + def test_list_firewall_policies(self): + with contextlib.nested(self.firewall_policy(name='fwp1', + description='fwp'), + self.firewall_policy(name='fwp2', + 
description='fwp'), + self.firewall_policy(name='fwp3', + description='fwp') + ) as fw_policies: + self._test_list_resources('firewall_policy', + fw_policies, + query_params='description=fwp') + + def test_update_firewall_policy(self): + name = "new_firewall_policy1" + attrs = self._get_test_firewall_policy_attrs(name) + + with self.firewall_policy(shared=SHARED, + firewall_rules=None, + audited=AUDITED) as fwp: + data = {'firewall_policy': {'name': name}} + req = self.new_update_request('firewall_policies', data, + fwp['firewall_policy']['id']) + res = self.deserialize(self.fmt, req.get_response(self.ext_api)) + for k, v in attrs.iteritems(): + self.assertEqual(res['firewall_policy'][k], v) + + def test_update_firewall_policy_with_rules(self): + attrs = self._get_test_firewall_policy_attrs() + + with contextlib.nested(self.firewall_rule(name='fwr1'), + self.firewall_rule(name='fwr2'), + self.firewall_rule(name='fwr3')) as fr: + with self.firewall_policy() as fwp: + fw_rule_ids = [r['firewall_rule']['id'] for r in fr] + attrs['firewall_rules'] = fw_rule_ids + data = {'firewall_policy': + {'firewall_rules': fw_rule_ids}} + req = self.new_update_request('firewall_policies', data, + fwp['firewall_policy']['id']) + res = self.deserialize(self.fmt, + req.get_response(self.ext_api)) + attrs['audited'] = False + for k, v in attrs.iteritems(): + self.assertEqual(res['firewall_policy'][k], v) + + def test_update_firewall_policy_replace_rules(self): + attrs = self._get_test_firewall_policy_attrs() + + with contextlib.nested(self.firewall_rule(name='fwr1'), + self.firewall_rule(name='fwr2'), + self.firewall_rule(name='fwr3'), + self.firewall_rule(name='fwr4')) as frs: + fr1 = frs[0:2] + fr2 = frs[2:4] + with self.firewall_policy() as fwp: + fw_rule_ids = [r['firewall_rule']['id'] for r in fr1] + data = {'firewall_policy': + {'firewall_rules': fw_rule_ids}} + req = self.new_update_request('firewall_policies', data, + fwp['firewall_policy']['id']) + 
req.get_response(self.ext_api) + + fw_rule_ids = [r['firewall_rule']['id'] for r in fr2] + attrs['firewall_rules'] = fw_rule_ids + new_data = {'firewall_policy': + {'firewall_rules': fw_rule_ids}} + req = self.new_update_request('firewall_policies', new_data, + fwp['firewall_policy']['id']) + res = self.deserialize(self.fmt, + req.get_response(self.ext_api)) + attrs['audited'] = False + for k, v in attrs.iteritems(): + self.assertEqual(res['firewall_policy'][k], v) + + def test_update_firewall_policy_reorder_rules(self): + attrs = self._get_test_firewall_policy_attrs() + + with contextlib.nested(self.firewall_rule(name='fwr1'), + self.firewall_rule(name='fwr2'), + self.firewall_rule(name='fwr3'), + self.firewall_rule(name='fwr4')) as fr: + with self.firewall_policy() as fwp: + fw_rule_ids = [fr[2]['firewall_rule']['id'], + fr[3]['firewall_rule']['id']] + data = {'firewall_policy': + {'firewall_rules': fw_rule_ids}} + req = self.new_update_request('firewall_policies', data, + fwp['firewall_policy']['id']) + req.get_response(self.ext_api) + # shuffle the rules, add more rules + fw_rule_ids = [fr[1]['firewall_rule']['id'], + fr[3]['firewall_rule']['id'], + fr[2]['firewall_rule']['id'], + fr[0]['firewall_rule']['id']] + attrs['firewall_rules'] = fw_rule_ids + data = {'firewall_policy': + {'firewall_rules': fw_rule_ids}} + req = self.new_update_request('firewall_policies', data, + fwp['firewall_policy']['id']) + res = self.deserialize(self.fmt, + req.get_response(self.ext_api)) + rules = [] + for rule_id in fw_rule_ids: + req = self.new_show_request('firewall_rules', + rule_id, + fmt=self.fmt) + res = self.deserialize(self.fmt, + req.get_response(self.ext_api)) + rules.append(res['firewall_rule']) + self.assertEqual(rules[0]['position'], 1) + self.assertEqual(rules[0]['id'], fr[1]['firewall_rule']['id']) + self.assertEqual(rules[1]['position'], 2) + self.assertEqual(rules[1]['id'], fr[3]['firewall_rule']['id']) + self.assertEqual(rules[2]['position'], 3) + 
self.assertEqual(rules[2]['id'], fr[2]['firewall_rule']['id']) + self.assertEqual(rules[3]['position'], 4) + self.assertEqual(rules[3]['id'], fr[0]['firewall_rule']['id']) + + def test_update_firewall_policy_with_non_existing_rule(self): + attrs = self._get_test_firewall_policy_attrs() + + with contextlib.nested(self.firewall_rule(name='fwr1'), + self.firewall_rule(name='fwr2')) as fr: + with self.firewall_policy() as fwp: + fw_rule_ids = [r['firewall_rule']['id'] for r in fr] + # appending non-existent rule + fw_rule_ids.append(uuidutils.generate_uuid()) + data = {'firewall_policy': + {'firewall_rules': fw_rule_ids}} + req = self.new_update_request('firewall_policies', data, + fwp['firewall_policy']['id']) + res = req.get_response(self.ext_api) + #check that the firewall_rule was not found + self.assertEqual(res.status_int, 404) + #check if none of the rules got added to the policy + req = self.new_show_request('firewall_policies', + fwp['firewall_policy']['id'], + fmt=self.fmt) + res = self.deserialize(self.fmt, + req.get_response(self.ext_api)) + for k, v in attrs.iteritems(): + self.assertEqual(res['firewall_policy'][k], v) + + def test_delete_firewall_policy(self): + ctx = context.get_admin_context() + with self.firewall_policy(no_delete=True) as fwp: + fwp_id = fwp['firewall_policy']['id'] + req = self.new_delete_request('firewall_policies', fwp_id) + res = req.get_response(self.ext_api) + self.assertEqual(res.status_int, 204) + self.assertRaises(firewall.FirewallPolicyNotFound, + self.plugin.get_firewall_policy, + ctx, fwp_id) + + def test_delete_firewall_policy_with_rule(self): + ctx = context.get_admin_context() + attrs = self._get_test_firewall_policy_attrs() + with self.firewall_policy(no_delete=True) as fwp: + fwp_id = fwp['firewall_policy']['id'] + with self.firewall_rule(name='fwr1') as fr: + fr_id = fr['firewall_rule']['id'] + fw_rule_ids = [fr_id] + attrs['firewall_rules'] = fw_rule_ids + data = {'firewall_policy': + {'firewall_rules': fw_rule_ids}} 
+ req = self.new_update_request('firewall_policies', data, + fwp['firewall_policy']['id']) + req.get_response(self.ext_api) + fw_rule = self.plugin.get_firewall_rule(ctx, fr_id) + self.assertEqual(fw_rule['firewall_policy_id'], fwp_id) + req = self.new_delete_request('firewall_policies', fwp_id) + res = req.get_response(self.ext_api) + self.assertEqual(res.status_int, 204) + self.assertRaises(firewall.FirewallPolicyNotFound, + self.plugin.get_firewall_policy, + ctx, fwp_id) + fw_rule = self.plugin.get_firewall_rule(ctx, fr_id) + self.assertIsNone(fw_rule['firewall_policy_id']) + + def test_delete_firewall_policy_with_firewall_association(self): + attrs = self._get_test_firewall_attrs() + with self.firewall_policy() as fwp: + fwp_id = fwp['firewall_policy']['id'] + attrs['firewall_policy_id'] = fwp_id + with self.firewall(firewall_policy_id=fwp_id, + admin_state_up= + ADMIN_STATE_UP): + req = self.new_delete_request('firewall_policies', fwp_id) + res = req.get_response(self.ext_api) + self.assertEqual(res.status_int, 409) + + def test_create_firewall_rule(self): + attrs = self._get_test_firewall_rule_attrs() + + with self.firewall_rule() as firewall_rule: + for k, v in attrs.iteritems(): + self.assertEqual(firewall_rule['firewall_rule'][k], v) + + attrs['source_port'] = None + attrs['destination_port'] = None + with self.firewall_rule(source_port=None, + destination_port=None) as firewall_rule: + for k, v in attrs.iteritems(): + self.assertEqual(firewall_rule['firewall_rule'][k], v) + + attrs['source_port'] = '10000' + attrs['destination_port'] = '80' + with self.firewall_rule(source_port=10000, + destination_port=80) as firewall_rule: + for k, v in attrs.iteritems(): + self.assertEqual(firewall_rule['firewall_rule'][k], v) + + attrs['source_port'] = '10000' + attrs['destination_port'] = '80' + with self.firewall_rule(source_port='10000', + destination_port='80') as firewall_rule: + for k, v in attrs.iteritems(): + self.assertEqual(firewall_rule['firewall_rule'][k], 
v) + + def test_create_firewall_rule_icmp_with_port(self): + attrs = self._get_test_firewall_rule_attrs() + attrs['protocol'] = 'icmp' + res = self._create_firewall_rule(self.fmt, **attrs) + self.assertEqual(400, res.status_int) + + def test_create_firewall_rule_icmp_without_port(self): + attrs = self._get_test_firewall_rule_attrs() + + attrs['protocol'] = 'icmp' + attrs['source_port'] = None + attrs['destination_port'] = None + with self.firewall_rule(source_port=None, + destination_port=None, + protocol='icmp') as firewall_rule: + for k, v in attrs.iteritems(): + self.assertEqual(firewall_rule['firewall_rule'][k], v) + + def test_show_firewall_rule_with_fw_policy_not_associated(self): + attrs = self._get_test_firewall_rule_attrs() + with self.firewall_rule() as fw_rule: + req = self.new_show_request('firewall_rules', + fw_rule['firewall_rule']['id'], + fmt=self.fmt) + res = self.deserialize(self.fmt, + req.get_response(self.ext_api)) + for k, v in attrs.iteritems(): + self.assertEqual(res['firewall_rule'][k], v) + + def test_show_firewall_rule_with_fw_policy_associated(self): + attrs = self._get_test_firewall_rule_attrs() + with self.firewall_rule() as fw_rule: + with self.firewall_policy() as fwp: + fwp_id = fwp['firewall_policy']['id'] + attrs['firewall_policy_id'] = fwp_id + data = {'firewall_policy': + {'firewall_rules': + [fw_rule['firewall_rule']['id']]}} + req = self.new_update_request('firewall_policies', data, + fwp['firewall_policy']['id']) + req.get_response(self.ext_api) + req = self.new_show_request('firewall_rules', + fw_rule['firewall_rule']['id'], + fmt=self.fmt) + res = self.deserialize(self.fmt, + req.get_response(self.ext_api)) + for k, v in attrs.iteritems(): + self.assertEqual(res['firewall_rule'][k], v) + + def test_list_firewall_rules(self): + with contextlib.nested(self.firewall_rule(name='fwr1'), + self.firewall_rule(name='fwr2'), + self.firewall_rule(name='fwr3')) as fr: + query_params = 'protocol=tcp' + 
self._test_list_resources('firewall_rule', fr, + query_params=query_params) + + def test_update_firewall_rule(self): + name = "new_firewall_rule1" + attrs = self._get_test_firewall_rule_attrs(name) + + attrs['source_port'] = '10:20' + attrs['destination_port'] = '30:40' + with self.firewall_rule() as fwr: + data = {'firewall_rule': {'name': name, + 'source_port': '10:20', + 'destination_port': '30:40'}} + req = self.new_update_request('firewall_rules', data, + fwr['firewall_rule']['id']) + res = self.deserialize(self.fmt, + req.get_response(self.ext_api)) + for k, v in attrs.iteritems(): + self.assertEqual(res['firewall_rule'][k], v) + + attrs['source_port'] = '10000' + attrs['destination_port'] = '80' + with self.firewall_rule() as fwr: + data = {'firewall_rule': {'name': name, + 'source_port': 10000, + 'destination_port': 80}} + req = self.new_update_request('firewall_rules', data, + fwr['firewall_rule']['id']) + res = self.deserialize(self.fmt, + req.get_response(self.ext_api)) + for k, v in attrs.iteritems(): + self.assertEqual(res['firewall_rule'][k], v) + + attrs['source_port'] = '10000' + attrs['destination_port'] = '80' + with self.firewall_rule() as fwr: + data = {'firewall_rule': {'name': name, + 'source_port': '10000', + 'destination_port': '80'}} + req = self.new_update_request('firewall_rules', data, + fwr['firewall_rule']['id']) + res = self.deserialize(self.fmt, + req.get_response(self.ext_api)) + for k, v in attrs.iteritems(): + self.assertEqual(res['firewall_rule'][k], v) + + attrs['source_port'] = None + attrs['destination_port'] = None + with self.firewall_rule() as fwr: + data = {'firewall_rule': {'name': name, + 'source_port': None, + 'destination_port': None}} + req = self.new_update_request('firewall_rules', data, + fwr['firewall_rule']['id']) + res = self.deserialize(self.fmt, + req.get_response(self.ext_api)) + for k, v in attrs.iteritems(): + self.assertEqual(res['firewall_rule'][k], v) + + def 
test_update_firewall_rule_with_policy_associated(self): + name = "new_firewall_rule1" + attrs = self._get_test_firewall_rule_attrs(name) + with self.firewall_rule() as fwr: + with self.firewall_policy() as fwp: + fwp_id = fwp['firewall_policy']['id'] + attrs['firewall_policy_id'] = fwp_id + fwr_id = fwr['firewall_rule']['id'] + data = {'firewall_policy': {'firewall_rules': [fwr_id]}} + req = self.new_update_request('firewall_policies', data, + fwp['firewall_policy']['id']) + req.get_response(self.ext_api) + data = {'firewall_rule': {'name': name}} + req = self.new_update_request('firewall_rules', data, + fwr['firewall_rule']['id']) + res = self.deserialize(self.fmt, + req.get_response(self.ext_api)) + attrs['firewall_policy_id'] = fwp_id + for k, v in attrs.iteritems(): + self.assertEqual(res['firewall_rule'][k], v) + req = self.new_show_request('firewall_policies', + fwp['firewall_policy']['id'], + fmt=self.fmt) + res = self.deserialize(self.fmt, + req.get_response(self.ext_api)) + self.assertEqual(res['firewall_policy']['firewall_rules'], + [fwr_id]) + self.assertEqual(res['firewall_policy']['audited'], False) + + def test_delete_firewall_rule(self): + ctx = context.get_admin_context() + with self.firewall_rule(no_delete=True) as fwr: + fwr_id = fwr['firewall_rule']['id'] + req = self.new_delete_request('firewall_rules', fwr_id) + res = req.get_response(self.ext_api) + self.assertEqual(res.status_int, 204) + self.assertRaises(firewall.FirewallRuleNotFound, + self.plugin.get_firewall_rule, + ctx, fwr_id) + + def test_delete_firewall_rule_with_policy_associated(self): + attrs = self._get_test_firewall_rule_attrs() + with self.firewall_rule() as fwr: + with self.firewall_policy() as fwp: + fwp_id = fwp['firewall_policy']['id'] + attrs['firewall_policy_id'] = fwp_id + fwr_id = fwr['firewall_rule']['id'] + data = {'firewall_policy': {'firewall_rules': [fwr_id]}} + req = self.new_update_request('firewall_policies', data, + fwp['firewall_policy']['id']) + 
req.get_response(self.ext_api) + req = self.new_delete_request('firewall_rules', fwr_id) + res = req.get_response(self.ext_api) + self.assertEqual(res.status_int, 409) + + def test_create_firewall(self): + name = "firewall1" + attrs = self._get_test_firewall_attrs(name) + + with self.firewall_policy() as fwp: + fwp_id = fwp['firewall_policy']['id'] + attrs['firewall_policy_id'] = fwp_id + with self.firewall(name=name, + firewall_policy_id=fwp_id, + admin_state_up= + ADMIN_STATE_UP) as firewall: + for k, v in attrs.iteritems(): + self.assertEqual(firewall['firewall'][k], v) + + def test_show_firewall(self): + name = "firewall1" + attrs = self._get_test_firewall_attrs(name) + + with self.firewall_policy() as fwp: + fwp_id = fwp['firewall_policy']['id'] + attrs['firewall_policy_id'] = fwp_id + with self.firewall(name=name, + firewall_policy_id=fwp_id, + admin_state_up= + ADMIN_STATE_UP) as firewall: + req = self.new_show_request('firewalls', + firewall['firewall']['id'], + fmt=self.fmt) + res = self.deserialize(self.fmt, + req.get_response(self.ext_api)) + for k, v in attrs.iteritems(): + self.assertEqual(res['firewall'][k], v) + + def test_list_firewalls(self): + with self.firewall_policy() as fwp: + fwp_id = fwp['firewall_policy']['id'] + with contextlib.nested(self.firewall(name='fw1', + firewall_policy_id=fwp_id, + description='fw'), + self.firewall(name='fw2', + firewall_policy_id=fwp_id, + description='fw'), + self.firewall(name='fw3', + firewall_policy_id=fwp_id, + description='fw')) as fwalls: + self._test_list_resources('firewall', fwalls, + query_params='description=fw') + + def test_update_firewall(self): + name = "new_firewall1" + attrs = self._get_test_firewall_attrs(name) + + with self.firewall_policy() as fwp: + fwp_id = fwp['firewall_policy']['id'] + attrs['firewall_policy_id'] = fwp_id + with self.firewall(firewall_policy_id=fwp_id, + admin_state_up= + ADMIN_STATE_UP) as firewall: + data = {'firewall': {'name': name}} + req = 
self.new_update_request('firewalls', data, + firewall['firewall']['id']) + res = self.deserialize(self.fmt, + req.get_response(self.ext_api)) + for k, v in attrs.iteritems(): + self.assertEqual(res['firewall'][k], v) + + def test_delete_firewall(self): + ctx = context.get_admin_context() + with self.firewall_policy() as fwp: + fwp_id = fwp['firewall_policy']['id'] + with self.firewall(firewall_policy_id=fwp_id, + no_delete=True) as fw: + fw_id = fw['firewall']['id'] + req = self.new_delete_request('firewalls', fw_id) + res = req.get_response(self.ext_api) + self.assertEqual(res.status_int, 204) + self.assertRaises(firewall.FirewallNotFound, + self.plugin.get_firewall, + ctx, fw_id) + + def test_insert_rule_in_policy_with_prior_rules_added_via_update(self): + attrs = self._get_test_firewall_policy_attrs() + attrs['audited'] = False + attrs['firewall_list'] = [] + with contextlib.nested(self.firewall_rule(name='fwr1'), + self.firewall_rule(name='fwr2'), + self.firewall_rule(name='fwr3')) as frs: + fr1 = frs[0:2] + fwr3 = frs[2] + with self.firewall_policy() as fwp: + fwp_id = fwp['firewall_policy']['id'] + attrs['id'] = fwp_id + fw_rule_ids = [r['firewall_rule']['id'] for r in fr1] + attrs['firewall_rules'] = fw_rule_ids[:] + data = {'firewall_policy': + {'firewall_rules': fw_rule_ids}} + req = self.new_update_request('firewall_policies', data, + fwp_id) + req.get_response(self.ext_api) + self._rule_action('insert', fwp_id, fw_rule_ids[0], + insert_before=fw_rule_ids[0], + insert_after=None, + expected_code=webob.exc.HTTPConflict.code, + expected_body=None) + fwr3_id = fwr3['firewall_rule']['id'] + attrs['firewall_rules'].insert(0, fwr3_id) + self._rule_action('insert', fwp_id, fwr3_id, + insert_before=fw_rule_ids[0], + insert_after=None, + expected_code=webob.exc.HTTPOk.code, + expected_body=attrs) + + def test_insert_rule_in_policy_failures(self): + with self.firewall_rule(name='fwr1') as fr1: + with self.firewall_policy() as fwp: + fwp_id = 
fwp['firewall_policy']['id'] + fr1_id = fr1['firewall_rule']['id'] + fw_rule_ids = [fr1_id] + data = {'firewall_policy': + {'firewall_rules': fw_rule_ids}} + req = self.new_update_request('firewall_policies', data, + fwp_id) + req.get_response(self.ext_api) + # test inserting with empty request body + self._rule_action('insert', fwp_id, '123', + expected_code=webob.exc.HTTPBadRequest.code, + expected_body=None, body_data={}) + # test inserting when firewall_rule_id is missing in + # request body + insert_data = {'insert_before': '123', + 'insert_after': '456'} + self._rule_action('insert', fwp_id, '123', + expected_code=webob.exc.HTTPBadRequest.code, + expected_body=None, + body_data=insert_data) + # test inserting when firewall_rule_id is None + insert_data = {'firewall_rule_id': None, + 'insert_before': '123', + 'insert_after': '456'} + self._rule_action('insert', fwp_id, '123', + expected_code=webob.exc.HTTPNotFound.code, + expected_body=None, + body_data=insert_data) + # test inserting when firewall_policy_id is incorrect + self._rule_action('insert', '123', fr1_id, + expected_code=webob.exc.HTTPNotFound.code, + expected_body=None) + # test inserting when firewall_policy_id is None + self._rule_action('insert', None, fr1_id, + expected_code=webob.exc.HTTPBadRequest.code, + expected_body=None) + + def test_insert_rule_for_previously_associated_rule(self): + with self.firewall_rule() as fwr: + fwr_id = fwr['firewall_rule']['id'] + fw_rule_ids = [fwr_id] + with self.firewall_policy(firewall_rules=fw_rule_ids): + with self.firewall_policy(name='firewall_policy2') as fwp: + fwp_id = fwp['firewall_policy']['id'] + insert_data = {'firewall_rule_id': fwr_id} + self._rule_action( + 'insert', fwp_id, fwr_id, insert_before=None, + insert_after=None, + expected_code=webob.exc.HTTPConflict.code, + expected_body=None, body_data=insert_data) + + def test_insert_rule_in_policy(self): + attrs = self._get_test_firewall_policy_attrs() + attrs['audited'] = False + 
attrs['firewall_list'] = [] + with contextlib.nested(self.firewall_rule(name='fwr0'), + self.firewall_rule(name='fwr1'), + self.firewall_rule(name='fwr2'), + self.firewall_rule(name='fwr3'), + self.firewall_rule(name='fwr4'), + self.firewall_rule(name='fwr5'), + self.firewall_rule(name='fwr6')) as fwr: + with self.firewall_policy() as fwp: + fwp_id = fwp['firewall_policy']['id'] + attrs['id'] = fwp_id + # test insert when rule list is empty + fwr0_id = fwr[0]['firewall_rule']['id'] + attrs['firewall_rules'].insert(0, fwr0_id) + self._rule_action('insert', fwp_id, fwr0_id, + insert_before=None, + insert_after=None, + expected_code=webob.exc.HTTPOk.code, + expected_body=attrs) + # test insert at top of rule list, insert_before and + # insert_after not provided + fwr1_id = fwr[1]['firewall_rule']['id'] + attrs['firewall_rules'].insert(0, fwr1_id) + insert_data = {'firewall_rule_id': fwr1_id} + self._rule_action('insert', fwp_id, fwr0_id, + expected_code=webob.exc.HTTPOk.code, + expected_body=attrs, body_data=insert_data) + # test insert at top of list above existing rule + fwr2_id = fwr[2]['firewall_rule']['id'] + attrs['firewall_rules'].insert(0, fwr2_id) + self._rule_action('insert', fwp_id, fwr2_id, + insert_before=fwr1_id, + insert_after=None, + expected_code=webob.exc.HTTPOk.code, + expected_body=attrs) + # test insert at bottom of list + fwr3_id = fwr[3]['firewall_rule']['id'] + attrs['firewall_rules'].append(fwr3_id) + self._rule_action('insert', fwp_id, fwr3_id, + insert_before=None, + insert_after=fwr0_id, + expected_code=webob.exc.HTTPOk.code, + expected_body=attrs) + # test insert in the middle of the list using + # insert_before + fwr4_id = fwr[4]['firewall_rule']['id'] + attrs['firewall_rules'].insert(1, fwr4_id) + self._rule_action('insert', fwp_id, fwr4_id, + insert_before=fwr1_id, + insert_after=None, + expected_code=webob.exc.HTTPOk.code, + expected_body=attrs) + # test insert in the middle of the list using + # insert_after + fwr5_id = 
fwr[5]['firewall_rule']['id'] + attrs['firewall_rules'].insert(1, fwr5_id) + self._rule_action('insert', fwp_id, fwr5_id, + insert_before=None, + insert_after=fwr2_id, + expected_code=webob.exc.HTTPOk.code, + expected_body=attrs) + # test insert when both insert_before and + # insert_after are set + fwr6_id = fwr[6]['firewall_rule']['id'] + attrs['firewall_rules'].insert(1, fwr6_id) + self._rule_action('insert', fwp_id, fwr6_id, + insert_before=fwr5_id, + insert_after=fwr5_id, + expected_code=webob.exc.HTTPOk.code, + expected_body=attrs) + + def test_remove_rule_from_policy(self): + attrs = self._get_test_firewall_policy_attrs() + attrs['audited'] = False + attrs['firewall_list'] = [] + with contextlib.nested(self.firewall_rule(name='fwr1'), + self.firewall_rule(name='fwr2'), + self.firewall_rule(name='fwr3')) as fr1: + with self.firewall_policy() as fwp: + fwp_id = fwp['firewall_policy']['id'] + attrs['id'] = fwp_id + fw_rule_ids = [r['firewall_rule']['id'] for r in fr1] + attrs['firewall_rules'] = fw_rule_ids[:] + data = {'firewall_policy': + {'firewall_rules': fw_rule_ids}} + req = self.new_update_request('firewall_policies', data, + fwp_id) + req.get_response(self.ext_api) + # test removing a rule from a policy that does not exist + self._rule_action('remove', '123', fw_rule_ids[1], + expected_code=webob.exc.HTTPNotFound.code, + expected_body=None) + # test removing a rule in the middle of the list + attrs['firewall_rules'].remove(fw_rule_ids[1]) + self._rule_action('remove', fwp_id, fw_rule_ids[1], + expected_body=attrs) + # test removing a rule at the top of the list + attrs['firewall_rules'].remove(fw_rule_ids[0]) + self._rule_action('remove', fwp_id, fw_rule_ids[0], + expected_body=attrs) + # test removing remaining rule in the list + attrs['firewall_rules'].remove(fw_rule_ids[2]) + self._rule_action('remove', fwp_id, fw_rule_ids[2], + expected_body=attrs) + # test removing rule that is not associated with the policy + self._rule_action('remove', fwp_id, 
fw_rule_ids[2], + expected_code=webob.exc.HTTPBadRequest.code, + expected_body=None) + + def test_remove_rule_from_policy_failures(self): + with self.firewall_rule(name='fwr1') as fr1: + with self.firewall_policy() as fwp: + fwp_id = fwp['firewall_policy']['id'] + fw_rule_ids = [fr1['firewall_rule']['id']] + data = {'firewall_policy': + {'firewall_rules': fw_rule_ids}} + req = self.new_update_request('firewall_policies', data, + fwp_id) + req.get_response(self.ext_api) + # test removing rule that does not exist + self._rule_action('remove', fwp_id, '123', + expected_code=webob.exc.HTTPNotFound.code, + expected_body=None) + # test removing rule with bad request + self._rule_action('remove', fwp_id, '123', + expected_code=webob.exc.HTTPBadRequest.code, + expected_body=None, body_data={}) + # test removing rule with firewall_rule_id set to None + self._rule_action('remove', fwp_id, '123', + expected_code=webob.exc.HTTPNotFound.code, + expected_body=None, + body_data={'firewall_rule_id': None}) + + +class TestFirewallDBPluginXML(TestFirewallDBPlugin): + fmt = 'xml' diff --git a/neutron/tests/unit/db/loadbalancer/__init__.py b/neutron/tests/unit/db/loadbalancer/__init__.py new file mode 100644 index 000000000..cae279d0a --- /dev/null +++ b/neutron/tests/unit/db/loadbalancer/__init__.py @@ -0,0 +1,15 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
diff --git a/neutron/tests/unit/db/loadbalancer/test_db_loadbalancer.py b/neutron/tests/unit/db/loadbalancer/test_db_loadbalancer.py new file mode 100644 index 000000000..bc541c7c8 --- /dev/null +++ b/neutron/tests/unit/db/loadbalancer/test_db_loadbalancer.py @@ -0,0 +1,1572 @@ +# Copyright (c) 2012 OpenStack Foundation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import contextlib +import logging + +import mock +from oslo.config import cfg +import testtools +import webob.exc + +from neutron.api import extensions +from neutron.common import config +from neutron import context +import neutron.db.l3_db # noqa +from neutron.db.loadbalancer import loadbalancer_db as ldb +from neutron.db import servicetype_db as sdb +import neutron.extensions +from neutron.extensions import loadbalancer +from neutron.plugins.common import constants +from neutron.services.loadbalancer import ( + plugin as loadbalancer_plugin +) +from neutron.services.loadbalancer.drivers import abstract_driver +from neutron.services import provider_configuration as pconf +from neutron.tests.unit import test_db_plugin + + +LOG = logging.getLogger(__name__) + +DB_CORE_PLUGIN_KLASS = 'neutron.db.db_base_plugin_v2.NeutronDbPluginV2' +DB_LB_PLUGIN_KLASS = ( + "neutron.services.loadbalancer." + "plugin.LoadBalancerPlugin" +) +NOOP_DRIVER_KLASS = ('neutron.tests.unit.db.loadbalancer.test_db_loadbalancer.' 
+ 'NoopLbaaSDriver') + +extensions_path = ':'.join(neutron.extensions.__path__) + +_subnet_id = "0c798ed8-33ba-11e2-8b28-000c291c4d14" + + +class NoopLbaaSDriver(abstract_driver.LoadBalancerAbstractDriver): + """A dummy lbass driver that that only performs object deletion.""" + + def __init__(self, plugin): + self.plugin = plugin + + def create_vip(self, context, vip): + pass + + def update_vip(self, context, old_vip, vip): + pass + + def delete_vip(self, context, vip): + self.plugin._delete_db_vip(context, vip["id"]) + + def create_pool(self, context, pool): + pass + + def update_pool(self, context, old_pool, pool): + pass + + def delete_pool(self, context, pool): + self.plugin._delete_db_pool(context, pool["id"]) + + def stats(self, context, pool_id): + return {"bytes_in": 0, + "bytes_out": 0, + "active_connections": 0, + "total_connections": 0} + + def create_member(self, context, member): + pass + + def update_member(self, context, old_member, member): + pass + + def delete_member(self, context, member): + self.plugin._delete_db_member(context, member["id"]) + + def update_pool_health_monitor(self, context, old_health_monitor, + health_monitor, + pool_association): + pass + + def create_pool_health_monitor(self, context, + health_monitor, pool_id): + pass + + def delete_pool_health_monitor(self, context, health_monitor, pool_id): + self.plugin._delete_db_pool_health_monitor( + context, health_monitor["id"], + pool_id + ) + + +class LoadBalancerTestMixin(object): + resource_prefix_map = dict( + (k, constants.COMMON_PREFIXES[constants.LOADBALANCER]) + for k in loadbalancer.RESOURCE_ATTRIBUTE_MAP.keys() + ) + + def _get_vip_optional_args(self): + return ('description', 'subnet_id', 'address', + 'session_persistence', 'connection_limit') + + def _create_vip(self, fmt, name, pool_id, protocol, protocol_port, + admin_state_up, expected_res_status=None, **kwargs): + data = {'vip': {'name': name, + 'pool_id': pool_id, + 'protocol': protocol, + 'protocol_port': 
protocol_port, + 'admin_state_up': admin_state_up, + 'tenant_id': self._tenant_id}} + args = self._get_vip_optional_args() + for arg in args: + if arg in kwargs and kwargs[arg] is not None: + data['vip'][arg] = kwargs[arg] + + vip_req = self.new_create_request('vips', data, fmt) + vip_res = vip_req.get_response(self.ext_api) + if expected_res_status: + self.assertEqual(vip_res.status_int, expected_res_status) + + return vip_res + + def _create_pool(self, fmt, name, lb_method, protocol, admin_state_up, + expected_res_status=None, **kwargs): + data = {'pool': {'name': name, + 'subnet_id': _subnet_id, + 'lb_method': lb_method, + 'protocol': protocol, + 'admin_state_up': admin_state_up, + 'tenant_id': self._tenant_id}} + for arg in ('description', 'provider', 'subnet_id'): + if arg in kwargs and kwargs[arg] is not None: + data['pool'][arg] = kwargs[arg] + pool_req = self.new_create_request('pools', data, fmt) + pool_res = pool_req.get_response(self.ext_api) + if expected_res_status: + self.assertEqual(pool_res.status_int, expected_res_status) + + return pool_res + + def _create_member(self, fmt, address, protocol_port, admin_state_up, + expected_res_status=None, **kwargs): + data = {'member': {'address': address, + 'protocol_port': protocol_port, + 'admin_state_up': admin_state_up, + 'tenant_id': self._tenant_id}} + for arg in ('weight', 'pool_id'): + if arg in kwargs and kwargs[arg] is not None: + data['member'][arg] = kwargs[arg] + + member_req = self.new_create_request('members', data, fmt) + member_res = member_req.get_response(self.ext_api) + if expected_res_status: + self.assertEqual(member_res.status_int, expected_res_status) + + return member_res + + def _create_health_monitor(self, fmt, type, delay, timeout, max_retries, + admin_state_up, expected_res_status=None, + **kwargs): + data = {'health_monitor': {'type': type, + 'delay': delay, + 'timeout': timeout, + 'max_retries': max_retries, + 'admin_state_up': admin_state_up, + 'tenant_id': self._tenant_id}} + 
for arg in ('http_method', 'path', 'expected_code'): + if arg in kwargs and kwargs[arg] is not None: + data['health_monitor'][arg] = kwargs[arg] + + req = self.new_create_request('health_monitors', data, fmt) + + res = req.get_response(self.ext_api) + if expected_res_status: + self.assertEqual(res.status_int, expected_res_status) + + return res + + @contextlib.contextmanager + def vip(self, fmt=None, name='vip1', pool=None, subnet=None, + protocol='HTTP', protocol_port=80, admin_state_up=True, + no_delete=False, **kwargs): + if not fmt: + fmt = self.fmt + + with test_db_plugin.optional_ctx(subnet, self.subnet) as tmp_subnet: + with test_db_plugin.optional_ctx(pool, self.pool) as tmp_pool: + pool_id = tmp_pool['pool']['id'] + res = self._create_vip(fmt, + name, + pool_id, + protocol, + protocol_port, + admin_state_up, + subnet_id=tmp_subnet['subnet']['id'], + **kwargs) + if res.status_int >= webob.exc.HTTPClientError.code: + raise webob.exc.HTTPClientError( + explanation=_("Unexpected error code: %s") % + res.status_int + ) + vip = self.deserialize(fmt or self.fmt, res) + yield vip + if not no_delete: + self._delete('vips', vip['vip']['id']) + + @contextlib.contextmanager + def pool(self, fmt=None, name='pool1', lb_method='ROUND_ROBIN', + protocol='HTTP', admin_state_up=True, no_delete=False, + **kwargs): + if not fmt: + fmt = self.fmt + res = self._create_pool(fmt, + name, + lb_method, + protocol, + admin_state_up, + **kwargs) + if res.status_int >= webob.exc.HTTPClientError.code: + raise webob.exc.HTTPClientError( + explanation=_("Unexpected error code: %s") % res.status_int + ) + pool = self.deserialize(fmt or self.fmt, res) + yield pool + if not no_delete: + self._delete('pools', pool['pool']['id']) + + @contextlib.contextmanager + def member(self, fmt=None, address='192.168.1.100', protocol_port=80, + admin_state_up=True, no_delete=False, **kwargs): + if not fmt: + fmt = self.fmt + res = self._create_member(fmt, + address, + protocol_port, + admin_state_up, + 
**kwargs) + if res.status_int >= webob.exc.HTTPClientError.code: + raise webob.exc.HTTPClientError( + explanation=_("Unexpected error code: %s") % res.status_int + ) + member = self.deserialize(fmt or self.fmt, res) + yield member + if not no_delete: + self._delete('members', member['member']['id']) + + @contextlib.contextmanager + def health_monitor(self, fmt=None, type='TCP', + delay=30, timeout=10, max_retries=3, + admin_state_up=True, + no_delete=False, **kwargs): + if not fmt: + fmt = self.fmt + res = self._create_health_monitor(fmt, + type, + delay, + timeout, + max_retries, + admin_state_up, + **kwargs) + if res.status_int >= webob.exc.HTTPClientError.code: + raise webob.exc.HTTPClientError( + explanation=_("Unexpected error code: %s") % res.status_int + ) + health_monitor = self.deserialize(fmt or self.fmt, res) + the_health_monitor = health_monitor['health_monitor'] + # make sure: + # 1. When the type is HTTP/S we have HTTP related attributes in + # the result + # 2. When the type is not HTTP/S we do not have HTTP related + # attributes in the result + http_related_attributes = ('http_method', 'url_path', 'expected_codes') + if type in ['HTTP', 'HTTPS']: + for arg in http_related_attributes: + self.assertIsNotNone(the_health_monitor.get(arg)) + else: + for arg in http_related_attributes: + self.assertIsNone(the_health_monitor.get(arg)) + yield health_monitor + if not no_delete: + self._delete('health_monitors', the_health_monitor['id']) + + +class LoadBalancerPluginDbTestCase(LoadBalancerTestMixin, + test_db_plugin.NeutronDbPluginV2TestCase): + def setUp(self, core_plugin=None, lb_plugin=None, lbaas_provider=None, + ext_mgr=None): + service_plugins = {'lb_plugin_name': DB_LB_PLUGIN_KLASS} + if not lbaas_provider: + lbaas_provider = ( + constants.LOADBALANCER + + ':lbaas:' + NOOP_DRIVER_KLASS + ':default') + cfg.CONF.set_override('service_provider', + [lbaas_provider], + 'service_providers') + #force service type manager to reload configuration: + 
sdb.ServiceTypeManager._instance = None + + super(LoadBalancerPluginDbTestCase, self).setUp( + ext_mgr=ext_mgr, + service_plugins=service_plugins + ) + + if not ext_mgr: + self.plugin = loadbalancer_plugin.LoadBalancerPlugin() + ext_mgr = extensions.PluginAwareExtensionManager( + extensions_path, + {constants.LOADBALANCER: self.plugin} + ) + app = config.load_paste_app('extensions_test_app') + self.ext_api = extensions.ExtensionMiddleware(app, ext_mgr=ext_mgr) + + get_lbaas_agent_patcher = mock.patch( + 'neutron.services.loadbalancer.agent_scheduler' + '.LbaasAgentSchedulerDbMixin.get_lbaas_agent_hosting_pool') + mock_lbaas_agent = mock.MagicMock() + get_lbaas_agent_patcher.start().return_value = mock_lbaas_agent + mock_lbaas_agent.__getitem__.return_value = {'host': 'host'} + + self._subnet_id = _subnet_id + + +class TestLoadBalancer(LoadBalancerPluginDbTestCase): + + def test_create_vip(self, **extras): + expected = { + 'name': 'vip1', + 'description': '', + 'protocol_port': 80, + 'protocol': 'HTTP', + 'connection_limit': -1, + 'admin_state_up': True, + 'status': 'PENDING_CREATE', + 'tenant_id': self._tenant_id, + } + + expected.update(extras) + + with self.subnet() as subnet: + expected['subnet_id'] = subnet['subnet']['id'] + name = expected['name'] + + with self.vip(name=name, subnet=subnet, **extras) as vip: + for k in ('id', 'address', 'port_id', 'pool_id'): + self.assertTrue(vip['vip'].get(k, None)) + + self.assertEqual( + dict((k, v) + for k, v in vip['vip'].items() if k in expected), + expected + ) + return vip + + def test_create_vip_twice_for_same_pool(self): + """Test loadbalancer db plugin via extension and directly.""" + with self.subnet() as subnet: + with self.pool(name="pool1") as pool: + with self.vip(name='vip1', subnet=subnet, pool=pool): + vip_data = { + 'name': 'vip1', + 'pool_id': pool['pool']['id'], + 'description': '', + 'protocol_port': 80, + 'protocol': 'HTTP', + 'connection_limit': -1, + 'admin_state_up': True, + 'status': 
'PENDING_CREATE', + 'tenant_id': self._tenant_id, + 'session_persistence': '' + } + self.assertRaises(loadbalancer.VipExists, + self.plugin.create_vip, + context.get_admin_context(), + {'vip': vip_data}) + + def test_update_vip_raises_vip_exists(self): + with self.subnet() as subnet: + with contextlib.nested( + self.pool(name="pool1"), + self.pool(name="pool2") + ) as (pool1, pool2): + with contextlib.nested( + self.vip(name='vip1', subnet=subnet, pool=pool1), + self.vip(name='vip2', subnet=subnet, pool=pool2) + ) as (vip1, vip2): + vip_data = { + 'id': vip2['vip']['id'], + 'name': 'vip1', + 'pool_id': pool1['pool']['id'], + } + self.assertRaises(loadbalancer.VipExists, + self.plugin.update_vip, + context.get_admin_context(), + vip2['vip']['id'], + {'vip': vip_data}) + + def test_update_vip_change_pool(self): + with self.subnet() as subnet: + with contextlib.nested( + self.pool(name="pool1"), + self.pool(name="pool2") + ) as (pool1, pool2): + with self.vip(name='vip1', subnet=subnet, pool=pool1) as vip: + # change vip from pool1 to pool2 + vip_data = { + 'id': vip['vip']['id'], + 'name': 'vip1', + 'pool_id': pool2['pool']['id'], + } + ctx = context.get_admin_context() + self.plugin.update_vip(ctx, + vip['vip']['id'], + {'vip': vip_data}) + db_pool2 = (ctx.session.query(ldb.Pool). + filter_by(id=pool2['pool']['id']).one()) + db_pool1 = (ctx.session.query(ldb.Pool). 
+ filter_by(id=pool1['pool']['id']).one()) + # check that pool1.vip became None + self.assertIsNone(db_pool1.vip) + # and pool2 got vip + self.assertEqual(db_pool2.vip.id, vip['vip']['id']) + + def test_create_vip_with_invalid_values(self): + invalid = { + 'protocol': 'UNSUPPORTED', + 'protocol_port': 'NOT_AN_INT', + 'protocol_port': 1000500, + 'subnet': {'subnet': {'id': 'invalid-subnet'}} + } + + for param, value in invalid.items(): + kwargs = {'name': 'the-vip', param: value} + with testtools.ExpectedException(webob.exc.HTTPClientError): + with self.vip(**kwargs): + pass + + def test_create_vip_with_address(self): + self.test_create_vip(address='10.0.0.7') + + def test_create_vip_with_address_outside_subnet(self): + with testtools.ExpectedException(webob.exc.HTTPClientError): + self.test_create_vip(address='9.9.9.9') + + def test_create_vip_with_session_persistence(self): + self.test_create_vip(session_persistence={'type': 'HTTP_COOKIE'}) + + def test_create_vip_with_session_persistence_with_app_cookie(self): + sp = {'type': 'APP_COOKIE', 'cookie_name': 'sessionId'} + self.test_create_vip(session_persistence=sp) + + def test_create_vip_with_session_persistence_unsupported_type(self): + with testtools.ExpectedException(webob.exc.HTTPClientError): + self.test_create_vip(session_persistence={'type': 'UNSUPPORTED'}) + + def test_create_vip_with_unnecessary_cookie_name(self): + sp = {'type': "SOURCE_IP", 'cookie_name': 'sessionId'} + with testtools.ExpectedException(webob.exc.HTTPClientError): + self.test_create_vip(session_persistence=sp) + + def test_create_vip_with_session_persistence_without_cookie_name(self): + sp = {'type': "APP_COOKIE"} + with testtools.ExpectedException(webob.exc.HTTPClientError): + self.test_create_vip(session_persistence=sp) + + def test_create_vip_with_protocol_mismatch(self): + with self.pool(protocol='TCP') as pool: + with testtools.ExpectedException(webob.exc.HTTPClientError): + self.test_create_vip(pool=pool, protocol='HTTP') + + def 
test_update_vip_with_protocol_mismatch(self): + with self.pool(protocol='TCP') as pool: + with self.vip(protocol='HTTP') as vip: + data = {'vip': {'pool_id': pool['pool']['id']}} + req = self.new_update_request('vips', data, vip['vip']['id']) + res = req.get_response(self.ext_api) + self.assertEqual(res.status_int, + webob.exc.HTTPClientError.code) + + def test_reset_session_persistence(self): + name = 'vip4' + session_persistence = {'type': "HTTP_COOKIE"} + + update_info = {'vip': {'session_persistence': None}} + + with self.vip(name=name, session_persistence=session_persistence) as v: + # Ensure that vip has been created properly + self.assertEqual(v['vip']['session_persistence'], + session_persistence) + + # Try resetting session_persistence + req = self.new_update_request('vips', update_info, v['vip']['id']) + res = self.deserialize(self.fmt, req.get_response(self.ext_api)) + + self.assertIsNone(res['vip']['session_persistence']) + + def test_update_vip(self): + name = 'new_vip' + keys = [('name', name), + ('address', "10.0.0.2"), + ('protocol_port', 80), + ('connection_limit', 100), + ('admin_state_up', False), + ('status', 'PENDING_UPDATE')] + + with self.vip(name=name) as vip: + keys.append(('subnet_id', vip['vip']['subnet_id'])) + data = {'vip': {'name': name, + 'connection_limit': 100, + 'session_persistence': + {'type': "APP_COOKIE", + 'cookie_name': "jesssionId"}, + 'admin_state_up': False}} + req = self.new_update_request('vips', data, vip['vip']['id']) + res = self.deserialize(self.fmt, req.get_response(self.ext_api)) + for k, v in keys: + self.assertEqual(res['vip'][k], v) + + def test_delete_vip(self): + with self.pool(): + with self.vip(no_delete=True) as vip: + req = self.new_delete_request('vips', + vip['vip']['id']) + res = req.get_response(self.ext_api) + self.assertEqual(res.status_int, webob.exc.HTTPNoContent.code) + + def test_show_vip(self): + name = "vip_show" + keys = [('name', name), + ('address', "10.0.0.10"), + ('protocol_port', 80), + 
('protocol', 'HTTP'), + ('connection_limit', -1), + ('admin_state_up', True), + ('status', 'PENDING_CREATE')] + with self.vip(name=name, address='10.0.0.10') as vip: + req = self.new_show_request('vips', + vip['vip']['id']) + res = self.deserialize(self.fmt, req.get_response(self.ext_api)) + for k, v in keys: + self.assertEqual(res['vip'][k], v) + + def test_list_vips(self): + name = "vips_list" + keys = [('name', name), + ('address', "10.0.0.2"), + ('protocol_port', 80), + ('protocol', 'HTTP'), + ('connection_limit', -1), + ('admin_state_up', True), + ('status', 'PENDING_CREATE')] + with self.vip(name=name) as vip: + keys.append(('subnet_id', vip['vip']['subnet_id'])) + req = self.new_list_request('vips') + res = self.deserialize(self.fmt, req.get_response(self.ext_api)) + self.assertEqual(len(res['vips']), 1) + for k, v in keys: + self.assertEqual(res['vips'][0][k], v) + + def test_list_vips_with_sort_emulated(self): + with self.subnet() as subnet: + with contextlib.nested( + self.vip(name='vip1', subnet=subnet, protocol_port=81), + self.vip(name='vip2', subnet=subnet, protocol_port=82), + self.vip(name='vip3', subnet=subnet, protocol_port=82) + ) as (vip1, vip2, vip3): + self._test_list_with_sort( + 'vip', + (vip1, vip3, vip2), + [('protocol_port', 'asc'), ('name', 'desc')] + ) + + def test_list_vips_with_pagination_emulated(self): + with self.subnet() as subnet: + with contextlib.nested(self.vip(name='vip1', subnet=subnet), + self.vip(name='vip2', subnet=subnet), + self.vip(name='vip3', subnet=subnet) + ) as (vip1, vip2, vip3): + self._test_list_with_pagination('vip', + (vip1, vip2, vip3), + ('name', 'asc'), 2, 2) + + def test_list_vips_with_pagination_reverse_emulated(self): + with self.subnet() as subnet: + with contextlib.nested(self.vip(name='vip1', subnet=subnet), + self.vip(name='vip2', subnet=subnet), + self.vip(name='vip3', subnet=subnet) + ) as (vip1, vip2, vip3): + self._test_list_with_pagination_reverse('vip', + (vip1, vip2, vip3), + ('name', 'asc'), 
2, 2) + + def test_create_pool_with_invalid_values(self): + name = 'pool3' + + pool = self.pool(name=name, protocol='UNSUPPORTED') + self.assertRaises(webob.exc.HTTPClientError, pool.__enter__) + + pool = self.pool(name=name, lb_method='UNSUPPORTED') + self.assertRaises(webob.exc.HTTPClientError, pool.__enter__) + + def _create_pool_directly_via_plugin(self, provider_name): + #default provider will be haproxy + prov1 = (constants.LOADBALANCER + + ':lbaas:' + NOOP_DRIVER_KLASS) + prov2 = (constants.LOADBALANCER + + ':haproxy:neutron.services.loadbalancer.' + 'drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver' + ':default') + cfg.CONF.set_override('service_provider', + [prov1, prov2], + 'service_providers') + sdb.ServiceTypeManager._instance = None + self.plugin = loadbalancer_plugin.LoadBalancerPlugin() + with self.subnet() as subnet: + ctx = context.get_admin_context() + #create pool with another provider - lbaas + #which is noop driver + pool = {'name': 'pool1', + 'subnet_id': subnet['subnet']['id'], + 'lb_method': 'ROUND_ROBIN', + 'protocol': 'HTTP', + 'admin_state_up': True, + 'tenant_id': self._tenant_id, + 'provider': provider_name, + 'description': ''} + self.plugin.create_pool(ctx, {'pool': pool}) + assoc = ctx.session.query(sdb.ProviderResourceAssociation).one() + self.assertEqual(assoc.provider_name, + pconf.normalize_provider_name(provider_name)) + + def test_create_pool_another_provider(self): + self._create_pool_directly_via_plugin('lbaas') + + def test_create_pool_unnormalized_provider_name(self): + self._create_pool_directly_via_plugin('LBAAS') + + def test_create_pool_unexisting_provider(self): + self.assertRaises( + pconf.ServiceProviderNotFound, + self._create_pool_directly_via_plugin, 'unexisting') + + def test_create_pool(self): + name = "pool1" + keys = [('name', name), + ('subnet_id', self._subnet_id), + ('tenant_id', self._tenant_id), + ('protocol', 'HTTP'), + ('lb_method', 'ROUND_ROBIN'), + ('admin_state_up', True), + ('status', 
'PENDING_CREATE')] + + with self.pool(name=name) as pool: + for k, v in keys: + self.assertEqual(pool['pool'][k], v) + + def test_create_pool_with_members(self): + name = "pool2" + with self.pool(name=name) as pool: + pool_id = pool['pool']['id'] + res1 = self._create_member(self.fmt, + '192.168.1.100', + '80', + True, + pool_id=pool_id, + weight=1) + req = self.new_show_request('pools', + pool_id, + fmt=self.fmt) + pool_updated = self.deserialize( + self.fmt, + req.get_response(self.ext_api) + ) + + member1 = self.deserialize(self.fmt, res1) + self.assertEqual(member1['member']['id'], + pool_updated['pool']['members'][0]) + self.assertEqual(len(pool_updated['pool']['members']), 1) + + keys = [('address', '192.168.1.100'), + ('protocol_port', 80), + ('weight', 1), + ('pool_id', pool_id), + ('admin_state_up', True), + ('status', 'PENDING_CREATE')] + for k, v in keys: + self.assertEqual(member1['member'][k], v) + self._delete('members', member1['member']['id']) + + def test_delete_pool(self): + with self.pool(no_delete=True) as pool: + with self.member(no_delete=True, + pool_id=pool['pool']['id']): + req = self.new_delete_request('pools', + pool['pool']['id']) + res = req.get_response(self.ext_api) + self.assertEqual(res.status_int, webob.exc.HTTPNoContent.code) + + def test_delete_pool_preserve_state(self): + with self.pool(no_delete=True) as pool: + with self.vip(pool=pool): + req = self.new_delete_request('pools', + pool['pool']['id']) + res = req.get_response(self.ext_api) + self.assertEqual(res.status_int, webob.exc.HTTPConflict.code) + req = self.new_show_request('pools', + pool['pool']['id'], + fmt=self.fmt) + res = req.get_response(self.ext_api) + self.assertEqual(res.status_int, webob.exc.HTTPOk.code) + res = self.deserialize(self.fmt, + req.get_response(self.ext_api)) + self.assertEqual(res['pool']['status'], + constants.PENDING_CREATE) + req = self.new_delete_request('pools', + pool['pool']['id']) + + def test_show_pool(self): + name = "pool1" + keys = 
[('name', name), + ('subnet_id', self._subnet_id), + ('tenant_id', self._tenant_id), + ('protocol', 'HTTP'), + ('lb_method', 'ROUND_ROBIN'), + ('admin_state_up', True), + ('status', 'PENDING_CREATE')] + with self.pool(name=name) as pool: + req = self.new_show_request('pools', + pool['pool']['id'], + fmt=self.fmt) + res = self.deserialize(self.fmt, req.get_response(self.ext_api)) + for k, v in keys: + self.assertEqual(res['pool'][k], v) + + def test_list_pools_with_sort_emulated(self): + with contextlib.nested(self.pool(name='p1'), + self.pool(name='p2'), + self.pool(name='p3') + ) as (p1, p2, p3): + self._test_list_with_sort('pool', (p3, p2, p1), + [('name', 'desc')]) + + def test_list_pools_with_pagination_emulated(self): + with contextlib.nested(self.pool(name='p1'), + self.pool(name='p2'), + self.pool(name='p3') + ) as (p1, p2, p3): + self._test_list_with_pagination('pool', + (p1, p2, p3), + ('name', 'asc'), 2, 2) + + def test_list_pools_with_pagination_reverse_emulated(self): + with contextlib.nested(self.pool(name='p1'), + self.pool(name='p2'), + self.pool(name='p3') + ) as (p1, p2, p3): + self._test_list_with_pagination_reverse('pool', + (p1, p2, p3), + ('name', 'asc'), 2, 2) + + def test_create_member(self): + with self.pool() as pool: + pool_id = pool['pool']['id'] + with self.member(address='192.168.1.100', + protocol_port=80, + pool_id=pool_id) as member1: + with self.member(address='192.168.1.101', + protocol_port=80, + pool_id=pool_id) as member2: + req = self.new_show_request('pools', + pool_id, + fmt=self.fmt) + pool_update = self.deserialize( + self.fmt, + req.get_response(self.ext_api) + ) + self.assertIn(member1['member']['id'], + pool_update['pool']['members']) + self.assertIn(member2['member']['id'], + pool_update['pool']['members']) + + def test_create_same_member_in_same_pool_raises_member_exists(self): + with self.subnet(): + with self.pool(name="pool1") as pool: + pool_id = pool['pool']['id'] + with self.member(address='192.168.1.100', + 
protocol_port=80, + pool_id=pool_id): + member_data = { + 'address': '192.168.1.100', + 'protocol_port': 80, + 'weight': 1, + 'admin_state_up': True, + 'pool_id': pool_id + } + self.assertRaises(loadbalancer.MemberExists, + self.plugin.create_member, + context.get_admin_context(), + {'member': member_data}) + + def test_update_member(self): + with self.pool(name="pool1") as pool1: + with self.pool(name="pool2") as pool2: + keys = [('address', "192.168.1.100"), + ('tenant_id', self._tenant_id), + ('protocol_port', 80), + ('weight', 10), + ('pool_id', pool2['pool']['id']), + ('admin_state_up', False), + ('status', 'PENDING_UPDATE')] + with self.member(pool_id=pool1['pool']['id']) as member: + req = self.new_show_request('pools', + pool1['pool']['id'], + fmt=self.fmt) + pool1_update = self.deserialize( + self.fmt, + req.get_response(self.ext_api) + ) + self.assertEqual(len(pool1_update['pool']['members']), 1) + + req = self.new_show_request('pools', + pool2['pool']['id'], + fmt=self.fmt) + pool2_update = self.deserialize( + self.fmt, + req.get_response(self.ext_api) + ) + self.assertEqual(len(pool1_update['pool']['members']), 1) + self.assertEqual(len(pool2_update['pool']['members']), 0) + + data = {'member': {'pool_id': pool2['pool']['id'], + 'weight': 10, + 'admin_state_up': False}} + req = self.new_update_request('members', + data, + member['member']['id']) + res = self.deserialize( + self.fmt, + req.get_response(self.ext_api) + ) + for k, v in keys: + self.assertEqual(res['member'][k], v) + + req = self.new_show_request('pools', + pool1['pool']['id'], + fmt=self.fmt) + pool1_update = self.deserialize( + self.fmt, + req.get_response(self.ext_api) + ) + + req = self.new_show_request('pools', + pool2['pool']['id'], + fmt=self.fmt) + pool2_update = self.deserialize( + self.fmt, + req.get_response(self.ext_api) + ) + + self.assertEqual(len(pool2_update['pool']['members']), 1) + self.assertEqual(len(pool1_update['pool']['members']), 0) + + def test_delete_member(self): 
+ with self.pool() as pool: + pool_id = pool['pool']['id'] + with self.member(pool_id=pool_id, + no_delete=True) as member: + req = self.new_delete_request('members', + member['member']['id']) + res = req.get_response(self.ext_api) + self.assertEqual(res.status_int, webob.exc.HTTPNoContent.code) + + req = self.new_show_request('pools', + pool_id, + fmt=self.fmt) + pool_update = self.deserialize( + self.fmt, + req.get_response(self.ext_api) + ) + self.assertEqual(len(pool_update['pool']['members']), 0) + + def test_show_member(self): + with self.pool() as pool: + keys = [('address', "192.168.1.100"), + ('tenant_id', self._tenant_id), + ('protocol_port', 80), + ('weight', 1), + ('pool_id', pool['pool']['id']), + ('admin_state_up', True), + ('status', 'PENDING_CREATE')] + with self.member(pool_id=pool['pool']['id']) as member: + req = self.new_show_request('members', + member['member']['id'], + fmt=self.fmt) + res = self.deserialize( + self.fmt, + req.get_response(self.ext_api) + ) + for k, v in keys: + self.assertEqual(res['member'][k], v) + + def test_list_members_with_sort_emulated(self): + with self.pool() as pool: + with contextlib.nested(self.member(pool_id=pool['pool']['id'], + protocol_port=81), + self.member(pool_id=pool['pool']['id'], + protocol_port=82), + self.member(pool_id=pool['pool']['id'], + protocol_port=83) + ) as (m1, m2, m3): + self._test_list_with_sort('member', (m3, m2, m1), + [('protocol_port', 'desc')]) + + def test_list_members_with_pagination_emulated(self): + with self.pool() as pool: + with contextlib.nested(self.member(pool_id=pool['pool']['id'], + protocol_port=81), + self.member(pool_id=pool['pool']['id'], + protocol_port=82), + self.member(pool_id=pool['pool']['id'], + protocol_port=83) + ) as (m1, m2, m3): + self._test_list_with_pagination( + 'member', (m1, m2, m3), ('protocol_port', 'asc'), 2, 2 + ) + + def test_list_members_with_pagination_reverse_emulated(self): + with self.pool() as pool: + with 
contextlib.nested(self.member(pool_id=pool['pool']['id'], + protocol_port=81), + self.member(pool_id=pool['pool']['id'], + protocol_port=82), + self.member(pool_id=pool['pool']['id'], + protocol_port=83) + ) as (m1, m2, m3): + self._test_list_with_pagination_reverse( + 'member', (m1, m2, m3), ('protocol_port', 'asc'), 2, 2 + ) + + def test_create_healthmonitor(self): + keys = [('type', "TCP"), + ('tenant_id', self._tenant_id), + ('delay', 30), + ('timeout', 10), + ('max_retries', 3), + ('admin_state_up', True)] + with self.health_monitor() as monitor: + for k, v in keys: + self.assertEqual(monitor['health_monitor'][k], v) + + def test_create_health_monitor_with_timeout_delay_invalid(self): + data = {'health_monitor': {'type': type, + 'delay': 3, + 'timeout': 6, + 'max_retries': 2, + 'admin_state_up': True, + 'tenant_id': self._tenant_id}} + req = self.new_create_request('health_monitors', data, self.fmt) + res = req.get_response(self.ext_api) + self.assertEqual(webob.exc.HTTPBadRequest.code, res.status_int) + + def test_update_health_monitor_with_timeout_delay_invalid(self): + with self.health_monitor() as monitor: + data = {'health_monitor': {'delay': 10, + 'timeout': 20, + 'max_retries': 2, + 'admin_state_up': False}} + req = self.new_update_request("health_monitors", + data, + monitor['health_monitor']['id']) + res = req.get_response(self.ext_api) + self.assertEqual(webob.exc.HTTPBadRequest.code, res.status_int) + + def test_update_healthmonitor(self): + keys = [('type', "TCP"), + ('tenant_id', self._tenant_id), + ('delay', 20), + ('timeout', 20), + ('max_retries', 2), + ('admin_state_up', False)] + with self.health_monitor() as monitor: + data = {'health_monitor': {'delay': 20, + 'timeout': 20, + 'max_retries': 2, + 'admin_state_up': False}} + req = self.new_update_request("health_monitors", + data, + monitor['health_monitor']['id']) + res = self.deserialize(self.fmt, req.get_response(self.ext_api)) + for k, v in keys: + 
self.assertEqual(res['health_monitor'][k], v) + + def test_delete_healthmonitor(self): + with self.health_monitor(no_delete=True) as monitor: + ctx = context.get_admin_context() + qry = ctx.session.query(ldb.HealthMonitor) + qry = qry.filter_by(id=monitor['health_monitor']['id']) + self.assertIsNotNone(qry.first()) + + req = self.new_delete_request('health_monitors', + monitor['health_monitor']['id']) + res = req.get_response(self.ext_api) + self.assertEqual(res.status_int, webob.exc.HTTPNoContent.code) + qry = ctx.session.query(ldb.HealthMonitor) + qry = qry.filter_by(id=monitor['health_monitor']['id']) + self.assertIsNone(qry.first()) + + def test_delete_healthmonitor_with_associations_raises(self): + with self.health_monitor(type='HTTP') as monitor: + with self.pool() as pool: + data = { + 'health_monitor': { + 'id': monitor['health_monitor']['id'], + 'tenant_id': self._tenant_id + } + } + req = self.new_create_request( + 'pools', + data, + fmt=self.fmt, + id=pool['pool']['id'], + subresource='health_monitors') + res = req.get_response(self.ext_api) + self.assertEqual(res.status_int, webob.exc.HTTPCreated.code) + + ctx = context.get_admin_context() + + # check if we actually have corresponding Pool associations + qry = ctx.session.query(ldb.PoolMonitorAssociation) + qry = qry.filter_by(monitor_id=monitor['health_monitor']['id']) + self.assertTrue(qry.all()) + # try to delete the HealthMonitor instance + req = self.new_delete_request( + 'health_monitors', + monitor['health_monitor']['id'] + ) + res = req.get_response(self.ext_api) + self.assertEqual(res.status_int, webob.exc.HTTPConflict.code) + + qry = ctx.session.query(ldb.HealthMonitor) + qry = qry.filter_by(id=monitor['health_monitor']['id']) + self.assertIsNotNone(qry.first()) + # check if all corresponding Pool associations are not deleted + qry = ctx.session.query(ldb.PoolMonitorAssociation) + qry = qry.filter_by(monitor_id=monitor['health_monitor']['id']) + self.assertTrue(qry.all()) + + def 
test_show_healthmonitor(self): + with self.health_monitor() as monitor: + keys = [('type', "TCP"), + ('tenant_id', self._tenant_id), + ('delay', 30), + ('timeout', 10), + ('max_retries', 3), + ('admin_state_up', True)] + req = self.new_show_request('health_monitors', + monitor['health_monitor']['id'], + fmt=self.fmt) + res = self.deserialize(self.fmt, req.get_response(self.ext_api)) + for k, v in keys: + self.assertEqual(res['health_monitor'][k], v) + + def test_list_healthmonitors_with_sort_emulated(self): + with contextlib.nested(self.health_monitor(delay=30), + self.health_monitor(delay=31), + self.health_monitor(delay=32) + ) as (m1, m2, m3): + self._test_list_with_sort('health_monitor', (m3, m2, m1), + [('delay', 'desc')]) + + def test_list_healthmonitors_with_pagination_emulated(self): + with contextlib.nested(self.health_monitor(delay=30), + self.health_monitor(delay=31), + self.health_monitor(delay=32) + ) as (m1, m2, m3): + self._test_list_with_pagination('health_monitor', + (m1, m2, m3), + ('delay', 'asc'), 2, 2) + + def test_list_healthmonitors_with_pagination_reverse_emulated(self): + with contextlib.nested(self.health_monitor(delay=30), + self.health_monitor(delay=31), + self.health_monitor(delay=32) + ) as (m1, m2, m3): + self._test_list_with_pagination_reverse('health_monitor', + (m1, m2, m3), + ('delay', 'asc'), 2, 2) + + def test_update_pool_stats_with_no_stats(self): + keys = ["bytes_in", "bytes_out", + "active_connections", + "total_connections"] + with self.pool() as pool: + pool_id = pool['pool']['id'] + ctx = context.get_admin_context() + self.plugin.update_pool_stats(ctx, pool_id) + pool_obj = ctx.session.query(ldb.Pool).filter_by(id=pool_id).one() + for key in keys: + self.assertEqual(pool_obj.stats.__dict__[key], 0) + + def test_update_pool_stats_with_negative_values(self): + stats_data = {"bytes_in": -1, + "bytes_out": -2, + "active_connections": -3, + "total_connections": -4} + for k, v in stats_data.items(): + 
self._test_update_pool_stats_with_negative_value(k, v) + + def _test_update_pool_stats_with_negative_value(self, k, v): + with self.pool() as pool: + pool_id = pool['pool']['id'] + ctx = context.get_admin_context() + self.assertRaises(ValueError, self.plugin.update_pool_stats, + ctx, pool_id, {k: v}) + + def test_update_pool_stats(self): + stats_data = {"bytes_in": 1, + "bytes_out": 2, + "active_connections": 3, + "total_connections": 4} + with self.pool() as pool: + pool_id = pool['pool']['id'] + ctx = context.get_admin_context() + self.plugin.update_pool_stats(ctx, pool_id, stats_data) + pool_obj = ctx.session.query(ldb.Pool).filter_by(id=pool_id).one() + for k, v in stats_data.items(): + self.assertEqual(pool_obj.stats.__dict__[k], v) + + def test_update_pool_stats_members_statuses(self): + with self.pool() as pool: + pool_id = pool['pool']['id'] + with self.member(pool_id=pool_id) as member: + member_id = member['member']['id'] + stats_data = {'members': { + member_id: { + 'status': 'INACTIVE' + } + }} + ctx = context.get_admin_context() + member = self.plugin.get_member(ctx, member_id) + self.assertEqual('PENDING_CREATE', member['status']) + self.plugin.update_pool_stats(ctx, pool_id, stats_data) + member = self.plugin.get_member(ctx, member_id) + self.assertEqual('INACTIVE', member['status']) + + def test_get_pool_stats(self): + keys = [("bytes_in", 0), + ("bytes_out", 0), + ("active_connections", 0), + ("total_connections", 0)] + with self.pool() as pool: + req = self.new_show_request("pools", + pool['pool']['id'], + subresource="stats", + fmt=self.fmt) + res = self.deserialize(self.fmt, req.get_response(self.ext_api)) + for k, v in keys: + self.assertEqual(res['stats'][k], v) + + def test_create_healthmonitor_of_pool(self): + with self.health_monitor(type="TCP") as monitor1: + with self.health_monitor(type="HTTP") as monitor2: + with self.pool() as pool: + data = {"health_monitor": { + "id": monitor1['health_monitor']['id'], + 'tenant_id': self._tenant_id}} 
+ req = self.new_create_request( + "pools", + data, + fmt=self.fmt, + id=pool['pool']['id'], + subresource="health_monitors") + res = req.get_response(self.ext_api) + self.assertEqual(res.status_int, + webob.exc.HTTPCreated.code) + + data = {"health_monitor": { + "id": monitor2['health_monitor']['id'], + 'tenant_id': self._tenant_id}} + req = self.new_create_request( + "pools", + data, + fmt=self.fmt, + id=pool['pool']['id'], + subresource="health_monitors") + res = req.get_response(self.ext_api) + self.assertEqual(res.status_int, + webob.exc.HTTPCreated.code) + + req = self.new_show_request( + 'pools', + pool['pool']['id'], + fmt=self.fmt) + res = self.deserialize( + self.fmt, + req.get_response(self.ext_api) + ) + self.assertIn(monitor1['health_monitor']['id'], + res['pool']['health_monitors']) + self.assertIn(monitor2['health_monitor']['id'], + res['pool']['health_monitors']) + expected = [ + {'monitor_id': monitor1['health_monitor']['id'], + 'status': 'PENDING_CREATE', + 'status_description': None}, + {'monitor_id': monitor2['health_monitor']['id'], + 'status': 'PENDING_CREATE', + 'status_description': None}] + self.assertEqual( + sorted(expected), + sorted(res['pool']['health_monitors_status'])) + + def test_delete_healthmonitor_of_pool(self): + with self.health_monitor(type="TCP") as monitor1: + with self.health_monitor(type="HTTP") as monitor2: + with self.pool() as pool: + # add the monitors to the pool + data = {"health_monitor": { + "id": monitor1['health_monitor']['id'], + 'tenant_id': self._tenant_id}} + req = self.new_create_request( + "pools", + data, + fmt=self.fmt, + id=pool['pool']['id'], + subresource="health_monitors") + res = req.get_response(self.ext_api) + self.assertEqual(res.status_int, + webob.exc.HTTPCreated.code) + + data = {"health_monitor": { + "id": monitor2['health_monitor']['id'], + 'tenant_id': self._tenant_id}} + req = self.new_create_request( + "pools", + data, + fmt=self.fmt, + id=pool['pool']['id'], + 
subresource="health_monitors") + res = req.get_response(self.ext_api) + self.assertEqual(res.status_int, + webob.exc.HTTPCreated.code) + + # remove one of healthmonitor from the pool + req = self.new_delete_request( + "pools", + fmt=self.fmt, + id=pool['pool']['id'], + sub_id=monitor1['health_monitor']['id'], + subresource="health_monitors") + res = req.get_response(self.ext_api) + self.assertEqual(res.status_int, + webob.exc.HTTPNoContent.code) + + req = self.new_show_request( + 'pools', + pool['pool']['id'], + fmt=self.fmt) + res = self.deserialize( + self.fmt, + req.get_response(self.ext_api) + ) + self.assertNotIn(monitor1['health_monitor']['id'], + res['pool']['health_monitors']) + self.assertIn(monitor2['health_monitor']['id'], + res['pool']['health_monitors']) + expected = [ + {'monitor_id': monitor2['health_monitor']['id'], + 'status': 'PENDING_CREATE', + 'status_description': None} + ] + self.assertEqual(expected, + res['pool']['health_monitors_status']) + + def test_create_loadbalancer(self): + vip_name = "vip3" + pool_name = "pool3" + + with self.pool(name=pool_name) as pool: + with self.vip(name=vip_name, pool=pool) as vip: + pool_id = pool['pool']['id'] + vip_id = vip['vip']['id'] + # Add two members + res1 = self._create_member(self.fmt, + '192.168.1.100', + '80', + True, + pool_id=pool_id, + weight=1) + res2 = self._create_member(self.fmt, + '192.168.1.101', + '80', + True, + pool_id=pool_id, + weight=2) + # Add a health_monitor + req = self._create_health_monitor(self.fmt, + 'HTTP', + '10', + '10', + '3', + True) + health_monitor = self.deserialize(self.fmt, req) + self.assertEqual(req.status_int, webob.exc.HTTPCreated.code) + + # Associate the health_monitor to the pool + data = {"health_monitor": { + "id": health_monitor['health_monitor']['id'], + 'tenant_id': self._tenant_id}} + req = self.new_create_request("pools", + data, + fmt=self.fmt, + id=pool['pool']['id'], + subresource="health_monitors") + res = req.get_response(self.ext_api) + 
self.assertEqual(res.status_int, webob.exc.HTTPCreated.code) + + # Get pool and vip + req = self.new_show_request('pools', + pool_id, + fmt=self.fmt) + pool_updated = self.deserialize( + self.fmt, + req.get_response(self.ext_api) + ) + member1 = self.deserialize(self.fmt, res1) + member2 = self.deserialize(self.fmt, res2) + self.assertIn(member1['member']['id'], + pool_updated['pool']['members']) + self.assertIn(member2['member']['id'], + pool_updated['pool']['members']) + self.assertIn(health_monitor['health_monitor']['id'], + pool_updated['pool']['health_monitors']) + expected = [ + {'monitor_id': health_monitor['health_monitor']['id'], + 'status': 'PENDING_CREATE', + 'status_description': None} + ] + self.assertEqual( + expected, pool_updated['pool']['health_monitors_status']) + + req = self.new_show_request('vips', + vip_id, + fmt=self.fmt) + vip_updated = self.deserialize( + self.fmt, + req.get_response(self.ext_api) + ) + self.assertEqual(vip_updated['vip']['pool_id'], + pool_updated['pool']['id']) + + # clean up + # disassociate the health_monitor from the pool first + req = self.new_delete_request( + "pools", + fmt=self.fmt, + id=pool['pool']['id'], + subresource="health_monitors", + sub_id=health_monitor['health_monitor']['id']) + res = req.get_response(self.ext_api) + self.assertEqual(res.status_int, webob.exc.HTTPNoContent.code) + self._delete('health_monitors', + health_monitor['health_monitor']['id']) + self._delete('members', member1['member']['id']) + self._delete('members', member2['member']['id']) + + def test_create_pool_health_monitor(self): + with contextlib.nested( + self.health_monitor(), + self.health_monitor(), + self.pool(name="pool") + ) as (health_mon1, health_mon2, pool): + res = self.plugin.create_pool_health_monitor( + context.get_admin_context(), + health_mon1, pool['pool']['id'] + ) + self.assertEqual({'health_monitor': + [health_mon1['health_monitor']['id']]}, + res) + + res = self.plugin.create_pool_health_monitor( + 
context.get_admin_context(), + health_mon2, pool['pool']['id'] + ) + self.assertEqual({'health_monitor': + [health_mon1['health_monitor']['id'], + health_mon2['health_monitor']['id']]}, + res) + + res = self.plugin.get_pool_health_monitor( + context.get_admin_context(), + health_mon2['health_monitor']['id'], pool['pool']['id']) + self.assertEqual(res['tenant_id'], + health_mon1['health_monitor']['tenant_id']) + + def test_driver_call_create_pool_health_monitor(self): + with mock.patch.object(self.plugin.drivers['lbaas'], + 'create_pool_health_monitor') as driver_call: + with contextlib.nested( + self.health_monitor(), + self.pool() + ) as (hm, pool): + data = {'health_monitor': { + 'id': hm['health_monitor']['id'], + 'tenant_id': self._tenant_id}} + self.plugin.create_pool_health_monitor( + context.get_admin_context(), + data, pool['pool']['id'] + ) + hm['health_monitor']['pools'] = [ + {'pool_id': pool['pool']['id'], + 'status': 'PENDING_CREATE', + 'status_description': None}] + driver_call.assert_called_once_with( + mock.ANY, hm['health_monitor'], pool['pool']['id']) + + def test_pool_monitor_list_of_pools(self): + with contextlib.nested( + self.health_monitor(), + self.pool(), + self.pool() + ) as (hm, p1, p2): + ctx = context.get_admin_context() + data = {'health_monitor': { + 'id': hm['health_monitor']['id'], + 'tenant_id': self._tenant_id}} + self.plugin.create_pool_health_monitor( + ctx, data, p1['pool']['id']) + self.plugin.create_pool_health_monitor( + ctx, data, p2['pool']['id']) + healthmon = self.plugin.get_health_monitor( + ctx, hm['health_monitor']['id']) + pool_data = [{'pool_id': p1['pool']['id'], + 'status': 'PENDING_CREATE', + 'status_description': None}, + {'pool_id': p2['pool']['id'], + 'status': 'PENDING_CREATE', + 'status_description': None}] + self.assertEqual(sorted(healthmon['pools']), + sorted(pool_data)) + req = self.new_show_request( + 'health_monitors', + hm['health_monitor']['id'], + fmt=self.fmt) + hm = self.deserialize( + self.fmt, + 
req.get_response(self.ext_api) + ) + self.assertEqual(sorted(hm['health_monitor']['pools']), + sorted(pool_data)) + + def test_create_pool_health_monitor_already_associated(self): + with contextlib.nested( + self.health_monitor(), + self.pool(name="pool") + ) as (hm, pool): + res = self.plugin.create_pool_health_monitor( + context.get_admin_context(), + hm, pool['pool']['id'] + ) + self.assertEqual({'health_monitor': + [hm['health_monitor']['id']]}, + res) + self.assertRaises(loadbalancer.PoolMonitorAssociationExists, + self.plugin.create_pool_health_monitor, + context.get_admin_context(), + hm, + pool['pool']['id']) + + def test_create_pool_healthmon_invalid_pool_id(self): + with self.health_monitor() as healthmon: + self.assertRaises(loadbalancer.PoolNotFound, + self.plugin.create_pool_health_monitor, + context.get_admin_context(), + healthmon, + "123-456-789" + ) + + def test_update_status(self): + with self.pool() as pool: + self.assertEqual(pool['pool']['status'], 'PENDING_CREATE') + self.assertFalse(pool['pool']['status_description']) + + self.plugin.update_status(context.get_admin_context(), ldb.Pool, + pool['pool']['id'], 'ERROR', 'unknown') + updated_pool = self.plugin.get_pool(context.get_admin_context(), + pool['pool']['id']) + self.assertEqual(updated_pool['status'], 'ERROR') + self.assertEqual(updated_pool['status_description'], 'unknown') + + # update status to ACTIVE, status_description should be cleared + self.plugin.update_status(context.get_admin_context(), ldb.Pool, + pool['pool']['id'], 'ACTIVE') + updated_pool = self.plugin.get_pool(context.get_admin_context(), + pool['pool']['id']) + self.assertEqual(updated_pool['status'], 'ACTIVE') + self.assertFalse(updated_pool['status_description']) + + def test_update_pool_health_monitor(self): + with contextlib.nested( + self.health_monitor(), + self.pool(name="pool") + ) as (hm, pool): + res = self.plugin.create_pool_health_monitor( + context.get_admin_context(), + hm, pool['pool']['id']) + 
self.assertEqual({'health_monitor': + [hm['health_monitor']['id']]}, + res) + + assoc = self.plugin.get_pool_health_monitor( + context.get_admin_context(), + hm['health_monitor']['id'], + pool['pool']['id']) + self.assertEqual(assoc['status'], 'PENDING_CREATE') + self.assertIsNone(assoc['status_description']) + + self.plugin.update_pool_health_monitor( + context.get_admin_context(), + hm['health_monitor']['id'], + pool['pool']['id'], + 'ACTIVE', 'ok') + assoc = self.plugin.get_pool_health_monitor( + context.get_admin_context(), + hm['health_monitor']['id'], + pool['pool']['id']) + self.assertEqual(assoc['status'], 'ACTIVE') + self.assertEqual(assoc['status_description'], 'ok') + + def test_check_orphan_pool_associations(self): + with contextlib.nested( + #creating pools with default noop driver + self.pool(), + self.pool() + ) as (p1, p2): + #checking that 3 associations exist + ctx = context.get_admin_context() + qry = ctx.session.query(sdb.ProviderResourceAssociation) + self.assertEqual(qry.count(), 2) + #removing driver + cfg.CONF.set_override('service_provider', + [constants.LOADBALANCER + + ':lbaas1:' + NOOP_DRIVER_KLASS + + ':default'], + 'service_providers') + sdb.ServiceTypeManager._instance = None + # calling _remove_orphan... in constructor + self.assertRaises( + SystemExit, + loadbalancer_plugin.LoadBalancerPlugin + ) + + +class TestLoadBalancerXML(TestLoadBalancer): + fmt = 'xml' diff --git a/neutron/tests/unit/db/metering/__init__.py b/neutron/tests/unit/db/metering/__init__.py new file mode 100644 index 000000000..82a447213 --- /dev/null +++ b/neutron/tests/unit/db/metering/__init__.py @@ -0,0 +1,15 @@ +# Copyright (C) 2013 eNovance SAS +# +# Author: Sylvain Afchain +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/neutron/tests/unit/db/metering/test_db_metering.py b/neutron/tests/unit/db/metering/test_db_metering.py new file mode 100644 index 000000000..83e4996af --- /dev/null +++ b/neutron/tests/unit/db/metering/test_db_metering.py @@ -0,0 +1,291 @@ +# Copyright (C) 2013 eNovance SAS +# +# Author: Sylvain Afchain +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import contextlib +import logging + +import webob.exc + +from neutron.api import extensions +from neutron.common import config +from neutron import context +import neutron.extensions +from neutron.extensions import metering +from neutron.plugins.common import constants +from neutron.services.metering import metering_plugin +from neutron.tests.unit import test_db_plugin + +LOG = logging.getLogger(__name__) + +DB_METERING_PLUGIN_KLASS = ( + "neutron.services.metering." 
+ "metering_plugin.MeteringPlugin" +) + +extensions_path = ':'.join(neutron.extensions.__path__) + + +class MeteringPluginDbTestCaseMixin(object): + def _create_metering_label(self, fmt, name, description, **kwargs): + data = {'metering_label': {'name': name, + 'tenant_id': kwargs.get('tenant_id', + 'test_tenant'), + 'description': description}} + req = self.new_create_request('metering-labels', data, + fmt) + + if kwargs.get('set_context') and 'tenant_id' in kwargs: + # create a specific auth context for this request + req.environ['neutron.context'] = ( + context.Context('', kwargs['tenant_id'], + is_admin=kwargs.get('is_admin', True))) + + return req.get_response(self.ext_api) + + def _make_metering_label(self, fmt, name, description, **kwargs): + res = self._create_metering_label(fmt, name, description, **kwargs) + if res.status_int >= 400: + raise webob.exc.HTTPClientError(code=res.status_int) + return self.deserialize(fmt, res) + + def _create_metering_label_rule(self, fmt, metering_label_id, direction, + remote_ip_prefix, excluded, **kwargs): + data = {'metering_label_rule': + {'metering_label_id': metering_label_id, + 'tenant_id': kwargs.get('tenant_id', 'test_tenant'), + 'direction': direction, + 'excluded': excluded, + 'remote_ip_prefix': remote_ip_prefix}} + req = self.new_create_request('metering-label-rules', + data, fmt) + + if kwargs.get('set_context') and 'tenant_id' in kwargs: + # create a specific auth context for this request + req.environ['neutron.context'] = ( + context.Context('', kwargs['tenant_id'])) + + return req.get_response(self.ext_api) + + def _make_metering_label_rule(self, fmt, metering_label_id, direction, + remote_ip_prefix, excluded, **kwargs): + res = self._create_metering_label_rule(fmt, metering_label_id, + direction, remote_ip_prefix, + excluded, **kwargs) + if res.status_int >= 400: + raise webob.exc.HTTPClientError(code=res.status_int) + return self.deserialize(fmt, res) + + @contextlib.contextmanager + def 
metering_label(self, name='label', description='desc', + fmt=None, no_delete=False, **kwargs): + if not fmt: + fmt = self.fmt + metering_label = self._make_metering_label(fmt, name, + description, **kwargs) + yield metering_label + if not no_delete: + self._delete('metering-labels', + metering_label['metering_label']['id']) + + @contextlib.contextmanager + def metering_label_rule(self, metering_label_id=None, direction='ingress', + remote_ip_prefix='10.0.0.0/24', + excluded='false', fmt=None, no_delete=False): + if not fmt: + fmt = self.fmt + metering_label_rule = self._make_metering_label_rule(fmt, + metering_label_id, + direction, + remote_ip_prefix, + excluded) + yield metering_label_rule + if not no_delete: + self._delete('metering-label-rules', + metering_label_rule['metering_label_rule']['id']) + + +class MeteringPluginDbTestCase(test_db_plugin.NeutronDbPluginV2TestCase, + MeteringPluginDbTestCaseMixin): + fmt = 'json' + + resource_prefix_map = dict( + (k.replace('_', '-'), constants.COMMON_PREFIXES[constants.METERING]) + for k in metering.RESOURCE_ATTRIBUTE_MAP.keys() + ) + + def setUp(self, plugin=None): + service_plugins = {'metering_plugin_name': DB_METERING_PLUGIN_KLASS} + + super(MeteringPluginDbTestCase, self).setUp( + plugin=plugin, + service_plugins=service_plugins + ) + + self.plugin = metering_plugin.MeteringPlugin() + ext_mgr = extensions.PluginAwareExtensionManager( + extensions_path, + {constants.METERING: self.plugin} + ) + app = config.load_paste_app('extensions_test_app') + self.ext_api = extensions.ExtensionMiddleware(app, ext_mgr=ext_mgr) + + def test_create_metering_label(self): + name = 'my label' + description = 'my metering label' + keys = [('name', name,), ('description', description)] + with self.metering_label(name, description) as metering_label: + for k, v, in keys: + self.assertEqual(metering_label['metering_label'][k], v) + + def test_delete_metering_label(self): + name = 'my label' + description = 'my metering label' + + with 
self.metering_label(name, description, + no_delete=True) as metering_label: + metering_label_id = metering_label['metering_label']['id'] + self._delete('metering-labels', metering_label_id, 204) + + def test_list_metering_label(self): + name = 'my label' + description = 'my metering label' + + with contextlib.nested( + self.metering_label(name, description), + self.metering_label(name, description)) as metering_label: + + self._test_list_resources('metering-label', metering_label) + + def test_create_metering_label_rule(self): + name = 'my label' + description = 'my metering label' + + with self.metering_label(name, description) as metering_label: + metering_label_id = metering_label['metering_label']['id'] + + direction = 'egress' + remote_ip_prefix = '192.168.0.0/24' + excluded = True + + keys = [('metering_label_id', metering_label_id), + ('direction', direction), + ('excluded', excluded), + ('remote_ip_prefix', remote_ip_prefix)] + with self.metering_label_rule(metering_label_id, + direction, + remote_ip_prefix, + excluded) as label_rule: + for k, v, in keys: + self.assertEqual(label_rule['metering_label_rule'][k], v) + + def test_delete_metering_label_rule(self): + name = 'my label' + description = 'my metering label' + + with self.metering_label(name, description) as metering_label: + metering_label_id = metering_label['metering_label']['id'] + + direction = 'egress' + remote_ip_prefix = '192.168.0.0/24' + excluded = True + + with self.metering_label_rule(metering_label_id, + direction, + remote_ip_prefix, + excluded, + no_delete=True) as label_rule: + rule_id = label_rule['metering_label_rule']['id'] + self._delete('metering-label-rules', rule_id, 204) + + def test_list_metering_label_rule(self): + name = 'my label' + description = 'my metering label' + + with self.metering_label(name, description) as metering_label: + metering_label_id = metering_label['metering_label']['id'] + + direction = 'egress' + remote_ip_prefix = '192.168.0.0/24' + excluded = True + 
+ with contextlib.nested( + self.metering_label_rule(metering_label_id, + direction, + remote_ip_prefix, + excluded), + self.metering_label_rule(metering_label_id, + 'ingress', + remote_ip_prefix, + excluded)) as metering_label_rule: + + self._test_list_resources('metering-label-rule', + metering_label_rule) + + def test_create_metering_label_rules(self): + name = 'my label' + description = 'my metering label' + + with self.metering_label(name, description) as metering_label: + metering_label_id = metering_label['metering_label']['id'] + + direction = 'egress' + remote_ip_prefix = '192.168.0.0/24' + excluded = True + + with contextlib.nested( + self.metering_label_rule(metering_label_id, + direction, + remote_ip_prefix, + excluded), + self.metering_label_rule(metering_label_id, + direction, + '0.0.0.0/0', + False)) as metering_label_rule: + + self._test_list_resources('metering-label-rule', + metering_label_rule) + + def test_create_metering_label_rule_two_labels(self): + name1 = 'my label 1' + name2 = 'my label 2' + description = 'my metering label' + + with self.metering_label(name1, description) as metering_label1: + metering_label_id1 = metering_label1['metering_label']['id'] + + with self.metering_label(name2, description) as metering_label2: + metering_label_id2 = metering_label2['metering_label']['id'] + + direction = 'egress' + remote_ip_prefix = '192.168.0.0/24' + excluded = True + + with contextlib.nested( + self.metering_label_rule(metering_label_id1, + direction, + remote_ip_prefix, + excluded), + self.metering_label_rule(metering_label_id2, + direction, + remote_ip_prefix, + excluded)) as metering_label_rule: + + self._test_list_resources('metering-label-rule', + metering_label_rule) + + +class TestMeteringDbXML(MeteringPluginDbTestCase): + fmt = 'xml' diff --git a/neutron/tests/unit/db/test_agent_db.py b/neutron/tests/unit/db/test_agent_db.py new file mode 100644 index 000000000..e3dc5ee8f --- /dev/null +++ b/neutron/tests/unit/db/test_agent_db.py @@ 
-0,0 +1,86 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright (c) 2013 OpenStack Foundation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import mock + +from neutron import context +from neutron.db import agents_db +from neutron.db import api as db +from neutron.db import db_base_plugin_v2 as base_plugin +from neutron.openstack.common.db import exception as exc +from neutron.tests import base + + +class FakePlugin(base_plugin.NeutronDbPluginV2, agents_db.AgentDbMixin): + """A fake plugin class containing all DB methods.""" + + +class TestAgentsDbMixin(base.BaseTestCase): + def setUp(self): + super(TestAgentsDbMixin, self).setUp() + + self.context = context.get_admin_context() + self.plugin = FakePlugin() + self.addCleanup(db.clear_db) + + self.agent_status = { + 'agent_type': 'Open vSwitch agent', + 'binary': 'neutron-openvswitch-agent', + 'host': 'overcloud-notcompute', + 'topic': 'N/A' + } + + def _assert_ref_fields_are_equal(self, reference, result): + """Compare (key, value) pairs of a reference dict with the result + + Note: the result MAY have additional keys + """ + + for field, value in reference.items(): + self.assertEqual(value, result[field], field) + + def test_create_or_update_agent_new_entry(self): + self.plugin.create_or_update_agent(self.context, self.agent_status) + + agent = self.plugin.get_agents(self.context)[0] + self._assert_ref_fields_are_equal(self.agent_status, agent) + + def 
test_create_or_update_agent_existing_entry(self): + self.plugin.create_or_update_agent(self.context, self.agent_status) + self.plugin.create_or_update_agent(self.context, self.agent_status) + self.plugin.create_or_update_agent(self.context, self.agent_status) + + agents = self.plugin.get_agents(self.context) + self.assertEqual(len(agents), 1) + + agent = agents[0] + self._assert_ref_fields_are_equal(self.agent_status, agent) + + def test_create_or_update_agent_concurrent_insert(self): + # NOTE(rpodolyaka): emulate violation of the unique constraint caused + # by a concurrent insert. Ensure we make another + # attempt on fail + with mock.patch('sqlalchemy.orm.Session.add') as add_mock: + add_mock.side_effect = [ + exc.DBDuplicateEntry(columns=['agent_type', 'host']), + None + ] + + self.plugin.create_or_update_agent(self.context, self.agent_status) + + self.assertEqual(add_mock.call_count, 2, + "Agent entry creation hasn't been retried") diff --git a/neutron/tests/unit/db/test_quota_db.py b/neutron/tests/unit/db/test_quota_db.py new file mode 100644 index 000000000..813f0166a --- /dev/null +++ b/neutron/tests/unit/db/test_quota_db.py @@ -0,0 +1,143 @@ +# Copyright (c) 2014 OpenStack Foundation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# @author: Sergio Cazzolato, Intel + +from neutron.common import exceptions +from neutron import context +from neutron.db import api as db +from neutron.db import db_base_plugin_v2 as base_plugin +from neutron.db import quota_db +from neutron.tests import base + + +class FakePlugin(base_plugin.NeutronDbPluginV2, quota_db.DbQuotaDriver): + """A fake plugin class containing all DB methods.""" + + +class TestResource(object): + """Describe a test resource for quota checking.""" + + def __init__(self, name, default): + self.name = name + self.quota = default + + @property + def default(self): + return self.quota + +PROJECT = 'prj_test' +RESOURCE = 'res_test' + + +class TestDbQuotaDriver(base.BaseTestCase): + def setUp(self): + super(TestDbQuotaDriver, self).setUp() + self.plugin = FakePlugin() + self.context = context.get_admin_context() + self.addCleanup(db.clear_db) + + def test_create_quota_limit(self): + defaults = {RESOURCE: TestResource(RESOURCE, 4)} + + self.plugin.update_quota_limit(self.context, PROJECT, RESOURCE, 2) + quotas = self.plugin.get_tenant_quotas(self.context, defaults, PROJECT) + self.assertEqual(2, quotas[RESOURCE]) + + def test_update_quota_limit(self): + defaults = {RESOURCE: TestResource(RESOURCE, 4)} + + self.plugin.update_quota_limit(self.context, PROJECT, RESOURCE, 2) + self.plugin.update_quota_limit(self.context, PROJECT, RESOURCE, 3) + quotas = self.plugin.get_tenant_quotas(self.context, defaults, PROJECT) + self.assertEqual(3, quotas[RESOURCE]) + + def test_delete_tenant_quota_restores_default_limit(self): + defaults = {RESOURCE: TestResource(RESOURCE, 4)} + + self.plugin.update_quota_limit(self.context, PROJECT, RESOURCE, 2) + self.plugin.delete_tenant_quota(self.context, PROJECT) + quotas = self.plugin.get_tenant_quotas(self.context, defaults, PROJECT) + self.assertEqual(4, quotas[RESOURCE]) + + def test_get_all_quotas(self): + project_1 = 'prj_test_1' + project_2 = 'prj_test_2' + resource_1 = 'res_test_1' + resource_2 = 
'res_test_2' + + resources = {resource_1: TestResource(resource_1, 1), + resource_2: TestResource(resource_2, 1)} + + self.plugin.update_quota_limit(self.context, project_1, resource_1, 2) + self.plugin.update_quota_limit(self.context, project_2, resource_2, 2) + quotas = self.plugin.get_all_quotas(self.context, resources) + + self.assertEqual(2, len(quotas)) + + self.assertEqual(3, len(quotas[0])) + self.assertEqual(project_1, quotas[0]['tenant_id']) + self.assertEqual(2, quotas[0][resource_1]) + self.assertEqual(1, quotas[0][resource_2]) + + self.assertEqual(3, len(quotas[1])) + self.assertEqual(project_2, quotas[1]['tenant_id']) + self.assertEqual(1, quotas[1][resource_1]) + self.assertEqual(2, quotas[1][resource_2]) + + def test_limit_check(self): + resources = {RESOURCE: TestResource(RESOURCE, 2)} + values = {RESOURCE: 1} + + self.plugin.update_quota_limit(self.context, PROJECT, RESOURCE, 2) + self.plugin.limit_check(self.context, PROJECT, resources, values) + + def test_limit_check_over_quota(self): + resources = {RESOURCE: TestResource(RESOURCE, 2)} + values = {RESOURCE: 3} + + self.plugin.update_quota_limit(self.context, PROJECT, RESOURCE, 2) + + self.assertRaises(exceptions.OverQuota, self.plugin.limit_check, + context.get_admin_context(), PROJECT, resources, + values) + + def test_limit_check_equals_to_quota(self): + resources = {RESOURCE: TestResource(RESOURCE, 2)} + values = {RESOURCE: 2} + + self.plugin.update_quota_limit(self.context, PROJECT, RESOURCE, 2) + self.plugin.limit_check(self.context, PROJECT, resources, values) + + def test_limit_check_value_lower_than_zero(self): + resources = {RESOURCE: TestResource(RESOURCE, 2)} + values = {RESOURCE: -1} + + self.plugin.update_quota_limit(self.context, PROJECT, RESOURCE, 2) + self.assertRaises(exceptions.InvalidQuotaValue, + self.plugin.limit_check, context.get_admin_context(), + PROJECT, resources, values) + + def test_limit_check_wrong_values_size(self): + resource_1 = 'res_test_1' + resource_2 = 
'res_test_2' + + resources = {resource_1: TestResource(resource_1, 2)} + values = {resource_1: 1, resource_2: 1} + + self.plugin.update_quota_limit(self.context, PROJECT, resource_1, 2) + self.assertRaises(exceptions.QuotaResourceUnknown, + self.plugin.limit_check, context.get_admin_context(), + PROJECT, resources, values) diff --git a/neutron/tests/unit/db/vpn/__init__.py b/neutron/tests/unit/db/vpn/__init__.py new file mode 100644 index 000000000..b936bbcb8 --- /dev/null +++ b/neutron/tests/unit/db/vpn/__init__.py @@ -0,0 +1,17 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# (c) Copyright 2013 Hewlett-Packard Development Company, L.P. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# @author: Swaminathan Vasudevan, Hewlett-Packard. diff --git a/neutron/tests/unit/db/vpn/test_db_vpnaas.py b/neutron/tests/unit/db/vpn/test_db_vpnaas.py new file mode 100644 index 000000000..d685df96c --- /dev/null +++ b/neutron/tests/unit/db/vpn/test_db_vpnaas.py @@ -0,0 +1,1670 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# (c) Copyright 2013 Hewlett-Packard Development Company, L.P. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Swaminathan Vasudevan, Hewlett-Packard. + +import contextlib +import os + +from oslo.config import cfg +import webob.exc + +from neutron.api import extensions as api_extensions +from neutron.common import config +from neutron import context +from neutron.db import agentschedulers_db +from neutron.db import l3_agentschedulers_db +from neutron.db import servicetype_db as sdb +from neutron.db.vpn import vpn_db +from neutron import extensions +from neutron.extensions import vpnaas +from neutron import manager +from neutron.openstack.common import uuidutils +from neutron.plugins.common import constants +from neutron.scheduler import l3_agent_scheduler +from neutron.services.vpn import plugin as vpn_plugin +from neutron.tests.unit import test_db_plugin +from neutron.tests.unit import test_l3_plugin + +DB_CORE_PLUGIN_KLASS = 'neutron.db.db_base_plugin_v2.NeutronDbPluginV2' +DB_VPN_PLUGIN_KLASS = "neutron.services.vpn.plugin.VPNPlugin" +ROOTDIR = os.path.normpath(os.path.join( + os.path.dirname(__file__), + '..', '..', '..', '..')) + +extensions_path = ':'.join(extensions.__path__) + + +class TestVpnCorePlugin(test_l3_plugin.TestL3NatIntPlugin, + l3_agentschedulers_db.L3AgentSchedulerDbMixin, + agentschedulers_db.DhcpAgentSchedulerDbMixin): + def __init__(self, configfile=None): + super(TestVpnCorePlugin, self).__init__() + self.router_scheduler = l3_agent_scheduler.ChanceScheduler() + + +class VPNTestMixin(object): + resource_prefix_map = dict( + (k.replace('_', '-'), + constants.COMMON_PREFIXES[constants.VPN]) + for k in 
vpnaas.RESOURCE_ATTRIBUTE_MAP + ) + + def _create_ikepolicy(self, fmt, + name='ikepolicy1', + auth_algorithm='sha1', + encryption_algorithm='aes-128', + phase1_negotiation_mode='main', + lifetime_units='seconds', + lifetime_value=3600, + ike_version='v1', + pfs='group5', + expected_res_status=None, **kwargs): + + data = {'ikepolicy': { + 'name': name, + 'auth_algorithm': auth_algorithm, + 'encryption_algorithm': encryption_algorithm, + 'phase1_negotiation_mode': phase1_negotiation_mode, + 'lifetime': { + 'units': lifetime_units, + 'value': lifetime_value}, + 'ike_version': ike_version, + 'pfs': pfs, + 'tenant_id': self._tenant_id + }} + for arg in ['description']: + if arg in kwargs and kwargs[arg] is not None: + data['ikepolicy'][arg] = kwargs[arg] + + ikepolicy_req = self.new_create_request('ikepolicies', data, fmt) + ikepolicy_res = ikepolicy_req.get_response(self.ext_api) + if expected_res_status: + self.assertEqual(ikepolicy_res.status_int, expected_res_status) + + return ikepolicy_res + + @contextlib.contextmanager + def ikepolicy(self, fmt=None, + name='ikepolicy1', + auth_algorithm='sha1', + encryption_algorithm='aes-128', + phase1_negotiation_mode='main', + lifetime_units='seconds', + lifetime_value=3600, + ike_version='v1', + pfs='group5', + no_delete=False, + **kwargs): + if not fmt: + fmt = self.fmt + res = self._create_ikepolicy(fmt, + name, + auth_algorithm, + encryption_algorithm, + phase1_negotiation_mode, + lifetime_units, + lifetime_value, + ike_version, + pfs, + **kwargs) + if res.status_int >= 400: + raise webob.exc.HTTPClientError(code=res.status_int) + ikepolicy = self.deserialize(fmt or self.fmt, res) + yield ikepolicy + if not no_delete: + self._delete('ikepolicies', ikepolicy['ikepolicy']['id']) + + def _create_ipsecpolicy(self, fmt, + name='ipsecpolicy1', + auth_algorithm='sha1', + encryption_algorithm='aes-128', + encapsulation_mode='tunnel', + transform_protocol='esp', + lifetime_units='seconds', + lifetime_value=3600, + pfs='group5', + 
expected_res_status=None, + **kwargs): + + data = {'ipsecpolicy': {'name': name, + 'auth_algorithm': auth_algorithm, + 'encryption_algorithm': encryption_algorithm, + 'encapsulation_mode': encapsulation_mode, + 'transform_protocol': transform_protocol, + 'lifetime': {'units': lifetime_units, + 'value': lifetime_value}, + 'pfs': pfs, + 'tenant_id': self._tenant_id}} + for arg in ['description']: + if arg in kwargs and kwargs[arg] is not None: + data['ipsecpolicy'][arg] = kwargs[arg] + ipsecpolicy_req = self.new_create_request('ipsecpolicies', data, fmt) + ipsecpolicy_res = ipsecpolicy_req.get_response(self.ext_api) + if expected_res_status: + self.assertEqual(ipsecpolicy_res.status_int, expected_res_status) + + return ipsecpolicy_res + + @contextlib.contextmanager + def ipsecpolicy(self, fmt=None, + name='ipsecpolicy1', + auth_algorithm='sha1', + encryption_algorithm='aes-128', + encapsulation_mode='tunnel', + transform_protocol='esp', + lifetime_units='seconds', + lifetime_value=3600, + pfs='group5', + no_delete=False, **kwargs): + if not fmt: + fmt = self.fmt + res = self._create_ipsecpolicy(fmt, + name, + auth_algorithm, + encryption_algorithm, + encapsulation_mode, + transform_protocol, + lifetime_units, + lifetime_value, + pfs, + **kwargs) + if res.status_int >= 400: + raise webob.exc.HTTPClientError(code=res.status_int) + ipsecpolicy = self.deserialize(fmt or self.fmt, res) + yield ipsecpolicy + if not no_delete: + self._delete('ipsecpolicies', ipsecpolicy['ipsecpolicy']['id']) + + def _create_vpnservice(self, fmt, name, + admin_state_up, + router_id, subnet_id, + expected_res_status=None, **kwargs): + tenant_id = kwargs.get('tenant_id', self._tenant_id) + data = {'vpnservice': {'name': name, + 'subnet_id': subnet_id, + 'router_id': router_id, + 'admin_state_up': admin_state_up, + 'tenant_id': tenant_id}} + for arg in ['description']: + if arg in kwargs and kwargs[arg] is not None: + data['vpnservice'][arg] = kwargs[arg] + vpnservice_req = 
self.new_create_request('vpnservices', data, fmt) + if (kwargs.get('set_context') and + 'tenant_id' in kwargs): + # create a specific auth context for this request + vpnservice_req.environ['neutron.context'] = context.Context( + '', kwargs['tenant_id']) + vpnservice_res = vpnservice_req.get_response(self.ext_api) + if expected_res_status: + self.assertEqual(vpnservice_res.status_int, expected_res_status) + return vpnservice_res + + @contextlib.contextmanager + def vpnservice(self, fmt=None, name='vpnservice1', + subnet=None, + router=None, + admin_state_up=True, + no_delete=False, + plug_subnet=True, + external_subnet_cidr='192.168.100.0/24', + external_router=True, + **kwargs): + if not fmt: + fmt = self.fmt + with contextlib.nested( + test_db_plugin.optional_ctx(subnet, self.subnet), + test_db_plugin.optional_ctx(router, self.router), + self.subnet(cidr=external_subnet_cidr)) as (tmp_subnet, + tmp_router, + public_sub): + if external_router: + self._set_net_external( + public_sub['subnet']['network_id']) + self._add_external_gateway_to_router( + tmp_router['router']['id'], + public_sub['subnet']['network_id']) + tmp_router['router']['external_gateway_info'] = { + 'network_id': public_sub['subnet']['network_id']} + if plug_subnet: + self._router_interface_action( + 'add', + tmp_router['router']['id'], + tmp_subnet['subnet']['id'], None) + + res = self._create_vpnservice(fmt, + name, + admin_state_up, + router_id=(tmp_router['router'] + ['id']), + subnet_id=(tmp_subnet['subnet'] + ['id']), + **kwargs) + vpnservice = self.deserialize(fmt or self.fmt, res) + if res.status_int < 400: + yield vpnservice + + if not no_delete and vpnservice.get('vpnservice'): + self._delete('vpnservices', + vpnservice['vpnservice']['id']) + if plug_subnet: + self._router_interface_action( + 'remove', + tmp_router['router']['id'], + tmp_subnet['subnet']['id'], None) + if external_router: + external_gateway = tmp_router['router'].get( + 'external_gateway_info') + if external_gateway: + 
network_id = external_gateway['network_id'] + self._remove_external_gateway_from_router( + tmp_router['router']['id'], network_id) + if res.status_int >= 400: + raise webob.exc.HTTPClientError( + code=res.status_int, detail=vpnservice) + + def _create_ipsec_site_connection(self, fmt, name='test', + peer_address='192.168.1.10', + peer_id='192.168.1.10', + peer_cidrs=None, + mtu=1500, + psk='abcdefg', + initiator='bi-directional', + dpd_action='hold', + dpd_interval=30, + dpd_timeout=120, + vpnservice_id='fake_id', + ikepolicy_id='fake_id', + ipsecpolicy_id='fake_id', + admin_state_up=True, + expected_res_status=None, **kwargs): + data = { + 'ipsec_site_connection': {'name': name, + 'peer_address': peer_address, + 'peer_id': peer_id, + 'peer_cidrs': peer_cidrs, + 'mtu': mtu, + 'psk': psk, + 'initiator': initiator, + 'dpd': { + 'action': dpd_action, + 'interval': dpd_interval, + 'timeout': dpd_timeout, + }, + 'vpnservice_id': vpnservice_id, + 'ikepolicy_id': ikepolicy_id, + 'ipsecpolicy_id': ipsecpolicy_id, + 'admin_state_up': admin_state_up, + 'tenant_id': self._tenant_id} + } + for arg in ['description']: + if arg in kwargs and kwargs[arg] is not None: + data['ipsec_site_connection'][arg] = kwargs[arg] + + ipsec_site_connection_req = self.new_create_request( + 'ipsec-site-connections', data, fmt + ) + ipsec_site_connection_res = ipsec_site_connection_req.get_response( + self.ext_api + ) + if expected_res_status: + self.assertEqual( + ipsec_site_connection_res.status_int, expected_res_status + ) + + return ipsec_site_connection_res + + @contextlib.contextmanager + def ipsec_site_connection(self, fmt=None, name='ipsec_site_connection1', + peer_address='192.168.1.10', + peer_id='192.168.1.10', + peer_cidrs=None, + mtu=1500, + psk='abcdefg', + initiator='bi-directional', + dpd_action='hold', + dpd_interval=30, + dpd_timeout=120, + vpnservice=None, + ikepolicy=None, + ipsecpolicy=None, + admin_state_up=True, no_delete=False, + **kwargs): + if not fmt: + fmt = self.fmt + 
with contextlib.nested( + test_db_plugin.optional_ctx(vpnservice, + self.vpnservice), + test_db_plugin.optional_ctx(ikepolicy, + self.ikepolicy), + test_db_plugin.optional_ctx(ipsecpolicy, + self.ipsecpolicy) + ) as (tmp_vpnservice, tmp_ikepolicy, tmp_ipsecpolicy): + vpnservice_id = tmp_vpnservice['vpnservice']['id'] + ikepolicy_id = tmp_ikepolicy['ikepolicy']['id'] + ipsecpolicy_id = tmp_ipsecpolicy['ipsecpolicy']['id'] + res = self._create_ipsec_site_connection(fmt, + name, + peer_address, + peer_id, + peer_cidrs, + mtu, + psk, + initiator, + dpd_action, + dpd_interval, + dpd_timeout, + vpnservice_id, + ikepolicy_id, + ipsecpolicy_id, + admin_state_up, + **kwargs) + if res.status_int >= 400: + raise webob.exc.HTTPClientError(code=res.status_int) + + ipsec_site_connection = self.deserialize( + fmt or self.fmt, res + ) + yield ipsec_site_connection + + if not no_delete: + self._delete( + 'ipsec-site-connections', + ipsec_site_connection[ + 'ipsec_site_connection']['id'] + ) + + def _check_ipsec_site_connection(self, ipsec_site_connection, keys, dpd): + self.assertEqual( + keys, + dict((k, v) for k, v + in ipsec_site_connection.items() + if k in keys)) + self.assertEqual( + dpd, + dict((k, v) for k, v + in ipsec_site_connection['dpd'].items() + if k in dpd)) + + def _set_active(self, model, resource_id): + service_plugin = manager.NeutronManager.get_service_plugins()[ + constants.VPN] + adminContext = context.get_admin_context() + with adminContext.session.begin(subtransactions=True): + resource_db = service_plugin._get_resource( + adminContext, + model, + resource_id) + resource_db.status = constants.ACTIVE + + +class VPNPluginDbTestCase(VPNTestMixin, + test_l3_plugin.L3NatTestCaseMixin, + test_db_plugin.NeutronDbPluginV2TestCase): + def setUp(self, core_plugin=None, vpnaas_plugin=DB_VPN_PLUGIN_KLASS, + vpnaas_provider=None): + if not vpnaas_provider: + vpnaas_provider = ( + constants.VPN + + ':vpnaas:neutron.services.vpn.' 
+ 'service_drivers.ipsec.IPsecVPNDriver:default') + + cfg.CONF.set_override('service_provider', + [vpnaas_provider], + 'service_providers') + # force service type manager to reload configuration: + sdb.ServiceTypeManager._instance = None + + service_plugins = {'vpnaas_plugin': vpnaas_plugin} + plugin_str = ('neutron.tests.unit.db.vpn.' + 'test_db_vpnaas.TestVpnCorePlugin') + + super(VPNPluginDbTestCase, self).setUp( + plugin_str, + service_plugins=service_plugins + ) + self._subnet_id = uuidutils.generate_uuid() + self.core_plugin = TestVpnCorePlugin + self.plugin = vpn_plugin.VPNPlugin() + ext_mgr = api_extensions.PluginAwareExtensionManager( + extensions_path, + {constants.CORE: self.core_plugin, + constants.VPN: self.plugin} + ) + app = config.load_paste_app('extensions_test_app') + self.ext_api = api_extensions.ExtensionMiddleware(app, ext_mgr=ext_mgr) + + +class TestVpnaas(VPNPluginDbTestCase): + + def _check_policy(self, policy, keys, lifetime): + for k, v in keys: + self.assertEqual(policy[k], v) + for k, v in lifetime.iteritems(): + self.assertEqual(policy['lifetime'][k], v) + + def test_create_ikepolicy(self): + """Test case to create an ikepolicy.""" + name = "ikepolicy1" + description = 'ipsec-ikepolicy' + keys = [('name', name), + ('description', 'ipsec-ikepolicy'), + ('auth_algorithm', 'sha1'), + ('encryption_algorithm', 'aes-128'), + ('phase1_negotiation_mode', 'main'), + ('ike_version', 'v1'), + ('pfs', 'group5'), + ('tenant_id', self._tenant_id)] + lifetime = { + 'units': 'seconds', + 'value': 3600} + with self.ikepolicy(name=name, description=description) as ikepolicy: + self._check_policy(ikepolicy['ikepolicy'], keys, lifetime) + + def test_delete_ikepolicy(self): + """Test case to delete an ikepolicy.""" + with self.ikepolicy(no_delete=True) as ikepolicy: + req = self.new_delete_request('ikepolicies', + ikepolicy['ikepolicy']['id']) + res = req.get_response(self.ext_api) + self.assertEqual(res.status_int, 204) + + def test_show_ikepolicy(self): + 
"""Test case to show or get an ikepolicy.""" + name = "ikepolicy1" + description = 'ipsec-ikepolicy' + keys = [('name', name), + ('auth_algorithm', 'sha1'), + ('encryption_algorithm', 'aes-128'), + ('phase1_negotiation_mode', 'main'), + ('ike_version', 'v1'), + ('pfs', 'group5'), + ('tenant_id', self._tenant_id)] + lifetime = { + 'units': 'seconds', + 'value': 3600} + with self.ikepolicy(name=name, description=description) as ikepolicy: + req = self.new_show_request('ikepolicies', + ikepolicy['ikepolicy']['id'], + fmt=self.fmt) + res = self.deserialize(self.fmt, req.get_response(self.ext_api)) + self._check_policy(res['ikepolicy'], keys, lifetime) + + def test_list_ikepolicies(self): + """Test case to list all ikepolicies.""" + name = "ikepolicy_list" + keys = [('name', name), + ('auth_algorithm', 'sha1'), + ('encryption_algorithm', 'aes-128'), + ('phase1_negotiation_mode', 'main'), + ('ike_version', 'v1'), + ('pfs', 'group5'), + ('tenant_id', self._tenant_id)] + lifetime = { + 'units': 'seconds', + 'value': 3600} + with self.ikepolicy(name=name) as ikepolicy: + keys.append(('id', ikepolicy['ikepolicy']['id'])) + req = self.new_list_request('ikepolicies') + res = self.deserialize(self.fmt, req.get_response(self.ext_api)) + self.assertEqual(len(res), 1) + for k, v in keys: + self.assertEqual(res['ikepolicies'][0][k], v) + for k, v in lifetime.iteritems(): + self.assertEqual(res['ikepolicies'][0]['lifetime'][k], v) + + def test_list_ikepolicies_with_sort_emulated(self): + """Test case to list all ikepolicies.""" + with contextlib.nested(self.ikepolicy(name='ikepolicy1'), + self.ikepolicy(name='ikepolicy2'), + self.ikepolicy(name='ikepolicy3') + ) as (ikepolicy1, ikepolicy2, ikepolicy3): + self._test_list_with_sort('ikepolicy', (ikepolicy3, + ikepolicy2, + ikepolicy1), + [('name', 'desc')], + 'ikepolicies') + + def test_list_ikepolicies_with_pagination_emulated(self): + """Test case to list all ikepolicies with pagination.""" + with 
contextlib.nested(self.ikepolicy(name='ikepolicy1'), + self.ikepolicy(name='ikepolicy2'), + self.ikepolicy(name='ikepolicy3') + ) as (ikepolicy1, ikepolicy2, ikepolicy3): + self._test_list_with_pagination('ikepolicy', + (ikepolicy1, + ikepolicy2, + ikepolicy3), + ('name', 'asc'), 2, 2, + 'ikepolicies') + + def test_list_ikepolicies_with_pagination_reverse_emulated(self): + """Test case to list all ikepolicies with reverse pagination.""" + with contextlib.nested(self.ikepolicy(name='ikepolicy1'), + self.ikepolicy(name='ikepolicy2'), + self.ikepolicy(name='ikepolicy3') + ) as (ikepolicy1, ikepolicy2, ikepolicy3): + self._test_list_with_pagination_reverse('ikepolicy', + (ikepolicy1, + ikepolicy2, + ikepolicy3), + ('name', 'asc'), 2, 2, + 'ikepolicies') + + def test_update_ikepolicy(self): + """Test case to update an ikepolicy.""" + name = "new_ikepolicy1" + keys = [('name', name), + ('auth_algorithm', 'sha1'), + ('encryption_algorithm', 'aes-128'), + ('phase1_negotiation_mode', 'main'), + ('ike_version', 'v1'), + ('pfs', 'group5'), + ('tenant_id', self._tenant_id), + ('lifetime', {'units': 'seconds', + 'value': 60})] + with self.ikepolicy(name=name) as ikepolicy: + data = {'ikepolicy': {'name': name, + 'lifetime': {'units': 'seconds', + 'value': 60}}} + req = self.new_update_request("ikepolicies", + data, + ikepolicy['ikepolicy']['id']) + res = self.deserialize(self.fmt, req.get_response(self.ext_api)) + for k, v in keys: + self.assertEqual(res['ikepolicy'][k], v) + + def test_create_ikepolicy_with_invalid_values(self): + """Test case to test invalid values.""" + name = 'ikepolicy1' + self._create_ikepolicy(name=name, + fmt=self.fmt, + auth_algorithm='md5', + expected_res_status=400) + self._create_ikepolicy(name=name, + fmt=self.fmt, + auth_algorithm=200, + expected_res_status=400) + self._create_ikepolicy(name=name, + fmt=self.fmt, + encryption_algorithm='des', + expected_res_status=400) + self._create_ikepolicy(name=name, + fmt=self.fmt, + encryption_algorithm=100, 
+ expected_res_status=400) + self._create_ikepolicy(name=name, + fmt=self.fmt, + phase1_negotiation_mode='aggressive', + expected_res_status=400) + self._create_ikepolicy(name=name, + fmt=self.fmt, + phase1_negotiation_mode=-100, + expected_res_status=400) + self._create_ikepolicy(name=name, + fmt=self.fmt, + ike_version='v6', + expected_res_status=400) + self._create_ikepolicy(name=name, + fmt=self.fmt, + ike_version=500, + expected_res_status=400) + self._create_ikepolicy(name=name, + fmt=self.fmt, + pfs='group1', + expected_res_status=400) + self._create_ikepolicy(name=name, + fmt=self.fmt, + pfs=120, + expected_res_status=400) + self._create_ikepolicy(name=name, + fmt=self.fmt, + lifetime_units='Megabytes', + expected_res_status=400) + self._create_ikepolicy(name=name, + fmt=self.fmt, + lifetime_units=20000, + expected_res_status=400) + self._create_ikepolicy(name=name, + fmt=self.fmt, + lifetime_value=-20, + expected_res_status=400) + self._create_ikepolicy(name=name, + fmt=self.fmt, + lifetime_value='Megabytes', + expected_res_status=400) + + def test_create_ipsecpolicy(self): + """Test case to create an ipsecpolicy.""" + name = "ipsecpolicy1" + description = 'my-ipsecpolicy' + keys = [('name', name), + ('description', 'my-ipsecpolicy'), + ('auth_algorithm', 'sha1'), + ('encryption_algorithm', 'aes-128'), + ('encapsulation_mode', 'tunnel'), + ('transform_protocol', 'esp'), + ('pfs', 'group5'), + ('tenant_id', self._tenant_id)] + lifetime = { + 'units': 'seconds', + 'value': 3600} + with self.ipsecpolicy(name=name, + description=description) as ipsecpolicy: + self._check_policy(ipsecpolicy['ipsecpolicy'], keys, lifetime) + + def test_delete_ipsecpolicy(self): + """Test case to delete an ipsecpolicy.""" + with self.ipsecpolicy(no_delete=True) as ipsecpolicy: + req = self.new_delete_request('ipsecpolicies', + ipsecpolicy['ipsecpolicy']['id']) + res = req.get_response(self.ext_api) + self.assertEqual(res.status_int, 204) + + def test_show_ipsecpolicy(self): + 
"""Test case to show or get an ipsecpolicy.""" + name = "ipsecpolicy1" + keys = [('name', name), + ('auth_algorithm', 'sha1'), + ('encryption_algorithm', 'aes-128'), + ('encapsulation_mode', 'tunnel'), + ('transform_protocol', 'esp'), + ('pfs', 'group5'), + ('tenant_id', self._tenant_id)] + lifetime = { + 'units': 'seconds', + 'value': 3600} + with self.ipsecpolicy(name=name) as ipsecpolicy: + req = self.new_show_request('ipsecpolicies', + ipsecpolicy['ipsecpolicy']['id'], + fmt=self.fmt) + res = self.deserialize(self.fmt, req.get_response(self.ext_api)) + self._check_policy(res['ipsecpolicy'], keys, lifetime) + + def test_list_ipsecpolicies(self): + """Test case to list all ipsecpolicies.""" + name = "ipsecpolicy_list" + keys = [('name', name), + ('auth_algorithm', 'sha1'), + ('encryption_algorithm', 'aes-128'), + ('encapsulation_mode', 'tunnel'), + ('transform_protocol', 'esp'), + ('pfs', 'group5'), + ('tenant_id', self._tenant_id)] + lifetime = { + 'units': 'seconds', + 'value': 3600} + with self.ipsecpolicy(name=name) as ipsecpolicy: + keys.append(('id', ipsecpolicy['ipsecpolicy']['id'])) + req = self.new_list_request('ipsecpolicies') + res = self.deserialize(self.fmt, req.get_response(self.ext_api)) + self.assertEqual(len(res), 1) + self._check_policy(res['ipsecpolicies'][0], keys, lifetime) + + def test_list_ipsecpolicies_with_sort_emulated(self): + """Test case to list all ipsecpolicies.""" + with contextlib.nested(self.ipsecpolicy(name='ipsecpolicy1'), + self.ipsecpolicy(name='ipsecpolicy2'), + self.ipsecpolicy(name='ipsecpolicy3') + ) as(ipsecpolicy1, ipsecpolicy2, ipsecpolicy3): + self._test_list_with_sort('ipsecpolicy', (ipsecpolicy3, + ipsecpolicy2, + ipsecpolicy1), + [('name', 'desc')], + 'ipsecpolicies') + + def test_list_ipsecpolicies_with_pagination_emulated(self): + """Test case to list all ipsecpolicies with pagination.""" + with contextlib.nested(self.ipsecpolicy(name='ipsecpolicy1'), + self.ipsecpolicy(name='ipsecpolicy2'), + 
self.ipsecpolicy(name='ipsecpolicy3') + ) as(ipsecpolicy1, ipsecpolicy2, ipsecpolicy3): + self._test_list_with_pagination('ipsecpolicy', + (ipsecpolicy1, + ipsecpolicy2, + ipsecpolicy3), + ('name', 'asc'), 2, 2, + 'ipsecpolicies') + + def test_list_ipsecpolicies_with_pagination_reverse_emulated(self): + """Test case to list all ipsecpolicies with reverse pagination.""" + with contextlib.nested(self.ipsecpolicy(name='ipsecpolicy1'), + self.ipsecpolicy(name='ipsecpolicy2'), + self.ipsecpolicy(name='ipsecpolicy3') + ) as(ipsecpolicy1, ipsecpolicy2, ipsecpolicy3): + self._test_list_with_pagination_reverse('ipsecpolicy', + (ipsecpolicy1, + ipsecpolicy2, + ipsecpolicy3), + ('name', 'asc'), 2, 2, + 'ipsecpolicies') + + def test_update_ipsecpolicy(self): + """Test case to update an ipsecpolicy.""" + name = "new_ipsecpolicy1" + keys = [('name', name), + ('auth_algorithm', 'sha1'), + ('encryption_algorithm', 'aes-128'), + ('encapsulation_mode', 'tunnel'), + ('transform_protocol', 'esp'), + ('pfs', 'group5'), + ('tenant_id', self._tenant_id), + ('lifetime', {'units': 'seconds', + 'value': 60})] + with self.ipsecpolicy(name=name) as ipsecpolicy: + data = {'ipsecpolicy': {'name': name, + 'lifetime': {'units': 'seconds', + 'value': 60}}} + req = self.new_update_request("ipsecpolicies", + data, + ipsecpolicy['ipsecpolicy']['id']) + res = self.deserialize(self.fmt, req.get_response(self.ext_api)) + for k, v in keys: + self.assertEqual(res['ipsecpolicy'][k], v) + + def test_update_ipsecpolicy_lifetime(self): + with self.ipsecpolicy() as ipsecpolicy: + data = {'ipsecpolicy': {'lifetime': {'units': 'seconds'}}} + req = self.new_update_request("ipsecpolicies", + data, + ipsecpolicy['ipsecpolicy']['id']) + res = self.deserialize(self.fmt, req.get_response(self.ext_api)) + self.assertEqual(res['ipsecpolicy']['lifetime']['units'], + 'seconds') + + data = {'ipsecpolicy': {'lifetime': {'value': 60}}} + req = self.new_update_request("ipsecpolicies", + data, + 
ipsecpolicy['ipsecpolicy']['id']) + res = self.deserialize(self.fmt, req.get_response(self.ext_api)) + self.assertEqual(res['ipsecpolicy']['lifetime']['value'], 60) + + def test_create_ipsecpolicy_with_invalid_values(self): + """Test case to test invalid values.""" + name = 'ipsecpolicy1' + + self._create_ipsecpolicy( + fmt=self.fmt, + name=name, auth_algorithm='md5', expected_res_status=400) + self._create_ipsecpolicy( + fmt=self.fmt, + name=name, auth_algorithm=100, expected_res_status=400) + + self._create_ipsecpolicy( + fmt=self.fmt, + name=name, encryption_algorithm='des', expected_res_status=400) + self._create_ipsecpolicy( + fmt=self.fmt, + name=name, encryption_algorithm=200, expected_res_status=400) + + self._create_ipsecpolicy( + fmt=self.fmt, + name=name, transform_protocol='abcd', expected_res_status=400) + self._create_ipsecpolicy( + fmt=self.fmt, + name=name, transform_protocol=500, expected_res_status=400) + + self._create_ipsecpolicy( + fmt=self.fmt, + name=name, + encapsulation_mode='unsupported', expected_res_status=400) + self._create_ipsecpolicy(name=name, + fmt=self.fmt, + encapsulation_mode=100, + expected_res_status=400) + + self._create_ipsecpolicy(name=name, + fmt=self.fmt, + pfs='group9', expected_res_status=400) + self._create_ipsecpolicy( + fmt=self.fmt, name=name, pfs=-1, expected_res_status=400) + + self._create_ipsecpolicy( + fmt=self.fmt, name=name, lifetime_units='minutes', + expected_res_status=400) + + self._create_ipsecpolicy(fmt=self.fmt, name=name, lifetime_units=100, + expected_res_status=400) + + self._create_ipsecpolicy(fmt=self.fmt, name=name, + lifetime_value=-800, expected_res_status=400) + self._create_ipsecpolicy(fmt=self.fmt, name=name, + lifetime_value='Megabytes', + expected_res_status=400) + + def test_create_vpnservice(self, **extras): + """Test case to create a vpnservice.""" + description = 'my-vpn-service' + expected = {'name': 'vpnservice1', + 'description': 'my-vpn-service', + 'admin_state_up': True, + 
'status': 'PENDING_CREATE', + 'tenant_id': self._tenant_id, } + + expected.update(extras) + with self.subnet(cidr='10.2.0.0/24') as subnet: + with self.router() as router: + expected['router_id'] = router['router']['id'] + expected['subnet_id'] = subnet['subnet']['id'] + name = expected['name'] + with self.vpnservice(name=name, + subnet=subnet, + router=router, + description=description, + **extras) as vpnservice: + self.assertEqual(dict((k, v) for k, v in + vpnservice['vpnservice'].items() + if k in expected), + expected) + + def test_create_vpnservice_with_invalid_router(self): + """Test case to create a vpnservice with other tenant's router""" + with self.network( + set_context=True, + tenant_id='tenant_a') as network: + with self.subnet(network=network, + cidr='10.2.0.0/24') as subnet: + with self.router( + set_context=True, tenant_id='tenant_a') as router: + router_id = router['router']['id'] + subnet_id = subnet['subnet']['id'] + self._create_vpnservice( + self.fmt, 'fake', + True, router_id, subnet_id, + expected_res_status=webob.exc.HTTPNotFound.code, + set_context=True, tenant_id='tenant_b') + + def test_create_vpnservice_with_router_no_external_gateway(self): + """Test case to create a vpnservice with inner router""" + error_code = 0 + with self.subnet(cidr='10.2.0.0/24') as subnet: + with self.router() as router: + router_id = router['router']['id'] + try: + with self.vpnservice(subnet=subnet, + router=router, + external_router=False): + pass + except webob.exc.HTTPClientError as e: + error_code, error_detail = ( + e.status_code, e.detail['NeutronError']['message']) + self.assertEqual(400, error_code) + msg = str(vpnaas.RouterIsNotExternal(router_id=router_id)) + self.assertEqual(msg, error_detail) + + def test_create_vpnservice_with_nonconnected_subnet(self): + """Test case to create a vpnservice with nonconnected subnet.""" + with self.network() as network: + with self.subnet(network=network, + cidr='10.2.0.0/24') as subnet: + with self.router() as 
router: + router_id = router['router']['id'] + subnet_id = subnet['subnet']['id'] + self._create_vpnservice( + self.fmt, 'fake', + True, router_id, subnet_id, + expected_res_status=webob.exc.HTTPBadRequest.code) + + def test_delete_router_in_use_by_vpnservice(self): + """Test delete router in use by vpn service.""" + with self.subnet(cidr='10.2.0.0/24') as subnet: + with self.router() as router: + with self.vpnservice(subnet=subnet, + router=router): + self._delete('routers', router['router']['id'], + expected_code=webob.exc.HTTPConflict.code) + + def test_update_vpnservice(self): + """Test case to update a vpnservice.""" + name = 'new_vpnservice1' + keys = [('name', name)] + with contextlib.nested( + self.subnet(cidr='10.2.0.0/24'), + self.router()) as (subnet, router): + with self.vpnservice(name=name, + subnet=subnet, + router=router) as vpnservice: + keys.append(('subnet_id', + vpnservice['vpnservice']['subnet_id'])) + keys.append(('router_id', + vpnservice['vpnservice']['router_id'])) + data = {'vpnservice': {'name': name}} + self._set_active(vpn_db.VPNService, + vpnservice['vpnservice']['id']) + req = self.new_update_request( + 'vpnservices', + data, + vpnservice['vpnservice']['id']) + res = self.deserialize(self.fmt, + req.get_response(self.ext_api)) + for k, v in keys: + self.assertEqual(res['vpnservice'][k], v) + + def test_update_vpnservice_with_invalid_state(self): + """Test case to update a vpnservice in invalid state .""" + name = 'new_vpnservice1' + keys = [('name', name)] + with contextlib.nested( + self.subnet(cidr='10.2.0.0/24'), + self.router()) as (subnet, router): + with self.vpnservice(name=name, + subnet=subnet, + router=router) as vpnservice: + keys.append(('subnet_id', + vpnservice['vpnservice']['subnet_id'])) + keys.append(('router_id', + vpnservice['vpnservice']['router_id'])) + data = {'vpnservice': {'name': name}} + req = self.new_update_request( + 'vpnservices', + data, + vpnservice['vpnservice']['id']) + res = 
req.get_response(self.ext_api) + self.assertEqual(400, res.status_int) + res = self.deserialize(self.fmt, res) + self.assertIn(vpnservice['vpnservice']['id'], + res['NeutronError']['message']) + + def test_delete_vpnservice(self): + """Test case to delete a vpnservice.""" + with self.vpnservice(name='vpnserver', + no_delete=True) as vpnservice: + req = self.new_delete_request('vpnservices', + vpnservice['vpnservice']['id']) + res = req.get_response(self.ext_api) + self.assertEqual(res.status_int, 204) + + def test_show_vpnservice(self): + """Test case to show or get a vpnservice.""" + name = "vpnservice1" + keys = [('name', name), + ('description', ''), + ('admin_state_up', True), + ('status', 'PENDING_CREATE')] + with self.vpnservice(name=name) as vpnservice: + req = self.new_show_request('vpnservices', + vpnservice['vpnservice']['id']) + res = self.deserialize(self.fmt, req.get_response(self.ext_api)) + for k, v in keys: + self.assertEqual(res['vpnservice'][k], v) + + def test_list_vpnservices(self): + """Test case to list all vpnservices.""" + name = "vpnservice_list" + keys = [('name', name), + ('description', ''), + ('admin_state_up', True), + ('status', 'PENDING_CREATE')] + with self.vpnservice(name=name) as vpnservice: + keys.append(('subnet_id', vpnservice['vpnservice']['subnet_id'])) + keys.append(('router_id', vpnservice['vpnservice']['router_id'])) + req = self.new_list_request('vpnservices') + res = self.deserialize(self.fmt, req.get_response(self.ext_api)) + self.assertEqual(len(res), 1) + for k, v in keys: + self.assertEqual(res['vpnservices'][0][k], v) + + def test_list_vpnservices_with_sort_emulated(self): + """Test case to list all vpnservices with sorting.""" + with self.subnet() as subnet: + with self.router() as router: + with contextlib.nested( + self.vpnservice(name='vpnservice1', + subnet=subnet, + router=router, + external_subnet_cidr='192.168.10.0/24',), + self.vpnservice(name='vpnservice2', + subnet=subnet, + router=router, + 
plug_subnet=False, + external_router=False, + external_subnet_cidr='192.168.11.0/24',), + self.vpnservice(name='vpnservice3', + subnet=subnet, + router=router, + plug_subnet=False, + external_router=False, + external_subnet_cidr='192.168.13.0/24',) + ) as(vpnservice1, vpnservice2, vpnservice3): + self._test_list_with_sort('vpnservice', (vpnservice3, + vpnservice2, + vpnservice1), + [('name', 'desc')]) + + def test_list_vpnservice_with_pagination_emulated(self): + """Test case to list all vpnservices with pagination.""" + with self.subnet() as subnet: + with self.router() as router: + with contextlib.nested( + self.vpnservice(name='vpnservice1', + subnet=subnet, + router=router, + external_subnet_cidr='192.168.10.0/24'), + self.vpnservice(name='vpnservice2', + subnet=subnet, + router=router, + plug_subnet=False, + external_subnet_cidr='192.168.20.0/24', + external_router=False), + self.vpnservice(name='vpnservice3', + subnet=subnet, + router=router, + plug_subnet=False, + external_subnet_cidr='192.168.30.0/24', + external_router=False) + ) as(vpnservice1, vpnservice2, vpnservice3): + self._test_list_with_pagination('vpnservice', + (vpnservice1, + vpnservice2, + vpnservice3), + ('name', 'asc'), 2, 2) + + def test_list_vpnservice_with_pagination_reverse_emulated(self): + """Test case to list all vpnservices with reverse pagination.""" + with self.subnet() as subnet: + with self.router() as router: + with contextlib.nested( + self.vpnservice(name='vpnservice1', + subnet=subnet, + router=router, + external_subnet_cidr='192.168.10.0/24'), + self.vpnservice(name='vpnservice2', + subnet=subnet, + router=router, + plug_subnet=False, + external_subnet_cidr='192.168.11.0/24', + external_router=False), + self.vpnservice(name='vpnservice3', + subnet=subnet, + router=router, + plug_subnet=False, + external_subnet_cidr='192.168.12.0/24', + external_router=False) + ) as(vpnservice1, vpnservice2, vpnservice3): + self._test_list_with_pagination_reverse('vpnservice', + (vpnservice1, 
+ vpnservice2, + vpnservice3), + ('name', 'asc'), + 2, 2) + + def test_create_ipsec_site_connection_with_invalid_values(self): + """Test case to create an ipsec_site_connection with invalid values.""" + name = 'connection1' + self._create_ipsec_site_connection( + fmt=self.fmt, + name=name, peer_cidrs='myname', expected_status_int=400) + self._create_ipsec_site_connection( + fmt=self.fmt, + name=name, mtu=-100, expected_status_int=400) + self._create_ipsec_site_connection( + fmt=self.fmt, + name=name, dpd_action='unsupported', expected_status_int=400) + self._create_ipsec_site_connection( + fmt=self.fmt, + name=name, dpd_interval=-1, expected_status_int=400) + self._create_ipsec_site_connection( + fmt=self.fmt, + name=name, dpd_timeout=-200, expected_status_int=400) + self._create_ipsec_site_connection( + fmt=self.fmt, + name=name, initiator='unsupported', expected_status_int=400) + self._create_ipsec_site_connection( + fmt=self.fmt, + name=name, + dpd_interval=30, + dpd_timeout=20, expected_status_int=400) + self._create_ipsec_site_connection( + fmt=self.fmt, + name=name, + dpd_interval=100, + dpd_timeout=100, expected_status_int=400) + + def _test_create_ipsec_site_connection(self, key_overrides=None, + setup_overrides=None, + expected_status_int=200): + """Create ipsec_site_connection and check results.""" + params = {'ikename': 'ikepolicy1', + 'ipsecname': 'ipsecpolicy1', + 'vpnsname': 'vpnservice1', + 'subnet_cidr': '10.2.0.0/24', + 'subnet_version': 4} + if setup_overrides is not None: + params.update(setup_overrides) + keys = {'name': 'connection1', + 'description': 'my-ipsec-connection', + 'peer_address': '192.168.1.10', + 'peer_id': '192.168.1.10', + 'peer_cidrs': ['192.168.2.0/24', '192.168.3.0/24'], + 'initiator': 'bi-directional', + 'mtu': 1500, + 'tenant_id': self._tenant_id, + 'psk': 'abcd', + 'status': 'PENDING_CREATE', + 'admin_state_up': True} + if key_overrides is not None: + keys.update(key_overrides) + dpd = {'action': 'hold', + 'interval': 40, + 
'timeout': 120} + with contextlib.nested( + self.ikepolicy(name=params['ikename']), + self.ipsecpolicy(name=params['ipsecname']), + self.subnet(cidr=params['subnet_cidr'], + ip_version=params['subnet_version']), + self.router()) as ( + ikepolicy, ipsecpolicy, subnet, router): + with self.vpnservice(name=params['vpnsname'], subnet=subnet, + router=router) as vpnservice1: + keys['ikepolicy_id'] = ikepolicy['ikepolicy']['id'] + keys['ipsecpolicy_id'] = ( + ipsecpolicy['ipsecpolicy']['id'] + ) + keys['vpnservice_id'] = ( + vpnservice1['vpnservice']['id'] + ) + try: + with self.ipsec_site_connection( + self.fmt, + keys['name'], + keys['peer_address'], + keys['peer_id'], + keys['peer_cidrs'], + keys['mtu'], + keys['psk'], + keys['initiator'], + dpd['action'], + dpd['interval'], + dpd['timeout'], + vpnservice1, + ikepolicy, + ipsecpolicy, + keys['admin_state_up'], + description=keys['description'] + ) as ipsec_site_connection: + if expected_status_int != 200: + self.fail("Expected failure on create") + self._check_ipsec_site_connection( + ipsec_site_connection['ipsec_site_connection'], + keys, + dpd) + except webob.exc.HTTPClientError as ce: + self.assertEqual(ce.code, expected_status_int) + + def test_create_ipsec_site_connection(self, **extras): + """Test case to create an ipsec_site_connection.""" + self._test_create_ipsec_site_connection(key_overrides=extras) + + def test_create_ipsec_site_connection_invalid_mtu(self): + """Test creating an ipsec_site_connection with invalid MTU.""" + self._test_create_ipsec_site_connection(key_overrides={'mtu': 67}, + expected_status_int=400) + ipv6_overrides = { + 'peer_address': 'fe80::c0a8:10a', + 'peer_id': 'fe80::c0a8:10a', + 'peer_cidrs': ['fe80::c0a8:200/120', 'fe80::c0a8:300/120'], + 'mtu': 1279} + ipv6_setup_params = {'subnet_cidr': 'fe80::a01:0/120', + 'subnet_version': 6} + self._test_create_ipsec_site_connection( + key_overrides=ipv6_overrides, + setup_overrides=ipv6_setup_params, + expected_status_int=400) + + def 
test_delete_ipsec_site_connection(self): + """Test case to delete a ipsec_site_connection.""" + with self.ipsec_site_connection( + no_delete=True) as ipsec_site_connection: + req = self.new_delete_request( + 'ipsec-site-connections', + ipsec_site_connection['ipsec_site_connection']['id'] + ) + res = req.get_response(self.ext_api) + self.assertEqual(res.status_int, 204) + + def test_update_ipsec_site_connection(self): + """Test case for valid updates to IPSec site connection.""" + dpd = {'action': 'hold', + 'interval': 40, + 'timeout': 120} + self._test_update_ipsec_site_connection(update={'dpd': dpd}) + self._test_update_ipsec_site_connection(update={'mtu': 2000}) + ipv6_settings = { + 'peer_address': 'fe80::c0a8:10a', + 'peer_id': 'fe80::c0a8:10a', + 'peer_cidrs': ['fe80::c0a8:200/120', 'fe80::c0a8:300/120'], + 'subnet_cidr': 'fe80::a02:0/120', + 'subnet_version': 6} + self._test_update_ipsec_site_connection(update={'mtu': 2000}, + overrides=ipv6_settings) + + def test_update_ipsec_site_connection_with_invalid_dpd(self): + """Test updates to ipsec_site_connection with invalid DPD settings.""" + dpd1 = {'action': 'hold', + 'interval': 100, + 'timeout': 100} + self._test_update_ipsec_site_connection( + update={'dpd': dpd1}, + expected_status_int=400) + dpd2 = {'action': 'hold', + 'interval': 100, + 'timeout': 60} + self._test_update_ipsec_site_connection( + update={'dpd': dpd2}, + expected_status_int=400) + dpd3 = {'action': 'hold', + 'interval': -50, + 'timeout': -100} + self._test_update_ipsec_site_connection( + update={'dpd': dpd3}, + expected_status_int=400) + + def test_update_ipsec_site_connection_with_invalid_mtu(self): + """Test updates to ipsec_site_connection with invalid MTU settings.""" + self._test_update_ipsec_site_connection( + update={'mtu': 67}, expected_status_int=400) + ipv6_settings = { + 'peer_address': 'fe80::c0a8:10a', + 'peer_id': 'fe80::c0a8:10a', + 'peer_cidrs': ['fe80::c0a8:200/120', 'fe80::c0a8:300/120'], + 'subnet_cidr': 
'fe80::a02:0/120', + 'subnet_version': 6} + self._test_update_ipsec_site_connection( + update={'mtu': 1279}, + overrides=ipv6_settings, + expected_status_int=400) + + def test_update_ipsec_site_connection_with_invalid_state(self): + """Test updating an ipsec_site_connection in invalid state.""" + self._test_update_ipsec_site_connection( + overrides={'make_active': False}, + expected_status_int=400) + + def test_update_ipsec_site_connection_peer_cidrs(self): + """Test updating an ipsec_site_connection for peer_cidrs.""" + new_peers = {'peer_cidrs': ['192.168.4.0/24', + '192.168.5.0/24']} + self._test_update_ipsec_site_connection( + update=new_peers) + + def _test_update_ipsec_site_connection(self, + update={'name': 'new name'}, + overrides=None, + expected_status_int=200): + """Creates and then updates ipsec_site_connection.""" + keys = {'name': 'new_ipsec_site_connection', + 'ikename': 'ikepolicy1', + 'ipsecname': 'ipsecpolicy1', + 'vpnsname': 'vpnservice1', + 'description': 'my-ipsec-connection', + 'peer_address': '192.168.1.10', + 'peer_id': '192.168.1.10', + 'peer_cidrs': ['192.168.2.0/24', '192.168.3.0/24'], + 'initiator': 'bi-directional', + 'mtu': 1500, + 'tenant_id': self._tenant_id, + 'psk': 'abcd', + 'status': 'ACTIVE', + 'admin_state_up': True, + 'action': 'hold', + 'interval': 40, + 'timeout': 120, + 'subnet_cidr': '10.2.0.0/24', + 'subnet_version': 4, + 'make_active': True} + if overrides is not None: + keys.update(overrides) + + with contextlib.nested( + self.ikepolicy(name=keys['ikename']), + self.ipsecpolicy(name=keys['ipsecname']), + self.subnet(cidr=keys['subnet_cidr'], + ip_version=keys['subnet_version']), + self.router()) as ( + ikepolicy, ipsecpolicy, subnet, router): + with self.vpnservice(name=keys['vpnsname'], subnet=subnet, + router=router) as vpnservice1: + keys['vpnservice_id'] = vpnservice1['vpnservice']['id'] + keys['ikepolicy_id'] = ikepolicy['ikepolicy']['id'] + keys['ipsecpolicy_id'] = ipsecpolicy['ipsecpolicy']['id'] + with 
self.ipsec_site_connection( + self.fmt, + keys['name'], + keys['peer_address'], + keys['peer_id'], + keys['peer_cidrs'], + keys['mtu'], + keys['psk'], + keys['initiator'], + keys['action'], + keys['interval'], + keys['timeout'], + vpnservice1, + ikepolicy, + ipsecpolicy, + keys['admin_state_up'], + description=keys['description'] + ) as ipsec_site_connection: + data = {'ipsec_site_connection': update} + if keys.get('make_active', None): + self._set_active( + vpn_db.IPsecSiteConnection, + (ipsec_site_connection['ipsec_site_connection'] + ['id'])) + req = self.new_update_request( + 'ipsec-site-connections', + data, + ipsec_site_connection['ipsec_site_connection']['id']) + res = req.get_response(self.ext_api) + self.assertEqual(expected_status_int, res.status_int) + if expected_status_int == 200: + res_dict = self.deserialize(self.fmt, res) + for k, v in update.items(): + self.assertEqual( + res_dict['ipsec_site_connection'][k], v) + + def test_show_ipsec_site_connection(self): + """Test case to show a ipsec_site_connection.""" + ikename = "ikepolicy1" + ipsecname = "ipsecpolicy1" + vpnsname = "vpnservice1" + name = "connection1" + description = "my-ipsec-connection" + keys = {'name': name, + 'description': "my-ipsec-connection", + 'peer_address': '192.168.1.10', + 'peer_id': '192.168.1.10', + 'peer_cidrs': ['192.168.2.0/24', '192.168.3.0/24'], + 'initiator': 'bi-directional', + 'mtu': 1500, + 'tenant_id': self._tenant_id, + 'psk': 'abcd', + 'status': 'PENDING_CREATE', + 'admin_state_up': True} + dpd = {'action': 'hold', + 'interval': 40, + 'timeout': 120} + with contextlib.nested( + self.ikepolicy(name=ikename), + self.ipsecpolicy(name=ipsecname), + self.subnet(), + self.router()) as ( + ikepolicy, ipsecpolicy, subnet, router): + with self.vpnservice(name=vpnsname, subnet=subnet, + router=router) as vpnservice1: + keys['ikepolicy_id'] = ikepolicy['ikepolicy']['id'] + keys['ipsecpolicy_id'] = ipsecpolicy['ipsecpolicy']['id'] + keys['vpnservice_id'] = 
vpnservice1['vpnservice']['id'] + with self.ipsec_site_connection( + self.fmt, + name, + keys['peer_address'], + keys['peer_id'], + keys['peer_cidrs'], + keys['mtu'], + keys['psk'], + keys['initiator'], + dpd['action'], + dpd['interval'], + dpd['timeout'], + vpnservice1, + ikepolicy, + ipsecpolicy, + keys['admin_state_up'], + description=description, + ) as ipsec_site_connection: + + req = self.new_show_request( + 'ipsec-site-connections', + ipsec_site_connection[ + 'ipsec_site_connection']['id'], + fmt=self.fmt + ) + res = self.deserialize( + self.fmt, + req.get_response(self.ext_api) + ) + + self._check_ipsec_site_connection( + res['ipsec_site_connection'], + keys, + dpd) + + def test_list_ipsec_site_connections_with_sort_emulated(self): + """Test case to list all ipsec_site_connections with sort.""" + with self.subnet(cidr='10.2.0.0/24') as subnet: + with self.router() as router: + with self.vpnservice(subnet=subnet, + router=router + ) as vpnservice: + with contextlib.nested( + self.ipsec_site_connection( + name='connection1', vpnservice=vpnservice + ), + self.ipsec_site_connection( + name='connection2', vpnservice=vpnservice + ), + self.ipsec_site_connection( + name='connection3', vpnservice=vpnservice + ) + ) as(ipsec_site_connection1, + ipsec_site_connection2, + ipsec_site_connection3): + self._test_list_with_sort('ipsec-site-connection', + (ipsec_site_connection3, + ipsec_site_connection2, + ipsec_site_connection1), + [('name', 'desc')]) + + def test_list_ipsec_site_connections_with_pagination_emulated(self): + """Test case to list all ipsec_site_connections with pagination.""" + with self.subnet(cidr='10.2.0.0/24') as subnet: + with self.router() as router: + with self.vpnservice(subnet=subnet, + router=router + ) as vpnservice: + with contextlib.nested( + self.ipsec_site_connection( + name='ipsec_site_connection1', + vpnservice=vpnservice + ), + self.ipsec_site_connection( + name='ipsec_site_connection1', + vpnservice=vpnservice + ), + 
self.ipsec_site_connection( + name='ipsec_site_connection1', + vpnservice=vpnservice + ) + ) as(ipsec_site_connection1, + ipsec_site_connection2, + ipsec_site_connection3): + self._test_list_with_pagination( + 'ipsec-site-connection', + (ipsec_site_connection1, + ipsec_site_connection2, + ipsec_site_connection3), + ('name', 'asc'), 2, 2) + + def test_list_ipsec_site_conns_with_pagination_reverse_emulated(self): + """Test to list all ipsec_site_connections with reverse pagination.""" + with self.subnet(cidr='10.2.0.0/24') as subnet: + with self.router() as router: + with self.vpnservice(subnet=subnet, + router=router + ) as vpnservice: + with contextlib.nested( + self.ipsec_site_connection( + name='connection1', vpnservice=vpnservice + ), + self.ipsec_site_connection( + name='connection2', vpnservice=vpnservice + ), + self.ipsec_site_connection( + name='connection3', vpnservice=vpnservice + ) + ) as(ipsec_site_connection1, + ipsec_site_connection2, + ipsec_site_connection3): + self._test_list_with_pagination_reverse( + 'ipsec-site-connection', + (ipsec_site_connection1, + ipsec_site_connection2, + ipsec_site_connection3), + ('name', 'asc'), 2, 2 + ) + + def test_create_vpn(self): + """Test case to create a vpn.""" + vpns_name = "vpnservice1" + ike_name = "ikepolicy1" + ipsec_name = "ipsecpolicy1" + name1 = "ipsec_site_connection1" + with contextlib.nested( + self.ikepolicy(name=ike_name), + self.ipsecpolicy(name=ipsec_name), + self.vpnservice(name=vpns_name)) as ( + ikepolicy, ipsecpolicy, vpnservice): + vpnservice_id = vpnservice['vpnservice']['id'] + ikepolicy_id = ikepolicy['ikepolicy']['id'] + ipsecpolicy_id = ipsecpolicy['ipsecpolicy']['id'] + with self.ipsec_site_connection( + self.fmt, + name1, + '192.168.1.10', + '192.168.1.10', + ['192.168.2.0/24', + '192.168.3.0/24'], + 1500, + 'abcdef', + 'bi-directional', + 'hold', + 30, + 120, + vpnservice, + ikepolicy, + ipsecpolicy, + True + ) as vpnconn1: + + vpnservice_req = self.new_show_request( + 'vpnservices', + 
vpnservice_id, + fmt=self.fmt) + vpnservice_updated = self.deserialize( + self.fmt, + vpnservice_req.get_response(self.ext_api) + ) + self.assertEqual( + vpnservice_updated['vpnservice']['id'], + vpnconn1['ipsec_site_connection']['vpnservice_id'] + ) + ikepolicy_req = self.new_show_request('ikepolicies', + ikepolicy_id, + fmt=self.fmt) + ikepolicy_res = self.deserialize( + self.fmt, + ikepolicy_req.get_response(self.ext_api) + ) + self.assertEqual( + ikepolicy_res['ikepolicy']['id'], + vpnconn1['ipsec_site_connection']['ikepolicy_id']) + ipsecpolicy_req = self.new_show_request( + 'ipsecpolicies', + ipsecpolicy_id, + fmt=self.fmt) + ipsecpolicy_res = self.deserialize( + self.fmt, + ipsecpolicy_req.get_response(self.ext_api) + ) + self.assertEqual( + ipsecpolicy_res['ipsecpolicy']['id'], + vpnconn1['ipsec_site_connection']['ipsecpolicy_id'] + ) + + def test_delete_ikepolicy_inuse(self): + """Test case to delete an ikepolicy, that is in use.""" + vpns_name = "vpnservice1" + ike_name = "ikepolicy1" + ipsec_name = "ipsecpolicy1" + name1 = "ipsec_site_connection1" + with self.ikepolicy(name=ike_name) as ikepolicy: + with self.ipsecpolicy(name=ipsec_name) as ipsecpolicy: + with self.vpnservice(name=vpns_name) as vpnservice: + with self.ipsec_site_connection( + self.fmt, + name1, + '192.168.1.10', + '192.168.1.10', + ['192.168.2.0/24', + '192.168.3.0/24'], + 1500, + 'abcdef', + 'bi-directional', + 'hold', + 30, + 120, + vpnservice, + ikepolicy, + ipsecpolicy, + True + ): + delete_req = self.new_delete_request( + 'ikepolicies', + ikepolicy['ikepolicy']['id'] + ) + delete_res = delete_req.get_response(self.ext_api) + self.assertEqual(409, delete_res.status_int) + + def test_delete_ipsecpolicy_inuse(self): + """Test case to delete an ipsecpolicy, that is in use.""" + vpns_name = "vpnservice1" + ike_name = "ikepolicy1" + ipsec_name = "ipsecpolicy1" + name1 = "ipsec_site_connection1" + with self.ikepolicy(name=ike_name) as ikepolicy: + with self.ipsecpolicy(name=ipsec_name) as 
ipsecpolicy: + with self.vpnservice(name=vpns_name) as vpnservice: + with self.ipsec_site_connection( + self.fmt, + name1, + '192.168.1.10', + '192.168.1.10', + ['192.168.2.0/24', + '192.168.3.0/24'], + 1500, + 'abcdef', + 'bi-directional', + 'hold', + 30, + 120, + vpnservice, + ikepolicy, + ipsecpolicy, + True + ): + + delete_req = self.new_delete_request( + 'ipsecpolicies', + ipsecpolicy['ipsecpolicy']['id'] + ) + delete_res = delete_req.get_response(self.ext_api) + self.assertEqual(409, delete_res.status_int) + + +class TestVpnaasXML(TestVpnaas): + fmt = 'xml' diff --git a/neutron/tests/unit/dummy_plugin.py b/neutron/tests/unit/dummy_plugin.py new file mode 100644 index 000000000..fc58a7e29 --- /dev/null +++ b/neutron/tests/unit/dummy_plugin.py @@ -0,0 +1,139 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from neutron.api import extensions +from neutron.api.v2 import base +from neutron.common import exceptions +from neutron.db import servicetype_db +from neutron.extensions import servicetype +from neutron import manager +from neutron.openstack.common import uuidutils +from neutron.plugins.common import constants +from neutron.services import service_base + + +DUMMY_PLUGIN_NAME = "dummy_plugin" +RESOURCE_NAME = "dummy" +COLLECTION_NAME = "%ss" % RESOURCE_NAME + +# Attribute Map for dummy resource +RESOURCE_ATTRIBUTE_MAP = { + COLLECTION_NAME: { + 'id': {'allow_post': False, 'allow_put': False, + 'validate': {'type:uuid': None}, + 'is_visible': True}, + 'name': {'allow_post': True, 'allow_put': True, + 'validate': {'type:string': None}, + 'is_visible': True, 'default': ''}, + 'tenant_id': {'allow_post': True, 'allow_put': False, + 'required_by_policy': True, + 'is_visible': True}, + 'service_type': {'allow_post': True, + 'allow_put': False, + 'validate': {'type:servicetype_ref': None}, + 'is_visible': True, + 'default': None} + } +} + + +class Dummy(object): + + @classmethod + def get_name(cls): + return "dummy" + + @classmethod + def get_alias(cls): + return "dummy" + + @classmethod + def get_description(cls): + return "Dummy stuff" + + @classmethod + def get_namespace(cls): + return "http://docs.openstack.org/ext/neutron/dummy/api/v1.0" + + @classmethod + def get_updated(cls): + return "2012-11-20T10:00:00-00:00" + + @classmethod + def get_resources(cls): + """Returns Extended Resource for dummy management.""" + q_mgr = manager.NeutronManager.get_instance() + dummy_inst = q_mgr.get_service_plugins()['DUMMY'] + controller = base.create_resource( + COLLECTION_NAME, RESOURCE_NAME, dummy_inst, + RESOURCE_ATTRIBUTE_MAP[COLLECTION_NAME]) + return [extensions.ResourceExtension(COLLECTION_NAME, + controller)] + + +class DummyServicePlugin(service_base.ServicePluginBase): + """This is a simple plugin for managing instantes of a fictional 'dummy' + service. 
This plugin is provided as a proof-of-concept of how + advanced service might leverage the service type extension. + Ideally, instances of real advanced services, such as load balancing + or VPN will adopt a similar solution. + """ + + supported_extension_aliases = ['dummy', servicetype.EXT_ALIAS] + agent_notifiers = {'dummy': 'dummy_agent_notifier'} + + def __init__(self): + self.svctype_mgr = servicetype_db.ServiceTypeManager.get_instance() + self.dummys = {} + + def get_plugin_type(self): + return constants.DUMMY + + def get_plugin_name(self): + return DUMMY_PLUGIN_NAME + + def get_plugin_description(self): + return "Neutron Dummy Service Plugin" + + def get_dummys(self, context, filters, fields): + return self.dummys.values() + + def get_dummy(self, context, id, fields): + try: + return self.dummys[id] + except KeyError: + raise exceptions.NotFound() + + def create_dummy(self, context, dummy): + d = dummy['dummy'] + d['id'] = uuidutils.generate_uuid() + self.dummys[d['id']] = d + self.svctype_mgr.increase_service_type_refcount(context, + d['service_type']) + return d + + def update_dummy(self, context, id, dummy): + pass + + def delete_dummy(self, context, id): + try: + svc_type_id = self.dummys[id]['service_type'] + del self.dummys[id] + self.svctype_mgr.decrease_service_type_refcount(context, + svc_type_id) + except KeyError: + raise exceptions.NotFound() diff --git a/neutron/tests/unit/embrane/__init__.py b/neutron/tests/unit/embrane/__init__.py new file mode 100644 index 000000000..bb81770cd --- /dev/null +++ b/neutron/tests/unit/embrane/__init__.py @@ -0,0 +1,18 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 Embrane, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Ivar Lazzaro, Embrane, Inc. diff --git a/neutron/tests/unit/embrane/test_embrane_defaults.py b/neutron/tests/unit/embrane/test_embrane_defaults.py new file mode 100644 index 000000000..ea84d63ab --- /dev/null +++ b/neutron/tests/unit/embrane/test_embrane_defaults.py @@ -0,0 +1,31 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 Embrane, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Ivar Lazzaro, Embrane, Inc. 
+ +from oslo.config import cfg + +from neutron.plugins.embrane.common import config # noqa +from neutron.tests import base + + +class ConfigurationTest(base.BaseTestCase): + + def test_defaults(self): + self.assertEqual('admin', cfg.CONF.heleos.admin_username) + self.assertEqual('default', cfg.CONF.heleos.resource_pool_id) + self.assertTrue(cfg.CONF.heleos.async_requests) diff --git a/neutron/tests/unit/embrane/test_embrane_l3_plugin.py b/neutron/tests/unit/embrane/test_embrane_l3_plugin.py new file mode 100644 index 000000000..548a1d432 --- /dev/null +++ b/neutron/tests/unit/embrane/test_embrane_l3_plugin.py @@ -0,0 +1,41 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 Embrane, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Ivar Lazzaro, Embrane, Inc. + +from oslo.config import cfg + +from neutron.db import api as db +from neutron.plugins.embrane.common import config # noqa +from neutron.tests.unit import test_extension_extraroute as extraroute_test +from neutron.tests.unit import test_l3_plugin as router_test + +PLUGIN_NAME = ('neutron.plugins.embrane.plugins.embrane_fake_plugin.' 
+ 'EmbraneFakePlugin') + + +class TestEmbraneL3NatDBTestCase(router_test.L3NatDBIntTestCase): + _plugin_name = PLUGIN_NAME + + def setUp(self): + cfg.CONF.set_override('admin_password', "admin123", 'heleos') + self.addCleanup(db.clear_db) + super(TestEmbraneL3NatDBTestCase, self).setUp() + + +class ExtraRouteDBTestCase(extraroute_test.ExtraRouteDBIntTestCase): + _plugin_name = PLUGIN_NAME diff --git a/neutron/tests/unit/embrane/test_embrane_neutron_plugin.py b/neutron/tests/unit/embrane/test_embrane_neutron_plugin.py new file mode 100644 index 000000000..74b64e415 --- /dev/null +++ b/neutron/tests/unit/embrane/test_embrane_neutron_plugin.py @@ -0,0 +1,82 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 Embrane, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Ivar Lazzaro, Embrane, Inc. +import sys + +import mock +from oslo.config import cfg + +from neutron.db import api as db +from neutron.plugins.embrane.common import config # noqa +from neutron.tests.unit import test_db_plugin as test_plugin + +PLUGIN_NAME = ('neutron.plugins.embrane.plugins.embrane_fake_plugin.' 
+ 'EmbraneFakePlugin') + + +class EmbranePluginV2TestCase(test_plugin.NeutronDbPluginV2TestCase): + _plugin_name = PLUGIN_NAME + + def setUp(self): + cfg.CONF.set_override('admin_password', "admin123", 'heleos') + p = mock.patch.dict(sys.modules, {'heleosapi': mock.Mock()}) + p.start() + self.addCleanup(db.clear_db) + # dict patches must be explicitly stopped + self.addCleanup(p.stop) + super(EmbranePluginV2TestCase, self).setUp(self._plugin_name) + + +class TestEmbraneBasicGet(test_plugin.TestBasicGet, EmbranePluginV2TestCase): + pass + + +class TestEmbraneV2HTTPResponse(test_plugin.TestV2HTTPResponse, + EmbranePluginV2TestCase): + pass + + +class TestEmbranePortsV2(test_plugin.TestPortsV2, EmbranePluginV2TestCase): + + def test_create_ports_bulk_emulated_plugin_failure(self): + self.skip("Temporary skipping due to incompatibility with the" + " plugin dynamic class type") + + def test_recycle_expired_previously_run_within_context(self): + self.skip("Temporary skipping due to incompatibility with the" + " plugin dynamic class type") + + def test_recycle_held_ip_address(self): + self.skip("Temporary skipping due to incompatibility with the" + " plugin dynamic class type") + + +class TestEmbraneNetworksV2(test_plugin.TestNetworksV2, + EmbranePluginV2TestCase): + + def test_create_networks_bulk_emulated_plugin_failure(self): + self.skip("Temporary skipping due to incompatibility with the" + " plugin dynamic class type") + + +class TestEmbraneSubnetsV2(test_plugin.TestSubnetsV2, + EmbranePluginV2TestCase): + + def test_create_subnets_bulk_emulated_plugin_failure(self): + self.skip("Temporary skipping due to incompatibility with the" + " plugin dynamic class type") diff --git a/neutron/tests/unit/extension_stubs.py b/neutron/tests/unit/extension_stubs.py new file mode 100644 index 000000000..6241ab0f1 --- /dev/null +++ b/neutron/tests/unit/extension_stubs.py @@ -0,0 +1,77 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# Copyright 2011 OpenStack Foundation. 
+# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import abc + +from neutron.api import extensions +from neutron import wsgi + + +class StubExtension(object): + + def __init__(self, alias="stub_extension"): + self.alias = alias + + def get_name(self): + return "Stub Extension" + + def get_alias(self): + return self.alias + + def get_description(self): + return "" + + def get_namespace(self): + return "" + + def get_updated(self): + return "" + + +class StubPlugin(object): + + def __init__(self, supported_extensions=[]): + self.supported_extension_aliases = supported_extensions + + +class ExtensionExpectingPluginInterface(StubExtension): + """Expect plugin to implement all methods in StubPluginInterface. + + This extension expects plugin to implement all the methods defined + in StubPluginInterface. 
+ """ + + def get_plugin_interface(self): + return StubPluginInterface + + +class StubPluginInterface(extensions.PluginInterface): + + @abc.abstractmethod + def get_foo(self, bar=None): + pass + + +class StubBaseAppController(wsgi.Controller): + + def index(self, request): + return "base app index" + + def show(self, request, id): + return {'fort': 'knox'} + + def update(self, request, id): + return {'uneditable': 'original_value'} diff --git a/neutron/tests/unit/extensions/__init__.py b/neutron/tests/unit/extensions/__init__.py new file mode 100644 index 000000000..4ac574e58 --- /dev/null +++ b/neutron/tests/unit/extensions/__init__.py @@ -0,0 +1,15 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/neutron/tests/unit/extensions/extendedattribute.py b/neutron/tests/unit/extensions/extendedattribute.py new file mode 100644 index 000000000..062acc776 --- /dev/null +++ b/neutron/tests/unit/extensions/extendedattribute.py @@ -0,0 +1,58 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013 VMware, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Kaiwei Fan, VMware, Inc + +from neutron.api import extensions + +EXTENDED_ATTRIBUTE = 'extended_attribute' +EXTENDED_ATTRIBUTES_2_0 = { + 'ext_test_resources': { + EXTENDED_ATTRIBUTE: {'allow_post': True, 'allow_put': False, + 'validate': {'type:uuid_or_none': None}, + 'default': None, 'is_visible': True}, + } +} + + +class Extendedattribute(extensions.ExtensionDescriptor): + """Extension class supporting extended attribute for router.""" + + @classmethod + def get_name(cls): + return "Extended Extension Attributes" + + @classmethod + def get_alias(cls): + return "extended-ext-attr" + + @classmethod + def get_description(cls): + return "Provides extended_attr attribute to router" + + @classmethod + def get_namespace(cls): + return "" + + @classmethod + def get_updated(cls): + return "2013-02-05T00:00:00-00:00" + + def get_extended_resources(self, version): + if version == "2.0": + return EXTENDED_ATTRIBUTES_2_0 + else: + return {} diff --git a/neutron/tests/unit/extensions/extensionattribute.py b/neutron/tests/unit/extensions/extensionattribute.py new file mode 100644 index 000000000..a348587c5 --- /dev/null +++ b/neutron/tests/unit/extensions/extensionattribute.py @@ -0,0 +1,110 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 VMware, Inc. +# All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Kaiwei Fan, VMware, Inc +# + +import abc + +from neutron.api import extensions +from neutron.api.v2 import base +from neutron import manager +from neutron import quota + + +# Attribute Map +RESOURCE_ATTRIBUTE_MAP = { + 'ext_test_resources': { + 'id': {'allow_post': False, 'allow_put': False, + 'validate': {'type:uuid': None}, + 'is_visible': True}, + 'name': {'allow_post': True, 'allow_put': True, + 'validate': {'type:string': None}, + 'is_visible': True, 'default': ''}, + 'tenant_id': {'allow_post': True, 'allow_put': False, + 'required_by_policy': True, + 'validate': {'type:string': None}, + 'is_visible': True}, + } +} + + +class Extensionattribute(extensions.ExtensionDescriptor): + + @classmethod + def get_name(cls): + return "Extension Test Resource" + + @classmethod + def get_alias(cls): + return "ext-obj-test" + + @classmethod + def get_description(cls): + return "Extension Test Resource" + + @classmethod + def get_namespace(cls): + return "" + + @classmethod + def get_updated(cls): + return "2013-02-05T10:00:00-00:00" + + def update_attributes_map(self, attributes): + super(Extensionattribute, self).update_attributes_map( + attributes, extension_attrs_map=RESOURCE_ATTRIBUTE_MAP) + + @classmethod + def get_resources(cls): + """Returns Ext Resources.""" + exts = [] + plugin = manager.NeutronManager.get_plugin() + resource_name = 'ext_test_resource' + collection_name = resource_name + "s" + params = RESOURCE_ATTRIBUTE_MAP.get(collection_name, dict()) + + quota.QUOTAS.register_resource_by_name(resource_name) + + controller = 
base.create_resource(collection_name, + resource_name, + plugin, params, + member_actions={}) + + ex = extensions.ResourceExtension(collection_name, + controller, + member_actions={}) + exts.append(ex) + + return exts + + def get_extended_resources(self, version): + if version == "2.0": + return RESOURCE_ATTRIBUTE_MAP + else: + return {} + + +class ExtensionObjectTestPluginBase(object): + + @abc.abstractmethod + def create_ext_test_resource(self, context, router): + pass + + @abc.abstractmethod + def get_ext_test_resource(self, context, id, fields=None): + pass diff --git a/neutron/tests/unit/extensions/foxinsocks.py b/neutron/tests/unit/extensions/foxinsocks.py new file mode 100644 index 000000000..27308a5a2 --- /dev/null +++ b/neutron/tests/unit/extensions/foxinsocks.py @@ -0,0 +1,110 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import abc + +from neutron.api import extensions +from neutron.openstack.common import jsonutils +from neutron import wsgi + + +class FoxInSocksController(wsgi.Controller): + + def index(self, request): + return "Try to say this Mr. Knox, sir..." 
+ + +class FoxInSocksPluginInterface(extensions.PluginInterface): + + @abc.abstractmethod + def method_to_support_foxnsox_extension(self): + pass + + +class Foxinsocks(object): + + def __init__(self): + pass + + def get_plugin_interface(self): + return FoxInSocksPluginInterface + + def get_name(self): + return "Fox In Socks" + + def get_alias(self): + return "FOXNSOX" + + def get_description(self): + return "The Fox In Socks Extension" + + def get_namespace(self): + return "http://www.fox.in.socks/api/ext/pie/v1.0" + + def get_updated(self): + return "2011-01-22T13:25:27-06:00" + + def get_resources(self): + resources = [] + resource = extensions.ResourceExtension('foxnsocks', + FoxInSocksController()) + resources.append(resource) + return resources + + def get_actions(self): + return [extensions.ActionExtension('dummy_resources', + 'FOXNSOX:add_tweedle', + self._add_tweedle_handler), + extensions.ActionExtension('dummy_resources', + 'FOXNSOX:delete_tweedle', + self._delete_tweedle_handler)] + + def get_request_extensions(self): + request_exts = [] + + def _goose_handler(req, res): + #NOTE: This only handles JSON responses. + # You can use content type header to test for XML. + data = jsonutils.loads(res.body) + data['FOXNSOX:googoose'] = req.GET.get('chewing') + res.body = jsonutils.dumps(data) + return res + + req_ext1 = extensions.RequestExtension('GET', '/dummy_resources/:(id)', + _goose_handler) + request_exts.append(req_ext1) + + def _bands_handler(req, res): + #NOTE: This only handles JSON responses. + # You can use content type header to test for XML. + data = jsonutils.loads(res.body) + data['FOXNSOX:big_bands'] = 'Pig Bands!' 
+ res.body = jsonutils.dumps(data) + return res + + req_ext2 = extensions.RequestExtension('GET', '/dummy_resources/:(id)', + _bands_handler) + request_exts.append(req_ext2) + return request_exts + + def _add_tweedle_handler(self, input_dict, req, id): + return "Tweedle {0} Added.".format( + input_dict['FOXNSOX:add_tweedle']['name']) + + def _delete_tweedle_handler(self, input_dict, req, id): + return "Tweedle {0} Deleted.".format( + input_dict['FOXNSOX:delete_tweedle']['name']) diff --git a/neutron/tests/unit/extensions/v2attributes.py b/neutron/tests/unit/extensions/v2attributes.py new file mode 100644 index 000000000..ab40f260a --- /dev/null +++ b/neutron/tests/unit/extensions/v2attributes.py @@ -0,0 +1,48 @@ +# Copyright (c) 2012 OpenStack Foundation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +EXTENDED_ATTRIBUTES_2_0 = { + 'networks': { + 'v2attrs:something': {'allow_post': False, + 'allow_put': False, + 'is_visible': True}, + 'v2attrs:something_else': {'allow_post': True, + 'allow_put': False, + 'is_visible': False}, + } +} + + +class V2attributes(object): + def get_name(self): + return "V2 Extended Attributes Example" + + def get_alias(self): + return "v2attrs" + + def get_description(self): + return "Demonstrates extended attributes on V2 core resources" + + def get_namespace(self): + return "http://docs.openstack.org/ext/examples/v2attributes/api/v1.0" + + def get_updated(self): + return "2012-07-18T10:00:00-00:00" + + def get_extended_resources(self, version): + if version == "2.0": + return EXTENDED_ATTRIBUTES_2_0 + else: + return {} diff --git a/neutron/tests/unit/hyperv/__init__.py b/neutron/tests/unit/hyperv/__init__.py new file mode 100644 index 000000000..7ef4e09fa --- /dev/null +++ b/neutron/tests/unit/hyperv/__init__.py @@ -0,0 +1,16 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 Cloudbase Solutions SRL +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
diff --git a/neutron/tests/unit/hyperv/test_hyperv_neutron_agent.py b/neutron/tests/unit/hyperv/test_hyperv_neutron_agent.py new file mode 100644 index 000000000..5d2a24c0d --- /dev/null +++ b/neutron/tests/unit/hyperv/test_hyperv_neutron_agent.py @@ -0,0 +1,221 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 Cloudbase Solutions SRL +# Copyright 2013 Pedro Navarro Perez +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Unit tests for Windows Hyper-V virtual switch neutron driver +""" + +import mock +from oslo.config import cfg + +from neutron.plugins.hyperv.agent import hyperv_neutron_agent +from neutron.plugins.hyperv.agent import utilsfactory +from neutron.tests import base + +cfg.CONF.import_opt('enable_metrics_collection', + 'neutron.plugins.hyperv.agent.hyperv_neutron_agent', + 'AGENT') + + +class TestHyperVNeutronAgent(base.BaseTestCase): + + _FAKE_PORT_ID = 'fake_port_id' + + def setUp(self): + super(TestHyperVNeutronAgent, self).setUp() + # Avoid rpc initialization for unit tests + cfg.CONF.set_override('rpc_backend', + 'neutron.openstack.common.rpc.impl_fake') + + utilsfactory._get_windows_version = mock.MagicMock( + return_value='6.2.0') + + class MockFixedIntervalLoopingCall(object): + def __init__(self, f): + self.f = f + + def start(self, interval=0): + self.f() + + mock.patch('neutron.openstack.common.loopingcall.' 
+ 'FixedIntervalLoopingCall', + new=MockFixedIntervalLoopingCall).start() + cfg.CONF.set_default('firewall_driver', + 'neutron.agent.firewall.NoopFirewallDriver', + group='SECURITYGROUP') + self.agent = hyperv_neutron_agent.HyperVNeutronAgent() + self.agent.plugin_rpc = mock.Mock() + self.agent.sec_groups_agent = mock.MagicMock() + self.agent.context = mock.Mock() + self.agent.agent_id = mock.Mock() + + fake_agent_state = { + 'binary': 'neutron-hyperv-agent', + 'host': 'fake_host_name', + 'topic': 'N/A', + 'configurations': {'vswitch_mappings': ['*:MyVirtualSwitch']}, + 'agent_type': 'HyperV agent', + 'start_flag': True} + self.agent_state = fake_agent_state + + def test_port_bound_enable_metrics(self): + cfg.CONF.set_override('enable_metrics_collection', True, 'AGENT') + self._test_port_bound(True) + + def test_port_bound_no_metrics(self): + cfg.CONF.set_override('enable_metrics_collection', False, 'AGENT') + self._test_port_bound(False) + + def _test_port_bound(self, enable_metrics): + port = mock.MagicMock() + mock_enable_metrics = mock.MagicMock() + net_uuid = 'my-net-uuid' + + with mock.patch.multiple( + self.agent._utils, + connect_vnic_to_vswitch=mock.MagicMock(), + set_vswitch_port_vlan_id=mock.MagicMock(), + enable_port_metrics_collection=mock_enable_metrics): + + self.agent._port_bound(port, net_uuid, 'vlan', None, None) + + self.assertEqual(enable_metrics, mock_enable_metrics.called) + + def test_port_unbound(self): + map = { + 'network_type': 'vlan', + 'vswitch_name': 'fake-vswitch', + 'ports': [], + 'vlan_id': 1} + net_uuid = 'my-net-uuid' + network_vswitch_map = (net_uuid, map) + with mock.patch.object(self.agent, + '_get_network_vswitch_map_by_port_id', + return_value=network_vswitch_map): + with mock.patch.object( + self.agent._utils, + 'disconnect_switch_port'): + self.agent._port_unbound(net_uuid) + + def test_port_enable_control_metrics_ok(self): + cfg.CONF.set_override('enable_metrics_collection', True, 'AGENT') + 
self.agent._port_metric_retries[self._FAKE_PORT_ID] = ( + cfg.CONF.AGENT.metrics_max_retries) + + with mock.patch.multiple(self.agent._utils, + can_enable_control_metrics=mock.MagicMock(), + enable_control_metrics=mock.MagicMock()): + + self.agent._utils.can_enable_control_metrics.return_value = True + self.agent._port_enable_control_metrics() + self.agent._utils.enable_control_metrics.assert_called_with( + self._FAKE_PORT_ID) + + self.assertNotIn(self._FAKE_PORT_ID, self.agent._port_metric_retries) + + def test_port_enable_control_metrics_maxed(self): + cfg.CONF.set_override('enable_metrics_collection', True, 'AGENT') + cfg.CONF.set_override('metrics_max_retries', 3, 'AGENT') + self.agent._port_metric_retries[self._FAKE_PORT_ID] = ( + cfg.CONF.AGENT.metrics_max_retries) + + with mock.patch.multiple(self.agent._utils, + can_enable_control_metrics=mock.MagicMock(), + enable_control_metrics=mock.MagicMock()): + + self.agent._utils.can_enable_control_metrics.return_value = False + for i in range(cfg.CONF.AGENT.metrics_max_retries + 1): + self.assertIn(self._FAKE_PORT_ID, + self.agent._port_metric_retries) + self.agent._port_enable_control_metrics() + + self.assertNotIn(self._FAKE_PORT_ID, self.agent._port_metric_retries) + + def test_treat_devices_added_returns_true_for_missing_device(self): + attrs = {'get_device_details.side_effect': Exception()} + self.agent.plugin_rpc.configure_mock(**attrs) + self.assertTrue(self.agent._treat_devices_added([{}])) + + def mock_treat_devices_added(self, details, func_name): + """Mock treat devices added. 
+ + :param details: the details to return for the device + :param func_name: the function that should be called + :returns: whether the named function was called + """ + attrs = {'get_device_details.return_value': details} + self.agent.plugin_rpc.configure_mock(**attrs) + with mock.patch.object(self.agent, func_name) as func: + self.assertFalse(self.agent._treat_devices_added([{}])) + return func.called + + def test_treat_devices_added_updates_known_port(self): + details = mock.MagicMock() + details.__contains__.side_effect = lambda x: True + with mock.patch.object(self.agent.plugin_rpc, + "update_device_up") as func: + self.assertTrue(self.mock_treat_devices_added(details, + '_treat_vif_port')) + self.assertTrue(func.called) + + def test_treat_devices_added_missing_port_id(self): + details = mock.MagicMock() + details.__contains__.side_effect = lambda x: False + with mock.patch.object(self.agent.plugin_rpc, + "update_device_up") as func: + self.assertFalse(self.mock_treat_devices_added(details, + '_treat_vif_port')) + self.assertFalse(func.called) + + def test_treat_devices_removed_returns_true_for_missing_device(self): + attrs = {'update_device_down.side_effect': Exception()} + self.agent.plugin_rpc.configure_mock(**attrs) + self.assertTrue(self.agent._treat_devices_removed([{}])) + + def mock_treat_devices_removed(self, port_exists): + details = dict(exists=port_exists) + attrs = {'update_device_down.return_value': details} + self.agent.plugin_rpc.configure_mock(**attrs) + with mock.patch.object(self.agent, '_port_unbound') as func: + self.assertFalse(self.agent._treat_devices_removed([{}])) + self.assertEqual(func.called, not port_exists) + + def test_treat_devices_removed_unbinds_port(self): + self.mock_treat_devices_removed(False) + + def test_treat_devices_removed_ignores_missing_port(self): + self.mock_treat_devices_removed(False) + + def test_report_state(self): + with mock.patch.object(self.agent.state_rpc, + "report_state") as report_st: + 
self.agent._report_state() + report_st.assert_called_with(self.agent.context, + self.agent.agent_state) + self.assertNotIn("start_flag", self.agent.agent_state) + + def test_main(self): + with mock.patch.object(hyperv_neutron_agent, + 'HyperVNeutronAgent') as plugin: + with mock.patch.object(hyperv_neutron_agent, + 'common_config') as common_config: + hyperv_neutron_agent.main() + + self.assertTrue(common_config.init.called) + self.assertTrue(common_config.setup_logging.called) + plugin.assert_has_calls([mock.call().daemon_loop()]) diff --git a/neutron/tests/unit/hyperv/test_hyperv_neutron_plugin.py b/neutron/tests/unit/hyperv/test_hyperv_neutron_plugin.py new file mode 100644 index 000000000..8e34777c2 --- /dev/null +++ b/neutron/tests/unit/hyperv/test_hyperv_neutron_plugin.py @@ -0,0 +1,69 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 Cloudbase Solutions SRL +# Copyright 2013 Pedro Navarro Perez +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import contextlib + +from oslo.config import cfg + +from neutron import context +from neutron.extensions import portbindings +from neutron import manager +from neutron.tests.unit import test_db_plugin as test_plugin + + +class HyperVNeutronPluginTestCase(test_plugin.NeutronDbPluginV2TestCase): + + _plugin_name = ('neutron.plugins.hyperv.' 
+ 'hyperv_neutron_plugin.HyperVNeutronPlugin') + + def setUp(self): + super(HyperVNeutronPluginTestCase, self).setUp(self._plugin_name) + + +class TestHyperVVirtualSwitchBasicGet( + test_plugin.TestBasicGet, HyperVNeutronPluginTestCase): + pass + + +class TestHyperVVirtualSwitchV2HTTPResponse( + test_plugin.TestV2HTTPResponse, HyperVNeutronPluginTestCase): + pass + + +class TestHyperVVirtualSwitchPortsV2( + test_plugin.TestPortsV2, HyperVNeutronPluginTestCase): + def test_port_vif_details(self): + with self.port(name='name') as port: + self.assertEqual(port['port']['binding:vif_type'], + portbindings.VIF_TYPE_HYPERV) + + def test_ports_vif_details(self): + cfg.CONF.set_default('allow_overlapping_ips', True) + plugin = manager.NeutronManager.get_plugin() + with contextlib.nested(self.port(), self.port()) as (port1, port2): + ctx = context.get_admin_context() + ports = plugin.get_ports(ctx) + self.assertEqual(len(ports), 2) + for port in ports: + self.assertEqual(port['binding:vif_type'], + portbindings.VIF_TYPE_HYPERV) + + +class TestHyperVVirtualSwitchNetworksV2( + test_plugin.TestNetworksV2, HyperVNeutronPluginTestCase): + pass diff --git a/neutron/tests/unit/hyperv/test_hyperv_rpcapi.py b/neutron/tests/unit/hyperv/test_hyperv_rpcapi.py new file mode 100644 index 000000000..965842738 --- /dev/null +++ b/neutron/tests/unit/hyperv/test_hyperv_rpcapi.py @@ -0,0 +1,125 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 Cloudbase Solutions SRL +# Copyright 2013 Pedro Navarro Perez +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Unit Tests for hyperv neutron rpc +""" + +import mock + +from neutron.agent import rpc as agent_rpc +from neutron.common import rpc_compat +from neutron.common import topics +from neutron.openstack.common import context +from neutron.plugins.hyperv import agent_notifier_api as ana +from neutron.plugins.hyperv.common import constants +from neutron.tests import base + + +class rpcHyperVApiTestCase(base.BaseTestCase): + + def _test_hyperv_neutron_api( + self, rpcapi, topic, method, rpc_method, **kwargs): + ctxt = context.RequestContext('fake_user', 'fake_project') + expected_retval = 'foo' if method == 'call' else None + expected_msg = rpcapi.make_msg(method, **kwargs) + if rpc_method == 'cast' and method == 'run_instance': + kwargs['call'] = False + + proxy = rpc_compat.RpcProxy + with mock.patch.object(proxy, rpc_method) as rpc_method_mock: + rpc_method_mock.return_value = expected_retval + retval = getattr(rpcapi, method)(ctxt, **kwargs) + + self.assertEqual(retval, expected_retval) + expected = [ + mock.call(ctxt, expected_msg, topic=topic) + ] + rpc_method_mock.assert_has_calls(expected) + + def test_delete_network(self): + rpcapi = ana.AgentNotifierApi(topics.AGENT) + self._test_hyperv_neutron_api( + rpcapi, + topics.get_topic_name( + topics.AGENT, + topics.NETWORK, + topics.DELETE), + 'network_delete', rpc_method='fanout_cast', + network_id='fake_request_spec') + + def test_port_update(self): + rpcapi = ana.AgentNotifierApi(topics.AGENT) + self._test_hyperv_neutron_api( + rpcapi, + topics.get_topic_name( + topics.AGENT, + topics.PORT, + topics.UPDATE), + 'port_update', rpc_method='fanout_cast', + port='fake_port', + network_type='fake_network_type', + segmentation_id='fake_segmentation_id', + physical_network='fake_physical_network') + + def test_port_delete(self): + rpcapi = ana.AgentNotifierApi(topics.AGENT) + self._test_hyperv_neutron_api( + 
rpcapi, + topics.get_topic_name( + topics.AGENT, + topics.PORT, + topics.DELETE), + 'port_delete', rpc_method='fanout_cast', + port_id='port_id') + + def test_tunnel_update(self): + rpcapi = ana.AgentNotifierApi(topics.AGENT) + self._test_hyperv_neutron_api( + rpcapi, + topics.get_topic_name( + topics.AGENT, + constants.TUNNEL, + topics.UPDATE), + 'tunnel_update', rpc_method='fanout_cast', + tunnel_ip='fake_ip', tunnel_id='fake_id') + + def test_device_details(self): + rpcapi = agent_rpc.PluginApi(topics.PLUGIN) + self._test_hyperv_neutron_api( + rpcapi, topics.PLUGIN, + 'get_device_details', rpc_method='call', + device='fake_device', + agent_id='fake_agent_id') + + def test_update_device_down(self): + rpcapi = agent_rpc.PluginApi(topics.PLUGIN) + self._test_hyperv_neutron_api( + rpcapi, topics.PLUGIN, + 'update_device_down', rpc_method='call', + device='fake_device', + agent_id='fake_agent_id', + host='fake_host') + + def test_tunnel_sync(self): + rpcapi = agent_rpc.PluginApi(topics.PLUGIN) + self._test_hyperv_neutron_api( + rpcapi, topics.PLUGIN, + 'tunnel_sync', rpc_method='call', + tunnel_ip='fake_tunnel_ip', + tunnel_type=None) diff --git a/neutron/tests/unit/hyperv/test_hyperv_security_groups_driver.py b/neutron/tests/unit/hyperv/test_hyperv_security_groups_driver.py new file mode 100644 index 000000000..bcbe6ba0e --- /dev/null +++ b/neutron/tests/unit/hyperv/test_hyperv_security_groups_driver.py @@ -0,0 +1,189 @@ +# Copyright 2014 Cloudbase Solutions SRL +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. +# @author: Claudiu Belu, Cloudbase Solutions Srl + +""" +Unit tests for the Hyper-V Security Groups Driver. +""" + +import mock +from oslo.config import cfg + +from neutron.plugins.hyperv.agent import security_groups_driver as sg_driver +from neutron.plugins.hyperv.agent import utilsfactory +from neutron.tests import base + +CONF = cfg.CONF + + +class TestHyperVSecurityGroupsDriver(base.BaseTestCase): + + _FAKE_DEVICE = 'fake_device' + _FAKE_ID = 'fake_id' + _FAKE_DIRECTION = 'ingress' + _FAKE_ETHERTYPE = 'IPv4' + _FAKE_ETHERTYPE_IPV6 = 'IPv6' + _FAKE_DEST_IP_PREFIX = 'fake_dest_ip_prefix' + _FAKE_SOURCE_IP_PREFIX = 'fake_source_ip_prefix' + _FAKE_PARAM_NAME = 'fake_param_name' + _FAKE_PARAM_VALUE = 'fake_param_value' + + _FAKE_PORT_MIN = 9001 + _FAKE_PORT_MAX = 9011 + + def setUp(self): + super(TestHyperVSecurityGroupsDriver, self).setUp() + self._mock_windows_version = mock.patch.object(utilsfactory, + 'get_hypervutils') + self._mock_windows_version.start() + self._driver = sg_driver.HyperVSecurityGroupsDriver() + self._driver._utils = mock.MagicMock() + + @mock.patch('neutron.plugins.hyperv.agent.security_groups_driver' + '.HyperVSecurityGroupsDriver._create_port_rules') + def test_prepare_port_filter(self, mock_create_rules): + mock_port = self._get_port() + mock_utils_method = self._driver._utils.create_default_reject_all_rules + self._driver.prepare_port_filter(mock_port) + + self.assertEqual(mock_port, + self._driver._security_ports[self._FAKE_DEVICE]) + mock_utils_method.assert_called_once_with(self._FAKE_ID) + self._driver._create_port_rules.assert_called_once_with( + self._FAKE_ID, mock_port['security_group_rules']) + + def test_update_port_filter(self): + mock_port = self._get_port() + new_mock_port = self._get_port() + new_mock_port['id'] += '2' + new_mock_port['security_group_rules'][0]['ethertype'] += "2" + + 
self._driver._security_ports[mock_port['device']] = mock_port + self._driver._create_port_rules = mock.MagicMock() + self._driver._remove_port_rules = mock.MagicMock() + self._driver.update_port_filter(new_mock_port) + + self._driver._remove_port_rules.assert_called_once_with( + mock_port['id'], mock_port['security_group_rules']) + self._driver._create_port_rules.assert_called_once_with( + new_mock_port['id'], new_mock_port['security_group_rules']) + self.assertEqual(new_mock_port, + self._driver._security_ports[new_mock_port['device']]) + + @mock.patch('neutron.plugins.hyperv.agent.security_groups_driver' + '.HyperVSecurityGroupsDriver.prepare_port_filter') + def test_update_port_filter_new_port(self, mock_method): + mock_port = self._get_port() + self._driver.prepare_port_filter = mock.MagicMock() + self._driver.update_port_filter(mock_port) + + self._driver.prepare_port_filter.assert_called_once_with(mock_port) + + def test_remove_port_filter(self): + mock_port = self._get_port() + self._driver._security_ports[mock_port['device']] = mock_port + self._driver.remove_port_filter(mock_port) + self.assertFalse(mock_port['device'] in self._driver._security_ports) + + def test_create_port_rules_exception(self): + fake_rule = self._create_security_rule() + self._driver._utils.create_security_rule.side_effect = Exception( + 'Generated Exception for testing.') + self._driver._create_port_rules(self._FAKE_ID, [fake_rule]) + + def test_create_param_map(self): + fake_rule = self._create_security_rule() + self._driver._get_rule_remote_address = mock.MagicMock( + return_value=self._FAKE_SOURCE_IP_PREFIX) + actual = self._driver._create_param_map(fake_rule) + expected = { + 'direction': self._driver._ACL_PROP_MAP[ + 'direction'][self._FAKE_DIRECTION], + 'acl_type': self._driver._ACL_PROP_MAP[ + 'ethertype'][self._FAKE_ETHERTYPE], + 'local_port': '%s-%s' % (self._FAKE_PORT_MIN, self._FAKE_PORT_MAX), + 'protocol': self._driver._ACL_PROP_MAP['default'], + 'remote_address': 
self._FAKE_SOURCE_IP_PREFIX + } + + self.assertEqual(expected, actual) + + @mock.patch('neutron.plugins.hyperv.agent.security_groups_driver' + '.HyperVSecurityGroupsDriver._create_param_map') + def test_create_port_rules(self, mock_method): + fake_rule = self._create_security_rule() + mock_method.return_value = { + self._FAKE_PARAM_NAME: self._FAKE_PARAM_VALUE} + self._driver._create_port_rules(self._FAKE_ID, [fake_rule]) + + self._driver._utils.create_security_rule.assert_called_once_with( + self._FAKE_ID, fake_param_name=self._FAKE_PARAM_VALUE) + + def test_convert_any_address_to_same_ingress(self): + rule = self._create_security_rule() + actual = self._driver._get_rule_remote_address(rule) + self.assertEqual(self._FAKE_SOURCE_IP_PREFIX, actual) + + def test_convert_any_address_to_same_egress(self): + rule = self._create_security_rule() + rule['direction'] += '2' + actual = self._driver._get_rule_remote_address(rule) + self.assertEqual(self._FAKE_DEST_IP_PREFIX, actual) + + def test_convert_any_address_to_ipv4(self): + rule = self._create_security_rule() + del rule['source_ip_prefix'] + actual = self._driver._get_rule_remote_address(rule) + self.assertEqual(self._driver._ACL_PROP_MAP['address_default']['IPv4'], + actual) + + def test_convert_any_address_to_ipv6(self): + rule = self._create_security_rule() + del rule['source_ip_prefix'] + rule['ethertype'] = self._FAKE_ETHERTYPE_IPV6 + actual = self._driver._get_rule_remote_address(rule) + self.assertEqual(self._driver._ACL_PROP_MAP['address_default']['IPv6'], + actual) + + def test_get_rule_protocol_icmp(self): + self._test_get_rule_protocol( + 'icmp', self._driver._ACL_PROP_MAP['protocol']['icmp']) + + def test_get_rule_protocol_no_icmp(self): + self._test_get_rule_protocol('tcp', 'tcp') + + def _test_get_rule_protocol(self, protocol, expected): + rule = self._create_security_rule() + rule['protocol'] = protocol + actual = self._driver._get_rule_protocol(rule) + + self.assertEqual(expected, actual) + + def 
_get_port(self): + return { + 'device': self._FAKE_DEVICE, + 'id': self._FAKE_ID, + 'security_group_rules': [self._create_security_rule()] + } + + def _create_security_rule(self): + return { + 'direction': self._FAKE_DIRECTION, + 'ethertype': self._FAKE_ETHERTYPE, + 'dest_ip_prefix': self._FAKE_DEST_IP_PREFIX, + 'source_ip_prefix': self._FAKE_SOURCE_IP_PREFIX, + 'port_range_min': self._FAKE_PORT_MIN, + 'port_range_max': self._FAKE_PORT_MAX + } diff --git a/neutron/tests/unit/hyperv/test_hyperv_utilsfactory.py b/neutron/tests/unit/hyperv/test_hyperv_utilsfactory.py new file mode 100644 index 000000000..fef96d734 --- /dev/null +++ b/neutron/tests/unit/hyperv/test_hyperv_utilsfactory.py @@ -0,0 +1,54 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 Cloudbase Solutions SRL +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# @author: Claudiu Belu, Cloudbase Solutions Srl + +""" +Unit tests for the Hyper-V utils factory. 
+""" + +import mock + +from oslo.config import cfg + +from neutron.plugins.hyperv.agent import utils +from neutron.plugins.hyperv.agent import utilsfactory +from neutron.plugins.hyperv.agent import utilsv2 +from neutron.tests import base + +CONF = cfg.CONF + + +class TestHyperVUtilsFactory(base.BaseTestCase): + + def test_get_hypervutils_v2_r2(self): + self._test_returned_class(utilsv2.HyperVUtilsV2R2, True, '6.3.0') + + def test_get_hypervutils_v2(self): + self._test_returned_class(utilsv2.HyperVUtilsV2, False, '6.2.0') + + def test_get_hypervutils_v1_old_version(self): + self._test_returned_class(utils.HyperVUtils, False, '6.1.0') + + def test_get_hypervutils_v1_forced(self): + self._test_returned_class(utils.HyperVUtils, True, '6.2.0') + + def _test_returned_class(self, expected_class, force_v1, os_version): + CONF.hyperv.force_hyperv_utils_v1 = force_v1 + utilsfactory._get_windows_version = mock.MagicMock( + return_value=os_version) + actual_class = type(utilsfactory.get_hypervutils()) + self.assertEqual(actual_class, expected_class) diff --git a/neutron/tests/unit/hyperv/test_hyperv_utilsv2.py b/neutron/tests/unit/hyperv/test_hyperv_utilsv2.py new file mode 100644 index 000000000..c020f16e0 --- /dev/null +++ b/neutron/tests/unit/hyperv/test_hyperv_utilsv2.py @@ -0,0 +1,519 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 Cloudbase Solutions SRL +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# @author: Alessandro Pilotti, Cloudbase Solutions Srl + +""" +Unit tests for the Hyper-V utils V2. +""" + +import mock + +from neutron.plugins.hyperv.agent import utils +from neutron.plugins.hyperv.agent import utilsv2 +from neutron.tests import base + + +class TestHyperVUtilsV2(base.BaseTestCase): + + _FAKE_VSWITCH_NAME = "fake_vswitch_name" + _FAKE_PORT_NAME = "fake_port_name" + _FAKE_JOB_PATH = 'fake_job_path' + _FAKE_RET_VAL = 0 + _FAKE_VM_PATH = "fake_vm_path" + _FAKE_RES_DATA = "fake_res_data" + _FAKE_RES_PATH = "fake_res_path" + _FAKE_VSWITCH = "fake_vswitch" + _FAKE_VLAN_ID = "fake_vlan_id" + _FAKE_CLASS_NAME = "fake_class_name" + _FAKE_ELEMENT_NAME = "fake_element_name" + _FAKE_HYPERV_VM_STATE = 'fake_hyperv_state' + + _FAKE_ACL_ACT = 'fake_acl_action' + _FAKE_ACL_DIR = 'fake_acl_dir' + _FAKE_ACL_TYPE = 'fake_acl_type' + _FAKE_LOCAL_PORT = 'fake_local_port' + _FAKE_PROTOCOL = 'fake_port_protocol' + _FAKE_REMOTE_ADDR = '0.0.0.0/0' + _FAKE_WEIGHT = 'fake_weight' + + def setUp(self): + super(TestHyperVUtilsV2, self).setUp() + self._utils = utilsv2.HyperVUtilsV2() + self._utils._wmi_conn = mock.MagicMock() + + def test_connect_vnic_to_vswitch_found(self): + self._test_connect_vnic_to_vswitch(True) + + def test_connect_vnic_to_vswitch_not_found(self): + self._test_connect_vnic_to_vswitch(False) + + def _test_connect_vnic_to_vswitch(self, found): + self._utils._get_vnic_settings = mock.MagicMock() + + if not found: + mock_vm = mock.MagicMock() + self._utils._get_vm_from_res_setting_data = mock.MagicMock( + return_value=mock_vm) + self._utils._add_virt_resource = mock.MagicMock() + else: + self._utils._modify_virt_resource = mock.MagicMock() + + self._utils._get_vswitch = mock.MagicMock() + self._utils._get_switch_port_allocation = mock.MagicMock() + + mock_port = mock.MagicMock() + self._utils._get_switch_port_allocation.return_value = (mock_port, + found) + + self._utils.connect_vnic_to_vswitch(self._FAKE_VSWITCH_NAME, + self._FAKE_PORT_NAME) + + if not 
found: + self._utils._add_virt_resource.assert_called_with(mock_vm, + mock_port) + else: + self._utils._modify_virt_resource.assert_called_with(mock_port) + + def test_add_virt_resource(self): + self._test_virt_method('AddResourceSettings', 3, '_add_virt_resource', + True, self._FAKE_VM_PATH, [self._FAKE_RES_DATA]) + + def test_add_virt_feature(self): + self._test_virt_method('AddFeatureSettings', 3, '_add_virt_feature', + True, self._FAKE_VM_PATH, [self._FAKE_RES_DATA]) + + def test_modify_virt_resource(self): + self._test_virt_method('ModifyResourceSettings', 3, + '_modify_virt_resource', False, + ResourceSettings=[self._FAKE_RES_DATA]) + + def test_remove_virt_resource(self): + self._test_virt_method('RemoveResourceSettings', 2, + '_remove_virt_resource', False, + ResourceSettings=[self._FAKE_RES_PATH]) + + def test_remove_virt_feature(self): + self._test_virt_method('RemoveFeatureSettings', 2, + '_remove_virt_feature', False, + FeatureSettings=[self._FAKE_RES_PATH]) + + def _test_virt_method(self, vsms_method_name, return_count, + utils_method_name, with_mock_vm, *args, **kwargs): + mock_svc = self._utils._conn.Msvm_VirtualSystemManagementService()[0] + vsms_method = getattr(mock_svc, vsms_method_name) + mock_rsd = self._mock_vsms_method(vsms_method, return_count) + if with_mock_vm: + mock_vm = mock.MagicMock() + mock_vm.path_.return_value = self._FAKE_VM_PATH + getattr(self._utils, utils_method_name)(mock_vm, mock_rsd) + else: + getattr(self._utils, utils_method_name)(mock_rsd) + + if args: + vsms_method.assert_called_once_with(*args) + else: + vsms_method.assert_called_once_with(**kwargs) + + def _mock_vsms_method(self, vsms_method, return_count): + args = None + if return_count == 3: + args = (self._FAKE_JOB_PATH, mock.MagicMock(), self._FAKE_RET_VAL) + else: + args = (self._FAKE_JOB_PATH, self._FAKE_RET_VAL) + + vsms_method.return_value = args + mock_res_setting_data = mock.MagicMock() + mock_res_setting_data.GetText_.return_value = self._FAKE_RES_DATA + 
mock_res_setting_data.path_.return_value = self._FAKE_RES_PATH + + self._utils._check_job_status = mock.MagicMock() + + return mock_res_setting_data + + def test_disconnect_switch_port_delete_port(self): + self._test_disconnect_switch_port(True) + + def test_disconnect_switch_port_modify_port(self): + self._test_disconnect_switch_port(False) + + def _test_disconnect_switch_port(self, delete_port): + self._utils._get_switch_port_allocation = mock.MagicMock() + + mock_sw_port = mock.MagicMock() + self._utils._get_switch_port_allocation.return_value = (mock_sw_port, + True) + + if delete_port: + self._utils._remove_virt_resource = mock.MagicMock() + else: + self._utils._modify_virt_resource = mock.MagicMock() + + self._utils.disconnect_switch_port(self._FAKE_VSWITCH_NAME, + self._FAKE_PORT_NAME, + delete_port) + + if delete_port: + self._utils._remove_virt_resource.assert_called_with(mock_sw_port) + else: + self._utils._modify_virt_resource.assert_called_with(mock_sw_port) + + def test_get_vswitch(self): + self._utils._conn.Msvm_VirtualEthernetSwitch.return_value = [ + self._FAKE_VSWITCH] + vswitch = self._utils._get_vswitch(self._FAKE_VSWITCH_NAME) + + self.assertEqual(self._FAKE_VSWITCH, vswitch) + + def test_get_vswitch_not_found(self): + self._utils._conn.Msvm_VirtualEthernetSwitch.return_value = [] + self.assertRaises(utils.HyperVException, self._utils._get_vswitch, + self._FAKE_VSWITCH_NAME) + + def test_get_vswitch_external_port(self): + mock_vswitch = mock.MagicMock() + mock_sw_port = mock.MagicMock() + mock_vswitch.associators.return_value = [mock_sw_port] + mock_le = mock_sw_port.associators.return_value + mock_le.__len__.return_value = 1 + mock_le1 = mock_le[0].associators.return_value + mock_le1.__len__.return_value = 1 + + vswitch_port = self._utils._get_vswitch_external_port(mock_vswitch) + + self.assertEqual(mock_sw_port, vswitch_port) + + def test_set_vswitch_port_vlan_id(self): + mock_port_alloc = mock.MagicMock() + 
self._utils._get_switch_port_allocation = mock.MagicMock(return_value=( + mock_port_alloc, True)) + self._utils._get_vlan_setting_data_from_port_alloc = mock.MagicMock() + + mock_svc = self._utils._conn.Msvm_VirtualSystemManagementService()[0] + mock_svc.RemoveFeatureSettings.return_value = (self._FAKE_JOB_PATH, + self._FAKE_RET_VAL) + mock_vlan_settings = mock.MagicMock() + self._utils._get_vlan_setting_data = mock.MagicMock(return_value=( + mock_vlan_settings, True)) + + mock_svc.AddFeatureSettings.return_value = (self._FAKE_JOB_PATH, + None, + self._FAKE_RET_VAL) + + self._utils.set_vswitch_port_vlan_id(self._FAKE_VLAN_ID, + self._FAKE_PORT_NAME) + + self.assertTrue(mock_svc.RemoveFeatureSettings.called) + self.assertTrue(mock_svc.AddFeatureSettings.called) + + def test_get_setting_data(self): + self._utils._get_first_item = mock.MagicMock(return_value=None) + + mock_data = mock.MagicMock() + self._utils._get_default_setting_data = mock.MagicMock( + return_value=mock_data) + + ret_val = self._utils._get_setting_data(self._FAKE_CLASS_NAME, + self._FAKE_ELEMENT_NAME, + True) + + self.assertEqual(ret_val, (mock_data, False)) + + def test_enable_port_metrics_collection(self): + mock_port = mock.MagicMock() + self._utils._get_switch_port_allocation = mock.MagicMock(return_value=( + mock_port, True)) + + mock_acl = mock.MagicMock() + + with mock.patch.multiple( + self._utils, + _get_default_setting_data=mock.MagicMock(return_value=mock_acl), + _add_virt_feature=mock.MagicMock()): + + self._utils.enable_port_metrics_collection(self._FAKE_PORT_NAME) + + self.assertEqual(4, len(self._utils._add_virt_feature.mock_calls)) + self._utils._add_virt_feature.assert_called_with( + mock_port, mock_acl) + + @mock.patch('neutron.plugins.hyperv.agent.utilsv2.HyperVUtilsV2' + '._get_switch_port_allocation') + def test_enable_control_metrics_ok(self, mock_get_port_allocation): + mock_metrics_svc = self._utils._conn.Msvm_MetricService()[0] + mock_metrics_def_source = 
self._utils._conn.CIM_BaseMetricDefinition + mock_metric_def = mock.MagicMock() + mock_port = mock.MagicMock() + mock_get_port_allocation.return_value = (mock_port, True) + + mock_metrics_def_source.return_value = [mock_metric_def] + m_call = mock.call(Subject=mock_port.path_.return_value, + Definition=mock_metric_def.path_.return_value, + MetricCollectionEnabled=self._utils._METRIC_ENABLED) + + self._utils.enable_control_metrics(self._FAKE_PORT_NAME) + + mock_metrics_svc.ControlMetrics.assert_has_calls([m_call, m_call]) + + @mock.patch('neutron.plugins.hyperv.agent.utilsv2.HyperVUtilsV2' + '._get_switch_port_allocation') + def test_enable_control_metrics_no_port(self, mock_get_port_allocation): + mock_metrics_svc = self._utils._conn.Msvm_MetricService()[0] + mock_get_port_allocation.return_value = (None, False) + + self._utils.enable_control_metrics(self._FAKE_PORT_NAME) + self.assertEqual(0, mock_metrics_svc.ControlMetrics.call_count) + + @mock.patch('neutron.plugins.hyperv.agent.utilsv2.HyperVUtilsV2' + '._get_switch_port_allocation') + def test_enable_control_metrics_no_def(self, mock_get_port_allocation): + mock_metrics_svc = self._utils._conn.Msvm_MetricService()[0] + mock_metrics_def_source = self._utils._conn.CIM_BaseMetricDefinition + mock_port = mock.MagicMock() + + mock_get_port_allocation.return_value = (mock_port, True) + mock_metrics_def_source.return_value = None + + self._utils.enable_control_metrics(self._FAKE_PORT_NAME) + self.assertEqual(0, mock_metrics_svc.ControlMetrics.call_count) + + @mock.patch('neutron.plugins.hyperv.agent.utilsv2.HyperVUtilsV2' + '._is_port_vm_started') + @mock.patch('neutron.plugins.hyperv.agent.utilsv2.HyperVUtilsV2' + '._get_switch_port_allocation') + def test_can_enable_control_metrics_true(self, mock_get, mock_is_started): + mock_acl = mock.MagicMock() + mock_acl.Action = self._utils._ACL_ACTION_METER + self._test_can_enable_control_metrics(mock_get, mock_is_started, + [mock_acl, mock_acl], True) + + 
@mock.patch('neutron.plugins.hyperv.agent.utilsv2.HyperVUtilsV2' + '._is_port_vm_started') + @mock.patch('neutron.plugins.hyperv.agent.utilsv2.HyperVUtilsV2' + '._get_switch_port_allocation') + def test_can_enable_control_metrics_false(self, mock_get, mock_is_started): + self._test_can_enable_control_metrics(mock_get, mock_is_started, [], + False) + + def _test_can_enable_control_metrics(self, mock_get_port, mock_vm_started, + acls, expected_result): + mock_port = mock.MagicMock() + mock_acl = mock.MagicMock() + mock_acl.Action = self._utils._ACL_ACTION_METER + + mock_port.associators.return_value = acls + mock_get_port.return_value = (mock_port, True) + mock_vm_started.return_value = True + + result = self._utils.can_enable_control_metrics(self._FAKE_PORT_NAME) + self.assertEqual(expected_result, result) + + def test_is_port_vm_started_true(self): + self._test_is_port_vm_started(self._utils._HYPERV_VM_STATE_ENABLED, + True) + + def test_is_port_vm_started_false(self): + self._test_is_port_vm_started(self._FAKE_HYPERV_VM_STATE, False) + + def _test_is_port_vm_started(self, vm_state, expected_result): + mock_svc = self._utils._conn.Msvm_VirtualSystemManagementService()[0] + mock_port = mock.MagicMock() + mock_vmsettings = mock.MagicMock() + mock_summary = mock.MagicMock() + mock_summary.EnabledState = vm_state + mock_vmsettings.path_.return_value = self._FAKE_RES_PATH + + mock_port.associators.return_value = [mock_vmsettings] + mock_svc.GetSummaryInformation.return_value = (self._FAKE_RET_VAL, + [mock_summary]) + + result = self._utils._is_port_vm_started(mock_port) + self.assertEqual(expected_result, result) + mock_svc.GetSummaryInformation.assert_called_once_with( + [self._utils._VM_SUMMARY_ENABLED_STATE], + [self._FAKE_RES_PATH]) + + @mock.patch('neutron.plugins.hyperv.agent.utilsv2.HyperVUtilsV2' + '._remove_virt_feature') + @mock.patch('neutron.plugins.hyperv.agent.utilsv2.HyperVUtilsV2' + '._bind_security_rule') + def test_create_default_reject_all_rules(self, 
mock_bind, mock_remove): + (m_port, m_acl) = self._setup_security_rule_test() + m_acl.Action = self._utils._ACL_ACTION_DENY + self._utils.create_default_reject_all_rules(self._FAKE_PORT_NAME) + + calls = [] + ipv4_pair = (self._utils._ACL_TYPE_IPV4, self._utils._IPV4_ANY) + ipv6_pair = (self._utils._ACL_TYPE_IPV6, self._utils._IPV6_ANY) + for direction in [self._utils._ACL_DIR_IN, self._utils._ACL_DIR_OUT]: + for acl_type, address in [ipv4_pair, ipv6_pair]: + for protocol in [self._utils._TCP_PROTOCOL, + self._utils._UDP_PROTOCOL, + self._utils._ICMP_PROTOCOL]: + calls.append(mock.call(m_port, direction, acl_type, + self._utils._ACL_ACTION_DENY, + self._utils._ACL_DEFAULT, + protocol, address, mock.ANY)) + + self._utils._remove_virt_feature.assert_called_once_with(m_acl) + self._utils._bind_security_rule.assert_has_calls(calls) + + @mock.patch('neutron.plugins.hyperv.agent.utilsv2.HyperVUtilsV2' + '._remove_virt_feature') + @mock.patch('neutron.plugins.hyperv.agent.utilsv2.HyperVUtilsV2' + '._bind_security_rule') + def test_create_default_reject_all_rules_already_added(self, mock_bind, + mock_remove): + (m_port, m_acl) = self._setup_security_rule_test() + m_acl.Action = self._utils._ACL_ACTION_DENY + m_port.associators.return_value = [ + m_acl] * self._utils._REJECT_ACLS_COUNT + self._utils.create_default_reject_all_rules(self._FAKE_PORT_NAME) + + self.assertFalse(self._utils._remove_virt_feature.called) + self.assertFalse(self._utils._bind_security_rule.called) + + @mock.patch('neutron.plugins.hyperv.agent.utilsv2.HyperVUtilsV2' + '._remove_virt_feature') + @mock.patch('neutron.plugins.hyperv.agent.utilsv2.HyperVUtilsV2' + '._add_virt_feature') + @mock.patch('neutron.plugins.hyperv.agent.utilsv2.HyperVUtilsV2' + '._create_security_acl') + def test_bind_security_rule(self, mock_create_acl, mock_add, mock_remove): + (m_port, m_acl) = self._setup_security_rule_test() + mock_create_acl.return_value = m_acl + + self._utils._bind_security_rule( + m_port, 
self._FAKE_ACL_DIR, self._FAKE_ACL_TYPE, + self._FAKE_ACL_ACT, self._FAKE_LOCAL_PORT, self._FAKE_PROTOCOL, + self._FAKE_REMOTE_ADDR, self._FAKE_WEIGHT) + + self._utils._add_virt_feature.assert_called_once_with(m_port, m_acl) + + @mock.patch('neutron.plugins.hyperv.agent.utilsv2.HyperVUtilsV2' + '._remove_virt_feature') + def test_remove_security_rule(self, mock_remove_feature): + mock_acl = self._setup_security_rule_test()[1] + self._utils.remove_security_rule( + self._FAKE_PORT_NAME, self._FAKE_ACL_DIR, self._FAKE_ACL_TYPE, + self._FAKE_LOCAL_PORT, self._FAKE_PROTOCOL, self._FAKE_REMOTE_ADDR) + self._utils._remove_virt_feature.assert_called_once_with(mock_acl) + + @mock.patch('neutron.plugins.hyperv.agent.utilsv2.HyperVUtilsV2' + '._remove_multiple_virt_features') + def test_remove_all_security_rules(self, mock_remove_feature): + mock_acl = self._setup_security_rule_test()[1] + self._utils.remove_all_security_rules(self._FAKE_PORT_NAME) + self._utils._remove_multiple_virt_features.assert_called_once_with( + [mock_acl]) + + def _setup_security_rule_test(self): + mock_port = mock.MagicMock() + mock_acl = mock.MagicMock() + mock_port.associators.return_value = [mock_acl] + + self._utils._get_switch_port_allocation = mock.MagicMock(return_value=( + mock_port, True)) + self._utils._filter_security_acls = mock.MagicMock( + return_value=[mock_acl]) + + return (mock_port, mock_acl) + + def test_filter_acls(self): + mock_acl = mock.MagicMock() + mock_acl.Action = self._FAKE_ACL_ACT + mock_acl.Applicability = self._utils._ACL_APPLICABILITY_LOCAL + mock_acl.Direction = self._FAKE_ACL_DIR + mock_acl.AclType = self._FAKE_ACL_TYPE + mock_acl.RemoteAddress = self._FAKE_REMOTE_ADDR + + acls = [mock_acl, mock_acl] + good_acls = self._utils._filter_acls( + acls, self._FAKE_ACL_ACT, self._FAKE_ACL_DIR, + self._FAKE_ACL_TYPE, self._FAKE_REMOTE_ADDR) + bad_acls = self._utils._filter_acls( + acls, self._FAKE_ACL_ACT, self._FAKE_ACL_DIR, self._FAKE_ACL_TYPE) + + self.assertEqual(acls, 
good_acls) + self.assertEqual([], bad_acls) + + +class TestHyperVUtilsV2R2(base.BaseTestCase): + _FAKE_ACL_ACT = 'fake_acl_action' + _FAKE_ACL_DIR = 'fake_direction' + _FAKE_ACL_TYPE = 'fake_acl_type' + _FAKE_LOCAL_PORT = 'fake_local_port' + _FAKE_PROTOCOL = 'fake_port_protocol' + _FAKE_REMOTE_ADDR = '10.0.0.0/0' + + def setUp(self): + super(TestHyperVUtilsV2R2, self).setUp() + self._utils = utilsv2.HyperVUtilsV2R2() + + def test_filter_security_acls(self): + self._test_filter_security_acls( + self._FAKE_LOCAL_PORT, self._FAKE_PROTOCOL, self._FAKE_REMOTE_ADDR) + + def test_filter_security_acls_default(self): + default = self._utils._ACL_DEFAULT + self._test_filter_security_acls( + default, default, self._FAKE_REMOTE_ADDR) + + def _test_filter_security_acls(self, local_port, protocol, remote_addr): + acls = [] + default = self._utils._ACL_DEFAULT + for port, proto in [(default, default), (local_port, protocol)]: + mock_acl = mock.MagicMock() + mock_acl.Action = self._utils._ACL_ACTION_ALLOW + mock_acl.Direction = self._FAKE_ACL_DIR + mock_acl.LocalPort = port + mock_acl.Protocol = proto + mock_acl.RemoteIPAddress = remote_addr + acls.append(mock_acl) + + right_acls = [a for a in acls if a.LocalPort == local_port] + + good_acls = self._utils._filter_security_acls( + acls, mock_acl.Action, self._FAKE_ACL_DIR, self._FAKE_ACL_TYPE, + local_port, protocol, remote_addr) + bad_acls = self._utils._filter_security_acls( + acls, self._FAKE_ACL_ACT, self._FAKE_ACL_DIR, self._FAKE_ACL_TYPE, + local_port, protocol, remote_addr) + + self.assertEqual(right_acls, good_acls) + self.assertEqual([], bad_acls) + + def test_get_new_weight(self): + mockacl1 = mock.MagicMock() + mockacl1.Weight = self._utils._MAX_WEIGHT - 1 + mockacl2 = mock.MagicMock() + mockacl2.Weight = self._utils._MAX_WEIGHT - 3 + self.assertEqual(self._utils._MAX_WEIGHT - 2, + self._utils._get_new_weight([mockacl1, mockacl2])) + + def test_get_new_weight_no_acls(self): + self.assertEqual(self._utils._MAX_WEIGHT - 1, 
+ self._utils._get_new_weight([])) + + def test_get_new_weight_default_acls(self): + mockacl1 = mock.MagicMock() + mockacl1.Weight = self._utils._MAX_WEIGHT - 1 + mockacl2 = mock.MagicMock() + mockacl2.Weight = self._utils._MAX_WEIGHT - 2 + mockacl2.Action = self._utils._ACL_ACTION_DENY + + self.assertEqual(self._utils._MAX_WEIGHT - 2, + self._utils._get_new_weight([mockacl1, mockacl2])) diff --git a/neutron/tests/unit/ibm/__init__.py b/neutron/tests/unit/ibm/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/neutron/tests/unit/ibm/test_sdnve_agent.py b/neutron/tests/unit/ibm/test_sdnve_agent.py new file mode 100644 index 000000000..3b33d901e --- /dev/null +++ b/neutron/tests/unit/ibm/test_sdnve_agent.py @@ -0,0 +1,118 @@ +# Copyright 2014 IBM Corp. +# +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Mohammad Banikazemi, IBM Corp + + +import contextlib + +import mock +from oslo.config import cfg + +from neutron.agent.linux import ip_lib +from neutron.plugins.ibm.agent import sdnve_neutron_agent +from neutron.tests import base + + +NOTIFIER = ('neutron.plugins.ibm.' 
+ 'sdnve_neutron_plugin.AgentNotifierApi') + + +class CreateAgentConfigMap(base.BaseTestCase): + + def test_create_agent_config_map_succeeds(self): + self.assertTrue(sdnve_neutron_agent.create_agent_config_map(cfg.CONF)) + + def test_create_agent_config_using_controller_ips(self): + cfg.CONF.set_override('controller_ips', + ['10.10.10.1', '10.10.10.2'], group='SDNVE') + cfgmap = sdnve_neutron_agent.create_agent_config_map(cfg.CONF) + self.assertEqual(cfgmap['controller_ip'], '10.10.10.1') + + def test_create_agent_config_using_interface_mappings(self): + cfg.CONF.set_override('interface_mappings', + ['interface1 : eth1', 'interface2 : eth2'], + group='SDNVE') + cfgmap = sdnve_neutron_agent.create_agent_config_map(cfg.CONF) + self.assertEqual(cfgmap['interface_mappings'], + {'interface1': 'eth1', 'interface2': 'eth2'}) + + +class TestSdnveNeutronAgent(base.BaseTestCase): + + def setUp(self): + super(TestSdnveNeutronAgent, self).setUp() + notifier_p = mock.patch(NOTIFIER) + notifier_cls = notifier_p.start() + self.notifier = mock.Mock() + notifier_cls.return_value = self.notifier + # Avoid rpc initialization for unit tests + cfg.CONF.set_override('rpc_backend', + 'neutron.openstack.common.rpc.impl_fake') + cfg.CONF.set_override('integration_bridge', + 'br_int', group='SDNVE') + kwargs = sdnve_neutron_agent.create_agent_config_map(cfg.CONF) + + class MockFixedIntervalLoopingCall(object): + def __init__(self, f): + self.f = f + + def start(self, interval=0): + self.f() + + with contextlib.nested( + mock.patch('neutron.plugins.ibm.agent.sdnve_neutron_agent.' + 'SdnveNeutronAgent.setup_integration_br', + return_value=mock.Mock()), + mock.patch('neutron.openstack.common.loopingcall.' 
+ 'FixedIntervalLoopingCall', + new=MockFixedIntervalLoopingCall)): + self.agent = sdnve_neutron_agent.SdnveNeutronAgent(**kwargs) + + def test_setup_physical_interfaces(self): + with mock.patch.object(self.agent.int_br, + 'add_port') as add_port_func: + with mock.patch.object(ip_lib, + 'device_exists', + return_value=True): + self.agent.setup_physical_interfaces({"interface1": "eth1"}) + add_port_func.assert_called_once_with('eth1') + + def test_setup_physical_interfaces_none(self): + with mock.patch.object(self.agent.int_br, + 'add_port') as add_port_func: + with mock.patch.object(ip_lib, + 'device_exists', + return_value=True): + self.agent.setup_physical_interfaces({}) + self.assertFalse(add_port_func.called) + + def test_get_info_set_controller(self): + with mock.patch.object(self.agent.int_br, + 'run_vsctl') as run_vsctl_func: + kwargs = {} + kwargs['info'] = {'new_controller': '10.10.10.1'} + self.agent.info_update('dummy', **kwargs) + run_vsctl_func.assert_called_once_with(['set-controller', + 'br_int', + 'tcp:10.10.10.1']) + + def test_get_info(self): + with mock.patch.object(self.agent.int_br, + 'run_vsctl') as run_vsctl_func: + kwargs = {} + self.agent.info_update('dummy', **kwargs) + self.assertFalse(run_vsctl_func.called) diff --git a/neutron/tests/unit/ibm/test_sdnve_api.py b/neutron/tests/unit/ibm/test_sdnve_api.py new file mode 100644 index 000000000..f1f8d60b8 --- /dev/null +++ b/neutron/tests/unit/ibm/test_sdnve_api.py @@ -0,0 +1,145 @@ +# Copyright 2014 IBM Corp. +# +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Mohammad Banikazemi, IBM Corp + + +import mock + +from neutron.openstack.common import uuidutils +from neutron.plugins.ibm.common import constants +from neutron.plugins.ibm import sdnve_api +from neutron.tests import base + +RESOURCE_PATH = { + 'network': "ln/networks/", +} +RESOURCE = 'network' +HTTP_OK = 200 +TENANT_ID = uuidutils.generate_uuid() + + +class TestSdnveApi(base.BaseTestCase): + + def setUp(self): + super(TestSdnveApi, self).setUp() + + class MockKeystoneClient(object): + def __init__(self, **kwargs): + pass + + def get_tenant_name(self, id): + return 'test tenant name' + + with mock.patch('neutron.plugins.ibm.sdnve_api.' + 'KeystoneClient', + new=MockKeystoneClient): + self.api = sdnve_api.Client() + + def mock_do_request(self, method, url, body=None, headers=None, + params=None, connection_type=None): + return (HTTP_OK, url) + + def mock_do_request_tenant(self, method, url, body=None, headers=None, + params=None, connection_type=None): + return (HTTP_OK, {'id': TENANT_ID, + 'network_type': constants.TENANT_TYPE_OF}) + + def mock_do_request_no_tenant(self, method, url, body=None, headers=None, + params=None, connection_type=None): + return (None, None) + + def mock_process_request(self, body): + return body + + def test_sdnve_api_list(self): + with mock.patch('neutron.plugins.ibm.sdnve_api.' + 'Client.do_request', + new=self.mock_do_request): + result = self.api.sdnve_list(RESOURCE) + self.assertEqual(result, (HTTP_OK, RESOURCE_PATH[RESOURCE])) + + def test_sdnve_api_show(self): + with mock.patch('neutron.plugins.ibm.sdnve_api.' + 'Client.do_request', + new=self.mock_do_request): + result = self.api.sdnve_show(RESOURCE, TENANT_ID) + self.assertEqual(result, + (HTTP_OK, RESOURCE_PATH[RESOURCE] + TENANT_ID)) + + def test_sdnve_api_create(self): + with mock.patch('neutron.plugins.ibm.sdnve_api.' 
+ 'Client.do_request', + new=self.mock_do_request): + with mock.patch('neutron.plugins.ibm.sdnve_api.' + 'Client.process_request', + new=self.mock_process_request): + result = self.api.sdnve_create(RESOURCE, '') + self.assertEqual(result, (HTTP_OK, RESOURCE_PATH[RESOURCE])) + + def test_sdnve_api_update(self): + with mock.patch('neutron.plugins.ibm.sdnve_api.' + 'Client.do_request', + new=self.mock_do_request): + with mock.patch('neutron.plugins.ibm.sdnve_api.' + 'Client.process_request', + new=self.mock_process_request): + result = self.api.sdnve_update(RESOURCE, TENANT_ID, '') + self.assertEqual(result, + (HTTP_OK, + RESOURCE_PATH[RESOURCE] + TENANT_ID)) + + def test_sdnve_api_delete(self): + with mock.patch('neutron.plugins.ibm.sdnve_api.' + 'Client.do_request', + new=self.mock_do_request): + result = self.api.sdnve_delete(RESOURCE, TENANT_ID) + self.assertEqual(result, + (HTTP_OK, RESOURCE_PATH[RESOURCE] + TENANT_ID)) + + def test_sdnve_get_tenant_by_id(self): + with mock.patch('neutron.plugins.ibm.sdnve_api.' + 'Client.do_request', + new=self.mock_do_request_tenant): + id = TENANT_ID + result = self.api.sdnve_get_tenant_byid(id) + self.assertEqual(result, + (TENANT_ID, constants.TENANT_TYPE_OF)) + + def test_sdnve_check_and_create_tenant(self): + with mock.patch('neutron.plugins.ibm.sdnve_api.' + 'Client.do_request', + new=self.mock_do_request_tenant): + id = TENANT_ID + result = self.api.sdnve_check_and_create_tenant(id) + self.assertEqual(result, TENANT_ID) + + def test_sdnve_check_and_create_tenant_fail(self): + with mock.patch('neutron.plugins.ibm.sdnve_api.' 
+ 'Client.do_request', + new=self.mock_do_request_no_tenant): + id = TENANT_ID + result = self.api.sdnve_check_and_create_tenant( + id, constants.TENANT_TYPE_OF) + self.assertIsNone(result) + + def test_process_request(self): + my_request = {'key_1': 'value_1', 'router:external': 'True', + 'key_2': 'value_2'} + expected = {'key_1': 'value_1', 'router_external': 'True', + 'key_2': 'value_2'} + result = self.api.process_request(my_request) + self.assertEqual(expected, result) diff --git a/neutron/tests/unit/ibm/test_sdnve_plugin.py b/neutron/tests/unit/ibm/test_sdnve_plugin.py new file mode 100644 index 000000000..4e4c967cc --- /dev/null +++ b/neutron/tests/unit/ibm/test_sdnve_plugin.py @@ -0,0 +1,126 @@ +# Copyright 2014 IBM Corp. +# +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Mohammad Banikazemi, IBM Corp + + +import contextlib +import mock + +from neutron.extensions import portbindings +from neutron.tests.unit import _test_extension_portbindings as test_bindings +from neutron.tests.unit import test_db_plugin as test_plugin +from neutron.tests.unit import test_l3_plugin as test_l3_plugin + +from neutron.plugins.ibm.common import constants + + +_plugin_name = ('neutron.plugins.ibm.' 
+ 'sdnve_neutron_plugin.SdnvePluginV2') +HTTP_OK = 200 + + +class MockClient(object): + def sdnve_list(self, resource, **params): + return (HTTP_OK, 'body') + + def sdnve_show(self, resource, specific, **params): + return (HTTP_OK, 'body') + + def sdnve_create(self, resource, body): + return (HTTP_OK, 'body') + + def sdnve_update(self, resource, specific, body=None): + return (HTTP_OK, 'body') + + def sdnve_delete(self, resource, specific): + return (HTTP_OK, 'body') + + def sdnve_get_tenant_byid(self, os_tenant_id): + return (os_tenant_id, constants.TENANT_TYPE_OF) + + def sdnve_check_and_create_tenant( + self, os_tenant_id, network_type=None): + return os_tenant_id + + def sdnve_get_controller(self): + return + + +class MockKeystoneClient(object): + def __init__(self, **kwargs): + pass + + def get_tenant_type(self, id): + return constants.TENANT_TYPE_OF + + def get_tenant_name(self, id): + return "tenant name" + + +class IBMPluginV2TestCase(test_plugin.NeutronDbPluginV2TestCase): + def setUp(self): + with contextlib.nested( + mock.patch('neutron.plugins.ibm.sdnve_api.' + 'KeystoneClient', + new=MockKeystoneClient), + mock.patch('neutron.plugins.ibm.sdnve_api.' 
+ 'Client', + new=MockClient)): + super(IBMPluginV2TestCase, self).setUp(plugin=_plugin_name) + + +class TestIBMBasicGet(test_plugin.TestBasicGet, + IBMPluginV2TestCase): + pass + + +class TestIBMV2HTTPResponse(test_plugin.TestV2HTTPResponse, + IBMPluginV2TestCase): + pass + + +class TestIBMNetworksV2(test_plugin.TestNetworksV2, + IBMPluginV2TestCase): + pass + + +class TestIBMPortsV2(test_plugin.TestPortsV2, + IBMPluginV2TestCase): + pass + + +class TestIBMSubnetsV2(test_plugin.TestSubnetsV2, + IBMPluginV2TestCase): + pass + + +class TestIBMPortBinding(IBMPluginV2TestCase, + test_bindings.PortBindingsTestCase): + VIF_TYPE = portbindings.VIF_TYPE_OVS + + +class IBMPluginRouterTestCase(test_l3_plugin.L3NatDBIntTestCase): + + def setUp(self): + with contextlib.nested( + mock.patch('neutron.plugins.ibm.sdnve_api.' + 'KeystoneClient', + new=MockKeystoneClient), + mock.patch('neutron.plugins.ibm.sdnve_api.' + 'Client', + new=MockClient)): + super(IBMPluginRouterTestCase, self).setUp(plugin=_plugin_name) diff --git a/neutron/tests/unit/linuxbridge/__init__.py b/neutron/tests/unit/linuxbridge/__init__.py new file mode 100644 index 000000000..7e503debd --- /dev/null +++ b/neutron/tests/unit/linuxbridge/__init__.py @@ -0,0 +1,16 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
diff --git a/neutron/tests/unit/linuxbridge/test_agent_scheduler.py b/neutron/tests/unit/linuxbridge/test_agent_scheduler.py new file mode 100644 index 000000000..397baaf5c --- /dev/null +++ b/neutron/tests/unit/linuxbridge/test_agent_scheduler.py @@ -0,0 +1,34 @@ +# Copyright (c) 2013 OpenStack Foundation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from neutron.tests.unit.linuxbridge import test_linuxbridge_plugin +from neutron.tests.unit.openvswitch import test_agent_scheduler + + +class LbAgentSchedulerTestCase( + test_agent_scheduler.OvsAgentSchedulerTestCase): + plugin_str = test_linuxbridge_plugin.PLUGIN_NAME + l3_plugin = None + + +class LbL3AgentNotifierTestCase( + test_agent_scheduler.OvsL3AgentNotifierTestCase): + plugin_str = test_linuxbridge_plugin.PLUGIN_NAME + l3_plugin = None + + +class LbDhcpAgentNotifierTestCase( + test_agent_scheduler.OvsDhcpAgentNotifierTestCase): + plugin_str = test_linuxbridge_plugin.PLUGIN_NAME diff --git a/neutron/tests/unit/linuxbridge/test_defaults.py b/neutron/tests/unit/linuxbridge/test_defaults.py new file mode 100644 index 000000000..1c395e81d --- /dev/null +++ b/neutron/tests/unit/linuxbridge/test_defaults.py @@ -0,0 +1,42 @@ +# Copyright (c) 2012 OpenStack Foundation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from oslo.config import cfg + +from neutron.plugins.linuxbridge.common import config # noqa +from neutron.tests import base + + +class ConfigurationTest(base.BaseTestCase): + + def test_defaults(self): + self.assertEqual(2, + cfg.CONF.AGENT.polling_interval) + self.assertEqual(False, + cfg.CONF.AGENT.rpc_support_old_agents) + self.assertEqual('sudo', + cfg.CONF.AGENT.root_helper) + self.assertEqual('local', + cfg.CONF.VLANS.tenant_network_type) + self.assertEqual(0, + len(cfg.CONF.VLANS.network_vlan_ranges)) + self.assertEqual(0, + len(cfg.CONF.LINUX_BRIDGE. + physical_interface_mappings)) + self.assertEqual(False, cfg.CONF.VXLAN.enable_vxlan) + self.assertEqual(config.DEFAULT_VXLAN_GROUP, + cfg.CONF.VXLAN.vxlan_group) + self.assertEqual(0, len(cfg.CONF.VXLAN.local_ip)) + self.assertEqual(False, cfg.CONF.VXLAN.l2_population) diff --git a/neutron/tests/unit/linuxbridge/test_lb_db.py b/neutron/tests/unit/linuxbridge/test_lb_db.py new file mode 100644 index 000000000..41f56b52e --- /dev/null +++ b/neutron/tests/unit/linuxbridge/test_lb_db.py @@ -0,0 +1,172 @@ +# Copyright (c) 2012 OpenStack Foundation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +from oslo.config import cfg +from six import moves +import testtools +from testtools import matchers + +from neutron.common import exceptions as n_exc +from neutron.db import api as db +from neutron.plugins.linuxbridge.db import l2network_db_v2 as lb_db +from neutron.tests import base +from neutron.tests.unit import test_db_plugin as test_plugin + +PHYS_NET = 'physnet1' +PHYS_NET_2 = 'physnet2' +VLAN_MIN = 10 +VLAN_MAX = 19 +VLAN_RANGES = {PHYS_NET: [(VLAN_MIN, VLAN_MAX)]} +UPDATED_VLAN_RANGES = {PHYS_NET: [(VLAN_MIN + 5, VLAN_MAX + 5)], + PHYS_NET_2: [(VLAN_MIN + 20, VLAN_MAX + 20)]} + +PLUGIN_NAME = ('neutron.plugins.linuxbridge.' + 'lb_neutron_plugin.LinuxBridgePluginV2') + + +class NetworkStatesTest(base.BaseTestCase): + def setUp(self): + super(NetworkStatesTest, self).setUp() + db.configure_db() + lb_db.sync_network_states(VLAN_RANGES) + self.session = db.get_session() + self.addCleanup(db.clear_db) + + def test_sync_network_states(self): + self.assertIsNone(lb_db.get_network_state(PHYS_NET, + VLAN_MIN - 1)) + self.assertFalse(lb_db.get_network_state(PHYS_NET, + VLAN_MIN).allocated) + self.assertFalse(lb_db.get_network_state(PHYS_NET, + VLAN_MIN + 1).allocated) + self.assertFalse(lb_db.get_network_state(PHYS_NET, + VLAN_MAX - 1).allocated) + self.assertFalse(lb_db.get_network_state(PHYS_NET, + VLAN_MAX).allocated) + self.assertIsNone(lb_db.get_network_state(PHYS_NET, + VLAN_MAX + 1)) + + lb_db.sync_network_states(UPDATED_VLAN_RANGES) + + self.assertIsNone(lb_db.get_network_state(PHYS_NET, + VLAN_MIN + 5 - 1)) + self.assertFalse(lb_db.get_network_state(PHYS_NET, + VLAN_MIN + 5).allocated) + self.assertFalse(lb_db.get_network_state(PHYS_NET, + VLAN_MIN + 5 + 1).allocated) + self.assertFalse(lb_db.get_network_state(PHYS_NET, + VLAN_MAX + 5 - 1).allocated) + self.assertFalse(lb_db.get_network_state(PHYS_NET, + VLAN_MAX + 5).allocated) + 
self.assertIsNone(lb_db.get_network_state(PHYS_NET, + VLAN_MAX + 5 + 1)) + + self.assertIsNone(lb_db.get_network_state(PHYS_NET_2, + VLAN_MIN + 20 - 1)) + self.assertFalse(lb_db.get_network_state(PHYS_NET_2, + VLAN_MIN + 20).allocated) + self.assertFalse(lb_db.get_network_state(PHYS_NET_2, + VLAN_MIN + 20 + 1).allocated) + self.assertFalse(lb_db.get_network_state(PHYS_NET_2, + VLAN_MAX + 20 - 1).allocated) + self.assertFalse(lb_db.get_network_state(PHYS_NET_2, + VLAN_MAX + 20).allocated) + self.assertIsNone(lb_db.get_network_state(PHYS_NET_2, + VLAN_MAX + 20 + 1)) + + lb_db.sync_network_states(VLAN_RANGES) + + self.assertIsNone(lb_db.get_network_state(PHYS_NET, + VLAN_MIN - 1)) + self.assertFalse(lb_db.get_network_state(PHYS_NET, + VLAN_MIN).allocated) + self.assertFalse(lb_db.get_network_state(PHYS_NET, + VLAN_MIN + 1).allocated) + self.assertFalse(lb_db.get_network_state(PHYS_NET, + VLAN_MAX - 1).allocated) + self.assertFalse(lb_db.get_network_state(PHYS_NET, + VLAN_MAX).allocated) + self.assertIsNone(lb_db.get_network_state(PHYS_NET, + VLAN_MAX + 1)) + + self.assertIsNone(lb_db.get_network_state(PHYS_NET_2, + VLAN_MIN + 20)) + self.assertIsNone(lb_db.get_network_state(PHYS_NET_2, + VLAN_MAX + 20)) + + def test_network_pool(self): + vlan_ids = set() + for x in moves.xrange(VLAN_MIN, VLAN_MAX + 1): + physical_network, vlan_id = lb_db.reserve_network(self.session) + self.assertEqual(physical_network, PHYS_NET) + self.assertThat(vlan_id, matchers.GreaterThan(VLAN_MIN - 1)) + self.assertThat(vlan_id, matchers.LessThan(VLAN_MAX + 1)) + vlan_ids.add(vlan_id) + + with testtools.ExpectedException(n_exc.NoNetworkAvailable): + physical_network, vlan_id = lb_db.reserve_network(self.session) + + for vlan_id in vlan_ids: + lb_db.release_network(self.session, PHYS_NET, vlan_id, VLAN_RANGES) + + def test_specific_network_inside_pool(self): + vlan_id = VLAN_MIN + 5 + self.assertFalse(lb_db.get_network_state(PHYS_NET, + vlan_id).allocated) + 
lb_db.reserve_specific_network(self.session, PHYS_NET, vlan_id) + self.assertTrue(lb_db.get_network_state(PHYS_NET, + vlan_id).allocated) + + with testtools.ExpectedException(n_exc.VlanIdInUse): + lb_db.reserve_specific_network(self.session, PHYS_NET, vlan_id) + + lb_db.release_network(self.session, PHYS_NET, vlan_id, VLAN_RANGES) + self.assertFalse(lb_db.get_network_state(PHYS_NET, + vlan_id).allocated) + + def test_specific_network_outside_pool(self): + vlan_id = VLAN_MAX + 5 + self.assertIsNone(lb_db.get_network_state(PHYS_NET, vlan_id)) + lb_db.reserve_specific_network(self.session, PHYS_NET, vlan_id) + self.assertTrue(lb_db.get_network_state(PHYS_NET, + vlan_id).allocated) + + with testtools.ExpectedException(n_exc.VlanIdInUse): + lb_db.reserve_specific_network(self.session, PHYS_NET, vlan_id) + + lb_db.release_network(self.session, PHYS_NET, vlan_id, VLAN_RANGES) + self.assertIsNone(lb_db.get_network_state(PHYS_NET, vlan_id)) + + +class NetworkBindingsTest(test_plugin.NeutronDbPluginV2TestCase): + def setUp(self): + cfg.CONF.set_override('network_vlan_ranges', ['physnet1:1000:2999'], + group='VLANS') + super(NetworkBindingsTest, self).setUp(plugin=PLUGIN_NAME) + db.configure_db() + self.session = db.get_session() + + def test_add_network_binding(self): + params = {'provider:network_type': 'vlan', + 'provider:physical_network': PHYS_NET, + 'provider:segmentation_id': 1234} + params['arg_list'] = tuple(params.keys()) + with self.network(**params) as network: + TEST_NETWORK_ID = network['network']['id'] + binding = lb_db.get_network_binding(self.session, TEST_NETWORK_ID) + self.assertIsNotNone(binding) + self.assertEqual(binding.network_id, TEST_NETWORK_ID) + self.assertEqual(binding.physical_network, PHYS_NET) + self.assertEqual(binding.vlan_id, 1234) diff --git a/neutron/tests/unit/linuxbridge/test_lb_neutron_agent.py b/neutron/tests/unit/linuxbridge/test_lb_neutron_agent.py new file mode 100644 index 000000000..d72b5615f --- /dev/null +++ 
b/neutron/tests/unit/linuxbridge/test_lb_neutron_agent.py @@ -0,0 +1,1054 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2012 OpenStack Foundation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import contextlib +import os + +import mock +from oslo.config import cfg +import testtools + +from neutron.agent.linux import ip_lib +from neutron.agent.linux import utils +from neutron.common import constants +from neutron.common import exceptions +from neutron.plugins.common import constants as p_const +from neutron.plugins.linuxbridge.agent import linuxbridge_neutron_agent +from neutron.plugins.linuxbridge.common import constants as lconst +from neutron.tests import base + +LOCAL_IP = '192.168.0.33' +DEVICE_1 = 'tapabcdef01-12' + + +class FakeIpLinkCommand(object): + def set_up(self): + pass + + +class FakeIpDevice(object): + def __init__(self): + self.link = FakeIpLinkCommand() + + +class TestLinuxBridge(base.BaseTestCase): + + def setUp(self): + super(TestLinuxBridge, self).setUp() + interface_mappings = {'physnet1': 'eth1'} + root_helper = cfg.CONF.AGENT.root_helper + + self.linux_bridge = linuxbridge_neutron_agent.LinuxBridgeManager( + interface_mappings, root_helper) + + def test_ensure_physical_in_bridge_invalid(self): + result = self.linux_bridge.ensure_physical_in_bridge('network_id', + p_const.TYPE_VLAN, + 'physnetx', + 7) + self.assertFalse(result) + + def test_ensure_physical_in_bridge_flat(self): + with mock.patch.object(self.linux_bridge, 
+ 'ensure_flat_bridge') as flat_bridge_func: + self.linux_bridge.ensure_physical_in_bridge( + 'network_id', p_const.TYPE_FLAT, 'physnet1', None) + self.assertTrue(flat_bridge_func.called) + + def test_ensure_physical_in_bridge_vlan(self): + with mock.patch.object(self.linux_bridge, + 'ensure_vlan_bridge') as vlan_bridge_func: + self.linux_bridge.ensure_physical_in_bridge( + 'network_id', p_const.TYPE_VLAN, 'physnet1', 7) + self.assertTrue(vlan_bridge_func.called) + + def test_ensure_physical_in_bridge_vxlan(self): + self.linux_bridge.vxlan_mode = lconst.VXLAN_UCAST + with mock.patch.object(self.linux_bridge, + 'ensure_vxlan_bridge') as vxlan_bridge_func: + self.linux_bridge.ensure_physical_in_bridge( + 'network_id', 'vxlan', 'physnet1', 7) + self.assertTrue(vxlan_bridge_func.called) + + +class TestLinuxBridgeAgent(base.BaseTestCase): + + LINK_SAMPLE = [ + '1: lo: mtu 16436 qdisc noqueue \\' + 'state UNKNOWN \\' + 'link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00', + '2: eth77: mtu 1500 \\' + 'qdisc mq state UP qlen 1000\ link/ether \\' + 'cc:dd:ee:ff:ab:cd brd ff:ff:ff:ff:ff:ff'] + + def setUp(self): + super(TestLinuxBridgeAgent, self).setUp() + # disable setting up periodic state reporting + cfg.CONF.set_override('report_interval', 0, 'AGENT') + cfg.CONF.set_override('rpc_backend', + 'neutron.openstack.common.rpc.impl_fake') + cfg.CONF.set_default('firewall_driver', + 'neutron.agent.firewall.NoopFirewallDriver', + group='SECURITYGROUP') + self.execute_p = mock.patch.object(ip_lib.IPWrapper, '_execute') + self.execute = self.execute_p.start() + self.execute.return_value = '\n'.join(self.LINK_SAMPLE) + self.get_mac_p = mock.patch('neutron.agent.linux.utils.' 
+ 'get_interface_mac') + self.get_mac = self.get_mac_p.start() + self.get_mac.return_value = '00:00:00:00:00:01' + self.agent = linuxbridge_neutron_agent.LinuxBridgeNeutronAgentRPC({}, + 0, + None) + + def test_treat_devices_removed_with_existed_device(self): + agent = linuxbridge_neutron_agent.LinuxBridgeNeutronAgentRPC({}, + 0, + None) + devices = [DEVICE_1] + with contextlib.nested( + mock.patch.object(agent.plugin_rpc, "update_device_down"), + mock.patch.object(agent, "remove_devices_filter") + ) as (fn_udd, fn_rdf): + fn_udd.return_value = {'device': DEVICE_1, + 'exists': True} + with mock.patch.object(linuxbridge_neutron_agent.LOG, + 'info') as log: + resync = agent.treat_devices_removed(devices) + self.assertEqual(2, log.call_count) + self.assertFalse(resync) + self.assertTrue(fn_udd.called) + self.assertTrue(fn_rdf.called) + + def test_treat_devices_removed_with_not_existed_device(self): + agent = linuxbridge_neutron_agent.LinuxBridgeNeutronAgentRPC({}, + 0, + None) + devices = [DEVICE_1] + with contextlib.nested( + mock.patch.object(agent.plugin_rpc, "update_device_down"), + mock.patch.object(agent, "remove_devices_filter") + ) as (fn_udd, fn_rdf): + fn_udd.return_value = {'device': DEVICE_1, + 'exists': False} + with mock.patch.object(linuxbridge_neutron_agent.LOG, + 'debug') as log: + resync = agent.treat_devices_removed(devices) + self.assertEqual(1, log.call_count) + self.assertFalse(resync) + self.assertTrue(fn_udd.called) + self.assertTrue(fn_rdf.called) + + def test_treat_devices_removed_failed(self): + agent = linuxbridge_neutron_agent.LinuxBridgeNeutronAgentRPC({}, + 0, + None) + devices = [DEVICE_1] + with contextlib.nested( + mock.patch.object(agent.plugin_rpc, "update_device_down"), + mock.patch.object(agent, "remove_devices_filter") + ) as (fn_udd, fn_rdf): + fn_udd.side_effect = Exception() + with mock.patch.object(linuxbridge_neutron_agent.LOG, + 'debug') as log: + resync = agent.treat_devices_removed(devices) + self.assertEqual(2, 
log.call_count) + self.assertTrue(resync) + self.assertTrue(fn_udd.called) + self.assertTrue(fn_rdf.called) + + def test_loop_restores_updated_devices_on_exception(self): + agent = self.agent + agent.updated_devices = set(['tap1', 'tap2']) + + with contextlib.nested( + mock.patch.object(agent, 'scan_devices'), + mock.patch.object(linuxbridge_neutron_agent.LOG, 'info'), + mock.patch.object(agent, 'process_network_devices') + ) as (scan_devices, log, process_network_devices): + # Simulate effect of 2 port_update()s when loop is running. + # And break out of loop at start of 2nd iteration. + log.side_effect = [agent.updated_devices.add('tap3'), + agent.updated_devices.add('tap4'), + ValueError] + scan_devices.side_effect = RuntimeError + + with testtools.ExpectedException(ValueError): + agent.daemon_loop() + + # Check that the originals {tap1,tap2} have been restored + # and the new updates {tap3, tap4} have not been overwritten. + self.assertEqual(set(['tap1', 'tap2', 'tap3', 'tap4']), + agent.updated_devices) + self.assertEqual(3, log.call_count) + + def mock_scan_devices(self, expected, mock_current, + registered_devices, updated_devices): + self.agent.br_mgr = mock.Mock() + self.agent.br_mgr.get_tap_devices.return_value = mock_current + + results = self.agent.scan_devices(registered_devices, updated_devices) + self.assertEqual(expected, results) + + def test_scan_devices_returns_empty_sets(self): + registered = set() + updated = set() + mock_current = set() + expected = {'current': set(), + 'updated': set(), + 'added': set(), + 'removed': set()} + self.mock_scan_devices(expected, mock_current, registered, updated) + + def test_scan_devices_no_changes(self): + registered = set(['tap1', 'tap2']) + updated = set() + mock_current = set(['tap1', 'tap2']) + expected = {'current': set(['tap1', 'tap2']), + 'updated': set(), + 'added': set(), + 'removed': set()} + self.mock_scan_devices(expected, mock_current, registered, updated) + + def 
test_scan_devices_new_and_removed(self): + registered = set(['tap1', 'tap2']) + updated = set() + mock_current = set(['tap2', 'tap3']) + expected = {'current': set(['tap2', 'tap3']), + 'updated': set(), + 'added': set(['tap3']), + 'removed': set(['tap1'])} + self.mock_scan_devices(expected, mock_current, registered, updated) + + def test_scan_devices_new_updates(self): + registered = set(['tap1']) + updated = set(['tap2']) + mock_current = set(['tap1', 'tap2']) + expected = {'current': set(['tap1', 'tap2']), + 'updated': set(['tap2']), + 'added': set(['tap2']), + 'removed': set()} + self.mock_scan_devices(expected, mock_current, registered, updated) + + def test_scan_devices_updated_missing(self): + registered = set(['tap1']) + updated = set(['tap2']) + mock_current = set(['tap1']) + expected = {'current': set(['tap1']), + 'updated': set(), + 'added': set(), + 'removed': set()} + self.mock_scan_devices(expected, mock_current, registered, updated) + + def test_process_network_devices(self): + agent = self.agent + device_info = {'current': set(), + 'added': set(['tap3', 'tap4']), + 'updated': set(['tap2', 'tap3']), + 'removed': set(['tap1'])} + agent.prepare_devices_filter = mock.Mock() + agent.refresh_firewall = mock.Mock() + agent.treat_devices_added_updated = mock.Mock(return_value=False) + agent.treat_devices_removed = mock.Mock(return_value=False) + + agent.process_network_devices(device_info) + + agent.prepare_devices_filter.assert_called_with(set(['tap3', 'tap4'])) + self.assertTrue(agent.refresh_firewall.called) + agent.treat_devices_added_updated.assert_called_with(set(['tap2', + 'tap3', + 'tap4'])) + agent.treat_devices_removed.assert_called_with(set(['tap1'])) + + def test_treat_devices_added_updated_admin_state_up_true(self): + agent = self.agent + mock_details = {'port_id': 'port123', + 'network_id': 'net123', + 'admin_state_up': True, + 'network_type': 'vlan', + 'segmentation_id': 100, + 'physical_network': 'physnet1'} + agent.plugin_rpc = mock.Mock() + 
agent.plugin_rpc.get_device_details.return_value = mock_details + agent.br_mgr = mock.Mock() + agent.br_mgr.add_interface.return_value = True + + resync_needed = agent.treat_devices_added_updated(set(['tap1'])) + + self.assertFalse(resync_needed) + agent.br_mgr.add_interface.assert_called_with('net123', 'vlan', + 'physnet1', 100, + 'port123') + self.assertTrue(agent.plugin_rpc.update_device_up.called) + + def test_treat_devices_added_updated_admin_state_up_false(self): + mock_details = {'port_id': 'port123', + 'network_id': 'net123', + 'admin_state_up': False, + 'network_type': 'vlan', + 'segmentation_id': 100, + 'physical_network': 'physnet1'} + self.agent.plugin_rpc = mock.Mock() + self.agent.plugin_rpc.get_device_details.return_value = mock_details + self.agent.remove_port_binding = mock.Mock() + + resync_needed = self.agent.treat_devices_added_updated(set(['tap1'])) + + self.assertFalse(resync_needed) + self.agent.remove_port_binding.assert_called_with('net123', 'port123') + self.assertFalse(self.agent.plugin_rpc.update_device_up.called) + + +class TestLinuxBridgeManager(base.BaseTestCase): + def setUp(self): + super(TestLinuxBridgeManager, self).setUp() + self.interface_mappings = {'physnet1': 'eth1'} + self.root_helper = cfg.CONF.AGENT.root_helper + + self.lbm = linuxbridge_neutron_agent.LinuxBridgeManager( + self.interface_mappings, self.root_helper) + + def test_interface_exists_on_bridge(self): + with mock.patch.object(os, 'listdir') as listdir_fn: + listdir_fn.return_value = ["abc"] + self.assertTrue( + self.lbm.interface_exists_on_bridge("br-int", "abc") + ) + self.assertFalse( + self.lbm.interface_exists_on_bridge("br-int", "abd") + ) + + def test_get_bridge_name(self): + nw_id = "123456789101112" + self.assertEqual(self.lbm.get_bridge_name(nw_id), + "brq" + nw_id[0:11]) + nw_id = "" + self.assertEqual(self.lbm.get_bridge_name(nw_id), + "brq") + + def test_get_subinterface_name(self): + self.assertEqual(self.lbm.get_subinterface_name("eth0", "0"), + 
"eth0.0") + self.assertEqual(self.lbm.get_subinterface_name("eth0", ""), + "eth0.") + + def test_get_tap_device_name(self): + if_id = "123456789101112" + self.assertEqual(self.lbm.get_tap_device_name(if_id), + "tap" + if_id[0:11]) + if_id = "" + self.assertEqual(self.lbm.get_tap_device_name(if_id), + "tap") + + def test_get_vxlan_device_name(self): + vn_id = constants.MAX_VXLAN_VNI + self.assertEqual(self.lbm.get_vxlan_device_name(vn_id), + "vxlan-" + str(vn_id)) + self.assertIsNone(self.lbm.get_vxlan_device_name(vn_id + 1)) + + def test_get_all_neutron_bridges(self): + br_list = ["br-int", "brq1", "brq2", "br-ex"] + with mock.patch.object(os, 'listdir') as listdir_fn: + listdir_fn.return_value = br_list + self.assertEqual(self.lbm.get_all_neutron_bridges(), + br_list[1:3]) + self.assertTrue(listdir_fn.called) + + def test_get_interfaces_on_bridge(self): + with contextlib.nested( + mock.patch.object(utils, 'execute'), + mock.patch.object(os, 'listdir'), + mock.patch.object(ip_lib, 'device_exists', return_value=True) + ) as (exec_fn, listdir_fn, dev_exists_fn): + listdir_fn.return_value = ["qbr1"] + self.assertEqual(self.lbm.get_interfaces_on_bridge("br0"), + ["qbr1"]) + + def test_get_interfaces_on_bridge_not_existing(self): + with mock.patch.object(ip_lib, 'device_exists', return_value=False): + self.assertEqual([], self.lbm.get_interfaces_on_bridge("br0")) + + def test_get_tap_devices_count(self): + with mock.patch.object(os, 'listdir') as listdir_fn: + listdir_fn.return_value = ['tap2101', 'eth0.100', 'vxlan-1000'] + self.assertEqual(self.lbm.get_tap_devices_count('br0'), 1) + listdir_fn.side_effect = OSError() + self.assertEqual(self.lbm.get_tap_devices_count('br0'), 0) + + def test_get_interface_by_ip(self): + with contextlib.nested( + mock.patch.object(ip_lib.IPWrapper, 'get_devices'), + mock.patch.object(ip_lib.IpAddrCommand, 'list') + ) as (get_dev_fn, ip_list_fn): + device = mock.Mock() + device.name = 'dev_name' + get_dev_fn.return_value = [device] + 
ip_list_fn.returnvalue = mock.Mock() + self.assertEqual(self.lbm.get_interface_by_ip(LOCAL_IP), + 'dev_name') + + def test_get_bridge_for_tap_device(self): + with contextlib.nested( + mock.patch.object(self.lbm, "get_all_neutron_bridges"), + mock.patch.object(self.lbm, "get_interfaces_on_bridge") + ) as (get_all_qbr_fn, get_if_fn): + get_all_qbr_fn.return_value = ["br-int", "br-ex"] + get_if_fn.return_value = ["tap1", "tap2", "tap3"] + self.assertEqual(self.lbm.get_bridge_for_tap_device("tap1"), + "br-int") + self.assertIsNone(self.lbm.get_bridge_for_tap_device("tap4")) + + def test_is_device_on_bridge(self): + self.assertTrue(not self.lbm.is_device_on_bridge("")) + with mock.patch.object(os.path, 'exists') as exists_fn: + exists_fn.return_value = True + self.assertTrue(self.lbm.is_device_on_bridge("tap1")) + exists_fn.assert_called_with( + "/sys/devices/virtual/net/tap1/brport" + ) + + def test_get_interface_details(self): + with contextlib.nested( + mock.patch.object(ip_lib.IpAddrCommand, 'list'), + mock.patch.object(ip_lib.IpRouteCommand, 'get_gateway') + ) as (list_fn, getgw_fn): + gwdict = dict(gateway='1.1.1.1') + getgw_fn.return_value = gwdict + ipdict = dict(cidr='1.1.1.1/24', + broadcast='1.1.1.255', + scope='global', + ip_version=4, + dynamic=False) + list_fn.return_value = ipdict + ret = self.lbm.get_interface_details("eth0") + + self.assertTrue(list_fn.called) + self.assertTrue(getgw_fn.called) + self.assertEqual(ret, (ipdict, gwdict)) + + def test_ensure_flat_bridge(self): + with contextlib.nested( + mock.patch.object(ip_lib.IpAddrCommand, 'list'), + mock.patch.object(ip_lib.IpRouteCommand, 'get_gateway') + ) as (list_fn, getgw_fn): + gwdict = dict(gateway='1.1.1.1') + getgw_fn.return_value = gwdict + ipdict = dict(cidr='1.1.1.1/24', + broadcast='1.1.1.255', + scope='global', + ip_version=4, + dynamic=False) + list_fn.return_value = ipdict + with mock.patch.object(self.lbm, 'ensure_bridge') as ens: + self.assertEqual( + 
self.lbm.ensure_flat_bridge("123", "eth0"), + "eth0" + ) + self.assertTrue(list_fn.called) + self.assertTrue(getgw_fn.called) + ens.assert_called_once_with("brq123", "eth0", + ipdict, gwdict) + + def test_ensure_vlan_bridge(self): + with contextlib.nested( + mock.patch.object(self.lbm, 'ensure_vlan'), + mock.patch.object(self.lbm, 'ensure_bridge'), + mock.patch.object(self.lbm, 'get_interface_details'), + ) as (ens_vl_fn, ens, get_int_det_fn): + ens_vl_fn.return_value = "eth0.1" + get_int_det_fn.return_value = (None, None) + self.assertEqual(self.lbm.ensure_vlan_bridge("123", "eth0", "1"), + "eth0.1") + ens.assert_called_with("brq123", "eth0.1", None, None) + + get_int_det_fn.return_value = ("ips", "gateway") + self.assertEqual(self.lbm.ensure_vlan_bridge("123", "eth0", "1"), + "eth0.1") + ens.assert_called_with("brq123", "eth0.1", "ips", "gateway") + + def test_ensure_local_bridge(self): + with mock.patch.object(self.lbm, 'ensure_bridge') as ens_fn: + self.lbm.ensure_local_bridge("54321") + ens_fn.assert_called_once_with("brq54321") + + def test_ensure_vlan(self): + with mock.patch.object(ip_lib, 'device_exists') as de_fn: + de_fn.return_value = True + self.assertEqual(self.lbm.ensure_vlan("eth0", "1"), "eth0.1") + de_fn.return_value = False + with mock.patch.object(utils, 'execute') as exec_fn: + exec_fn.return_value = False + self.assertEqual(self.lbm.ensure_vlan("eth0", "1"), "eth0.1") + # FIXME(kevinbenton): validate the params to the exec_fn calls + self.assertEqual(exec_fn.call_count, 2) + exec_fn.return_value = True + self.assertIsNone(self.lbm.ensure_vlan("eth0", "1")) + self.assertEqual(exec_fn.call_count, 3) + + def test_ensure_vxlan(self): + seg_id = "12345678" + self.lbm.local_int = 'eth0' + self.lbm.vxlan_mode = lconst.VXLAN_MCAST + with mock.patch.object(ip_lib, 'device_exists') as de_fn: + de_fn.return_value = True + self.assertEqual(self.lbm.ensure_vxlan(seg_id), "vxlan-" + seg_id) + de_fn.return_value = False + with mock.patch.object(self.lbm.ip, 
+ 'add_vxlan') as add_vxlan_fn: + add_vxlan_fn.return_value = FakeIpDevice() + self.assertEqual(self.lbm.ensure_vxlan(seg_id), + "vxlan-" + seg_id) + add_vxlan_fn.assert_called_with("vxlan-" + seg_id, seg_id, + group="224.0.0.1", + dev=self.lbm.local_int) + cfg.CONF.set_override('l2_population', 'True', 'VXLAN') + self.assertEqual(self.lbm.ensure_vxlan(seg_id), + "vxlan-" + seg_id) + add_vxlan_fn.assert_called_with("vxlan-" + seg_id, seg_id, + group="224.0.0.1", + dev=self.lbm.local_int, + proxy=True) + + def test_update_interface_ip_details(self): + gwdict = dict(gateway='1.1.1.1', + metric=50) + ipdict = dict(cidr='1.1.1.1/24', + broadcast='1.1.1.255', + scope='global', + ip_version=4, + dynamic=False) + with contextlib.nested( + mock.patch.object(ip_lib.IpAddrCommand, 'add'), + mock.patch.object(ip_lib.IpAddrCommand, 'delete') + ) as (add_fn, del_fn): + self.lbm.update_interface_ip_details("br0", "eth0", + [ipdict], None) + self.assertTrue(add_fn.called) + self.assertTrue(del_fn.called) + + with contextlib.nested( + mock.patch.object(ip_lib.IpRouteCommand, 'add_gateway'), + mock.patch.object(ip_lib.IpRouteCommand, 'delete_gateway') + ) as (addgw_fn, delgw_fn): + self.lbm.update_interface_ip_details("br0", "eth0", + None, gwdict) + self.assertTrue(addgw_fn.called) + self.assertTrue(delgw_fn.called) + + def test_bridge_exists_and_ensure_up(self): + ip_lib_mock = mock.Mock() + with mock.patch.object(ip_lib, 'IPDevice', return_value=ip_lib_mock): + # device exists + self.assertTrue(self.lbm._bridge_exists_and_ensure_up("br0")) + self.assertTrue(ip_lib_mock.link.set_up.called) + # device doesn't exists + ip_lib_mock.link.set_up.side_effect = RuntimeError + self.assertFalse(self.lbm._bridge_exists_and_ensure_up("br0")) + + def test_ensure_bridge(self): + with contextlib.nested( + mock.patch.object(self.lbm, '_bridge_exists_and_ensure_up'), + mock.patch.object(utils, 'execute'), + mock.patch.object(self.lbm, 'update_interface_ip_details'), + mock.patch.object(self.lbm, 
'interface_exists_on_bridge'), + mock.patch.object(self.lbm, 'is_device_on_bridge'), + mock.patch.object(self.lbm, 'get_bridge_for_tap_device'), + ) as (de_fn, exec_fn, upd_fn, ie_fn, if_br_fn, get_if_br_fn): + de_fn.return_value = False + exec_fn.return_value = False + self.assertEqual(self.lbm.ensure_bridge("br0", None), "br0") + ie_fn.return_Value = False + self.lbm.ensure_bridge("br0", "eth0") + upd_fn.assert_called_with("br0", "eth0", None, None) + ie_fn.assert_called_with("br0", "eth0") + + self.lbm.ensure_bridge("br0", "eth0", "ips", "gateway") + upd_fn.assert_called_with("br0", "eth0", "ips", "gateway") + ie_fn.assert_called_with("br0", "eth0") + + exec_fn.side_effect = Exception() + de_fn.return_value = True + self.lbm.ensure_bridge("br0", "eth0") + ie_fn.assert_called_with("br0", "eth0") + + exec_fn.reset_mock() + exec_fn.side_effect = None + de_fn.return_value = True + ie_fn.return_value = False + get_if_br_fn.return_value = "br1" + self.lbm.ensure_bridge("br0", "eth0") + expected = [ + mock.call(['brctl', 'delif', 'br1', 'eth0'], + root_helper=self.root_helper), + mock.call(['brctl', 'addif', 'br0', 'eth0'], + root_helper=self.root_helper), + ] + exec_fn.assert_has_calls(expected) + + def test_ensure_physical_in_bridge(self): + self.assertFalse( + self.lbm.ensure_physical_in_bridge("123", p_const.TYPE_VLAN, + "phys", "1") + ) + with mock.patch.object(self.lbm, "ensure_flat_bridge") as flbr_fn: + self.assertTrue( + self.lbm.ensure_physical_in_bridge("123", p_const.TYPE_FLAT, + "physnet1", None) + ) + self.assertTrue(flbr_fn.called) + with mock.patch.object(self.lbm, "ensure_vlan_bridge") as vlbr_fn: + self.assertTrue( + self.lbm.ensure_physical_in_bridge("123", p_const.TYPE_VLAN, + "physnet1", "1") + ) + self.assertTrue(vlbr_fn.called) + + with mock.patch.object(self.lbm, "ensure_vxlan_bridge") as vlbr_fn: + self.lbm.vxlan_mode = lconst.VXLAN_MCAST + self.assertTrue( + self.lbm.ensure_physical_in_bridge("123", p_const.TYPE_VXLAN, + "physnet1", "1") + ) + 
self.assertTrue(vlbr_fn.called) + + def test_add_tap_interface(self): + with mock.patch.object(ip_lib, "device_exists") as de_fn: + de_fn.return_value = False + self.assertFalse( + self.lbm.add_tap_interface("123", p_const.TYPE_VLAN, + "physnet1", "1", "tap1") + ) + + de_fn.return_value = True + with contextlib.nested( + mock.patch.object(self.lbm, "ensure_local_bridge"), + mock.patch.object(utils, "execute"), + mock.patch.object(self.lbm, "get_bridge_for_tap_device") + ) as (en_fn, exec_fn, get_br): + exec_fn.return_value = False + get_br.return_value = True + self.assertTrue(self.lbm.add_tap_interface("123", + p_const.TYPE_LOCAL, + "physnet1", None, + "tap1")) + en_fn.assert_called_with("123") + + get_br.return_value = False + exec_fn.return_value = True + self.assertFalse(self.lbm.add_tap_interface("123", + p_const.TYPE_LOCAL, + "physnet1", None, + "tap1")) + + with mock.patch.object(self.lbm, + "ensure_physical_in_bridge") as ens_fn: + ens_fn.return_value = False + self.assertFalse(self.lbm.add_tap_interface("123", + p_const.TYPE_VLAN, + "physnet1", "1", + "tap1")) + + def test_add_interface(self): + with mock.patch.object(self.lbm, "add_tap_interface") as add_tap: + self.lbm.add_interface("123", p_const.TYPE_VLAN, "physnet-1", + "1", "234") + add_tap.assert_called_with("123", p_const.TYPE_VLAN, "physnet-1", + "1", "tap234") + + def test_delete_vlan_bridge(self): + with contextlib.nested( + mock.patch.object(ip_lib, "device_exists"), + mock.patch.object(self.lbm, "get_interfaces_on_bridge"), + mock.patch.object(self.lbm, "remove_interface"), + mock.patch.object(self.lbm, "get_interface_details"), + mock.patch.object(self.lbm, "update_interface_ip_details"), + mock.patch.object(self.lbm, "delete_vxlan"), + mock.patch.object(utils, "execute") + ) as (de_fn, getif_fn, remif_fn, if_det_fn, + updif_fn, del_vxlan, exec_fn): + de_fn.return_value = False + self.lbm.delete_vlan_bridge("br0") + self.assertFalse(getif_fn.called) + + de_fn.return_value = True + 
getif_fn.return_value = ["eth0", "eth1", "vxlan-1002"] + if_det_fn.return_value = ("ips", "gateway") + exec_fn.return_value = False + self.lbm.delete_vlan_bridge("br0") + updif_fn.assert_called_with("eth1", "br0", "ips", "gateway") + del_vxlan.assert_called_with("vxlan-1002") + + def test_delete_vlan_bridge_with_ip(self): + with contextlib.nested( + mock.patch.object(ip_lib, "device_exists"), + mock.patch.object(self.lbm, "get_interfaces_on_bridge"), + mock.patch.object(self.lbm, "remove_interface"), + mock.patch.object(self.lbm, "get_interface_details"), + mock.patch.object(self.lbm, "update_interface_ip_details"), + mock.patch.object(self.lbm, "delete_vlan"), + mock.patch.object(utils, "execute") + ) as (de_fn, getif_fn, remif_fn, if_det_fn, + updif_fn, del_vlan, exec_fn): + de_fn.return_value = True + getif_fn.return_value = ["eth0", "eth1.1"] + if_det_fn.return_value = ("ips", "gateway") + exec_fn.return_value = False + self.lbm.delete_vlan_bridge("br0") + updif_fn.assert_called_with("eth1.1", "br0", "ips", "gateway") + self.assertFalse(del_vlan.called) + + def test_delete_vlan_bridge_no_ip(self): + with contextlib.nested( + mock.patch.object(ip_lib, "device_exists"), + mock.patch.object(self.lbm, "get_interfaces_on_bridge"), + mock.patch.object(self.lbm, "remove_interface"), + mock.patch.object(self.lbm, "get_interface_details"), + mock.patch.object(self.lbm, "update_interface_ip_details"), + mock.patch.object(self.lbm, "delete_vlan"), + mock.patch.object(utils, "execute") + ) as (de_fn, getif_fn, remif_fn, if_det_fn, + updif_fn, del_vlan, exec_fn): + de_fn.return_value = True + getif_fn.return_value = ["eth0", "eth1.1"] + exec_fn.return_value = False + if_det_fn.return_value = ([], None) + self.lbm.delete_vlan_bridge("br0") + del_vlan.assert_called_with("eth1.1") + self.assertFalse(updif_fn.called) + + def test_delete_vxlan_bridge_no_int_mappings(self): + interface_mappings = {} + lbm = linuxbridge_neutron_agent.LinuxBridgeManager( + interface_mappings, 
self.root_helper) + + with contextlib.nested( + mock.patch.object(ip_lib, "device_exists"), + mock.patch.object(lbm, "get_interfaces_on_bridge"), + mock.patch.object(lbm, "remove_interface"), + mock.patch.object(lbm, "delete_vxlan"), + mock.patch.object(utils, "execute") + ) as (de_fn, getif_fn, remif_fn, del_vxlan, exec_fn): + de_fn.return_value = False + lbm.delete_vlan_bridge("br0") + self.assertFalse(getif_fn.called) + + de_fn.return_value = True + getif_fn.return_value = ["vxlan-1002"] + exec_fn.return_value = False + lbm.delete_vlan_bridge("br0") + del_vxlan.assert_called_with("vxlan-1002") + + def test_remove_empty_bridges(self): + self.lbm.network_map = {'net1': mock.Mock(), 'net2': mock.Mock()} + + def tap_count_side_effect(*args): + return 0 if args[0] == 'brqnet1' else 1 + + with contextlib.nested( + mock.patch.object(self.lbm, "delete_vlan_bridge"), + mock.patch.object(self.lbm, "get_tap_devices_count", + side_effect=tap_count_side_effect), + ) as (del_br_fn, count_tap_fn): + self.lbm.remove_empty_bridges() + del_br_fn.assert_called_once_with('brqnet1') + + def test_remove_interface(self): + with contextlib.nested( + mock.patch.object(ip_lib, "device_exists"), + mock.patch.object(self.lbm, "is_device_on_bridge"), + mock.patch.object(utils, "execute") + ) as (de_fn, isdev_fn, exec_fn): + de_fn.return_value = False + self.assertFalse(self.lbm.remove_interface("br0", "eth0")) + self.assertFalse(isdev_fn.called) + + de_fn.return_value = True + isdev_fn.return_value = False + self.assertTrue(self.lbm.remove_interface("br0", "eth0")) + + isdev_fn.return_value = True + exec_fn.return_value = True + self.assertFalse(self.lbm.remove_interface("br0", "eth0")) + + exec_fn.return_value = False + self.assertTrue(self.lbm.remove_interface("br0", "eth0")) + + def test_delete_vlan(self): + with contextlib.nested( + mock.patch.object(ip_lib, "device_exists"), + mock.patch.object(utils, "execute") + ) as (de_fn, exec_fn): + de_fn.return_value = False + 
self.lbm.delete_vlan("eth1.1") + self.assertFalse(exec_fn.called) + + de_fn.return_value = True + exec_fn.return_value = False + self.lbm.delete_vlan("eth1.1") + self.assertTrue(exec_fn.called) + + def _check_vxlan_support(self, expected, vxlan_module_supported, + vxlan_ucast_supported, vxlan_mcast_supported): + with contextlib.nested( + mock.patch.object(self.lbm, 'vxlan_module_supported', + return_value=vxlan_module_supported), + mock.patch.object(self.lbm, 'vxlan_ucast_supported', + return_value=vxlan_ucast_supported), + mock.patch.object(self.lbm, 'vxlan_mcast_supported', + return_value=vxlan_mcast_supported)): + if expected == lconst.VXLAN_NONE: + self.assertRaises(exceptions.VxlanNetworkUnsupported, + self.lbm.check_vxlan_support) + self.assertEqual(expected, self.lbm.vxlan_mode) + else: + self.lbm.check_vxlan_support() + self.assertEqual(expected, self.lbm.vxlan_mode) + + def test_check_vxlan_support(self): + self._check_vxlan_support(expected=lconst.VXLAN_UCAST, + vxlan_module_supported=True, + vxlan_ucast_supported=True, + vxlan_mcast_supported=True) + self._check_vxlan_support(expected=lconst.VXLAN_MCAST, + vxlan_module_supported=True, + vxlan_ucast_supported=False, + vxlan_mcast_supported=True) + + self._check_vxlan_support(expected=lconst.VXLAN_NONE, + vxlan_module_supported=False, + vxlan_ucast_supported=False, + vxlan_mcast_supported=False) + self._check_vxlan_support(expected=lconst.VXLAN_NONE, + vxlan_module_supported=True, + vxlan_ucast_supported=False, + vxlan_mcast_supported=False) + + def _check_vxlan_module_supported(self, expected, execute_side_effect): + with mock.patch.object( + utils, 'execute', + side_effect=execute_side_effect): + self.assertEqual(expected, self.lbm.vxlan_module_supported()) + + def test_vxlan_module_supported(self): + self._check_vxlan_module_supported( + expected=True, + execute_side_effect=None) + self._check_vxlan_module_supported( + expected=False, + execute_side_effect=RuntimeError()) + + def 
_check_vxlan_ucast_supported( + self, expected, l2_population, iproute_arg_supported, fdb_append): + cfg.CONF.set_override('l2_population', l2_population, 'VXLAN') + with contextlib.nested( + mock.patch.object( + ip_lib, 'device_exists', return_value=False), + mock.patch.object(self.lbm, 'delete_vxlan', return_value=None), + mock.patch.object(self.lbm, 'ensure_vxlan', return_value=None), + mock.patch.object( + utils, 'execute', + side_effect=None if fdb_append else RuntimeError()), + mock.patch.object( + ip_lib, 'iproute_arg_supported', + return_value=iproute_arg_supported)): + self.assertEqual(expected, self.lbm.vxlan_ucast_supported()) + + def test_vxlan_ucast_supported(self): + self._check_vxlan_ucast_supported( + expected=False, + l2_population=False, iproute_arg_supported=True, fdb_append=True) + self._check_vxlan_ucast_supported( + expected=False, + l2_population=True, iproute_arg_supported=False, fdb_append=True) + self._check_vxlan_ucast_supported( + expected=False, + l2_population=True, iproute_arg_supported=True, fdb_append=False) + self._check_vxlan_ucast_supported( + expected=True, + l2_population=True, iproute_arg_supported=True, fdb_append=True) + + def _check_vxlan_mcast_supported( + self, expected, vxlan_group, iproute_arg_supported): + cfg.CONF.set_override('vxlan_group', vxlan_group, 'VXLAN') + with mock.patch.object( + ip_lib, 'iproute_arg_supported', + return_value=iproute_arg_supported): + self.assertEqual(expected, self.lbm.vxlan_mcast_supported()) + + def test_vxlan_mcast_supported(self): + self._check_vxlan_mcast_supported( + expected=False, + vxlan_group='', + iproute_arg_supported=True) + self._check_vxlan_mcast_supported( + expected=False, + vxlan_group='224.0.0.1', + iproute_arg_supported=False) + self._check_vxlan_mcast_supported( + expected=True, + vxlan_group='224.0.0.1', + iproute_arg_supported=True) + + +class TestLinuxBridgeRpcCallbacks(base.BaseTestCase): + def setUp(self): + cfg.CONF.set_override('local_ip', LOCAL_IP, 'VXLAN') + 
super(TestLinuxBridgeRpcCallbacks, self).setUp() + + self.u_execute_p = mock.patch('neutron.agent.linux.utils.execute') + self.u_execute = self.u_execute_p.start() + + class FakeLBAgent(object): + def __init__(self): + self.agent_id = 1 + self.br_mgr = (linuxbridge_neutron_agent. + LinuxBridgeManager({'physnet1': 'eth1'}, + cfg.CONF.AGENT.root_helper)) + + self.br_mgr.vxlan_mode = lconst.VXLAN_UCAST + segment = mock.Mock() + segment.network_type = 'vxlan' + segment.segmentation_id = 1 + self.br_mgr.network_map['net_id'] = segment + + self.lb_rpc = linuxbridge_neutron_agent.LinuxBridgeRpcCallbacks( + object(), + FakeLBAgent() + ) + + self.root_helper = cfg.CONF.AGENT.root_helper + + def test_network_delete(self): + with contextlib.nested( + mock.patch.object(self.lb_rpc.agent.br_mgr, "get_bridge_name"), + mock.patch.object(self.lb_rpc.agent.br_mgr, "delete_vlan_bridge") + ) as (get_br_fn, del_fn): + get_br_fn.return_value = "br0" + self.lb_rpc.network_delete("anycontext", network_id="123") + get_br_fn.assert_called_with("123") + del_fn.assert_called_with("br0") + + def test_fdb_add(self): + fdb_entries = {'net_id': + {'ports': + {'agent_ip': [constants.FLOODING_ENTRY, + ['port_mac', 'port_ip']]}, + 'network_type': 'vxlan', + 'segment_id': 1}} + + with mock.patch.object(utils, 'execute', + return_value='') as execute_fn: + self.lb_rpc.fdb_add(None, fdb_entries) + + expected = [ + mock.call(['bridge', 'fdb', 'show', 'dev', 'vxlan-1'], + root_helper=self.root_helper), + mock.call(['bridge', 'fdb', 'add', + constants.FLOODING_ENTRY[0], + 'dev', 'vxlan-1', 'dst', 'agent_ip'], + root_helper=self.root_helper, + check_exit_code=False), + mock.call(['ip', 'neigh', 'replace', 'port_ip', 'lladdr', + 'port_mac', 'dev', 'vxlan-1', 'nud', 'permanent'], + root_helper=self.root_helper, + check_exit_code=False), + mock.call(['bridge', 'fdb', 'add', 'port_mac', 'dev', + 'vxlan-1', 'dst', 'agent_ip'], + root_helper=self.root_helper, + check_exit_code=False), + ] + 
execute_fn.assert_has_calls(expected) + + def test_fdb_ignore(self): + fdb_entries = {'net_id': + {'ports': + {LOCAL_IP: [constants.FLOODING_ENTRY, + ['port_mac', 'port_ip']]}, + 'network_type': 'vxlan', + 'segment_id': 1}} + + with mock.patch.object(utils, 'execute', + return_value='') as execute_fn: + self.lb_rpc.fdb_add(None, fdb_entries) + self.lb_rpc.fdb_remove(None, fdb_entries) + + self.assertFalse(execute_fn.called) + + fdb_entries = {'other_net_id': + {'ports': + {'192.168.0.67': [constants.FLOODING_ENTRY, + ['port_mac', 'port_ip']]}, + 'network_type': 'vxlan', + 'segment_id': 1}} + + with mock.patch.object(utils, 'execute', + return_value='') as execute_fn: + self.lb_rpc.fdb_add(None, fdb_entries) + self.lb_rpc.fdb_remove(None, fdb_entries) + + self.assertFalse(execute_fn.called) + + def test_fdb_remove(self): + fdb_entries = {'net_id': + {'ports': + {'agent_ip': [constants.FLOODING_ENTRY, + ['port_mac', 'port_ip']]}, + 'network_type': 'vxlan', + 'segment_id': 1}} + + with mock.patch.object(utils, 'execute', + return_value='') as execute_fn: + self.lb_rpc.fdb_remove(None, fdb_entries) + + expected = [ + mock.call(['bridge', 'fdb', 'del', + constants.FLOODING_ENTRY[0], + 'dev', 'vxlan-1', 'dst', 'agent_ip'], + root_helper=self.root_helper, + check_exit_code=False), + mock.call(['ip', 'neigh', 'del', 'port_ip', 'lladdr', + 'port_mac', 'dev', 'vxlan-1'], + root_helper=self.root_helper, + check_exit_code=False), + mock.call(['bridge', 'fdb', 'del', 'port_mac', + 'dev', 'vxlan-1', 'dst', 'agent_ip'], + root_helper=self.root_helper, + check_exit_code=False), + ] + execute_fn.assert_has_calls(expected) + + def test_fdb_update_chg_ip(self): + fdb_entries = {'chg_ip': + {'net_id': + {'agent_ip': + {'before': [['port_mac', 'port_ip_1']], + 'after': [['port_mac', 'port_ip_2']]}}}} + + with mock.patch.object(utils, 'execute', + return_value='') as execute_fn: + self.lb_rpc.fdb_update(None, fdb_entries) + + expected = [ + mock.call(['ip', 'neigh', 'replace', 
'port_ip_2', 'lladdr', + 'port_mac', 'dev', 'vxlan-1', 'nud', 'permanent'], + root_helper=self.root_helper, + check_exit_code=False), + mock.call(['ip', 'neigh', 'del', 'port_ip_1', 'lladdr', + 'port_mac', 'dev', 'vxlan-1'], + root_helper=self.root_helper, + check_exit_code=False) + ] + execute_fn.assert_has_calls(expected) diff --git a/neutron/tests/unit/linuxbridge/test_lb_security_group.py b/neutron/tests/unit/linuxbridge/test_lb_security_group.py new file mode 100644 index 000000000..62662036a --- /dev/null +++ b/neutron/tests/unit/linuxbridge/test_lb_security_group.py @@ -0,0 +1,99 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2012, Nachi Ueno, NTT MCL, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import mock + +from neutron.api.v2 import attributes +from neutron.extensions import securitygroup as ext_sg +from neutron.plugins.linuxbridge.db import l2network_db_v2 as lb_db +from neutron.tests.unit import test_extension_security_group as test_sg +from neutron.tests.unit import test_security_groups_rpc as test_sg_rpc + + +PLUGIN_NAME = ('neutron.plugins.linuxbridge.' + 'lb_neutron_plugin.LinuxBridgePluginV2') +NOTIFIER = ('neutron.plugins.linuxbridge.' 
+ 'lb_neutron_plugin.AgentNotifierApi') + + +class LinuxBridgeSecurityGroupsTestCase(test_sg.SecurityGroupDBTestCase): + _plugin_name = PLUGIN_NAME + + def setUp(self, plugin=None): + test_sg_rpc.set_firewall_driver(test_sg_rpc.FIREWALL_IPTABLES_DRIVER) + notifier_p = mock.patch(NOTIFIER) + notifier_cls = notifier_p.start() + self.notifier = mock.Mock() + notifier_cls.return_value = self.notifier + self._attribute_map_bk_ = {} + for item in attributes.RESOURCE_ATTRIBUTE_MAP: + self._attribute_map_bk_[item] = (attributes. + RESOURCE_ATTRIBUTE_MAP[item]. + copy()) + super(LinuxBridgeSecurityGroupsTestCase, self).setUp(PLUGIN_NAME) + + def tearDown(self): + attributes.RESOURCE_ATTRIBUTE_MAP = self._attribute_map_bk_ + super(LinuxBridgeSecurityGroupsTestCase, self).tearDown() + + +class TestLinuxBridgeSecurityGroups(LinuxBridgeSecurityGroupsTestCase, + test_sg.TestSecurityGroups, + test_sg_rpc.SGNotificationTestMixin): + pass + + +class TestLinuxBridgeSecurityGroupsXML(TestLinuxBridgeSecurityGroups): + fmt = 'xml' + + +class TestLinuxBridgeSecurityGroupsDB(LinuxBridgeSecurityGroupsTestCase): + def test_security_group_get_port_from_device(self): + with self.network() as n: + with self.subnet(n): + with self.security_group() as sg: + security_group_id = sg['security_group']['id'] + res = self._create_port(self.fmt, n['network']['id']) + port = self.deserialize(self.fmt, res) + fixed_ips = port['port']['fixed_ips'] + data = {'port': {'fixed_ips': fixed_ips, + 'name': port['port']['name'], + ext_sg.SECURITYGROUPS: + [security_group_id]}} + + req = self.new_update_request('ports', data, + port['port']['id']) + res = self.deserialize(self.fmt, + req.get_response(self.api)) + port_id = res['port']['id'] + device_id = port_id[:8] + port_dict = lb_db.get_port_from_device(device_id) + self.assertEqual(port_id, port_dict['id']) + self.assertEqual([security_group_id], + port_dict[ext_sg.SECURITYGROUPS]) + self.assertEqual([], port_dict['security_group_rules']) + 
self.assertEqual([fixed_ips[0]['ip_address']], + port_dict['fixed_ips']) + self._delete('ports', port['port']['id']) + + def test_security_group_get_port_from_device_with_no_port(self): + port_dict = lb_db.get_port_from_device('bad_device_id') + self.assertIsNone(port_dict) + + +class TestLinuxBridgeSecurityGroupsDBXML(TestLinuxBridgeSecurityGroupsDB): + fmt = 'xml' diff --git a/neutron/tests/unit/linuxbridge/test_linuxbridge_plugin.py b/neutron/tests/unit/linuxbridge/test_linuxbridge_plugin.py new file mode 100644 index 000000000..3ff0f7592 --- /dev/null +++ b/neutron/tests/unit/linuxbridge/test_linuxbridge_plugin.py @@ -0,0 +1,132 @@ +# Copyright (c) 2012 OpenStack Foundation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import contextlib + +import mock +from oslo.config import cfg + +from neutron.common import constants as q_const +from neutron.extensions import portbindings +from neutron import manager +from neutron.plugins.linuxbridge import lb_neutron_plugin +from neutron.tests.unit import _test_extension_portbindings as test_bindings +from neutron.tests.unit import test_db_plugin as test_plugin +from neutron.tests.unit import test_security_groups_rpc as test_sg_rpc + +PLUGIN_NAME = ('neutron.plugins.linuxbridge.' 
+ 'lb_neutron_plugin.LinuxBridgePluginV2') + + +class LinuxBridgePluginV2TestCase(test_plugin.NeutronDbPluginV2TestCase): + _plugin_name = PLUGIN_NAME + + def setUp(self): + super(LinuxBridgePluginV2TestCase, self).setUp(PLUGIN_NAME) + self.port_create_status = 'DOWN' + + +class TestLinuxBridgeBasicGet(test_plugin.TestBasicGet, + LinuxBridgePluginV2TestCase): + pass + + +class TestLinuxBridgeV2HTTPResponse(test_plugin.TestV2HTTPResponse, + LinuxBridgePluginV2TestCase): + pass + + +class TestLinuxBridgeNetworksV2(test_plugin.TestNetworksV2, + LinuxBridgePluginV2TestCase): + pass + + +class TestLinuxBridgePortsV2(test_plugin.TestPortsV2, + LinuxBridgePluginV2TestCase): + + def test_update_port_status_build(self): + with self.port() as port: + self.assertEqual(port['port']['status'], 'DOWN') + self.assertEqual(self.port_create_status, 'DOWN') + + +class TestLinuxBridgePortBinding(LinuxBridgePluginV2TestCase, + test_bindings.PortBindingsTestCase): + VIF_TYPE = portbindings.VIF_TYPE_BRIDGE + HAS_PORT_FILTER = True + ENABLE_SG = True + FIREWALL_DRIVER = test_sg_rpc.FIREWALL_IPTABLES_DRIVER + + def setUp(self): + test_sg_rpc.set_firewall_driver(self.FIREWALL_DRIVER) + cfg.CONF.set_override( + 'enable_security_group', self.ENABLE_SG, + group='SECURITYGROUP') + super(TestLinuxBridgePortBinding, self).setUp() + + +class TestLinuxBridgePortBindingNoSG(TestLinuxBridgePortBinding): + HAS_PORT_FILTER = False + ENABLE_SG = False + FIREWALL_DRIVER = test_sg_rpc.FIREWALL_NOOP_DRIVER + + +class TestLinuxBridgePortBindingHost( + LinuxBridgePluginV2TestCase, + test_bindings.PortBindingsHostTestCaseMixin): + pass + + +class TestLinuxBridgePluginRpcCallbacks(test_plugin.NeutronDbPluginV2TestCase): + def setUp(self): + super(TestLinuxBridgePluginRpcCallbacks, self).setUp(PLUGIN_NAME) + self.callbacks = lb_neutron_plugin.LinuxBridgeRpcCallbacks() + + def test_update_device_down(self): + with contextlib.nested( + mock.patch.object(self.callbacks, "get_port_from_device", + 
return_value=None), + mock.patch.object(manager.NeutronManager, "get_plugin") + ) as (gpfd, gp): + self.assertEqual( + self.callbacks.update_device_down("fake_context", + agent_id="123", + device="device", + host="host"), + {'device': 'device', 'exists': False} + ) + gpfd.return_value = {'id': 'fakeid', + 'status': q_const.PORT_STATUS_ACTIVE} + self.assertEqual( + self.callbacks.update_device_down("fake_context", + agent_id="123", + device="device", + host="host"), + {'device': 'device', 'exists': True} + ) + + def test_update_device_up(self): + with contextlib.nested( + mock.patch.object(self.callbacks, "get_port_from_device", + return_value=None), + mock.patch.object(manager.NeutronManager, "get_plugin") + ) as (gpfd, gp): + gpfd.return_value = {'id': 'fakeid', + 'status': q_const.PORT_STATUS_ACTIVE} + self.callbacks.update_device_up("fake_context", + agent_id="123", + device="device", + host="host") + gpfd.assert_called_once_with('device') diff --git a/neutron/tests/unit/linuxbridge/test_rpcapi.py b/neutron/tests/unit/linuxbridge/test_rpcapi.py new file mode 100644 index 000000000..616a06acd --- /dev/null +++ b/neutron/tests/unit/linuxbridge/test_rpcapi.py @@ -0,0 +1,132 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012, Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +""" +Unit Tests for linuxbridge rpc +""" + +import fixtures +from oslo.config import cfg + +from neutron.agent import rpc as agent_rpc +from neutron.common import topics +from neutron.openstack.common import context +from neutron.plugins.linuxbridge import lb_neutron_plugin as plb +from neutron.tests import base + + +class rpcApiTestCase(base.BaseTestCase): + def _test_lb_api(self, rpcapi, topic, method, rpc_method, + expected_msg=None, **kwargs): + ctxt = context.RequestContext('fake_user', 'fake_project') + expected_retval = 'foo' if method == 'call' else None + if not expected_msg: + expected_msg = rpcapi.make_msg(method, **kwargs) + if rpc_method == 'cast' and method == 'run_instance': + kwargs['call'] = False + + self.fake_args = None + self.fake_kwargs = None + + def _fake_rpc_method(*args, **kwargs): + self.fake_args = args + self.fake_kwargs = kwargs + if expected_retval: + return expected_retval + + self.useFixture(fixtures.MonkeyPatch( + 'neutron.common.rpc_compat.RpcProxy.' 
+ rpc_method, + _fake_rpc_method)) + + retval = getattr(rpcapi, method)(ctxt, **kwargs) + + self.assertEqual(expected_retval, retval) + expected_args = [ctxt, expected_msg] + expected_kwargs = {'topic': topic} + + # skip the first argument which is 'self' + for arg, expected_arg in zip(self.fake_args[1:], expected_args): + self.assertEqual(expected_arg, arg) + self.assertEqual(expected_kwargs, self.fake_kwargs) + + def test_delete_network(self): + rpcapi = plb.AgentNotifierApi(topics.AGENT) + self._test_lb_api(rpcapi, + topics.get_topic_name(topics.AGENT, + topics.NETWORK, + topics.DELETE), + 'network_delete', rpc_method='fanout_cast', + network_id='fake_request_spec') + + def test_port_update(self): + cfg.CONF.set_override('rpc_support_old_agents', False, 'AGENT') + rpcapi = plb.AgentNotifierApi(topics.AGENT) + expected_msg = rpcapi.make_msg('port_update', + port='fake_port', + network_type='vlan', + physical_network='fake_net', + segmentation_id='fake_vlan_id') + self._test_lb_api(rpcapi, + topics.get_topic_name(topics.AGENT, + topics.PORT, + topics.UPDATE), + 'port_update', rpc_method='fanout_cast', + expected_msg=expected_msg, + port='fake_port', + physical_network='fake_net', + vlan_id='fake_vlan_id') + + def test_port_update_old_agent(self): + cfg.CONF.set_override('rpc_support_old_agents', True, 'AGENT') + rpcapi = plb.AgentNotifierApi(topics.AGENT) + expected_msg = rpcapi.make_msg('port_update', + port='fake_port', + network_type='vlan', + physical_network='fake_net', + segmentation_id='fake_vlan_id', + vlan_id='fake_vlan_id') + self._test_lb_api(rpcapi, + topics.get_topic_name(topics.AGENT, + topics.PORT, + topics.UPDATE), + 'port_update', rpc_method='fanout_cast', + expected_msg=expected_msg, + port='fake_port', + physical_network='fake_net', + vlan_id='fake_vlan_id') + + def test_device_details(self): + rpcapi = agent_rpc.PluginApi(topics.PLUGIN) + self._test_lb_api(rpcapi, topics.PLUGIN, + 'get_device_details', rpc_method='call', + device='fake_device', 
+ agent_id='fake_agent_id') + + def test_update_device_down(self): + rpcapi = agent_rpc.PluginApi(topics.PLUGIN) + self._test_lb_api(rpcapi, topics.PLUGIN, + 'update_device_down', rpc_method='call', + device='fake_device', + agent_id='fake_agent_id', + host='fake_host') + + def test_update_device_up(self): + rpcapi = agent_rpc.PluginApi(topics.PLUGIN) + self._test_lb_api(rpcapi, topics.PLUGIN, + 'update_device_up', rpc_method='call', + device='fake_device', + agent_id='fake_agent_id', + host='fake_host') diff --git a/neutron/tests/unit/metaplugin/__init__.py b/neutron/tests/unit/metaplugin/__init__.py new file mode 100644 index 000000000..d8bce7745 --- /dev/null +++ b/neutron/tests/unit/metaplugin/__init__.py @@ -0,0 +1,16 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2012, Nachi Ueno, NTT MCL, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/neutron/tests/unit/metaplugin/fake_plugin.py b/neutron/tests/unit/metaplugin/fake_plugin.py new file mode 100644 index 000000000..1430697f3 --- /dev/null +++ b/neutron/tests/unit/metaplugin/fake_plugin.py @@ -0,0 +1,79 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2012, Nachi Ueno, NTT MCL, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from neutron.db import db_base_plugin_v2 +from neutron.db import external_net_db +from neutron.db import l3_gwmode_db + + +class Fake1(db_base_plugin_v2.NeutronDbPluginV2, + external_net_db.External_net_db_mixin, + l3_gwmode_db.L3_NAT_db_mixin): + supported_extension_aliases = ['external-net', 'router'] + + def fake_func(self): + return 'fake1' + + def create_network(self, context, network): + session = context.session + with session.begin(subtransactions=True): + net = super(Fake1, self).create_network(context, network) + self._process_l3_create(context, net, network['network']) + return net + + def update_network(self, context, id, network): + session = context.session + with session.begin(subtransactions=True): + net = super(Fake1, self).update_network(context, id, + network) + self._process_l3_update(context, net, network['network']) + return net + + def delete_network(self, context, id): + session = context.session + with session.begin(subtransactions=True): + self._process_l3_delete(context, id) + return super(Fake1, self).delete_network(context, id) + + def create_port(self, context, port): + port = super(Fake1, self).create_port(context, port) + return port + + def create_subnet(self, context, subnet): + subnet = super(Fake1, self).create_subnet(context, subnet) + return subnet + + def update_port(self, context, id, port): + port = super(Fake1, self).update_port(context, id, port) + return port + + def delete_port(self, context, id, l3_port_check=True): + if l3_port_check: + self.prevent_l3_port_deletion(context, id) + 
self.disassociate_floatingips(context, id) + return super(Fake1, self).delete_port(context, id) + + +class Fake2(Fake1): + def fake_func(self): + return 'fake2' + + def fake_func2(self): + return 'fake2' + + def start_rpc_listeners(self): + # return value is only used to confirm this method was called. + return 'OK' diff --git a/neutron/tests/unit/metaplugin/test_basic.py b/neutron/tests/unit/metaplugin/test_basic.py new file mode 100644 index 000000000..5f2046957 --- /dev/null +++ b/neutron/tests/unit/metaplugin/test_basic.py @@ -0,0 +1,78 @@ +# Copyright (c) 2012 OpenStack Foundation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from neutron.tests.unit.metaplugin import test_metaplugin +from neutron.tests.unit import test_db_plugin as test_plugin +from neutron.tests.unit import test_l3_plugin + + +class MetaPluginV2DBTestCase(test_plugin.NeutronDbPluginV2TestCase): + + _plugin_name = ('neutron.plugins.metaplugin.' + 'meta_neutron_plugin.MetaPluginV2') + + def setUp(self, plugin=None, ext_mgr=None, + service_plugins=None): + # NOTE(salv-orlando): The plugin keyword argument is ignored, + # as this class will always invoke super with self._plugin_name. + # These keyword parameters ensure setUp methods always have the + # same signature. 
+ test_metaplugin.setup_metaplugin_conf() + ext_mgr = ext_mgr or test_l3_plugin.L3TestExtensionManager() + self.addCleanup(test_metaplugin.unregister_meta_hooks) + super(MetaPluginV2DBTestCase, self).setUp( + plugin=self._plugin_name, ext_mgr=ext_mgr, + service_plugins=service_plugins) + + +class TestMetaBasicGet(test_plugin.TestBasicGet, + MetaPluginV2DBTestCase): + pass + + +class TestMetaV2HTTPResponse(test_plugin.TestV2HTTPResponse, + MetaPluginV2DBTestCase): + pass + + +class TestMetaPortsV2(test_plugin.TestPortsV2, + MetaPluginV2DBTestCase): + pass + + +class TestMetaNetworksV2(test_plugin.TestNetworksV2, + MetaPluginV2DBTestCase): + pass + + +class TestMetaSubnetsV2(test_plugin.TestSubnetsV2, + MetaPluginV2DBTestCase): + #TODO(nati) This test fails if we run all test, but It success just one + def test_update_subnet_route(self): + pass + + def test_update_subnet_dns_to_None(self): + pass + + def test_update_subnet_route_to_None(self): + pass + + def test_update_subnet_dns(self): + pass + + +class TestMetaL3NatDBTestCase(test_l3_plugin.L3NatDBIntTestCase, + MetaPluginV2DBTestCase): + pass diff --git a/neutron/tests/unit/metaplugin/test_metaplugin.py b/neutron/tests/unit/metaplugin/test_metaplugin.py new file mode 100644 index 000000000..7dc621978 --- /dev/null +++ b/neutron/tests/unit/metaplugin/test_metaplugin.py @@ -0,0 +1,404 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012, Nachi Ueno, NTT MCL, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +import mock +from oslo.config import cfg +import testtools + +from neutron.common import exceptions as exc +from neutron.common import topics +from neutron import context +from neutron.db import api as db +from neutron.db import db_base_plugin_v2 +from neutron.db import models_v2 +from neutron.extensions import flavor as ext_flavor +from neutron.openstack.common import uuidutils +from neutron.plugins.metaplugin import meta_neutron_plugin +from neutron.tests import base + +CONF_FILE = "" +META_PATH = "neutron.plugins.metaplugin" +FAKE_PATH = "neutron.tests.unit.metaplugin" +PROXY_PATH = "%s.proxy_neutron_plugin.ProxyPluginV2" % META_PATH +PLUGIN_LIST = """ +fake1:%s.fake_plugin.Fake1,fake2:%s.fake_plugin.Fake2,proxy:%s +""".strip() % (FAKE_PATH, FAKE_PATH, PROXY_PATH) +L3_PLUGIN_LIST = """ +fake1:%s.fake_plugin.Fake1,fake2:%s.fake_plugin.Fake2 +""".strip() % (FAKE_PATH, FAKE_PATH) + + +def setup_metaplugin_conf(has_l3=True): + cfg.CONF.set_override('auth_url', 'http://localhost:35357/v2.0', + 'PROXY') + cfg.CONF.set_override('auth_region', 'RegionOne', 'PROXY') + cfg.CONF.set_override('admin_user', 'neutron', 'PROXY') + cfg.CONF.set_override('admin_password', 'password', 'PROXY') + cfg.CONF.set_override('admin_tenant_name', 'service', 'PROXY') + cfg.CONF.set_override('plugin_list', PLUGIN_LIST, 'META') + if has_l3: + cfg.CONF.set_override('l3_plugin_list', L3_PLUGIN_LIST, 'META') + else: + cfg.CONF.set_override('l3_plugin_list', "", 'META') + cfg.CONF.set_override('default_flavor', 'fake2', 'META') + cfg.CONF.set_override('default_l3_flavor', 'fake1', 'META') + cfg.CONF.set_override('base_mac', "12:34:56:78:90:ab") + #TODO(nati) remove this after subnet quota change is merged + cfg.CONF.set_override('max_dns_nameservers', 10) + cfg.CONF.set_override('rpc_backend', + 'neutron.openstack.common.rpc.impl_fake') + + +# Hooks registered by metaplugin must not exist 
for other plugins UT. +# So hooks must be unregistered (overwrite to None in fact). +def unregister_meta_hooks(): + db_base_plugin_v2.NeutronDbPluginV2.register_model_query_hook( + models_v2.Network, 'metaplugin_net', None, None, None) + db_base_plugin_v2.NeutronDbPluginV2.register_model_query_hook( + models_v2.Port, 'metaplugin_port', None, None, None) + + +class MetaNeutronPluginV2Test(base.BaseTestCase): + """Class conisting of MetaNeutronPluginV2 unit tests.""" + + has_l3 = True + + def setUp(self): + super(MetaNeutronPluginV2Test, self).setUp() + db._ENGINE = None + db._MAKER = None + self.fake_tenant_id = uuidutils.generate_uuid() + self.context = context.get_admin_context() + + db.configure_db() + self.addCleanup(db.clear_db) + self.addCleanup(unregister_meta_hooks) + + setup_metaplugin_conf(self.has_l3) + + self.client_cls_p = mock.patch('neutronclient.v2_0.client.Client') + client_cls = self.client_cls_p.start() + self.client_inst = mock.Mock() + client_cls.return_value = self.client_inst + self.client_inst.create_network.return_value = \ + {'id': 'fake_id'} + self.client_inst.create_port.return_value = \ + {'id': 'fake_id'} + self.client_inst.create_subnet.return_value = \ + {'id': 'fake_id'} + self.client_inst.update_network.return_value = \ + {'id': 'fake_id'} + self.client_inst.update_port.return_value = \ + {'id': 'fake_id'} + self.client_inst.update_subnet.return_value = \ + {'id': 'fake_id'} + self.client_inst.delete_network.return_value = True + self.client_inst.delete_port.return_value = True + self.client_inst.delete_subnet.return_value = True + plugin = (meta_neutron_plugin.MetaPluginV2.__module__ + '.' 
+ + meta_neutron_plugin.MetaPluginV2.__name__) + self.setup_coreplugin(plugin) + self.plugin = meta_neutron_plugin.MetaPluginV2(configfile=None) + + def _fake_network(self, flavor): + data = {'network': {'name': flavor, + 'admin_state_up': True, + 'shared': False, + 'router:external': [], + 'tenant_id': self.fake_tenant_id, + ext_flavor.FLAVOR_NETWORK: flavor}} + return data + + def _fake_port(self, net_id): + return {'port': {'name': net_id, + 'network_id': net_id, + 'admin_state_up': True, + 'device_id': 'bad_device_id', + 'device_owner': 'bad_device_owner', + 'admin_state_up': True, + 'host_routes': [], + 'fixed_ips': [], + 'mac_address': + self.plugin._generate_mac(self.context, net_id), + 'tenant_id': self.fake_tenant_id}} + + def _fake_subnet(self, net_id): + allocation_pools = [{'start': '10.0.0.2', + 'end': '10.0.0.254'}] + return {'subnet': {'name': net_id, + 'network_id': net_id, + 'gateway_ip': '10.0.0.1', + 'dns_nameservers': ['10.0.0.2'], + 'host_routes': [], + 'cidr': '10.0.0.0/24', + 'allocation_pools': allocation_pools, + 'enable_dhcp': True, + 'ip_version': 4}} + + def _fake_router(self, flavor): + data = {'router': {'name': flavor, 'admin_state_up': True, + 'tenant_id': self.fake_tenant_id, + ext_flavor.FLAVOR_ROUTER: flavor, + 'external_gateway_info': None}} + return data + + def test_create_delete_network(self): + network1 = self._fake_network('fake1') + ret1 = self.plugin.create_network(self.context, network1) + self.assertEqual('fake1', ret1[ext_flavor.FLAVOR_NETWORK]) + + network2 = self._fake_network('fake2') + ret2 = self.plugin.create_network(self.context, network2) + self.assertEqual('fake2', ret2[ext_flavor.FLAVOR_NETWORK]) + + network3 = self._fake_network('proxy') + ret3 = self.plugin.create_network(self.context, network3) + self.assertEqual('proxy', ret3[ext_flavor.FLAVOR_NETWORK]) + + db_ret1 = self.plugin.get_network(self.context, ret1['id']) + self.assertEqual('fake1', db_ret1['name']) + + db_ret2 = 
self.plugin.get_network(self.context, ret2['id']) + self.assertEqual('fake2', db_ret2['name']) + + db_ret3 = self.plugin.get_network(self.context, ret3['id']) + self.assertEqual('proxy', db_ret3['name']) + + db_ret4 = self.plugin.get_networks(self.context) + self.assertEqual(3, len(db_ret4)) + + db_ret5 = self.plugin.get_networks( + self.context, + {ext_flavor.FLAVOR_NETWORK: ['fake1']}) + self.assertEqual(1, len(db_ret5)) + self.assertEqual('fake1', db_ret5[0]['name']) + self.plugin.delete_network(self.context, ret1['id']) + self.plugin.delete_network(self.context, ret2['id']) + self.plugin.delete_network(self.context, ret3['id']) + + def test_create_delete_port(self): + network1 = self._fake_network('fake1') + network_ret1 = self.plugin.create_network(self.context, network1) + network2 = self._fake_network('fake2') + network_ret2 = self.plugin.create_network(self.context, network2) + network3 = self._fake_network('proxy') + network_ret3 = self.plugin.create_network(self.context, network3) + + port1 = self._fake_port(network_ret1['id']) + port2 = self._fake_port(network_ret2['id']) + port3 = self._fake_port(network_ret3['id']) + + port1_ret = self.plugin.create_port(self.context, port1) + port2_ret = self.plugin.create_port(self.context, port2) + port3_ret = self.plugin.create_port(self.context, port3) + ports_all = self.plugin.get_ports(self.context) + + self.assertEqual(network_ret1['id'], port1_ret['network_id']) + self.assertEqual(network_ret2['id'], port2_ret['network_id']) + self.assertEqual(network_ret3['id'], port3_ret['network_id']) + self.assertEqual(3, len(ports_all)) + + port1_dict = self.plugin._make_port_dict(port1_ret) + port2_dict = self.plugin._make_port_dict(port2_ret) + port3_dict = self.plugin._make_port_dict(port3_ret) + + self.assertEqual(port1_dict, port1_ret) + self.assertEqual(port2_dict, port2_ret) + self.assertEqual(port3_dict, port3_ret) + + port1['port']['admin_state_up'] = False + port2['port']['admin_state_up'] = False + 
port3['port']['admin_state_up'] = False + self.plugin.update_port(self.context, port1_ret['id'], port1) + self.plugin.update_port(self.context, port2_ret['id'], port2) + self.plugin.update_port(self.context, port3_ret['id'], port3) + port_in_db1 = self.plugin.get_port(self.context, port1_ret['id']) + port_in_db2 = self.plugin.get_port(self.context, port2_ret['id']) + port_in_db3 = self.plugin.get_port(self.context, port3_ret['id']) + self.assertEqual(False, port_in_db1['admin_state_up']) + self.assertEqual(False, port_in_db2['admin_state_up']) + self.assertEqual(False, port_in_db3['admin_state_up']) + + self.plugin.delete_port(self.context, port1_ret['id']) + self.plugin.delete_port(self.context, port2_ret['id']) + self.plugin.delete_port(self.context, port3_ret['id']) + + self.plugin.delete_network(self.context, network_ret1['id']) + self.plugin.delete_network(self.context, network_ret2['id']) + self.plugin.delete_network(self.context, network_ret3['id']) + + def test_create_delete_subnet(self): + # for this test we need to enable overlapping ips + cfg.CONF.set_default('allow_overlapping_ips', True) + network1 = self._fake_network('fake1') + network_ret1 = self.plugin.create_network(self.context, network1) + network2 = self._fake_network('fake2') + network_ret2 = self.plugin.create_network(self.context, network2) + network3 = self._fake_network('proxy') + network_ret3 = self.plugin.create_network(self.context, network3) + + subnet1 = self._fake_subnet(network_ret1['id']) + subnet2 = self._fake_subnet(network_ret2['id']) + subnet3 = self._fake_subnet(network_ret3['id']) + + subnet1_ret = self.plugin.create_subnet(self.context, subnet1) + subnet2_ret = self.plugin.create_subnet(self.context, subnet2) + subnet3_ret = self.plugin.create_subnet(self.context, subnet3) + self.assertEqual(network_ret1['id'], subnet1_ret['network_id']) + self.assertEqual(network_ret2['id'], subnet2_ret['network_id']) + self.assertEqual(network_ret3['id'], subnet3_ret['network_id']) + + 
subnet_in_db1 = self.plugin.get_subnet(self.context, subnet1_ret['id']) + subnet_in_db2 = self.plugin.get_subnet(self.context, subnet2_ret['id']) + subnet_in_db3 = self.plugin.get_subnet(self.context, subnet3_ret['id']) + + subnet1['subnet']['allocation_pools'].pop() + subnet2['subnet']['allocation_pools'].pop() + subnet3['subnet']['allocation_pools'].pop() + + self.plugin.update_subnet(self.context, + subnet1_ret['id'], subnet1) + self.plugin.update_subnet(self.context, + subnet2_ret['id'], subnet2) + self.plugin.update_subnet(self.context, + subnet3_ret['id'], subnet3) + subnet_in_db1 = self.plugin.get_subnet(self.context, subnet1_ret['id']) + subnet_in_db2 = self.plugin.get_subnet(self.context, subnet2_ret['id']) + subnet_in_db3 = self.plugin.get_subnet(self.context, subnet3_ret['id']) + + self.assertEqual(4, subnet_in_db1['ip_version']) + self.assertEqual(4, subnet_in_db2['ip_version']) + self.assertEqual(4, subnet_in_db3['ip_version']) + + self.plugin.delete_subnet(self.context, subnet1_ret['id']) + self.plugin.delete_subnet(self.context, subnet2_ret['id']) + self.plugin.delete_subnet(self.context, subnet3_ret['id']) + + self.plugin.delete_network(self.context, network_ret1['id']) + self.plugin.delete_network(self.context, network_ret2['id']) + self.plugin.delete_network(self.context, network_ret3['id']) + + def test_create_delete_router(self): + router1 = self._fake_router('fake1') + router_ret1 = self.plugin.create_router(self.context, router1) + router2 = self._fake_router('fake2') + router_ret2 = self.plugin.create_router(self.context, router2) + + self.assertEqual('fake1', router_ret1[ext_flavor.FLAVOR_ROUTER]) + self.assertEqual('fake2', router_ret2[ext_flavor.FLAVOR_ROUTER]) + + router_in_db1 = self.plugin.get_router(self.context, router_ret1['id']) + router_in_db2 = self.plugin.get_router(self.context, router_ret2['id']) + + self.assertEqual('fake1', router_in_db1[ext_flavor.FLAVOR_ROUTER]) + self.assertEqual('fake2', 
router_in_db2[ext_flavor.FLAVOR_ROUTER]) + + self.plugin.delete_router(self.context, router_ret1['id']) + self.plugin.delete_router(self.context, router_ret2['id']) + with testtools.ExpectedException(meta_neutron_plugin.FlavorNotFound): + self.plugin.get_router(self.context, router_ret1['id']) + + def test_extension_method(self): + self.assertEqual('fake1', self.plugin.fake_func()) + self.assertEqual('fake2', self.plugin.fake_func2()) + + def test_extension_not_implemented_method(self): + try: + self.plugin.not_implemented() + except AttributeError: + return + except Exception: + self.fail("AttributeError Error is not raised") + + self.fail("No Error is not raised") + + def test_create_network_flavor_fail(self): + with mock.patch('neutron.plugins.metaplugin.meta_db_v2.' + 'add_network_flavor_binding', + side_effect=Exception): + network = self._fake_network('fake1') + self.assertRaises(meta_neutron_plugin.FaildToAddFlavorBinding, + self.plugin.create_network, + self.context, + network) + count = self.plugin.get_networks_count(self.context) + self.assertEqual(count, 0) + + def test_create_router_flavor_fail(self): + with mock.patch('neutron.plugins.metaplugin.meta_db_v2.' 
+ 'add_router_flavor_binding', + side_effect=Exception): + router = self._fake_router('fake1') + self.assertRaises(meta_neutron_plugin.FaildToAddFlavorBinding, + self.plugin.create_router, + self.context, + router) + count = self.plugin.get_routers_count(self.context) + self.assertEqual(count, 0) + + +class MetaNeutronPluginV2TestWithoutL3(MetaNeutronPluginV2Test): + """Tests without l3_plugin_list configration.""" + + has_l3 = False + + def test_supported_extension_aliases(self): + self.assertEqual(self.plugin.supported_extension_aliases, + ['flavor', 'external-net']) + + def test_create_delete_router(self): + self.skipTest("Test case without router") + + def test_create_router_flavor_fail(self): + self.skipTest("Test case without router") + + +class MetaNeutronPluginV2TestRpcFlavor(base.BaseTestCase): + """Tests for rpc_flavor.""" + + def setUp(self): + super(MetaNeutronPluginV2TestRpcFlavor, self).setUp() + db._ENGINE = None + db._MAKER = None + db.configure_db() + self.addCleanup(db.clear_db) + self.addCleanup(unregister_meta_hooks) + + def test_rpc_flavor(self): + setup_metaplugin_conf() + cfg.CONF.set_override('rpc_flavor', 'fake1', 'META') + self.plugin = meta_neutron_plugin.MetaPluginV2() + self.assertEqual(topics.PLUGIN, 'q-plugin') + ret = self.plugin.rpc_workers_supported() + self.assertFalse(ret) + + def test_invalid_rpc_flavor(self): + setup_metaplugin_conf() + cfg.CONF.set_override('rpc_flavor', 'fake-fake', 'META') + self.assertRaises(exc.Invalid, + meta_neutron_plugin.MetaPluginV2) + self.assertEqual(topics.PLUGIN, 'q-plugin') + + def test_rpc_flavor_multiple_rpc_workers(self): + setup_metaplugin_conf() + cfg.CONF.set_override('rpc_flavor', 'fake2', 'META') + self.plugin = meta_neutron_plugin.MetaPluginV2() + self.assertEqual(topics.PLUGIN, 'q-plugin') + ret = self.plugin.rpc_workers_supported() + self.assertTrue(ret) + ret = self.plugin.start_rpc_listeners() + self.assertEqual('OK', ret) diff --git a/neutron/tests/unit/midonet/__init__.py 
b/neutron/tests/unit/midonet/__init__.py new file mode 100644 index 000000000..439ff6594 --- /dev/null +++ b/neutron/tests/unit/midonet/__init__.py @@ -0,0 +1,17 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (C) 2012 Midokura Japan K.K. +# Copyright (C) 2013 Midokura PTE LTD +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/neutron/tests/unit/midonet/etc/midonet.ini.test b/neutron/tests/unit/midonet/etc/midonet.ini.test new file mode 100644 index 000000000..8e4fc847f --- /dev/null +++ b/neutron/tests/unit/midonet/etc/midonet.ini.test @@ -0,0 +1,16 @@ +[midonet] + +# MidoNet API server URI +midonet_uri = http://localhost:8080/midonet-api + +# MidoNet admin username +username = admin + +# MidoNet admin password +password = passw0rd + +# Virtual provider router ID +provider_router_id = 00112233-0011-0011-0011-001122334455 + +# Virtual metadata router ID +metadata_router_id = ffeeddcc-ffee-ffee-ffee-ffeeddccbbaa diff --git a/neutron/tests/unit/midonet/mock_lib.py b/neutron/tests/unit/midonet/mock_lib.py new file mode 100644 index 000000000..9fdae9cd6 --- /dev/null +++ b/neutron/tests/unit/midonet/mock_lib.py @@ -0,0 +1,265 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (C) 2013 Midokura PTE LTD +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Ryu Ishimoto, Midokura Japan KK + +import mock +import uuid + + +def get_bridge_mock(id=None, **kwargs): + if id is None: + id = str(uuid.uuid4()) + + bridge = mock.Mock() + bridge.get_id.return_value = id + bridge.get_tenant_id.return_value = kwargs.get("tenant_id", "test-tenant") + bridge.get_name.return_value = kwargs.get("name", "net") + bridge.get_ports.return_value = [] + bridge.get_peer_ports.return_value = [] + bridge.get_admin_state_up.return_value = kwargs.get("admin_state_up", True) + return bridge + + +def get_bridge_port_mock(id=None, bridge_id=None, **kwargs): + if id is None: + id = str(uuid.uuid4()) + if bridge_id is None: + bridge_id = str(uuid.uuid4()) + + port = mock.Mock() + port.get_id.return_value = id + port.get_bridge_id.return_value = bridge_id + port.get_admin_state_up.return_value = kwargs.get("admin_state_up", True) + port.get_type.return_value = "Bridge" + port.create.return_value = port + return port + + +def get_chain_mock(id=None, tenant_id='test-tenant', name='chain', + rules=None): + if id is None: + id = str(uuid.uuid4()) + + if rules is None: + rules = [] + + chain = mock.Mock() + chain.get_id.return_value = id + chain.get_tenant_id.return_value = tenant_id + chain.get_name.return_value = name + chain.get_rules.return_value = rules + return chain + + +def get_port_group_mock(id=None, tenant_id='test-tenant', name='pg'): + if id is None: + id = str(uuid.uuid4()) + + port_group = mock.Mock() + port_group.get_id.return_value = id + port_group.get_tenant_id.return_value = tenant_id + 
port_group.get_name.return_value = name + return port_group + + +def get_router_mock(id=None, **kwargs): + if id is None: + id = str(uuid.uuid4()) + + router = mock.Mock() + router.get_id.return_value = id + router.get_tenant_id.return_value = kwargs.get("tenant_id", "test-tenant") + router.get_name.return_value = kwargs.get("name", "router") + router.get_ports.return_value = [] + router.get_peer_ports.return_value = [] + router.get_routes.return_value = [] + router.get_admin_state_up.return_value = kwargs.get("admin_state_up", True) + return router + + +def get_rule_mock(id=None, chain_id=None, properties=None): + if id is None: + id = str(uuid.uuid4()) + + if chain_id is None: + chain_id = str(uuid.uuid4()) + + if properties is None: + properties = {} + + rule = mock.Mock() + rule.get_id.return_value = id + rule.get_chain_id.return_value = chain_id + rule.get_properties.return_value = properties + return rule + + +def get_subnet_mock(bridge_id=None, gateway_ip='10.0.0.1', + subnet_prefix='10.0.0.0', subnet_len=int(24)): + if bridge_id is None: + bridge_id = str(uuid.uuid4()) + + subnet = mock.Mock() + subnet.get_id.return_value = subnet_prefix + '/' + str(subnet_len) + subnet.get_bridge_id.return_value = bridge_id + subnet.get_default_gateway.return_value = gateway_ip + subnet.get_subnet_prefix.return_value = subnet_prefix + subnet.get_subnet_length.return_value = subnet_len + return subnet + + +class MidonetLibMockConfig(): + + def __init__(self, inst): + self.inst = inst + + def _create_bridge(self, **kwargs): + return get_bridge_mock(**kwargs) + + def _create_router(self, **kwargs): + return get_router_mock(**kwargs) + + def _create_subnet(self, bridge, gateway_ip, subnet_prefix, subnet_len): + return get_subnet_mock(bridge.get_id(), gateway_ip=gateway_ip, + subnet_prefix=subnet_prefix, + subnet_len=subnet_len) + + def _add_bridge_port(self, bridge, **kwargs): + return get_bridge_port_mock(bridge_id=bridge.get_id(), **kwargs) + + def _get_bridge(self, id): + 
return get_bridge_mock(id=id) + + def _get_port(self, id): + return get_bridge_port_mock(id=id) + + def _get_router(self, id): + return get_router_mock(id=id) + + def _update_bridge(self, id, **kwargs): + return get_bridge_mock(id=id, **kwargs) + + def setup(self): + # Bridge methods side effects + self.inst.create_bridge.side_effect = self._create_bridge + self.inst.get_bridge.side_effect = self._get_bridge + self.inst.update_bridge.side_effect = self._update_bridge + + # Subnet methods side effects + self.inst.create_subnet.side_effect = self._create_subnet + + # Port methods side effects + ex_bp = self.inst.add_bridge_port + ex_bp.side_effect = self._add_bridge_port + self.inst.get_port.side_effect = self._get_port + + # Router methods side effects + self.inst.create_router.side_effect = self._create_router + self.inst.get_router.side_effect = self._get_router + + +class MidoClientMockConfig(): + + def __init__(self, inst): + self.inst = inst + self.chains_in = None + self.port_groups_in = None + self.chains_out = None + self.rules_out = None + self.port_groups_out = None + + def _get_query_tenant_id(self, query): + if query is not None and query['tenant_id']: + tenant_id = query['tenant_id'] + else: + tenant_id = 'test-tenant' + return tenant_id + + def _get_bridge(self, id): + return get_bridge_mock(id=id) + + def _get_chain(self, id, query=None): + if not self.chains_in: + return [] + + tenant_id = self._get_query_tenant_id(query) + for chain in self.chains_in: + chain_id = chain['id'] + if chain_id is id: + rule_mocks = [] + if 'rules' in chain: + for rule in chain['rules']: + rule_mocks.append( + get_rule_mock(id=rule['id'], + chain_id=id, + properties=rule['properties'])) + + return get_chain_mock(id=chain_id, name=chain['name'], + tenant_id=tenant_id, rules=rule_mocks) + return None + + def _get_chains(self, query=None): + if not self.chains_in: + return [] + + tenant_id = self._get_query_tenant_id(query) + self.chains_out = [] + self.rules_out = [] + for 
chain in self.chains_in: + chain_id = chain['id'] + + rule_mocks = [] + if 'rules' in chain: + for rule in chain['rules']: + rule_mocks.append( + get_rule_mock(id=rule['id'], + chain_id=id, + properties=rule['properties'])) + self.rules_out += rule_mocks + + self.chains_out.append(get_chain_mock(id=chain_id, + name=chain['name'], + tenant_id=tenant_id, + rules=rule_mocks)) + return self.chains_out + + def _get_port_groups(self, query=None): + if not self.port_groups_in: + return [] + + tenant_id = self._get_query_tenant_id(query) + self.port_groups_out = [] + for port_group in self.port_groups_in: + self.port_groups_out.append(get_port_group_mock( + id=port_group['id'], name=port_group['name'], + tenant_id=tenant_id)) + return self.port_groups_out + + def _get_router(self, id): + return get_router_mock(id=id) + + def _add_bridge_port(self, bridge): + return get_bridge_port_mock(bridge_id=bridge.get_id()) + + def setup(self): + self.inst.get_bridge.side_effect = self._get_bridge + self.inst.get_chains.side_effect = self._get_chains + self.inst.get_chain.side_effect = self._get_chain + self.inst.get_port_groups.side_effect = self._get_port_groups + self.inst.get_router.side_effect = self._get_router + self.inst.add_bridge_port.side_effect = self._add_bridge_port diff --git a/neutron/tests/unit/midonet/test_midonet_driver.py b/neutron/tests/unit/midonet/test_midonet_driver.py new file mode 100644 index 000000000..67677c92a --- /dev/null +++ b/neutron/tests/unit/midonet/test_midonet_driver.py @@ -0,0 +1,55 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (C) 2012 Midokura Japan K.K. +# Copyright (C) 2013 Midokura PTE LTD +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Rossella Sblendido, Midokura Japan KK + +import mock + +from neutron.agent.common import config +from neutron.agent.linux import dhcp +from neutron.common import config as base_config +import neutron.plugins.midonet.agent.midonet_driver as driver +from neutron.tests import base + + +class FakeNetwork: + id = 'aaaabbbb-cccc-dddd-eeee-ffff00001111' + namespace = 'qdhcp-ns' + + +class TestDhcpNoOpDriver(base.BaseTestCase): + def setUp(self): + super(TestDhcpNoOpDriver, self).setUp() + self.conf = config.setup_conf() + config.register_interface_driver_opts_helper(self.conf) + self.conf.register_opts(base_config.core_opts) + self.conf.register_opts(dhcp.OPTS) + self.conf.enable_isolated_metadata = True + self.conf.use_namespaces = True + instance = mock.patch("neutron.agent.linux.dhcp.DeviceManager") + self.mock_mgr = instance.start() + + def test_disable_no_retain_port(self): + dhcp_driver = driver.DhcpNoOpDriver(self.conf, FakeNetwork()) + dhcp_driver.disable(retain_port=False) + self.assertTrue(self.mock_mgr.return_value.destroy.called) + + def test_disable_retain_port(self): + dhcp_driver = driver.DhcpNoOpDriver(self.conf, FakeNetwork()) + dhcp_driver.disable(retain_port=True) + self.assertFalse(self.mock_mgr.return_value.destroy.called) diff --git a/neutron/tests/unit/midonet/test_midonet_lib.py b/neutron/tests/unit/midonet/test_midonet_lib.py new file mode 100644 index 000000000..bed900fa3 --- /dev/null +++ b/neutron/tests/unit/midonet/test_midonet_lib.py @@ -0,0 +1,189 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (C) 2012 
Midokura Japan K.K. +# Copyright (C) 2013 Midokura PTE LTD +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Ryu Ishimoto, Midokura Japan KK +# @author: Tomoe Sugihara, Midokura Japan KK +import sys + +import mock +import testtools +import webob.exc as w_exc + +from neutron.openstack.common import uuidutils +with mock.patch.dict(sys.modules, {'midonetclient': mock.Mock()}): + from neutron.plugins.midonet import midonet_lib +import neutron.tests.unit.midonet.mock_lib as mock_lib + + +def _create_test_chain(id, name, tenant_id): + return {'id': id, 'name': name, 'tenant_id': tenant_id} + + +def _create_test_port_group(id, name, tenant_id): + return {"id": id, "name": name, "tenant_id": tenant_id} + + +class MidoClientTestCase(testtools.TestCase): + + def setUp(self): + super(MidoClientTestCase, self).setUp() + self._tenant_id = 'test-tenant' + self.mock_api = mock.Mock() + self.mock_api_cfg = mock_lib.MidoClientMockConfig(self.mock_api) + self.mock_api_cfg.setup() + self.client = midonet_lib.MidoClient(self.mock_api) + + def test_delete_chains_by_names(self): + + tenant_id = uuidutils.generate_uuid() + chain1_id = uuidutils.generate_uuid() + chain1 = _create_test_chain(chain1_id, "chain1", tenant_id) + + chain2_id = uuidutils.generate_uuid() + chain2 = _create_test_chain(chain2_id, "chain2", tenant_id) + + calls = [mock.call.delete_chain(chain1_id), + mock.call.delete_chain(chain2_id)] + self.mock_api_cfg.chains_in = [chain2, chain1] 
+ self.client.delete_chains_by_names(tenant_id, ["chain1", "chain2"]) + + self.mock_api.assert_has_calls(calls, any_order=True) + + def test_delete_port_group_by_name(self): + + tenant_id = uuidutils.generate_uuid() + pg1_id = uuidutils.generate_uuid() + pg1 = _create_test_port_group(pg1_id, "pg1", tenant_id) + pg2_id = uuidutils.generate_uuid() + pg2 = _create_test_port_group(pg2_id, "pg2", tenant_id) + + self.mock_api_cfg.port_groups_in = [pg1, pg2] + self.client.delete_port_group_by_name(tenant_id, "pg1") + self.mock_api.delete_port_group.assert_called_once_with(pg1_id) + + def test_create_dhcp(self): + + bridge = mock.Mock() + + gateway_ip = "192.168.1.1" + cidr = "192.168.1.0/24" + host_rts = [{'destination': '10.0.0.0/24', 'nexthop': '10.0.0.1'}, + {'destination': '10.0.1.0/24', 'nexthop': '10.0.1.1'}] + dns_servers = ["8.8.8.8", "8.8.4.4"] + + dhcp_call = mock.call.add_bridge_dhcp(bridge, gateway_ip, cidr, + host_rts=host_rts, + dns_nservers=dns_servers) + + self.client.create_dhcp(bridge, gateway_ip, cidr, host_rts=host_rts, + dns_servers=dns_servers) + self.mock_api.assert_has_calls([dhcp_call]) + + def test_delete_dhcp(self): + + bridge = mock.Mock() + subnet = mock.Mock() + subnet.get_subnet_prefix.return_value = "10.0.0.0" + subnets = mock.MagicMock(return_value=[subnet]) + bridge.get_dhcp_subnets.side_effect = subnets + self.client.delete_dhcp(bridge, "10.0.0.0/24") + bridge.assert_has_calls(mock.call.get_dhcp_subnets) + subnet.assert_has_calls([mock.call.get_subnet_prefix(), + mock.call.delete()]) + + def test_add_dhcp_host(self): + + bridge = mock.Mock() + dhcp_subnet_call = mock.call.get_dhcp_subnet("10.0.0.0_24") + ip_addr_call = dhcp_subnet_call.add_dhcp_host().ip_addr("10.0.0.10") + mac_addr_call = ip_addr_call.mac_addr("2A:DB:6B:8C:19:99") + calls = [dhcp_subnet_call, ip_addr_call, mac_addr_call, + mac_addr_call.create()] + + self.client.add_dhcp_host(bridge, "10.0.0.0/24", "10.0.0.10", + "2A:DB:6B:8C:19:99") + bridge.assert_has_calls(calls, 
any_order=True) + + def test_add_dhcp_route_option(self): + + bridge = mock.Mock() + subnet = bridge.get_dhcp_subnet.return_value + subnet.get_opt121_routes.return_value = None + dhcp_subnet_call = mock.call.get_dhcp_subnet("10.0.0.0_24") + dst_ip = "10.0.0.3/24" + gw_ip = "10.0.0.1" + prefix, length = dst_ip.split("/") + routes = [{'destinationPrefix': prefix, 'destinationLength': length, + 'gatewayAddr': gw_ip}] + opt121_routes_call = dhcp_subnet_call.opt121_routes(routes) + calls = [dhcp_subnet_call, opt121_routes_call, + opt121_routes_call.update()] + + self.client.add_dhcp_route_option(bridge, "10.0.0.0/24", + gw_ip, dst_ip) + bridge.assert_has_calls(calls, any_order=True) + + def test_get_router_error(self): + self.mock_api.get_router.side_effect = w_exc.HTTPInternalServerError() + self.assertRaises(midonet_lib.MidonetApiException, + self.client.get_router, uuidutils.generate_uuid()) + + def test_get_router_not_found(self): + self.mock_api.get_router.side_effect = w_exc.HTTPNotFound() + self.assertRaises(midonet_lib.MidonetResourceNotFound, + self.client.get_router, uuidutils.generate_uuid()) + + def test_get_bridge_error(self): + self.mock_api.get_bridge.side_effect = w_exc.HTTPInternalServerError() + self.assertRaises(midonet_lib.MidonetApiException, + self.client.get_bridge, uuidutils.generate_uuid()) + + def test_get_bridge_not_found(self): + self.mock_api.get_bridge.side_effect = w_exc.HTTPNotFound() + self.assertRaises(midonet_lib.MidonetResourceNotFound, + self.client.get_bridge, uuidutils.generate_uuid()) + + def test_get_bridge(self): + bridge_id = uuidutils.generate_uuid() + + bridge = self.client.get_bridge(bridge_id) + + self.assertIsNotNone(bridge) + self.assertEqual(bridge.get_id(), bridge_id) + self.assertTrue(bridge.get_admin_state_up()) + + def test_add_bridge_port(self): + bridge_id = uuidutils.generate_uuid() + + bridge = self.client.get_bridge(bridge_id) + + self.assertIsNotNone(bridge) + + port = self.client.add_bridge_port(bridge) + + 
self.assertEqual(bridge.get_id(), port.get_bridge_id()) + self.assertTrue(port.get_admin_state_up()) + + def test_get_router(self): + router_id = uuidutils.generate_uuid() + + router = self.client.get_router(router_id) + + self.assertIsNotNone(router) + self.assertEqual(router.get_id(), router_id) + self.assertTrue(router.get_admin_state_up()) diff --git a/neutron/tests/unit/midonet/test_midonet_plugin.py b/neutron/tests/unit/midonet/test_midonet_plugin.py new file mode 100644 index 000000000..46ed5bf0d --- /dev/null +++ b/neutron/tests/unit/midonet/test_midonet_plugin.py @@ -0,0 +1,218 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (C) 2012 Midokura Japan K.K. +# Copyright (C) 2013 Midokura PTE LTD +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# +# @author: Rossella Sblendido, Midokura Europe SARL +# @author: Ryu Ishimoto, Midokura Japan KK +# @author: Tomoe Sugihara, Midokura Japan KK + +import mock +import os +import sys + +import neutron.common.test_lib as test_lib +from neutron.extensions import portbindings +from neutron.tests.unit import _test_extension_portbindings as test_bindings +import neutron.tests.unit.midonet.mock_lib as mock_lib +import neutron.tests.unit.test_db_plugin as test_plugin +import neutron.tests.unit.test_extension_security_group as sg +import neutron.tests.unit.test_l3_plugin as test_l3_plugin + +MIDOKURA_PKG_PATH = "neutron.plugins.midonet.plugin" +MIDONET_PLUGIN_NAME = ('%s.MidonetPluginV2' % MIDOKURA_PKG_PATH) + + +class MidonetPluginV2TestCase(test_plugin.NeutronDbPluginV2TestCase): + + def setUp(self, + plugin=MIDONET_PLUGIN_NAME, + ext_mgr=None, + service_plugins=None): + self.mock_api = mock.patch( + 'neutron.plugins.midonet.midonet_lib.MidoClient') + etc_path = os.path.join(os.path.dirname(__file__), 'etc') + test_lib.test_config['config_files'] = [os.path.join( + etc_path, 'midonet.ini.test')] + + p = mock.patch.dict(sys.modules, {'midonetclient': mock.Mock()}) + p.start() + # dict patches must be explicitly stopped + self.addCleanup(p.stop) + self.instance = self.mock_api.start() + mock_cfg = mock_lib.MidonetLibMockConfig(self.instance.return_value) + mock_cfg.setup() + super(MidonetPluginV2TestCase, self).setUp(plugin=plugin, + ext_mgr=ext_mgr) + + def tearDown(self): + super(MidonetPluginV2TestCase, self).tearDown() + self.mock_api.stop() + + +class TestMidonetNetworksV2(test_plugin.TestNetworksV2, + MidonetPluginV2TestCase): + + pass + + +class TestMidonetL3NatTestCase(MidonetPluginV2TestCase, + test_l3_plugin.L3NatDBIntTestCase): + def setUp(self, + plugin=MIDONET_PLUGIN_NAME, + ext_mgr=None, + service_plugins=None): + super(TestMidonetL3NatTestCase, self).setUp(plugin=plugin, + ext_mgr=None, + service_plugins=None) + + def 
test_floatingip_with_invalid_create_port(self): + self._test_floatingip_with_invalid_create_port(MIDONET_PLUGIN_NAME) + + def test_floatingip_assoc_no_port(self): + with self.subnet(cidr='200.0.0.0/24') as public_sub: + self._set_net_external(public_sub['subnet']['network_id']) + res = super(TestMidonetL3NatTestCase, self)._create_floatingip( + self.fmt, public_sub['subnet']['network_id']) + # Cleanup + floatingip = self.deserialize(self.fmt, res) + self._delete('floatingips', floatingip['floatingip']['id']) + self.assertFalse(self.instance.return_value.add_static_nat.called) + + def test_floatingip_assoc_with_port(self): + with self.subnet(cidr='200.0.0.0/24') as public_sub: + self._set_net_external(public_sub['subnet']['network_id']) + with self.port() as private_port: + with self.router() as r: + # We need to hook up the private subnet to the external + # network in order to associate the fip. + sid = private_port['port']['fixed_ips'][0]['subnet_id'] + private_sub = {'subnet': {'id': sid}} + self._add_external_gateway_to_router( + r['router']['id'], + public_sub['subnet']['network_id']) + + # Check that get_link_port was called - if not, Source NAT + # will not be set up correctly on the MidoNet side + self.assertTrue( + self.instance.return_value.get_link_port.called) + + self._router_interface_action('add', r['router']['id'], + private_sub['subnet']['id'], + None) + + # Create the fip. 
+ res = super(TestMidonetL3NatTestCase, + self)._create_floatingip( + self.fmt, + public_sub['subnet']['network_id'], + port_id=private_port['port']['id']) + + # Cleanup the resources used for the test + floatingip = self.deserialize(self.fmt, res) + self._delete('floatingips', floatingip['floatingip']['id']) + self._remove_external_gateway_from_router( + r['router']['id'], + public_sub['subnet']['network_id']) + self._router_interface_action('remove', + r['router']['id'], + private_sub['subnet']['id'], + None) + self.assertTrue(self.instance.return_value.add_static_nat.called) + + +class TestMidonetSecurityGroupsTestCase(sg.SecurityGroupDBTestCase): + + _plugin_name = ('%s.MidonetPluginV2' % MIDOKURA_PKG_PATH) + + def setUp(self): + self.mock_api = mock.patch( + 'neutron.plugins.midonet.midonet_lib.MidoClient') + etc_path = os.path.join(os.path.dirname(__file__), 'etc') + test_lib.test_config['config_files'] = [os.path.join( + etc_path, 'midonet.ini.test')] + + self.instance = self.mock_api.start() + mock_cfg = mock_lib.MidonetLibMockConfig(self.instance.return_value) + mock_cfg.setup() + p = mock.patch.dict(sys.modules, {'midonetclient': mock.Mock()}) + p.start() + # dict patches must be explicitly stopped + self.addCleanup(p.stop) + super(TestMidonetSecurityGroupsTestCase, self).setUp(self._plugin_name) + + +class TestMidonetSecurityGroup(sg.TestSecurityGroups, + TestMidonetSecurityGroupsTestCase): + + pass + + +class TestMidonetSubnetsV2(test_plugin.TestSubnetsV2, + MidonetPluginV2TestCase): + + # IPv6 is not supported by MidoNet yet. Ignore tests that attempt to + # create IPv6 subnet. 
+ def test_create_subnet_inconsistent_ipv6_cidrv4(self): + pass + + def test_create_subnet_inconsistent_ipv6_dns_v4(self): + pass + + def test_create_subnet_with_v6_allocation_pool(self): + pass + + def test_update_subnet_inconsistent_ipv6_gatewayv4(self): + pass + + def test_update_subnet_inconsistent_ipv6_hostroute_dst_v4(self): + pass + + def test_update_subnet_inconsistent_ipv6_hostroute_np_v4(self): + pass + + def test_create_subnet_inconsistent_ipv6_gatewayv4(self): + pass + + def test_create_subnet_dhcp_disabled(self): + super(TestMidonetSubnetsV2, self)._test_create_subnet( + enable_dhcp=False) + self.assertFalse(self.instance.return_value.create_dhcp.called) + + +class TestMidonetPortsV2(test_plugin.TestPortsV2, + MidonetPluginV2TestCase): + + # IPv6 is not supported by MidoNet yet. Ignore tests that attempt to + # create IPv6 subnet. + + def test_requested_subnet_id_v4_and_v6(self): + pass + + def test_vif_port_binding(self): + with self.port(name='myname') as port: + self.assertEqual('midonet', port['port']['binding:vif_type']) + self.assertTrue(port['port']['admin_state_up']) + + +class TestMidonetPluginPortBinding(test_bindings.PortBindingsTestCase, + MidonetPluginV2TestCase): + + VIF_TYPE = portbindings.VIF_TYPE_MIDONET + HAS_PORT_FILTER = True + + def setUp(self): + super(TestMidonetPluginPortBinding, self).setUp() diff --git a/neutron/tests/unit/ml2/__init__.py b/neutron/tests/unit/ml2/__init__.py new file mode 100644 index 000000000..788cea1f7 --- /dev/null +++ b/neutron/tests/unit/ml2/__init__.py @@ -0,0 +1,14 @@ +# Copyright (c) 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/neutron/tests/unit/ml2/_test_mech_agent.py b/neutron/tests/unit/ml2/_test_mech_agent.py new file mode 100644 index 000000000..4fbdc10e5 --- /dev/null +++ b/neutron/tests/unit/ml2/_test_mech_agent.py @@ -0,0 +1,218 @@ +# Copyright (c) 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ + +from neutron.extensions import portbindings +from neutron.plugins.ml2 import driver_api as api +from neutron.tests import base + +NETWORK_ID = "fake_network" +PORT_ID = "fake_port" + + +class FakeNetworkContext(api.NetworkContext): + def __init__(self, segments): + self._network_segments = segments + + @property + def current(self): + return {'id': NETWORK_ID} + + @property + def original(self): + return None + + @property + def network_segments(self): + return self._network_segments + + +class FakePortContext(api.PortContext): + def __init__(self, agent_type, agents, segments, + vnic_type=portbindings.VNIC_NORMAL): + self._agent_type = agent_type + self._agents = agents + self._network_context = FakeNetworkContext(segments) + self._bound_vnic_type = vnic_type + self._bound_segment_id = None + self._bound_vif_type = None + self._bound_vif_details = None + + @property + def current(self): + return {'id': PORT_ID, + 'binding:vnic_type': self._bound_vnic_type} + + @property + def original(self): + return None + + @property + def network(self): + return self._network_context + + @property + def bound_segment(self): + if self._bound_segment_id: + for segment in self._network_context.network_segments: + if segment[api.ID] == self._bound_segment_id: + return segment + + @property + def original_bound_segment(self): + return None + + @property + def bound_driver(self): + return None + + @property + def original_bound_driver(self): + return None + + def host_agents(self, agent_type): + if agent_type == self._agent_type: + return self._agents + else: + return [] + + def set_binding(self, segment_id, vif_type, vif_details): + self._bound_segment_id = segment_id + self._bound_vif_type = vif_type + self._bound_vif_details = vif_details + + +class AgentMechanismBaseTestCase(base.BaseTestCase): + # These following must be overriden for the specific mechanism + # driver being tested: + VIF_TYPE = None + CAP_PORT_FILTER = None + AGENT_TYPE = None + AGENTS = None + AGENTS_DEAD 
= None + AGENTS_BAD = None + + def _check_unbound(self, context): + self.assertIsNone(context._bound_segment_id) + self.assertIsNone(context._bound_vif_type) + self.assertIsNone(context._bound_vif_details) + + def _check_bound(self, context, segment): + self.assertEqual(context._bound_segment_id, segment[api.ID]) + self.assertEqual(context._bound_vif_type, self.VIF_TYPE) + vif_details = context._bound_vif_details + self.assertIsNotNone(vif_details) + self.assertEqual(vif_details[portbindings.CAP_PORT_FILTER], + self.CAP_PORT_FILTER) + + +class AgentMechanismGenericTestCase(AgentMechanismBaseTestCase): + UNKNOWN_TYPE_SEGMENTS = [{api.ID: 'unknown_segment_id', + api.NETWORK_TYPE: 'no_such_type'}] + + def test_unknown_type(self): + context = FakePortContext(self.AGENT_TYPE, + self.AGENTS, + self.UNKNOWN_TYPE_SEGMENTS) + self.driver.bind_port(context) + self._check_unbound(context) + + +class AgentMechanismLocalTestCase(AgentMechanismBaseTestCase): + LOCAL_SEGMENTS = [{api.ID: 'unknown_segment_id', + api.NETWORK_TYPE: 'no_such_type'}, + {api.ID: 'local_segment_id', + api.NETWORK_TYPE: 'local'}] + + def test_type_local(self): + context = FakePortContext(self.AGENT_TYPE, + self.AGENTS, + self.LOCAL_SEGMENTS) + self.driver.bind_port(context) + self._check_bound(context, self.LOCAL_SEGMENTS[1]) + + def test_type_local_dead(self): + context = FakePortContext(self.AGENT_TYPE, + self.AGENTS_DEAD, + self.LOCAL_SEGMENTS) + self.driver.bind_port(context) + self._check_unbound(context) + + +class AgentMechanismFlatTestCase(AgentMechanismBaseTestCase): + FLAT_SEGMENTS = [{api.ID: 'unknown_segment_id', + api.NETWORK_TYPE: 'no_such_type'}, + {api.ID: 'flat_segment_id', + api.NETWORK_TYPE: 'flat', + api.PHYSICAL_NETWORK: 'fake_physical_network'}] + + def test_type_flat(self): + context = FakePortContext(self.AGENT_TYPE, + self.AGENTS, + self.FLAT_SEGMENTS) + self.driver.bind_port(context) + self._check_bound(context, self.FLAT_SEGMENTS[1]) + + def test_type_flat_bad(self): + context 
= FakePortContext(self.AGENT_TYPE, + self.AGENTS_BAD, + self.FLAT_SEGMENTS) + self.driver.bind_port(context) + self._check_unbound(context) + + +class AgentMechanismVlanTestCase(AgentMechanismBaseTestCase): + VLAN_SEGMENTS = [{api.ID: 'unknown_segment_id', + api.NETWORK_TYPE: 'no_such_type'}, + {api.ID: 'vlan_segment_id', + api.NETWORK_TYPE: 'vlan', + api.PHYSICAL_NETWORK: 'fake_physical_network', + api.SEGMENTATION_ID: 1234}] + + def test_type_vlan(self): + context = FakePortContext(self.AGENT_TYPE, + self.AGENTS, + self.VLAN_SEGMENTS) + self.driver.bind_port(context) + self._check_bound(context, self.VLAN_SEGMENTS[1]) + + def test_type_vlan_bad(self): + context = FakePortContext(self.AGENT_TYPE, + self.AGENTS_BAD, + self.VLAN_SEGMENTS) + self.driver.bind_port(context) + self._check_unbound(context) + + +class AgentMechanismGreTestCase(AgentMechanismBaseTestCase): + GRE_SEGMENTS = [{api.ID: 'unknown_segment_id', + api.NETWORK_TYPE: 'no_such_type'}, + {api.ID: 'gre_segment_id', + api.NETWORK_TYPE: 'gre', + api.SEGMENTATION_ID: 1234}] + + def test_type_gre(self): + context = FakePortContext(self.AGENT_TYPE, + self.AGENTS, + self.GRE_SEGMENTS) + self.driver.bind_port(context) + self._check_bound(context, self.GRE_SEGMENTS[1]) + + def test_type_gre_bad(self): + context = FakePortContext(self.AGENT_TYPE, + self.AGENTS_BAD, + self.GRE_SEGMENTS) + self.driver.bind_port(context) + self._check_unbound(context) diff --git a/neutron/tests/unit/ml2/drivers/__init__.py b/neutron/tests/unit/ml2/drivers/__init__.py new file mode 100644 index 000000000..788cea1f7 --- /dev/null +++ b/neutron/tests/unit/ml2/drivers/__init__.py @@ -0,0 +1,14 @@ +# Copyright (c) 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/neutron/tests/unit/ml2/drivers/brocade/__init__.py b/neutron/tests/unit/ml2/drivers/brocade/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/neutron/tests/unit/ml2/drivers/brocade/test_brocade_mechanism_driver.py b/neutron/tests/unit/ml2/drivers/brocade/test_brocade_mechanism_driver.py new file mode 100644 index 000000000..2dac0fe2a --- /dev/null +++ b/neutron/tests/unit/ml2/drivers/brocade/test_brocade_mechanism_driver.py @@ -0,0 +1,69 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# Copyright (c) 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import mock + +from neutron.openstack.common import importutils +from neutron.openstack.common import log as logging +from neutron.plugins.ml2 import config as ml2_config +from neutron.plugins.ml2.drivers.brocade import (mechanism_brocade + as brocademechanism) +from neutron.tests.unit import test_db_plugin + +LOG = logging.getLogger(__name__) +MECHANISM_NAME = ('neutron.plugins.ml2.' 
+ 'drivers.brocade.mechanism_brocade.BrocadeMechanism') + + +class TestBrocadeMechDriverV2(test_db_plugin.NeutronDbPluginV2TestCase): + """Test Brocade VCS/VDX mechanism driver. + """ + + _mechanism_name = MECHANISM_NAME + + def setUp(self): + + _mechanism_name = MECHANISM_NAME + + ml2_opts = { + 'mechanism_drivers': ['brocade'], + 'tenant_network_types': ['vlan']} + + for opt, val in ml2_opts.items(): + ml2_config.cfg.CONF.set_override(opt, val, 'ml2') + + def mocked_brocade_init(self): + self._driver = mock.MagicMock() + + with mock.patch.object(brocademechanism.BrocadeMechanism, + 'brocade_init', new=mocked_brocade_init): + super(TestBrocadeMechDriverV2, self).setUp() + self.mechanism_driver = importutils.import_object(_mechanism_name) + + +class TestBrocadeMechDriverNetworksV2(test_db_plugin.TestNetworksV2, + TestBrocadeMechDriverV2): + pass + + +class TestBrocadeMechDriverPortsV2(test_db_plugin.TestPortsV2, + TestBrocadeMechDriverV2): + pass + + +class TestBrocadeMechDriverSubnetsV2(test_db_plugin.TestSubnetsV2, + TestBrocadeMechDriverV2): + pass diff --git a/neutron/tests/unit/ml2/drivers/cisco/__init__.py b/neutron/tests/unit/ml2/drivers/cisco/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/neutron/tests/unit/ml2/drivers/cisco/apic/__init__.py b/neutron/tests/unit/ml2/drivers/cisco/apic/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/neutron/tests/unit/ml2/drivers/cisco/apic/test_cisco_apic_client.py b/neutron/tests/unit/ml2/drivers/cisco/apic/test_cisco_apic_client.py new file mode 100644 index 000000000..23444033a --- /dev/null +++ b/neutron/tests/unit/ml2/drivers/cisco/apic/test_cisco_apic_client.py @@ -0,0 +1,272 @@ +# Copyright (c) 2014 Cisco Systems +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Henry Gessau, Cisco Systems + +import mock +import requests +import requests.exceptions + +from neutron.plugins.ml2.drivers.cisco.apic import apic_client as apic +from neutron.plugins.ml2.drivers.cisco.apic import exceptions as cexc +from neutron.tests import base +from neutron.tests.unit.ml2.drivers.cisco.apic import ( + test_cisco_apic_common as mocked) + + +class TestCiscoApicClient(base.BaseTestCase, mocked.ControllerMixin): + + def setUp(self): + super(TestCiscoApicClient, self).setUp() + self.set_up_mocks() + self.apic = apic.RestClient(mocked.APIC_HOST) + self.addCleanup(mock.patch.stopall) + + def _mock_authenticate(self, timeout=300): + self.reset_reponses() + self.mock_apic_manager_login_responses(timeout=timeout) + self.apic.login(mocked.APIC_USR, mocked.APIC_PWD) + + def test_login_by_instantiation(self): + self.reset_reponses() + self.mock_apic_manager_login_responses() + apic2 = apic.RestClient(mocked.APIC_HOST, + usr=mocked.APIC_USR, pwd=mocked.APIC_PWD) + self.assertIsNotNone(apic2.authentication) + self.assertEqual(apic2.username, mocked.APIC_USR) + + def test_client_session_login_ok(self): + self._mock_authenticate() + self.assertEqual( + self.apic.authentication['userName'], mocked.APIC_USR) + self.assertTrue(self.apic.api_base.startswith('http://')) + self.assertEqual(self.apic.username, mocked.APIC_USR) + self.assertIsNotNone(self.apic.authentication) + self.apic = apic.RestClient(mocked.APIC_HOST, mocked.APIC_PORT, + ssl=True) + self.assertTrue(self.apic.api_base.startswith('https://')) + + def 
test_client_session_login_fail(self): + self.mock_error_post_response(requests.codes.unauthorized, + code='599', + text=u'Fake error') + self.assertRaises(cexc.ApicResponseNotOk, self.apic.login, + mocked.APIC_USR, mocked.APIC_PWD) + + def test_client_session_login_timeout(self): + self.response['post'].append(requests.exceptions.Timeout) + self.assertRaises(cexc.ApicHostNoResponse, self.apic.login, + mocked.APIC_USR, mocked.APIC_PWD) + + def test_client_session_logout_ok(self): + self.mock_response_for_post('aaaLogout') + self.apic.logout() + self.assertIsNone(self.apic.authentication) + # Multiple signouts should not cause an error + self.apic.logout() + self.assertIsNone(self.apic.authentication) + + def test_client_session_logout_fail(self): + self._mock_authenticate() + self.mock_error_post_response(requests.codes.timeout, + code='123', text='failed') + self.assertRaises(cexc.ApicResponseNotOk, self.apic.logout) + + def test_query_not_logged_in(self): + self.apic.authentication = None + self.assertRaises(cexc.ApicSessionNotLoggedIn, + self.apic.fvTenant.get, mocked.APIC_TENANT) + + def test_query_no_response(self): + self._mock_authenticate() + requests.Session.get = mock.Mock(return_value=None) + self.assertRaises(cexc.ApicHostNoResponse, + self.apic.fvTenant.get, mocked.APIC_TENANT) + + def test_query_error_response_no_data(self): + self._mock_authenticate() + self.mock_error_get_response(requests.codes.bad) # No error attrs. 
+ self.assertRaises(cexc.ApicResponseNotOk, + self.apic.fvTenant.get, mocked.APIC_TENANT) + + def test_generic_get_data(self): + self._mock_authenticate() + self.mock_response_for_get('topSystem', name='ifc1') + top_system = self.apic.get_data('class/topSystem') + self.assertIsNotNone(top_system) + name = top_system[0]['topSystem']['attributes']['name'] + self.assertEqual(name, 'ifc1') + + def test_session_timeout_refresh_ok(self): + self._mock_authenticate(timeout=-1) + # Client will do refresh before getting tenant + self.mock_response_for_get('aaaLogin', token='ok', + refreshTimeoutSeconds=300) + self.mock_response_for_get('fvTenant', name=mocked.APIC_TENANT) + tenant = self.apic.fvTenant.get(mocked.APIC_TENANT) + self.assertEqual(tenant['name'], mocked.APIC_TENANT) + + def test_session_timeout_refresh_no_cookie(self): + self._mock_authenticate(timeout=-1) + # Client will do refresh before getting tenant + self.mock_response_for_get('aaaLogin', notoken='test') + self.assertRaises(cexc.ApicResponseNoCookie, + self.apic.fvTenant.get, mocked.APIC_TENANT) + + def test_session_timeout_refresh_error(self): + self._mock_authenticate(timeout=-1) + self.mock_error_get_response(requests.codes.timeout, + code='503', text=u'timed out') + self.assertRaises(cexc.ApicResponseNotOk, + self.apic.fvTenant.get, mocked.APIC_TENANT) + + def test_session_timeout_refresh_timeout_error(self): + self._mock_authenticate(timeout=-1) + # Client will try to get refresh, we fake a refresh error. + self.mock_error_get_response(requests.codes.bad_request, + code='403', + text=u'Token was invalid. Expired.') + # Client will then try to re-login. + self.mock_apic_manager_login_responses() + # Finally the client will try to get the tenant. 
+ self.mock_response_for_get('fvTenant', name=mocked.APIC_TENANT) + tenant = self.apic.fvTenant.get(mocked.APIC_TENANT) + self.assertEqual(tenant['name'], mocked.APIC_TENANT) + + def test_lookup_mo_bad_token_retry(self): + self._mock_authenticate() + # For the first get request we mock a bad token. + self.mock_error_get_response(requests.codes.bad_request, + code='403', + text=u'Token was invalid. Expired.') + # Client will then try to re-login. + self.mock_apic_manager_login_responses() + # Then the client will retry to get the tenant. + self.mock_response_for_get('fvTenant', name=mocked.APIC_TENANT) + tenant = self.apic.fvTenant.get(mocked.APIC_TENANT) + self.assertEqual(tenant['name'], mocked.APIC_TENANT) + + def test_use_unsupported_managed_object(self): + self._mock_authenticate() + # unittest.assertRaises cannot catch exceptions raised in + # __getattr__, so we need to defer the evaluation using lambda. + self.assertRaises(cexc.ApicManagedObjectNotSupported, + lambda: self.apic.nonexistentObject) + + def test_lookup_nonexistant_mo(self): + self._mock_authenticate() + self.mock_response_for_get('fvTenant') + self.assertIsNone(self.apic.fvTenant.get(mocked.APIC_TENANT)) + + def test_lookup_existing_mo(self): + self._mock_authenticate() + self.mock_response_for_get('fvTenant', name='infra') + tenant = self.apic.fvTenant.get('infra') + self.assertEqual(tenant['name'], 'infra') + + def test_list_mos_ok(self): + self._mock_authenticate() + self.mock_response_for_get('fvTenant', name='t1') + self.mock_append_to_response('fvTenant', name='t2') + tlist = self.apic.fvTenant.list_all() + self.assertIsNotNone(tlist) + self.assertEqual(len(tlist), 2) + self.assertIn({'name': 't1'}, tlist) + self.assertIn({'name': 't2'}, tlist) + + def test_list_mo_names_ok(self): + self._mock_authenticate() + self.mock_response_for_get('fvTenant', name='t1') + self.mock_append_to_response('fvTenant', name='t2') + tnlist = self.apic.fvTenant.list_names() + self.assertIsNotNone(tnlist) + 
self.assertEqual(len(tnlist), 2) + self.assertIn('t1', tnlist) + self.assertIn('t2', tnlist) + + def test_list_mos_split_class_fail(self): + self._mock_authenticate() + self.mock_response_for_get('fvnsEncapBlk', name='Blk1') + encap_blks = self.apic.fvnsEncapBlk__vlan.list_all() + self.assertEqual(len(encap_blks), 1) + + def test_delete_mo_ok(self): + self._mock_authenticate() + self.mock_response_for_post('fvTenant') + self.assertTrue(self.apic.fvTenant.delete(mocked.APIC_TENANT)) + + def test_create_mo_ok(self): + self._mock_authenticate() + self.mock_response_for_post('fvTenant', name=mocked.APIC_TENANT) + self.mock_response_for_get('fvTenant', name=mocked.APIC_TENANT) + self.apic.fvTenant.create(mocked.APIC_TENANT) + tenant = self.apic.fvTenant.get(mocked.APIC_TENANT) + self.assertEqual(tenant['name'], mocked.APIC_TENANT) + + def test_create_mo_already_exists(self): + self._mock_authenticate() + self.mock_error_post_response(requests.codes.bad_request, + code='103', + text=u'Fake 103 error') + self.assertRaises(cexc.ApicResponseNotOk, + self.apic.vmmProvP.create, mocked.APIC_VMMP) + + def test_create_mo_with_prereq(self): + self._mock_authenticate() + self.mock_response_for_post('fvTenant', name=mocked.APIC_TENANT) + self.mock_response_for_post('fvBD', name=mocked.APIC_NETWORK) + self.mock_response_for_get('fvBD', name=mocked.APIC_NETWORK) + bd_args = mocked.APIC_TENANT, mocked.APIC_NETWORK + self.apic.fvBD.create(*bd_args) + network = self.apic.fvBD.get(*bd_args) + self.assertEqual(network['name'], mocked.APIC_NETWORK) + + def test_create_mo_prereq_exists(self): + self._mock_authenticate() + self.mock_response_for_post('vmmDomP', name=mocked.APIC_DOMAIN) + self.mock_response_for_get('vmmDomP', name=mocked.APIC_DOMAIN) + self.apic.vmmDomP.create(mocked.APIC_VMMP, mocked.APIC_DOMAIN) + dom = self.apic.vmmDomP.get(mocked.APIC_VMMP, mocked.APIC_DOMAIN) + self.assertEqual(dom['name'], mocked.APIC_DOMAIN) + + def test_create_mo_fails(self): + 
self._mock_authenticate() + self.mock_response_for_post('fvTenant', name=mocked.APIC_TENANT) + self.mock_error_post_response(requests.codes.bad_request, + code='not103', + text=u'Fake not103 error') + bd_args = mocked.APIC_TENANT, mocked.APIC_NETWORK + self.assertRaises(cexc.ApicResponseNotOk, + self.apic.fvBD.create, *bd_args) + + def test_update_mo(self): + self._mock_authenticate() + self.mock_response_for_post('fvTenant', name=mocked.APIC_TENANT) + self.mock_response_for_get('fvTenant', name=mocked.APIC_TENANT, + more='extra') + self.apic.fvTenant.update(mocked.APIC_TENANT, more='extra') + tenant = self.apic.fvTenant.get(mocked.APIC_TENANT) + self.assertEqual(tenant['name'], mocked.APIC_TENANT) + self.assertEqual(tenant['more'], 'extra') + + def test_attr_fail_empty_list(self): + self._mock_authenticate() + self.mock_response_for_get('fvTenant') # No attrs for tenant. + self.assertIsNone(self.apic.fvTenant.get(mocked.APIC_TENANT)) + + def test_attr_fail_other_obj(self): + self._mock_authenticate() + self.mock_response_for_get('other', name=mocked.APIC_TENANT) + self.assertIsNone(self.apic.fvTenant.get(mocked.APIC_TENANT)) diff --git a/neutron/tests/unit/ml2/drivers/cisco/apic/test_cisco_apic_common.py b/neutron/tests/unit/ml2/drivers/cisco/apic/test_cisco_apic_common.py new file mode 100644 index 000000000..e150c1f09 --- /dev/null +++ b/neutron/tests/unit/ml2/drivers/cisco/apic/test_cisco_apic_common.py @@ -0,0 +1,225 @@ +# Copyright (c) 2014 Cisco Systems +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Henry Gessau, Cisco Systems + +import mock +import requests + +from oslo.config import cfg + +from neutron.common import config as neutron_config +from neutron.plugins.ml2 import config as ml2_config +from neutron.plugins.ml2.drivers.cisco.apic import apic_client as apic +from neutron.tests import base + + +OK = requests.codes.ok + +APIC_HOST = 'fake.controller.local' +APIC_PORT = 7580 +APIC_USR = 'notadmin' +APIC_PWD = 'topsecret' + +APIC_TENANT = 'citizen14' +APIC_NETWORK = 'network99' +APIC_NETNAME = 'net99name' +APIC_SUBNET = '10.3.2.1/24' +APIC_L3CTX = 'layer3context' +APIC_AP = 'appProfile001' +APIC_EPG = 'endPointGroup001' + +APIC_CONTRACT = 'signedContract' +APIC_SUBJECT = 'testSubject' +APIC_FILTER = 'carbonFilter' +APIC_ENTRY = 'forcedEntry' + +APIC_VMMP = 'OpenStack' +APIC_DOMAIN = 'cumuloNimbus' +APIC_PDOM = 'rainStorm' + +APIC_NODE_PROF = 'red' +APIC_LEAF = 'green' +APIC_LEAF_TYPE = 'range' +APIC_NODE_BLK = 'blue' +APIC_PORT_PROF = 'yellow' +APIC_PORT_SEL = 'front' +APIC_PORT_TYPE = 'range' +APIC_PORT_BLK1 = 'block01' +APIC_PORT_BLK2 = 'block02' +APIC_ACC_PORT_GRP = 'alpha' +APIC_FUNC_PROF = 'beta' +APIC_ATT_ENT_PROF = 'delta' +APIC_VLAN_NAME = 'gamma' +APIC_VLAN_MODE = 'dynamic' +APIC_VLANID_FROM = 2900 +APIC_VLANID_TO = 2999 +APIC_VLAN_FROM = 'vlan-%d' % APIC_VLANID_FROM +APIC_VLAN_TO = 'vlan-%d' % APIC_VLANID_TO + + +class ControllerMixin(object): + + """Mock the controller for APIC driver and service unit tests.""" + + def __init__(self): + self.response = None + + def set_up_mocks(self): + # The mocked responses from the server are lists used by + # mock.side_effect, which means each call to post or get will + # return the next item in the list. This allows the test cases + # to stage a sequence of responses to method(s) under test. 
+ self.response = {'post': [], 'get': []} + self.reset_reponses() + + def reset_reponses(self, req=None): + # Clear all staged responses. + reqs = req and [req] or ['post', 'get'] # Both if none specified. + for req in reqs: + del self.response[req][:] + self.restart_responses(req) + + def restart_responses(self, req): + responses = mock.MagicMock(side_effect=self.response[req]) + if req == 'post': + requests.Session.post = responses + elif req == 'get': + requests.Session.get = responses + + def mock_response_for_post(self, mo, **attrs): + attrs['debug_mo'] = mo # useful for debugging + self._stage_mocked_response('post', OK, mo, **attrs) + + def mock_response_for_get(self, mo, **attrs): + self._stage_mocked_response('get', OK, mo, **attrs) + + def mock_append_to_response(self, mo, **attrs): + # Append a MO to the last get response. + mo_attrs = attrs and {mo: {'attributes': attrs}} or {} + self.response['get'][-1].json.return_value['imdata'].append(mo_attrs) + + def mock_error_post_response(self, status, **attrs): + self._stage_mocked_response('post', status, 'error', **attrs) + + def mock_error_get_response(self, status, **attrs): + self._stage_mocked_response('get', status, 'error', **attrs) + + def _stage_mocked_response(self, req, mock_status, mo, **attrs): + response = mock.MagicMock() + response.status_code = mock_status + mo_attrs = attrs and [{mo: {'attributes': attrs}}] or [] + response.json.return_value = {'imdata': mo_attrs} + self.response[req].append(response) + + def mock_responses_for_create(self, obj): + self._mock_container_responses_for_create( + apic.ManagedObjectClass(obj).container) + name = '-'.join([obj, 'name']) # useful for debugging + self._stage_mocked_response('post', OK, obj, name=name) + + def _mock_container_responses_for_create(self, obj): + # Recursively generate responses for creating obj's containers. 
+ if obj: + mo = apic.ManagedObjectClass(obj) + if mo.can_create: + if mo.container: + self._mock_container_responses_for_create(mo.container) + name = '-'.join([obj, 'name']) # useful for debugging + self._stage_mocked_response('post', OK, obj, debug_name=name) + + def mock_apic_manager_login_responses(self, timeout=300): + # APIC Manager tests are based on authenticated session + self.mock_response_for_post('aaaLogin', userName=APIC_USR, + token='ok', refreshTimeoutSeconds=timeout) + + def assert_responses_drained(self, req=None): + """Fail if all the expected responses have not been consumed.""" + request = {'post': self.session.post, 'get': self.session.get} + reqs = req and [req] or ['post', 'get'] # Both if none specified. + for req in reqs: + try: + request[req]('some url') + except StopIteration: + pass + else: + # User-friendly error message + msg = req + ' response queue not drained' + self.fail(msg=msg) + + +class ConfigMixin(object): + + """Mock the config for APIC driver and service unit tests.""" + + def __init__(self): + self.mocked_parser = None + + def set_up_mocks(self): + # Mock the configuration file + args = ['--config-file', base.etcdir('neutron.conf.test')] + neutron_config.init(args=args) + + # Configure the ML2 mechanism drivers and network types + ml2_opts = { + 'mechanism_drivers': ['apic'], + 'tenant_network_types': ['vlan'], + } + for opt, val in ml2_opts.items(): + ml2_config.cfg.CONF.set_override(opt, val, 'ml2') + + # Configure the Cisco APIC mechanism driver + apic_test_config = { + 'apic_host': APIC_HOST, + 'apic_username': APIC_USR, + 'apic_password': APIC_PWD, + 'apic_port': APIC_PORT, + 'apic_vmm_domain': APIC_DOMAIN, + 'apic_vlan_ns_name': APIC_VLAN_NAME, + 'apic_vlan_range': '%d:%d' % (APIC_VLANID_FROM, APIC_VLANID_TO), + 'apic_node_profile': APIC_NODE_PROF, + 'apic_entity_profile': APIC_ATT_ENT_PROF, + 'apic_function_profile': APIC_FUNC_PROF, + } + for opt, val in apic_test_config.items(): + cfg.CONF.set_override(opt, val, 
'ml2_cisco_apic') + + apic_switch_cfg = { + 'apic_switch:east01': {'ubuntu1,ubuntu2': ['3/11']}, + 'apic_switch:east02': {'rhel01,rhel02': ['4/21'], + 'rhel03': ['4/22']}, + } + self.mocked_parser = mock.patch.object(cfg, + 'MultiConfigParser').start() + self.mocked_parser.return_value.read.return_value = [apic_switch_cfg] + self.mocked_parser.return_value.parsed = [apic_switch_cfg] + + +class DbModelMixin(object): + + """Mock the DB models for the APIC driver and service unit tests.""" + + def __init__(self): + self.mocked_session = None + + def set_up_mocks(self): + self.mocked_session = mock.Mock() + get_session = mock.patch('neutron.db.api.get_session').start() + get_session.return_value = self.mocked_session + + def mock_db_query_filterby_first_return(self, value): + """Mock db.session.query().filterby().first() to return value.""" + query = self.mocked_session.query.return_value + query.filter_by.return_value.first.return_value = value diff --git a/neutron/tests/unit/ml2/drivers/cisco/apic/test_cisco_apic_manager.py b/neutron/tests/unit/ml2/drivers/cisco/apic/test_cisco_apic_manager.py new file mode 100644 index 000000000..24a2c217d --- /dev/null +++ b/neutron/tests/unit/ml2/drivers/cisco/apic/test_cisco_apic_manager.py @@ -0,0 +1,698 @@ +# Copyright (c) 2014 Cisco Systems +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# +# @author: Henry Gessau, Cisco Systems + +import mock +from webob import exc as wexc + +from neutron.openstack.common import uuidutils + +from neutron.plugins.ml2.drivers.cisco.apic import apic_manager +from neutron.plugins.ml2.drivers.cisco.apic import exceptions as cexc +from neutron.tests import base +from neutron.tests.unit.ml2.drivers.cisco.apic import ( + test_cisco_apic_common as mocked) + + +class TestCiscoApicManager(base.BaseTestCase, + mocked.ControllerMixin, + mocked.ConfigMixin, + mocked.DbModelMixin): + + def setUp(self): + super(TestCiscoApicManager, self).setUp() + mocked.ControllerMixin.set_up_mocks(self) + mocked.ConfigMixin.set_up_mocks(self) + mocked.DbModelMixin.set_up_mocks(self) + + self.mock_apic_manager_login_responses() + self.mgr = apic_manager.APICManager() + self.session = self.mgr.apic.session + self.assert_responses_drained() + self.reset_reponses() + + def test_mgr_session_login(self): + login = self.mgr.apic.authentication + self.assertEqual(login['userName'], mocked.APIC_USR) + + def test_mgr_session_logout(self): + self.mock_response_for_post('aaaLogout') + self.mgr.apic.logout() + self.assert_responses_drained() + self.assertIsNone(self.mgr.apic.authentication) + + def test_to_range(self): + port_list = [4, 2, 3, 1, 7, 8, 10, 20, 6, 22, 21] + expected_ranges = [(1, 4), (6, 8), (10, 10), (20, 22)] + port_ranges = [r for r in apic_manager.group_by_ranges(port_list)] + self.assertEqual(port_ranges, expected_ranges) + + def test_get_profiles(self): + self.mock_db_query_filterby_first_return('faked') + self.assertEqual( + self.mgr.db.get_port_profile_for_node('node'), + 'faked' + ) + self.assertEqual( + self.mgr.db.get_profile_for_module('node', 'prof', 'module'), + 'faked' + ) + self.assertEqual( + self.mgr.db.get_profile_for_module_and_ports( + 'node', 'prof', 'module', 'from', 'to' + ), + 'faked' + ) + + def test_add_profile(self): + self.mgr.db.add_profile_for_module_and_ports( + 'node', 'prof', 'hpselc', 'module', 'from', 
'to') + self.assertTrue(self.mocked_session.add.called) + self.assertTrue(self.mocked_session.flush.called) + + def test_ensure_port_profile_created(self): + port_name = mocked.APIC_PORT + self.mock_responses_for_create('infraAccPortP') + self.mock_response_for_get('infraAccPortP', name=port_name) + port = self.mgr.ensure_port_profile_created_on_apic(port_name) + self.assert_responses_drained() + self.assertEqual(port['name'], port_name) + + def test_ensure_port_profile_created_exc(self): + port_name = mocked.APIC_PORT + self.mock_error_post_response(wexc.HTTPBadRequest) + self.mock_response_for_post('infraAccPortP') + self.assertRaises(cexc.ApicResponseNotOk, + self.mgr.ensure_port_profile_created_on_apic, + port_name) + self.assert_responses_drained() + + def test_ensure_node_profile_created_for_switch_old(self): + old_switch = mocked.APIC_NODE_PROF + self.mock_response_for_get('infraNodeP', name=old_switch) + self.mgr.ensure_node_profile_created_for_switch(old_switch) + self.assert_responses_drained() + old_name = self.mgr.node_profiles[old_switch]['object']['name'] + self.assertEqual(old_name, old_switch) + + def test_ensure_node_profile_created_for_switch_new(self): + new_switch = mocked.APIC_NODE_PROF + self.mock_response_for_get('infraNodeP') + self.mock_responses_for_create('infraNodeP') + self.mock_responses_for_create('infraLeafS') + self.mock_responses_for_create('infraNodeBlk') + self.mock_response_for_get('infraNodeP', name=new_switch) + self.mgr.ensure_node_profile_created_for_switch(new_switch) + self.assert_responses_drained() + new_name = self.mgr.node_profiles[new_switch]['object']['name'] + self.assertEqual(new_name, new_switch) + + def test_ensure_node_profile_created_for_switch_new_exc(self): + new_switch = mocked.APIC_NODE_PROF + self.mock_response_for_get('infraNodeP') + self.mock_error_post_response(wexc.HTTPBadRequest) + self.mock_response_for_post('infraNodeP') + self.assertRaises(cexc.ApicResponseNotOk, + 
self.mgr.ensure_node_profile_created_for_switch, + new_switch) + self.assert_responses_drained() + + def test_ensure_vmm_domain_created_old(self): + dom = mocked.APIC_DOMAIN + self.mock_response_for_get('vmmDomP', name=dom) + self.mgr.ensure_vmm_domain_created_on_apic(dom) + self.assert_responses_drained() + old_dom = self.mgr.vmm_domain['name'] + self.assertEqual(old_dom, dom) + + def _mock_new_vmm_dom_responses(self, dom, seg_type=None): + vmm = mocked.APIC_VMMP + dn = self.mgr.apic.vmmDomP.mo.dn(vmm, dom) + self.mock_response_for_get('vmmDomP') + self.mock_responses_for_create('vmmDomP') + if seg_type: + self.mock_responses_for_create(seg_type) + self.mock_response_for_get('vmmDomP', name=dom, dn=dn) + + def test_ensure_vmm_domain_created_new_no_vlan_ns(self): + dom = mocked.APIC_DOMAIN + self._mock_new_vmm_dom_responses(dom) + self.mgr.ensure_vmm_domain_created_on_apic(dom) + self.assert_responses_drained() + new_dom = self.mgr.vmm_domain['name'] + self.assertEqual(new_dom, dom) + + def test_ensure_vmm_domain_created_new_no_vlan_ns_exc(self): + dom = mocked.APIC_DOMAIN + self.mock_response_for_get('vmmDomP') + self.mock_error_post_response(wexc.HTTPBadRequest) + self.mock_response_for_post('vmmDomP') + self.assertRaises(cexc.ApicResponseNotOk, + self.mgr.ensure_vmm_domain_created_on_apic, dom) + self.assert_responses_drained() + + def test_ensure_vmm_domain_created_new_with_vlan_ns(self): + dom = mocked.APIC_DOMAIN + self._mock_new_vmm_dom_responses(dom, seg_type='infraRsVlanNs__vmm') + ns = {'dn': 'test_vlan_ns'} + self.mgr.ensure_vmm_domain_created_on_apic(dom, vlan_ns=ns) + self.assert_responses_drained() + new_dom = self.mgr.vmm_domain['name'] + self.assertEqual(new_dom, dom) + + def test_ensure_vmm_domain_created_new_with_vxlan_ns(self): + dom = mocked.APIC_DOMAIN + # TODO(Henry): mock seg_type vxlan when vxlan is ready + self._mock_new_vmm_dom_responses(dom, seg_type=None) + ns = {'dn': 'test_vxlan_ns'} + self.mgr.ensure_vmm_domain_created_on_apic(dom, 
vxlan_ns=ns) + self.assert_responses_drained() + new_dom = self.mgr.vmm_domain['name'] + self.assertEqual(new_dom, dom) + + def test_ensure_infra_created_no_infra(self): + self.mgr.switch_dict = {} + self.mgr.ensure_infra_created_on_apic() + + def _ensure_infra_created_seq1_setup(self): + am = 'neutron.plugins.ml2.drivers.cisco.apic.apic_manager.APICManager' + np_create_for_switch = mock.patch( + am + '.ensure_node_profile_created_for_switch').start() + self.mock_db_query_filterby_first_return(None) + pp_create_for_switch = mock.patch( + am + '.ensure_port_profile_created_on_apic').start() + pp_create_for_switch.return_value = {'dn': 'port_profile_dn'} + return np_create_for_switch, pp_create_for_switch + + def test_ensure_infra_created_seq1(self): + np_create_for_switch, pp_create_for_switch = ( + self._ensure_infra_created_seq1_setup()) + + def _profile_for_module(aswitch, ppn, module): + profile = mock.Mock() + profile.ppn = ppn + profile.hpselc_id = '-'.join([aswitch, module, 'hpselc_id']) + return profile + + self.mgr.db.get_profile_for_module = mock.Mock( + side_effect=_profile_for_module) + self.mgr.db.get_profile_for_module_and_ports = mock.Mock( + return_value=None) + self.mgr.db.add_profile_for_module_and_ports = mock.Mock() + + num_switches = len(self.mgr.switch_dict) + for loop in range(num_switches): + self.mock_responses_for_create('infraRsAccPortP') + self.mock_responses_for_create('infraPortBlk') + + self.mgr.ensure_infra_created_on_apic() + self.assert_responses_drained() + self.assertEqual(np_create_for_switch.call_count, num_switches) + self.assertEqual(pp_create_for_switch.call_count, num_switches) + for switch in self.mgr.switch_dict: + np_create_for_switch.assert_any_call(switch) + + def test_ensure_infra_created_seq1_exc(self): + np_create_for_switch, __ = self._ensure_infra_created_seq1_setup() + self.mock_error_post_response(wexc.HTTPBadRequest) + self.mock_response_for_post('infraAccPortP') + + self.assertRaises(cexc.ApicResponseNotOk, + 
self.mgr.ensure_infra_created_on_apic) + self.assert_responses_drained() + self.assertTrue(np_create_for_switch.called) + self.assertEqual(np_create_for_switch.call_count, 1) + + def _ensure_infra_created_seq2_setup(self): + am = 'neutron.plugins.ml2.drivers.cisco.apic.apic_manager.APICManager' + np_create_for_switch = mock.patch( + am + '.ensure_node_profile_created_for_switch').start() + + def _profile_for_node(aswitch): + profile = mock.Mock() + profile.profile_id = '-'.join([aswitch, 'profile_id']) + return profile + + self.mgr.db.get_port_profile_for_node = mock.Mock( + side_effect=_profile_for_node) + self.mgr.db.get_profile_for_module = mock.Mock( + return_value=None) + self.mgr.function_profile = {'dn': 'dn'} + self.mgr.db.get_profile_for_module_and_ports = mock.Mock( + return_value=True) + + return np_create_for_switch + + def test_ensure_infra_created_seq2(self): + np_create_for_switch = self._ensure_infra_created_seq2_setup() + + num_switches = len(self.mgr.switch_dict) + for loop in range(num_switches): + self.mock_responses_for_create('infraHPortS') + self.mock_responses_for_create('infraRsAccBaseGrp') + + self.mgr.ensure_infra_created_on_apic() + self.assert_responses_drained() + self.assertEqual(np_create_for_switch.call_count, num_switches) + for switch in self.mgr.switch_dict: + np_create_for_switch.assert_any_call(switch) + + def test_ensure_infra_created_seq2_exc(self): + np_create_for_switch = self._ensure_infra_created_seq2_setup() + + self.mock_error_post_response(wexc.HTTPBadRequest) + self.mock_response_for_post('infraHPortS') + + self.assertRaises(cexc.ApicResponseNotOk, + self.mgr.ensure_infra_created_on_apic) + self.assert_responses_drained() + self.assertTrue(np_create_for_switch.called) + self.assertEqual(np_create_for_switch.call_count, 1) + + def test_ensure_context_unenforced_new_ctx(self): + self.mock_response_for_get('fvCtx') + self.mock_responses_for_create('fvCtx') + self.mgr.ensure_context_unenforced() + 
self.assert_responses_drained() + + def test_ensure_context_unenforced_pref1(self): + self.mock_response_for_get('fvCtx', pcEnfPref='1') + self.mock_response_for_post('fvCtx') + self.mgr.ensure_context_unenforced() + self.assert_responses_drained() + + def test_ensure_context_unenforced_pref2(self): + self.mock_response_for_get('fvCtx', pcEnfPref='2') + self.mgr.ensure_context_unenforced() + self.assert_responses_drained() + + def _mock_vmm_dom_prereq(self, dom): + self._mock_new_vmm_dom_responses(dom) + self.mgr.ensure_vmm_domain_created_on_apic(dom) + + def _mock_new_phys_dom_responses(self, dom, seg_type=None): + dn = self.mgr.apic.physDomP.mo.dn(dom) + self.mock_response_for_get('physDomP') + self.mock_responses_for_create('physDomP') + if seg_type: + self.mock_responses_for_create(seg_type) + self.mock_response_for_get('physDomP', name=dom, dn=dn) + + def _mock_phys_dom_prereq(self, dom): + self._mock_new_phys_dom_responses(dom) + self.mgr.ensure_phys_domain_created_on_apic(dom) + + def test_ensure_entity_profile_created_old(self): + ep = mocked.APIC_ATT_ENT_PROF + self.mock_response_for_get('infraAttEntityP', name=ep) + self.mgr.ensure_entity_profile_created_on_apic(ep) + self.assert_responses_drained() + + def _mock_new_entity_profile(self, exc=None): + self.mock_response_for_get('infraAttEntityP') + self.mock_responses_for_create('infraAttEntityP') + self.mock_responses_for_create('infraRsDomP') + if exc: + self.mock_error_get_response(exc, code='103', text=u'Fail') + else: + self.mock_response_for_get('infraAttEntityP') + + def test_ensure_entity_profile_created_new(self): + self._mock_phys_dom_prereq(mocked.APIC_PDOM) + ep = mocked.APIC_ATT_ENT_PROF + self._mock_new_entity_profile() + self.mgr.ensure_entity_profile_created_on_apic(ep) + self.assert_responses_drained() + + def test_ensure_entity_profile_created_new_exc(self): + self._mock_phys_dom_prereq(mocked.APIC_PDOM) + ep = mocked.APIC_ATT_ENT_PROF + 
self._mock_new_entity_profile(exc=wexc.HTTPBadRequest) + self.mock_response_for_post('infraAttEntityP') + self.assertRaises(cexc.ApicResponseNotOk, + self.mgr.ensure_entity_profile_created_on_apic, ep) + self.assert_responses_drained() + + def _mock_entity_profile_preqreq(self): + self._mock_phys_dom_prereq(mocked.APIC_PDOM) + ep = mocked.APIC_ATT_ENT_PROF + self._mock_new_entity_profile() + self.mgr.ensure_entity_profile_created_on_apic(ep) + + def test_ensure_function_profile_created_old(self): + self._mock_entity_profile_preqreq() + fp = mocked.APIC_FUNC_PROF + self.mock_response_for_get('infraAccPortGrp', name=fp) + self.mgr.ensure_function_profile_created_on_apic(fp) + self.assert_responses_drained() + old_fp = self.mgr.function_profile['name'] + self.assertEqual(old_fp, fp) + + def _mock_new_function_profile(self, fp): + dn = self.mgr.apic.infraAttEntityP.mo.dn(fp) + self.mock_responses_for_create('infraAccPortGrp') + self.mock_responses_for_create('infraRsAttEntP') + self.mock_response_for_get('infraAccPortGrp', name=fp, dn=dn) + + def test_ensure_function_profile_created_new(self): + fp = mocked.APIC_FUNC_PROF + dn = self.mgr.apic.infraAttEntityP.mo.dn(fp) + self.mgr.entity_profile = {'dn': dn} + self.mock_response_for_get('infraAccPortGrp') + self.mock_responses_for_create('infraAccPortGrp') + self.mock_responses_for_create('infraRsAttEntP') + self.mock_response_for_get('infraAccPortGrp', name=fp, dn=dn) + self.mgr.ensure_function_profile_created_on_apic(fp) + self.assert_responses_drained() + new_fp = self.mgr.function_profile['name'] + self.assertEqual(new_fp, fp) + + def test_ensure_function_profile_created_new_exc(self): + fp = mocked.APIC_FUNC_PROF + dn = self.mgr.apic.infraAttEntityP.mo.dn(fp) + self.mgr.entity_profile = {'dn': dn} + self.mock_response_for_get('infraAccPortGrp') + self.mock_error_post_response(wexc.HTTPBadRequest) + self.mock_response_for_post('infraAccPortGrp') + self.assertRaises(cexc.ApicResponseNotOk, + 
self.mgr.ensure_function_profile_created_on_apic, fp) + self.assert_responses_drained() + + def test_ensure_vlan_ns_created_old(self): + ns = mocked.APIC_VLAN_NAME + mode = mocked.APIC_VLAN_MODE + self.mock_response_for_get('fvnsVlanInstP', name=ns, mode=mode) + new_ns = self.mgr.ensure_vlan_ns_created_on_apic(ns, '100', '199') + self.assert_responses_drained() + self.assertIsNone(new_ns) + + def _mock_new_vlan_instance(self, ns, vlan_encap=None): + self.mock_responses_for_create('fvnsVlanInstP') + if vlan_encap: + self.mock_response_for_get('fvnsEncapBlk', **vlan_encap) + else: + self.mock_response_for_get('fvnsEncapBlk') + self.mock_responses_for_create('fvnsEncapBlk__vlan') + self.mock_response_for_get('fvnsVlanInstP', name=ns) + + def test_ensure_vlan_ns_created_new_no_encap(self): + ns = mocked.APIC_VLAN_NAME + self.mock_response_for_get('fvnsVlanInstP') + self._mock_new_vlan_instance(ns) + new_ns = self.mgr.ensure_vlan_ns_created_on_apic(ns, '200', '299') + self.assert_responses_drained() + self.assertEqual(new_ns['name'], ns) + + def test_ensure_vlan_ns_created_new_exc(self): + ns = mocked.APIC_VLAN_NAME + self.mock_response_for_get('fvnsVlanInstP') + self.mock_error_post_response(wexc.HTTPBadRequest) + self.mock_response_for_post('fvnsVlanInstP') + self.assertRaises(cexc.ApicResponseNotOk, + self.mgr.ensure_vlan_ns_created_on_apic, + ns, '200', '299') + self.assert_responses_drained() + + def test_ensure_vlan_ns_created_new_with_encap(self): + ns = mocked.APIC_VLAN_NAME + self.mock_response_for_get('fvnsVlanInstP') + ns_args = {'name': 'encap', 'from': '300', 'to': '399'} + self._mock_new_vlan_instance(ns, vlan_encap=ns_args) + new_ns = self.mgr.ensure_vlan_ns_created_on_apic(ns, '300', '399') + self.assert_responses_drained() + self.assertEqual(new_ns['name'], ns) + + def test_ensure_tenant_created_on_apic(self): + self.mock_response_for_get('fvTenant', name='any') + self.mgr.ensure_tenant_created_on_apic('two') + self.mock_response_for_get('fvTenant') + 
self.mock_responses_for_create('fvTenant') + self.mgr.ensure_tenant_created_on_apic('four') + self.assert_responses_drained() + + def test_ensure_bd_created_existing_bd(self): + self.mock_response_for_get('fvBD', name='BD') + self.mgr.ensure_bd_created_on_apic('t1', 'two') + self.assert_responses_drained() + + def test_ensure_bd_created_not_ctx(self): + self.mock_response_for_get('fvBD') + self.mock_responses_for_create('fvBD') + self.mock_response_for_get('fvCtx') + self.mock_responses_for_create('fvCtx') + self.mock_responses_for_create('fvRsCtx') + self.mgr.ensure_bd_created_on_apic('t2', 'three') + self.assert_responses_drained() + + def test_ensure_bd_created_exc(self): + self.mock_response_for_get('fvBD') + self.mock_error_post_response(wexc.HTTPBadRequest) + self.mock_response_for_post('fvBD') + self.assertRaises(cexc.ApicResponseNotOk, + self.mgr.ensure_bd_created_on_apic, 't2', 'three') + self.assert_responses_drained() + + def test_ensure_bd_created_ctx_pref1(self): + self.mock_response_for_get('fvBD') + self.mock_responses_for_create('fvBD') + self.mock_response_for_get('fvCtx', pcEnfPref='1') + self.mock_responses_for_create('fvRsCtx') + self.mgr.ensure_bd_created_on_apic('t3', 'four') + self.assert_responses_drained() + + def test_ensure_bd_created_ctx_pref2(self): + self.mock_response_for_get('fvBD') + self.mock_responses_for_create('fvBD') + self.mock_response_for_get('fvCtx', pcEnfPref='2') + self.mock_response_for_post('fvCtx') + self.mock_responses_for_create('fvRsCtx') + self.mgr.ensure_bd_created_on_apic('t3', 'four') + self.assert_responses_drained() + + def test_delete_bd(self): + self.mock_response_for_post('fvBD') + self.mgr.delete_bd_on_apic('t1', 'bd') + self.assert_responses_drained() + + def test_ensure_subnet_created(self): + self.mock_response_for_get('fvSubnet', name='sn1') + self.mgr.ensure_subnet_created_on_apic('t0', 'bd1', '2.2.2.2/8') + self.mock_response_for_get('fvSubnet') + self.mock_responses_for_create('fvSubnet') + 
self.mgr.ensure_subnet_created_on_apic('t2', 'bd3', '4.4.4.4/16') + self.assert_responses_drained() + + def test_ensure_filter_created(self): + self.mock_response_for_get('vzFilter', name='f1') + self.mgr.ensure_filter_created_on_apic('t1', 'two') + self.mock_response_for_get('vzFilter') + self.mock_responses_for_create('vzFilter') + self.mgr.ensure_filter_created_on_apic('t2', 'four') + self.assert_responses_drained() + + def test_ensure_epg_created_for_network_old(self): + self.mock_db_query_filterby_first_return('faked') + epg = self.mgr.ensure_epg_created_for_network('X', 'Y', 'Z') + self.assertEqual(epg, 'faked') + + def test_ensure_epg_created_for_network_new(self): + tenant = mocked.APIC_TENANT + network = mocked.APIC_NETWORK + netname = mocked.APIC_NETNAME + self._mock_phys_dom_prereq(mocked.APIC_PDOM) + self.mock_db_query_filterby_first_return(None) + self.mock_responses_for_create('fvAEPg') + self.mock_response_for_get('fvBD', name=network) + self.mock_responses_for_create('fvRsBd') + self.mock_responses_for_create('fvRsDomAtt') + new_epg = self.mgr.ensure_epg_created_for_network(tenant, + network, netname) + self.assert_responses_drained() + self.assertEqual(new_epg.network_id, network) + self.assertTrue(self.mocked_session.add.called) + self.assertTrue(self.mocked_session.flush.called) + + def test_ensure_epg_created_for_network_exc(self): + tenant = mocked.APIC_TENANT + network = mocked.APIC_NETWORK + netname = mocked.APIC_NETNAME + self.mock_db_query_filterby_first_return(None) + self.mock_error_post_response(wexc.HTTPBadRequest) + self.mock_response_for_post('fvAEPg') + self.assertRaises(cexc.ApicResponseNotOk, + self.mgr.ensure_epg_created_for_network, + tenant, network, netname) + self.assert_responses_drained() + + def test_delete_epg_for_network_no_epg(self): + self.mock_db_query_filterby_first_return(None) + self.mgr.delete_epg_for_network('tenant', 'network') + + def test_delete_epg_for_network(self): + epg = mock.Mock() + epg.epg_id = 
mocked.APIC_EPG + self.mock_db_query_filterby_first_return(epg) + self.mock_response_for_post('fvAEPg') + self.mgr.delete_epg_for_network('tenant', 'network') + self.assertTrue(self.mocked_session.delete.called) + self.assertTrue(self.mocked_session.flush.called) + + def test_ensure_path_created_for_port(self): + epg = mock.Mock() + epg.epg_id = 'epg01' + eepg = mock.Mock(return_value=epg) + apic_manager.APICManager.ensure_epg_created_for_network = eepg + self.mock_response_for_get('fvRsPathAtt', tDn='foo') + self.mgr.ensure_path_created_for_port('tenant', 'network', 'rhel01', + 'static', 'netname') + self.assert_responses_drained() + + def test_ensure_path_created_for_port_no_path_att(self): + epg = mock.Mock() + epg.epg_id = 'epg2' + eepg = mock.Mock(return_value=epg) + self.mgr.ensure_epg_created_for_network = eepg + self.mock_response_for_get('fvRsPathAtt') + self.mock_responses_for_create('fvRsPathAtt') + self.mgr.ensure_path_created_for_port('tenant', 'network', 'ubuntu2', + 'static', 'netname') + self.assert_responses_drained() + + def test_ensure_path_created_for_port_unknown_host(self): + epg = mock.Mock() + epg.epg_id = 'epg3' + eepg = mock.Mock(return_value=epg) + apic_manager.APICManager.ensure_epg_created_for_network = eepg + self.mock_response_for_get('fvRsPathAtt', tDn='foo') + self.assertRaises(cexc.ApicHostNotConfigured, + self.mgr.ensure_path_created_for_port, + 'tenant', 'network', 'cirros3', 'static', 'netname') + + def test_create_tenant_filter(self): + tenant = mocked.APIC_TENANT + self.mock_responses_for_create('vzFilter') + self.mock_responses_for_create('vzEntry') + filter_id = self.mgr.create_tenant_filter(tenant) + self.assert_responses_drained() + self.assertTrue(uuidutils.is_uuid_like(str(filter_id))) + + def test_create_tenant_filter_exc(self): + tenant = mocked.APIC_TENANT + self.mock_error_post_response(wexc.HTTPBadRequest) + self.mock_response_for_post('vzFilter') + self.assertRaises(cexc.ApicResponseNotOk, + 
self.mgr.create_tenant_filter, tenant) + self.assert_responses_drained() + + def test_set_contract_for_epg_consumer(self): + tenant = mocked.APIC_TENANT + epg = mocked.APIC_EPG + contract = mocked.APIC_CONTRACT + self.mock_responses_for_create('fvRsCons') + self.mgr.set_contract_for_epg(tenant, epg, contract) + self.assert_responses_drained() + + def test_set_contract_for_epg_provider(self): + tenant = mocked.APIC_TENANT + epg = mocked.APIC_EPG + contract = mocked.APIC_CONTRACT + epg_obj = mock.Mock() + epg_obj.epg_id = epg + epg_obj.provider = False + self.mock_db_query_filterby_first_return(epg_obj) + self.mock_responses_for_create('fvRsProv') + self.mock_response_for_post('vzBrCP') + self.mgr.set_contract_for_epg(tenant, epg, contract, provider=True) + self.assert_responses_drained() + self.assertTrue(self.mocked_session.merge.called) + self.assertTrue(self.mocked_session.flush.called) + self.assertTrue(epg_obj.provider) + + def test_set_contract_for_epg_provider_exc(self): + tenant = mocked.APIC_TENANT + epg = mocked.APIC_EPG + contract = mocked.APIC_CONTRACT + self.mock_error_post_response(wexc.HTTPBadRequest) + self.mock_response_for_post('vzBrCP') + self.mock_response_for_post('fvRsProv') + self.assertRaises(cexc.ApicResponseNotOk, + self.mgr.set_contract_for_epg, + tenant, epg, contract, provider=True) + self.assert_responses_drained() + + def test_delete_contract_for_epg_consumer(self): + tenant = mocked.APIC_TENANT + epg = mocked.APIC_EPG + contract = mocked.APIC_CONTRACT + self.mock_response_for_post('fvRsCons') + self.mgr.delete_contract_for_epg(tenant, epg, contract) + self.assert_responses_drained() + + def test_delete_contract_for_epg_provider(self): + tenant = mocked.APIC_TENANT + epg = mocked.APIC_EPG + contract = mocked.APIC_CONTRACT + epg_obj = mock.Mock() + epg_obj.epg_id = epg + '-other' + epg_obj.provider = False + self.mock_db_query_filterby_first_return(epg_obj) + self.mock_response_for_post('fvRsProv') + 
self.mock_response_for_post('fvRsCons') + self.mock_responses_for_create('fvRsProv') + self.mock_response_for_post('vzBrCP') + self.mgr.delete_contract_for_epg(tenant, epg, contract, provider=True) + self.assert_responses_drained() + self.assertTrue(self.mocked_session.merge.called) + self.assertTrue(self.mocked_session.flush.called) + self.assertTrue(epg_obj.provider) + + def test_create_tenant_contract_existing(self): + tenant = mocked.APIC_TENANT + contract = mocked.APIC_CONTRACT + self.mock_db_query_filterby_first_return(contract) + new_contract = self.mgr.create_tenant_contract(tenant) + self.assertEqual(new_contract, contract) + + def test_create_tenant_contract_new(self): + tenant = mocked.APIC_TENANT + contract = mocked.APIC_CONTRACT + dn = self.mgr.apic.vzBrCP.mo.dn(tenant, contract) + self.mock_db_query_filterby_first_return(None) + self.mock_responses_for_create('vzBrCP') + self.mock_response_for_get('vzBrCP', dn=dn) + self.mock_responses_for_create('vzSubj') + self.mock_responses_for_create('vzFilter') + self.mock_responses_for_create('vzEntry') + self.mock_responses_for_create('vzInTerm') + self.mock_responses_for_create('vzRsFiltAtt__In') + self.mock_responses_for_create('vzOutTerm') + self.mock_responses_for_create('vzRsFiltAtt__Out') + self.mock_responses_for_create('vzCPIf') + self.mock_responses_for_create('vzRsIf') + new_contract = self.mgr.create_tenant_contract(tenant) + self.assert_responses_drained() + self.assertTrue(self.mocked_session.add.called) + self.assertTrue(self.mocked_session.flush.called) + self.assertEqual(new_contract['tenant_id'], tenant) + + def test_create_tenant_contract_exc(self): + tenant = mocked.APIC_TENANT + self.mock_db_query_filterby_first_return(None) + self.mock_error_post_response(wexc.HTTPBadRequest) + self.mock_response_for_post('vzBrCP') + self.assertRaises(cexc.ApicResponseNotOk, + self.mgr.create_tenant_contract, tenant) + self.assert_responses_drained() diff --git 
a/neutron/tests/unit/ml2/drivers/cisco/apic/test_cisco_apic_mechanism_driver.py b/neutron/tests/unit/ml2/drivers/cisco/apic/test_cisco_apic_mechanism_driver.py new file mode 100644 index 000000000..6addd4382 --- /dev/null +++ b/neutron/tests/unit/ml2/drivers/cisco/apic/test_cisco_apic_mechanism_driver.py @@ -0,0 +1,226 @@ +# Copyright (c) 2014 Cisco Systems +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Henry Gessau, Cisco Systems + +import mock + +from oslo.config import cfg + +from neutron.plugins.ml2.drivers.cisco.apic import mechanism_apic as md +from neutron.plugins.ml2.drivers import type_vlan # noqa +from neutron.tests import base +from neutron.tests.unit.ml2.drivers.cisco.apic import ( + test_cisco_apic_common as mocked) + + +HOST_ID1 = 'ubuntu' +HOST_ID2 = 'rhel' +ENCAP = '101' + +SUBNET_GATEWAY = '10.3.2.1' +SUBNET_CIDR = '10.3.1.0/24' +SUBNET_NETMASK = '24' + +TEST_SEGMENT1 = 'test-segment1' +TEST_SEGMENT2 = 'test-segment2' + + +class TestCiscoApicMechDriver(base.BaseTestCase, + mocked.ControllerMixin, + mocked.ConfigMixin, + mocked.DbModelMixin): + + def setUp(self): + super(TestCiscoApicMechDriver, self).setUp() + mocked.ControllerMixin.set_up_mocks(self) + mocked.ConfigMixin.set_up_mocks(self) + mocked.DbModelMixin.set_up_mocks(self) + + self.mock_apic_manager_login_responses() + self.driver = md.APICMechanismDriver() + self.driver.vif_type = 'test-vif_type' + self.driver.cap_port_filter = 'test-cap_port_filter' + + 
def test_initialize(self): + cfg.CONF.set_override('network_vlan_ranges', ['physnet1:100:199'], + 'ml2_type_vlan') + ns = mocked.APIC_VLAN_NAME + mode = mocked.APIC_VLAN_MODE + self.mock_response_for_get('fvnsVlanInstP', name=ns, mode=mode) + self.mock_response_for_get('physDomP', name=mocked.APIC_DOMAIN) + self.mock_response_for_get('infraAttEntityP', + name=mocked.APIC_ATT_ENT_PROF) + self.mock_response_for_get('infraAccPortGrp', + name=mocked.APIC_ACC_PORT_GRP) + mock.patch('neutron.plugins.ml2.drivers.cisco.apic.apic_manager.' + 'APICManager.ensure_infra_created_on_apic').start() + self.driver.initialize() + self.session = self.driver.apic_manager.apic.session + self.assert_responses_drained() + + def test_update_port_postcommit(self): + net_ctx = self._get_network_context(mocked.APIC_TENANT, + mocked.APIC_NETWORK, + TEST_SEGMENT1) + port_ctx = self._get_port_context(mocked.APIC_TENANT, + mocked.APIC_NETWORK, + 'vm1', net_ctx, HOST_ID1) + mgr = self.driver.apic_manager = mock.Mock() + self.driver.update_port_postcommit(port_ctx) + mgr.ensure_tenant_created_on_apic.assert_called_once_with( + mocked.APIC_TENANT) + mgr.ensure_path_created_for_port.assert_called_once_with( + mocked.APIC_TENANT, mocked.APIC_NETWORK, HOST_ID1, + ENCAP, mocked.APIC_NETWORK + '-name') + + def test_create_network_postcommit(self): + ctx = self._get_network_context(mocked.APIC_TENANT, + mocked.APIC_NETWORK, + TEST_SEGMENT1) + mgr = self.driver.apic_manager = mock.Mock() + self.driver.create_network_postcommit(ctx) + mgr.ensure_bd_created_on_apic.assert_called_once_with( + mocked.APIC_TENANT, mocked.APIC_NETWORK) + mgr.ensure_epg_created_for_network.assert_called_once_with( + mocked.APIC_TENANT, mocked.APIC_NETWORK, + mocked.APIC_NETWORK + '-name') + + def test_delete_network_postcommit(self): + ctx = self._get_network_context(mocked.APIC_TENANT, + mocked.APIC_NETWORK, + TEST_SEGMENT1) + mgr = self.driver.apic_manager = mock.Mock() + self.driver.delete_network_postcommit(ctx) + 
mgr.delete_bd_on_apic.assert_called_once_with( + mocked.APIC_TENANT, mocked.APIC_NETWORK) + mgr.delete_epg_for_network.assert_called_once_with( + mocked.APIC_TENANT, mocked.APIC_NETWORK) + + def test_create_subnet_postcommit(self): + net_ctx = self._get_network_context(mocked.APIC_TENANT, + mocked.APIC_NETWORK, + TEST_SEGMENT1) + subnet_ctx = self._get_subnet_context(SUBNET_GATEWAY, + SUBNET_CIDR, + net_ctx) + mgr = self.driver.apic_manager = mock.Mock() + self.driver.create_subnet_postcommit(subnet_ctx) + mgr.ensure_subnet_created_on_apic.assert_called_once_with( + mocked.APIC_TENANT, mocked.APIC_NETWORK, + '%s/%s' % (SUBNET_GATEWAY, SUBNET_NETMASK)) + + def _get_network_context(self, tenant_id, net_id, seg_id=None, + seg_type='vlan'): + network = {'id': net_id, + 'name': net_id + '-name', + 'tenant_id': tenant_id, + 'provider:segmentation_id': seg_id} + if seg_id: + network_segments = [{'id': seg_id, + 'segmentation_id': ENCAP, + 'network_type': seg_type, + 'physical_network': 'physnet1'}] + else: + network_segments = [] + return FakeNetworkContext(network, network_segments) + + def _get_subnet_context(self, gateway_ip, cidr, network): + subnet = {'tenant_id': network.current['tenant_id'], + 'network_id': network.current['id'], + 'id': '[%s/%s]' % (gateway_ip, cidr), + 'gateway_ip': gateway_ip, + 'cidr': cidr} + return FakeSubnetContext(subnet, network) + + def _get_port_context(self, tenant_id, net_id, vm_id, network, host): + port = {'device_id': vm_id, + 'device_owner': 'compute', + 'binding:host_id': host, + 'tenant_id': tenant_id, + 'id': mocked.APIC_PORT, + 'name': mocked.APIC_PORT, + 'network_id': net_id} + return FakePortContext(port, network) + + +class FakeNetworkContext(object): + """To generate network context for testing purposes only.""" + + def __init__(self, network, segments): + self._network = network + self._segments = segments + + @property + def current(self): + return self._network + + @property + def network_segments(self): + return 
self._segments + + +class FakeSubnetContext(object): + """To generate subnet context for testing purposes only.""" + + def __init__(self, subnet, network): + self._subnet = subnet + self._network = network + + @property + def current(self): + return self._subnet + + @property + def network(self): + return self._network + + +class FakePortContext(object): + """To generate port context for testing purposes only.""" + + def __init__(self, port, network): + self._fake_plugin = mock.Mock() + self._fake_plugin.get_ports.return_value = [] + self._fake_plugin_context = None + self._port = port + self._network = network + if network.network_segments: + self._bound_segment = network.network_segments[0] + else: + self._bound_segment = None + + @property + def current(self): + return self._port + + @property + def _plugin(self): + return self._fake_plugin + + @property + def _plugin_context(self): + return self._fake_plugin_context + + @property + def network(self): + return self._network + + @property + def bound_segment(self): + return self._bound_segment + + def set_binding(self, segment_id, vif_type, cap_port_filter): + pass diff --git a/neutron/tests/unit/ml2/drivers/cisco/nexus/__init__.py b/neutron/tests/unit/ml2/drivers/cisco/nexus/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/neutron/tests/unit/ml2/drivers/cisco/nexus/test_cisco_config.py b/neutron/tests/unit/ml2/drivers/cisco/nexus/test_cisco_config.py new file mode 100644 index 000000000..55f0db3da --- /dev/null +++ b/neutron/tests/unit/ml2/drivers/cisco/nexus/test_cisco_config.py @@ -0,0 +1,71 @@ +# Copyright (c) 2014 Cisco Systems, Inc. +# All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import mock +from oslo.config import cfg + +from neutron.plugins.ml2.drivers.cisco.nexus import config as cisco_config +from neutron.tests import base + + +class TestCiscoNexusPluginConfig(base.BaseTestCase): + + def setUp(self): + self.config_parse() + super(TestCiscoNexusPluginConfig, self).setUp() + + def test_config_parse_error(self): + """Check that config error is raised upon config parser failure.""" + with mock.patch.object(cfg, 'MultiConfigParser') as parser: + parser.return_value.read.return_value = [] + self.assertRaises(cfg.Error, cisco_config.ML2MechCiscoConfig) + + def test_create_device_dictionary(self): + """Test creation of the device dictionary based on nexus config.""" + test_config = { + 'ml2_mech_cisco_nexus:1.1.1.1': { + 'username': ['admin'], + 'password': ['mySecretPassword'], + 'ssh_port': [22], + 'compute1': ['1/1'], + 'compute2': ['1/2'], + }, + 'ml2_mech_cisco_nexus:2.2.2.2': { + 'username': ['admin'], + 'password': ['mySecretPassword'], + 'ssh_port': [22], + 'compute3': ['1/1'], + 'compute4': ['1/2'], + }, + } + expected_dev_dict = { + ('1.1.1.1', 'username'): 'admin', + ('1.1.1.1', 'password'): 'mySecretPassword', + ('1.1.1.1', 'ssh_port'): 22, + ('1.1.1.1', 'compute1'): '1/1', + ('1.1.1.1', 'compute2'): '1/2', + ('2.2.2.2', 'username'): 'admin', + ('2.2.2.2', 'password'): 'mySecretPassword', + ('2.2.2.2', 'ssh_port'): 22, + ('2.2.2.2', 'compute3'): '1/1', + ('2.2.2.2', 'compute4'): '1/2', + } + with mock.patch.object(cfg, 'MultiConfigParser') as parser: + parser.return_value.read.return_value = cfg.CONF.config_file + 
parser.return_value.parsed = [test_config] + cisco_config.ML2MechCiscoConfig() + self.assertEqual(expected_dev_dict, + cisco_config.ML2MechCiscoConfig.nexus_dict) diff --git a/neutron/tests/unit/ml2/drivers/cisco/nexus/test_cisco_mech.py b/neutron/tests/unit/ml2/drivers/cisco/nexus/test_cisco_mech.py new file mode 100644 index 000000000..55717ebe2 --- /dev/null +++ b/neutron/tests/unit/ml2/drivers/cisco/nexus/test_cisco_mech.py @@ -0,0 +1,715 @@ +# Copyright (c) 2012 OpenStack Foundation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import contextlib +import mock + +import webob.exc as wexc + +from neutron.api.v2 import base +from neutron.common import constants as n_const +from neutron import context +from neutron.extensions import portbindings +from neutron import manager +from neutron.openstack.common import log as logging +from neutron.plugins.common import constants as p_const +from neutron.plugins.ml2 import config as ml2_config +from neutron.plugins.ml2 import driver_api as api +from neutron.plugins.ml2 import driver_context +from neutron.plugins.ml2.drivers.cisco.nexus import config as cisco_config +from neutron.plugins.ml2.drivers.cisco.nexus import exceptions as c_exc +from neutron.plugins.ml2.drivers.cisco.nexus import mech_cisco_nexus +from neutron.plugins.ml2.drivers.cisco.nexus import nexus_db_v2 +from neutron.plugins.ml2.drivers.cisco.nexus import nexus_network_driver +from neutron.plugins.ml2.drivers import type_vlan as vlan_config +from neutron.tests.unit import test_db_plugin + + +LOG = logging.getLogger(__name__) +ML2_PLUGIN = 'neutron.plugins.ml2.plugin.Ml2Plugin' +PHYS_NET = 'physnet1' +COMP_HOST_NAME = 'testhost' +COMP_HOST_NAME_2 = 'testhost_2' +VLAN_START = 1000 +VLAN_END = 1100 +NEXUS_IP_ADDR = '1.1.1.1' +NETWORK_NAME = 'test_network' +NETWORK_NAME_2 = 'test_network_2' +NEXUS_INTERFACE = '1/1' +NEXUS_INTERFACE_2 = '1/2' +CIDR_1 = '10.0.0.0/24' +CIDR_2 = '10.0.1.0/24' +DEVICE_ID_1 = '11111111-1111-1111-1111-111111111111' +DEVICE_ID_2 = '22222222-2222-2222-2222-222222222222' +DEVICE_OWNER = 'compute:None' +BOUND_SEGMENT1 = {api.NETWORK_TYPE: p_const.TYPE_VLAN, + api.PHYSICAL_NETWORK: PHYS_NET, + api.SEGMENTATION_ID: VLAN_START} +BOUND_SEGMENT2 = {api.NETWORK_TYPE: p_const.TYPE_VLAN, + api.PHYSICAL_NETWORK: PHYS_NET, + api.SEGMENTATION_ID: VLAN_START + 1} + + +class CiscoML2MechanismTestCase(test_db_plugin.NeutronDbPluginV2TestCase): + + def setUp(self): + """Configure for end-to-end neutron testing using a mock ncclient. 
+ + This setup includes: + - Configure the ML2 plugin to use VLANs in the range of 1000-1100. + - Configure the Cisco mechanism driver to use an imaginary switch + at NEXUS_IP_ADDR. + - Create a mock NETCONF client (ncclient) for the Cisco mechanism + driver + + """ + + # Configure the ML2 mechanism drivers and network types + ml2_opts = { + 'mechanism_drivers': ['cisco_nexus'], + 'tenant_network_types': ['vlan'], + } + for opt, val in ml2_opts.items(): + ml2_config.cfg.CONF.set_override(opt, val, 'ml2') + + # Configure the ML2 VLAN parameters + phys_vrange = ':'.join([PHYS_NET, str(VLAN_START), str(VLAN_END)]) + vlan_config.cfg.CONF.set_override('network_vlan_ranges', + [phys_vrange], + 'ml2_type_vlan') + + # Configure the Cisco Nexus mechanism driver + nexus_config = { + (NEXUS_IP_ADDR, 'username'): 'admin', + (NEXUS_IP_ADDR, 'password'): 'mySecretPassword', + (NEXUS_IP_ADDR, 'ssh_port'): 22, + (NEXUS_IP_ADDR, COMP_HOST_NAME): NEXUS_INTERFACE, + (NEXUS_IP_ADDR, COMP_HOST_NAME_2): NEXUS_INTERFACE_2} + nexus_patch = mock.patch.dict( + cisco_config.ML2MechCiscoConfig.nexus_dict, + nexus_config) + nexus_patch.start() + self.addCleanup(nexus_patch.stop) + + # The NETCONF client module is not included in the DevStack + # distribution, so mock this module for unit testing. + self.mock_ncclient = mock.Mock() + mock.patch.object(nexus_network_driver.CiscoNexusDriver, + '_import_ncclient', + return_value=self.mock_ncclient).start() + + # Mock port context values for bound_segments and 'status'. 
+ self.mock_bound_segment = mock.patch.object( + driver_context.PortContext, + 'bound_segment', + new_callable=mock.PropertyMock).start() + self.mock_bound_segment.return_value = BOUND_SEGMENT1 + + self.mock_original_bound_segment = mock.patch.object( + driver_context.PortContext, + 'original_bound_segment', + new_callable=mock.PropertyMock).start() + self.mock_original_bound_segment.return_value = None + + mock_status = mock.patch.object( + mech_cisco_nexus.CiscoNexusMechanismDriver, + '_is_status_active').start() + mock_status.return_value = n_const.PORT_STATUS_ACTIVE + + super(CiscoML2MechanismTestCase, self).setUp(ML2_PLUGIN) + + self.port_create_status = 'DOWN' + + @contextlib.contextmanager + def _patch_ncclient(self, attr, value): + """Configure an attribute on the mock ncclient module. + + This method can be used to inject errors by setting a side effect + or a return value for an ncclient method. + + :param attr: ncclient attribute (typically method) to be configured. + :param value: Value to be configured on the attribute. + + """ + # Configure attribute. + config = {attr: value} + self.mock_ncclient.configure_mock(**config) + # Continue testing + yield + # Unconfigure attribute + config = {attr: None} + self.mock_ncclient.configure_mock(**config) + + @staticmethod + def _config_dependent_side_effect(match_config, exc): + """Generates a config-dependent side effect for ncclient edit_config. + + This method generates a mock side-effect function which can be + configured on the mock ncclient module for the edit_config method. + This side effect will cause a given exception to be raised whenever + the XML config string that is passed to edit_config contains all + words in a given match config string. + + :param match_config: String containing keywords to be matched + :param exc: Exception to be raised when match is found + :return: Side effect function for the mock ncclient module's + edit_config method. 
+ + """ + keywords = match_config.split() + + def _side_effect_function(target, config): + if all(word in config for word in keywords): + raise exc + return _side_effect_function + + def _is_in_nexus_cfg(self, words): + """Check if any config sent to Nexus contains all words in a list.""" + for call in (self.mock_ncclient.connect.return_value. + edit_config.mock_calls): + configlet = call[2]['config'] + if all(word in configlet for word in words): + return True + return False + + def _is_in_last_nexus_cfg(self, words): + """Confirm last config sent to Nexus contains specified keywords.""" + last_cfg = (self.mock_ncclient.connect.return_value. + edit_config.mock_calls[-1][2]['config']) + return all(word in last_cfg for word in words) + + def _is_vlan_configured(self, vlan_creation_expected=True, + add_keyword_expected=False): + vlan_created = self._is_in_nexus_cfg(['vlan', 'vlan-name']) + add_appears = self._is_in_last_nexus_cfg(['add']) + return (self._is_in_last_nexus_cfg(['allowed', 'vlan']) and + vlan_created == vlan_creation_expected and + add_appears == add_keyword_expected) + + def _is_vlan_unconfigured(self, vlan_deletion_expected=True): + vlan_deleted = self._is_in_last_nexus_cfg( + ['no', 'vlan', 'vlan-id-create-delete']) + return (self._is_in_nexus_cfg(['allowed', 'vlan', 'remove']) and + vlan_deleted == vlan_deletion_expected) + + +class TestCiscoBasicGet(CiscoML2MechanismTestCase, + test_db_plugin.TestBasicGet): + + pass + + +class TestCiscoV2HTTPResponse(CiscoML2MechanismTestCase, + test_db_plugin.TestV2HTTPResponse): + + pass + + +class TestCiscoPortsV2(CiscoML2MechanismTestCase, + test_db_plugin.TestPortsV2): + + @contextlib.contextmanager + def _create_resources(self, name=NETWORK_NAME, cidr=CIDR_1, + device_id=DEVICE_ID_1, + host_id=COMP_HOST_NAME): + """Create network, subnet, and port resources for test cases. + + Create a network, subnet, port and then update the port, yield the + result, then delete the port, subnet and network. 
+ + :param name: Name of network to be created. + :param cidr: cidr address of subnetwork to be created. + :param device_id: Device ID to use for port to be created/updated. + :param host_id: Host ID to use for port create/update. + + """ + with self.network(name=name) as network: + with self.subnet(network=network, cidr=cidr) as subnet: + with self.port(subnet=subnet, cidr=cidr) as port: + data = {'port': {portbindings.HOST_ID: host_id, + 'device_id': device_id, + 'device_owner': 'compute:none', + 'admin_state_up': True}} + req = self.new_update_request('ports', data, + port['port']['id']) + yield req.get_response(self.api) + + def _assertExpectedHTTP(self, status, exc): + """Confirm that an HTTP status corresponds to an expected exception. + + Confirm that an HTTP status which has been returned for an + neutron API request matches the HTTP status corresponding + to an expected exception. + + :param status: HTTP status + :param exc: Expected exception + + """ + if exc in base.FAULT_MAP: + expected_http = base.FAULT_MAP[exc].code + else: + expected_http = wexc.HTTPInternalServerError.code + self.assertEqual(status, expected_http) + + def test_create_ports_bulk_emulated_plugin_failure(self): + real_has_attr = hasattr + + #ensures the API chooses the emulation code path + def fakehasattr(item, attr): + if attr.endswith('__native_bulk_support'): + return False + return real_has_attr(item, attr) + + with mock.patch('__builtin__.hasattr', + new=fakehasattr): + plugin_obj = manager.NeutronManager.get_plugin() + orig = plugin_obj.create_port + with mock.patch.object(plugin_obj, + 'create_port') as patched_plugin: + + def side_effect(*args, **kwargs): + return self._do_side_effect(patched_plugin, orig, + *args, **kwargs) + + patched_plugin.side_effect = side_effect + with self.network() as net: + res = self._create_port_bulk(self.fmt, 2, + net['network']['id'], + 'test', + True) + # Expect an internal server error as we injected a fault + 
self._validate_behavior_on_bulk_failure( + res, + 'ports', + wexc.HTTPInternalServerError.code) + + def test_create_ports_bulk_native(self): + if self._skip_native_bulk: + self.skipTest("Plugin does not support native bulk port create") + + def test_create_ports_bulk_emulated(self): + if self._skip_native_bulk: + self.skipTest("Plugin does not support native bulk port create") + + def test_create_ports_bulk_native_plugin_failure(self): + if self._skip_native_bulk: + self.skipTest("Plugin does not support native bulk port create") + ctx = context.get_admin_context() + with self.network() as net: + plugin_obj = manager.NeutronManager.get_plugin() + orig = plugin_obj.create_port + with mock.patch.object(plugin_obj, + 'create_port') as patched_plugin: + + def side_effect(*args, **kwargs): + return self._do_side_effect(patched_plugin, orig, + *args, **kwargs) + + patched_plugin.side_effect = side_effect + res = self._create_port_bulk(self.fmt, 2, net['network']['id'], + 'test', True, context=ctx) + # We expect an internal server error as we injected a fault + self._validate_behavior_on_bulk_failure( + res, + 'ports', + wexc.HTTPInternalServerError.code) + + def test_nexus_enable_vlan_cmd(self): + """Verify the syntax of the command to enable a vlan on an intf. + + Confirm that for the first VLAN configured on a Nexus interface, + the command string sent to the switch does not contain the + keyword 'add'. + + Confirm that for the second VLAN configured on a Nexus interface, + the command string sent to the switch contains the keyword 'add'. 
+ + """ + # First vlan should be configured without 'add' keyword + with self._create_resources(): + self.assertTrue(self._is_vlan_configured( + vlan_creation_expected=True, + add_keyword_expected=False)) + self.mock_ncclient.reset_mock() + self.mock_bound_segment.return_value = BOUND_SEGMENT2 + + # Second vlan should be configured with 'add' keyword + with self._create_resources(name=NETWORK_NAME_2, + device_id=DEVICE_ID_2, + cidr=CIDR_2): + self.assertTrue(self._is_vlan_configured( + vlan_creation_expected=True, + add_keyword_expected=True)) + + # Return to first segment for delete port calls. + self.mock_bound_segment.return_value = BOUND_SEGMENT1 + + def test_nexus_add_trunk(self): + """Verify syntax to enable a vlan on an interface. + + Test also verifies that the vlan interface is not created. + + Test of the following ml2_conf_cisco_ini config: + [ml2_mech_cisco_nexus:1.1.1.1] + hostA=1/1 + hostB=1/2 + where vlan_id = 100 + + Confirm that for the first host configured on a Nexus interface, + the command string sent to the switch does not contain the + keyword 'add'. + + Confirm that for the second host configured on a Nexus interface, + the command string sent to the switch does not contain + the keyword 'name' [signifies vlan intf creation]. + + """ + with self._create_resources(name='net1', cidr=CIDR_1): + self.assertTrue(self._is_in_last_nexus_cfg(['allowed', 'vlan'])) + self.assertFalse(self._is_in_last_nexus_cfg(['add'])) + with self._create_resources(name='net2', + cidr=CIDR_2, host_id=COMP_HOST_NAME_2): + self.assertTrue( + self._is_in_last_nexus_cfg(['allowed', 'vlan'])) + self.assertFalse(self._is_in_last_nexus_cfg(['name'])) + + def test_nexus_connect_fail(self): + """Test failure to connect to a Nexus switch. + + While creating a network, subnet, and port, simulate a connection + failure to a nexus switch. Confirm that the expected HTTP code + is returned for the create port operation. 
+ + """ + with self._patch_ncclient('connect.side_effect', + AttributeError): + with self._create_resources() as result: + self._assertExpectedHTTP(result.status_int, + c_exc.NexusConnectFailed) + + def test_nexus_vlan_config_two_hosts(self): + """Verify config/unconfig of vlan on two compute hosts.""" + + @contextlib.contextmanager + def _create_port_check_vlan(comp_host_name, device_id, + vlan_creation_expected=True): + with self.port(subnet=subnet, fmt=self.fmt) as port: + data = {'port': {portbindings.HOST_ID: comp_host_name, + 'device_id': device_id, + 'device_owner': DEVICE_OWNER, + 'admin_state_up': True}} + req = self.new_update_request('ports', data, + port['port']['id']) + req.get_response(self.api) + self.assertTrue(self._is_vlan_configured( + vlan_creation_expected=vlan_creation_expected, + add_keyword_expected=False)) + self.mock_ncclient.reset_mock() + yield + + # Create network and subnet + with self.network(name=NETWORK_NAME) as network: + with self.subnet(network=network, cidr=CIDR_1) as subnet: + + # Create an instance on first compute host + with _create_port_check_vlan(COMP_HOST_NAME, DEVICE_ID_1, + vlan_creation_expected=True): + # Create an instance on second compute host + with _create_port_check_vlan(COMP_HOST_NAME_2, DEVICE_ID_2, + vlan_creation_expected=False): + pass + + # Instance on second host is now terminated. + # Vlan should be untrunked from port, but vlan should + # still exist on the switch. + self.assertTrue(self._is_vlan_unconfigured( + vlan_deletion_expected=False)) + self.mock_ncclient.reset_mock() + + # Instance on first host is now terminated. + # Vlan should be untrunked from port and vlan should have + # been deleted from the switch. + self.assertTrue(self._is_vlan_unconfigured( + vlan_deletion_expected=True)) + + def test_nexus_vm_migration(self): + """Verify VM (live) migration. + + Simulate the following: + Nova informs neutron of live-migration with port-update(new host). 
+ This should trigger two update_port_pre/postcommit() calls. + + The first one should only change the current host_id and remove the + binding resulting in the mechanism drivers receiving: + PortContext.original['binding:host_id']: previous value + PortContext.original_bound_segment: previous value + PortContext.current['binding:host_id']: current (new) value + PortContext.bound_segment: None + + The second one binds the new host resulting in the mechanism + drivers receiving: + PortContext.original['binding:host_id']: previous value + PortContext.original_bound_segment: None + PortContext.current['binding:host_id']: previous value + PortContext.bound_segment: new value + """ + + # Create network, subnet and port. + with self._create_resources() as result: + # Verify initial database entry. + # Use port_id to verify that 1st host name was used. + binding = nexus_db_v2.get_nexusvm_bindings(VLAN_START, + DEVICE_ID_1)[0] + intf_type, nexus_port = binding.port_id.split(':') + self.assertEqual(nexus_port, NEXUS_INTERFACE) + + port = self.deserialize(self.fmt, result) + port_id = port['port']['id'] + + # Trigger update event to unbind segment. + # Results in port being deleted from nexus DB and switch. + data = {'port': {portbindings.HOST_ID: COMP_HOST_NAME_2}} + self.mock_bound_segment.return_value = None + self.mock_original_bound_segment.return_value = BOUND_SEGMENT1 + self.new_update_request('ports', data, + port_id).get_response(self.api) + + # Verify that port entry has been deleted. + self.assertRaises(c_exc.NexusPortBindingNotFound, + nexus_db_v2.get_nexusvm_bindings, + VLAN_START, DEVICE_ID_1) + + # Trigger update event to bind segment with new host. + self.mock_bound_segment.return_value = BOUND_SEGMENT1 + self.mock_original_bound_segment.return_value = None + self.new_update_request('ports', data, + port_id).get_response(self.api) + + # Verify that port entry has been added using new host name. + # Use port_id to verify that 2nd host name was used. 
+ binding = nexus_db_v2.get_nexusvm_bindings(VLAN_START, + DEVICE_ID_1)[0] + intf_type, nexus_port = binding.port_id.split(':') + self.assertEqual(nexus_port, NEXUS_INTERFACE_2) + + def test_nexus_config_fail(self): + """Test a Nexus switch configuration failure. + + While creating a network, subnet, and port, simulate a nexus + switch configuration error. Confirm that the expected HTTP code + is returned for the create port operation. + + """ + with self._patch_ncclient( + 'connect.return_value.edit_config.side_effect', + AttributeError): + with self._create_resources() as result: + self._assertExpectedHTTP(result.status_int, + c_exc.NexusConfigFailed) + + def test_nexus_extended_vlan_range_failure(self): + """Test that extended VLAN range config errors are ignored. + + Some versions of Nexus switch do not allow state changes for + the extended VLAN range (1006-4094), but these errors can be + ignored (default values are appropriate). Test that such errors + are ignored by the Nexus plugin. + + """ + def mock_edit_config_a(target, config): + if all(word in config for word in ['state', 'active']): + raise Exception("Can't modify state for extended") + + with self._patch_ncclient( + 'connect.return_value.edit_config.side_effect', + mock_edit_config_a): + with self._create_resources() as result: + self.assertEqual(result.status_int, wexc.HTTPOk.code) + + def mock_edit_config_b(target, config): + if all(word in config for word in ['no', 'shutdown']): + raise Exception("Command is only allowed on VLAN") + + with self._patch_ncclient( + 'connect.return_value.edit_config.side_effect', + mock_edit_config_b): + with self._create_resources() as result: + self.assertEqual(result.status_int, wexc.HTTPOk.code) + + def test_nexus_vlan_config_rollback(self): + """Test rollback following Nexus VLAN state config failure. 
+ + Test that the Cisco Nexus plugin correctly deletes the VLAN + on the Nexus switch when the 'state active' command fails (for + a reason other than state configuration change is rejected + for the extended VLAN range). + + """ + vlan_state_configs = ['state active', 'no shutdown'] + for config in vlan_state_configs: + with self._patch_ncclient( + 'connect.return_value.edit_config.side_effect', + self._config_dependent_side_effect(config, ValueError)): + with self._create_resources() as result: + # Confirm that the last configuration sent to the Nexus + # switch was deletion of the VLAN. + self.assertTrue( + self._is_in_last_nexus_cfg(['<no>', '<vlan>']) + ) + self._assertExpectedHTTP(result.status_int, + c_exc.NexusConfigFailed) + + def test_nexus_host_not_configured(self): + """Test handling of a NexusComputeHostNotConfigured exception. + + Test the Cisco NexusComputeHostNotConfigured exception by using + a fictitious host name during port creation. + + """ + with self._create_resources(host_id='fake_host') as result: + self._assertExpectedHTTP(result.status_int, + c_exc.NexusComputeHostNotConfigured) + + def test_nexus_missing_fields(self): + """Test handling of a NexusMissingRequiredFields exception. + + Test the Cisco NexusMissingRequiredFields exception by using + empty host_id and device_id values during port creation. 
+ + """ + with self._create_resources(device_id='', host_id='') as result: + self._assertExpectedHTTP(result.status_int, + c_exc.NexusMissingRequiredFields) + + +class TestCiscoNetworksV2(CiscoML2MechanismTestCase, + test_db_plugin.TestNetworksV2): + + def test_create_networks_bulk_emulated_plugin_failure(self): + real_has_attr = hasattr + + def fakehasattr(item, attr): + if attr.endswith('__native_bulk_support'): + return False + return real_has_attr(item, attr) + + plugin_obj = manager.NeutronManager.get_plugin() + orig = plugin_obj.create_network + #ensures the API choose the emulation code path + with mock.patch('__builtin__.hasattr', + new=fakehasattr): + with mock.patch.object(plugin_obj, + 'create_network') as patched_plugin: + def side_effect(*args, **kwargs): + return self._do_side_effect(patched_plugin, orig, + *args, **kwargs) + patched_plugin.side_effect = side_effect + res = self._create_network_bulk(self.fmt, 2, 'test', True) + LOG.debug("response is %s" % res) + # We expect an internal server error as we injected a fault + self._validate_behavior_on_bulk_failure( + res, + 'networks', + wexc.HTTPInternalServerError.code) + + def test_create_networks_bulk_native_plugin_failure(self): + if self._skip_native_bulk: + self.skipTest("Plugin does not support native bulk network create") + plugin_obj = manager.NeutronManager.get_plugin() + orig = plugin_obj.create_network + with mock.patch.object(plugin_obj, + 'create_network') as patched_plugin: + + def side_effect(*args, **kwargs): + return self._do_side_effect(patched_plugin, orig, + *args, **kwargs) + + patched_plugin.side_effect = side_effect + res = self._create_network_bulk(self.fmt, 2, 'test', True) + # We expect an internal server error as we injected a fault + self._validate_behavior_on_bulk_failure( + res, + 'networks', + wexc.HTTPInternalServerError.code) + + +class TestCiscoSubnetsV2(CiscoML2MechanismTestCase, + test_db_plugin.TestSubnetsV2): + + def 
test_create_subnets_bulk_emulated_plugin_failure(self): + real_has_attr = hasattr + + #ensures the API choose the emulation code path + def fakehasattr(item, attr): + if attr.endswith('__native_bulk_support'): + return False + return real_has_attr(item, attr) + + with mock.patch('__builtin__.hasattr', + new=fakehasattr): + plugin_obj = manager.NeutronManager.get_plugin() + orig = plugin_obj.create_subnet + with mock.patch.object(plugin_obj, + 'create_subnet') as patched_plugin: + + def side_effect(*args, **kwargs): + self._do_side_effect(patched_plugin, orig, + *args, **kwargs) + + patched_plugin.side_effect = side_effect + with self.network() as net: + res = self._create_subnet_bulk(self.fmt, 2, + net['network']['id'], + 'test') + # We expect an internal server error as we injected a fault + self._validate_behavior_on_bulk_failure( + res, + 'subnets', + wexc.HTTPInternalServerError.code) + + def test_create_subnets_bulk_native_plugin_failure(self): + if self._skip_native_bulk: + self.skipTest("Plugin does not support native bulk subnet create") + plugin_obj = manager.NeutronManager.get_plugin() + orig = plugin_obj.create_subnet + with mock.patch.object(plugin_obj, + 'create_subnet') as patched_plugin: + def side_effect(*args, **kwargs): + return self._do_side_effect(patched_plugin, orig, + *args, **kwargs) + + patched_plugin.side_effect = side_effect + with self.network() as net: + res = self._create_subnet_bulk(self.fmt, 2, + net['network']['id'], + 'test') + + # We expect an internal server error as we injected a fault + self._validate_behavior_on_bulk_failure( + res, + 'subnets', + wexc.HTTPInternalServerError.code) + + +class TestCiscoPortsV2XML(TestCiscoPortsV2): + fmt = 'xml' + + +class TestCiscoNetworksV2XML(TestCiscoNetworksV2): + fmt = 'xml' + + +class TestCiscoSubnetsV2XML(TestCiscoSubnetsV2): + fmt = 'xml' diff --git a/neutron/tests/unit/ml2/drivers/cisco/nexus/test_cisco_nexus.py b/neutron/tests/unit/ml2/drivers/cisco/nexus/test_cisco_nexus.py new file 
mode 100644 index 000000000..31573b82b --- /dev/null +++ b/neutron/tests/unit/ml2/drivers/cisco/nexus/test_cisco_nexus.py @@ -0,0 +1,201 @@ +# Copyright (c) 2013 OpenStack Foundation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import collections +import mock +import testtools + +from neutron.common import constants as n_const +from neutron.db import api as db +from neutron.extensions import portbindings +from neutron.openstack.common import importutils +from neutron.plugins.ml2 import driver_api as api +from neutron.plugins.ml2.drivers.cisco.nexus import constants +from neutron.plugins.ml2.drivers.cisco.nexus import exceptions +from neutron.plugins.ml2.drivers.cisco.nexus import mech_cisco_nexus +from neutron.plugins.ml2.drivers.cisco.nexus import nexus_db_v2 +from neutron.plugins.ml2.drivers.cisco.nexus import nexus_network_driver +from neutron.tests import base + + +NEXUS_IP_ADDRESS = '1.1.1.1' +NEXUS_IP_ADDRESS_PC = '2.2.2.2' +HOST_NAME_1 = 'testhost1' +HOST_NAME_2 = 'testhost2' +HOST_NAME_PC = 'testpchost' +INSTANCE_1 = 'testvm1' +INSTANCE_2 = 'testvm2' +INSTANCE_PC = 'testpcvm' +NEXUS_PORT_1 = 'ethernet:1/10' +NEXUS_PORT_2 = 'ethernet:1/20' +NEXUS_PORTCHANNELS = 'portchannel:2' +VLAN_ID_1 = 267 +VLAN_ID_2 = 265 +VLAN_ID_PC = 268 +DEVICE_OWNER = 'compute:test' +NEXUS_SSH_PORT = '22' +PORT_STATE = n_const.PORT_STATUS_ACTIVE +NETWORK_TYPE = 'vlan' +NEXUS_DRIVER = ('neutron.plugins.ml2.drivers.cisco.nexus.' 
+ 'nexus_network_driver.CiscoNexusDriver') + + +class FakeNetworkContext(object): + + """Network context for testing purposes only.""" + + def __init__(self, segment_id): + self._network_segments = {api.SEGMENTATION_ID: segment_id, + api.NETWORK_TYPE: NETWORK_TYPE} + + @property + def network_segments(self): + return self._network_segments + + +class FakePortContext(object): + + """Port context for testing purposes only.""" + + def __init__(self, device_id, host_name, network_context): + self._port = { + 'status': PORT_STATE, + 'device_id': device_id, + 'device_owner': DEVICE_OWNER, + portbindings.HOST_ID: host_name + } + self._network = network_context + self._segment = network_context.network_segments + + @property + def current(self): + return self._port + + @property + def network(self): + return self._network + + @property + def bound_segment(self): + return self._segment + + +class TestCiscoNexusDevice(base.BaseTestCase): + + """Unit tests for Cisco ML2 Nexus device driver.""" + + TestConfigObj = collections.namedtuple( + 'TestConfigObj', + 'nexus_ip_addr host_name nexus_port instance_id vlan_id') + + test_configs = { + 'test_config1': TestConfigObj( + NEXUS_IP_ADDRESS, + HOST_NAME_1, + NEXUS_PORT_1, + INSTANCE_1, + VLAN_ID_1), + 'test_config2': TestConfigObj( + NEXUS_IP_ADDRESS, + HOST_NAME_2, + NEXUS_PORT_2, + INSTANCE_2, + VLAN_ID_2), + 'test_config_portchannel': TestConfigObj( + NEXUS_IP_ADDRESS_PC, + HOST_NAME_PC, + NEXUS_PORTCHANNELS, + INSTANCE_PC, + VLAN_ID_PC), + } + + def setUp(self): + """Sets up mock ncclient, and switch and credentials dictionaries.""" + super(TestCiscoNexusDevice, self).setUp() + + # Use a mock netconf client + mock_ncclient = mock.Mock() + mock.patch.object(nexus_network_driver.CiscoNexusDriver, + '_import_ncclient', + return_value=mock_ncclient).start() + + def new_nexus_init(mech_instance): + mech_instance.driver = importutils.import_object(NEXUS_DRIVER) + + mech_instance._nexus_switches = {} + for name, config in 
TestCiscoNexusDevice.test_configs.items(): + ip_addr = config.nexus_ip_addr + host_name = config.host_name + nexus_port = config.nexus_port + mech_instance._nexus_switches[(ip_addr, + host_name)] = nexus_port + mech_instance._nexus_switches[(ip_addr, + 'ssh_port')] = NEXUS_SSH_PORT + mech_instance._nexus_switches[(ip_addr, + constants.USERNAME)] = 'admin' + mech_instance._nexus_switches[(ip_addr, + constants.PASSWORD)] = 'password' + mech_instance.driver.nexus_switches = ( + mech_instance._nexus_switches) + + db.configure_db() + + mock.patch.object(mech_cisco_nexus.CiscoNexusMechanismDriver, + '__init__', new=new_nexus_init).start() + self._cisco_mech_driver = (mech_cisco_nexus. + CiscoNexusMechanismDriver()) + + self.addCleanup(db.clear_db) + + def _create_delete_port(self, port_config): + """Tests creation and deletion of a virtual port.""" + nexus_ip_addr = port_config.nexus_ip_addr + host_name = port_config.host_name + nexus_port = port_config.nexus_port + instance_id = port_config.instance_id + vlan_id = port_config.vlan_id + + network_context = FakeNetworkContext(vlan_id) + port_context = FakePortContext(instance_id, host_name, + network_context) + + self._cisco_mech_driver.update_port_precommit(port_context) + self._cisco_mech_driver.update_port_postcommit(port_context) + bindings = nexus_db_v2.get_nexusport_binding(nexus_port, + vlan_id, + nexus_ip_addr, + instance_id) + self.assertEqual(len(bindings), 1) + + self._cisco_mech_driver.delete_port_precommit(port_context) + self._cisco_mech_driver.delete_port_postcommit(port_context) + with testtools.ExpectedException(exceptions.NexusPortBindingNotFound): + nexus_db_v2.get_nexusport_binding(nexus_port, + vlan_id, + nexus_ip_addr, + instance_id) + + def test_create_delete_ports(self): + """Tests creation and deletion of two new virtual Ports.""" + self._create_delete_port( + TestCiscoNexusDevice.test_configs['test_config1']) + + self._create_delete_port( + TestCiscoNexusDevice.test_configs['test_config2']) + + 
def test_create_delete_portchannel(self): + """Tests creation of a port over a portchannel.""" + self._create_delete_port( + TestCiscoNexusDevice.test_configs['test_config_portchannel']) diff --git a/neutron/tests/unit/ml2/drivers/cisco/nexus/test_cisco_nexus_db.py b/neutron/tests/unit/ml2/drivers/cisco/nexus/test_cisco_nexus_db.py new file mode 100644 index 000000000..08cc9a951 --- /dev/null +++ b/neutron/tests/unit/ml2/drivers/cisco/nexus/test_cisco_nexus_db.py @@ -0,0 +1,206 @@ +# Copyright (c) 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import collections +import testtools + +from neutron.db import api as db +from neutron.plugins.ml2.drivers.cisco.nexus import exceptions +from neutron.plugins.ml2.drivers.cisco.nexus import nexus_db_v2 +from neutron.tests import base + + +class CiscoNexusDbTest(base.BaseTestCase): + + """Unit tests for Cisco mechanism driver's Nexus port binding database.""" + + NpbObj = collections.namedtuple('NpbObj', 'port vlan switch instance') + + def setUp(self): + super(CiscoNexusDbTest, self).setUp() + db.configure_db() + self.addCleanup(db.clear_db) + + def _npb_test_obj(self, pnum, vnum, switch='10.9.8.7', instance=None): + """Creates a Nexus port binding test object from a pair of numbers.""" + if pnum == 'router': + port = pnum + else: + port = '1/%s' % pnum + if instance is None: + instance = 'instance_%s_%s' % (pnum, vnum) + return self.NpbObj(port, vnum, switch, instance) + + def _assert_bindings_match(self, npb, npb_obj): + """Asserts that a port binding matches a port binding test obj.""" + self.assertEqual(npb.port_id, npb_obj.port) + self.assertEqual(npb.vlan_id, npb_obj.vlan) + self.assertEqual(npb.switch_ip, npb_obj.switch) + self.assertEqual(npb.instance_id, npb_obj.instance) + + def _add_binding_to_db(self, npb): + """Adds a port binding to the Nexus database.""" + return nexus_db_v2.add_nexusport_binding( + npb.port, npb.vlan, npb.switch, npb.instance) + + def _add_bindings_to_db(self, npbs): + """Adds a list of port bindings to the Nexus database.""" + for npb in npbs: + nexus_db_v2.add_nexusport_binding( + npb.port, npb.vlan, npb.switch, npb.instance) + + def _remove_binding_from_db(self, npb): + """Removes a port binding from the Nexus database.""" + return nexus_db_v2.remove_nexusport_binding( + npb.port, npb.vlan, npb.switch, npb.instance) + + def _get_nexusport_binding(self, npb): + """Gets a port binding based on port, vlan, switch, and instance.""" + return nexus_db_v2.get_nexusport_binding( + npb.port, npb.vlan, npb.switch, npb.instance) + + def 
_get_nexusvlan_binding(self, npb): + """Gets port bindings based on vlan and switch.""" + return nexus_db_v2.get_nexusvlan_binding(npb.vlan, npb.switch) + + def _get_nexusvm_binding(self, npb): + """Gets port binding based on vlan and instance.""" + return nexus_db_v2.get_nexusvm_bindings(npb.vlan, npb.instance)[0] + + def _get_port_vlan_switch_binding(self, npb): + """Gets port bindings based on port, vlan, and switch.""" + return nexus_db_v2.get_port_vlan_switch_binding( + npb.port, npb.vlan, npb.switch) + + def _get_port_switch_bindings(self, npb): + """Get port bindings based on port and switch.""" + return nexus_db_v2.get_port_switch_bindings(npb.port, npb.switch) + + def test_nexusportbinding_add_remove(self): + """Tests add and removal of port bindings from the Nexus database.""" + npb11 = self._npb_test_obj(10, 100) + npb = self._add_binding_to_db(npb11) + self._assert_bindings_match(npb, npb11) + npb = self._remove_binding_from_db(npb11) + self.assertEqual(len(npb), 1) + self._assert_bindings_match(npb[0], npb11) + with testtools.ExpectedException(exceptions.NexusPortBindingNotFound): + self._remove_binding_from_db(npb11) + + def test_nexusportbinding_get(self): + """Tests get of specific port bindings from the database.""" + npb11 = self._npb_test_obj(10, 100) + npb21 = self._npb_test_obj(20, 100) + npb22 = self._npb_test_obj(20, 200) + self._add_bindings_to_db([npb11, npb21, npb22]) + + npb = self._get_nexusport_binding(npb11) + self.assertEqual(len(npb), 1) + self._assert_bindings_match(npb[0], npb11) + npb = self._get_nexusport_binding(npb21) + self.assertEqual(len(npb), 1) + self._assert_bindings_match(npb[0], npb21) + npb = self._get_nexusport_binding(npb22) + self.assertEqual(len(npb), 1) + self._assert_bindings_match(npb[0], npb22) + + with testtools.ExpectedException(exceptions.NexusPortBindingNotFound): + nexus_db_v2.get_nexusport_binding( + npb21.port, npb21.vlan, npb21.switch, "dummyInstance") + + def test_nexusvlanbinding_get(self): + """Test 
get of port bindings based on vlan and switch.""" + npb11 = self._npb_test_obj(10, 100) + npb21 = self._npb_test_obj(20, 100) + npb22 = self._npb_test_obj(20, 200) + self._add_bindings_to_db([npb11, npb21, npb22]) + + npb_all_v100 = self._get_nexusvlan_binding(npb11) + self.assertEqual(len(npb_all_v100), 2) + npb_v200 = self._get_nexusvlan_binding(npb22) + self.assertEqual(len(npb_v200), 1) + self._assert_bindings_match(npb_v200[0], npb22) + + with testtools.ExpectedException(exceptions.NexusPortBindingNotFound): + nexus_db_v2.get_nexusvlan_binding(npb21.vlan, "dummySwitch") + + def test_nexusvmbinding_get(self): + """Test get of port bindings based on vlan and instance.""" + npb11 = self._npb_test_obj(10, 100) + npb21 = self._npb_test_obj(20, 100) + npb22 = self._npb_test_obj(20, 200) + self._add_bindings_to_db([npb11, npb21, npb22]) + + npb = self._get_nexusvm_binding(npb21) + self._assert_bindings_match(npb, npb21) + npb = self._get_nexusvm_binding(npb22) + self._assert_bindings_match(npb, npb22) + + with testtools.ExpectedException(exceptions.NexusPortBindingNotFound): + nexus_db_v2.get_nexusvm_bindings(npb21.vlan, "dummyInstance")[0] + + def test_nexusportvlanswitchbinding_get(self): + """Tests get of port bindings based on port, vlan, and switch.""" + npb11 = self._npb_test_obj(10, 100) + npb21 = self._npb_test_obj(20, 100) + self._add_bindings_to_db([npb11, npb21]) + + npb = self._get_port_vlan_switch_binding(npb11) + self.assertEqual(len(npb), 1) + self._assert_bindings_match(npb[0], npb11) + + with testtools.ExpectedException(exceptions.NexusPortBindingNotFound): + nexus_db_v2.get_port_vlan_switch_binding( + npb21.port, npb21.vlan, "dummySwitch") + + def test_nexusportswitchbinding_get(self): + """Tests get of port bindings based on port and switch.""" + npb11 = self._npb_test_obj(10, 100) + npb21 = self._npb_test_obj(20, 100, switch='2.2.2.2') + npb22 = self._npb_test_obj(20, 200, switch='2.2.2.2') + self._add_bindings_to_db([npb11, npb21, npb22]) + + npb 
= self._get_port_switch_bindings(npb11) + self.assertEqual(len(npb), 1) + self._assert_bindings_match(npb[0], npb11) + npb_all_p20 = self._get_port_switch_bindings(npb21) + self.assertEqual(len(npb_all_p20), 2) + + npb = nexus_db_v2.get_port_switch_bindings(npb21.port, "dummySwitch") + self.assertIsNone(npb) + + def test_nexusbinding_update(self): + """Tests update of vlan IDs for port bindings.""" + npb11 = self._npb_test_obj(10, 100, switch='1.1.1.1', instance='test') + npb21 = self._npb_test_obj(20, 100, switch='1.1.1.1', instance='test') + self._add_bindings_to_db([npb11, npb21]) + + npb_all_v100 = nexus_db_v2.get_nexusvlan_binding(100, '1.1.1.1') + self.assertEqual(len(npb_all_v100), 2) + + npb22 = self._npb_test_obj(20, 200, switch='1.1.1.1', instance='test') + npb = nexus_db_v2.update_nexusport_binding(npb21.port, 200) + self._assert_bindings_match(npb, npb22) + + npb_all_v100 = nexus_db_v2.get_nexusvlan_binding(100, '1.1.1.1') + self.assertEqual(len(npb_all_v100), 1) + self._assert_bindings_match(npb_all_v100[0], npb11) + + npb = nexus_db_v2.update_nexusport_binding(npb21.port, 0) + self.assertIsNone(npb) + + npb33 = self._npb_test_obj(30, 300, switch='1.1.1.1', instance='test') + with testtools.ExpectedException(exceptions.NexusPortBindingNotFound): + nexus_db_v2.update_nexusport_binding(npb33.port, 200) diff --git a/neutron/tests/unit/ml2/drivers/mechanism_bulkless.py b/neutron/tests/unit/ml2/drivers/mechanism_bulkless.py new file mode 100644 index 000000000..0a0d3de93 --- /dev/null +++ b/neutron/tests/unit/ml2/drivers/mechanism_bulkless.py @@ -0,0 +1,23 @@ +# Copyright (c) 2014 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from neutron.plugins.ml2 import driver_api as api + + +class BulklessMechanismDriver(api.MechanismDriver): + """Test mechanism driver for testing bulk emulation.""" + + def initialize(self): + self.native_bulk_support = False diff --git a/neutron/tests/unit/ml2/drivers/mechanism_logger.py b/neutron/tests/unit/ml2/drivers/mechanism_logger.py new file mode 100644 index 000000000..401badb16 --- /dev/null +++ b/neutron/tests/unit/ml2/drivers/mechanism_logger.py @@ -0,0 +1,120 @@ +# Copyright (c) 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from neutron.openstack.common import log +from neutron.plugins.ml2 import driver_api as api + +LOG = log.getLogger(__name__) + + +class LoggerMechanismDriver(api.MechanismDriver): + """Mechanism driver that logs all calls and parameters made. + + Generally used for testing and debugging. 
+ """ + + def initialize(self): + pass + + def _log_network_call(self, method_name, context): + LOG.info(_("%(method)s called with network settings %(current)s " + "(original settings %(original)s) and " + "network segments %(segments)s"), + {'method': method_name, + 'current': context.current, + 'original': context.original, + 'segments': context.network_segments}) + + def create_network_precommit(self, context): + self._log_network_call("create_network_precommit", context) + + def create_network_postcommit(self, context): + self._log_network_call("create_network_postcommit", context) + + def update_network_precommit(self, context): + self._log_network_call("update_network_precommit", context) + + def update_network_postcommit(self, context): + self._log_network_call("update_network_postcommit", context) + + def delete_network_precommit(self, context): + self._log_network_call("delete_network_precommit", context) + + def delete_network_postcommit(self, context): + self._log_network_call("delete_network_postcommit", context) + + def _log_subnet_call(self, method_name, context): + LOG.info(_("%(method)s called with subnet settings %(current)s " + "(original settings %(original)s)"), + {'method': method_name, + 'current': context.current, + 'original': context.original}) + + def create_subnet_precommit(self, context): + self._log_subnet_call("create_subnet_precommit", context) + + def create_subnet_postcommit(self, context): + self._log_subnet_call("create_subnet_postcommit", context) + + def update_subnet_precommit(self, context): + self._log_subnet_call("update_subnet_precommit", context) + + def update_subnet_postcommit(self, context): + self._log_subnet_call("update_subnet_postcommit", context) + + def delete_subnet_precommit(self, context): + self._log_subnet_call("delete_subnet_precommit", context) + + def delete_subnet_postcommit(self, context): + self._log_subnet_call("delete_subnet_postcommit", context) + + def _log_port_call(self, method_name, context): + 
network_context = context.network + LOG.info(_("%(method)s called with port settings %(current)s " + "(original settings %(original)s) " + "bound to segment %(segment)s " + "(original segment %(original_segment)s) " + "using driver %(driver)s " + "(original driver %(original_driver)s) " + "on network %(network)s"), + {'method': method_name, + 'current': context.current, + 'original': context.original, + 'segment': context.bound_segment, + 'original_segment': context.original_bound_segment, + 'driver': context.bound_driver, + 'original_driver': context.original_bound_driver, + 'network': network_context.current}) + + def create_port_precommit(self, context): + self._log_port_call("create_port_precommit", context) + + def create_port_postcommit(self, context): + self._log_port_call("create_port_postcommit", context) + + def update_port_precommit(self, context): + self._log_port_call("update_port_precommit", context) + + def update_port_postcommit(self, context): + self._log_port_call("update_port_postcommit", context) + + def delete_port_precommit(self, context): + self._log_port_call("delete_port_precommit", context) + + def delete_port_postcommit(self, context): + self._log_port_call("delete_port_postcommit", context) + + def bind_port(self, context): + self._log_port_call("bind_port", context) diff --git a/neutron/tests/unit/ml2/drivers/mechanism_test.py b/neutron/tests/unit/ml2/drivers/mechanism_test.py new file mode 100644 index 000000000..6a0ca1e86 --- /dev/null +++ b/neutron/tests/unit/ml2/drivers/mechanism_test.py @@ -0,0 +1,171 @@ +# Copyright (c) 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from neutron.common import constants as const +from neutron.extensions import portbindings +from neutron.plugins.ml2 import driver_api as api + + +class TestMechanismDriver(api.MechanismDriver): + """Test mechanism driver for testing mechanism driver api.""" + + def initialize(self): + self.bound_ports = set() + + def _check_network_context(self, context, original_expected): + assert(isinstance(context, api.NetworkContext)) + assert(isinstance(context.current, dict)) + assert(context.current['id'] is not None) + if original_expected: + assert(isinstance(context.original, dict)) + assert(context.current['id'] == context.original['id']) + else: + assert(not context.original) + assert(context.network_segments) + + def create_network_precommit(self, context): + self._check_network_context(context, False) + + def create_network_postcommit(self, context): + self._check_network_context(context, False) + + def update_network_precommit(self, context): + self._check_network_context(context, True) + + def update_network_postcommit(self, context): + self._check_network_context(context, True) + + def delete_network_precommit(self, context): + self._check_network_context(context, False) + + def delete_network_postcommit(self, context): + self._check_network_context(context, False) + + def _check_subnet_context(self, context, original_expected): + assert(isinstance(context, api.SubnetContext)) + assert(isinstance(context.current, dict)) + assert(context.current['id'] is not None) + if original_expected: + assert(isinstance(context.original, dict)) + 
assert(context.current['id'] == context.original['id']) + else: + assert(not context.original) + + def create_subnet_precommit(self, context): + self._check_subnet_context(context, False) + + def create_subnet_postcommit(self, context): + self._check_subnet_context(context, False) + + def update_subnet_precommit(self, context): + self._check_subnet_context(context, True) + + def update_subnet_postcommit(self, context): + self._check_subnet_context(context, True) + + def delete_subnet_precommit(self, context): + self._check_subnet_context(context, False) + + def delete_subnet_postcommit(self, context): + self._check_subnet_context(context, False) + + def _check_port_context(self, context, original_expected): + assert(isinstance(context, api.PortContext)) + assert(isinstance(context.current, dict)) + assert(context.current['id'] is not None) + + vif_type = context.current.get(portbindings.VIF_TYPE) + assert(vif_type is not None) + + if vif_type in (portbindings.VIF_TYPE_UNBOUND, + portbindings.VIF_TYPE_BINDING_FAILED): + assert(context.bound_segment is None) + assert(context.bound_driver is None) + assert(context.current['id'] not in self.bound_ports) + else: + assert(isinstance(context.bound_segment, dict)) + assert(context.bound_driver == 'test') + assert(context.current['id'] in self.bound_ports) + + if original_expected: + assert(isinstance(context.original, dict)) + assert(context.current['id'] == context.original['id']) + vif_type = context.original.get(portbindings.VIF_TYPE) + assert(vif_type is not None) + if vif_type in (portbindings.VIF_TYPE_UNBOUND, + portbindings.VIF_TYPE_BINDING_FAILED): + assert(context.original_bound_segment is None) + assert(context.original_bound_driver is None) + else: + assert(isinstance(context.original_bound_segment, dict)) + assert(context.original_bound_driver == 'test') + else: + assert(context.original is None) + assert(context.original_bound_segment is None) + assert(context.original_bound_driver is None) + + network_context 
= context.network + assert(isinstance(network_context, api.NetworkContext)) + self._check_network_context(network_context, False) + + def create_port_precommit(self, context): + self._check_port_context(context, False) + + def create_port_postcommit(self, context): + self._check_port_context(context, False) + + def update_port_precommit(self, context): + if (context.original_bound_driver == 'test' and + context.bound_driver != 'test'): + self.bound_ports.remove(context.original['id']) + self._check_port_context(context, True) + + def update_port_postcommit(self, context): + self._check_port_context(context, True) + + def delete_port_precommit(self, context): + self._check_port_context(context, False) + + def delete_port_postcommit(self, context): + self._check_port_context(context, False) + + def bind_port(self, context): + # REVISIT(rkukura): The upcoming fix for bug 1276391 will + # ensure the MDs see the unbinding of the port as a port + # update prior to re-binding, at which point this should be + # removed. + self.bound_ports.discard(context.current['id']) + + # REVISIT(rkukura): Currently, bind_port() is called as part + # of either a create or update transaction. The fix for bug + # 1276391 will change it to be called outside any transaction, + # so the context.original* will no longer be available. 
+ self._check_port_context(context, context.original is not None) + + host = context.current.get(portbindings.HOST_ID, None) + segment = context.network.network_segments[0][api.ID] + if host == "host-ovs-no_filter": + context.set_binding(segment, portbindings.VIF_TYPE_OVS, + {portbindings.CAP_PORT_FILTER: False}) + self.bound_ports.add(context.current['id']) + elif host == "host-bridge-filter": + context.set_binding(segment, portbindings.VIF_TYPE_BRIDGE, + {portbindings.CAP_PORT_FILTER: True}) + self.bound_ports.add(context.current['id']) + elif host == "host-ovs-filter-active": + context.set_binding(segment, portbindings.VIF_TYPE_OVS, + {portbindings.CAP_PORT_FILTER: True}, + status=const.PORT_STATUS_ACTIVE) + self.bound_ports.add(context.current['id']) diff --git a/neutron/tests/unit/ml2/drivers/test_arista_mechanism_driver.py b/neutron/tests/unit/ml2/drivers/test_arista_mechanism_driver.py new file mode 100644 index 000000000..039839532 --- /dev/null +++ b/neutron/tests/unit/ml2/drivers/test_arista_mechanism_driver.py @@ -0,0 +1,726 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# Copyright (c) 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import mock +from oslo.config import cfg + +from neutron.common import constants as n_const +import neutron.db.api as ndb +from neutron.plugins.ml2.drivers.mech_arista import db +from neutron.plugins.ml2.drivers.mech_arista import exceptions as arista_exc +from neutron.plugins.ml2.drivers.mech_arista import mechanism_arista as arista +from neutron.tests import base + + +def setup_arista_wrapper_config(value=''): + cfg.CONF.keystone_authtoken = fake_keystone_info_class() + cfg.CONF.set_override('eapi_host', value, "ml2_arista") + cfg.CONF.set_override('eapi_username', value, "ml2_arista") + + +def setup_valid_config(): + # Config is not valid if value is not set + setup_arista_wrapper_config('value') + + +class AristaProvisionedVlansStorageTestCase(base.BaseTestCase): + """Test storing and retriving functionality of Arista mechanism driver. + + Tests all methods of this class by invoking them separately as well + as a group. + """ + + def setUp(self): + super(AristaProvisionedVlansStorageTestCase, self).setUp() + ndb.configure_db() + self.addCleanup(ndb.clear_db) + + def test_tenant_is_remembered(self): + tenant_id = 'test' + + db.remember_tenant(tenant_id) + net_provisioned = db.is_tenant_provisioned(tenant_id) + self.assertTrue(net_provisioned, 'Tenant must be provisioned') + + def test_tenant_is_removed(self): + tenant_id = 'test' + + db.remember_tenant(tenant_id) + db.forget_tenant(tenant_id) + net_provisioned = db.is_tenant_provisioned(tenant_id) + self.assertFalse(net_provisioned, 'The Tenant should be deleted') + + def test_network_is_remembered(self): + tenant_id = 'test' + network_id = '123' + segmentation_id = 456 + + db.remember_network(tenant_id, network_id, segmentation_id) + net_provisioned = db.is_network_provisioned(tenant_id, + network_id) + self.assertTrue(net_provisioned, 'Network must be provisioned') + + def test_network_is_removed(self): + tenant_id = 'test' + network_id = '123' + + db.remember_network(tenant_id, network_id, '123') + 
db.forget_network(tenant_id, network_id) + net_provisioned = db.is_network_provisioned(tenant_id, network_id) + self.assertFalse(net_provisioned, 'The network should be deleted') + + def test_vm_is_remembered(self): + vm_id = 'VM-1' + tenant_id = 'test' + network_id = '123' + port_id = 456 + host_id = 'ubuntu1' + + db.remember_vm(vm_id, host_id, port_id, network_id, tenant_id) + vm_provisioned = db.is_vm_provisioned(vm_id, host_id, port_id, + network_id, tenant_id) + self.assertTrue(vm_provisioned, 'VM must be provisioned') + + def test_vm_is_removed(self): + vm_id = 'VM-1' + tenant_id = 'test' + network_id = '123' + port_id = 456 + host_id = 'ubuntu1' + + db.remember_vm(vm_id, host_id, port_id, network_id, tenant_id) + db.forget_vm(vm_id, host_id, port_id, network_id, tenant_id) + vm_provisioned = db.is_vm_provisioned(vm_id, host_id, port_id, + network_id, tenant_id) + self.assertFalse(vm_provisioned, 'The vm should be deleted') + + def test_remembers_multiple_networks(self): + tenant_id = 'test' + expected_num_nets = 100 + nets = ['id%s' % n for n in range(expected_num_nets)] + for net_id in nets: + db.remember_network(tenant_id, net_id, 123) + + num_nets_provisioned = db.num_nets_provisioned(tenant_id) + self.assertEqual(expected_num_nets, num_nets_provisioned, + 'There should be %d nets, not %d' % + (expected_num_nets, num_nets_provisioned)) + + def test_removes_all_networks(self): + tenant_id = 'test' + num_nets = 100 + old_nets = db.num_nets_provisioned(tenant_id) + nets = ['id_%s' % n for n in range(num_nets)] + for net_id in nets: + db.remember_network(tenant_id, net_id, 123) + for net_id in nets: + db.forget_network(tenant_id, net_id) + + num_nets_provisioned = db.num_nets_provisioned(tenant_id) + expected = old_nets + self.assertEqual(expected, num_nets_provisioned, + 'There should be %d nets, not %d' % + (expected, num_nets_provisioned)) + + def test_remembers_multiple_tenants(self): + expected_num_tenants = 100 + tenants = ['id%s' % n for n in 
range(expected_num_tenants)] + for tenant_id in tenants: + db.remember_tenant(tenant_id) + + num_tenants_provisioned = db.num_provisioned_tenants() + self.assertEqual(expected_num_tenants, num_tenants_provisioned, + 'There should be %d tenants, not %d' % + (expected_num_tenants, num_tenants_provisioned)) + + def test_removes_multiple_tenants(self): + num_tenants = 100 + tenants = ['id%s' % n for n in range(num_tenants)] + for tenant_id in tenants: + db.remember_tenant(tenant_id) + for tenant_id in tenants: + db.forget_tenant(tenant_id) + + num_tenants_provisioned = db.num_provisioned_tenants() + expected = 0 + self.assertEqual(expected, num_tenants_provisioned, + 'There should be %d tenants, not %d' % + (expected, num_tenants_provisioned)) + + def test_num_vm_is_valid(self): + tenant_id = 'test' + network_id = '123' + port_id = 456 + host_id = 'ubuntu1' + + vm_to_remember = ['vm1', 'vm2', 'vm3'] + vm_to_forget = ['vm2', 'vm1'] + + for vm in vm_to_remember: + db.remember_vm(vm, host_id, port_id, network_id, tenant_id) + for vm in vm_to_forget: + db.forget_vm(vm, host_id, port_id, network_id, tenant_id) + + num_vms = len(db.get_vms(tenant_id)) + expected = len(vm_to_remember) - len(vm_to_forget) + + self.assertEqual(expected, num_vms, + 'There should be %d records, ' + 'got %d records' % (expected, num_vms)) + # clean up afterwards + db.forget_vm('vm3', host_id, port_id, network_id, tenant_id) + + def test_get_network_list_returns_eos_compatible_data(self): + tenant = u'test-1' + segm_type = 'vlan' + network_id = u'123' + network2_id = u'1234' + vlan_id = 123 + vlan2_id = 1234 + expected_eos_net_list = {network_id: {u'networkId': network_id, + u'segmentationTypeId': vlan_id, + u'segmentationType': segm_type}, + network2_id: {u'networkId': network2_id, + u'segmentationTypeId': vlan2_id, + u'segmentationType': segm_type}} + + db.remember_network(tenant, network_id, vlan_id) + db.remember_network(tenant, network2_id, vlan2_id) + + net_list = db.get_networks(tenant) + 
self.assertEqual(net_list, expected_eos_net_list, ('%s != %s' %
+                         (net_list, expected_eos_net_list)))
+
+
+class PositiveRPCWrapperValidConfigTestCase(base.BaseTestCase):
+    """Test cases to test the RPC between Arista Driver and EOS.
+
+    Tests all methods used to send commands between Arista Driver and EOS
+    """
+
+    def setUp(self):
+        super(PositiveRPCWrapperValidConfigTestCase, self).setUp()
+        setup_valid_config()
+        self.drv = arista.AristaRPCWrapper()
+        self.region = 'RegionOne'
+        self.drv._server = mock.MagicMock()
+
+    def _get_exit_mode_cmds(self, modes):
+        return ['exit'] * len(modes)
+
+    def test_no_exception_on_correct_configuration(self):
+        self.assertIsNotNone(self.drv)
+
+    def test_plug_host_into_network(self):
+        tenant_id = 'ten-1'
+        vm_id = 'vm-1'
+        port_id = 123
+        network_id = 'net-id'
+        host = 'host'
+        port_name = '123-port'
+
+        self.drv.plug_host_into_network(vm_id, host, port_id,
+                                        network_id, tenant_id, port_name)
+        cmds = ['enable', 'configure', 'cvx', 'service openstack',
+                'region RegionOne',
+                'tenant ten-1', 'vm id vm-1 hostid host',
+                'port id 123 name "123-port" network-id net-id',
+                'exit', 'exit', 'exit', 'exit', 'exit']
+
+        self.drv._server.runCmds.assert_called_once_with(version=1, cmds=cmds)
+
+    def test_plug_dhcp_port_into_network(self):
+        tenant_id = 'ten-1'
+        vm_id = 'vm-1'
+        port_id = 123
+        network_id = 'net-id'
+        host = 'host'
+        port_name = '123-port'
+
+        self.drv.plug_dhcp_port_into_network(vm_id, host, port_id,
+                                             network_id, tenant_id, port_name)
+        cmds = ['enable', 'configure', 'cvx', 'service openstack',
+                'region RegionOne',
+                'tenant ten-1', 'network id net-id',
+                'dhcp id vm-1 hostid host port-id 123 name "123-port"',
+                'exit', 'exit', 'exit', 'exit']
+
+        self.drv._server.runCmds.assert_called_once_with(version=1, cmds=cmds)
+
+    def test_unplug_host_from_network(self):
+        tenant_id = 'ten-1'
+        vm_id = 'vm-1'
+        port_id = 123
+        network_id = 'net-id'
+        host = 'host'
+        self.drv.unplug_host_from_network(vm_id, host, port_id,
+ network_id, tenant_id) + cmds = ['enable', 'configure', 'cvx', 'service openstack', + 'region RegionOne', + 'tenant ten-1', 'vm id vm-1 hostid host', + 'no port id 123', + 'exit', 'exit', 'exit', 'exit', 'exit'] + self.drv._server.runCmds.assert_called_once_with(version=1, cmds=cmds) + + def test_unplug_dhcp_port_from_network(self): + tenant_id = 'ten-1' + vm_id = 'vm-1' + port_id = 123 + network_id = 'net-id' + host = 'host' + + self.drv.unplug_dhcp_port_from_network(vm_id, host, port_id, + network_id, tenant_id) + cmds = ['enable', 'configure', 'cvx', 'service openstack', + 'region RegionOne', + 'tenant ten-1', 'network id net-id', + 'no dhcp id vm-1 port-id 123', + 'exit', 'exit', 'exit', 'exit'] + + self.drv._server.runCmds.assert_called_once_with(version=1, cmds=cmds) + + def test_create_network(self): + tenant_id = 'ten-1' + network = { + 'network_id': 'net-id', + 'network_name': 'net-name', + 'segmentation_id': 123} + self.drv.create_network(tenant_id, network) + cmds = ['enable', 'configure', 'cvx', 'service openstack', + 'region RegionOne', + 'tenant ten-1', 'network id net-id name "net-name"', + 'segment 1 type vlan id 123', + 'exit', 'exit', 'exit', 'exit', 'exit', 'exit'] + self.drv._server.runCmds.assert_called_once_with(version=1, cmds=cmds) + + def test_create_network_bulk(self): + tenant_id = 'ten-2' + num_networks = 10 + networks = [{ + 'network_id': 'net-id-%d' % net_id, + 'network_name': 'net-name-%d' % net_id, + 'segmentation_id': net_id} for net_id in range(1, num_networks) + ] + + self.drv.create_network_bulk(tenant_id, networks) + cmds = ['enable', + 'configure', + 'cvx', + 'service openstack', + 'region RegionOne', + 'tenant ten-2'] + for net_id in range(1, num_networks): + cmds.append('network id net-id-%d name "net-name-%d"' % + (net_id, net_id)) + cmds.append('segment 1 type vlan id %d' % net_id) + + cmds.extend(self._get_exit_mode_cmds(['tenant', 'region', 'openstack', + 'cvx', 'configure', 'enable'])) + 
self.drv._server.runCmds.assert_called_once_with(version=1, cmds=cmds) + + def test_delete_network(self): + tenant_id = 'ten-1' + network_id = 'net-id' + self.drv.delete_network(tenant_id, network_id) + cmds = ['enable', 'configure', 'cvx', 'service openstack', + 'region RegionOne', + 'tenant ten-1', 'no network id net-id', + 'exit', 'exit', 'exit', 'exit', 'exit'] + self.drv._server.runCmds.assert_called_once_with(version=1, cmds=cmds) + + def test_delete_network_bulk(self): + tenant_id = 'ten-2' + num_networks = 10 + networks = [{ + 'network_id': 'net-id-%d' % net_id, + 'network_name': 'net-name-%d' % net_id, + 'segmentation_id': net_id} for net_id in range(1, num_networks) + ] + + networks = ['net-id-%d' % net_id for net_id in range(1, num_networks)] + self.drv.delete_network_bulk(tenant_id, networks) + cmds = ['enable', + 'configure', + 'cvx', + 'service openstack', + 'region RegionOne', + 'tenant ten-2'] + for net_id in range(1, num_networks): + cmds.append('no network id net-id-%d' % net_id) + + cmds.extend(self._get_exit_mode_cmds(['tenant', 'region', 'openstack', + 'cvx', 'configure'])) + self.drv._server.runCmds.assert_called_once_with(version=1, cmds=cmds) + + def test_delete_vm(self): + tenant_id = 'ten-1' + vm_id = 'vm-id' + self.drv.delete_vm(tenant_id, vm_id) + cmds = ['enable', 'configure', 'cvx', 'service openstack', + 'region RegionOne', + 'tenant ten-1', 'no vm id vm-id', + 'exit', 'exit', 'exit', 'exit', 'exit'] + self.drv._server.runCmds.assert_called_once_with(version=1, cmds=cmds) + + def test_delete_vm_bulk(self): + tenant_id = 'ten-2' + num_vms = 10 + vm_ids = ['vm-id-%d' % vm_id for vm_id in range(1, num_vms)] + self.drv.delete_vm_bulk(tenant_id, vm_ids) + + cmds = ['enable', + 'configure', + 'cvx', + 'service openstack', + 'region RegionOne', + 'tenant ten-2'] + + for vm_id in range(1, num_vms): + cmds.append('no vm id vm-id-%d' % vm_id) + + cmds.extend(self._get_exit_mode_cmds(['tenant', 'region', 'openstack', + 'cvx', 'configure'])) + 
self.drv._server.runCmds.assert_called_once_with(version=1, cmds=cmds) + + def test_create_vm_port_bulk(self): + tenant_id = 'ten-3' + num_vms = 10 + num_ports_per_vm = 2 + + vms = dict( + ('vm-id-%d' % vm_id, { + 'vmId': 'vm-id-%d' % vm_id, + 'host': 'host_%d' % vm_id, + } + ) for vm_id in range(1, num_vms) + ) + + devices = [n_const.DEVICE_OWNER_DHCP, 'compute'] + vm_port_list = [] + + net_count = 1 + for vm_id in range(1, num_vms): + for port_id in range(1, num_ports_per_vm): + port = { + 'id': 'port-id-%d-%d' % (vm_id, port_id), + 'device_id': 'vm-id-%d' % vm_id, + 'device_owner': devices[(vm_id + port_id) % 2], + 'network_id': 'network-id-%d' % net_count, + 'name': 'port-%d-%d' % (vm_id, port_id) + } + vm_port_list.append(port) + net_count += 1 + + self.drv.create_vm_port_bulk(tenant_id, vm_port_list, vms) + cmds = ['enable', + 'configure', + 'cvx', + 'service openstack', + 'region RegionOne', + 'tenant ten-3'] + + net_count = 1 + for vm_count in range(1, num_vms): + host = 'host_%s' % vm_count + for port_count in range(1, num_ports_per_vm): + vm_id = 'vm-id-%d' % vm_count + device_owner = devices[(vm_count + port_count) % 2] + port_name = '"port-%d-%d"' % (vm_count, port_count) + network_id = 'network-id-%d' % net_count + port_id = 'port-id-%d-%d' % (vm_count, port_count) + if device_owner == 'network:dhcp': + cmds.append('network id %s' % network_id) + cmds.append('dhcp id %s hostid %s port-id %s name %s' % ( + vm_id, host, port_id, port_name)) + elif device_owner == 'compute': + cmds.append('vm id %s hostid %s' % (vm_id, host)) + cmds.append('port id %s name %s network-id %s' % ( + port_id, port_name, network_id)) + net_count += 1 + + cmds.extend(self._get_exit_mode_cmds(['tenant', 'region', + 'openstack', 'cvx'])) + self.drv._server.runCmds.assert_called_once_with(version=1, cmds=cmds) + + def test_delete_tenant(self): + tenant_id = 'ten-1' + self.drv.delete_tenant(tenant_id) + cmds = ['enable', 'configure', 'cvx', 'service openstack', + 'region 
RegionOne', 'no tenant ten-1', + 'exit', 'exit', 'exit', 'exit'] + self.drv._server.runCmds.assert_called_once_with(version=1, cmds=cmds) + + def test_delete_tenant_bulk(self): + num_tenants = 10 + tenant_list = ['ten-%d' % t_id for t_id in range(1, num_tenants)] + self.drv.delete_tenant_bulk(tenant_list) + cmds = ['enable', + 'configure', + 'cvx', + 'service openstack', + 'region RegionOne'] + for ten_id in range(1, num_tenants): + cmds.append('no tenant ten-%d' % ten_id) + + cmds.extend(self._get_exit_mode_cmds(['region', 'openstack', + 'cvx', 'configure'])) + self.drv._server.runCmds.assert_called_once_with(version=1, cmds=cmds) + + def test_get_network_info_returns_none_when_no_such_net(self): + expected = [] + self.drv.get_tenants = mock.MagicMock() + self.drv.get_tenants.return_value = [] + + net_info = self.drv.get_tenants() + + self.drv.get_tenants.assert_called_once_with() + self.assertEqual(net_info, expected, ('Network info must be "None"' + 'for unknown network')) + + def test_get_network_info_returns_info_for_available_net(self): + valid_network_id = '12345' + valid_net_info = {'network_id': valid_network_id, + 'some_info': 'net info'} + known_nets = valid_net_info + + self.drv.get_tenants = mock.MagicMock() + self.drv.get_tenants.return_value = known_nets + + net_info = self.drv.get_tenants() + self.assertEqual(net_info, valid_net_info, + ('Must return network info for a valid net')) + + def test_check_cli_commands(self): + self.drv.check_cli_commands() + cmds = ['show openstack config region RegionOne timestamp'] + self.drv._server.runCmds.assert_called_once_with(version=1, cmds=cmds) + + +class AristaRPCWrapperInvalidConfigTestCase(base.BaseTestCase): + """Negative test cases to test the Arista Driver configuration.""" + + def setUp(self): + super(AristaRPCWrapperInvalidConfigTestCase, self).setUp() + self.setup_invalid_config() # Invalid config, required options not set + + def setup_invalid_config(self): + setup_arista_wrapper_config('') + + def 
test_raises_exception_on_wrong_configuration(self): + self.assertRaises(arista_exc.AristaConfigError, + arista.AristaRPCWrapper) + + +class NegativeRPCWrapperTestCase(base.BaseTestCase): + """Negative test cases to test the RPC between Arista Driver and EOS.""" + + def setUp(self): + super(NegativeRPCWrapperTestCase, self).setUp() + setup_valid_config() + + def test_exception_is_raised_on_json_server_error(self): + drv = arista.AristaRPCWrapper() + + drv._server = mock.MagicMock() + drv._server.runCmds.side_effect = Exception('server error') + self.assertRaises(arista_exc.AristaRpcError, drv.get_tenants) + + +class RealNetStorageAristaDriverTestCase(base.BaseTestCase): + """Main test cases for Arista Mechanism driver. + + Tests all mechanism driver APIs supported by Arista Driver. It invokes + all the APIs as they would be invoked in real world scenarios and + verifies the functionality. + """ + def setUp(self): + super(RealNetStorageAristaDriverTestCase, self).setUp() + self.fake_rpc = mock.MagicMock() + ndb.configure_db() + self.drv = arista.AristaDriver(self.fake_rpc) + + def tearDown(self): + super(RealNetStorageAristaDriverTestCase, self).tearDown() + self.drv.stop_synchronization_thread() + + def test_create_and_delete_network(self): + tenant_id = 'ten-1' + network_id = 'net1-id' + segmentation_id = 1001 + + network_context = self._get_network_context(tenant_id, + network_id, + segmentation_id) + self.drv.create_network_precommit(network_context) + net_provisioned = db.is_network_provisioned(tenant_id, network_id) + self.assertTrue(net_provisioned, 'The network should be created') + + expected_num_nets = 1 + num_nets_provisioned = db.num_nets_provisioned(tenant_id) + self.assertEqual(expected_num_nets, num_nets_provisioned, + 'There should be %d nets, not %d' % + (expected_num_nets, num_nets_provisioned)) + + #Now test the delete network + self.drv.delete_network_precommit(network_context) + net_provisioned = db.is_network_provisioned(tenant_id, network_id) + 
self.assertFalse(net_provisioned, 'The network should be deleted')
+
+        expected_num_nets = 0
+        num_nets_provisioned = db.num_nets_provisioned(tenant_id)
+        self.assertEqual(expected_num_nets, num_nets_provisioned,
+                         'There should be %d nets, not %d' %
+                         (expected_num_nets, num_nets_provisioned))
+
+    def test_create_and_delete_multiple_networks(self):
+        tenant_id = 'ten-1'
+        expected_num_nets = 100
+        segmentation_id = 1001
+        nets = ['id%s' % n for n in range(expected_num_nets)]
+        for net_id in nets:
+            network_context = self._get_network_context(tenant_id,
+                                                        net_id,
+                                                        segmentation_id)
+            self.drv.create_network_precommit(network_context)
+
+        num_nets_provisioned = db.num_nets_provisioned(tenant_id)
+        self.assertEqual(expected_num_nets, num_nets_provisioned,
+                         'There should be %d nets, not %d' %
+                         (expected_num_nets, num_nets_provisioned))
+
+        #now test the delete networks
+        for net_id in nets:
+            network_context = self._get_network_context(tenant_id,
+                                                        net_id,
+                                                        segmentation_id)
+            self.drv.delete_network_precommit(network_context)
+
+        num_nets_provisioned = db.num_nets_provisioned(tenant_id)
+        expected_num_nets = 0
+        self.assertEqual(expected_num_nets, num_nets_provisioned,
+                         'There should be %d nets, not %d' %
+                         (expected_num_nets, num_nets_provisioned))
+
+    def test_create_and_delete_ports(self):
+        tenant_id = 'ten-1'
+        network_id = 'net1-id'
+        segmentation_id = 1001
+        vms = ['vm1', 'vm2', 'vm3']
+
+        network_context = self._get_network_context(tenant_id,
+                                                    network_id,
+                                                    segmentation_id)
+        self.drv.create_network_precommit(network_context)
+
+        for vm_id in vms:
+            port_context = self._get_port_context(tenant_id,
+                                                  network_id,
+                                                  vm_id,
+                                                  network_context)
+            self.drv.create_port_precommit(port_context)
+
+        vm_list = db.get_vms(tenant_id)
+        provisioned_vms = len(vm_list)
+        expected_vms = len(vms)
+        self.assertEqual(expected_vms, provisioned_vms,
+                         'There should be %d '
+                         'hosts, not %d' % (expected_vms, provisioned_vms))
+
+        # Now test the delete ports
+        for vm_id in vms:
+            
port_context = self._get_port_context(tenant_id, + network_id, + vm_id, + network_context) + self.drv.delete_port_precommit(port_context) + + vm_list = db.get_vms(tenant_id) + provisioned_vms = len(vm_list) + expected_vms = 0 + self.assertEqual(expected_vms, provisioned_vms, + 'There should be %d ' + 'VMs, not %d' % (expected_vms, provisioned_vms)) + + def _get_network_context(self, tenant_id, net_id, seg_id): + network = {'id': net_id, + 'tenant_id': tenant_id} + network_segments = [{'segmentation_id': seg_id}] + return FakeNetworkContext(network, network_segments, network) + + def _get_port_context(self, tenant_id, net_id, vm_id, network): + port = {'device_id': vm_id, + 'device_owner': 'compute', + 'binding:host_id': 'ubuntu1', + 'tenant_id': tenant_id, + 'id': 101, + 'network_id': net_id + } + return FakePortContext(port, port, network) + + +class fake_keystone_info_class(object): + """To generate fake Keystone Authentification token information + + Arista Driver expects Keystone auth info. 
This fake information + is for testing only + """ + auth_protocol = 'abc' + auth_host = 'host' + auth_port = 5000 + admin_user = 'neutron' + admin_password = 'fun' + + +class FakeNetworkContext(object): + """To generate network context for testing purposes only.""" + + def __init__(self, network, segments=None, original_network=None): + self._network = network + self._original_network = original_network + self._segments = segments + + @property + def current(self): + return self._network + + @property + def original(self): + return self._original_network + + @property + def network_segments(self): + return self._segments + + +class FakePortContext(object): + """To generate port context for testing purposes only.""" + + def __init__(self, port, original_port, network): + self._port = port + self._original_port = original_port + self._network_context = network + + @property + def current(self): + return self._port + + @property + def original(self): + return self._original_port + + @property + def network(self): + return self._network_context diff --git a/neutron/tests/unit/ml2/drivers/test_bigswitch_mech.py b/neutron/tests/unit/ml2/drivers/test_bigswitch_mech.py new file mode 100644 index 000000000..ea884ae51 --- /dev/null +++ b/neutron/tests/unit/ml2/drivers/test_bigswitch_mech.py @@ -0,0 +1,144 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# Copyright 2014 Big Switch Networks, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import contextlib +import mock +import webob.exc + +from neutron import context as neutron_context +from neutron.extensions import portbindings +from neutron import manager +from neutron.plugins.bigswitch import servermanager +from neutron.plugins.ml2 import config as ml2_config +from neutron.plugins.ml2.drivers import type_vlan as vlan_config +import neutron.tests.unit.bigswitch.test_restproxy_plugin as trp +from neutron.tests.unit.ml2 import test_ml2_plugin +from neutron.tests.unit import test_db_plugin + +PHYS_NET = 'physnet1' +VLAN_START = 1000 +VLAN_END = 1100 +SERVER_POOL = 'neutron.plugins.bigswitch.servermanager.ServerPool' +DRIVER_MOD = 'neutron.plugins.ml2.drivers.mech_bigswitch.driver' +DRIVER = DRIVER_MOD + '.BigSwitchMechanismDriver' + + +class TestBigSwitchMechDriverBase(trp.BigSwitchProxyPluginV2TestCase): + + def setUp(self): + # Configure the ML2 mechanism drivers and network types + ml2_opts = { + 'mechanism_drivers': ['bigswitch'], + 'tenant_network_types': ['vlan'], + } + for opt, val in ml2_opts.items(): + ml2_config.cfg.CONF.set_override(opt, val, 'ml2') + + # Configure the ML2 VLAN parameters + phys_vrange = ':'.join([PHYS_NET, str(VLAN_START), str(VLAN_END)]) + vlan_config.cfg.CONF.set_override('network_vlan_ranges', + [phys_vrange], + 'ml2_type_vlan') + super(TestBigSwitchMechDriverBase, + self).setUp(test_ml2_plugin.PLUGIN_NAME) + + +class TestBigSwitchMechDriverNetworksV2(test_db_plugin.TestNetworksV2, + TestBigSwitchMechDriverBase): + pass + + +class TestBigSwitchMechDriverPortsV2(test_db_plugin.TestPortsV2, + TestBigSwitchMechDriverBase): + + VIF_TYPE = portbindings.VIF_TYPE_OVS + + def setUp(self): + super(TestBigSwitchMechDriverPortsV2, self).setUp() + self.port_create_status = 'DOWN' + + def test_update_port_status_build(self): + with self.port() as port: + self.assertEqual(port['port']['status'], 'DOWN') + self.assertEqual(self.port_create_status, 'DOWN') + + def _make_port(self, fmt, net_id, expected_res_status=None, 
arg_list=None, + **kwargs): + arg_list = arg_list or () + arg_list += ('binding:host_id', ) + res = self._create_port(fmt, net_id, expected_res_status, + arg_list, **kwargs) + # Things can go wrong - raise HTTP exc with res code only + # so it can be caught by unit tests + if res.status_int >= 400: + raise webob.exc.HTTPClientError(code=res.status_int) + return self.deserialize(fmt, res) + + def test_create404_triggers_background_sync(self): + # allow the async background thread to run for this test + self.spawn_p.stop() + with contextlib.nested( + mock.patch(SERVER_POOL + '.rest_create_port', + side_effect=servermanager.RemoteRestError( + reason=servermanager.NXNETWORK, status=404)), + mock.patch(DRIVER + '._send_all_data'), + self.port(**{'device_id': 'devid', 'binding:host_id': 'host'}) + ) as (mock_http, mock_send_all, p): + # wait for thread to finish + mm = manager.NeutronManager.get_plugin().mechanism_manager + bigdriver = mm.mech_drivers['bigswitch'].obj + bigdriver.evpool.waitall() + mock_send_all.assert_has_calls([ + mock.call( + send_routers=False, send_ports=True, + send_floating_ips=False, + triggered_by_tenant=p['port']['tenant_id'] + ) + ]) + self.spawn_p.start() + + def test_udpate404_triggers_background_sync(self): + with contextlib.nested( + mock.patch(SERVER_POOL + '.rest_update_port', + side_effect=servermanager.RemoteRestError( + reason=servermanager.NXNETWORK, status=404)), + mock.patch(DRIVER + '._send_all_data'), + self.port() + ) as (mock_update, mock_send_all, p): + plugin = manager.NeutronManager.get_plugin() + context = neutron_context.get_admin_context() + plugin.update_port(context, p['port']['id'], + {'port': {'device_id': 'devid', + 'binding:host_id': 'host'}}) + mock_send_all.assert_has_calls([ + mock.call( + send_routers=False, send_ports=True, + send_floating_ips=False, + triggered_by_tenant=p['port']['tenant_id'] + ) + ]) + + def test_backend_request_contents(self): + with contextlib.nested( + mock.patch(SERVER_POOL + 
'.rest_create_port'), + self.port(**{'device_id': 'devid', 'binding:host_id': 'host'}) + ) as (mock_rest, p): + # make sure basic expected keys are present in the port body + pb = mock_rest.mock_calls[0][1][2] + self.assertEqual('host', pb['binding:host_id']) + self.assertIn('bound_segment', pb) + self.assertIn('network', pb) diff --git a/neutron/tests/unit/ml2/drivers/test_l2population.py b/neutron/tests/unit/ml2/drivers/test_l2population.py new file mode 100644 index 000000000..d96be1ccd --- /dev/null +++ b/neutron/tests/unit/ml2/drivers/test_l2population.py @@ -0,0 +1,724 @@ +# Copyright (c) 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# +# @author: Sylvain Afchain, eNovance SAS +# @author: Francois Eleouet, Orange +# @author: Mathieu Rohon, Orange + +import mock + +from neutron.common import constants +from neutron.common import topics +from neutron import context +from neutron.db import agents_db +from neutron.db import api as db_api +from neutron.extensions import portbindings +from neutron.extensions import providernet as pnet +from neutron import manager +from neutron.openstack.common import timeutils +from neutron.plugins.ml2 import config as config +from neutron.plugins.ml2.drivers.l2pop import constants as l2_consts +from neutron.plugins.ml2 import managers +from neutron.plugins.ml2 import rpc +from neutron.tests.unit import test_db_plugin as test_plugin + +HOST = 'my_l2_host' +L2_AGENT = { + 'binary': 'neutron-openvswitch-agent', + 'host': HOST, + 'topic': constants.L2_AGENT_TOPIC, + 'configurations': {'tunneling_ip': '20.0.0.1', + 'tunnel_types': ['vxlan']}, + 'agent_type': constants.AGENT_TYPE_OVS, + 'tunnel_type': [], + 'start_flag': True +} + +L2_AGENT_2 = { + 'binary': 'neutron-openvswitch-agent', + 'host': HOST + '_2', + 'topic': constants.L2_AGENT_TOPIC, + 'configurations': {'tunneling_ip': '20.0.0.2', + 'tunnel_types': ['vxlan']}, + 'agent_type': constants.AGENT_TYPE_OVS, + 'tunnel_type': [], + 'start_flag': True +} + +L2_AGENT_3 = { + 'binary': 'neutron-openvswitch-agent', + 'host': HOST + '_3', + 'topic': constants.L2_AGENT_TOPIC, + 'configurations': {'tunneling_ip': '20.0.0.3', + 'tunnel_types': []}, + 'agent_type': constants.AGENT_TYPE_OVS, + 'tunnel_type': [], + 'start_flag': True +} + +L2_AGENT_4 = { + 'binary': 'neutron-openvswitch-agent', + 'host': HOST + '_4', + 'topic': constants.L2_AGENT_TOPIC, + 'configurations': {'tunneling_ip': '20.0.0.4', + 'tunnel_types': ['vxlan']}, + 'agent_type': constants.AGENT_TYPE_OVS, + 'tunnel_type': [], + 'start_flag': True +} + +PLUGIN_NAME = 'neutron.plugins.ml2.plugin.Ml2Plugin' +NOTIFIER = 'neutron.plugins.ml2.rpc.AgentNotifierApi' + 
+ +class TestL2PopulationRpcTestCase(test_plugin.NeutronDbPluginV2TestCase): + + def setUp(self): + # Enable the test mechanism driver to ensure that + # we can successfully call through to all mechanism + # driver apis. + config.cfg.CONF.set_override('mechanism_drivers', + ['openvswitch', 'linuxbridge', + 'l2population'], + 'ml2') + super(TestL2PopulationRpcTestCase, self).setUp(PLUGIN_NAME) + + self.adminContext = context.get_admin_context() + + self.type_manager = managers.TypeManager() + self.notifier = rpc.AgentNotifierApi(topics.AGENT) + self.callbacks = rpc.RpcCallbacks(self.notifier, self.type_manager) + + self.orig_supported_agents = l2_consts.SUPPORTED_AGENT_TYPES + l2_consts.SUPPORTED_AGENT_TYPES = [constants.AGENT_TYPE_OVS] + + net_arg = {pnet.NETWORK_TYPE: 'vxlan', + pnet.SEGMENTATION_ID: '1'} + self._network = self._make_network(self.fmt, 'net1', True, + arg_list=(pnet.NETWORK_TYPE, + pnet.SEGMENTATION_ID,), + **net_arg) + + notifier_patch = mock.patch(NOTIFIER) + notifier_patch.start() + + self.fanout_topic = topics.get_topic_name(topics.AGENT, + topics.L2POPULATION, + topics.UPDATE) + fanout = ('neutron.common.rpc_compat.RpcProxy.fanout_cast') + fanout_patch = mock.patch(fanout) + self.mock_fanout = fanout_patch.start() + + cast = ('neutron.common.rpc_compat.RpcProxy.cast') + cast_patch = mock.patch(cast) + self.mock_cast = cast_patch.start() + + uptime = ('neutron.plugins.ml2.drivers.l2pop.db.L2populationDbMixin.' 
+ 'get_agent_uptime') + uptime_patch = mock.patch(uptime, return_value=190) + uptime_patch.start() + + self.addCleanup(db_api.clear_db) + + def tearDown(self): + l2_consts.SUPPORTED_AGENT_TYPES = self.orig_supported_agents + super(TestL2PopulationRpcTestCase, self).tearDown() + + def _register_ml2_agents(self): + callback = agents_db.AgentExtRpcCallback() + callback.report_state(self.adminContext, + agent_state={'agent_state': L2_AGENT}, + time=timeutils.strtime()) + callback.report_state(self.adminContext, + agent_state={'agent_state': L2_AGENT_2}, + time=timeutils.strtime()) + callback.report_state(self.adminContext, + agent_state={'agent_state': L2_AGENT_3}, + time=timeutils.strtime()) + callback.report_state(self.adminContext, + agent_state={'agent_state': L2_AGENT_4}, + time=timeutils.strtime()) + + def test_fdb_add_called(self): + self._register_ml2_agents() + + with self.subnet(network=self._network) as subnet: + host_arg = {portbindings.HOST_ID: HOST} + with self.port(subnet=subnet, + arg_list=(portbindings.HOST_ID,), + **host_arg) as port1: + with self.port(subnet=subnet, + arg_list=(portbindings.HOST_ID,), + **host_arg): + p1 = port1['port'] + + device = 'tap' + p1['id'] + + self.mock_fanout.reset_mock() + self.callbacks.update_device_up(self.adminContext, + agent_id=HOST, + device=device) + + p1_ips = [p['ip_address'] for p in p1['fixed_ips']] + expected = {'args': + {'fdb_entries': + {p1['network_id']: + {'ports': + {'20.0.0.1': [constants.FLOODING_ENTRY, + [p1['mac_address'], + p1_ips[0]]]}, + 'network_type': 'vxlan', + 'segment_id': 1}}}, + 'namespace': None, + 'method': 'add_fdb_entries'} + + self.mock_fanout.assert_called_with( + mock.ANY, expected, topic=self.fanout_topic) + + def test_fdb_add_not_called_type_local(self): + self._register_ml2_agents() + + with self.subnet(network=self._network) as subnet: + host_arg = {portbindings.HOST_ID: HOST + '_3'} + with self.port(subnet=subnet, + arg_list=(portbindings.HOST_ID,), + **host_arg) as port1: + 
with self.port(subnet=subnet, + arg_list=(portbindings.HOST_ID,), + **host_arg): + p1 = port1['port'] + + device = 'tap' + p1['id'] + + self.mock_fanout.reset_mock() + self.callbacks.update_device_up(self.adminContext, + agent_id=HOST, + device=device) + + self.assertFalse(self.mock_fanout.called) + + def test_fdb_add_two_agents(self): + self._register_ml2_agents() + + with self.subnet(network=self._network) as subnet: + host_arg = {portbindings.HOST_ID: HOST, + 'admin_state_up': True} + with self.port(subnet=subnet, + arg_list=(portbindings.HOST_ID, 'admin_state_up',), + **host_arg) as port1: + host_arg = {portbindings.HOST_ID: HOST + '_2', + 'admin_state_up': True} + with self.port(subnet=subnet, + arg_list=(portbindings.HOST_ID, + 'admin_state_up',), + **host_arg) as port2: + p1 = port1['port'] + p2 = port2['port'] + + device = 'tap' + p1['id'] + + self.mock_cast.reset_mock() + self.mock_fanout.reset_mock() + self.callbacks.update_device_up(self.adminContext, + agent_id=HOST, + device=device) + + p1_ips = [p['ip_address'] for p in p1['fixed_ips']] + p2_ips = [p['ip_address'] for p in p2['fixed_ips']] + + expected1 = {'args': + {'fdb_entries': + {p1['network_id']: + {'ports': + {'20.0.0.2': [constants.FLOODING_ENTRY, + [p2['mac_address'], + p2_ips[0]]]}, + 'network_type': 'vxlan', + 'segment_id': 1}}}, + 'namespace': None, + 'method': 'add_fdb_entries'} + + topic = topics.get_topic_name(topics.AGENT, + topics.L2POPULATION, + topics.UPDATE, + HOST) + + self.mock_cast.assert_called_with(mock.ANY, + expected1, + topic=topic) + + expected2 = {'args': + {'fdb_entries': + {p1['network_id']: + {'ports': + {'20.0.0.1': [constants.FLOODING_ENTRY, + [p1['mac_address'], + p1_ips[0]]]}, + 'network_type': 'vxlan', + 'segment_id': 1}}}, + 'namespace': None, + 'method': 'add_fdb_entries'} + + self.mock_fanout.assert_called_with( + mock.ANY, expected2, topic=self.fanout_topic) + + def test_fdb_add_called_two_networks(self): + self._register_ml2_agents() + + with 
self.subnet(network=self._network) as subnet: + host_arg = {portbindings.HOST_ID: HOST + '_2'} + with self.port(subnet=subnet, + arg_list=(portbindings.HOST_ID,), + **host_arg) as port1: + with self.subnet(cidr='10.1.0.0/24') as subnet2: + with self.port(subnet=subnet2, + arg_list=(portbindings.HOST_ID,), + **host_arg): + host_arg = {portbindings.HOST_ID: HOST} + with self.port(subnet=subnet, + arg_list=(portbindings.HOST_ID,), + **host_arg) as port3: + p1 = port1['port'] + p3 = port3['port'] + + device = 'tap' + p3['id'] + + self.mock_cast.reset_mock() + self.mock_fanout.reset_mock() + self.callbacks.update_device_up( + self.adminContext, agent_id=HOST, + device=device) + + p1_ips = [p['ip_address'] + for p in p1['fixed_ips']] + expected1 = {'args': + {'fdb_entries': + {p1['network_id']: + {'ports': + {'20.0.0.2': + [constants.FLOODING_ENTRY, + [p1['mac_address'], + p1_ips[0]]]}, + 'network_type': 'vxlan', + 'segment_id': 1}}}, + 'namespace': None, + 'method': 'add_fdb_entries'} + + topic = topics.get_topic_name(topics.AGENT, + topics.L2POPULATION, + topics.UPDATE, + HOST) + + self.mock_cast.assert_called_with(mock.ANY, + expected1, + topic=topic) + + p3_ips = [p['ip_address'] + for p in p3['fixed_ips']] + expected2 = {'args': + {'fdb_entries': + {p1['network_id']: + {'ports': + {'20.0.0.1': + [constants.FLOODING_ENTRY, + [p3['mac_address'], + p3_ips[0]]]}, + 'network_type': 'vxlan', + 'segment_id': 1}}}, + 'namespace': None, + 'method': 'add_fdb_entries'} + + self.mock_fanout.assert_called_with( + mock.ANY, expected2, + topic=self.fanout_topic) + + def test_update_port_down(self): + self._register_ml2_agents() + + with self.subnet(network=self._network) as subnet: + host_arg = {portbindings.HOST_ID: HOST} + with self.port(subnet=subnet, + arg_list=(portbindings.HOST_ID,), + **host_arg) as port1: + with self.port(subnet=subnet, + arg_list=(portbindings.HOST_ID,), + **host_arg) as port2: + p2 = port2['port'] + device2 = 'tap' + p2['id'] + + 
self.mock_fanout.reset_mock() + self.callbacks.update_device_up(self.adminContext, + agent_id=HOST, + device=device2) + + p1 = port1['port'] + device1 = 'tap' + p1['id'] + + self.callbacks.update_device_up(self.adminContext, + agent_id=HOST, + device=device1) + self.mock_fanout.reset_mock() + self.callbacks.update_device_down(self.adminContext, + agent_id=HOST, + device=device2) + + p2_ips = [p['ip_address'] for p in p2['fixed_ips']] + expected = {'args': + {'fdb_entries': + {p2['network_id']: + {'ports': + {'20.0.0.1': [[p2['mac_address'], + p2_ips[0]]]}, + 'network_type': 'vxlan', + 'segment_id': 1}}}, + 'namespace': None, + 'method': 'remove_fdb_entries'} + + self.mock_fanout.assert_called_with( + mock.ANY, expected, topic=self.fanout_topic) + + def test_update_port_down_last_port_up(self): + self._register_ml2_agents() + + with self.subnet(network=self._network) as subnet: + host_arg = {portbindings.HOST_ID: HOST} + with self.port(subnet=subnet, + arg_list=(portbindings.HOST_ID,), + **host_arg): + with self.port(subnet=subnet, + arg_list=(portbindings.HOST_ID,), + **host_arg) as port2: + p2 = port2['port'] + device2 = 'tap' + p2['id'] + + self.mock_fanout.reset_mock() + self.callbacks.update_device_up(self.adminContext, + agent_id=HOST, + device=device2) + + self.callbacks.update_device_down(self.adminContext, + agent_id=HOST, + device=device2) + + p2_ips = [p['ip_address'] for p in p2['fixed_ips']] + expected = {'args': + {'fdb_entries': + {p2['network_id']: + {'ports': + {'20.0.0.1': [constants.FLOODING_ENTRY, + [p2['mac_address'], + p2_ips[0]]]}, + 'network_type': 'vxlan', + 'segment_id': 1}}}, + 'namespace': None, + 'method': 'remove_fdb_entries'} + + self.mock_fanout.assert_called_with( + mock.ANY, expected, topic=self.fanout_topic) + + def test_delete_port(self): + self._register_ml2_agents() + + with self.subnet(network=self._network) as subnet: + host_arg = {portbindings.HOST_ID: HOST} + with self.port(subnet=subnet, + arg_list=(portbindings.HOST_ID,), 
+ **host_arg) as port: + p1 = port['port'] + device = 'tap' + p1['id'] + + self.mock_fanout.reset_mock() + self.callbacks.update_device_up(self.adminContext, + agent_id=HOST, + device=device) + + with self.port(subnet=subnet, + arg_list=(portbindings.HOST_ID,), + **host_arg) as port2: + p2 = port2['port'] + device1 = 'tap' + p2['id'] + + self.mock_fanout.reset_mock() + self.callbacks.update_device_up(self.adminContext, + agent_id=HOST, + device=device1) + + p2_ips = [p['ip_address'] for p in p2['fixed_ips']] + expected = {'args': + {'fdb_entries': + {p2['network_id']: + {'ports': + {'20.0.0.1': [[p2['mac_address'], + p2_ips[0]]]}, + 'network_type': 'vxlan', + 'segment_id': 1}}}, + 'namespace': None, + 'method': 'remove_fdb_entries'} + + self.mock_fanout.assert_any_call( + mock.ANY, expected, topic=self.fanout_topic) + + def test_delete_port_last_port_up(self): + self._register_ml2_agents() + + with self.subnet(network=self._network) as subnet: + host_arg = {portbindings.HOST_ID: HOST} + with self.port(subnet=subnet, + arg_list=(portbindings.HOST_ID,), + **host_arg): + with self.port(subnet=subnet, + arg_list=(portbindings.HOST_ID,), + **host_arg) as port: + p1 = port['port'] + + device = 'tap' + p1['id'] + + self.callbacks.update_device_up(self.adminContext, + agent_id=HOST, + device=device) + + p1_ips = [p['ip_address'] for p in p1['fixed_ips']] + expected = {'args': + {'fdb_entries': + {p1['network_id']: + {'ports': + {'20.0.0.1': [constants.FLOODING_ENTRY, + [p1['mac_address'], + p1_ips[0]]]}, + 'network_type': 'vxlan', + 'segment_id': 1}}}, + 'namespace': None, + 'method': 'remove_fdb_entries'} + + self.mock_fanout.assert_any_call( + mock.ANY, expected, topic=self.fanout_topic) + + def test_fixed_ips_changed(self): + self._register_ml2_agents() + + with self.subnet(network=self._network) as subnet: + host_arg = {portbindings.HOST_ID: HOST} + with self.port(subnet=subnet, cidr='10.0.0.0/24', + arg_list=(portbindings.HOST_ID,), + **host_arg) as port1: + p1 = 
port1['port'] + + device = 'tap' + p1['id'] + + self.callbacks.update_device_up(self.adminContext, + agent_id=HOST, + device=device) + + self.mock_fanout.reset_mock() + + data = {'port': {'fixed_ips': [{'ip_address': '10.0.0.2'}, + {'ip_address': '10.0.0.10'}]}} + req = self.new_update_request('ports', data, p1['id']) + res = self.deserialize(self.fmt, req.get_response(self.api)) + ips = res['port']['fixed_ips'] + self.assertEqual(len(ips), 2) + + add_expected = {'args': + {'fdb_entries': + {'chg_ip': + {p1['network_id']: + {'20.0.0.1': + {'after': [[p1['mac_address'], + '10.0.0.10']]}}}}}, + 'namespace': None, + 'method': 'update_fdb_entries'} + + self.mock_fanout.assert_any_call( + mock.ANY, add_expected, topic=self.fanout_topic) + + self.mock_fanout.reset_mock() + + data = {'port': {'fixed_ips': [{'ip_address': '10.0.0.2'}, + {'ip_address': '10.0.0.16'}]}} + req = self.new_update_request('ports', data, p1['id']) + res = self.deserialize(self.fmt, req.get_response(self.api)) + ips = res['port']['fixed_ips'] + self.assertEqual(len(ips), 2) + + upd_expected = {'args': + {'fdb_entries': + {'chg_ip': + {p1['network_id']: + {'20.0.0.1': + {'before': [[p1['mac_address'], + '10.0.0.10']], + 'after': [[p1['mac_address'], + '10.0.0.16']]}}}}}, + 'namespace': None, + 'method': 'update_fdb_entries'} + + self.mock_fanout.assert_any_call( + mock.ANY, upd_expected, topic=self.fanout_topic) + + self.mock_fanout.reset_mock() + + data = {'port': {'fixed_ips': [{'ip_address': '10.0.0.16'}]}} + req = self.new_update_request('ports', data, p1['id']) + res = self.deserialize(self.fmt, req.get_response(self.api)) + ips = res['port']['fixed_ips'] + self.assertEqual(len(ips), 1) + + del_expected = {'args': + {'fdb_entries': + {'chg_ip': + {p1['network_id']: + {'20.0.0.1': + {'before': [[p1['mac_address'], + '10.0.0.2']]}}}}}, + 'namespace': None, + 'method': 'update_fdb_entries'} + + self.mock_fanout.assert_any_call( + mock.ANY, del_expected, topic=self.fanout_topic) + + def 
test_no_fdb_updates_without_port_updates(self): + self._register_ml2_agents() + + with self.subnet(network=self._network) as subnet: + host_arg = {portbindings.HOST_ID: HOST} + with self.port(subnet=subnet, cidr='10.0.0.0/24', + arg_list=(portbindings.HOST_ID,), + **host_arg) as port1: + p1 = port1['port'] + + device = 'tap' + p1['id'] + + self.callbacks.update_device_up(self.adminContext, + agent_id=HOST, + device=device) + p1['status'] = 'ACTIVE' + self.mock_fanout.reset_mock() + + fanout = ('neutron.plugins.ml2.drivers.l2pop.rpc.' + 'L2populationAgentNotifyAPI._notification_fanout') + fanout_patch = mock.patch(fanout) + mock_fanout = fanout_patch.start() + + plugin = manager.NeutronManager.get_plugin() + plugin.update_port(self.adminContext, p1['id'], port1) + + self.assertFalse(mock_fanout.called) + fanout_patch.stop() + + def test_host_changed(self): + self._register_ml2_agents() + with self.subnet(network=self._network) as subnet: + host_arg = {portbindings.HOST_ID: L2_AGENT['host']} + host2_arg = {portbindings.HOST_ID: L2_AGENT_2['host']} + with self.port(subnet=subnet, cidr='10.0.0.0/24', + arg_list=(portbindings.HOST_ID,), + **host_arg) as port1: + with self.port(subnet=subnet, cidr='10.0.0.0/24', + arg_list=(portbindings.HOST_ID,), + **host2_arg) as port2: + p1 = port1['port'] + device1 = 'tap' + p1['id'] + self.callbacks.update_device_up( + self.adminContext, + agent_id=L2_AGENT['host'], + device=device1) + p2 = port2['port'] + device2 = 'tap' + p2['id'] + self.callbacks.update_device_up( + self.adminContext, + agent_id=L2_AGENT_2['host'], + device=device2) + data2 = {'port': {'binding:host_id': L2_AGENT_2['host']}} + req = self.new_update_request('ports', data2, p1['id']) + res = self.deserialize(self.fmt, + req.get_response(self.api)) + self.assertEqual(res['port']['binding:host_id'], + L2_AGENT_2['host']) + self.mock_fanout.reset_mock() + self.callbacks.get_device_details( + self.adminContext, + device=device1, + agent_id=L2_AGENT_2['host']) + p1_ips 
= [p['ip_address'] for p in p1['fixed_ips']] + expected = {'args': + {'fdb_entries': + {p1['network_id']: + {'ports': + {'20.0.0.1': [constants.FLOODING_ENTRY, + [p1['mac_address'], + p1_ips[0]]]}, + 'network_type': 'vxlan', + 'segment_id': 1}}}, + 'namespace': None, + 'method': 'remove_fdb_entries'} + + self.mock_fanout.assert_called_with( + mock.ANY, expected, topic=self.fanout_topic) + + def test_host_changed_twice(self): + self._register_ml2_agents() + with self.subnet(network=self._network) as subnet: + host_arg = {portbindings.HOST_ID: L2_AGENT['host']} + host2_arg = {portbindings.HOST_ID: L2_AGENT_2['host']} + with self.port(subnet=subnet, cidr='10.0.0.0/24', + arg_list=(portbindings.HOST_ID,), + **host_arg) as port1: + with self.port(subnet=subnet, cidr='10.0.0.0/24', + arg_list=(portbindings.HOST_ID,), + **host2_arg) as port2: + p1 = port1['port'] + device1 = 'tap' + p1['id'] + self.callbacks.update_device_up( + self.adminContext, + agent_id=L2_AGENT['host'], + device=device1) + p2 = port2['port'] + device2 = 'tap' + p2['id'] + self.callbacks.update_device_up( + self.adminContext, + agent_id=L2_AGENT_2['host'], + device=device2) + data2 = {'port': {'binding:host_id': L2_AGENT_2['host']}} + req = self.new_update_request('ports', data2, p1['id']) + res = self.deserialize(self.fmt, + req.get_response(self.api)) + self.assertEqual(res['port']['binding:host_id'], + L2_AGENT_2['host']) + data4 = {'port': {'binding:host_id': L2_AGENT_4['host']}} + req = self.new_update_request('ports', data4, p1['id']) + res = self.deserialize(self.fmt, + req.get_response(self.api)) + self.assertEqual(res['port']['binding:host_id'], + L2_AGENT_4['host']) + self.mock_fanout.reset_mock() + self.callbacks.get_device_details( + self.adminContext, + device=device1, + agent_id=L2_AGENT_4['host']) + p1_ips = [p['ip_address'] for p in p1['fixed_ips']] + expected = {'args': + {'fdb_entries': + {p1['network_id']: + {'ports': + {'20.0.0.1': [constants.FLOODING_ENTRY, + [p1['mac_address'], + 
p1_ips[0]]]}, + 'network_type': 'vxlan', + 'segment_id': 1}}}, + 'namespace': None, + 'method': 'remove_fdb_entries'} + + self.mock_fanout.assert_called_with( + mock.ANY, expected, topic=self.fanout_topic) diff --git a/neutron/tests/unit/ml2/drivers/test_mech_mlnx.py b/neutron/tests/unit/ml2/drivers/test_mech_mlnx.py new file mode 100644 index 000000000..343af4233 --- /dev/null +++ b/neutron/tests/unit/ml2/drivers/test_mech_mlnx.py @@ -0,0 +1,139 @@ +# Copyright (c) 2014 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ + +import mock +from oslo.config import cfg + +from neutron.common import constants +from neutron.extensions import portbindings +from neutron.plugins.ml2 import driver_api as api +from neutron.plugins.ml2.drivers.mlnx import mech_mlnx +from neutron.tests.unit.ml2 import _test_mech_agent as base + + +class MlnxMechanismBaseTestCase(base.AgentMechanismBaseTestCase): + VIF_TYPE = portbindings.VIF_TYPE_MLNX_DIRECT + CAP_PORT_FILTER = False + AGENT_TYPE = constants.AGENT_TYPE_MLNX + + GOOD_MAPPINGS = {'fake_physical_network': 'fake_bridge'} + GOOD_CONFIGS = {'interface_mappings': GOOD_MAPPINGS} + + BAD_MAPPINGS = {'wrong_physical_network': 'wrong_bridge'} + BAD_CONFIGS = {'interface_mappings': BAD_MAPPINGS} + + AGENTS = [{'alive': True, + 'configurations': GOOD_CONFIGS}] + AGENTS_DEAD = [{'alive': False, + 'configurations': GOOD_CONFIGS}] + AGENTS_BAD = [{'alive': False, + 'configurations': GOOD_CONFIGS}, + {'alive': True, + 'configurations': BAD_CONFIGS}] + + def setUp(self): + super(MlnxMechanismBaseTestCase, self).setUp() + self.driver = mech_mlnx.MlnxMechanismDriver() + self.driver.initialize() + + +class MlnxMechanismGenericTestCase(MlnxMechanismBaseTestCase, + base.AgentMechanismGenericTestCase): + pass + + +class MlnxMechanismLocalTestCase(MlnxMechanismBaseTestCase, + base.AgentMechanismLocalTestCase): + pass + + +class MlnxMechanismFlatTestCase(MlnxMechanismBaseTestCase, + base.AgentMechanismFlatTestCase): + pass + + +class MlnxMechanismVlanTestCase(MlnxMechanismBaseTestCase, + base.AgentMechanismVlanTestCase): + pass + + +class MlnxMechanismVnicTypeTestCase(MlnxMechanismBaseTestCase, + base.AgentMechanismVlanTestCase): + def _check_vif_type_for_vnic_type(self, vnic_type, + expected_vif_type): + context = base.FakePortContext(self.AGENT_TYPE, + self.AGENTS, + self.VLAN_SEGMENTS, + vnic_type) + self.driver.bind_port(context) + self.assertEqual(expected_vif_type, context._bound_vif_type) + + def test_vnic_type_direct(self): + 
self._check_vif_type_for_vnic_type(portbindings.VNIC_DIRECT, + portbindings.VIF_TYPE_MLNX_HOSTDEV) + + def test_vnic_type_macvtap(self): + self._check_vif_type_for_vnic_type(portbindings.VNIC_MACVTAP, + portbindings.VIF_TYPE_MLNX_DIRECT) + + def test_vnic_type_normal(self): + self._check_vif_type_for_vnic_type(portbindings.VNIC_NORMAL, + self.VIF_TYPE) + + +class MlnxMechanismProfileTestCase(MlnxMechanismBaseTestCase): + def setUp(self): + cfg.CONF.set_override('apply_profile_patch', True, 'ESWITCH') + super(MlnxMechanismProfileTestCase, self).setUp() + + def test_profile_contains_physical_net(self): + VLAN_SEGMENTS = [{api.ID: 'vlan_segment_id', + api.NETWORK_TYPE: 'vlan', + api.PHYSICAL_NETWORK: 'fake_physical_network', + api.SEGMENTATION_ID: 1234}] + + context = base.FakePortContext(self.AGENT_TYPE, + self.AGENTS, + VLAN_SEGMENTS, + portbindings.VNIC_DIRECT) + context._binding = mock.Mock() + context._binding.profile = {} + segment = VLAN_SEGMENTS[0] + agent = self.AGENTS[0] + self.driver.try_to_bind_segment_for_agent(context, segment, agent) + self.assertEqual('{"physical_network": "fake_physical_network"}', + context._binding.profile) + + +class MlnxMechanismVifDetailsTestCase(MlnxMechanismBaseTestCase): + def setUp(self): + super(MlnxMechanismVifDetailsTestCase, self).setUp() + + def test_vif_details_contains_physical_net(self): + VLAN_SEGMENTS = [{api.ID: 'vlan_segment_id', + api.NETWORK_TYPE: 'vlan', + api.PHYSICAL_NETWORK: 'fake_physical_network', + api.SEGMENTATION_ID: 1234}] + + context = base.FakePortContext(self.AGENT_TYPE, + self.AGENTS, + VLAN_SEGMENTS, + portbindings.VNIC_DIRECT) + segment = VLAN_SEGMENTS[0] + agent = self.AGENTS[0] + self.driver.try_to_bind_segment_for_agent(context, segment, agent) + self.assertTrue(set({"physical_network": "fake_physical_network"}.items()).issubset( + set(context._bound_vif_details.items()))) diff --git a/neutron/tests/unit/ml2/drivers/test_ofagent_mech.py b/neutron/tests/unit/ml2/drivers/test_ofagent_mech.py new file mode 100644 index 
000000000..63daf9ec0 --- /dev/null +++ b/neutron/tests/unit/ml2/drivers/test_ofagent_mech.py @@ -0,0 +1,74 @@ +# Copyright (c) 2014 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from neutron.common import constants +from neutron.extensions import portbindings +from neutron.plugins.ml2.drivers import mech_ofagent +from neutron.tests.unit.ml2 import _test_mech_agent as base + + +class OfagentMechanismBaseTestCase(base.AgentMechanismBaseTestCase): + VIF_TYPE = portbindings.VIF_TYPE_OVS + CAP_PORT_FILTER = True + AGENT_TYPE = constants.AGENT_TYPE_OFA + + GOOD_MAPPINGS = {'fake_physical_network': 'fake_bridge'} + GOOD_TUNNEL_TYPES = ['gre', 'vxlan'] + GOOD_CONFIGS = {'bridge_mappings': GOOD_MAPPINGS, + 'tunnel_types': GOOD_TUNNEL_TYPES} + + BAD_MAPPINGS = {'wrong_physical_network': 'wrong_bridge'} + BAD_TUNNEL_TYPES = ['bad_tunnel_type'] + BAD_CONFIGS = {'bridge_mappings': BAD_MAPPINGS, + 'tunnel_types': BAD_TUNNEL_TYPES} + + AGENTS = [{'alive': True, + 'configurations': GOOD_CONFIGS}] + AGENTS_DEAD = [{'alive': False, + 'configurations': GOOD_CONFIGS}] + AGENTS_BAD = [{'alive': False, + 'configurations': GOOD_CONFIGS}, + {'alive': True, + 'configurations': BAD_CONFIGS}] + + def setUp(self): + super(OfagentMechanismBaseTestCase, self).setUp() + self.driver = mech_ofagent.OfagentMechanismDriver() + self.driver.initialize() + + +class OfagentMechanismGenericTestCase(OfagentMechanismBaseTestCase, + 
base.AgentMechanismGenericTestCase): + pass + + +class OfagentMechanismLocalTestCase(OfagentMechanismBaseTestCase, + base.AgentMechanismLocalTestCase): + pass + + +class OfagentMechanismFlatTestCase(OfagentMechanismBaseTestCase, + base.AgentMechanismFlatTestCase): + pass + + +class OfagentMechanismVlanTestCase(OfagentMechanismBaseTestCase, + base.AgentMechanismVlanTestCase): + pass + + +class OfagentMechanismGreTestCase(OfagentMechanismBaseTestCase, + base.AgentMechanismGreTestCase): + pass diff --git a/neutron/tests/unit/ml2/test_agent_scheduler.py b/neutron/tests/unit/ml2/test_agent_scheduler.py new file mode 100644 index 000000000..fbf94a1b1 --- /dev/null +++ b/neutron/tests/unit/ml2/test_agent_scheduler.py @@ -0,0 +1,36 @@ +# Copyright (c) 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from neutron.tests.unit.ml2 import test_ml2_plugin +from neutron.tests.unit.openvswitch import test_agent_scheduler + + +class Ml2AgentSchedulerTestCase( + test_agent_scheduler.OvsAgentSchedulerTestCase): + plugin_str = test_ml2_plugin.PLUGIN_NAME + l3_plugin = ('neutron.services.l3_router.' + 'l3_router_plugin.L3RouterPlugin') + + +class Ml2L3AgentNotifierTestCase( + test_agent_scheduler.OvsL3AgentNotifierTestCase): + plugin_str = test_ml2_plugin.PLUGIN_NAME + l3_plugin = ('neutron.services.l3_router.' 
+ 'l3_router_plugin.L3RouterPlugin') + + +class Ml2DhcpAgentNotifierTestCase( + test_agent_scheduler.OvsDhcpAgentNotifierTestCase): + plugin_str = test_ml2_plugin.PLUGIN_NAME diff --git a/neutron/tests/unit/ml2/test_mech_hyperv.py b/neutron/tests/unit/ml2/test_mech_hyperv.py new file mode 100644 index 000000000..60ac1a620 --- /dev/null +++ b/neutron/tests/unit/ml2/test_mech_hyperv.py @@ -0,0 +1,65 @@ +# Copyright (c) 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from neutron.common import constants +from neutron.extensions import portbindings +from neutron.plugins.ml2.drivers import mech_hyperv +from neutron.tests.unit.ml2 import _test_mech_agent as base + + +class HypervMechanismBaseTestCase(base.AgentMechanismBaseTestCase): + VIF_TYPE = portbindings.VIF_TYPE_HYPERV + CAP_PORT_FILTER = False + AGENT_TYPE = constants.AGENT_TYPE_HYPERV + + GOOD_MAPPINGS = {'fake_physical_network': 'fake_vswitch'} + GOOD_CONFIGS = {'vswitch_mappings': GOOD_MAPPINGS} + + BAD_MAPPINGS = {'wrong_physical_network': 'wrong_vswitch'} + BAD_CONFIGS = {'vswitch_mappings': BAD_MAPPINGS} + + AGENTS = [{'alive': True, + 'configurations': GOOD_CONFIGS}] + AGENTS_DEAD = [{'alive': False, + 'configurations': GOOD_CONFIGS}] + AGENTS_BAD = [{'alive': False, + 'configurations': GOOD_CONFIGS}, + {'alive': True, + 'configurations': BAD_CONFIGS}] + + def setUp(self): + super(HypervMechanismBaseTestCase, self).setUp() + self.driver = mech_hyperv.HypervMechanismDriver() + self.driver.initialize() + + +class HypervMechanismGenericTestCase(HypervMechanismBaseTestCase, + base.AgentMechanismGenericTestCase): + pass + + +class HypervMechanismLocalTestCase(HypervMechanismBaseTestCase, + base.AgentMechanismLocalTestCase): + pass + + +class HypervMechanismFlatTestCase(HypervMechanismBaseTestCase, + base.AgentMechanismFlatTestCase): + pass + + +class HypervMechanismVlanTestCase(HypervMechanismBaseTestCase, + base.AgentMechanismVlanTestCase): + pass diff --git a/neutron/tests/unit/ml2/test_mech_linuxbridge.py b/neutron/tests/unit/ml2/test_mech_linuxbridge.py new file mode 100644 index 000000000..66903c02b --- /dev/null +++ b/neutron/tests/unit/ml2/test_mech_linuxbridge.py @@ -0,0 +1,74 @@ +# Copyright (c) 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from neutron.common import constants +from neutron.extensions import portbindings +from neutron.plugins.ml2.drivers import mech_linuxbridge +from neutron.tests.unit.ml2 import _test_mech_agent as base + + +class LinuxbridgeMechanismBaseTestCase(base.AgentMechanismBaseTestCase): + VIF_TYPE = portbindings.VIF_TYPE_BRIDGE + CAP_PORT_FILTER = True + AGENT_TYPE = constants.AGENT_TYPE_LINUXBRIDGE + + GOOD_MAPPINGS = {'fake_physical_network': 'fake_interface'} + GOOD_TUNNEL_TYPES = ['gre', 'vxlan'] + GOOD_CONFIGS = {'interface_mappings': GOOD_MAPPINGS, + 'tunnel_types': GOOD_TUNNEL_TYPES} + + BAD_MAPPINGS = {'wrong_physical_network': 'wrong_interface'} + BAD_TUNNEL_TYPES = ['bad_tunnel_type'] + BAD_CONFIGS = {'interface_mappings': BAD_MAPPINGS, + 'tunnel_types': BAD_TUNNEL_TYPES} + + AGENTS = [{'alive': True, + 'configurations': GOOD_CONFIGS}] + AGENTS_DEAD = [{'alive': False, + 'configurations': GOOD_CONFIGS}] + AGENTS_BAD = [{'alive': False, + 'configurations': GOOD_CONFIGS}, + {'alive': True, + 'configurations': BAD_CONFIGS}] + + def setUp(self): + super(LinuxbridgeMechanismBaseTestCase, self).setUp() + self.driver = mech_linuxbridge.LinuxbridgeMechanismDriver() + self.driver.initialize() + + +class LinuxbridgeMechanismGenericTestCase(LinuxbridgeMechanismBaseTestCase, + base.AgentMechanismGenericTestCase): + pass + + +class LinuxbridgeMechanismLocalTestCase(LinuxbridgeMechanismBaseTestCase, + base.AgentMechanismLocalTestCase): + pass + + +class LinuxbridgeMechanismFlatTestCase(LinuxbridgeMechanismBaseTestCase, + base.AgentMechanismFlatTestCase): + pass + + 
+class LinuxbridgeMechanismVlanTestCase(LinuxbridgeMechanismBaseTestCase, + base.AgentMechanismVlanTestCase): + pass + + +class LinuxbridgeMechanismGreTestCase(LinuxbridgeMechanismBaseTestCase, + base.AgentMechanismGreTestCase): + pass diff --git a/neutron/tests/unit/ml2/test_mech_openvswitch.py b/neutron/tests/unit/ml2/test_mech_openvswitch.py new file mode 100644 index 000000000..b1af1b7fa --- /dev/null +++ b/neutron/tests/unit/ml2/test_mech_openvswitch.py @@ -0,0 +1,74 @@ +# Copyright (c) 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from neutron.common import constants +from neutron.extensions import portbindings +from neutron.plugins.ml2.drivers import mech_openvswitch +from neutron.tests.unit.ml2 import _test_mech_agent as base + + +class OpenvswitchMechanismBaseTestCase(base.AgentMechanismBaseTestCase): + VIF_TYPE = portbindings.VIF_TYPE_OVS + CAP_PORT_FILTER = True + AGENT_TYPE = constants.AGENT_TYPE_OVS + + GOOD_MAPPINGS = {'fake_physical_network': 'fake_bridge'} + GOOD_TUNNEL_TYPES = ['gre', 'vxlan'] + GOOD_CONFIGS = {'bridge_mappings': GOOD_MAPPINGS, + 'tunnel_types': GOOD_TUNNEL_TYPES} + + BAD_MAPPINGS = {'wrong_physical_network': 'wrong_bridge'} + BAD_TUNNEL_TYPES = ['bad_tunnel_type'] + BAD_CONFIGS = {'bridge_mappings': BAD_MAPPINGS, + 'tunnel_types': BAD_TUNNEL_TYPES} + + AGENTS = [{'alive': True, + 'configurations': GOOD_CONFIGS}] + AGENTS_DEAD = [{'alive': False, + 'configurations': GOOD_CONFIGS}] + AGENTS_BAD = [{'alive': False, + 'configurations': GOOD_CONFIGS}, + {'alive': True, + 'configurations': BAD_CONFIGS}] + + def setUp(self): + super(OpenvswitchMechanismBaseTestCase, self).setUp() + self.driver = mech_openvswitch.OpenvswitchMechanismDriver() + self.driver.initialize() + + +class OpenvswitchMechanismGenericTestCase(OpenvswitchMechanismBaseTestCase, + base.AgentMechanismGenericTestCase): + pass + + +class OpenvswitchMechanismLocalTestCase(OpenvswitchMechanismBaseTestCase, + base.AgentMechanismLocalTestCase): + pass + + +class OpenvswitchMechanismFlatTestCase(OpenvswitchMechanismBaseTestCase, + base.AgentMechanismFlatTestCase): + pass + + +class OpenvswitchMechanismVlanTestCase(OpenvswitchMechanismBaseTestCase, + base.AgentMechanismVlanTestCase): + pass + + +class OpenvswitchMechanismGreTestCase(OpenvswitchMechanismBaseTestCase, + base.AgentMechanismGreTestCase): + pass diff --git a/neutron/tests/unit/ml2/test_mechanism_fslsdn.py b/neutron/tests/unit/ml2/test_mechanism_fslsdn.py new file mode 100644 index 000000000..d39e5973f --- /dev/null +++ 
b/neutron/tests/unit/ml2/test_mechanism_fslsdn.py @@ -0,0 +1,293 @@ +# Copyright (c) 2014 Freescale, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# @author: Trinath Somanchi, Freescale, Inc. + +import mock +from oslo.config import cfg + +from neutron.extensions import portbindings +from neutron.plugins.ml2.drivers import mechanism_fslsdn +from neutron.tests import base +from neutron.tests.unit import test_db_plugin + + +"""Unit testing for Freescale SDN mechanism driver.""" + + +def setup_driver_config(): + """Mechanism Driver specific configuration.""" + + # Configure mechanism driver as 'fslsdn' + cfg.CONF.set_override('mechanism_drivers', ['fslsdn'], 'ml2') + # Configure FSL SDN Mechanism driver specific options + cfg.CONF.set_override('crd_user_name', 'crd', 'ml2_fslsdn') + cfg.CONF.set_override('crd_password', 'CRD_PASS', 'ml2_fslsdn') + cfg.CONF.set_override('crd_tenant_name', 'service', 'ml2_fslsdn') + cfg.CONF.set_override('crd_auth_url', + 'http://127.0.0.1:5000/v2.0', 'ml2_fslsdn') + cfg.CONF.set_override('crd_url', + 'http://127.0.0.1:9797', 'ml2_fslsdn') + cfg.CONF.set_override('crd_auth_strategy', 'keystone', 'ml2_fslsdn') + + +class TestFslSdnMechDriverV2(test_db_plugin.NeutronDbPluginV2TestCase): + + """Testing mechanism driver with ML2 plugin.""" + + def setUp(self): + setup_driver_config() + + def mocked_fslsdn_init(self): + # Mock CRD client, since it requires CRD service running. 
+ self._crdclient = mock.Mock() + + with mock.patch.object(mechanism_fslsdn.FslsdnMechanismDriver, + 'initialize', new=mocked_fslsdn_init): + super(TestFslSdnMechDriverV2, self).setUp() + + +class TestFslSdnMechDriverNetworksV2(test_db_plugin.TestNetworksV2, + TestFslSdnMechDriverV2): + pass + + +class TestFslSdnMechDriverPortsV2(test_db_plugin.TestPortsV2, + TestFslSdnMechDriverV2): + VIF_TYPE = portbindings.VIF_TYPE_OVS + CAP_PORT_FILTER = True + + +class TestFslSdnMechDriverSubnetsV2(test_db_plugin.TestSubnetsV2, + TestFslSdnMechDriverV2): + pass + + +class TestFslSdnMechanismDriver(base.BaseTestCase): + + """Testing FSL SDN Mechanism driver.""" + + def setUp(self): + super(TestFslSdnMechanismDriver, self).setUp() + setup_driver_config() + self.driver = mechanism_fslsdn.FslsdnMechanismDriver() + self.driver.initialize() + self.client = self.driver._crdclient = mock.Mock() + + def test_create_update_delete_network_postcommit(self): + """Testing create/update/delete network postcommit operations.""" + + tenant_id = 'test' + network_id = '123' + segmentation_id = 456 + expected_seg = [{'segmentation_id': segmentation_id}] + expected_crd_network = {'network': + {'network_id': network_id, + 'tenant_id': tenant_id, + 'name': 'FakeNetwork', + 'status': 'ACTIVE', + 'admin_state_up': True, + 'segments': expected_seg}} + network_context = self._get_network_context(tenant_id, network_id, + segmentation_id) + network = network_context.current + segments = network_context.network_segments + net_id = network['id'] + req = self.driver._prepare_crd_network(network, segments) + # test crd network dict + self.assertEqual(expected_crd_network, req) + # test create_network. + self.driver.create_network_postcommit(network_context) + self.client.create_network.assert_called_once_with(body=req) + # test update_network. + self.driver.update_network_postcommit(network_context) + self.client.update_network.assert_called_once_with(net_id, body=req) + # test delete_network. 
+ self.driver.delete_network_postcommit(network_context) + self.client.delete_network.assert_called_once_with(net_id) + + def test_create_update_delete_subnet_postcommit(self): + """Testing create/update/delete subnet postcommit operations.""" + + tenant_id = 'test' + network_id = '123' + subnet_id = '122' + cidr = '192.0.0.0/8' + gateway_ip = '192.0.0.1' + expected_crd_subnet = {'subnet': + {'subnet_id': subnet_id, 'tenant_id': tenant_id, + 'name': 'FakeSubnet', 'network_id': network_id, + 'ip_version': 4, 'cidr': cidr, + 'gateway_ip': gateway_ip, + 'dns_nameservers': '', + 'allocation_pools': '', + 'host_routes': ''}} + subnet_context = self._get_subnet_context(tenant_id, network_id, + subnet_id, cidr, gateway_ip) + subnet = subnet_context.current + subnet_id = subnet['id'] + req = self.driver._prepare_crd_subnet(subnet) + # test crd subnet dict + self.assertEqual(expected_crd_subnet, req) + # test create_subnet. + self.driver.create_subnet_postcommit(subnet_context) + self.client.create_subnet.assert_called_once_with(body=req) + # test update_subnet. + self.driver.update_subnet_postcommit(subnet_context) + self.client.update_subnet.assert_called_once_with(subnet_id, body=req) + # test delete_subnet. 
+ self.driver.delete_subnet_postcommit(subnet_context) + self.client.delete_subnet.assert_called_once_with(subnet_id) + + def test_create_delete_port_postcommit(self): + """Testing create/delete port postcommit operations.""" + + tenant_id = 'test' + network_id = '123' + port_id = '453' + expected_crd_port = {'port': + {'port_id': port_id, 'tenant_id': tenant_id, + 'name': 'FakePort', 'network_id': network_id, + 'subnet_id': '', 'mac_address': 'aabb', + 'device_id': '1234', 'ip_address': '', + 'admin_state_up': True, 'status': 'ACTIVE', + 'device_owner': 'compute', + 'security_groups': ''}} + # Test with empty fixed IP + port_context = self._get_port_context(tenant_id, network_id, port_id) + port = port_context.current + req = self.driver._prepare_crd_port(port) + # Test crd port dict + self.assertEqual(expected_crd_port, req) + # test create_port. + self.driver.create_port_postcommit(port_context) + self.client.create_port.assert_called_once_with(body=req) + # Test delete_port + self.driver.delete_port_postcommit(port_context) + self.client.delete_port.assert_called_once_with(port['id']) + + def test_prepare_port_with_single_fixed_ip(self): + """Test _prepare_crd_port with single fixed_ip.""" + + tenant_id = 'test' + network_id = '123' + port_id = '453' + fips = [{"subnet_id": "sub-1", "ip_address": "10.0.0.1"}] + expected_crd_port = {'port': + {'port_id': port_id, 'tenant_id': tenant_id, + 'name': 'FakePort', 'network_id': network_id, + 'subnet_id': '', 'mac_address': 'aabb', + 'device_id': '1234', 'ip_address': '', + 'admin_state_up': True, 'status': 'ACTIVE', + 'device_owner': 'compute', + 'security_groups': ''}} + port_context = self._get_port_context(tenant_id, network_id, port_id, + fips) + port = port_context.current + req = self.driver._prepare_crd_port(port) + expected_crd_port['port']['subnet_id'] = 'sub-1' + expected_crd_port['port']['ip_address'] = '10.0.0.1' + self.assertEqual(expected_crd_port, req) + + def 
test_prepare_port_with_multiple_fixed_ips(self): + """Test _prepare_crd_port with multiple fixed_ips.""" + + tenant_id = 'test' + network_id = '123' + port_id = '453' + multiple_fips = [{"subnet_id": "sub-1", "ip_address": "10.0.0.1"}, + {"subnet_id": "sub-1", "ip_address": "10.0.0.4"}] + expected_crd_port = {'port': + {'port_id': port_id, 'tenant_id': tenant_id, + 'name': 'FakePort', 'network_id': network_id, + 'subnet_id': '', 'mac_address': 'aabb', + 'device_id': '1234', 'ip_address': '', + 'admin_state_up': True, 'status': 'ACTIVE', + 'device_owner': 'compute', + 'security_groups': ''}} + port_context = self._get_port_context(tenant_id, network_id, port_id, + multiple_fips) + port = port_context.current + req = self.driver._prepare_crd_port(port) + expected_crd_port['port']['subnet_id'] = 'sub-1' + expected_crd_port['port']['ip_address'] = '10.0.0.1' + self.assertEqual(expected_crd_port, req) + + def _get_subnet_context(self, tenant_id, net_id, subnet_id, cidr, + gateway_ip): + # sample data for testing purpose only. + subnet = {'tenant_id': tenant_id, + 'network_id': net_id, + 'id': subnet_id, + 'cidr': cidr, + 'name': 'FakeSubnet', + 'ip_version': 4, + 'gateway_ip': gateway_ip, + } + return FakeContext(subnet) + + def _get_port_context(self, tenant_id, net_id, port_id, + fixed_ips=[]): + # sample data for testing purpose only + port = {'device_id': '1234', + 'name': 'FakePort', + 'mac_address': 'aabb', + 'device_owner': 'compute', + 'tenant_id': tenant_id, + 'id': port_id, + 'fixed_ips': fixed_ips, + 'admin_state_up': True, + 'status': 'ACTIVE', + 'network_id': net_id} + return FakeContext(port) + + def _get_network_context(self, tenant_id, net_id, seg_id): + # sample data for testing purpose only. 
+ network = {'id': net_id, + 'tenant_id': tenant_id, + 'admin_state_up': True, + 'status': 'ACTIVE', + 'name': 'FakeNetwork', } + segments = [{'segmentation_id': seg_id}] + return FakeNetworkContext(network, segments) + + +class FakeNetworkContext(object): + + """To generate network context for testing purposes only.""" + + def __init__(self, network, segments): + self._network = network + self._segments = segments + + @property + def current(self): + return self._network + + @property + def network_segments(self): + return self._segments + + +class FakeContext(object): + + """To generate context for testing purposes only.""" + + def __init__(self, record): + self._record = record + + @property + def current(self): + return self._record diff --git a/neutron/tests/unit/ml2/test_mechanism_ncs.py b/neutron/tests/unit/ml2/test_mechanism_ncs.py new file mode 100644 index 000000000..9ae267823 --- /dev/null +++ b/neutron/tests/unit/ml2/test_mechanism_ncs.py @@ -0,0 +1,50 @@ +# Copyright (c) 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from neutron.plugins.ml2 import config as config +from neutron.plugins.ml2.drivers import mechanism_ncs +from neutron.tests.unit import test_db_plugin as test_plugin + +PLUGIN_NAME = 'neutron.plugins.ml2.plugin.Ml2Plugin' + + +class NCSTestCase(test_plugin.NeutronDbPluginV2TestCase): + + def setUp(self): + # Enable the test mechanism driver to ensure that + # we can successfully call through to all mechanism + # driver apis. + config.cfg.CONF.set_override('mechanism_drivers', + ['logger', 'ncs'], + 'ml2') + super(NCSTestCase, self).setUp(PLUGIN_NAME) + self.port_create_status = 'DOWN' + mechanism_ncs.NCSMechanismDriver.sendjson = self.check_sendjson + + def check_sendjson(self, method, urlpath, obj): + # Confirm fix for bug #1224981 + self.assertFalse(urlpath.startswith("http://")) + + +class NCSMechanismTestBasicGet(test_plugin.TestBasicGet, NCSTestCase): + pass + + +class NCSMechanismTestNetworksV2(test_plugin.TestNetworksV2, NCSTestCase): + pass + + +class NCSMechanismTestPortsV2(test_plugin.TestPortsV2, NCSTestCase): + pass diff --git a/neutron/tests/unit/ml2/test_mechanism_odl.py b/neutron/tests/unit/ml2/test_mechanism_odl.py new file mode 100644 index 000000000..472387c20 --- /dev/null +++ b/neutron/tests/unit/ml2/test_mechanism_odl.py @@ -0,0 +1,117 @@ +# Copyright (c) 2013-2014 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# @author: Kyle Mestery, Cisco Systems, Inc. 
+ + from neutron.plugins.common import constants + from neutron.plugins.ml2 import config as config + from neutron.plugins.ml2 import driver_api as api + from neutron.plugins.ml2.drivers import mechanism_odl + from neutron.plugins.ml2 import plugin + from neutron.tests import base + from neutron.tests.unit import test_db_plugin as test_plugin + + PLUGIN_NAME = 'neutron.plugins.ml2.plugin.Ml2Plugin' + + + class OpenDaylightTestCase(test_plugin.NeutronDbPluginV2TestCase): + + def setUp(self): + # Enable the test mechanism driver to ensure that + # we can successfully call through to all mechanism + # driver apis. + config.cfg.CONF.set_override('mechanism_drivers', + ['logger', 'opendaylight'], + 'ml2') + # Set URL/user/pass so init doesn't throw a cfg required error. + # They are not used in these tests since sendjson is overwritten. + config.cfg.CONF.set_override('url', 'http://127.0.0.1:9999', 'ml2_odl') + config.cfg.CONF.set_override('username', 'someuser', 'ml2_odl') + config.cfg.CONF.set_override('password', 'somepass', 'ml2_odl') + + super(OpenDaylightTestCase, self).setUp(PLUGIN_NAME) + self.port_create_status = 'DOWN' + self.segment = {api.NETWORK_TYPE: ""} + self.mech = mechanism_odl.OpenDaylightMechanismDriver() + mechanism_odl.OpenDaylightMechanismDriver.sendjson = ( + self.check_sendjson) + + def check_sendjson(self, method, urlpath, obj, ignorecodes=[]): + self.assertFalse(urlpath.startswith("http://")) + + def test_check_segment(self): + """Validate the check_segment call.""" + self.segment[api.NETWORK_TYPE] = constants.TYPE_LOCAL + self.assertTrue(self.mech.check_segment(self.segment)) + self.segment[api.NETWORK_TYPE] = constants.TYPE_FLAT + self.assertFalse(self.mech.check_segment(self.segment)) + self.segment[api.NETWORK_TYPE] = constants.TYPE_VLAN + self.assertTrue(self.mech.check_segment(self.segment)) + self.segment[api.NETWORK_TYPE] = constants.TYPE_GRE + self.assertTrue(self.mech.check_segment(self.segment)) + self.segment[api.NETWORK_TYPE] = 
constants.TYPE_VXLAN + self.assertTrue(self.mech.check_segment(self.segment)) + # Validate a network type not currently supported + self.segment[api.NETWORK_TYPE] = 'mpls' + self.assertFalse(self.mech.check_segment(self.segment)) + + +class OpenDayLightMechanismConfigTests(base.BaseTestCase): + + def _set_config(self, url='http://127.0.0.1:9999', username='someuser', + password='somepass'): + config.cfg.CONF.set_override('mechanism_drivers', + ['logger', 'opendaylight'], + 'ml2') + config.cfg.CONF.set_override('url', url, 'ml2_odl') + config.cfg.CONF.set_override('username', username, 'ml2_odl') + config.cfg.CONF.set_override('password', password, 'ml2_odl') + + def _test_missing_config(self, **kwargs): + self._set_config(**kwargs) + self.assertRaises(config.cfg.RequiredOptError, + plugin.Ml2Plugin) + + def test_valid_config(self): + self._set_config() + plugin.Ml2Plugin() + + def test_missing_url_raises_exception(self): + self._test_missing_config(url=None) + + def test_missing_username_raises_exception(self): + self._test_missing_config(username=None) + + def test_missing_password_raises_exception(self): + self._test_missing_config(password=None) + + +class OpenDaylightMechanismTestBasicGet(test_plugin.TestBasicGet, + OpenDaylightTestCase): + pass + + +class OpenDaylightMechanismTestNetworksV2(test_plugin.TestNetworksV2, + OpenDaylightTestCase): + pass + + +class OpenDaylightMechanismTestSubnetsV2(test_plugin.TestSubnetsV2, + OpenDaylightTestCase): + pass + + +class OpenDaylightMechanismTestPortsV2(test_plugin.TestPortsV2, + OpenDaylightTestCase): + pass diff --git a/neutron/tests/unit/ml2/test_ml2_plugin.py b/neutron/tests/unit/ml2/test_ml2_plugin.py new file mode 100644 index 000000000..20613a585 --- /dev/null +++ b/neutron/tests/unit/ml2/test_ml2_plugin.py @@ -0,0 +1,477 @@ +# Copyright (c) 2013 OpenStack Foundation +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import mock +import testtools +import webob + +from neutron.common import exceptions as exc +from neutron import context +from neutron.extensions import multiprovidernet as mpnet +from neutron.extensions import portbindings +from neutron.extensions import providernet as pnet +from neutron import manager +from neutron.plugins.ml2.common import exceptions as ml2_exc +from neutron.plugins.ml2 import config +from neutron.plugins.ml2 import driver_api +from neutron.plugins.ml2 import plugin as ml2_plugin +from neutron.tests.unit import _test_extension_portbindings as test_bindings +from neutron.tests.unit.ml2.drivers import mechanism_logger as mech_logger +from neutron.tests.unit.ml2.drivers import mechanism_test as mech_test +from neutron.tests.unit import test_db_plugin as test_plugin +from neutron.tests.unit import test_extension_allowedaddresspairs as test_pair +from neutron.tests.unit import test_extension_extradhcpopts as test_dhcpopts +from neutron.tests.unit import test_security_groups_rpc as test_sg_rpc + + +config.cfg.CONF.import_opt('network_vlan_ranges', + 'neutron.plugins.ml2.drivers.type_vlan', + group='ml2_type_vlan') + + +PLUGIN_NAME = 'neutron.plugins.ml2.plugin.Ml2Plugin' + + +class Ml2PluginV2TestCase(test_plugin.NeutronDbPluginV2TestCase): + + _plugin_name = PLUGIN_NAME + _mechanism_drivers = ['logger', 'test'] + + def setUp(self): + # We need a L3 service plugin + l3_plugin = 
('neutron.tests.unit.test_l3_plugin.' + 'TestL3NatServicePlugin') + service_plugins = {'l3_plugin_name': l3_plugin} + # Enable the test mechanism driver to ensure that + # we can successfully call through to all mechanism + # driver apis. + config.cfg.CONF.set_override('mechanism_drivers', + self._mechanism_drivers, + group='ml2') + self.physnet = 'physnet1' + self.vlan_range = '1:100' + self.phys_vrange = ':'.join([self.physnet, self.vlan_range]) + config.cfg.CONF.set_override('network_vlan_ranges', [self.phys_vrange], + group='ml2_type_vlan') + super(Ml2PluginV2TestCase, self).setUp(PLUGIN_NAME, + service_plugins=service_plugins) + self.port_create_status = 'DOWN' + self.driver = ml2_plugin.Ml2Plugin() + self.context = context.get_admin_context() + + +class TestMl2BulkToggleWithBulkless(Ml2PluginV2TestCase): + + _mechanism_drivers = ['logger', 'test', 'bulkless'] + + def test_bulk_disable_with_bulkless_driver(self): + self.assertTrue(self._skip_native_bulk) + + +class TestMl2BulkToggleWithoutBulkless(Ml2PluginV2TestCase): + + _mechanism_drivers = ['logger', 'test'] + + def test_bulk_enabled_with_bulk_drivers(self): + self.assertFalse(self._skip_native_bulk) + + +class TestMl2BasicGet(test_plugin.TestBasicGet, + Ml2PluginV2TestCase): + pass + + +class TestMl2V2HTTPResponse(test_plugin.TestV2HTTPResponse, + Ml2PluginV2TestCase): + pass + + +class TestMl2NetworksV2(test_plugin.TestNetworksV2, + Ml2PluginV2TestCase): + pass + + +class TestMl2SubnetsV2(test_plugin.TestSubnetsV2, + Ml2PluginV2TestCase): + pass + + +class TestMl2PortsV2(test_plugin.TestPortsV2, Ml2PluginV2TestCase): + + def test_update_port_status_build(self): + with self.port() as port: + self.assertEqual(port['port']['status'], 'DOWN') + self.assertEqual(self.port_create_status, 'DOWN') + + def test_update_non_existent_port(self): + ctx = context.get_admin_context() + plugin = manager.NeutronManager.get_plugin() + data = {'port': {'admin_state_up': False}} + self.assertRaises(exc.PortNotFound, 
plugin.update_port, ctx, + 'invalid-uuid', data) + + def test_delete_non_existent_port(self): + ctx = context.get_admin_context() + plugin = manager.NeutronManager.get_plugin() + with mock.patch.object(ml2_plugin.LOG, 'debug') as log_debug: + plugin.delete_port(ctx, 'invalid-uuid', l3_port_check=False) + log_debug.assert_has_calls([ + mock.call(_("Deleting port %s"), 'invalid-uuid'), + mock.call(_("The port '%s' was deleted"), 'invalid-uuid') + ]) + + +class TestMl2PortBinding(Ml2PluginV2TestCase, + test_bindings.PortBindingsTestCase): + # Test case does not set binding:host_id, so ml2 does not attempt + # to bind port + VIF_TYPE = portbindings.VIF_TYPE_UNBOUND + HAS_PORT_FILTER = False + ENABLE_SG = True + FIREWALL_DRIVER = test_sg_rpc.FIREWALL_HYBRID_DRIVER + + def setUp(self, firewall_driver=None): + test_sg_rpc.set_firewall_driver(self.FIREWALL_DRIVER) + config.cfg.CONF.set_override( + 'enable_security_group', self.ENABLE_SG, + group='SECURITYGROUP') + super(TestMl2PortBinding, self).setUp() + + def _check_port_binding_profile(self, port, profile=None): + self.assertIn('id', port) + self.assertIn(portbindings.PROFILE, port) + value = port[portbindings.PROFILE] + self.assertEqual(profile or {}, value) + + def test_create_port_binding_profile(self): + self._test_create_port_binding_profile({'a': 1, 'b': 2}) + + def test_update_port_binding_profile(self): + self._test_update_port_binding_profile({'c': 3}) + + def test_create_port_binding_profile_too_big(self): + s = 'x' * 5000 + profile_arg = {portbindings.PROFILE: {'d': s}} + try: + with self.port(expected_res_status=400, + arg_list=(portbindings.PROFILE,), + **profile_arg): + pass + except webob.exc.HTTPClientError: + pass + + def test_remove_port_binding_profile(self): + profile = {'e': 5} + profile_arg = {portbindings.PROFILE: profile} + with self.port(arg_list=(portbindings.PROFILE,), + **profile_arg) as port: + self._check_port_binding_profile(port['port'], profile) + port_id = port['port']['id'] + 
profile_arg = {portbindings.PROFILE: None} + port = self._update('ports', port_id, + {'port': profile_arg})['port'] + self._check_port_binding_profile(port) + port = self._show('ports', port_id)['port'] + self._check_port_binding_profile(port) + + +class TestMl2PortBindingNoSG(TestMl2PortBinding): + HAS_PORT_FILTER = False + ENABLE_SG = False + FIREWALL_DRIVER = test_sg_rpc.FIREWALL_NOOP_DRIVER + + +class TestMl2PortBindingHost(Ml2PluginV2TestCase, + test_bindings.PortBindingsHostTestCaseMixin): + pass + + +class TestMl2PortBindingVnicType(Ml2PluginV2TestCase, + test_bindings.PortBindingsVnicTestCaseMixin): + pass + + +class TestMultiSegmentNetworks(Ml2PluginV2TestCase): + + def setUp(self, plugin=None): + super(TestMultiSegmentNetworks, self).setUp() + + def test_create_network_provider(self): + data = {'network': {'name': 'net1', + pnet.NETWORK_TYPE: 'vlan', + pnet.PHYSICAL_NETWORK: 'physnet1', + pnet.SEGMENTATION_ID: 1, + 'tenant_id': 'tenant_one'}} + network_req = self.new_create_request('networks', data) + network = self.deserialize(self.fmt, + network_req.get_response(self.api)) + self.assertEqual(network['network'][pnet.NETWORK_TYPE], 'vlan') + self.assertEqual(network['network'][pnet.PHYSICAL_NETWORK], 'physnet1') + self.assertEqual(network['network'][pnet.SEGMENTATION_ID], 1) + self.assertNotIn(mpnet.SEGMENTS, network['network']) + + def test_create_network_single_multiprovider(self): + data = {'network': {'name': 'net1', + mpnet.SEGMENTS: + [{pnet.NETWORK_TYPE: 'vlan', + pnet.PHYSICAL_NETWORK: 'physnet1', + pnet.SEGMENTATION_ID: 1}], + 'tenant_id': 'tenant_one'}} + net_req = self.new_create_request('networks', data) + network = self.deserialize(self.fmt, net_req.get_response(self.api)) + self.assertEqual(network['network'][pnet.NETWORK_TYPE], 'vlan') + self.assertEqual(network['network'][pnet.PHYSICAL_NETWORK], 'physnet1') + self.assertEqual(network['network'][pnet.SEGMENTATION_ID], 1) + self.assertNotIn(mpnet.SEGMENTS, network['network']) + + # Tests 
get_network() + net_req = self.new_show_request('networks', network['network']['id']) + network = self.deserialize(self.fmt, net_req.get_response(self.api)) + self.assertEqual(network['network'][pnet.NETWORK_TYPE], 'vlan') + self.assertEqual(network['network'][pnet.PHYSICAL_NETWORK], 'physnet1') + self.assertEqual(network['network'][pnet.SEGMENTATION_ID], 1) + self.assertNotIn(mpnet.SEGMENTS, network['network']) + + def test_create_network_multiprovider(self): + data = {'network': {'name': 'net1', + mpnet.SEGMENTS: + [{pnet.NETWORK_TYPE: 'vlan', + pnet.PHYSICAL_NETWORK: 'physnet1', + pnet.SEGMENTATION_ID: 1}, + {pnet.NETWORK_TYPE: 'vlan', + pnet.PHYSICAL_NETWORK: 'physnet1', + pnet.SEGMENTATION_ID: 2}], + 'tenant_id': 'tenant_one'}} + network_req = self.new_create_request('networks', data) + network = self.deserialize(self.fmt, + network_req.get_response(self.api)) + segments = network['network'][mpnet.SEGMENTS] + for expected, actual in zip(data['network'][mpnet.SEGMENTS], segments): + for field in [pnet.NETWORK_TYPE, pnet.PHYSICAL_NETWORK, + pnet.SEGMENTATION_ID]: + self.assertEqual(expected.get(field), actual.get(field)) + + # Tests get_network() + net_req = self.new_show_request('networks', network['network']['id']) + network = self.deserialize(self.fmt, net_req.get_response(self.api)) + segments = network['network'][mpnet.SEGMENTS] + for expected, actual in zip(data['network'][mpnet.SEGMENTS], segments): + for field in [pnet.NETWORK_TYPE, pnet.PHYSICAL_NETWORK, + pnet.SEGMENTATION_ID]: + self.assertEqual(expected.get(field), actual.get(field)) + + def test_create_network_with_provider_and_multiprovider_fail(self): + data = {'network': {'name': 'net1', + mpnet.SEGMENTS: + [{pnet.NETWORK_TYPE: 'vlan', + pnet.PHYSICAL_NETWORK: 'physnet1', + pnet.SEGMENTATION_ID: 1}], + pnet.NETWORK_TYPE: 'vlan', + pnet.PHYSICAL_NETWORK: 'physnet1', + pnet.SEGMENTATION_ID: 1, + 'tenant_id': 'tenant_one'}} + + network_req = self.new_create_request('networks', data) + res = network_req.get_response(self.api) + self.assertEqual(res.status_int, 400) + + def 
test_create_network_duplicate_segments(self): + data = {'network': {'name': 'net1', + mpnet.SEGMENTS: + [{pnet.NETWORK_TYPE: 'vlan', + pnet.PHYSICAL_NETWORK: 'physnet1', + pnet.SEGMENTATION_ID: 1}, + {pnet.NETWORK_TYPE: 'vlan', + pnet.PHYSICAL_NETWORK: 'physnet1', + pnet.SEGMENTATION_ID: 1}], + 'tenant_id': 'tenant_one'}} + network_req = self.new_create_request('networks', data) + res = network_req.get_response(self.api) + self.assertEqual(res.status_int, 400) + + def test_release_segment_no_type_driver(self): + segment = {driver_api.NETWORK_TYPE: 'faketype', + driver_api.PHYSICAL_NETWORK: 'physnet1', + driver_api.ID: 1} + with mock.patch('neutron.plugins.ml2.managers.LOG') as log: + self.driver.type_manager.release_segment(session=None, + segment=segment) + log.error.assert_called_once_with( + "Failed to release segment '%s' because " + "network type is not supported.", segment) + + def test_create_provider_fail(self): + segment = {pnet.NETWORK_TYPE: None, + pnet.PHYSICAL_NETWORK: 'phys_net', + pnet.SEGMENTATION_ID: None} + with testtools.ExpectedException(exc.InvalidInput): + self.driver._process_provider_create(segment) + + def test_create_network_plugin(self): + data = {'network': {'name': 'net1', + 'admin_state_up': True, + 'shared': False, + pnet.NETWORK_TYPE: 'vlan', + pnet.PHYSICAL_NETWORK: 'physnet1', + pnet.SEGMENTATION_ID: 1, + 'tenant_id': 'tenant_one'}} + + def raise_mechanism_exc(*args, **kwargs): + raise ml2_exc.MechanismDriverError( + method='create_network_postcommit') + + with mock.patch('neutron.plugins.ml2.managers.MechanismManager.' 
+ 'create_network_precommit', new=raise_mechanism_exc): + with testtools.ExpectedException(ml2_exc.MechanismDriverError): + self.driver.create_network(self.context, data) + + def test_extend_dictionary_no_segments(self): + network = dict(name='net_no_segment', id='5', tenant_id='tenant_one') + self.driver._extend_network_dict_provider(self.context, network) + self.assertIsNone(network[pnet.NETWORK_TYPE]) + self.assertIsNone(network[pnet.PHYSICAL_NETWORK]) + self.assertIsNone(network[pnet.SEGMENTATION_ID]) + + +class TestMl2AllowedAddressPairs(Ml2PluginV2TestCase, + test_pair.TestAllowedAddressPairs): + def setUp(self, plugin=None): + super(test_pair.TestAllowedAddressPairs, self).setUp( + plugin=PLUGIN_NAME) + + +class DHCPOptsTestCase(test_dhcpopts.TestExtraDhcpOpt): + + def setUp(self, plugin=None): + super(test_dhcpopts.ExtraDhcpOptDBTestCase, self).setUp( + plugin=PLUGIN_NAME) + + +class Ml2PluginV2FaultyDriverTestCase(test_plugin.NeutronDbPluginV2TestCase): + + def setUp(self): + # Enable the test mechanism driver to ensure that + # we can successfully call through to all mechanism + # driver apis. 
+ config.cfg.CONF.set_override('mechanism_drivers', + ['test', 'logger'], + group='ml2') + super(Ml2PluginV2FaultyDriverTestCase, self).setUp(PLUGIN_NAME) + self.port_create_status = 'DOWN' + + +class TestFaultyMechansimDriver(Ml2PluginV2FaultyDriverTestCase): + + def test_update_network_faulty(self): + + def mock_update_network_postcommit(self, context): + raise ml2_exc.MechanismDriverError( + method='update_network_postcommit') + + with mock.patch.object(mech_test.TestMechanismDriver, + 'update_network_postcommit', + new=mock_update_network_postcommit): + with mock.patch.object(mech_logger.LoggerMechanismDriver, + 'update_network_postcommit') as unp: + + data = {'network': {'name': 'net1', + 'tenant_id': 'tenant_one'}} + network_req = self.new_create_request('networks', data) + network = self.deserialize( + self.fmt, + network_req.get_response(self.api)) + + data = {'network': {'name': 'a_brand_new_name'}} + req = self.new_update_request('networks', + data, + network['network']['id']) + res = req.get_response(self.api) + self.assertEqual(res.status_int, 500) + # Test if other mechanism driver was called + self.assertTrue(unp.called) + + self._delete('networks', network['network']['id']) + + def test_update_subnet_faulty(self): + + def mock_update_subnet_postcommit(self, context): + raise ml2_exc.MechanismDriverError( + method='update_subnet_postcommit') + + with mock.patch.object(mech_test.TestMechanismDriver, + 'update_subnet_postcommit', + new=mock_update_subnet_postcommit): + with mock.patch.object(mech_logger.LoggerMechanismDriver, + 'update_subnet_postcommit') as usp: + + with self.network() as network: + data = {'subnet': {'network_id': + network['network']['id'], + 'cidr': '10.0.20.0/24', + 'ip_version': '4', + 'name': 'subnet1', + 'tenant_id': + network['network']['tenant_id'], + 'gateway_ip': '10.0.2.1'}} + subnet_req = self.new_create_request('subnets', data) + subnet = self.deserialize( + self.fmt, + subnet_req.get_response(self.api)) + + data = 
{'subnet': {'name': 'a_brand_new_name'}} + req = self.new_update_request('subnets', + data, + subnet['subnet']['id']) + res = req.get_response(self.api) + self.assertEqual(res.status_int, 500) + # Test if other mechanism driver was called + self.assertTrue(usp.called) + + self._delete('subnets', subnet['subnet']['id']) + + def test_update_port_faulty(self): + + def mock_update_port_postcommit(self, context): + raise ml2_exc.MechanismDriverError( + method='update_port_postcommit') + + with mock.patch.object(mech_test.TestMechanismDriver, + 'update_port_postcommit', + new=mock_update_port_postcommit): + with mock.patch.object(mech_logger.LoggerMechanismDriver, + 'update_port_postcommit') as upp: + + with self.network() as network: + data = {'port': {'network_id': network['network']['id'], + 'tenant_id': + network['network']['tenant_id'], + 'name': 'port1', + 'admin_state_up': 1, + 'fixed_ips': []}} + port_req = self.new_create_request('ports', data) + port = self.deserialize( + self.fmt, + port_req.get_response(self.api)) + + data = {'port': {'name': 'a_brand_new_name'}} + req = self.new_update_request('ports', + data, + port['port']['id']) + res = req.get_response(self.api) + self.assertEqual(res.status_int, 500) + # Test if other mechanism driver was called + self.assertTrue(upp.called) + + self._delete('ports', port['port']['id']) diff --git a/neutron/tests/unit/ml2/test_port_binding.py b/neutron/tests/unit/ml2/test_port_binding.py new file mode 100644 index 000000000..b4aa19a9c --- /dev/null +++ b/neutron/tests/unit/ml2/test_port_binding.py @@ -0,0 +1,136 @@ +# Copyright (c) 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import mock + +from neutron import context +from neutron.extensions import portbindings +from neutron import manager +from neutron.plugins.ml2 import config as config +from neutron.tests.unit import test_db_plugin as test_plugin + + +PLUGIN_NAME = 'neutron.plugins.ml2.plugin.Ml2Plugin' + + +class PortBindingTestCase(test_plugin.NeutronDbPluginV2TestCase): + + _plugin_name = PLUGIN_NAME + + def setUp(self): + # Enable the test mechanism driver to ensure that + # we can successfully call through to all mechanism + # driver apis. + config.cfg.CONF.set_override('mechanism_drivers', + ['logger', 'test'], + 'ml2') + super(PortBindingTestCase, self).setUp(PLUGIN_NAME) + self.port_create_status = 'DOWN' + self.plugin = manager.NeutronManager.get_plugin() + self.plugin.start_rpc_listeners() + + def _check_response(self, port, vif_type, has_port_filter, bound, status): + self.assertEqual(port[portbindings.VIF_TYPE], vif_type) + vif_details = port[portbindings.VIF_DETAILS] + port_status = port['status'] + if bound: + # TODO(rkukura): Replace with new VIF security details + self.assertEqual(vif_details[portbindings.CAP_PORT_FILTER], + has_port_filter) + self.assertEqual(port_status, status or 'DOWN') + else: + self.assertEqual(port_status, 'DOWN') + + def _test_port_binding(self, host, vif_type, has_port_filter, bound, + status=None): + host_arg = {portbindings.HOST_ID: host} + with self.port(name='name', arg_list=(portbindings.HOST_ID,), + **host_arg) as port: + self._check_response(port['port'], vif_type, has_port_filter, + bound, status) + port_id = 
port['port']['id'] + neutron_context = context.get_admin_context() + details = self.plugin.endpoints[0].get_device_details( + neutron_context, agent_id="theAgentId", device=port_id) + if bound: + self.assertEqual(details['network_type'], 'local') + else: + self.assertNotIn('network_type', details) + + def test_unbound(self): + self._test_port_binding("", + portbindings.VIF_TYPE_UNBOUND, + False, False) + + def test_binding_failed(self): + self._test_port_binding("host-fail", + portbindings.VIF_TYPE_BINDING_FAILED, + False, False) + + def test_binding_no_filter(self): + self._test_port_binding("host-ovs-no_filter", + portbindings.VIF_TYPE_OVS, + False, True) + + def test_binding_filter(self): + self._test_port_binding("host-bridge-filter", + portbindings.VIF_TYPE_BRIDGE, + True, True) + + def test_binding_status_active(self): + self._test_port_binding("host-ovs-filter-active", + portbindings.VIF_TYPE_OVS, + True, True, 'ACTIVE') + + def _test_update_port_binding(self, host, new_host=None): + with mock.patch.object(self.plugin, + '_notify_port_updated') as notify_mock: + host_arg = {portbindings.HOST_ID: host} + update_body = {'name': 'test_update'} + if new_host is not None: + update_body[portbindings.HOST_ID] = new_host + with self.port(name='name', arg_list=(portbindings.HOST_ID,), + **host_arg) as port: + neutron_context = context.get_admin_context() + updated_port = self._update('ports', port['port']['id'], + {'port': update_body}, + neutron_context=neutron_context) + port_data = updated_port['port'] + if new_host is not None: + self.assertEqual(port_data[portbindings.HOST_ID], + new_host) + else: + self.assertEqual(port_data[portbindings.HOST_ID], host) + if new_host is not None and new_host != host: + notify_mock.assert_called_once_with(mock.ANY) + else: + self.assertFalse(notify_mock.called) + + def test_update_with_new_host_binding_notifies_agent(self): + self._test_update_port_binding('host-ovs-no_filter', + 'host-bridge-filter') + + def 
test_update_with_same_host_binding_does_not_notify(self): + self._test_update_port_binding('host-ovs-no_filter', + 'host-ovs-no_filter') + + def test_update_without_binding_does_not_notify(self): + self._test_update_port_binding('host-ovs-no_filter') + + def testt_update_from_empty_to_host_binding_notifies_agent(self): + self._test_update_port_binding('', 'host-ovs-no_filter') + + def test_update_from_host_to_empty_binding_notifies_agent(self): + self._test_update_port_binding('host-ovs-no_filter', '') diff --git a/neutron/tests/unit/ml2/test_rpcapi.py b/neutron/tests/unit/ml2/test_rpcapi.py new file mode 100644 index 000000000..af48a74f1 --- /dev/null +++ b/neutron/tests/unit/ml2/test_rpcapi.py @@ -0,0 +1,109 @@ +# Copyright (c) 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +""" +Unit Tests for ml2 rpc +""" + +import mock + +from neutron.agent import rpc as agent_rpc +from neutron.common import rpc_compat +from neutron.common import topics +from neutron.openstack.common import context +from neutron.plugins.ml2.drivers import type_tunnel +from neutron.plugins.ml2 import rpc as plugin_rpc +from neutron.tests import base + + +class RpcApiTestCase(base.BaseTestCase): + + def _test_rpc_api(self, rpcapi, topic, method, rpc_method, **kwargs): + ctxt = context.RequestContext('fake_user', 'fake_project') + expected_retval = 'foo' if method == 'call' else None + expected_msg = rpcapi.make_msg(method, **kwargs) + if rpc_method == 'cast' and method == 'run_instance': + kwargs['call'] = False + + rpc = rpc_compat.RpcProxy + with mock.patch.object(rpc, rpc_method) as rpc_method_mock: + rpc_method_mock.return_value = expected_retval + retval = getattr(rpcapi, method)(ctxt, **kwargs) + + self.assertEqual(retval, expected_retval) + expected = [ + mock.call(ctxt, expected_msg, topic=topic) + ] + rpc_method_mock.assert_has_calls(expected) + + def test_delete_network(self): + rpcapi = plugin_rpc.AgentNotifierApi(topics.AGENT) + self._test_rpc_api(rpcapi, + topics.get_topic_name(topics.AGENT, + topics.NETWORK, + topics.DELETE), + 'network_delete', rpc_method='fanout_cast', + network_id='fake_request_spec') + + def test_port_update(self): + rpcapi = plugin_rpc.AgentNotifierApi(topics.AGENT) + self._test_rpc_api(rpcapi, + topics.get_topic_name(topics.AGENT, + topics.PORT, + topics.UPDATE), + 'port_update', rpc_method='fanout_cast', + port='fake_port', + network_type='fake_network_type', + segmentation_id='fake_segmentation_id', + physical_network='fake_physical_network') + + def test_tunnel_update(self): + rpcapi = plugin_rpc.AgentNotifierApi(topics.AGENT) + self._test_rpc_api(rpcapi, + topics.get_topic_name(topics.AGENT, + type_tunnel.TUNNEL, + topics.UPDATE), + 'tunnel_update', rpc_method='fanout_cast', + tunnel_ip='fake_ip', tunnel_type='gre') + + def 
test_device_details(self): + rpcapi = agent_rpc.PluginApi(topics.PLUGIN) + self._test_rpc_api(rpcapi, topics.PLUGIN, + 'get_device_details', rpc_method='call', + device='fake_device', + agent_id='fake_agent_id') + + def test_update_device_down(self): + rpcapi = agent_rpc.PluginApi(topics.PLUGIN) + self._test_rpc_api(rpcapi, topics.PLUGIN, + 'update_device_down', rpc_method='call', + device='fake_device', + agent_id='fake_agent_id', + host='fake_host') + + def test_tunnel_sync(self): + rpcapi = agent_rpc.PluginApi(topics.PLUGIN) + self._test_rpc_api(rpcapi, topics.PLUGIN, + 'tunnel_sync', rpc_method='call', + tunnel_ip='fake_tunnel_ip', + tunnel_type=None) + + def test_update_device_up(self): + rpcapi = agent_rpc.PluginApi(topics.PLUGIN) + self._test_rpc_api(rpcapi, topics.PLUGIN, + 'update_device_up', rpc_method='call', + device='fake_device', + agent_id='fake_agent_id', + host='fake_host') diff --git a/neutron/tests/unit/ml2/test_security_group.py b/neutron/tests/unit/ml2/test_security_group.py new file mode 100644 index 000000000..3e82c91e4 --- /dev/null +++ b/neutron/tests/unit/ml2/test_security_group.py @@ -0,0 +1,106 @@ +# Copyright (c) 2013 OpenStack Foundation +# Copyright 2013, Nachi Ueno, NTT MCL, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import mock + +from neutron.api.v2 import attributes +from neutron.extensions import securitygroup as ext_sg +from neutron import manager +from neutron.tests.unit import test_extension_security_group as test_sg +from neutron.tests.unit import test_security_groups_rpc as test_sg_rpc + +PLUGIN_NAME = 'neutron.plugins.ml2.plugin.Ml2Plugin' +NOTIFIER = 'neutron.plugins.ml2.rpc.AgentNotifierApi' + + +class Ml2SecurityGroupsTestCase(test_sg.SecurityGroupDBTestCase): + _plugin_name = PLUGIN_NAME + + def setUp(self, plugin=None): + test_sg_rpc.set_firewall_driver(test_sg_rpc.FIREWALL_HYBRID_DRIVER) + notifier_p = mock.patch(NOTIFIER) + notifier_cls = notifier_p.start() + self.notifier = mock.Mock() + notifier_cls.return_value = self.notifier + self._attribute_map_bk_ = {} + for item in attributes.RESOURCE_ATTRIBUTE_MAP: + self._attribute_map_bk_[item] = (attributes. + RESOURCE_ATTRIBUTE_MAP[item]. + copy()) + super(Ml2SecurityGroupsTestCase, self).setUp(PLUGIN_NAME) + + def tearDown(self): + super(Ml2SecurityGroupsTestCase, self).tearDown() + attributes.RESOURCE_ATTRIBUTE_MAP = self._attribute_map_bk_ + + +class TestMl2SecurityGroups(Ml2SecurityGroupsTestCase, + test_sg.TestSecurityGroups, + test_sg_rpc.SGNotificationTestMixin): + def setUp(self): + super(TestMl2SecurityGroups, self).setUp() + plugin = manager.NeutronManager.get_plugin() + plugin.start_rpc_listeners() + + def test_security_group_get_port_from_device(self): + with self.network() as n: + with self.subnet(n): + with self.security_group() as sg: + security_group_id = sg['security_group']['id'] + res = self._create_port(self.fmt, n['network']['id']) + port = self.deserialize(self.fmt, res) + fixed_ips = port['port']['fixed_ips'] + data = {'port': {'fixed_ips': fixed_ips, + 'name': port['port']['name'], + ext_sg.SECURITYGROUPS: + [security_group_id]}} + + req = self.new_update_request('ports', data, + port['port']['id']) + res = self.deserialize(self.fmt, + req.get_response(self.api)) + port_id = 
res['port']['id'] + plugin = manager.NeutronManager.get_plugin() + callbacks = plugin.endpoints[0] + port_dict = callbacks.get_port_from_device(port_id) + self.assertEqual(port_id, port_dict['id']) + self.assertEqual([security_group_id], + port_dict[ext_sg.SECURITYGROUPS]) + self.assertEqual([], port_dict['security_group_rules']) + self.assertEqual([fixed_ips[0]['ip_address']], + port_dict['fixed_ips']) + self._delete('ports', port_id) + + def test_security_group_get_port_from_device_with_no_port(self): + plugin = manager.NeutronManager.get_plugin() + port_dict = plugin.endpoints[0].get_port_from_device('bad_device_id') + self.assertIsNone(port_dict) + + +class TestMl2SecurityGroupsXML(TestMl2SecurityGroups): + fmt = 'xml' + + +class TestMl2SGServerRpcCallBack( + Ml2SecurityGroupsTestCase, + test_sg_rpc.SGServerRpcCallBackMixinTestCase): + pass + + +class TestMl2SGServerRpcCallBackXML( + Ml2SecurityGroupsTestCase, + test_sg_rpc.SGServerRpcCallBackMixinTestCaseXML): + pass diff --git a/neutron/tests/unit/ml2/test_type_flat.py b/neutron/tests/unit/ml2/test_type_flat.py new file mode 100644 index 000000000..711418ccd --- /dev/null +++ b/neutron/tests/unit/ml2/test_type_flat.py @@ -0,0 +1,98 @@ +# Copyright (c) 2014 Thales Services SAS +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from neutron.common import exceptions as exc +import neutron.db.api as db +from neutron.plugins.common import constants as p_const +from neutron.plugins.ml2 import driver_api as api +from neutron.plugins.ml2.drivers import type_flat +from neutron.tests import base + + +FLAT_NETWORKS = 'flat_net1, flat_net2' + + +class FlatTypeTest(base.BaseTestCase): + + def setUp(self): + super(FlatTypeTest, self).setUp() + db.configure_db() + self.driver = type_flat.FlatTypeDriver() + self.driver._parse_networks(FLAT_NETWORKS) + self.session = db.get_session() + self.addCleanup(db.clear_db) + + def _get_allocation(self, session, segment): + return session.query(type_flat.FlatAllocation).filter_by( + physical_network=segment[api.PHYSICAL_NETWORK]).first() + + def test_validate_provider_segment(self): + segment = {api.NETWORK_TYPE: p_const.TYPE_FLAT, + api.PHYSICAL_NETWORK: 'flat_net1'} + self.driver.validate_provider_segment(segment) + + def test_validate_provider_segment_without_physnet_restriction(self): + self.driver._parse_networks('*') + segment = {api.NETWORK_TYPE: p_const.TYPE_FLAT, + api.PHYSICAL_NETWORK: 'other_flat_net'} + self.driver.validate_provider_segment(segment) + + def test_validate_provider_segment_with_missing_physical_network(self): + segment = {api.NETWORK_TYPE: p_const.TYPE_FLAT} + self.assertRaises(exc.InvalidInput, + self.driver.validate_provider_segment, + segment) + + def test_validate_provider_segment_with_unsupported_physical_network(self): + segment = {api.NETWORK_TYPE: p_const.TYPE_FLAT, + api.PHYSICAL_NETWORK: 'other_flat_net'} + self.assertRaises(exc.InvalidInput, + self.driver.validate_provider_segment, + segment) + + def test_validate_provider_segment_with_unallowed_segmentation_id(self): + segment = {api.NETWORK_TYPE: p_const.TYPE_FLAT, + api.PHYSICAL_NETWORK: 'flat_net1', + api.SEGMENTATION_ID: 1234} + self.assertRaises(exc.InvalidInput, + self.driver.validate_provider_segment, + segment) + + def test_reserve_provider_segment(self): + 
segment = {api.NETWORK_TYPE: p_const.TYPE_FLAT, + api.PHYSICAL_NETWORK: 'flat_net1'} + self.driver.reserve_provider_segment(self.session, segment) + alloc = self._get_allocation(self.session, segment) + self.assertEqual(segment[api.PHYSICAL_NETWORK], alloc.physical_network) + + def test_release_segment(self): + segment = {api.NETWORK_TYPE: p_const.TYPE_FLAT, + api.PHYSICAL_NETWORK: 'flat_net1'} + self.driver.reserve_provider_segment(self.session, segment) + self.driver.release_segment(self.session, segment) + alloc = self._get_allocation(self.session, segment) + self.assertIsNone(alloc) + + def test_reserve_provider_segment_already_reserved(self): + segment = {api.NETWORK_TYPE: p_const.TYPE_FLAT, + api.PHYSICAL_NETWORK: 'flat_net1'} + self.driver.reserve_provider_segment(self.session, segment) + self.assertRaises(exc.FlatNetworkInUse, + self.driver.reserve_provider_segment, + self.session, segment) + + def test_allocate_tenant_segment(self): + observed = self.driver.allocate_tenant_segment(self.session) + self.assertIsNone(observed) diff --git a/neutron/tests/unit/ml2/test_type_gre.py b/neutron/tests/unit/ml2/test_type_gre.py new file mode 100644 index 000000000..36420e2b6 --- /dev/null +++ b/neutron/tests/unit/ml2/test_type_gre.py @@ -0,0 +1,208 @@ +# Copyright (c) 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from six import moves +import testtools +from testtools import matchers + +from neutron.common import exceptions as exc +import neutron.db.api as db +from neutron.plugins.ml2 import driver_api as api +from neutron.plugins.ml2.drivers import type_gre +from neutron.tests import base + +TUNNEL_IP_ONE = "10.10.10.10" +TUNNEL_IP_TWO = "10.10.10.20" +TUN_MIN = 100 +TUN_MAX = 109 +TUNNEL_RANGES = [(TUN_MIN, TUN_MAX)] +UPDATED_TUNNEL_RANGES = [(TUN_MIN + 5, TUN_MAX + 5)] + + +class GreTypeTest(base.BaseTestCase): + + def setUp(self): + super(GreTypeTest, self).setUp() + db.configure_db() + self.driver = type_gre.GreTypeDriver() + self.driver.gre_id_ranges = TUNNEL_RANGES + self.driver._sync_gre_allocations() + self.session = db.get_session() + self.addCleanup(db.clear_db) + + def test_validate_provider_segment(self): + segment = {api.NETWORK_TYPE: 'gre', + api.PHYSICAL_NETWORK: 'phys_net', + api.SEGMENTATION_ID: None} + + with testtools.ExpectedException(exc.InvalidInput): + self.driver.validate_provider_segment(segment) + + segment[api.PHYSICAL_NETWORK] = None + with testtools.ExpectedException(exc.InvalidInput): + self.driver.validate_provider_segment(segment) + + def test_sync_tunnel_allocations(self): + self.assertIsNone( + self.driver.get_gre_allocation(self.session, + (TUN_MIN - 1)) + ) + self.assertFalse( + self.driver.get_gre_allocation(self.session, + (TUN_MIN)).allocated + ) + self.assertFalse( + self.driver.get_gre_allocation(self.session, + (TUN_MIN + 1)).allocated + ) + self.assertFalse( + self.driver.get_gre_allocation(self.session, + (TUN_MAX - 1)).allocated + ) + self.assertFalse( + self.driver.get_gre_allocation(self.session, + (TUN_MAX)).allocated + ) + self.assertIsNone( + self.driver.get_gre_allocation(self.session, + (TUN_MAX + 1)) + ) + + self.driver.gre_id_ranges = UPDATED_TUNNEL_RANGES + self.driver._sync_gre_allocations() + + self.assertIsNone( + self.driver.get_gre_allocation(self.session, + (TUN_MIN + 5 - 1)) + ) + self.assertFalse( + 
self.driver.get_gre_allocation(self.session, + (TUN_MIN + 5)).allocated + ) + self.assertFalse( + self.driver.get_gre_allocation(self.session, + (TUN_MIN + 5 + 1)).allocated + ) + self.assertFalse( + self.driver.get_gre_allocation(self.session, + (TUN_MAX + 5 - 1)).allocated + ) + self.assertFalse( + self.driver.get_gre_allocation(self.session, + (TUN_MAX + 5)).allocated + ) + self.assertIsNone( + self.driver.get_gre_allocation(self.session, + (TUN_MAX + 5 + 1)) + ) + + def test_reserve_provider_segment(self): + segment = {api.NETWORK_TYPE: 'gre', + api.PHYSICAL_NETWORK: 'None', + api.SEGMENTATION_ID: 101} + self.driver.reserve_provider_segment(self.session, segment) + alloc = self.driver.get_gre_allocation(self.session, + segment[api.SEGMENTATION_ID]) + self.assertTrue(alloc.allocated) + + with testtools.ExpectedException(exc.TunnelIdInUse): + self.driver.reserve_provider_segment(self.session, segment) + + self.driver.release_segment(self.session, segment) + alloc = self.driver.get_gre_allocation(self.session, + segment[api.SEGMENTATION_ID]) + self.assertFalse(alloc.allocated) + + segment[api.SEGMENTATION_ID] = 1000 + self.driver.reserve_provider_segment(self.session, segment) + alloc = self.driver.get_gre_allocation(self.session, + segment[api.SEGMENTATION_ID]) + self.assertTrue(alloc.allocated) + + self.driver.release_segment(self.session, segment) + alloc = self.driver.get_gre_allocation(self.session, + segment[api.SEGMENTATION_ID]) + self.assertIsNone(alloc) + + def test_allocate_tenant_segment(self): + tunnel_ids = set() + for x in moves.xrange(TUN_MIN, TUN_MAX + 1): + segment = self.driver.allocate_tenant_segment(self.session) + self.assertThat(segment[api.SEGMENTATION_ID], + matchers.GreaterThan(TUN_MIN - 1)) + self.assertThat(segment[api.SEGMENTATION_ID], + matchers.LessThan(TUN_MAX + 1)) + tunnel_ids.add(segment[api.SEGMENTATION_ID]) + + segment = self.driver.allocate_tenant_segment(self.session) + self.assertIsNone(segment) + + segment = 
{api.NETWORK_TYPE: 'gre', + api.PHYSICAL_NETWORK: 'None', + api.SEGMENTATION_ID: tunnel_ids.pop()} + self.driver.release_segment(self.session, segment) + segment = self.driver.allocate_tenant_segment(self.session) + self.assertThat(segment[api.SEGMENTATION_ID], + matchers.GreaterThan(TUN_MIN - 1)) + self.assertThat(segment[api.SEGMENTATION_ID], + matchers.LessThan(TUN_MAX + 1)) + tunnel_ids.add(segment[api.SEGMENTATION_ID]) + + for tunnel_id in tunnel_ids: + segment[api.SEGMENTATION_ID] = tunnel_id + self.driver.release_segment(self.session, segment) + + def test_gre_endpoints(self): + tun_1 = self.driver.add_endpoint(TUNNEL_IP_ONE) + tun_2 = self.driver.add_endpoint(TUNNEL_IP_TWO) + self.assertEqual(TUNNEL_IP_ONE, tun_1.ip_address) + self.assertEqual(TUNNEL_IP_TWO, tun_2.ip_address) + + # Get all the endpoints + endpoints = self.driver.get_endpoints() + for endpoint in endpoints: + self.assertIn(endpoint['ip_address'], + [TUNNEL_IP_ONE, TUNNEL_IP_TWO]) + + +class GreTypeMultiRangeTest(base.BaseTestCase): + + TUN_MIN0 = 100 + TUN_MAX0 = 101 + TUN_MIN1 = 200 + TUN_MAX1 = 201 + TUNNEL_MULTI_RANGES = [(TUN_MIN0, TUN_MAX0), (TUN_MIN1, TUN_MAX1)] + + def setUp(self): + super(GreTypeMultiRangeTest, self).setUp() + db.configure_db() + self.driver = type_gre.GreTypeDriver() + self.driver.gre_id_ranges = self.TUNNEL_MULTI_RANGES + self.driver._sync_gre_allocations() + self.session = db.get_session() + self.addCleanup(db.clear_db) + + def test_release_segment(self): + segments = [self.driver.allocate_tenant_segment(self.session) + for i in range(4)] + + # Release them in random order. No special meaning. 
+ for i in (0, 2, 1, 3): + self.driver.release_segment(self.session, segments[i]) + + for key in (self.TUN_MIN0, self.TUN_MAX0, + self.TUN_MIN1, self.TUN_MAX1): + alloc = self.driver.get_gre_allocation(self.session, key) + self.assertFalse(alloc.allocated) diff --git a/neutron/tests/unit/ml2/test_type_local.py b/neutron/tests/unit/ml2/test_type_local.py new file mode 100644 index 000000000..8d835bfd4 --- /dev/null +++ b/neutron/tests/unit/ml2/test_type_local.py @@ -0,0 +1,56 @@ +# Copyright (c) 2014 Thales Services SAS +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from neutron.common import exceptions as exc +from neutron.plugins.common import constants as p_const +from neutron.plugins.ml2 import driver_api as api +from neutron.plugins.ml2.drivers import type_local +from neutron.tests import base + + +class LocalTypeTest(base.BaseTestCase): + + def setUp(self): + super(LocalTypeTest, self).setUp() + self.driver = type_local.LocalTypeDriver() + self.session = None + + def test_validate_provider_segment(self): + segment = {api.NETWORK_TYPE: p_const.TYPE_LOCAL} + self.driver.validate_provider_segment(segment) + + def test_validate_provider_segment_with_unallowed_physical_network(self): + segment = {api.NETWORK_TYPE: p_const.TYPE_LOCAL, + api.PHYSICAL_NETWORK: 'phys_net'} + self.assertRaises(exc.InvalidInput, + self.driver.validate_provider_segment, + segment) + + def test_validate_provider_segment_with_unallowed_segmentation_id(self): + segment = {api.NETWORK_TYPE: p_const.TYPE_LOCAL, + api.SEGMENTATION_ID: 2} + self.assertRaises(exc.InvalidInput, + self.driver.validate_provider_segment, + segment) + + def test_reserve_provider_segment(self): + segment = {api.NETWORK_TYPE: p_const.TYPE_LOCAL} + self.driver.reserve_provider_segment(self.session, segment) + self.driver.release_segment(self.session, segment) + + def test_allocate_tenant_segment(self): + expected = {api.NETWORK_TYPE: p_const.TYPE_LOCAL} + observed = self.driver.allocate_tenant_segment(self.session) + self.assertEqual(expected, observed) diff --git a/neutron/tests/unit/ml2/test_type_vlan.py b/neutron/tests/unit/ml2/test_type_vlan.py new file mode 100644 index 000000000..deb86c0af --- /dev/null +++ b/neutron/tests/unit/ml2/test_type_vlan.py @@ -0,0 +1,187 @@ +# Copyright (c) 2014 Thales Services SAS +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import mock +from testtools import matchers + +from neutron.common import exceptions as exc +import neutron.db.api as db +from neutron.plugins.common import constants as p_const +from neutron.plugins.ml2 import driver_api as api +from neutron.plugins.ml2.drivers import type_vlan +from neutron.tests import base + +PROVIDER_NET = 'phys_net1' +TENANT_NET = 'phys_net2' +VLAN_MIN = 200 +VLAN_MAX = 209 +NETWORK_VLAN_RANGES = { + PROVIDER_NET: [], + TENANT_NET: [(VLAN_MIN, VLAN_MAX)], +} +UPDATED_VLAN_RANGES = { + PROVIDER_NET: [], + TENANT_NET: [(VLAN_MIN + 5, VLAN_MAX + 5)], +} + + +class VlanTypeTest(base.BaseTestCase): + + def setUp(self): + super(VlanTypeTest, self).setUp() + db.configure_db() + self.driver = type_vlan.VlanTypeDriver() + self.driver.network_vlan_ranges = NETWORK_VLAN_RANGES + self.driver._sync_vlan_allocations() + self.session = db.get_session() + self.addCleanup(db.clear_db) + + def _get_allocation(self, session, segment): + return session.query(type_vlan.VlanAllocation).filter_by( + physical_network=segment[api.PHYSICAL_NETWORK], + vlan_id=segment[api.SEGMENTATION_ID]).first() + + def test_validate_provider_segment(self): + segment = {api.NETWORK_TYPE: p_const.TYPE_VLAN, + api.PHYSICAL_NETWORK: PROVIDER_NET, + api.SEGMENTATION_ID: 1} + self.assertIsNone(self.driver.validate_provider_segment(segment)) + + def test_validate_provider_segment_with_missing_physical_network(self): + segment = {api.NETWORK_TYPE: p_const.TYPE_VLAN, + api.SEGMENTATION_ID: 1} + self.assertRaises(exc.InvalidInput, + self.driver.validate_provider_segment, + segment) 
+ + def test_validate_provider_segment_with_missing_segmentation_id(self): + segment = {api.NETWORK_TYPE: p_const.TYPE_VLAN, + api.PHYSICAL_NETWORK: PROVIDER_NET} + self.assertRaises(exc.InvalidInput, + self.driver.validate_provider_segment, + segment) + + def test_validate_provider_segment_with_invalid_physical_network(self): + segment = {api.NETWORK_TYPE: p_const.TYPE_VLAN, + api.PHYSICAL_NETWORK: 'other_phys_net', + api.SEGMENTATION_ID: 1} + self.assertRaises(exc.InvalidInput, + self.driver.validate_provider_segment, + segment) + + def test_validate_provider_segment_with_invalid_segmentation_id(self): + segment = {api.NETWORK_TYPE: p_const.TYPE_VLAN, + api.PHYSICAL_NETWORK: PROVIDER_NET, + api.SEGMENTATION_ID: 5000} + self.assertRaises(exc.InvalidInput, + self.driver.validate_provider_segment, + segment) + + def test_validate_provider_segment_with_invalid_input(self): + segment = {api.NETWORK_TYPE: p_const.TYPE_VLAN, + api.PHYSICAL_NETWORK: PROVIDER_NET, + api.SEGMENTATION_ID: 1, + 'invalid': 1} + self.assertRaises(exc.InvalidInput, + self.driver.validate_provider_segment, + segment) + + def test_sync_vlan_allocations(self): + def check_in_ranges(network_vlan_ranges): + vlan_min, vlan_max = network_vlan_ranges[TENANT_NET][0] + segment = {api.NETWORK_TYPE: p_const.TYPE_VLAN, + api.PHYSICAL_NETWORK: TENANT_NET} + + segment[api.SEGMENTATION_ID] = vlan_min - 1 + self.assertIsNone( + self._get_allocation(self.session, segment)) + segment[api.SEGMENTATION_ID] = vlan_max + 1 + self.assertIsNone( + self._get_allocation(self.session, segment)) + + segment[api.SEGMENTATION_ID] = vlan_min + self.assertFalse( + self._get_allocation(self.session, segment).allocated) + segment[api.SEGMENTATION_ID] = vlan_max + self.assertFalse( + self._get_allocation(self.session, segment).allocated) + + check_in_ranges(NETWORK_VLAN_RANGES) + self.driver.network_vlan_ranges = UPDATED_VLAN_RANGES + self.driver._sync_vlan_allocations() + check_in_ranges(UPDATED_VLAN_RANGES) + + def 
test_reserve_provider_segment(self): + segment = {api.NETWORK_TYPE: p_const.TYPE_VLAN, + api.PHYSICAL_NETWORK: PROVIDER_NET, + api.SEGMENTATION_ID: 101} + alloc = self._get_allocation(self.session, segment) + self.assertIsNone(alloc) + self.driver.reserve_provider_segment(self.session, segment) + alloc = self._get_allocation(self.session, segment) + self.assertTrue(alloc.allocated) + + def test_reserve_provider_segment_already_allocated(self): + segment = {api.NETWORK_TYPE: p_const.TYPE_VLAN, + api.PHYSICAL_NETWORK: PROVIDER_NET, + api.SEGMENTATION_ID: 101} + self.driver.reserve_provider_segment(self.session, segment) + self.assertRaises(exc.VlanIdInUse, + self.driver.reserve_provider_segment, + self.session, + segment) + + def test_reserve_provider_segment_in_tenant_pools(self): + segment = {api.NETWORK_TYPE: p_const.TYPE_VLAN, + api.PHYSICAL_NETWORK: TENANT_NET, + api.SEGMENTATION_ID: VLAN_MIN} + alloc = self._get_allocation(self.session, segment) + self.assertFalse(alloc.allocated) + self.driver.reserve_provider_segment(self.session, segment) + alloc = self._get_allocation(self.session, segment) + self.assertTrue(alloc.allocated) + + def test_allocate_tenant_segment(self): + for __ in range(VLAN_MIN, VLAN_MAX + 1): + segment = self.driver.allocate_tenant_segment(self.session) + alloc = self._get_allocation(self.session, segment) + self.assertTrue(alloc.allocated) + vlan_id = segment[api.SEGMENTATION_ID] + self.assertThat(vlan_id, matchers.GreaterThan(VLAN_MIN - 1)) + self.assertThat(vlan_id, matchers.LessThan(VLAN_MAX + 1)) + self.assertEqual(TENANT_NET, segment[api.PHYSICAL_NETWORK]) + + def test_allocate_tenant_segment_no_available(self): + for __ in range(VLAN_MIN, VLAN_MAX + 1): + self.driver.allocate_tenant_segment(self.session) + segment = self.driver.allocate_tenant_segment(self.session) + self.assertIsNone(segment) + + def test_release_segment(self): + segment = self.driver.allocate_tenant_segment(self.session) + self.driver.release_segment(self.session, 
segment) + alloc = self._get_allocation(self.session, segment) + self.assertFalse(alloc.allocated) + + def test_release_segment_unallocated(self): + segment = {api.NETWORK_TYPE: p_const.TYPE_VLAN, + api.PHYSICAL_NETWORK: PROVIDER_NET, + api.SEGMENTATION_ID: 101} + with mock.patch.object(type_vlan.LOG, 'warning') as log_warn: + self.driver.release_segment(self.session, segment) + log_warn.assert_called_once_with( + "No vlan_id %(vlan_id)s found on physical network " + "%(physical_network)s", + {'vlan_id': 101, 'physical_network': PROVIDER_NET}) diff --git a/neutron/tests/unit/ml2/test_type_vxlan.py b/neutron/tests/unit/ml2/test_type_vxlan.py new file mode 100644 index 000000000..1242df2ab --- /dev/null +++ b/neutron/tests/unit/ml2/test_type_vxlan.py @@ -0,0 +1,227 @@ +# Copyright (c) 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# @author: Kyle Mestery, Cisco Systems, Inc. 
+ +from oslo.config import cfg +from six import moves +import testtools +from testtools import matchers + +from neutron.common import exceptions as exc +from neutron.db import api as db +from neutron.plugins.common import constants as p_const +from neutron.plugins.ml2 import driver_api as api +from neutron.plugins.ml2.drivers import type_vxlan +from neutron.tests import base + + +TUNNEL_IP_ONE = "10.10.10.10" +TUNNEL_IP_TWO = "10.10.10.20" +TUN_MIN = 100 +TUN_MAX = 109 +TUNNEL_RANGES = [(TUN_MIN, TUN_MAX)] +UPDATED_TUNNEL_RANGES = [(TUN_MIN + 5, TUN_MAX + 5)] +INVALID_VXLAN_VNI = 7337 +MULTICAST_GROUP = "239.1.1.1" +VXLAN_UDP_PORT_ONE = 9999 +VXLAN_UDP_PORT_TWO = 8888 + + +class VxlanTypeTest(base.BaseTestCase): + def setUp(self): + super(VxlanTypeTest, self).setUp() + db.configure_db() + cfg.CONF.set_override('vni_ranges', [TUNNEL_RANGES], + group='ml2_type_vxlan') + cfg.CONF.set_override('vxlan_group', MULTICAST_GROUP, + group='ml2_type_vxlan') + self.driver = type_vxlan.VxlanTypeDriver() + self.driver.vxlan_vni_ranges = TUNNEL_RANGES + self.driver._sync_vxlan_allocations() + self.session = db.get_session() + self.addCleanup(db.clear_db) + + def test_vxlan_tunnel_type(self): + self.assertEqual(self.driver.get_type(), p_const.TYPE_VXLAN) + + def test_validate_provider_segment(self): + segment = {api.NETWORK_TYPE: 'vxlan', + api.PHYSICAL_NETWORK: 'phys_net', + api.SEGMENTATION_ID: None} + + with testtools.ExpectedException(exc.InvalidInput): + self.driver.validate_provider_segment(segment) + + segment[api.PHYSICAL_NETWORK] = None + with testtools.ExpectedException(exc.InvalidInput): + self.driver.validate_provider_segment(segment) + + def test_sync_tunnel_allocations(self): + self.assertIsNone( + self.driver.get_vxlan_allocation(self.session, + (TUN_MIN - 1)) + ) + self.assertFalse( + self.driver.get_vxlan_allocation(self.session, + (TUN_MIN)).allocated + ) + self.assertFalse( + self.driver.get_vxlan_allocation(self.session, + (TUN_MIN + 1)).allocated + ) + 
self.assertFalse( + self.driver.get_vxlan_allocation(self.session, + (TUN_MAX - 1)).allocated + ) + self.assertFalse( + self.driver.get_vxlan_allocation(self.session, + (TUN_MAX)).allocated + ) + self.assertIsNone( + self.driver.get_vxlan_allocation(self.session, + (TUN_MAX + 1)) + ) + + self.driver.vxlan_vni_ranges = UPDATED_TUNNEL_RANGES + self.driver._sync_vxlan_allocations() + + self.assertIsNone(self.driver. + get_vxlan_allocation(self.session, + (TUN_MIN + 5 - 1))) + self.assertFalse(self.driver. + get_vxlan_allocation(self.session, (TUN_MIN + 5)). + allocated) + self.assertFalse(self.driver. + get_vxlan_allocation(self.session, (TUN_MIN + 5 + 1)). + allocated) + self.assertFalse(self.driver. + get_vxlan_allocation(self.session, (TUN_MAX + 5 - 1)). + allocated) + self.assertFalse(self.driver. + get_vxlan_allocation(self.session, (TUN_MAX + 5)). + allocated) + self.assertIsNone(self.driver. + get_vxlan_allocation(self.session, + (TUN_MAX + 5 + 1))) + + def test_reserve_provider_segment(self): + segment = {api.NETWORK_TYPE: 'vxlan', + api.PHYSICAL_NETWORK: 'None', + api.SEGMENTATION_ID: 101} + self.driver.reserve_provider_segment(self.session, segment) + alloc = self.driver.get_vxlan_allocation(self.session, + segment[api.SEGMENTATION_ID]) + self.assertTrue(alloc.allocated) + + with testtools.ExpectedException(exc.TunnelIdInUse): + self.driver.reserve_provider_segment(self.session, segment) + + self.driver.release_segment(self.session, segment) + alloc = self.driver.get_vxlan_allocation(self.session, + segment[api.SEGMENTATION_ID]) + self.assertFalse(alloc.allocated) + + segment[api.SEGMENTATION_ID] = 1000 + self.driver.reserve_provider_segment(self.session, segment) + alloc = self.driver.get_vxlan_allocation(self.session, + segment[api.SEGMENTATION_ID]) + self.assertTrue(alloc.allocated) + + self.driver.release_segment(self.session, segment) + alloc = self.driver.get_vxlan_allocation(self.session, + segment[api.SEGMENTATION_ID]) + self.assertIsNone(alloc) + + 
def test_allocate_tenant_segment(self): + tunnel_ids = set() + for x in moves.xrange(TUN_MIN, TUN_MAX + 1): + segment = self.driver.allocate_tenant_segment(self.session) + self.assertThat(segment[api.SEGMENTATION_ID], + matchers.GreaterThan(TUN_MIN - 1)) + self.assertThat(segment[api.SEGMENTATION_ID], + matchers.LessThan(TUN_MAX + 1)) + tunnel_ids.add(segment[api.SEGMENTATION_ID]) + + segment = self.driver.allocate_tenant_segment(self.session) + self.assertIsNone(segment) + + segment = {api.NETWORK_TYPE: 'vxlan', + api.PHYSICAL_NETWORK: 'None', + api.SEGMENTATION_ID: tunnel_ids.pop()} + self.driver.release_segment(self.session, segment) + segment = self.driver.allocate_tenant_segment(self.session) + self.assertThat(segment[api.SEGMENTATION_ID], + matchers.GreaterThan(TUN_MIN - 1)) + self.assertThat(segment[api.SEGMENTATION_ID], + matchers.LessThan(TUN_MAX + 1)) + tunnel_ids.add(segment[api.SEGMENTATION_ID]) + + for tunnel_id in tunnel_ids: + segment[api.SEGMENTATION_ID] = tunnel_id + self.driver.release_segment(self.session, segment) + + def test_vxlan_endpoints(self): + """Test VXLAN allocation/de-allocation.""" + + # Set first endpoint, verify it gets VXLAN VNI 1 + vxlan1_endpoint = self.driver.add_endpoint(TUNNEL_IP_ONE, + VXLAN_UDP_PORT_ONE) + self.assertEqual(TUNNEL_IP_ONE, vxlan1_endpoint.ip_address) + self.assertEqual(VXLAN_UDP_PORT_ONE, vxlan1_endpoint.udp_port) + + # Set second endpoint, verify it gets VXLAN VNI 2 + vxlan2_endpoint = self.driver.add_endpoint(TUNNEL_IP_TWO, + VXLAN_UDP_PORT_TWO) + self.assertEqual(TUNNEL_IP_TWO, vxlan2_endpoint.ip_address) + self.assertEqual(VXLAN_UDP_PORT_TWO, vxlan2_endpoint.udp_port) + + # Get all the endpoints + endpoints = self.driver.get_endpoints() + for endpoint in endpoints: + if endpoint['ip_address'] == TUNNEL_IP_ONE: + self.assertEqual(VXLAN_UDP_PORT_ONE, endpoint['udp_port']) + elif endpoint['ip_address'] == TUNNEL_IP_TWO: + self.assertEqual(VXLAN_UDP_PORT_TWO, endpoint['udp_port']) + + +class 
VxlanTypeMultiRangeTest(base.BaseTestCase): + + TUN_MIN0 = 100 + TUN_MAX0 = 101 + TUN_MIN1 = 200 + TUN_MAX1 = 201 + TUNNEL_MULTI_RANGES = [(TUN_MIN0, TUN_MAX0), (TUN_MIN1, TUN_MAX1)] + + def setUp(self): + super(VxlanTypeMultiRangeTest, self).setUp() + db.configure_db() + self.driver = type_vxlan.VxlanTypeDriver() + self.driver.vxlan_vni_ranges = self.TUNNEL_MULTI_RANGES + self.driver._sync_vxlan_allocations() + self.session = db.get_session() + self.addCleanup(db.clear_db) + + def test_release_segment(self): + segments = [self.driver.allocate_tenant_segment(self.session) + for i in range(4)] + + # Release them in random order. No special meaning. + for i in (0, 2, 1, 3): + self.driver.release_segment(self.session, segments[i]) + + for key in (self.TUN_MIN0, self.TUN_MAX0, + self.TUN_MIN1, self.TUN_MAX1): + alloc = self.driver.get_vxlan_allocation(self.session, key) + self.assertFalse(alloc.allocated) diff --git a/neutron/tests/unit/mlnx/__init__.py b/neutron/tests/unit/mlnx/__init__.py new file mode 100644 index 000000000..c818bfe31 --- /dev/null +++ b/neutron/tests/unit/mlnx/__init__.py @@ -0,0 +1,16 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013 Mellanox Technologies, Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
diff --git a/neutron/tests/unit/mlnx/test_agent_scheduler.py b/neutron/tests/unit/mlnx/test_agent_scheduler.py new file mode 100644 index 000000000..f53f40d3e --- /dev/null +++ b/neutron/tests/unit/mlnx/test_agent_scheduler.py @@ -0,0 +1,34 @@ +# Copyright (c) 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from neutron.tests.unit.mlnx import test_mlnx_plugin +from neutron.tests.unit.openvswitch import test_agent_scheduler + + +class MlnxAgentSchedulerTestCase( + test_agent_scheduler.OvsAgentSchedulerTestCase): + plugin_str = test_mlnx_plugin.PLUGIN_NAME + l3_plugin = None + + +class MlnxL3AgentNotifierTestCase( + test_agent_scheduler.OvsL3AgentNotifierTestCase): + plugin_str = test_mlnx_plugin.PLUGIN_NAME + l3_plugin = None + + +class MlnxDhcpAgentNotifierTestCase( + test_agent_scheduler.OvsDhcpAgentNotifierTestCase): + plugin_str = test_mlnx_plugin.PLUGIN_NAME diff --git a/neutron/tests/unit/mlnx/test_defaults.py b/neutron/tests/unit/mlnx/test_defaults.py new file mode 100644 index 000000000..8d9cc8c4e --- /dev/null +++ b/neutron/tests/unit/mlnx/test_defaults.py @@ -0,0 +1,39 @@ +# Copyright (c) 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from oslo.config import cfg + +#NOTE this import loads tests required options +from neutron.plugins.mlnx.common import config # noqa +from neutron.tests import base + + +class ConfigurationTest(base.BaseTestCase): + + def test_defaults(self): + self.assertEqual(2, + cfg.CONF.AGENT.polling_interval) + self.assertEqual('vlan', + cfg.CONF.MLNX.tenant_network_type) + self.assertEqual(1, + len(cfg.CONF.MLNX.network_vlan_ranges)) + self.assertEqual('eth', + cfg.CONF.MLNX.physical_network_type) + self.assertFalse(cfg.CONF.MLNX.physical_network_type_mappings) + self.assertEqual(0, + len(cfg.CONF.ESWITCH. + physical_interface_mappings)) + self.assertEqual('tcp://127.0.0.1:60001', + cfg.CONF.ESWITCH.daemon_endpoint) diff --git a/neutron/tests/unit/mlnx/test_mlnx_comm_utils.py b/neutron/tests/unit/mlnx/test_mlnx_comm_utils.py new file mode 100644 index 000000000..bb8bdcced --- /dev/null +++ b/neutron/tests/unit/mlnx/test_mlnx_comm_utils.py @@ -0,0 +1,139 @@ +# Copyright (c) 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import mock +from oslo.config import cfg + +from neutron.plugins.mlnx.common import comm_utils +from neutron.plugins.mlnx.common import config # noqa +from neutron.plugins.mlnx.common import exceptions +from neutron.tests import base + + +class WrongException(Exception): + pass + + +class TestRetryDecorator(base.BaseTestCase): + def setUp(self): + super(TestRetryDecorator, self).setUp() + self.sleep_fn_p = mock.patch.object(comm_utils.RetryDecorator, + 'sleep_fn') + self.sleep_fn = self.sleep_fn_p.start() + + def test_no_retry_required(self): + self.counter = 0 + + @comm_utils.RetryDecorator(exceptions.RequestTimeout, interval=2, + retries=3, backoff_rate=2) + def succeeds(): + self.counter += 1 + return 'success' + + ret = succeeds() + self.assertFalse(self.sleep_fn.called) + self.assertEqual(ret, 'success') + self.assertEqual(self.counter, 1) + + def test_retry_zero_times(self): + self.counter = 0 + interval = 2 + backoff_rate = 2 + retries = 0 + + @comm_utils.RetryDecorator(exceptions.RequestTimeout, interval, + retries, backoff_rate) + def always_fails(): + self.counter += 1 + raise exceptions.RequestTimeout() + + self.assertRaises(exceptions.RequestTimeout, always_fails) + self.assertEqual(self.counter, 1) + self.assertFalse(self.sleep_fn.called) + + def test_retries_once(self): + self.counter = 0 + interval = 2 + backoff_rate = 2 + retries = 3 + + @comm_utils.RetryDecorator(exceptions.RequestTimeout, interval, + retries, backoff_rate) + def fails_once(): + self.counter += 1 + if self.counter < 2: + raise exceptions.RequestTimeout() + else: + return 'success' + + ret = fails_once() + self.assertEqual(ret, 'success') + self.assertEqual(self.counter, 2) + self.assertEqual(self.sleep_fn.call_count, 1) + self.sleep_fn.assert_called_with(interval) + + def test_limit_is_reached(self): + self.counter = 0 + retries = 3 + interval = 2 + backoff_rate = 4 + + @comm_utils.RetryDecorator(exceptions.RequestTimeout, interval, + retries, backoff_rate) + def always_fails(): 
+ self.counter += 1 + raise exceptions.RequestTimeout() + + self.assertRaises(exceptions.RequestTimeout, always_fails) + self.assertEqual(self.counter, retries + 1) + self.assertEqual(self.sleep_fn.call_count, retries) + + expected_sleep_fn_arg = [] + for i in range(retries): + expected_sleep_fn_arg.append(interval) + interval *= backoff_rate + + self.sleep_fn.assert_has_calls(map(mock.call, expected_sleep_fn_arg)) + + def test_limit_is_reached_with_conf(self): + self.counter = 0 + + @comm_utils.RetryDecorator(exceptions.RequestTimeout) + def always_fails(): + self.counter += 1 + raise exceptions.RequestTimeout() + + retry = cfg.CONF.ESWITCH.retries + interval = cfg.CONF.ESWITCH.request_timeout / 1000 + delay_rate = cfg.CONF.ESWITCH.backoff_rate + + expected_sleep_fn_arg = [] + for i in range(retry): + expected_sleep_fn_arg.append(interval) + interval *= delay_rate + + self.assertRaises(exceptions.RequestTimeout, always_fails) + self.assertEqual(self.counter, retry + 1) + self.assertEqual(self.sleep_fn.call_count, retry) + self.sleep_fn.assert_has_calls(map(mock.call, expected_sleep_fn_arg)) + + def test_wrong_exception_no_retry(self): + + @comm_utils.RetryDecorator(exceptions.RequestTimeout) + def raise_unexpected_error(): + raise WrongException("wrong exception") + + self.assertRaises(WrongException, raise_unexpected_error) + self.assertFalse(self.sleep_fn.called) diff --git a/neutron/tests/unit/mlnx/test_mlnx_db.py b/neutron/tests/unit/mlnx/test_mlnx_db.py new file mode 100644 index 000000000..a72ed0865 --- /dev/null +++ b/neutron/tests/unit/mlnx/test_mlnx_db.py @@ -0,0 +1,181 @@ +# Copyright (c) 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from six import moves +from testtools import matchers + +from neutron.common import exceptions as n_exc +from neutron.db import api as db +from neutron.plugins.mlnx.db import mlnx_db_v2 as mlnx_db +from neutron.tests import base +from neutron.tests.unit import test_db_plugin as test_plugin + +PHYS_NET = 'physnet1' +PHYS_NET_2 = 'physnet2' +NET_TYPE = 'vlan' +VLAN_MIN = 10 +VLAN_MAX = 19 +VLAN_RANGES = {PHYS_NET: [(VLAN_MIN, VLAN_MAX)]} +UPDATED_VLAN_RANGES = {PHYS_NET: [(VLAN_MIN + 5, VLAN_MAX + 5)], + PHYS_NET_2: [(VLAN_MIN + 20, VLAN_MAX + 20)]} +TEST_NETWORK_ID = 'abcdefghijklmnopqrstuvwxyz' + + +class SegmentationIdAllocationTest(base.BaseTestCase): + def setUp(self): + super(SegmentationIdAllocationTest, self).setUp() + db.configure_db() + mlnx_db.sync_network_states(VLAN_RANGES) + self.session = db.get_session() + self.addCleanup(db.clear_db) + + def test_sync_segmentationIdAllocation(self): + self.assertIsNone(mlnx_db.get_network_state(PHYS_NET, + VLAN_MIN - 1)) + self.assertFalse(mlnx_db.get_network_state(PHYS_NET, + VLAN_MIN).allocated) + self.assertFalse(mlnx_db.get_network_state(PHYS_NET, + VLAN_MIN + 1).allocated) + self.assertFalse(mlnx_db.get_network_state(PHYS_NET, + VLAN_MAX - 1).allocated) + self.assertFalse(mlnx_db.get_network_state(PHYS_NET, + VLAN_MAX).allocated) + self.assertIsNone(mlnx_db.get_network_state(PHYS_NET, + VLAN_MAX + 1)) + + mlnx_db.sync_network_states(UPDATED_VLAN_RANGES) + + self.assertIsNone(mlnx_db.get_network_state(PHYS_NET, + VLAN_MIN + 5 - 1)) + self.assertFalse(mlnx_db.get_network_state(PHYS_NET, + VLAN_MIN + 
5).allocated) + self.assertFalse(mlnx_db.get_network_state(PHYS_NET, + VLAN_MIN + 5 + 1).allocated) + self.assertFalse(mlnx_db.get_network_state(PHYS_NET, + VLAN_MAX + 5 - 1).allocated) + self.assertFalse(mlnx_db.get_network_state(PHYS_NET, + VLAN_MAX + 5).allocated) + self.assertIsNone(mlnx_db.get_network_state(PHYS_NET, + VLAN_MAX + 5 + 1)) + + self.assertIsNone(mlnx_db.get_network_state(PHYS_NET_2, + VLAN_MIN + 20 - 1)) + self.assertFalse(mlnx_db.get_network_state(PHYS_NET_2, + VLAN_MIN + 20).allocated) + self.assertFalse( + mlnx_db.get_network_state(PHYS_NET_2, + VLAN_MIN + 20 + 1).allocated) + self.assertFalse( + mlnx_db.get_network_state(PHYS_NET_2, + VLAN_MAX + 20 - 1).allocated) + self.assertFalse(mlnx_db.get_network_state(PHYS_NET_2, + VLAN_MAX + 20).allocated) + self.assertIsNone(mlnx_db.get_network_state(PHYS_NET_2, + VLAN_MAX + 20 + 1)) + + mlnx_db.sync_network_states(VLAN_RANGES) + + self.assertIsNone(mlnx_db.get_network_state(PHYS_NET, + VLAN_MIN - 1)) + self.assertFalse(mlnx_db.get_network_state(PHYS_NET, + VLAN_MIN).allocated) + self.assertFalse(mlnx_db.get_network_state(PHYS_NET, + VLAN_MIN + 1).allocated) + self.assertFalse(mlnx_db.get_network_state(PHYS_NET, + VLAN_MAX - 1).allocated) + self.assertFalse(mlnx_db.get_network_state(PHYS_NET, + VLAN_MAX).allocated) + self.assertIsNone(mlnx_db.get_network_state(PHYS_NET, + VLAN_MAX + 1)) + + self.assertIsNone(mlnx_db.get_network_state(PHYS_NET_2, + VLAN_MIN + 20)) + self.assertIsNone(mlnx_db.get_network_state(PHYS_NET_2, + VLAN_MAX + 20)) + + def test_segmentationId_pool(self): + vlan_ids = set() + for x in moves.xrange(VLAN_MIN, VLAN_MAX + 1): + physical_network, vlan_id = mlnx_db.reserve_network(self.session) + self.assertEqual(physical_network, PHYS_NET) + self.assertThat(vlan_id, matchers.GreaterThan(VLAN_MIN - 1)) + self.assertThat(vlan_id, matchers.LessThan(VLAN_MAX + 1)) + vlan_ids.add(vlan_id) + + self.assertRaises(n_exc.NoNetworkAvailable, + mlnx_db.reserve_network, + self.session) + for 
vlan_id in vlan_ids: + mlnx_db.release_network(self.session, PHYS_NET, + vlan_id, VLAN_RANGES) + + def test_specific_segmentationId_inside_pool(self): + vlan_id = VLAN_MIN + 5 + self.assertFalse(mlnx_db.get_network_state(PHYS_NET, + vlan_id).allocated) + mlnx_db.reserve_specific_network(self.session, PHYS_NET, vlan_id) + self.assertTrue(mlnx_db.get_network_state(PHYS_NET, + vlan_id).allocated) + + self.assertRaises(n_exc.VlanIdInUse, + mlnx_db.reserve_specific_network, + self.session, + PHYS_NET, + vlan_id) + + mlnx_db.release_network(self.session, PHYS_NET, vlan_id, VLAN_RANGES) + self.assertFalse(mlnx_db.get_network_state(PHYS_NET, + vlan_id).allocated) + + def test_specific_segmentationId_outside_pool(self): + vlan_id = VLAN_MAX + 5 + self.assertIsNone(mlnx_db.get_network_state(PHYS_NET, vlan_id)) + mlnx_db.reserve_specific_network(self.session, PHYS_NET, vlan_id) + self.assertTrue(mlnx_db.get_network_state(PHYS_NET, + vlan_id).allocated) + + self.assertRaises(n_exc.VlanIdInUse, + mlnx_db.reserve_specific_network, + self.session, + PHYS_NET, + vlan_id) + + mlnx_db.release_network(self.session, PHYS_NET, vlan_id, VLAN_RANGES) + self.assertIsNone(mlnx_db.get_network_state(PHYS_NET, vlan_id)) + + +class NetworkBindingsTest(test_plugin.NeutronDbPluginV2TestCase): + def setUp(self): + super(NetworkBindingsTest, self).setUp() + db.configure_db() + self.session = db.get_session() + + def test_add_network_binding(self): + with self.network() as network: + TEST_NETWORK_ID = network['network']['id'] + self.assertIsNone(mlnx_db.get_network_binding(self.session, + TEST_NETWORK_ID)) + mlnx_db.add_network_binding(self.session, + TEST_NETWORK_ID, + NET_TYPE, + PHYS_NET, + 1234) + binding = mlnx_db.get_network_binding(self.session, + TEST_NETWORK_ID) + self.assertIsNotNone(binding) + self.assertEqual(binding.network_id, TEST_NETWORK_ID) + self.assertEqual(binding.network_type, NET_TYPE) + self.assertEqual(binding.physical_network, PHYS_NET) + 
self.assertEqual(binding.segmentation_id, 1234) diff --git a/neutron/tests/unit/mlnx/test_mlnx_neutron_agent.py b/neutron/tests/unit/mlnx/test_mlnx_neutron_agent.py new file mode 100644 index 000000000..7191f0b38 --- /dev/null +++ b/neutron/tests/unit/mlnx/test_mlnx_neutron_agent.py @@ -0,0 +1,156 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2014 Mellanox Technologies, Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import contextlib + +import mock +from oslo.config import cfg +import testtools + +from neutron.plugins.mlnx.agent import eswitch_neutron_agent +from neutron.plugins.mlnx.agent import utils +from neutron.plugins.mlnx.common import exceptions +from neutron.tests import base + + +class TestEswichManager(base.BaseTestCase): + + def setUp(self): + super(TestEswichManager, self).setUp() + + class MockEswitchUtils(object): + def __init__(self, endpoint, timeout): + pass + + mock.patch('neutron.plugins.mlnx.agent.utils.EswitchManager', + new=MockEswitchUtils) + + with mock.patch.object(utils, 'zmq'): + self.manager = eswitch_neutron_agent.EswitchManager({}, None, None) + + def test_get_not_exist_port_id(self): + with testtools.ExpectedException(exceptions.MlnxException): + self.manager.get_port_id_by_mac('no-such-mac') + + +class TestEswitchAgent(base.BaseTestCase): + + def setUp(self): + super(TestEswitchAgent, self).setUp() + # Avoid rpc initialization for unit tests + cfg.CONF.set_override('rpc_backend', + 
'neutron.openstack.common.rpc.impl_fake') + cfg.CONF.set_default('firewall_driver', + 'neutron.agent.firewall.NoopFirewallDriver', + group='SECURITYGROUP') + + class MockFixedIntervalLoopingCall(object): + def __init__(self, f): + self.f = f + + def start(self, interval=0): + self.f() + + mock.patch('neutron.openstack.common.loopingcall.' + 'FixedIntervalLoopingCall', + new=MockFixedIntervalLoopingCall) + + with mock.patch.object(utils, 'zmq'): + self.agent = eswitch_neutron_agent.MlnxEswitchNeutronAgent({}) + self.agent.plugin_rpc = mock.Mock() + self.agent.context = mock.Mock() + self.agent.agent_id = mock.Mock() + self.agent.eswitch = mock.Mock() + self.agent.eswitch.get_vnics_mac.return_value = [] + + def test_treat_devices_added_returns_true_for_missing_device(self): + attrs = {'get_device_details.side_effect': Exception()} + self.agent.plugin_rpc.configure_mock(**attrs) + with contextlib.nested( + mock.patch('neutron.plugins.mlnx.agent.eswitch_neutron_agent.' + 'EswitchManager.get_vnics_mac', + return_value=[])): + self.assertTrue(self.agent.treat_devices_added([{}])) + + def _mock_treat_devices_added(self, details, func_name): + """Mock treat devices added. + + :param details: the details to return for the device + :param func_name: the function that should be called + :returns: whether the named function was called + """ + with contextlib.nested( + mock.patch('neutron.plugins.mlnx.agent.eswitch_neutron_agent.' 
+ 'EswitchManager.get_vnics_mac', + return_value=[]), + mock.patch.object(self.agent.plugin_rpc, 'get_device_details', + return_value=details), + mock.patch.object(self.agent.plugin_rpc, 'update_device_up'), + mock.patch.object(self.agent, func_name) + ) as (vnics_fn, get_dev_fn, upd_dev_up, func): + self.assertFalse(self.agent.treat_devices_added([{}])) + return (func.called, upd_dev_up.called) + + def test_treat_devices_added_updates_known_port(self): + details = mock.MagicMock() + details.__contains__.side_effect = lambda x: True + func, dev_up = self._mock_treat_devices_added(details, + 'treat_vif_port') + self.assertTrue(func) + self.assertTrue(dev_up) + + def test_treat_devices_added_updates_known_port_admin_down(self): + details = {'port_id': '1234567890', + 'device': '01:02:03:04:05:06', + 'network_id': '123456789', + 'network_type': 'vlan', + 'physical_network': 'default', + 'segmentation_id': 2, + 'admin_state_up': False} + func, dev_up = self._mock_treat_devices_added(details, + 'treat_vif_port') + self.assertTrue(func) + self.assertFalse(dev_up) + + def test_treat_devices_removed_returns_true_for_missing_device(self): + with mock.patch.object(self.agent.plugin_rpc, 'update_device_down', + side_effect=Exception()): + self.assertTrue(self.agent.treat_devices_removed([{}])) + + def test_treat_devices_removed_releases_port(self): + details = dict(exists=False) + with mock.patch.object(self.agent.plugin_rpc, 'update_device_down', + return_value=details): + with mock.patch.object(self.agent.eswitch, + 'port_release') as port_release: + self.assertFalse(self.agent.treat_devices_removed([{}])) + self.assertTrue(port_release.called) + + def test_process_network_ports(self): + current_ports = set(['01:02:03:04:05:06']) + added_ports = set(['10:20:30:40:50:60']) + removed_ports = set(['11:22:33:44:55:66']) + reply = {'current': current_ports, + 'removed': removed_ports, + 'added': added_ports} + with mock.patch.object(self.agent, 'treat_devices_added', + 
return_value=False) as device_added: + with mock.patch.object(self.agent, 'treat_devices_removed', + return_value=False) as device_removed: + self.assertFalse(self.agent.process_network_ports(reply)) + device_added.assert_called_once_with(added_ports) + device_removed.assert_called_once_with(removed_ports) diff --git a/neutron/tests/unit/mlnx/test_mlnx_plugin.py b/neutron/tests/unit/mlnx/test_mlnx_plugin.py new file mode 100644 index 000000000..f3c83ac8d --- /dev/null +++ b/neutron/tests/unit/mlnx/test_mlnx_plugin.py @@ -0,0 +1,116 @@ +# Copyright (c) 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from oslo.config import cfg +from webob import exc + +from neutron.extensions import portbindings +from neutron.plugins.mlnx.common import constants +from neutron.tests.unit import _test_extension_portbindings as test_bindings +from neutron.tests.unit import test_db_plugin as test_plugin +from neutron.tests.unit import test_security_groups_rpc as test_sg_rpc + + +PLUGIN_NAME = ('neutron.plugins.mlnx.mlnx_plugin.MellanoxEswitchPlugin') + + +class MlnxPluginV2TestCase(test_plugin.NeutronDbPluginV2TestCase): + _plugin_name = PLUGIN_NAME + + def setUp(self): + super(MlnxPluginV2TestCase, self).setUp(self._plugin_name) + self.port_create_status = 'DOWN' + + +class TestMlnxBasicGet(test_plugin.TestBasicGet, MlnxPluginV2TestCase): + pass + + +class TestMlnxV2HTTPResponse(test_plugin.TestV2HTTPResponse, + MlnxPluginV2TestCase): + pass + + +class TestMlnxPortsV2(test_plugin.TestPortsV2, + MlnxPluginV2TestCase): + pass + + +class TestMlnxNetworksV2(test_plugin.TestNetworksV2, MlnxPluginV2TestCase): + pass + + +class TestMlnxPortBinding(MlnxPluginV2TestCase, + test_bindings.PortBindingsTestCase): + VIF_TYPE = constants.VIF_TYPE_DIRECT + ENABLE_SG = False + HAS_PORT_FILTER = False + + def setUp(self, firewall_driver=None): + cfg.CONF.set_override( + 'enable_security_group', self.ENABLE_SG, + group='SECURITYGROUP') + super(TestMlnxPortBinding, self).setUp() + + def _check_default_port_binding_profole(self, port, + expected_vif_type=None): + if expected_vif_type is None: + expected_vif_type = constants.VIF_TYPE_DIRECT + p = port['port'] + self.assertIn('id', p) + self.assertEqual(expected_vif_type, p[portbindings.VIF_TYPE]) + self.assertEqual({'physical_network': 'default'}, + p[portbindings.PROFILE]) + + def test_create_port_no_binding_profile(self): + with self.port() as port: + self._check_default_port_binding_profole(port) + + def test_create_port_binding_profile_none(self): + profile_arg = {portbindings.PROFILE: None} + with self.port(arg_list=(portbindings.PROFILE,), + 
**profile_arg) as port: + self._check_default_port_binding_profole(port) + + def test_create_port_binding_profile_vif_type(self): + for vif_type in [constants.VIF_TYPE_HOSTDEV, + constants.VIF_TYPE_DIRECT]: + profile_arg = {portbindings.PROFILE: + {constants.VNIC_TYPE: vif_type}} + with self.port(arg_list=(portbindings.PROFILE,), + **profile_arg) as port: + self._check_default_port_binding_profole( + port, expected_vif_type=vif_type) + + def test_create_port_binding_profile_with_empty_dict(self): + profile_arg = {portbindings.PROFILE: {}} + try: + with self.port(arg_list=(portbindings.PROFILE,), + expected_res_status=400, **profile_arg): + pass + except exc.HTTPClientError: + pass + + +class TestMlnxPortBindingNoSG(TestMlnxPortBinding): + HAS_PORT_FILTER = False + ENABLE_SG = False + FIREWALL_DRIVER = test_sg_rpc.FIREWALL_NOOP_DRIVER + + +class TestMlnxPortBindingHost( + MlnxPluginV2TestCase, + test_bindings.PortBindingsHostTestCaseMixin): + pass diff --git a/neutron/tests/unit/mlnx/test_mlnx_plugin_config.py b/neutron/tests/unit/mlnx/test_mlnx_plugin_config.py new file mode 100644 index 000000000..70f3d2df4 --- /dev/null +++ b/neutron/tests/unit/mlnx/test_mlnx_plugin_config.py @@ -0,0 +1,89 @@ +# Copyright (c) 2014 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import mock +from oslo.config import cfg + +#NOTE this import loads tests required options +from neutron.plugins.mlnx.common import config # noqa +from neutron.plugins.mlnx.common import constants +from neutron.plugins.mlnx import mlnx_plugin +from neutron.tests import base + + +class TestMlnxPluginConfig(base.BaseTestCase): + expected_vlan_mappings = {'physnet1': [(1, 1000)], + 'physnet2': [(1, 1000)]} + expected_network_types = {'physnet1': constants.TYPE_ETH, + 'physnet2': constants.TYPE_IB} + config_vlan_ranges = ['physnet1:1:1000', 'physnet2:1:1000'] + config_network_types = ['physnet1:eth', 'physnet2:ib'] + + def setUp(self): + super(TestMlnxPluginConfig, self).setUp() + cfg.CONF.set_override('rpc_backend', + 'neutron.openstack.common.rpc.impl_fake') + cfg.CONF.set_override(group='MLNX', + name='network_vlan_ranges', + override=self.config_vlan_ranges) + + def _create_mlnx_plugin(self): + with mock.patch('neutron.plugins.mlnx.db.mlnx_db_v2'): + return mlnx_plugin.MellanoxEswitchPlugin() + + def _assert_expected_config(self): + plugin = self._create_mlnx_plugin() + self.assertEqual(plugin.network_vlan_ranges, + self.expected_vlan_mappings) + self.assertEqual(plugin.phys_network_type_maps, + self.expected_network_types) + + def test_vlan_ranges_with_network_type(self): + cfg.CONF.set_override(group='MLNX', + name='physical_network_type_mappings', + override=self.config_network_types) + self._assert_expected_config() + + def test_vlan_ranges_partial_network_type(self): + cfg.CONF.set_override(group='MLNX', + name='physical_network_type_mappings', + override=self.config_network_types[:1]) + cfg.CONF.set_override(group='MLNX', + name='physical_network_type', + override=constants.TYPE_IB) + self._assert_expected_config() + + def test_vlan_ranges_no_network_type(self): + cfg.CONF.set_override(group='MLNX', + name='physical_network_type', + override=constants.TYPE_IB) + cfg.CONF.set_override(group='MLNX', + name='physical_network_type_mappings', + override=[]) + 
self.expected_network_types.update({'physnet1': constants.TYPE_IB}) + self._assert_expected_config() + self.expected_network_types.update({'physnet1': constants.TYPE_ETH}) + + def test_parse_physical_network_mappings_invalid_type(self): + cfg.CONF.set_override(group='MLNX', + name='physical_network_type_mappings', + override=['physnet:invalid-type']) + self.assertRaises(SystemExit, self._create_mlnx_plugin) + + def test_invalid_network_type(self): + cfg.CONF.set_override(group='MLNX', + name='physical_network_type', + override='invalid-type') + self.assertRaises(SystemExit, self._create_mlnx_plugin) diff --git a/neutron/tests/unit/mlnx/test_mlnx_security_group.py b/neutron/tests/unit/mlnx/test_mlnx_security_group.py new file mode 100644 index 000000000..347de62c8 --- /dev/null +++ b/neutron/tests/unit/mlnx/test_mlnx_security_group.py @@ -0,0 +1,100 @@ +# Copyright (c) 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import mock +import webob.exc + +from neutron.api.v2 import attributes +from neutron.extensions import securitygroup as ext_sg +from neutron.plugins.mlnx.db import mlnx_db_v2 as mlnx_db +from neutron.tests.unit import test_extension_security_group as test_sg +from neutron.tests.unit import test_security_groups_rpc as test_sg_rpc + + +PLUGIN_NAME = ('neutron.plugins.mlnx.' + 'mlnx_plugin.MellanoxEswitchPlugin') +NOTIFIER = ('neutron.plugins.mlnx.' 
+ 'agent_notify_api.AgentNotifierApi') + + +class MlnxSecurityGroupsTestCase(test_sg.SecurityGroupDBTestCase): + _plugin_name = PLUGIN_NAME + + def setUp(self, plugin=None): + test_sg_rpc.set_firewall_driver(test_sg_rpc.FIREWALL_IPTABLES_DRIVER) + notifier_p = mock.patch(NOTIFIER) + notifier_cls = notifier_p.start() + self.notifier = mock.Mock() + notifier_cls.return_value = self.notifier + self._attribute_map_bk_ = {} + for item in attributes.RESOURCE_ATTRIBUTE_MAP: + self._attribute_map_bk_[item] = (attributes. + RESOURCE_ATTRIBUTE_MAP[item]. + copy()) + super(MlnxSecurityGroupsTestCase, self).setUp(PLUGIN_NAME) + + def tearDown(self): + attributes.RESOURCE_ATTRIBUTE_MAP = self._attribute_map_bk_ + super(MlnxSecurityGroupsTestCase, self).tearDown() + + +class TestMlnxSecurityGroups(MlnxSecurityGroupsTestCase, + test_sg.TestSecurityGroups, + test_sg_rpc.SGNotificationTestMixin): + pass + + +class TestMlnxSecurityGroupsXML(TestMlnxSecurityGroups): + fmt = 'xml' + + +class TestMlnxSecurityGroupsDB(MlnxSecurityGroupsTestCase): + def test_security_group_get_port_from_device(self): + with self.network() as n: + with self.subnet(n): + with self.security_group() as sg: + security_group_id = sg['security_group']['id'] + res = self._create_port(self.fmt, n['network']['id']) + port = self.deserialize(self.fmt, res) + fixed_ips = port['port']['fixed_ips'] + data = {'port': {'fixed_ips': fixed_ips, + 'name': port['port']['name'], + ext_sg.SECURITYGROUPS: + [security_group_id]}} + + req = self.new_update_request('ports', data, + port['port']['id']) + if res.status_int >= 400: + raise webob.exc.HTTPClientError(code=res.status_int) + res = self.deserialize(self.fmt, + req.get_response(self.api)) + port_id = res['port']['id'] + device_id = port_id[:8] + port_dict = mlnx_db.get_port_from_device(device_id) + self.assertEqual(port_id, port_dict['id']) + self.assertEqual([security_group_id], + port_dict[ext_sg.SECURITYGROUPS]) + self.assertEqual([], port_dict['security_group_rules']) 
+ self.assertEqual([fixed_ips[0]['ip_address']], + port_dict['fixed_ips']) + self._delete('ports', port['port']['id']) + + def test_security_group_get_port_from_device_with_no_port(self): + port_dict = mlnx_db.get_port_from_device('bad_device_id') + self.assertIsNone(port_dict) + + +class TestMlnxSecurityGroupsDBXML(TestMlnxSecurityGroupsDB): + fmt = 'xml' diff --git a/neutron/tests/unit/mlnx/test_rpcapi.py b/neutron/tests/unit/mlnx/test_rpcapi.py new file mode 100644 index 000000000..ea34a840b --- /dev/null +++ b/neutron/tests/unit/mlnx/test_rpcapi.py @@ -0,0 +1,155 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013 Mellanox Technologies, Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +""" +Unit Tests for Mellanox RPC (major reuse of linuxbridge rpc unit tests) +""" + +import fixtures +from oslo.config import cfg + +from neutron.agent import rpc as agent_rpc +from neutron.common import topics +from neutron.openstack.common import context +from neutron.plugins.mlnx import agent_notify_api +from neutron.tests import base + + +class rpcApiTestCase(base.BaseTestCase): + + def _test_mlnx_api(self, rpcapi, topic, method, rpc_method, + expected_msg=None, **kwargs): + ctxt = context.RequestContext('fake_user', 'fake_project') + expected_retval = 'foo' if method == 'call' else None + if not expected_msg: + expected_msg = rpcapi.make_msg(method, **kwargs) + if rpc_method == 'cast' and method == 'run_instance': + kwargs['call'] = False + + self.fake_args = None + self.fake_kwargs = None + + def _fake_rpc_method(*args, **kwargs): + self.fake_args = args + self.fake_kwargs = kwargs + if expected_retval: + return expected_retval + + self.useFixture(fixtures.MonkeyPatch( + 'neutron.common.rpc_compat.RpcProxy.' 
+ rpc_method, + _fake_rpc_method)) + + retval = getattr(rpcapi, method)(ctxt, **kwargs) + + self.assertEqual(expected_retval, retval) + expected_args = [ctxt, expected_msg] + expected_kwargs = {'topic': topic} + + # skip the first argument which is 'self' + for arg, expected_arg in zip(self.fake_args[1:], expected_args): + self.assertEqual(expected_arg, arg) + self.assertEqual(expected_kwargs, self.fake_kwargs) + + def test_delete_network(self): + rpcapi = agent_notify_api.AgentNotifierApi(topics.AGENT) + self._test_mlnx_api(rpcapi, + topics.get_topic_name(topics.AGENT, + topics.NETWORK, + topics.DELETE), + 'network_delete', rpc_method='fanout_cast', + network_id='fake_request_spec') + + def test_port_update(self): + cfg.CONF.set_override('rpc_support_old_agents', False, 'AGENT') + rpcapi = agent_notify_api.AgentNotifierApi(topics.AGENT) + expected_msg = rpcapi.make_msg('port_update', + port='fake_port', + network_type='vlan', + physical_network='fake_net', + segmentation_id='fake_vlan_id') + self._test_mlnx_api(rpcapi, + topics.get_topic_name(topics.AGENT, + topics.PORT, + topics.UPDATE), + 'port_update', rpc_method='fanout_cast', + expected_msg=expected_msg, + port='fake_port', + network_type='vlan', + physical_network='fake_net', + vlan_id='fake_vlan_id') + + def test_port_update_ib(self): + cfg.CONF.set_override('rpc_support_old_agents', False, 'AGENT') + rpcapi = agent_notify_api.AgentNotifierApi(topics.AGENT) + expected_msg = rpcapi.make_msg('port_update', + port='fake_port', + network_type='ib', + physical_network='fake_net', + segmentation_id='fake_vlan_id') + self._test_mlnx_api(rpcapi, + topics.get_topic_name(topics.AGENT, + topics.PORT, + topics.UPDATE), + 'port_update', rpc_method='fanout_cast', + expected_msg=expected_msg, + port='fake_port', + network_type='ib', + physical_network='fake_net', + vlan_id='fake_vlan_id') + + def test_port_update_old_agent(self): + cfg.CONF.set_override('rpc_support_old_agents', True, 'AGENT') + rpcapi = 
agent_notify_api.AgentNotifierApi(topics.AGENT) + expected_msg = rpcapi.make_msg('port_update', + port='fake_port', + network_type='vlan', + physical_network='fake_net', + segmentation_id='fake_vlan_id', + vlan_id='fake_vlan_id') + self._test_mlnx_api(rpcapi, + topics.get_topic_name(topics.AGENT, + topics.PORT, + topics.UPDATE), + 'port_update', rpc_method='fanout_cast', + expected_msg=expected_msg, + port='fake_port', + network_type='vlan', + physical_network='fake_net', + vlan_id='fake_vlan_id') + + def test_device_details(self): + rpcapi = agent_rpc.PluginApi(topics.PLUGIN) + self._test_mlnx_api(rpcapi, topics.PLUGIN, + 'get_device_details', rpc_method='call', + device='fake_device', + agent_id='fake_agent_id') + + def test_update_device_down(self): + rpcapi = agent_rpc.PluginApi(topics.PLUGIN) + self._test_mlnx_api(rpcapi, topics.PLUGIN, + 'update_device_down', rpc_method='call', + device='fake_device', + agent_id='fake_agent_id', + host='fake_host') + + def test_update_device_up(self): + rpcapi = agent_rpc.PluginApi(topics.PLUGIN) + self._test_mlnx_api(rpcapi, topics.PLUGIN, + 'update_device_up', rpc_method='call', + device='fake_device', + agent_id='fake_agent_id', + host='fake_host') diff --git a/neutron/tests/unit/nec/__init__.py b/neutron/tests/unit/nec/__init__.py new file mode 100644 index 000000000..362a36068 --- /dev/null +++ b/neutron/tests/unit/nec/__init__.py @@ -0,0 +1,15 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2012 NEC Corporation. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/neutron/tests/unit/nec/fake_ofc_manager.py b/neutron/tests/unit/nec/fake_ofc_manager.py new file mode 100644 index 000000000..d64b11d03 --- /dev/null +++ b/neutron/tests/unit/nec/fake_ofc_manager.py @@ -0,0 +1,106 @@ +# Copyright (c) 2013 OpenStack Foundation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import mock + + +OFC_MANAGER = 'neutron.plugins.nec.nec_plugin.ofc_manager.OFCManager' + + +def patch_ofc_manager(): + m = mock.patch(OFC_MANAGER).start() + f = FakeOFCManager() + + m.create_ofc_tenant.side_effect = f.create_ofc_tenant + m.delete_ofc_tenant.side_effect = f.delete_ofc_tenant + m.exists_ofc_tenant.side_effect = f.exists_ofc_tenant + m.create_ofc_network.side_effect = f.create_ofc_net + m.delete_ofc_network.side_effect = f.delete_ofc_net + m.exists_ofc_network.side_effect = f.exists_ofc_net + m.create_ofc_port.side_effect = f.create_ofc_port + m.delete_ofc_port.side_effect = f.delete_ofc_port + m.exists_ofc_port.side_effect = f.exists_ofc_port + m.create_ofc_packet_filter.side_effect = f.create_ofc_pf + m.delete_ofc_packet_filter.side_effect = f.delete_ofc_pf + m.exists_ofc_packet_filter.side_effect = f.exists_ofc_pf + m.set_raise_exc = f.set_raise_exc + + return m + + +class FakeOFCManager(object): + + def __init__(self): + self.ofc_tenants = {} + self.ofc_nets = {} + self.ofc_ports = {} + self.ofc_pfs = {} + self.raise_exc_map = 
{} + + def set_raise_exc(self, func, raise_exc): + self.raise_exc_map.update({func: raise_exc}) + + def _raise_exc(self, func): + exc = self.raise_exc_map.get(func) + if exc: + raise exc + + def create_ofc_tenant(self, context, tenant_id): + self._raise_exc('create_ofc_tenant') + self.ofc_tenants.update({tenant_id: True}) + + def exists_ofc_tenant(self, context, tenant_id): + self._raise_exc('exists_ofc_tenant') + return self.ofc_tenants.get(tenant_id, False) + + def delete_ofc_tenant(self, context, tenant_id): + self._raise_exc('delete_ofc_tenant') + del self.ofc_tenants[tenant_id] + + def create_ofc_net(self, context, tenant_id, net_id, net_name=None): + self._raise_exc('create_ofc_network') + self.ofc_nets.update({net_id: True}) + + def exists_ofc_net(self, context, net_id): + self._raise_exc('exists_ofc_network') + return self.ofc_nets.get(net_id, False) + + def delete_ofc_net(self, context, net_id, net): + self._raise_exc('delete_ofc_network') + del self.ofc_nets[net_id] + + def create_ofc_port(self, context, port_id, port): + self._raise_exc('create_ofc_port') + self.ofc_ports.update({port_id: True}) + + def exists_ofc_port(self, context, port_id): + self._raise_exc('exists_ofc_port') + return self.ofc_ports.get(port_id, False) + + def delete_ofc_port(self, context, port_id, port): + self._raise_exc('delete_ofc_port') + del self.ofc_ports[port_id] + + def create_ofc_pf(self, context, pf_id, pf_dict): + self._raise_exc('create_ofc_packet_filter') + self.ofc_pfs.update({pf_id: True}) + + def exists_ofc_pf(self, context, pf_id): + self._raise_exc('exists_ofc_packet_filter') + return self.ofc_pfs.get(pf_id, False) + + def delete_ofc_pf(self, context, pf_id): + self._raise_exc('delete_ofc_packet_filter') + del self.ofc_pfs[pf_id] diff --git a/neutron/tests/unit/nec/stub_ofc_driver.py b/neutron/tests/unit/nec/stub_ofc_driver.py new file mode 100644 index 000000000..d03744ffa --- /dev/null +++ b/neutron/tests/unit/nec/stub_ofc_driver.py @@ -0,0 +1,293 @@ +# vim: 
tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2012 NEC Corporation. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# @author: Ryota MIBU + +import netaddr + +from neutron.common import log as call_log +from neutron.openstack.common import log as logging +from neutron.openstack.common import uuidutils +from neutron.plugins.nec.common import exceptions as nexc +from neutron.plugins.nec import ofc_driver_base + + +LOG = logging.getLogger(__name__) + +MAX_NUM_OPENFLOW_ROUTER = 2 + + +class StubOFCDriver(ofc_driver_base.OFCDriverBase): + """Stub OFC driver for testing. + + This driver can be used not only for unit tests but also for real testing + as a logging driver. It stores the created resources on OFC and returns + them in get methods(). + + If autocheck is enabled, it checks whether the specified resource exists + in OFC and raises an exception if it is different from expected status. 
+ """ + + def __init__(self, conf): + self.autocheck = False + self.reset_all() + + def reset_all(self): + self.ofc_tenant_dict = {} + self.ofc_network_dict = {} + self.ofc_port_dict = {} + self.ofc_filter_dict = {} + self.ofc_router_dict = {} + self.ofc_router_inf_dict = {} + self.ofc_router_route_dict = {} + + def enable_autocheck(self): + self.autocheck = True + + def disable_autocheck(self): + self.autocheck = False + + @call_log.log + def create_tenant(self, description, tenant_id=None): + ofc_id = "ofc-" + tenant_id[:-4] + if self.autocheck: + if ofc_id in self.ofc_tenant_dict: + raise Exception(_('(create_tenant) OFC tenant %s ' + 'already exists') % ofc_id) + self.ofc_tenant_dict[ofc_id] = {'tenant_id': tenant_id, + 'description': description} + return ofc_id + + @call_log.log + def delete_tenant(self, ofc_tenant_id): + if ofc_tenant_id in self.ofc_tenant_dict: + del self.ofc_tenant_dict[ofc_tenant_id] + else: + if self.autocheck: + raise Exception(_('(delete_tenant) OFC tenant %s not found') + % ofc_tenant_id) + LOG.debug(_('delete_tenant: SUCCEED')) + + @call_log.log + def create_network(self, ofc_tenant_id, description, network_id=None): + ofc_id = "ofc-" + network_id[:-4] + if self.autocheck: + if ofc_tenant_id not in self.ofc_tenant_dict: + raise Exception(_('(create_network) OFC tenant %s not found') + % ofc_tenant_id) + if ofc_id in self.ofc_network_dict: + raise Exception(_('(create_network) OFC network %s ' + 'already exists') % ofc_id) + self.ofc_network_dict[ofc_id] = {'tenant_id': ofc_tenant_id, + 'network_id': network_id, + 'description': description} + return ofc_id + + @call_log.log + def update_network(self, ofc_network_id, description): + if self.autocheck: + if ofc_network_id not in self.ofc_network_dict: + raise Exception(_('(update_network) OFC network %s not found') + % ofc_network_id) + data = {'description': description} + self.ofc_network_dict[ofc_network_id].update(data) + LOG.debug(_('update_network: SUCCEED')) + + @call_log.log + 
def delete_network(self, ofc_network_id): + if ofc_network_id in self.ofc_network_dict: + del self.ofc_network_dict[ofc_network_id] + else: + if self.autocheck: + raise Exception(_('(delete_network) OFC network %s not found') + % ofc_network_id) + LOG.debug(_('delete_network: SUCCEED')) + + @call_log.log + def create_port(self, ofc_network_id, info, port_id=None, filters=None): + ofc_id = "ofc-" + port_id[:-4] + if self.autocheck: + if ofc_network_id not in self.ofc_network_dict: + raise Exception(_('(create_port) OFC network %s not found') + % ofc_network_id) + if ofc_id in self.ofc_port_dict: + raise Exception(_('(create_port) OFC port %s already exists') + % ofc_id) + self.ofc_port_dict[ofc_id] = {'network_id': ofc_network_id, + 'port_id': port_id} + if filters: + self.ofc_port_dict[ofc_id]['filters'] = filters + return ofc_id + + @call_log.log + def delete_port(self, ofc_port_id): + if ofc_port_id in self.ofc_port_dict: + del self.ofc_port_dict[ofc_port_id] + else: + if self.autocheck: + raise Exception(_('(delete_port) OFC port %s not found') + % ofc_port_id) + LOG.debug(_('delete_port: SUCCEED')) + + @classmethod + def filter_supported(cls): + return True + + def create_filter(self, ofc_network_id, filter_dict, + portinfo=None, filter_id=None, apply_ports=None): + return "ofc-" + filter_id[:-4] + + def delete_filter(self, ofc_filter_id): + pass + + def convert_ofc_tenant_id(self, context, ofc_tenant_id): + return ofc_tenant_id + + def convert_ofc_network_id(self, context, ofc_network_id, tenant_id): + return ofc_network_id + + def convert_ofc_port_id(self, context, ofc_port_id, tenant_id, network_id): + return ofc_port_id + + def convert_ofc_filter_id(self, context, ofc_filter_id): + return ofc_filter_id + + router_supported = True + router_nat_supported = True + + @call_log.log + def create_router(self, ofc_tenant_id, router_id, description): + ofc_id = "ofc-" + router_id[:-4] + if self.autocheck: + if ofc_tenant_id not in self.ofc_tenant_dict: + raise 
Exception(_('(create_router) OFC tenant %s not found') + % ofc_tenant_id) + if ofc_id in self.ofc_router_dict: + raise Exception(_('(create_router) OFC router %s ' + 'already exists') % ofc_id) + if len(self.ofc_router_dict) >= MAX_NUM_OPENFLOW_ROUTER: + params = {'reason': _("Operation on OFC is failed"), + 'status': 409} + raise nexc.OFCException(**params) + self.ofc_router_dict[ofc_id] = {'tenant_id': ofc_tenant_id, + 'router_id': router_id, + 'description': description} + return ofc_id + + @call_log.log + def delete_router(self, ofc_router_id): + if ofc_router_id in self.ofc_router_dict: + del self.ofc_router_dict[ofc_router_id] + else: + if self.autocheck: + raise Exception(_('(delete_router) OFC router %s not found') + % ofc_router_id) + LOG.debug(_('delete_router: SUCCEED')) + + @call_log.log + def add_router_interface(self, ofc_router_id, ofc_net_id, + ip_address=None, mac_address=None): + if_id = "ofc-" + uuidutils.generate_uuid()[:-4] + # IP address should have a format of a.b.c.d/N + if ip_address != str(netaddr.IPNetwork(ip_address)): + raise Exception(_('(add_router_interface) ' + 'ip_address %s is not a valid format (a.b.c.d/N).') + % ip_address) + if self.autocheck: + if ofc_router_id not in self.ofc_router_dict: + raise Exception(_('(add_router_interface) ' + 'OFC router %s not found') % ofc_router_id) + if ofc_net_id not in self.ofc_network_dict: + raise Exception(_('(add_router_interface) ' + 'OFC network %s not found') % ofc_net_id) + # Check duplicate destination + self.ofc_router_inf_dict[if_id] = {'router_id': ofc_router_id, + 'network_id': ofc_net_id, + 'ip_address': ip_address, + 'mac_address': mac_address} + LOG.debug(_('add_router_interface: SUCCEED (if_id=%s)'), if_id) + return if_id + + @call_log.log + def update_router_interface(self, ofc_router_inf_id, + ip_address=None, mac_address=None): + if ofc_router_inf_id not in self.ofc_router_inf_dict: + if self.autocheck: + raise Exception(_('(delete_router_interface) ' + 'OFC router 
interface %s not found') + % ofc_router_inf_id) + self.ofc_router_inf_dict[ofc_router_inf_id] = {} + inf = self.ofc_router_inf_dict[ofc_router_inf_id] + if ip_address: + inf.update({'ip_address': ip_address}) + if mac_address: + inf.update({'mac_address': mac_address}) + LOG.debug(_('update_router_route: SUCCEED')) + + @call_log.log + def delete_router_interface(self, ofc_router_inf_id): + if ofc_router_inf_id in self.ofc_router_inf_dict: + del self.ofc_router_inf_dict[ofc_router_inf_id] + else: + if self.autocheck: + raise Exception(_('(delete_router_interface) ' + 'OFC router interface %s not found') + % ofc_router_inf_id) + LOG.debug(_('delete_router_interface: SUCCEED')) + + @call_log.log + def add_router_route(self, ofc_router_id, destination, nexthop): + route_id = "ofc-" + uuidutils.generate_uuid()[:-4] + # IP address format check + netaddr.IPNetwork(destination) + netaddr.IPAddress(nexthop) + if self.autocheck: + if ofc_router_id not in self.ofc_router_dict: + raise Exception(_('(add_router_route) OFC router %s not found') + % ofc_router_id) + # Check duplicate destination + if destination in [route['destination'] for route in + self.ofc_router_route_dict.values()]: + raise Exception(_('(add_router_route) ' + 'route to "%s" already exists') % destination) + self.ofc_router_route_dict[route_id] = {'router_id': ofc_router_id, + 'destination': destination, + 'nexthop': nexthop} + LOG.debug(_('add_router_route: SUCCEED (route_id=%s)'), route_id) + return route_id + + @call_log.log + def delete_router_route(self, ofc_router_route_id): + if ofc_router_route_id in self.ofc_router_route_dict: + del self.ofc_router_route_dict[ofc_router_route_id] + else: + if self.autocheck: + raise Exception(_('(delete_router_route) OFC router route %s ' + 'not found') % ofc_router_route_id) + LOG.debug(_('delete_router_route: SUCCEED')) + + @call_log.log + def list_router_routes(self, ofc_router_id): + if self.autocheck: + if ofc_router_id not in self.ofc_router_dict: + raise 
Exception(_('(delete_router) OFC router %s not found') + % ofc_router_id) + routes = [{'id': k, + 'destination': v['destination'], + 'nexthop': v['nexthop']} + for k, v in self.ofc_router_route_dict.items() + if v['router_id'] == ofc_router_id] + LOG.debug(_('list_router_routes: routes=%s'), routes) + return routes diff --git a/neutron/tests/unit/nec/test_agent_scheduler.py b/neutron/tests/unit/nec/test_agent_scheduler.py new file mode 100644 index 000000000..60c1770a5 --- /dev/null +++ b/neutron/tests/unit/nec/test_agent_scheduler.py @@ -0,0 +1,118 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 NEC Corporation. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import contextlib + +from neutron.common import constants +from neutron.db import l3_rpc_base +from neutron.tests.unit.nec import test_nec_plugin +from neutron.tests.unit.openvswitch import test_agent_scheduler + +L3_HOSTA = test_agent_scheduler.L3_HOSTA +L3_HOSTB = test_agent_scheduler.L3_HOSTB + + +class NecAgentSchedulerTestCase( + test_agent_scheduler.OvsAgentSchedulerTestCase, + test_nec_plugin.NecPluginV2TestCaseBase): + + plugin_str = test_nec_plugin.PLUGIN_NAME + l3_plugin = None + + def setUp(self): + self.setup_nec_plugin_base() + super(NecAgentSchedulerTestCase, self).setUp() + + +class NecDhcpAgentNotifierTestCase( + test_agent_scheduler.OvsDhcpAgentNotifierTestCase, + test_nec_plugin.NecPluginV2TestCaseBase): + + plugin_str = test_nec_plugin.PLUGIN_NAME + + def setUp(self): + self.setup_nec_plugin_base() + super(NecDhcpAgentNotifierTestCase, self).setUp() + + +class NecL3AgentNotifierTestCase( + test_agent_scheduler.OvsL3AgentNotifierTestCase, + test_nec_plugin.NecPluginV2TestCaseBase): + + plugin_str = test_nec_plugin.PLUGIN_NAME + l3_plugin = None + + def setUp(self): + self.setup_nec_plugin_base() + super(NecL3AgentNotifierTestCase, self).setUp() + + +class NecL3AgentSchedulerWithOpenFlowRouter( + test_agent_scheduler.OvsAgentSchedulerTestCaseBase, + test_nec_plugin.NecPluginV2TestCaseBase): + + plugin_str = test_nec_plugin.PLUGIN_NAME + + def setUp(self): + self.setup_nec_plugin_base() + super(NecL3AgentSchedulerWithOpenFlowRouter, self).setUp() + + def test_router_auto_schedule_with_l3agent_and_openflow(self): + with contextlib.nested( + self.router(), + self.router(arg_list=('provider',), + provider='openflow' + )) as (r1, r2): + l3_rpc = l3_rpc_base.L3RpcCallbackMixin() + self._register_agent_states() + ret_a = l3_rpc.sync_routers(self.adminContext, host=L3_HOSTA) + ret_b = l3_rpc.sync_routers(self.adminContext, host=L3_HOSTB) + l3_agents = self._list_l3_agents_hosting_router( + r1['router']['id']) + self.assertEqual(1, len(ret_a)) + 
self.assertFalse(len(ret_b)) + self.assertIn(r1['router']['id'], [r['id'] for r in ret_a]) + self.assertNotIn(r2['router']['id'], [r['id'] for r in ret_a]) + self.assertEqual(1, len(l3_agents['agents'])) + self.assertEqual(L3_HOSTA, l3_agents['agents'][0]['host']) + + def test_router_auto_schedule_only_with_openflow_router(self): + with contextlib.nested( + self.router(arg_list=('provider',), provider='openflow'), + self.router(arg_list=('provider',), provider='openflow') + ) as (r1, r2): + l3_rpc = l3_rpc_base.L3RpcCallbackMixin() + self._register_agent_states() + ret_a = l3_rpc.sync_routers(self.adminContext, host=L3_HOSTA) + l3_agents_1 = self._list_l3_agents_hosting_router( + r1['router']['id']) + l3_agents_2 = self._list_l3_agents_hosting_router( + r2['router']['id']) + self.assertFalse(len(ret_a)) + self.assertNotIn(r1['router']['id'], [r['id'] for r in ret_a]) + self.assertNotIn(r2['router']['id'], [r['id'] for r in ret_a]) + self.assertFalse(len(l3_agents_1['agents'])) + self.assertFalse(len(l3_agents_2['agents'])) + + def test_add_router_to_l3_agent_for_openflow_router(self): + with self.router(arg_list=('provider',), provider='openflow') as r1: + self._register_agent_states() + hosta_id = self._get_agent_id(constants.AGENT_TYPE_L3, + L3_HOSTA) + self._add_router_to_l3_agent(hosta_id, + r1['router']['id'], + expected_code=409) diff --git a/neutron/tests/unit/nec/test_config.py b/neutron/tests/unit/nec/test_config.py new file mode 100644 index 000000000..9d9fad521 --- /dev/null +++ b/neutron/tests/unit/nec/test_config.py @@ -0,0 +1,44 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2012 NEC Corporation. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# @author: Ryota MIBU + +from neutron.plugins.nec.common import config +from neutron.tests import base + + +class ConfigurationTest(base.BaseTestCase): + + def test_defaults(self): + self.assertEqual('br-int', config.CONF.OVS.integration_bridge) + self.assertEqual(2, config.CONF.AGENT.polling_interval) + self.assertEqual('sudo', config.CONF.AGENT.root_helper) + + self.assertEqual('127.0.0.1', config.CONF.OFC.host) + self.assertEqual('8888', config.CONF.OFC.port) + # Check path_prefix is an empty string explicitly. + self.assertEqual('', config.CONF.OFC.path_prefix) + self.assertEqual('trema', config.CONF.OFC.driver) + self.assertTrue(config.CONF.OFC.enable_packet_filter) + self.assertFalse(config.CONF.OFC.use_ssl) + self.assertIsNone(config.CONF.OFC.key_file) + self.assertIsNone(config.CONF.OFC.cert_file) + + def test_shortcuts(self): + self.assertEqual(config.CONF.OVS.integration_bridge, + config.OVS.integration_bridge) + self.assertEqual(config.CONF.AGENT.polling_interval, + config.AGENT.polling_interval) + self.assertEqual(config.CONF.OFC.host, config.OFC.host) diff --git a/neutron/tests/unit/nec/test_db.py b/neutron/tests/unit/nec/test_db.py new file mode 100644 index 000000000..e3b886907 --- /dev/null +++ b/neutron/tests/unit/nec/test_db.py @@ -0,0 +1,176 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 NEC Corporation. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# @author: Ryota MIBU + +import contextlib +import random + +from neutron.common import constants as q_const +from neutron.openstack.common import uuidutils +from neutron.plugins.nec.common import exceptions as nexc +from neutron.plugins.nec.db import api as ndb +from neutron.plugins.nec.db import models as nmodels # noqa +from neutron.tests.unit.nec import test_nec_plugin + + +class NECPluginV2DBTestBase(test_nec_plugin.NecPluginV2TestCase): + """Class conisting of NECPluginV2 DB unit tests.""" + + def setUp(self): + """Setup for tests.""" + super(NECPluginV2DBTestBase, self).setUp() + self.session = self.context.session + + def get_ofc_item_random_params(self): + """create random parameters for ofc_item test.""" + ofc_id = uuidutils.generate_uuid() + neutron_id = uuidutils.generate_uuid() + none = uuidutils.generate_uuid() + return ofc_id, neutron_id, none + + @contextlib.contextmanager + def portinfo_random_params(self): + with self.port() as port: + params = {'port_id': port['port']['id'], + 'datapath_id': hex(random.randint(0, 0xffffffff)), + 'port_no': random.randint(1, 100), + 'vlan_id': random.randint(q_const.MIN_VLAN_TAG, + q_const.MAX_VLAN_TAG), + 'mac': ':'.join(["%02x" % random.randint(0, 0xff) + for x in range(6)]) + } + yield params + + +class NECPluginV2DBOfcMappingTest(NECPluginV2DBTestBase): + + def test_add_ofc_item(self): + """test add OFC item.""" + o, q, n = self.get_ofc_item_random_params() + tenant = ndb.add_ofc_item(self.session, 'ofc_tenant', q, o) + self.assertEqual(tenant.ofc_id, o) + self.assertEqual(tenant.neutron_id, q) + + 
def test_add_ofc_item_duplicate_entry(self): + o, q, n = self.get_ofc_item_random_params() + ndb.add_ofc_item(self.session, 'ofc_tenant', q, o) + self.assertRaises(nexc.NECDBException, + ndb.add_ofc_item, + self.session, 'ofc_tenant', q, o) + + def test_get_ofc_item(self): + o, q, n = self.get_ofc_item_random_params() + ndb.add_ofc_item(self.session, 'ofc_tenant', q, o) + tenant = ndb.get_ofc_item(self.session, 'ofc_tenant', q) + self.assertEqual(tenant.ofc_id, o) + self.assertEqual(tenant.neutron_id, q) + + def test_get_ofc_item_for_nonexisting_entry(self): + self.assertIsNone( + ndb.get_ofc_item(self.session, 'ofc_tenant', 'non-exist-id')) + + def test_get_ofc_id(self): + o, q, n = self.get_ofc_item_random_params() + ndb.add_ofc_item(self.session, 'ofc_tenant', q, o) + tenant_id = ndb.get_ofc_id(self.session, 'ofc_tenant', q) + self.assertEqual(tenant_id, o) + + def test_get_ofc_id_for_nonexisting_entry(self): + self.assertRaises(nexc.OFCMappingNotFound, + ndb.get_ofc_id, + self.session, 'ofc_tenant', 'non-exist-id') + + def test_exists_ofc_item(self): + o, q, n = self.get_ofc_item_random_params() + self.assertFalse(ndb.exists_ofc_item(self.session, 'ofc_tenant', q)) + + ndb.add_ofc_item(self.session, 'ofc_tenant', q, o) + self.assertTrue(ndb.exists_ofc_item(self.session, 'ofc_tenant', q)) + + ndb.del_ofc_item(self.session, 'ofc_tenant', q) + self.assertFalse(ndb.exists_ofc_item(self.session, 'ofc_tenant', q)) + + def test_find_ofc_item(self): + o, q, n = self.get_ofc_item_random_params() + ndb.add_ofc_item(self.session, 'ofc_tenant', q, o) + tenant = ndb.find_ofc_item(self.session, 'ofc_tenant', o) + self.assertEqual(tenant.ofc_id, o) + self.assertEqual(tenant.neutron_id, q) + + def test_find_ofc_item_for_nonexisting_entry(self): + self.assertIsNone( + ndb.find_ofc_item(self.session, 'ofc_tenant', 'non-existi-id')) + + def test_del_ofc_item(self): + o, q, n = self.get_ofc_item_random_params() + ndb.add_ofc_item(self.session, 'ofc_tenant', q, o) + 
self.assertTrue(ndb.del_ofc_item(self.session, 'ofc_tenant', q)) + + self.assertIsNone(ndb.get_ofc_item(self.session, 'ofc_tenant', q)) + self.assertIsNone(ndb.find_ofc_item(self.session, 'ofc_tenant', o)) + + def test_del_ofc_item_for_nonexisting_entry(self): + self.assertFalse( + ndb.del_ofc_item(self.session, 'ofc_tenant', 'non-existi-id')) + + +class NECPluginV2DBPortInfoTest(NECPluginV2DBTestBase): + + def _compare_portinfo(self, portinfo, expected): + self.assertEqual(portinfo.id, expected['port_id']) + self.assertEqual(portinfo.datapath_id, expected['datapath_id']) + self.assertEqual(portinfo.port_no, expected['port_no']) + self.assertEqual(portinfo.vlan_id, expected['vlan_id']) + self.assertEqual(portinfo.mac, expected['mac']) + + def _add_portinfo(self, session, params): + return ndb.add_portinfo(session, params['port_id'], + params['datapath_id'], params['port_no'], + params['vlan_id'], params['mac']) + + def testd_add_portinfo(self): + """test add portinfo.""" + with self.portinfo_random_params() as params: + portinfo = self._add_portinfo(self.session, params) + self._compare_portinfo(portinfo, params) + + exception_raised = False + try: + self._add_portinfo(self.session, params) + except nexc.NECDBException: + exception_raised = True + self.assertTrue(exception_raised) + + def teste_get_portinfo(self): + """test get portinfo.""" + with self.portinfo_random_params() as params: + self._add_portinfo(self.session, params) + portinfo = ndb.get_portinfo(self.session, params['port_id']) + self._compare_portinfo(portinfo, params) + + nonexist_id = uuidutils.generate_uuid() + portinfo_none = ndb.get_portinfo(self.session, nonexist_id) + self.assertIsNone(portinfo_none) + + def testf_del_portinfo(self): + """test delete portinfo.""" + with self.portinfo_random_params() as params: + self._add_portinfo(self.session, params) + portinfo = ndb.get_portinfo(self.session, params['port_id']) + self.assertEqual(portinfo.id, params['port_id']) + 
ndb.del_portinfo(self.session, params['port_id']) + portinfo_none = ndb.get_portinfo(self.session, params['port_id']) + self.assertIsNone(portinfo_none) diff --git a/neutron/tests/unit/nec/test_nec_agent.py b/neutron/tests/unit/nec/test_nec_agent.py new file mode 100644 index 000000000..26396aeb9 --- /dev/null +++ b/neutron/tests/unit/nec/test_nec_agent.py @@ -0,0 +1,366 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013 NEC Corporation. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import contextlib +import copy +import itertools +import time + +import mock +from oslo.config import cfg +from six import moves +import testtools + +from neutron.agent.linux import ovs_lib +from neutron.extensions import securitygroup as ext_sg +from neutron.plugins.nec.agent import nec_neutron_agent +from neutron.tests import base + +DAEMON_LOOP_COUNT = 10 +OVS_DPID = '00000629355b6943' +OVS_DPID_0X = '0x' + OVS_DPID + + +class TestNecAgentBase(base.BaseTestCase): + + def setUp(self): + super(TestNecAgentBase, self).setUp() + cfg.CONF.set_default('firewall_driver', + 'neutron.agent.firewall.NoopFirewallDriver', + group='SECURITYGROUP') + cfg.CONF.set_override('rpc_backend', + 'neutron.openstack.common.rpc.impl_fake') + cfg.CONF.set_override('host', 'dummy-host') + with contextlib.nested( + mock.patch.object(ovs_lib.OVSBridge, 'get_datapath_id', + return_value=OVS_DPID), + mock.patch('socket.gethostname', return_value='dummy-host'), + mock.patch('neutron.openstack.common.loopingcall.' 
+ 'FixedIntervalLoopingCall'), + mock.patch('neutron.agent.rpc.PluginReportStateAPI') + ) as (get_datapath_id, gethostname, + loopingcall, state_rpc_api): + kwargs = {'integ_br': 'integ_br', + 'root_helper': 'dummy_wrapper', + 'polling_interval': 1} + self.agent = nec_neutron_agent.NECNeutronAgent(**kwargs) + self.loopingcall = loopingcall + self.state_rpc_api = state_rpc_api + + +class TestNecAgent(TestNecAgentBase): + + def _setup_mock(self): + vif_ports = [ovs_lib.VifPort('port1', '1', 'id-1', 'mac-1', + self.agent.int_br), + ovs_lib.VifPort('port2', '2', 'id-2', 'mac-2', + self.agent.int_br)] + self.get_vif_ports = mock.patch.object( + ovs_lib.OVSBridge, 'get_vif_ports', + return_value=vif_ports).start() + self.update_ports = mock.patch.object( + nec_neutron_agent.NECPluginApi, 'update_ports').start() + self.prepare_devices_filter = mock.patch.object( + self.agent.sg_agent, 'prepare_devices_filter').start() + self.remove_devices_filter = mock.patch.object( + self.agent.sg_agent, 'remove_devices_filter').start() + + def _test_single_loop(self, with_exc=False, need_sync=False): + self.agent.cur_ports = ['id-0', 'id-1'] + self.agent.need_sync = need_sync + + self.agent.loop_handler() + if with_exc: + self.assertEqual(self.agent.cur_ports, ['id-0', 'id-1']) + self.assertTrue(self.agent.need_sync) + else: + self.assertEqual(self.agent.cur_ports, ['id-1', 'id-2']) + self.assertFalse(self.agent.need_sync) + + def test_single_loop_normal(self): + self._setup_mock() + self._test_single_loop() + agent_id = 'nec-q-agent.dummy-host' + self.update_ports.assert_called_once_with( + mock.ANY, agent_id, OVS_DPID_0X, + [{'id': 'id-2', 'mac': 'mac-2', 'port_no': '2'}], + ['id-0']) + self.prepare_devices_filter.assert_called_once_with(['id-2']) + self.remove_devices_filter.assert_called_once_with(['id-0']) + + def test_single_loop_need_sync(self): + self._setup_mock() + self._test_single_loop(need_sync=True) + agent_id = 'nec-q-agent.dummy-host' + 
self.update_ports.assert_called_once_with( + mock.ANY, agent_id, OVS_DPID_0X, + [{'id': 'id-1', 'mac': 'mac-1', 'port_no': '1'}, + {'id': 'id-2', 'mac': 'mac-2', 'port_no': '2'}], + []) + self.prepare_devices_filter.assert_called_once_with(['id-1', 'id-2']) + self.assertFalse(self.remove_devices_filter.call_count) + + def test_single_loop_with_sg_exception_remove(self): + self._setup_mock() + self.update_ports.side_effect = Exception() + self._test_single_loop(with_exc=True) + + def test_single_loop_with_sg_exception_prepare(self): + self._setup_mock() + self.prepare_devices_filter.side_effect = Exception() + self._test_single_loop(with_exc=True) + + def test_single_loop_with_update_ports_exception(self): + self._setup_mock() + self.remove_devices_filter.side_effect = Exception() + self._test_single_loop(with_exc=True) + + def test_daemon_loop(self): + + def state_check(index): + self.assertEqual(len(self.vif_ports_scenario[index]), + len(self.agent.cur_ports)) + + # Fake time.sleep to stop the infinite loop in daemon_loop() + self.sleep_count = 0 + + def sleep_mock(*args, **kwargs): + state_check(self.sleep_count) + self.sleep_count += 1 + if self.sleep_count >= DAEMON_LOOP_COUNT: + raise RuntimeError() + + vif_ports = [ovs_lib.VifPort('port1', '1', 'id-1', 'mac-1', + self.agent.int_br), + ovs_lib.VifPort('port2', '2', 'id-2', 'mac-2', + self.agent.int_br)] + + self.vif_ports_scenario = [[], [], vif_ports[0:1], vif_ports[0:2], + vif_ports[1:2], []] + + # Ensure vif_ports_scenario is longer than DAEMON_LOOP_COUNT + if len(self.vif_ports_scenario) < DAEMON_LOOP_COUNT: + self.vif_ports_scenario.extend( + [] for _i in moves.xrange(DAEMON_LOOP_COUNT - + len(self.vif_ports_scenario))) + + with contextlib.nested( + mock.patch.object(time, 'sleep', side_effect=sleep_mock), + mock.patch.object(ovs_lib.OVSBridge, 'get_vif_ports'), + mock.patch.object(nec_neutron_agent.NECPluginApi, 'update_ports'), + mock.patch.object(self.agent.sg_agent, 'prepare_devices_filter'), + 
mock.patch.object(self.agent.sg_agent, 'remove_devices_filter') +        ) as (sleep, get_vif_ports, update_ports, +              prepare_devices_filter, remove_devices_filter): +            get_vif_ports.side_effect = self.vif_ports_scenario + +            with testtools.ExpectedException(RuntimeError): +                self.agent.daemon_loop() +            self.assertEqual(update_ports.call_count, 4) +            self.assertEqual(sleep.call_count, DAEMON_LOOP_COUNT) + +            agent_id = 'nec-q-agent.dummy-host' +            expected = [ +                mock.call(mock.ANY, agent_id, OVS_DPID_0X, +                          [{'id': 'id-1', 'mac': 'mac-1', 'port_no': '1'}], +                          []), +                mock.call(mock.ANY, agent_id, OVS_DPID_0X, +                          [{'id': 'id-2', 'mac': 'mac-2', 'port_no': '2'}], +                          []), +                mock.call(mock.ANY, agent_id, OVS_DPID_0X, +                          [], ['id-1']), +                mock.call(mock.ANY, agent_id, OVS_DPID_0X, +                          [], ['id-2']) +            ] +            update_ports.assert_has_calls(expected) + +            expected = [mock.call(['id-1']), +                        mock.call(['id-2'])] +            self.assertEqual(prepare_devices_filter.call_count, 2) +            prepare_devices_filter.assert_has_calls(expected) +            self.assertEqual(remove_devices_filter.call_count, 2) +            remove_devices_filter.assert_has_calls(expected) + +            sleep.assert_called_with(self.agent.polling_interval) + +    def test_report_state_installed(self): +        self.loopingcall.assert_called_once_with(self.agent._report_state) +        instance = self.loopingcall.return_value +        self.assertTrue(instance.start.called) + +    def _check_report_state(self, cur_ports, num_ports, fail_mode, +                            first=False): +        self.assertEqual(first or fail_mode, +                         'start_flag' in self.agent.agent_state) +        self.agent.cur_ports = cur_ports + +        self.agent._report_state() + +        self.assertEqual(fail_mode, +                         'start_flag' in self.agent.agent_state) +        self.assertEqual(self.agent. 
+ agent_state['configurations']['devices'], + num_ports) + self.num_ports_hist.append(num_ports) + + def _test_report_state(self, fail_mode): + log_mocked = mock.patch.object(nec_neutron_agent, 'LOG') + log_patched = log_mocked.start() + + def record_state(*args, **kwargs): + self.record_calls.append(copy.deepcopy(args)) + if fail_mode: + raise Exception() + + self.record_calls = [] + self.num_ports_hist = [] + state_rpc = self.state_rpc_api.return_value + state_rpc.report_state.side_effect = record_state + dummy_vif = ovs_lib.VifPort('port1', '1', 'id-1', 'mac-1', None) + + self.state_rpc_api.assert_called_once_with('q-plugin') + self.assertIn('start_flag', self.agent.agent_state) + + self._check_report_state([], 0, fail_mode, first=True) + self._check_report_state([dummy_vif] * 2, 2, fail_mode) + self._check_report_state([dummy_vif] * 5, 5, fail_mode) + self._check_report_state([], 0, fail_mode) + + # Since loopingcall start is mocked, call_count is same as + # the call count of check_report_state. 
+ self.assertEqual(state_rpc.report_state.call_count, 4) + self.assertEqual(len(self.record_calls), 4) + + for i, x in enumerate(itertools.izip(self.record_calls, + self.num_ports_hist)): + rec, num_ports = x + expected_state = { + 'binary': 'neutron-nec-agent', + 'host': 'dummy-host', + 'topic': 'N/A', + 'configurations': {'devices': 0}, + 'agent_type': 'NEC plugin agent'} + expected_state['configurations']['devices'] = num_ports + if i == 0 or fail_mode: + expected_state['start_flag'] = True + self.assertEqual(expected_state, rec[1]) + + self.assertEqual(fail_mode, log_patched.exception.called) + + def test_report_state(self): + self._test_report_state(fail_mode=False) + + def test_report_state_fail(self): + self._test_report_state(fail_mode=True) + + +class TestNecAgentCallback(TestNecAgentBase): + + def test_port_update(self): + with contextlib.nested( + mock.patch.object(ovs_lib.OVSBridge, 'get_vif_port_by_id'), + mock.patch.object(self.agent.sg_agent, 'refresh_firewall') + ) as (get_vif_port_by_id, refresh_firewall): + context = mock.Mock() + vifport = ovs_lib.VifPort('port1', '1', 'id-1', 'mac-1', + self.agent.int_br) + + # The OVS port does not exist. + get_vif_port_by_id.return_value = None + port = {'id': 'update-port-1'} + self.agent.callback_nec.port_update(context, port=port) + self.assertEqual(get_vif_port_by_id.call_count, 1) + self.assertFalse(refresh_firewall.call_count) + + # The OVS port exists but no security group is associated. + get_vif_port_by_id.return_value = vifport + port = {'id': 'update-port-1'} + self.agent.callback_nec.port_update(context, port=port) + self.assertEqual(get_vif_port_by_id.call_count, 2) + self.assertFalse(refresh_firewall.call_count) + + # The OVS port exists but a security group is associated. 
+ get_vif_port_by_id.return_value = vifport + port = {'id': 'update-port-1', + ext_sg.SECURITYGROUPS: ['default']} + self.agent.callback_nec.port_update(context, port=port) + self.assertEqual(get_vif_port_by_id.call_count, 3) + self.assertEqual(refresh_firewall.call_count, 1) + + get_vif_port_by_id.return_value = None + port = {'id': 'update-port-1', + ext_sg.SECURITYGROUPS: ['default']} + self.agent.callback_nec.port_update(context, port=port) + self.assertEqual(get_vif_port_by_id.call_count, 4) + self.assertEqual(refresh_firewall.call_count, 1) + + +class TestNecAgentPluginApi(TestNecAgentBase): + + def _test_plugin_api(self, expected_failure=False): + with contextlib.nested( + mock.patch.object(nec_neutron_agent.NECPluginApi, 'make_msg'), + mock.patch.object(nec_neutron_agent.NECPluginApi, 'call'), + mock.patch.object(nec_neutron_agent, 'LOG') + ) as (make_msg, apicall, log): + agent_id = 'nec-q-agent.dummy-host' + if expected_failure: + apicall.side_effect = Exception() + + self.agent.plugin_rpc.update_ports( + mock.sentinel.ctx, agent_id, OVS_DPID_0X, + # port_added + [{'id': 'id-1', 'mac': 'mac-1', 'port_no': '1'}, + {'id': 'id-2', 'mac': 'mac-2', 'port_no': '2'}], + # port_removed + ['id-3', 'id-4', 'id-5']) + + make_msg.assert_called_once_with( + 'update_ports', topic='q-agent-notifier', + agent_id=agent_id, datapath_id=OVS_DPID_0X, + port_added=[{'id': 'id-1', 'mac': 'mac-1', 'port_no': '1'}, + {'id': 'id-2', 'mac': 'mac-2', 'port_no': '2'}], + port_removed=['id-3', 'id-4', 'id-5']) + + apicall.assert_called_once_with(mock.sentinel.ctx, + make_msg.return_value) + + self.assertTrue(log.info.called) + if expected_failure: + self.assertTrue(log.warn.called) + + def test_plugin_api(self): + self._test_plugin_api() + + +class TestNecAgentMain(base.BaseTestCase): + def test_main(self): + with contextlib.nested( + mock.patch.object(nec_neutron_agent, 'NECNeutronAgent'), + mock.patch.object(nec_neutron_agent, 'common_config'), + 
mock.patch.object(nec_neutron_agent, 'config') + ) as (agent, common_config, cfg): + cfg.OVS.integration_bridge = 'br-int-x' + cfg.AGENT.root_helper = 'dummy-helper' + cfg.AGENT.polling_interval = 10 + + nec_neutron_agent.main() + + self.assertTrue(common_config.setup_logging.called) + agent.assert_has_calls([ + mock.call('br-int-x', 'dummy-helper', 10), + mock.call().daemon_loop() + ]) diff --git a/neutron/tests/unit/nec/test_nec_plugin.py b/neutron/tests/unit/nec/test_nec_plugin.py new file mode 100644 index 000000000..0a012b793 --- /dev/null +++ b/neutron/tests/unit/nec/test_nec_plugin.py @@ -0,0 +1,930 @@ +# Copyright (c) 2012 OpenStack Foundation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import os + +import fixtures +import mock +import webob.exc + +from neutron.common import constants +from neutron.common import test_lib +from neutron.common import topics +from neutron import context +from neutron.db import db_base_plugin_v2 +from neutron import manager +from neutron.plugins.nec.common import exceptions as nexc +from neutron.plugins.nec.db import api as ndb +from neutron.plugins.nec import nec_plugin +from neutron.tests.unit.nec import fake_ofc_manager +from neutron.tests.unit import test_db_plugin as test_plugin +from neutron.tests.unit import test_extension_allowedaddresspairs as test_pair + + +PLUGIN_NAME = 'neutron.plugins.nec.nec_plugin.NECPluginV2' +OFC_MANAGER = 'neutron.plugins.nec.nec_plugin.ofc_manager.OFCManager' +NOTIFIER = 'neutron.plugins.nec.nec_plugin.NECPluginV2AgentNotifierApi' +NEC_PLUGIN_INI = """ +[DEFAULT] +api_extensions_path = neutron/plugins/nec/extensions +[OFC] +driver = neutron.tests.unit.nec.stub_ofc_driver.StubOFCDriver +enable_packet_filter = False +""" + + +class NecPluginV2TestCaseBase(object): + _nec_ini = NEC_PLUGIN_INI + + def _set_nec_ini(self): + self.nec_ini_file = self.useFixture(fixtures.TempDir()).join("nec.ini") + with open(self.nec_ini_file, 'w') as f: + f.write(self._nec_ini) + if 'config_files' in test_lib.test_config.keys(): + for c in test_lib.test_config['config_files']: + if c.rfind("/nec.ini") > -1: + test_lib.test_config['config_files'].remove(c) + test_lib.test_config['config_files'].append(self.nec_ini_file) + else: + test_lib.test_config['config_files'] = [self.nec_ini_file] + + def _clean_nec_ini(self): + test_lib.test_config['config_files'].remove(self.nec_ini_file) + os.remove(self.nec_ini_file) + self.nec_ini_file = None + + def patch_remote_calls(self): + self.plugin_notifier_p = mock.patch(NOTIFIER) + self.ofc_manager_p = mock.patch(OFC_MANAGER) + self.plugin_notifier_p.start() + self.ofc_manager_p.start() + + def setup_nec_plugin_base(self): + self._set_nec_ini() + 
self.addCleanup(self._clean_nec_ini) + self.patch_remote_calls() + + +class NecPluginV2TestCase(NecPluginV2TestCaseBase, + test_plugin.NeutronDbPluginV2TestCase): + + _plugin_name = PLUGIN_NAME + + def rpcapi_update_ports(self, agent_id='nec-q-agent.fake', + datapath_id="0xabc", added=[], removed=[]): + kwargs = {'topic': topics.AGENT, + 'agent_id': agent_id, + 'datapath_id': datapath_id, + 'port_added': added, 'port_removed': removed} + self.callback_nec.update_ports(self.context, **kwargs) + + def setUp(self, plugin=None, ext_mgr=None): + + self._set_nec_ini() + self.addCleanup(self._clean_nec_ini) + plugin = plugin or self._plugin_name + super(NecPluginV2TestCase, self).setUp(plugin, ext_mgr=ext_mgr) + + self.plugin = manager.NeutronManager.get_plugin() + self.plugin.ofc = fake_ofc_manager.patch_ofc_manager() + self.ofc = self.plugin.ofc + self.callback_nec = nec_plugin.NECPluginV2RPCCallbacks(self.plugin) + self.context = context.get_admin_context() + self.net_create_status = 'ACTIVE' + self.port_create_status = 'DOWN' + + +class TestNecBasicGet(test_plugin.TestBasicGet, NecPluginV2TestCase): + pass + + +class TestNecV2HTTPResponse(test_plugin.TestV2HTTPResponse, + NecPluginV2TestCase): + pass + + +class TestNecNetworksV2(test_plugin.TestNetworksV2, NecPluginV2TestCase): + pass + + +class TestNecPortsV2Callback(NecPluginV2TestCase): + + def _get_portinfo(self, port_id): + return ndb.get_portinfo(self.context.session, port_id) + + def test_portinfo_create(self): + with self.port() as port: + port_id = port['port']['id'] + sport = self.plugin.get_port(self.context, port_id) + self.assertEqual(sport['status'], 'DOWN') + self.assertEqual(self.ofc.create_ofc_port.call_count, 0) + self.assertIsNone(self._get_portinfo(port_id)) + + portinfo = {'id': port_id, 'port_no': 123} + self.rpcapi_update_ports(added=[portinfo]) + + sport = self.plugin.get_port(self.context, port_id) + self.assertEqual(sport['status'], 'ACTIVE') + 
self.assertEqual(self.ofc.create_ofc_port.call_count, 1) + self.assertIsNotNone(self._get_portinfo(port_id)) + + expected = [ + mock.call.exists_ofc_port(mock.ANY, port_id), + mock.call.create_ofc_port(mock.ANY, port_id, mock.ANY), + ] + self.ofc.assert_has_calls(expected) + + def test_portinfo_delete_before_port_deletion(self): + self._test_portinfo_delete() + + def test_portinfo_delete_after_port_deletion(self): + self._test_portinfo_delete(portinfo_delete_first=False) + + def _test_portinfo_delete(self, portinfo_delete_first=True): + with self.port() as port: + port_id = port['port']['id'] + portinfo = {'id': port_id, 'port_no': 456} + self.assertEqual(self.ofc.create_ofc_port.call_count, 0) + self.assertIsNone(self._get_portinfo(port_id)) + + self.rpcapi_update_ports(added=[portinfo]) + self.assertEqual(self.ofc.create_ofc_port.call_count, 1) + self.assertEqual(self.ofc.delete_ofc_port.call_count, 0) + self.assertIsNotNone(self._get_portinfo(port_id)) + + # Before port-deletion, switch port removed message is sent. + if portinfo_delete_first: + self.rpcapi_update_ports(removed=[port_id]) + self.assertEqual(self.ofc.delete_ofc_port.call_count, 1) + self.assertIsNone(self._get_portinfo(port_id)) + + # The port and portinfo is expected to delete when exiting with-clause. + self.assertEqual(self.ofc.delete_ofc_port.call_count, 1) + self.assertIsNone(self._get_portinfo(port_id)) + if not portinfo_delete_first: + self.rpcapi_update_ports(removed=[port_id]) + + # Ensure port deletion is called once. 
+ self.assertEqual(self.ofc.delete_ofc_port.call_count, 1) + self.assertIsNone(self._get_portinfo(port_id)) + + expected = [ + mock.call.exists_ofc_port(mock.ANY, port_id), + mock.call.create_ofc_port(mock.ANY, port_id, mock.ANY), + mock.call.exists_ofc_port(mock.ANY, port_id), + mock.call.delete_ofc_port(mock.ANY, port_id, mock.ANY), + ] + self.ofc.assert_has_calls(expected) + + def test_portinfo_added_unknown_port(self): + portinfo = {'id': 'dummy-p1', 'port_no': 123} + self.rpcapi_update_ports(added=[portinfo]) + self.assertIsNone(ndb.get_portinfo(self.context.session, + 'dummy-p1')) + self.assertEqual(self.ofc.exists_ofc_port.call_count, 0) + self.assertEqual(self.ofc.create_ofc_port.call_count, 0) + + def _test_portinfo_change(self, portinfo_change_first=True): + with self.port() as port: + port_id = port['port']['id'] + self.assertEqual(self.ofc.create_ofc_port.call_count, 0) + + portinfo = {'id': port_id, 'port_no': 123} + self.rpcapi_update_ports(added=[portinfo]) + self.assertEqual(self.ofc.create_ofc_port.call_count, 1) + self.assertEqual(self.ofc.delete_ofc_port.call_count, 0) + self.assertEqual(ndb.get_portinfo(self.context.session, + port_id).port_no, 123) + + if portinfo_change_first: + portinfo = {'id': port_id, 'port_no': 456} + self.rpcapi_update_ports(added=[portinfo]) + # OFC port is recreated. + self.assertEqual(self.ofc.create_ofc_port.call_count, 2) + self.assertEqual(self.ofc.delete_ofc_port.call_count, 1) + self.assertEqual(ndb.get_portinfo(self.context.session, + port_id).port_no, 456) + + if not portinfo_change_first: + # The port is expected to delete when exiting with-clause. + self.assertEqual(self.ofc.create_ofc_port.call_count, 1) + self.assertEqual(self.ofc.delete_ofc_port.call_count, 1) + + portinfo = {'id': port_id, 'port_no': 456} + self.rpcapi_update_ports(added=[portinfo]) + # No OFC operations are expected. 
+ self.assertEqual(self.ofc.create_ofc_port.call_count, 1) + self.assertEqual(self.ofc.delete_ofc_port.call_count, 1) + self.assertIsNone(ndb.get_portinfo(self.context.session, port_id)) + + def test_portinfo_change(self): + self._test_portinfo_change() + + def test_portinfo_change_for_nonexisting_port(self): + self._test_portinfo_change(portinfo_change_first=False) + + def test_port_migration(self): + agent_id_a, datapath_id_a, port_no_a = 'nec-q-agent.aa', '0xaaa', 10 + agent_id_b, datapath_id_b, port_no_b = 'nec-q-agent.bb', '0xbbb', 11 + + with self.port() as port: + port_id = port['port']['id'] + sport = self.plugin.get_port(self.context, port_id) + self.assertEqual(sport['status'], 'DOWN') + + portinfo_a = {'id': port_id, 'port_no': port_no_a} + self.rpcapi_update_ports(agent_id=agent_id_a, + datapath_id=datapath_id_a, + added=[portinfo_a]) + + portinfo_b = {'id': port_id, 'port_no': port_no_b} + self.rpcapi_update_ports(agent_id=agent_id_b, + datapath_id=datapath_id_b, + added=[portinfo_b]) + + self.rpcapi_update_ports(agent_id=agent_id_a, + datapath_id=datapath_id_a, + removed=[port_id]) + + sport = self.plugin.get_port(self.context, port_id) + self.assertEqual(sport['status'], 'ACTIVE') + self.assertTrue(self.ofc.ofc_ports[port_id]) + + expected = [ + mock.call.exists_ofc_port(mock.ANY, port_id), + mock.call.create_ofc_port(mock.ANY, port_id, mock.ANY), + mock.call.exists_ofc_port(mock.ANY, port_id), + mock.call.delete_ofc_port(mock.ANY, port_id, mock.ANY), + mock.call.exists_ofc_port(mock.ANY, port_id), + mock.call.create_ofc_port(mock.ANY, port_id, mock.ANY), + ] + self.ofc.assert_has_calls(expected) + self.assertEqual(2, self.ofc.create_ofc_port.call_count) + self.assertEqual(1, self.ofc.delete_ofc_port.call_count) + + def test_portinfo_readd(self): + with self.port() as port: + port_id = port['port']['id'] + self.plugin.get_port(self.context, port_id) + + portinfo = {'id': port_id, 'port_no': 123} + self.rpcapi_update_ports(added=[portinfo]) + + sport 
= self.plugin.get_port(self.context, port_id) + self.assertEqual(sport['status'], 'ACTIVE') + self.assertEqual(self.ofc.create_ofc_port.call_count, 1) + self.assertEqual(self.ofc.delete_ofc_port.call_count, 0) + self.assertIsNotNone(self._get_portinfo(port_id)) + + portinfo = {'id': port_id, 'port_no': 123} + self.rpcapi_update_ports(added=[portinfo]) + + sport = self.plugin.get_port(self.context, port_id) + self.assertEqual(sport['status'], 'ACTIVE') + self.assertEqual(self.ofc.create_ofc_port.call_count, 1) + self.assertEqual(self.ofc.delete_ofc_port.call_count, 0) + self.assertIsNotNone(self._get_portinfo(port_id)) + + +class TestNecPluginDbTest(NecPluginV2TestCase): + + def test_update_resource(self): + with self.network() as network: + self.assertEqual("ACTIVE", network['network']['status']) + net_id = network['network']['id'] + for status in ["DOWN", "BUILD", "ERROR", "ACTIVE"]: + self.plugin._update_resource_status( + self.context, 'network', net_id, + getattr(constants, 'NET_STATUS_%s' % status)) + n = self.plugin._get_network(self.context, net_id) + self.assertEqual(status, n.status) + + +class TestNecPluginOfcManager(NecPluginV2TestCase): + def setUp(self): + super(TestNecPluginOfcManager, self).setUp() + self.ofc = self.plugin.ofc + + def _create_resource(self, resource, data): + collection = resource + 's' + data = {resource: data} + req = self.new_create_request(collection, data) + res = self.deserialize(self.fmt, req.get_response(self.api)) + return res[resource] + + def _update_resource(self, resource, id, data): + collection = resource + 's' + data = {resource: data} + req = self.new_update_request(collection, data, id) + res = self.deserialize(self.fmt, req.get_response(self.api)) + return res[resource] + + def _show_resource(self, resource, id): + collection = resource + 's' + req = self.new_show_request(collection, id) + res = self.deserialize(self.fmt, req.get_response(self.api)) + return res[resource] + + def _list_resource(self, resource): + 
collection = resource + 's' + req = self.new_list_request(collection) + res = req.get_response(self.api) + return res[collection] + + def _delete_resource(self, resource, id): + collection = resource + 's' + req = self.new_delete_request(collection, id) + res = req.get_response(self.api) + return res.status_int + + def test_create_network(self): + net = None + ctx = mock.ANY + with self.network() as network: + net = network['network'] + self.assertEqual(network['network']['status'], 'ACTIVE') + + expected = [ + mock.call.exists_ofc_tenant(ctx, self._tenant_id), + mock.call.create_ofc_tenant(ctx, self._tenant_id), + mock.call.create_ofc_network(ctx, self._tenant_id, net['id'], + net['name']), + mock.call.exists_ofc_network(ctx, net['id']), + mock.call.delete_ofc_network(ctx, net['id'], mock.ANY), + mock.call.exists_ofc_tenant(ctx, self._tenant_id), + mock.call.delete_ofc_tenant(ctx, self._tenant_id) + ] + self.ofc.assert_has_calls(expected) + + def test_create_network_with_admin_state_down(self): + net = None + ctx = mock.ANY + with self.network(admin_state_up=False) as network: + net = network['network'] + self.assertEqual(network['network']['status'], 'DOWN') + + expected = [ + mock.call.exists_ofc_tenant(ctx, self._tenant_id), + mock.call.create_ofc_tenant(ctx, self._tenant_id), + mock.call.create_ofc_network(ctx, self._tenant_id, net['id'], + net['name']), + mock.call.exists_ofc_network(ctx, net['id']), + mock.call.delete_ofc_network(ctx, net['id'], mock.ANY), + mock.call.exists_ofc_tenant(ctx, self._tenant_id), + mock.call.delete_ofc_tenant(ctx, self._tenant_id) + ] + self.ofc.assert_has_calls(expected) + + def test_create_two_network(self): + nets = [] + ctx = mock.ANY + with self.network() as net1: + nets.append(net1['network']) + self.assertEqual(net1['network']['status'], 'ACTIVE') + with self.network() as net2: + nets.append(net2['network']) + self.assertEqual(net2['network']['status'], 'ACTIVE') + + expected = [ + mock.call.exists_ofc_tenant(ctx, 
self._tenant_id), +            mock.call.create_ofc_tenant(ctx, self._tenant_id), +            mock.call.create_ofc_network(ctx, self._tenant_id, nets[0]['id'], +                                         nets[0]['name']), +            mock.call.exists_ofc_tenant(ctx, self._tenant_id), +            mock.call.create_ofc_network(ctx, self._tenant_id, nets[1]['id'], +                                         nets[1]['name']), +            mock.call.exists_ofc_network(ctx, nets[1]['id']), +            mock.call.delete_ofc_network(ctx, nets[1]['id'], mock.ANY), +            mock.call.exists_ofc_network(ctx, nets[0]['id']), +            mock.call.delete_ofc_network(ctx, nets[0]['id'], mock.ANY), +            mock.call.exists_ofc_tenant(ctx, self._tenant_id), +            mock.call.delete_ofc_tenant(ctx, self._tenant_id) +        ] +        self.ofc.assert_has_calls(expected) + +    def test_create_network_fail(self): +        self.ofc.create_ofc_network.side_effect = nexc.OFCException( +            reason='hoge') + +        net = None +        ctx = mock.ANY +        # NOTE: We don't delete network through api, but db will be cleaned in +        # tearDown(). When OFCManager has failed to create a network on OFC, +        # it does not keep the ofc_network entry and will fail to delete this +        # network from OFC. Deletion of network is not the scope of this test. 
+ with self.network(do_delete=False) as network: + net = network['network'] + self.assertEqual(net['status'], 'ERROR') + net_ref = self._show('networks', net['id']) + self.assertEqual(net_ref['network']['status'], 'ERROR') + + expected = [ + mock.call.exists_ofc_tenant(ctx, self._tenant_id), + mock.call.create_ofc_tenant(ctx, self._tenant_id), + mock.call.create_ofc_network(ctx, self._tenant_id, net['id'], + net['name']) + ] + self.ofc.assert_has_calls(expected) + + def test_update_network(self): + net = None + ctx = mock.ANY + with self.network() as network: + net = network['network'] + self.assertEqual(network['network']['status'], 'ACTIVE') + + net_ref = self._show('networks', net['id']) + self.assertEqual(net_ref['network']['status'], 'ACTIVE') + + # Set admin_state_up to False + res = self._update_resource('network', net['id'], + {'admin_state_up': False}) + self.assertFalse(res['admin_state_up']) + self.assertEqual(res['status'], 'DOWN') + + net_ref = self._show('networks', net['id']) + self.assertEqual(net_ref['network']['status'], 'DOWN') + + # Set admin_state_up to True + res = self._update_resource('network', net['id'], + {'admin_state_up': True}) + self.assertTrue(res['admin_state_up']) + self.assertEqual(res['status'], 'ACTIVE') + + net_ref = self._show('networks', net['id']) + self.assertEqual(net_ref['network']['status'], 'ACTIVE') + + expected = [ + mock.call.exists_ofc_tenant(ctx, self._tenant_id), + mock.call.create_ofc_tenant(ctx, self._tenant_id), + mock.call.create_ofc_network(ctx, self._tenant_id, net['id'], + net['name']), + mock.call.exists_ofc_network(ctx, net['id']), + mock.call.delete_ofc_network(ctx, net['id'], mock.ANY), + mock.call.exists_ofc_tenant(ctx, self._tenant_id), + mock.call.delete_ofc_tenant(ctx, self._tenant_id) + ] + self.ofc.assert_has_calls(expected) + + def test_create_port_no_ofc_creation(self): + net = None + p1 = None + ctx = mock.ANY + with self.subnet() as subnet: + with self.port(subnet=subnet) as port: + p1 = 
port['port'] + net_id = port['port']['network_id'] + net = self._show_resource('network', net_id) + self.assertEqual(net['status'], 'ACTIVE') + self.assertEqual(p1['status'], 'DOWN') + + p1_ref = self._show('ports', p1['id']) + self.assertEqual(p1_ref['port']['status'], 'DOWN') + + expected = [ + mock.call.exists_ofc_tenant(ctx, self._tenant_id), + mock.call.create_ofc_tenant(ctx, self._tenant_id), + mock.call.create_ofc_network(ctx, self._tenant_id, net['id'], + net['name']), + + mock.call.exists_ofc_port(ctx, p1['id']), + mock.call.exists_ofc_network(ctx, net['id']), + mock.call.delete_ofc_network(ctx, net['id'], mock.ANY), + mock.call.exists_ofc_tenant(ctx, self._tenant_id), + mock.call.delete_ofc_tenant(ctx, self._tenant_id) + ] + self.ofc.assert_has_calls(expected) + + def test_create_port_with_ofc_creation(self): + net = None + p1 = None + ctx = mock.ANY + with self.subnet() as subnet: + with self.port(subnet=subnet) as port: + p1 = port['port'] + net_id = port['port']['network_id'] + net = self._show_resource('network', net_id) + self.assertEqual(net['status'], 'ACTIVE') + self.assertEqual(p1['status'], 'DOWN') + + p1_ref = self._show('ports', p1['id']) + self.assertEqual(p1_ref['port']['status'], 'DOWN') + + # Check the port is not created on OFC + self.assertFalse(self.ofc.create_ofc_port.call_count) + + # Register portinfo, then the port is created on OFC + portinfo = {'id': p1['id'], 'port_no': 123} + self.rpcapi_update_ports(added=[portinfo]) + self.assertEqual(self.ofc.create_ofc_port.call_count, 1) + + p1_ref = self._show('ports', p1['id']) + self.assertEqual(p1_ref['port']['status'], 'ACTIVE') + + expected = [ + mock.call.exists_ofc_tenant(ctx, self._tenant_id), + mock.call.create_ofc_tenant(ctx, self._tenant_id), + mock.call.create_ofc_network(ctx, self._tenant_id, net['id'], + net['name']), + + mock.call.exists_ofc_port(ctx, p1['id']), + mock.call.create_ofc_port(ctx, p1['id'], mock.ANY), + + mock.call.exists_ofc_port(ctx, p1['id']), + 
mock.call.delete_ofc_port(ctx, p1['id'], mock.ANY), + mock.call.exists_ofc_network(ctx, net['id']), + mock.call.delete_ofc_network(ctx, net['id'], mock.ANY), + mock.call.exists_ofc_tenant(ctx, self._tenant_id), + mock.call.delete_ofc_tenant(ctx, self._tenant_id) + ] + self.ofc.assert_has_calls(expected) + + def test_delete_network_with_dhcp_port(self): + ctx = mock.ANY + with self.network() as network: + with self.subnet(network=network): + net = network['network'] + p = self._create_resource( + 'port', + {'network_id': net['id'], + 'tenant_id': net['tenant_id'], + 'device_owner': constants.DEVICE_OWNER_DHCP, + 'device_id': 'dhcp-port1'}) + # Make sure that the port is created on OFC. + portinfo = {'id': p['id'], 'port_no': 123} + self.rpcapi_update_ports(added=[portinfo]) + # In a case of dhcp port, the port is deleted automatically + # when delete_network. + + expected = [ + mock.call.exists_ofc_tenant(ctx, self._tenant_id), + mock.call.create_ofc_tenant(ctx, self._tenant_id), + mock.call.create_ofc_network(ctx, self._tenant_id, + net['id'], net['name']), + mock.call.exists_ofc_port(ctx, p['id']), + mock.call.create_ofc_port(ctx, p['id'], mock.ANY), + mock.call.exists_ofc_port(ctx, p['id']), + mock.call.delete_ofc_port(ctx, p['id'], mock.ANY), + mock.call.exists_ofc_network(ctx, net['id']), + mock.call.delete_ofc_network(ctx, net['id'], mock.ANY), + mock.call.exists_ofc_tenant(ctx, self._tenant_id), + mock.call.delete_ofc_tenant(ctx, self._tenant_id) + ] + self.ofc.assert_has_calls(expected) + + def test_delete_network_with_error_status(self): + self.ofc.set_raise_exc('create_ofc_network', + nexc.OFCException(reason='fake error')) + + with self.network() as net: + net_id = net['network']['id'] + net_ref = self._show('networks', net_id) + self.assertEqual(net_ref['network']['status'], 'ERROR') + + ctx = mock.ANY + tenant_id = self._tenant_id + net_name = mock.ANY + net = mock.ANY + expected = [ + mock.call.exists_ofc_tenant(ctx, tenant_id), + 
mock.call.create_ofc_tenant(ctx, tenant_id), + mock.call.create_ofc_network(ctx, tenant_id, net_id, net_name), + mock.call.exists_ofc_network(ctx, net_id), + mock.call.exists_ofc_tenant(ctx, tenant_id), + mock.call.delete_ofc_tenant(ctx, tenant_id), + ] + self.ofc.assert_has_calls(expected) + self.assertFalse(self.ofc.delete_ofc_network.call_count) + + def test_delete_network_with_ofc_deletion_failure(self): + self.ofc.set_raise_exc('delete_ofc_network', + nexc.OFCException(reason='hoge')) + + with self.network() as net: + net_id = net['network']['id'] + + self._delete('networks', net_id, + expected_code=webob.exc.HTTPInternalServerError.code) + + net_ref = self._show('networks', net_id) + self.assertEqual(net_ref['network']['status'], 'ERROR') + + self.ofc.set_raise_exc('delete_ofc_network', None) + + ctx = mock.ANY + tenant = mock.ANY + net_name = mock.ANY + net = mock.ANY + expected = [ + mock.call.create_ofc_network(ctx, tenant, net_id, net_name), + mock.call.exists_ofc_network(ctx, net_id), + mock.call.delete_ofc_network(ctx, net_id, net), + mock.call.exists_ofc_network(ctx, net_id), + mock.call.delete_ofc_network(ctx, net_id, net), + ] + self.ofc.assert_has_calls(expected) + self.assertEqual(self.ofc.delete_ofc_network.call_count, 2) + + def test_delete_network_with_deactivating_auto_delete_port_failure(self): + self.ofc.set_raise_exc('delete_ofc_port', + nexc.OFCException(reason='hoge')) + + with self.network(do_delete=False) as net: + net_id = net['network']['id'] + + device_owner = db_base_plugin_v2.AUTO_DELETE_PORT_OWNERS[0] + port = self._make_port(self.fmt, net_id, device_owner=device_owner) + port_id = port['port']['id'] + + portinfo = {'id': port_id, 'port_no': 123} + self.rpcapi_update_ports(added=[portinfo]) + + self._delete('networks', net_id, + expected_code=webob.exc.HTTPInternalServerError.code) + + net_ref = self._show('networks', net_id) + self.assertEqual(net_ref['network']['status'], 'ACTIVE') + port_ref = self._show('ports', port_id) + 
self.assertEqual(port_ref['port']['status'], 'ERROR') + + self.ofc.set_raise_exc('delete_ofc_port', None) + self._delete('networks', net_id) + + ctx = mock.ANY + tenant = mock.ANY + net_name = mock.ANY + net = mock.ANY + port = mock.ANY + expected = [ + mock.call.create_ofc_network(ctx, tenant, net_id, net_name), + mock.call.exists_ofc_port(ctx, port_id), + mock.call.create_ofc_port(ctx, port_id, port), + mock.call.exists_ofc_port(ctx, port_id), + mock.call.delete_ofc_port(ctx, port_id, port), + mock.call.exists_ofc_port(ctx, port_id), + mock.call.delete_ofc_port(ctx, port_id, port), + mock.call.exists_ofc_network(ctx, net_id), + mock.call.delete_ofc_network(ctx, net_id, net) + ] + self.ofc.assert_has_calls(expected) + self.assertEqual(self.ofc.delete_ofc_network.call_count, 1) + + def test_update_port(self): + self._test_update_port_with_admin_state(resource='port') + + def test_update_network_with_ofc_port(self): + self._test_update_port_with_admin_state(resource='network') + + def _test_update_port_with_admin_state(self, resource='port'): + net = None + p1 = None + ctx = mock.ANY + + if resource == 'network': + net_ini_admin_state = False + port_ini_admin_state = True + else: + net_ini_admin_state = True + port_ini_admin_state = False + + with self.network(admin_state_up=net_ini_admin_state) as network: + with self.subnet(network=network) as subnet: + with self.port(subnet=subnet, + admin_state_up=port_ini_admin_state) as port: + p1 = port['port'] + net_id = port['port']['network_id'] + res_id = net_id if resource == 'network' else p1['id'] + self.assertEqual(p1['status'], 'DOWN') + + net = self._show_resource('network', net_id) + + # Check the port is not created on OFC + self.assertFalse(self.ofc.create_ofc_port.call_count) + + # Register portinfo, then the port is created on OFC + portinfo = {'id': p1['id'], 'port_no': 123} + self.rpcapi_update_ports(added=[portinfo]) + self.assertFalse(self.ofc.create_ofc_port.call_count) + + res = 
self._update_resource(resource, res_id, + {'admin_state_up': True}) + self.assertEqual(res['status'], 'ACTIVE') + self.assertEqual(self.ofc.create_ofc_port.call_count, 1) + self.assertFalse(self.ofc.delete_ofc_port.call_count) + + res = self._update_resource(resource, res_id, + {'admin_state_up': False}) + self.assertEqual(res['status'], 'DOWN') + self.assertEqual(self.ofc.delete_ofc_port.call_count, 1) + + expected = [ + mock.call.exists_ofc_tenant(ctx, self._tenant_id), + mock.call.create_ofc_tenant(ctx, self._tenant_id), + mock.call.create_ofc_network(ctx, self._tenant_id, net['id'], + net['name']), + + mock.call.exists_ofc_port(ctx, p1['id']), + mock.call.create_ofc_port(ctx, p1['id'], mock.ANY), + + mock.call.exists_ofc_port(ctx, p1['id']), + mock.call.delete_ofc_port(ctx, p1['id'], mock.ANY), + + mock.call.exists_ofc_port(ctx, p1['id']), + mock.call.exists_ofc_network(ctx, net['id']), + mock.call.delete_ofc_network(ctx, net['id'], mock.ANY), + mock.call.exists_ofc_tenant(ctx, self._tenant_id), + mock.call.delete_ofc_tenant(ctx, self._tenant_id) + ] + self.ofc.assert_has_calls(expected) + + def test_update_port_with_ofc_creation_failure(self): + with self.port(admin_state_up=False) as port: + port_id = port['port']['id'] + portinfo = {'id': port_id, 'port_no': 123} + self.rpcapi_update_ports(added=[portinfo]) + + self.ofc.set_raise_exc('create_ofc_port', + nexc.OFCException(reason='hoge')) + + body = {'port': {'admin_state_up': True}} + res = self._update('ports', port_id, body) + self.assertEqual(res['port']['status'], 'ERROR') + port_ref = self._show('ports', port_id) + self.assertEqual(port_ref['port']['status'], 'ERROR') + + body = {'port': {'admin_state_up': False}} + res = self._update('ports', port_id, body) + self.assertEqual(res['port']['status'], 'ERROR') + port_ref = self._show('ports', port_id) + self.assertEqual(port_ref['port']['status'], 'ERROR') + + self.ofc.set_raise_exc('create_ofc_port', None) + + body = {'port': {'admin_state_up': True}} + 
res = self._update('ports', port_id, body) + self.assertEqual(res['port']['status'], 'ACTIVE') + port_ref = self._show('ports', port_id) + self.assertEqual(port_ref['port']['status'], 'ACTIVE') + + ctx = mock.ANY + port = mock.ANY + expected = [ + mock.call.exists_ofc_port(ctx, port_id), + mock.call.create_ofc_port(ctx, port_id, port), + mock.call.exists_ofc_port(ctx, port_id), + mock.call.exists_ofc_port(ctx, port_id), + mock.call.create_ofc_port(ctx, port_id, port), + mock.call.exists_ofc_port(ctx, port_id), + mock.call.delete_ofc_port(ctx, port_id, port), + ] + self.ofc.assert_has_calls(expected) + self.assertEqual(self.ofc.create_ofc_port.call_count, 2) + + def test_update_port_with_ofc_deletion_failure(self): + with self.port() as port: + port_id = port['port']['id'] + portinfo = {'id': port_id, 'port_no': 123} + self.rpcapi_update_ports(added=[portinfo]) + + self.ofc.set_raise_exc('delete_ofc_port', + nexc.OFCException(reason='hoge')) + + body = {'port': {'admin_state_up': False}} + self._update('ports', port_id, body, + expected_code=webob.exc.HTTPInternalServerError.code) + port_ref = self._show('ports', port_id) + self.assertEqual(port_ref['port']['status'], 'ERROR') + + body = {'port': {'admin_state_up': True}} + res = self._update('ports', port_id, body) + self.assertEqual(res['port']['status'], 'ERROR') + port_ref = self._show('ports', port_id) + self.assertEqual(port_ref['port']['status'], 'ERROR') + + self.ofc.set_raise_exc('delete_ofc_port', None) + + body = {'port': {'admin_state_up': False}} + res = self._update('ports', port_id, body) + self.assertEqual(res['port']['status'], 'DOWN') + port_ref = self._show('ports', port_id) + self.assertEqual(port_ref['port']['status'], 'DOWN') + + ctx = mock.ANY + port = mock.ANY + expected = [ + mock.call.exists_ofc_port(ctx, port_id), + mock.call.create_ofc_port(ctx, port_id, port), + mock.call.exists_ofc_port(ctx, port_id), + mock.call.delete_ofc_port(ctx, port_id, port), + mock.call.exists_ofc_port(ctx, 
port_id), + mock.call.exists_ofc_port(ctx, port_id), + mock.call.delete_ofc_port(ctx, port_id, port), + ] + self.ofc.assert_has_calls(expected) + self.assertEqual(self.ofc.delete_ofc_port.call_count, 2) + + def test_delete_port_with_error_status(self): + self.ofc.set_raise_exc('create_ofc_port', + nexc.OFCException(reason='fake')) + + with self.port() as port: + port_id = port['port']['id'] + portinfo = {'id': port_id, 'port_no': 123} + self.rpcapi_update_ports(added=[portinfo]) + port_ref = self._show('ports', port_id) + self.assertEqual(port_ref['port']['status'], 'ERROR') + + ctx = mock.ANY + port = mock.ANY + expected = [ + mock.call.exists_ofc_port(ctx, port_id), + mock.call.create_ofc_port(ctx, port_id, port), + mock.call.exists_ofc_port(ctx, port_id), + ] + self.ofc.assert_has_calls(expected) + self.assertFalse(self.ofc.delete_ofc_port.call_count) + + def test_delete_port_with_ofc_deletion_failure(self): + self.ofc.set_raise_exc('delete_ofc_port', + nexc.OFCException(reason='hoge')) + + with self.port() as port: + port_id = port['port']['id'] + + portinfo = {'id': port_id, 'port_no': 123} + self.rpcapi_update_ports(added=[portinfo]) + + self._delete('ports', port_id, + expected_code=webob.exc.HTTPInternalServerError.code) + + port_ref = self._show('ports', port_id) + self.assertEqual(port_ref['port']['status'], 'ERROR') + + self.ofc.set_raise_exc('delete_ofc_port', None) + + ctx = mock.ANY + port = mock.ANY + expected = [ + mock.call.exists_ofc_port(ctx, port_id), + mock.call.create_ofc_port(ctx, port_id, port), + mock.call.exists_ofc_port(ctx, port_id), + mock.call.delete_ofc_port(ctx, port_id, port), + mock.call.exists_ofc_port(ctx, port_id), + mock.call.delete_ofc_port(ctx, port_id, port) + ] + self.ofc.assert_has_calls(expected) + self.assertEqual(self.ofc.delete_ofc_port.call_count, 2) + + def _test_delete_port_for_disappeared_ofc_port(self, raised_exc): + self.ofc.set_raise_exc('delete_ofc_port', raised_exc) + + with self.port(no_delete=True) as port: 
+ port_id = port['port']['id'] + + portinfo = {'id': port_id, 'port_no': 123} + self.rpcapi_update_ports(added=[portinfo]) + + self._delete('ports', port_id) + + # Check the port on neutron db is deleted. NotFound for + # neutron port itself should be handled by called. It is + # consistent with ML2 behavior, but it may need to be + # revisit. + self._show('ports', port_id, + expected_code=webob.exc.HTTPNotFound.code) + + ctx = mock.ANY + port = mock.ANY + expected = [ + mock.call.exists_ofc_port(ctx, port_id), + mock.call.create_ofc_port(ctx, port_id, port), + mock.call.exists_ofc_port(ctx, port_id), + mock.call.delete_ofc_port(ctx, port_id, port), + ] + self.ofc.assert_has_calls(expected) + self.assertEqual(self.ofc.delete_ofc_port.call_count, 1) + + def test_delete_port_for_nonexist_ofc_port(self): + self._test_delete_port_for_disappeared_ofc_port( + nexc.OFCResourceNotFound(resource='ofc_port')) + + def test_delete_port_for_noofcmap_ofc_port(self): + self._test_delete_port_for_disappeared_ofc_port( + nexc.OFCMappingNotFound(resource='port', neutron_id='port1')) + + +class TestNecAllowedAddressPairs(NecPluginV2TestCase, + test_pair.TestAllowedAddressPairs): + pass diff --git a/neutron/tests/unit/nec/test_ofc_client.py b/neutron/tests/unit/nec/test_ofc_client.py new file mode 100644 index 000000000..93f75a2b0 --- /dev/null +++ b/neutron/tests/unit/nec/test_ofc_client.py @@ -0,0 +1,179 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013 NEC Corporation. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Akihiro Motoki + +import mock +from oslo.config import cfg +import requests + +from neutron.openstack.common import jsonutils as json +from neutron.plugins.nec.common import config +from neutron.plugins.nec.common import exceptions as nexc +from neutron.plugins.nec.common import ofc_client +from neutron.tests import base + + +class FakeResponse(requests.Response): + def __init__(self, status_code=None, text=None, headers=None): + self._text = text + self.status_code = status_code + if headers is not None: + self.headers = headers + + @property + def text(self): + return self._text + + +class OFCClientTest(base.BaseTestCase): + + def _test_do_request(self, status, resbody, expected_data, exctype=None, + exc_checks=None, path_prefix=None): + req = mock.Mock(return_value=(FakeResponse(status, resbody))) + + with mock.patch.object(requests, 'request', req): + client = ofc_client.OFCClient() + path = '/somewhere' + realpath = path_prefix + path if path_prefix else path + if exctype: + e = self.assertRaises(exctype, client.do_request, + 'GET', path, body={}) + self.assertEqual(expected_data, str(e)) + if exc_checks: + for k, v in exc_checks.items(): + self.assertEqual(v, getattr(e, k)) + else: + response = client.do_request('GET', path, body={}) + self.assertEqual(response, expected_data) + + headers = {"Content-Type": "application/json"} + req.assert_called_with('GET', 'http://127.0.0.1:8888' + realpath, + verify=True, cert={}, data='{}', + headers=headers) + + def test_do_request_200_json_value(self): + self._test_do_request(200, json.dumps([1, 2, 3]), [1, 2, 3]) + + def test_do_request_200_string(self): + self._test_do_request(200, 'abcdef', 'abcdef') + + def test_do_request_200_no_body(self): + self._test_do_request(200, None, None) + + def test_do_request_other_success_codes(self): + for status in [201, 202, 204]: + self._test_do_request(status, 
None, None) + + def test_do_request_with_path_prefix(self): + config.CONF.set_override('path_prefix', '/dummy', group='OFC') + self._test_do_request(200, json.dumps([1, 2, 3]), [1, 2, 3], + path_prefix='/dummy') + + def test_do_request_returns_404(self): + resbody = '' + errmsg = _("The specified OFC resource (/somewhere) is not found.") + self._test_do_request(404, resbody, errmsg, nexc.OFCResourceNotFound) + + def test_do_request_error_no_body(self): + errmsg = _("An OFC exception has occurred: Operation on OFC failed") + exc_checks = {'status': 400, 'err_code': None, 'err_msg': None} + self._test_do_request(400, None, errmsg, nexc.OFCException, exc_checks) + + def test_do_request_error_string_body(self): + resbody = 'This is an error.' + errmsg = _("An OFC exception has occurred: Operation on OFC failed") + exc_checks = {'status': 400, 'err_code': None, + 'err_msg': 'This is an error.'} + self._test_do_request(400, resbody, errmsg, nexc.OFCException, + exc_checks) + + def test_do_request_error_json_body(self): + resbody = json.dumps({'err_code': 40022, + 'err_msg': 'This is an error.'}) + errmsg = _("An OFC exception has occurred: Operation on OFC failed") + exc_checks = {'status': 400, 'err_code': 40022, + 'err_msg': 'This is an error.'} + self._test_do_request(400, resbody, errmsg, nexc.OFCException, + exc_checks) + + def test_do_request_socket_error(self): + data = _("An OFC exception has occurred: Failed to connect OFC : ") + + req = mock.Mock() + req.side_effect = requests.exceptions.RequestException + + with mock.patch.object(requests, 'request', req): + client = ofc_client.OFCClient() + + e = self.assertRaises(nexc.OFCException, client.do_request, + 'GET', '/somewhere', body={}) + self.assertEqual(data, str(e)) + for k in ['status', 'err_code', 'err_msg']: + self.assertIsNone(getattr(e, k)) + + headers = {"Content-Type": "application/json"} + req.assert_called_with('GET', 'http://127.0.0.1:8888/somewhere', + verify=True, cert={}, data='{}', + 
headers=headers) + + def test_do_request_retry_fail_after_one_attempts(self): + self._test_do_request_retry_after(1, api_max_attempts=1) + + def test_do_request_retry_fail_with_max_attempts(self): + self._test_do_request_retry_after(3) + + def test_do_request_retry_succeed_with_2nd_attempt(self): + self._test_do_request_retry_after(2, succeed_final=True) + + def test_do_request_retry_succeed_with_1st_attempt(self): + self._test_do_request_retry_after(1, succeed_final=True) + + def _test_do_request_retry_after(self, exp_request_count, + api_max_attempts=None, + succeed_final=False): + if api_max_attempts is not None: + cfg.CONF.set_override('api_max_attempts', api_max_attempts, + group='OFC') + + res_unavail = FakeResponse(503, headers={'retry-after': '10'}) + res_ok = FakeResponse(200) + + req = mock.Mock() + if succeed_final: + req.side_effect = ([res_unavail] * (exp_request_count - 1) + + [res_ok]) + else: + req.side_effect = [res_unavail] * exp_request_count + + with mock.patch.object(requests, 'request', req): + with mock.patch('time.sleep') as sleep: + client = ofc_client.OFCClient() + if succeed_final: + ret = client.do_request('GET', '/somewhere') + self.assertIsNone(ret) + else: + e = self.assertRaises(nexc.OFCServiceUnavailable, + client.do_request, + 'GET', '/somewhere') + self.assertEqual('10', e.retry_after) + + headers = {"Content-Type": "application/json"} + req.assert_called_with('GET', 'http://127.0.0.1:8888/somewhere', + verify=True, cert={}, data=None, + headers=headers) + self.assertEqual(exp_request_count, req.call_count) + self.assertEqual(exp_request_count - 1, sleep.call_count) diff --git a/neutron/tests/unit/nec/test_ofc_manager.py b/neutron/tests/unit/nec/test_ofc_manager.py new file mode 100644 index 000000000..e4543c21f --- /dev/null +++ b/neutron/tests/unit/nec/test_ofc_manager.py @@ -0,0 +1,297 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 NEC Corporation. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# @author: Ryota MIBU + +import mock + +from neutron import context +from neutron.db import api as db +from neutron.openstack.common import uuidutils +from neutron.plugins.nec.common import config +from neutron.plugins.nec.db import api as ndb +from neutron.plugins.nec.db import models as nmodels # noqa +from neutron.plugins.nec import ofc_manager +from neutron.tests import base + + +class FakePortInfo(object): + def __init__(self, id, datapath_id, port_no=0, + vlan_id=65535, mac='00:11:22:33:44:55'): + self.data = {'id': id, 'datapath_id': datapath_id, + 'port_no': port_no, 'vlan_id': vlan_id, 'mac': mac} + + def __getattr__(self, name): + if name in self.fields: + return self[name] + else: + raise AttributeError(name) + + +class OFCManagerTestBase(base.BaseTestCase): + """Class conisting of OFCManager unit tests.""" + + def setUp(self): + super(OFCManagerTestBase, self).setUp() + db.configure_db() + driver = "neutron.tests.unit.nec.stub_ofc_driver.StubOFCDriver" + config.CONF.set_override('driver', driver, 'OFC') + self.addCleanup(ndb.clear_db) + self.plugin = mock.Mock() + self.plugin.get_packet_filters_for_port.return_value = None + self.ofc = ofc_manager.OFCManager(self.plugin) + # NOTE: enable_autocheck() is a feature of StubOFCDriver + self.ofc.driver.enable_autocheck() + self.ctx = context.get_admin_context() + + def get_random_params(self): + """create random parameters for portinfo test.""" + tenant = 
uuidutils.generate_uuid() + network = uuidutils.generate_uuid() + port = uuidutils.generate_uuid() + _filter = uuidutils.generate_uuid() + none = uuidutils.generate_uuid() + return tenant, network, port, _filter, none + + +class OFCManagerTest(OFCManagerTestBase): + def testa_create_ofc_tenant(self): + """test create ofc_tenant.""" + t, n, p, f, none = self.get_random_params() + self.assertFalse(ndb.get_ofc_item(self.ctx.session, 'ofc_tenant', t)) + self.ofc.create_ofc_tenant(self.ctx, t) + self.assertTrue(ndb.get_ofc_item(self.ctx.session, 'ofc_tenant', t)) + tenant = ndb.get_ofc_item(self.ctx.session, 'ofc_tenant', t) + self.assertEqual(tenant.ofc_id, "ofc-" + t[:-4]) + + def testb_exists_ofc_tenant(self): + """test exists_ofc_tenant.""" + t, n, p, f, none = self.get_random_params() + self.assertFalse(self.ofc.exists_ofc_tenant(self.ctx, t)) + self.ofc.create_ofc_tenant(self.ctx, t) + self.assertTrue(self.ofc.exists_ofc_tenant(self.ctx, t)) + + def testc_delete_ofc_tenant(self): + """test delete ofc_tenant.""" + t, n, p, f, none = self.get_random_params() + self.ofc.create_ofc_tenant(self.ctx, t) + self.assertTrue(ndb.get_ofc_item(self.ctx.session, 'ofc_tenant', t)) + self.ofc.delete_ofc_tenant(self.ctx, t) + self.assertFalse(ndb.get_ofc_item(self.ctx.session, 'ofc_tenant', t)) + + def testd_create_ofc_network(self): + """test create ofc_network.""" + t, n, p, f, none = self.get_random_params() + self.ofc.create_ofc_tenant(self.ctx, t) + self.assertFalse(ndb.get_ofc_item(self.ctx.session, 'ofc_network', n)) + self.ofc.create_ofc_network(self.ctx, t, n) + self.assertTrue(ndb.get_ofc_item(self.ctx.session, 'ofc_network', n)) + network = ndb.get_ofc_item(self.ctx.session, 'ofc_network', n) + self.assertEqual(network.ofc_id, "ofc-" + n[:-4]) + + def teste_exists_ofc_network(self): + """test exists_ofc_network.""" + t, n, p, f, none = self.get_random_params() + self.ofc.create_ofc_tenant(self.ctx, t) + self.assertFalse(self.ofc.exists_ofc_network(self.ctx, n)) + 
self.ofc.create_ofc_network(self.ctx, t, n) + self.assertTrue(self.ofc.exists_ofc_network(self.ctx, n)) + + def testf_delete_ofc_network(self): + """test delete ofc_network.""" + t, n, p, f, none = self.get_random_params() + self.ofc.create_ofc_tenant(self.ctx, t) + self.ofc.create_ofc_network(self.ctx, t, n) + self.assertTrue(ndb.get_ofc_item(self.ctx.session, 'ofc_network', n)) + self.ofc.delete_ofc_network(self.ctx, n, {'tenant_id': t}) + self.assertFalse(ndb.get_ofc_item(self.ctx.session, 'ofc_network', n)) + + def _mock_get_portinfo(self, port_id, datapath_id='0xabc', port_no=1): + get_portinfo = mock.patch.object(ndb, 'get_portinfo').start() + fake_portinfo = FakePortInfo(id=port_id, datapath_id=datapath_id, + port_no=port_no) + get_portinfo.return_value = fake_portinfo + return get_portinfo + + def _test_create_ofc_port(self, with_filter=False): + t, n, p, f, none = self.get_random_params() + self.ofc.create_ofc_tenant(self.ctx, t) + self.ofc.create_ofc_network(self.ctx, t, n) + self.assertFalse(ndb.get_ofc_item(self.ctx.session, 'ofc_port', p)) + get_portinfo = self._mock_get_portinfo(p) + port = {'tenant_id': t, 'network_id': n} + if with_filter: + _filters = ['filter1', 'filter2'] + self.plugin.get_packet_filters_for_port.return_value = _filters + self.ofc.create_ofc_port(self.ctx, p, port) + self.assertTrue(ndb.get_ofc_item(self.ctx.session, 'ofc_port', p)) + port = ndb.get_ofc_item(self.ctx.session, 'ofc_port', p) + self.assertEqual(port.ofc_id, "ofc-" + p[:-4]) + get_portinfo.assert_called_once_with(mock.ANY, p) + portval = self.ofc.driver.ofc_port_dict[port.ofc_id] + if with_filter: + self.assertEqual(_filters, portval['filters']) + else: + self.assertFalse('filters' in portval) + + def testg_create_ofc_port(self): + """test create ofc_port.""" + self._test_create_ofc_port(with_filter=False) + + def testg_create_ofc_port_with_filters(self): + """test create ofc_port.""" + self._test_create_ofc_port(with_filter=True) + + def 
testh_exists_ofc_port(self): + """test exists_ofc_port.""" + t, n, p, f, none = self.get_random_params() + self.ofc.create_ofc_tenant(self.ctx, t) + self.ofc.create_ofc_network(self.ctx, t, n) + self.assertFalse(self.ofc.exists_ofc_port(self.ctx, p)) + get_portinfo = self._mock_get_portinfo(p) + port = {'tenant_id': t, 'network_id': n} + self.ofc.create_ofc_port(self.ctx, p, port) + self.assertTrue(self.ofc.exists_ofc_port(self.ctx, p)) + get_portinfo.assert_called_once_with(mock.ANY, p) + + def testi_delete_ofc_port(self): + """test delete ofc_port.""" + t, n, p, f, none = self.get_random_params() + self.ofc.create_ofc_tenant(self.ctx, t) + self.ofc.create_ofc_network(self.ctx, t, n) + get_portinfo = self._mock_get_portinfo(p) + port = {'tenant_id': t, 'network_id': n} + self.ofc.create_ofc_port(self.ctx, p, port) + self.assertTrue(ndb.get_ofc_item(self.ctx.session, 'ofc_port', p)) + self.ofc.delete_ofc_port(self.ctx, p, port) + self.assertFalse(ndb.get_ofc_item(self.ctx.session, 'ofc_port', p)) + get_portinfo.assert_called_once_with(mock.ANY, p) + + +class OFCManagerFilterTest(OFCManagerTestBase): + def testj_create_ofc_packet_filter(self): + """test create ofc_filter.""" + t, n, p, f, none = self.get_random_params() + self.ofc.create_ofc_tenant(self.ctx, t) + self.ofc.create_ofc_network(self.ctx, t, n) + self.assertFalse(ndb.get_ofc_item(self.ctx.session, + 'ofc_packet_filter', f)) + pf = {'tenant_id': t, 'network_id': n} + self.ofc.create_ofc_packet_filter(self.ctx, f, pf) + self.assertTrue(ndb.get_ofc_item(self.ctx.session, + 'ofc_packet_filter', f)) + _filter = ndb.get_ofc_item(self.ctx.session, 'ofc_packet_filter', f) + self.assertEqual(_filter.ofc_id, "ofc-" + f[:-4]) + + def testk_exists_ofc_packet_filter(self): + """test exists_ofc_packet_filter.""" + t, n, p, f, none = self.get_random_params() + self.ofc.create_ofc_tenant(self.ctx, t) + self.ofc.create_ofc_network(self.ctx, t, n) + self.assertFalse(self.ofc.exists_ofc_packet_filter(self.ctx, f)) + pf = 
{'tenant_id': t, 'network_id': n} + self.ofc.create_ofc_packet_filter(self.ctx, f, pf) + self.assertTrue(self.ofc.exists_ofc_packet_filter(self.ctx, f)) + + def testl_delete_ofc_packet_filter(self): + """test delete ofc_filter.""" + t, n, p, f, none = self.get_random_params() + self.ofc.create_ofc_tenant(self.ctx, t) + self.ofc.create_ofc_network(self.ctx, t, n) + pf = {'tenant_id': t, 'network_id': n} + self.ofc.create_ofc_packet_filter(self.ctx, f, pf) + self.assertTrue(ndb.get_ofc_item(self.ctx.session, + 'ofc_packet_filter', f)) + self.ofc.delete_ofc_packet_filter(self.ctx, f) + self.assertFalse(ndb.get_ofc_item(self.ctx.session, + 'ofc_packet_filter', f)) + + +class OFCManagerRouterTest(OFCManagerTestBase): + def get_random_params(self): + tenant = uuidutils.generate_uuid() + router = uuidutils.generate_uuid() + network = uuidutils.generate_uuid() + return (tenant, router, network) + + def test_create_ofc_router(self): + """test create ofc_router""" + t, r, _n = self.get_random_params() + self.ofc.create_ofc_tenant(self.ctx, t) + self.assertFalse(ndb.get_ofc_item(self.ctx.session, 'ofc_router', r)) + self.ofc.create_ofc_router(self.ctx, t, r, 'test router') + self.assertTrue(ndb.get_ofc_item(self.ctx.session, 'ofc_router', r)) + router = ndb.get_ofc_item(self.ctx.session, 'ofc_router', r) + self.assertEqual(router.ofc_id, "ofc-" + r[:-4]) + + def test_exists_ofc_router(self): + """test exists_ofc_router""" + t, r, _n = self.get_random_params() + self.ofc.create_ofc_tenant(self.ctx, t) + self.assertFalse(self.ofc.exists_ofc_router(self.ctx, r)) + self.ofc.create_ofc_router(self.ctx, t, r) + self.assertTrue(self.ofc.exists_ofc_router(self.ctx, r)) + + def test_delete_ofc_router(self): + """test delete ofc_router""" + t, r, _n = self.get_random_params() + self.ofc.create_ofc_tenant(self.ctx, t) + self.ofc.create_ofc_router(self.ctx, t, r) + self.assertTrue(ndb.get_ofc_item(self.ctx.session, 'ofc_router', r)) + self.ofc.delete_ofc_router(self.ctx, r, {'tenant_id': 
t}) + self.assertFalse(ndb.get_ofc_item(self.ctx.session, 'ofc_network', r)) + + def test_router_interface(self): + t, r, n = self.get_random_params() + self.ofc.create_ofc_tenant(self.ctx, t) + self.ofc.create_ofc_router(self.ctx, t, r) + self.ofc.create_ofc_network(self.ctx, t, n) + self.assertTrue(ndb.get_ofc_item(self.ctx.session, 'ofc_router', r)) + self.assertTrue(ndb.get_ofc_item(self.ctx.session, 'ofc_network', n)) + + p = {'id': uuidutils.generate_uuid(), + 'network_id': n, 'ip_address': '10.1.1.1', 'cidr': '10.1.0.0/20', + 'mac_address': '11:22:33:44:55:66'} + self.ofc.add_ofc_router_interface(self.ctx, r, p['id'], p) + self.assertTrue(ndb.get_ofc_item(self.ctx.session, + 'ofc_port', p['id'])) + self.ofc.delete_ofc_router_interface(self.ctx, r, p['id']) + self.assertFalse(ndb.get_ofc_item(self.ctx.session, + 'ofc_port', p['id'])) + self.ofc.delete_ofc_router(self.ctx, r, {'tenant_id': t}) + self.assertFalse(ndb.get_ofc_item(self.ctx.session, 'ofc_network', r)) + + def test_router_route(self): + t, r, _n = self.get_random_params() + self.ofc.create_ofc_tenant(self.ctx, t) + self.ofc.create_ofc_router(self.ctx, t, r) + self.assertTrue(ndb.get_ofc_item(self.ctx.session, 'ofc_router', r)) + + routes = [{'destination': '2.2.2.0/24', 'nexthop': '1.1.1.10'}] + self.ofc.update_ofc_router_route(self.ctx, r, routes) + self.assertEqual(len(self.ofc.driver.ofc_router_route_dict), 1) + + routes = [{'destination': '3.3.3.0/24', 'nexthop': '1.1.1.11'}, + {'destination': '4.4.4.0/24', 'nexthop': '1.1.1.11'}] + self.ofc.update_ofc_router_route(self.ctx, r, routes) + self.assertEqual(len(self.ofc.driver.ofc_router_route_dict), 2) + + routes = [{'destination': '2.2.2.0/24', 'nexthop': '1.1.1.10'}] + self.ofc.update_ofc_router_route(self.ctx, r, routes) + self.assertEqual(len(self.ofc.driver.ofc_router_route_dict), 1) + + routes = [] + self.ofc.update_ofc_router_route(self.ctx, r, routes) + self.assertEqual(len(self.ofc.driver.ofc_router_route_dict), 0) diff --git 
a/neutron/tests/unit/nec/test_packet_filter.py b/neutron/tests/unit/nec/test_packet_filter.py new file mode 100644 index 000000000..b87a9255a --- /dev/null +++ b/neutron/tests/unit/nec/test_packet_filter.py @@ -0,0 +1,714 @@ +# Copyright (c) 2013 OpenStack Foundation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import contextlib + +import mock +import webob.exc + +from neutron.api.v2 import attributes +from neutron import context +from neutron.plugins.nec.common import exceptions as nexc +from neutron.plugins.nec.extensions import packetfilter as ext_pf +from neutron.tests.unit.nec import test_nec_plugin +from neutron.tests.unit import test_db_plugin as test_plugin + + +NEC_PLUGIN_PF_INI = """ +[DEFAULT] +api_extensions_path = neutron/plugins/nec/extensions +[OFC] +driver = neutron.tests.unit.nec.stub_ofc_driver.StubOFCDriver +enable_packet_filter = True +""" + + +class PacketfilterExtensionManager(ext_pf.Packetfilter): + + @classmethod + def get_resources(cls): + # Add the resources to the global attribute map + # This is done here as the setup process won't + # initialize the main API router which extends + # the global attribute map + attributes.RESOURCE_ATTRIBUTE_MAP.update( + {'packet_filters': ext_pf.PACKET_FILTER_ATTR_MAP}) + return super(PacketfilterExtensionManager, cls).get_resources() + + +class TestNecPluginPacketFilterBase(test_nec_plugin.NecPluginV2TestCase): + + _nec_ini = NEC_PLUGIN_PF_INI + + def setUp(self): + ext_mgr = 
PacketfilterExtensionManager() + super(TestNecPluginPacketFilterBase, self).setUp(ext_mgr=ext_mgr) + + def _create_packet_filter(self, fmt, net_id, expected_res_status=None, + arg_list=None, **kwargs): + data = {'packet_filter': {'network_id': net_id, + 'tenant_id': self._tenant_id, + 'priority': '1', + 'action': 'ALLOW'}} + + for arg in (('name', 'admin_state_up', 'action', 'priority', 'in_port', + 'src_mac', 'dst_mac', 'eth_type', 'src_cidr', 'dst_cidr', + 'protocol', 'src_port', 'dst_port') + + (arg_list or ())): + # Arg must be present + if arg in kwargs: + data['packet_filter'][arg] = kwargs[arg] + pf_req = self.new_create_request('packet_filters', data, fmt) + if (kwargs.get('set_context') and 'tenant_id' in kwargs): + # create a specific auth context for this request + pf_req.environ['neutron.context'] = context.Context( + '', kwargs['tenant_id']) + + pf_res = pf_req.get_response(self.ext_api) + if expected_res_status: + self.assertEqual(pf_res.status_int, expected_res_status) + return pf_res + + def _make_packet_filter(self, fmt, net_id, expected_res_status=None, + **kwargs): + res = self._create_packet_filter(fmt, net_id, expected_res_status, + **kwargs) + # Things can go wrong - raise HTTP exc with res code only + # so it can be caught by unit tests + if res.status_int >= 400: + raise webob.exc.HTTPClientError(code=res.status_int) + return self.deserialize(fmt, res) + + @contextlib.contextmanager + def packet_filter_on_network(self, network=None, fmt=None, do_delete=True, + **kwargs): + with test_plugin.optional_ctx(network, self.network) as network_to_use: + net_id = network_to_use['network']['id'] + pf = self._make_packet_filter(fmt or self.fmt, net_id, **kwargs) + yield pf + if do_delete: + self._delete('packet_filters', pf['packet_filter']['id']) + + @contextlib.contextmanager + def packet_filter_on_port(self, port=None, fmt=None, do_delete=True, + set_portinfo=True, **kwargs): + with test_plugin.optional_ctx(port, self.port) as port_to_use: + net_id 
= port_to_use['port']['network_id'] + port_id = port_to_use['port']['id'] + + if set_portinfo: + portinfo = {'id': port_id, + 'port_no': kwargs.get('port_no', 123)} + kw = {'added': [portinfo]} + if 'datapath_id' in kwargs: + kw['datapath_id'] = kwargs['datapath_id'] + self.rpcapi_update_ports(**kw) + + kwargs['in_port'] = port_id + pf = self._make_packet_filter(fmt or self.fmt, net_id, **kwargs) + self.assertEqual(port_id, pf['packet_filter']['in_port']) + yield pf + if do_delete: + self._delete('packet_filters', pf['packet_filter']['id']) + + +class TestNecPluginPacketFilter(TestNecPluginPacketFilterBase): + + def setUp(self): + super(TestNecPluginPacketFilter, self).setUp() + # Remove attributes explicitly from mock object to check + # a case where there are no update_filter and validate_*. + del self.ofc.driver.update_filter + del self.ofc.driver.validate_filter_create + del self.ofc.driver.validate_filter_update + + def test_list_packet_filters(self): + self._list('packet_filters') + + def test_create_pf_on_network_no_ofc_creation(self): + with self.packet_filter_on_network(admin_state_up=False) as pf: + self.assertEqual(pf['packet_filter']['status'], 'DOWN') + + self.assertFalse(self.ofc.create_ofc_packet_filter.called) + self.assertFalse(self.ofc.delete_ofc_packet_filter.called) + + def test_create_pf_on_port_no_ofc_creation(self): + with self.packet_filter_on_port(admin_state_up=False, + set_portinfo=False) as pf: + self.assertEqual(pf['packet_filter']['status'], 'DOWN') + + self.assertFalse(self.ofc.create_ofc_packet_filter.called) + self.assertFalse(self.ofc.delete_ofc_packet_filter.called) + + def test_create_pf_on_network_with_ofc_creation(self): + with self.packet_filter_on_network() as pf: + pf_id = pf['packet_filter']['id'] + self.assertEqual(pf['packet_filter']['status'], 'ACTIVE') + + ctx = mock.ANY + pf_dict = mock.ANY + expected = [ + mock.call.exists_ofc_packet_filter(ctx, pf_id), + mock.call.create_ofc_packet_filter(ctx, pf_id, pf_dict), + 
mock.call.exists_ofc_packet_filter(ctx, pf_id), + mock.call.delete_ofc_packet_filter(ctx, pf_id), + ] + self.ofc.assert_has_calls(expected) + self.assertEqual(self.ofc.create_ofc_packet_filter.call_count, 1) + self.assertEqual(self.ofc.delete_ofc_packet_filter.call_count, 1) + + def test_create_pf_on_port_with_ofc_creation(self): + with self.packet_filter_on_port() as pf: + pf_id = pf['packet_filter']['id'] + self.assertEqual(pf['packet_filter']['status'], 'ACTIVE') + + ctx = mock.ANY + pf_dict = mock.ANY + expected = [ + mock.call.exists_ofc_packet_filter(ctx, pf_id), + mock.call.create_ofc_packet_filter(ctx, pf_id, pf_dict), + mock.call.exists_ofc_packet_filter(ctx, pf_id), + mock.call.delete_ofc_packet_filter(ctx, pf_id), + ] + self.ofc.assert_has_calls(expected) + self.assertEqual(self.ofc.create_ofc_packet_filter.call_count, 1) + self.assertEqual(self.ofc.delete_ofc_packet_filter.call_count, 1) + + def _test_create_pf_with_protocol(self, protocol, expected_eth_type): + with self.packet_filter_on_network(protocol=protocol) as pf: + pf_data = pf['packet_filter'] + self.assertEqual(protocol, pf_data['protocol']) + self.assertEqual(expected_eth_type, pf_data['eth_type']) + + def test_create_pf_with_protocol_tcp(self): + self._test_create_pf_with_protocol('TCP', 0x800) + + def test_create_pf_with_protocol_udp(self): + self._test_create_pf_with_protocol('UDP', 0x800) + + def test_create_pf_with_protocol_icmp(self): + self._test_create_pf_with_protocol('ICMP', 0x800) + + def test_create_pf_with_protocol_arp(self): + self._test_create_pf_with_protocol('ARP', 0x806) + + def test_create_pf_with_inconsistent_protocol_and_eth_type(self): + with self.packet_filter_on_network(protocol='TCP') as pf: + pf_data = pf['packet_filter'] + pf_id = pf_data['id'] + self.assertEqual('TCP', pf_data['protocol']) + self.assertEqual(0x800, pf_data['eth_type']) + data = {'packet_filter': {'eth_type': 0x806}} + self._update('packet_filters', pf_id, data, + expected_code=409) + + def 
test_create_pf_with_invalid_priority(self): + with self.network() as net: + net_id = net['network']['id'] + kwargs = {'priority': 'high'} + self._create_packet_filter(self.fmt, net_id, + webob.exc.HTTPBadRequest.code, + **kwargs) + self.assertFalse(self.ofc.create_ofc_packet_filter.called) + + def test_create_pf_with_ofc_creation_failure(self): + self.ofc.set_raise_exc('create_ofc_packet_filter', + nexc.OFCException(reason='hoge')) + + with self.packet_filter_on_network() as pf: + pf_id = pf['packet_filter']['id'] + pf_ref = self._show('packet_filters', pf_id) + self.assertEqual(pf_ref['packet_filter']['status'], 'ERROR') + + self.ofc.set_raise_exc('create_ofc_packet_filter', None) + + # Retry activate packet_filter (even if there is no change). + data = {'packet_filter': {}} + self._update('packet_filters', pf_id, data) + + pf_ref = self._show('packet_filters', pf_id) + self.assertEqual(pf_ref['packet_filter']['status'], 'ACTIVE') + + ctx = mock.ANY + pf_dict = mock.ANY + expected = [ + mock.call.exists_ofc_packet_filter(ctx, pf_id), + mock.call.create_ofc_packet_filter(ctx, pf_id, pf_dict), + + mock.call.exists_ofc_packet_filter(ctx, pf_id), + + mock.call.exists_ofc_packet_filter(ctx, pf_id), + mock.call.create_ofc_packet_filter(ctx, pf_id, pf_dict), + ] + self.ofc.assert_has_calls(expected) + self.assertEqual(self.ofc.create_ofc_packet_filter.call_count, 2) + + def test_show_pf_on_network(self): + kwargs = { + 'name': 'test-pf-net', + 'admin_state_up': False, + 'action': 'DENY', + 'priority': '102', + 'src_mac': '00:11:22:33:44:55', + 'dst_mac': '66:77:88:99:aa:bb', + 'eth_type': '2048', + 'src_cidr': '192.168.1.0/24', + 'dst_cidr': '192.168.2.0/24', + 'protocol': 'TCP', + 'src_port': '35001', + 'dst_port': '22' + } + + with self.packet_filter_on_network(**kwargs) as pf: + pf_id = pf['packet_filter']['id'] + pf_ref = self._show('packet_filters', pf_id) + + # convert string to int. 
+ kwargs.update({'priority': 102, 'eth_type': 2048, + 'src_port': 35001, 'dst_port': 22, + 'in_port': None}) + + self.assertEqual(pf_id, pf_ref['packet_filter']['id']) + for key in kwargs: + self.assertEqual(kwargs[key], pf_ref['packet_filter'][key]) + + def test_show_pf_on_network_with_wildcards(self): + kwargs = { + 'name': 'test-pf-net', + 'admin_state_up': False, + 'action': 'DENY', + 'priority': '102', + } + + with self.packet_filter_on_network(**kwargs) as pf: + pf_id = pf['packet_filter']['id'] + pf_ref = self._show('packet_filters', pf_id) + + # convert string to int. + kwargs.update({'priority': 102, + 'in_port': None, + 'src_mac': None, + 'dst_mac': None, + 'eth_type': None, + 'src_cidr': None, + 'dst_cidr': None, + 'protocol': None, + 'src_port': None, + 'dst_port': None}) + + self.assertEqual(pf_id, pf_ref['packet_filter']['id']) + for key in kwargs: + self.assertEqual(kwargs[key], pf_ref['packet_filter'][key]) + + def test_show_pf_on_port(self): + kwargs = { + 'name': 'test-pf-port', + 'admin_state_up': False, + 'action': 'DENY', + 'priority': '0o147', + 'src_mac': '00:11:22:33:44:55', + 'dst_mac': '66:77:88:99:aa:bb', + 'eth_type': 2048, + 'src_cidr': '192.168.1.0/24', + 'dst_cidr': '192.168.2.0/24', + 'protocol': 'TCP', + 'dst_port': '0x50' + } + + with self.packet_filter_on_port(**kwargs) as pf: + pf_id = pf['packet_filter']['id'] + pf_ref = self._show('packet_filters', pf_id) + + # convert string to int. + kwargs.update({'priority': 103, 'eth_type': 2048, + 'dst_port': 80, + # wildcard field is None in a response. 
+ 'src_port': None}) + + self.assertEqual(pf_id, pf_ref['packet_filter']['id']) + self.assertTrue(pf_ref['packet_filter']['in_port']) + for key in kwargs: + self.assertEqual(kwargs[key], pf_ref['packet_filter'][key]) + + def test_show_pf_not_found(self): + pf_id = '00000000-ffff-ffff-ffff-000000000000' + + self._show('packet_filters', pf_id, + expected_code=webob.exc.HTTPNotFound.code) + + def test_update_pf_on_network(self): + ctx = mock.ANY + pf_dict = mock.ANY + with self.packet_filter_on_network(admin_state_up=False) as pf: + pf_id = pf['packet_filter']['id'] + + self.assertFalse(self.ofc.create_ofc_packet_filter.called) + data = {'packet_filter': {'admin_state_up': True}} + self._update('packet_filters', pf_id, data) + self.ofc.create_ofc_packet_filter.assert_called_once_with( + ctx, pf_id, pf_dict) + + self.assertFalse(self.ofc.delete_ofc_packet_filter.called) + data = {'packet_filter': {'admin_state_up': False}} + self._update('packet_filters', pf_id, data) + self.ofc.delete_ofc_packet_filter.assert_called_once_with( + ctx, pf_id) + + def test_update_pf_on_port(self): + ctx = mock.ANY + pf_dict = mock.ANY + with self.packet_filter_on_port(admin_state_up=False) as pf: + pf_id = pf['packet_filter']['id'] + + self.assertFalse(self.ofc.create_ofc_packet_filter.called) + data = {'packet_filter': {'admin_state_up': True}} + self._update('packet_filters', pf_id, data) + self.ofc.create_ofc_packet_filter.assert_called_once_with( + ctx, pf_id, pf_dict) + + self.assertFalse(self.ofc.delete_ofc_packet_filter.called) + data = {'packet_filter': {'admin_state_up': False}} + self._update('packet_filters', pf_id, data) + self.ofc.delete_ofc_packet_filter.assert_called_once_with( + ctx, pf_id) + + def test_delete_pf_with_error_status(self): + self.ofc.set_raise_exc('create_ofc_packet_filter', + nexc.OFCException(reason='fake')) + with self.packet_filter_on_network() as pf: + pf_id = pf['packet_filter']['id'] + pf_ref = self._show('packet_filters', pf_id) + 
self.assertEqual(pf_ref['packet_filter']['status'], 'ERROR') + + ctx = mock.ANY + pf_dict = mock.ANY + expected = [ + mock.call.exists_ofc_packet_filter(ctx, pf_id), + mock.call.create_ofc_packet_filter(ctx, pf_id, pf_dict), + mock.call.exists_ofc_packet_filter(ctx, pf_id), + ] + self.ofc.assert_has_calls(expected) + self.assertEqual(1, self.ofc.create_ofc_packet_filter.call_count) + self.assertEqual(0, self.ofc.delete_ofc_packet_filter.call_count) + + def test_activate_pf_on_port_triggered_by_update_port(self): + ctx = mock.ANY + pf_dict = mock.ANY + with self.packet_filter_on_port(set_portinfo=False) as pf: + pf_id = pf['packet_filter']['id'] + in_port_id = pf['packet_filter']['in_port'] + + self.assertFalse(self.ofc.create_ofc_packet_filter.called) + portinfo = {'id': in_port_id, 'port_no': 123} + kw = {'added': [portinfo]} + self.rpcapi_update_ports(**kw) + self.ofc.create_ofc_packet_filter.assert_called_once_with( + ctx, pf_id, pf_dict) + + self.assertFalse(self.ofc.delete_ofc_packet_filter.called) + kw = {'removed': [in_port_id]} + self.rpcapi_update_ports(**kw) + self.ofc.delete_ofc_packet_filter.assert_called_once_with( + ctx, pf_id) + + # Ensure pf was created before in_port has activated. 
+ ctx = mock.ANY + pf_dict = mock.ANY + port_dict = mock.ANY + expected = [ + mock.call.exists_ofc_packet_filter(ctx, pf_id), + mock.call.create_ofc_packet_filter(ctx, pf_id, pf_dict), + mock.call.exists_ofc_port(ctx, in_port_id), + mock.call.create_ofc_port(ctx, in_port_id, port_dict), + + mock.call.exists_ofc_port(ctx, in_port_id), + mock.call.delete_ofc_port(ctx, in_port_id, port_dict), + mock.call.exists_ofc_packet_filter(ctx, pf_id), + mock.call.delete_ofc_packet_filter(ctx, pf_id), + ] + self.ofc.assert_has_calls(expected) + self.assertEqual(self.ofc.create_ofc_packet_filter.call_count, 1) + self.assertEqual(self.ofc.delete_ofc_packet_filter.call_count, 1) + + def test_activate_pf_while_exists_on_ofc(self): + ctx = mock.ANY + with self.packet_filter_on_network() as pf: + pf_id = pf['packet_filter']['id'] + + self.ofc.set_raise_exc('delete_ofc_packet_filter', + nexc.OFCException(reason='hoge')) + + # This update request will make plugin reactivate pf. + data = {'packet_filter': {'priority': 1000}} + self._update('packet_filters', pf_id, data, + expected_code=webob.exc.HTTPInternalServerError.code) + + self.ofc.set_raise_exc('delete_ofc_packet_filter', None) + + ctx = mock.ANY + pf_dict = mock.ANY + expected = [ + mock.call.exists_ofc_packet_filter(ctx, pf_id), + mock.call.create_ofc_packet_filter(ctx, pf_id, pf_dict), + + mock.call.exists_ofc_packet_filter(ctx, pf_id), + mock.call.delete_ofc_packet_filter(ctx, pf_id), + + mock.call.exists_ofc_packet_filter(ctx, pf_id), + mock.call.delete_ofc_packet_filter(ctx, pf_id), + ] + self.ofc.assert_has_calls(expected) + self.assertEqual(self.ofc.delete_ofc_packet_filter.call_count, 2) + + def test_deactivate_pf_with_ofc_deletion_failure(self): + ctx = mock.ANY + with self.packet_filter_on_network() as pf: + pf_id = pf['packet_filter']['id'] + + self.ofc.set_raise_exc('delete_ofc_packet_filter', + nexc.OFCException(reason='hoge')) + + data = {'packet_filter': {'admin_state_up': False}} + self._update('packet_filters', 
pf_id, data, + expected_code=webob.exc.HTTPInternalServerError.code) + + pf_ref = self._show('packet_filters', pf_id) + self.assertEqual(pf_ref['packet_filter']['status'], 'ERROR') + + self.ofc.set_raise_exc('delete_ofc_packet_filter', None) + + data = {'packet_filter': {'priority': 1000}} + self._update('packet_filters', pf_id, data) + + pf_ref = self._show('packet_filters', pf_id) + self.assertEqual(pf_ref['packet_filter']['status'], 'DOWN') + + ctx = mock.ANY + pf_dict = mock.ANY + expected = [ + mock.call.exists_ofc_packet_filter(ctx, pf_id), + mock.call.create_ofc_packet_filter(ctx, pf_id, pf_dict), + + mock.call.exists_ofc_packet_filter(ctx, pf_id), + mock.call.delete_ofc_packet_filter(ctx, pf_id), + + mock.call.exists_ofc_packet_filter(ctx, pf_id), + mock.call.delete_ofc_packet_filter(ctx, pf_id), + + mock.call.exists_ofc_packet_filter(ctx, pf_id), + ] + self.ofc.assert_has_calls(expected) + self.assertEqual(self.ofc.delete_ofc_packet_filter.call_count, 2) + + def test_delete_pf_with_ofc_deletion_failure(self): + self.ofc.set_raise_exc('delete_ofc_packet_filter', + nexc.OFCException(reason='hoge')) + + with self.packet_filter_on_network() as pf: + pf_id = pf['packet_filter']['id'] + + self._delete('packet_filters', pf_id, + expected_code=webob.exc.HTTPInternalServerError.code) + + pf_ref = self._show('packet_filters', pf_id) + self.assertEqual(pf_ref['packet_filter']['status'], 'ERROR') + + self.ofc.set_raise_exc('delete_ofc_packet_filter', None) + # Then, self._delete('packet_filters', pf_id) will success. 
+ + ctx = mock.ANY + pf_dict = mock.ANY + expected = [ + mock.call.exists_ofc_packet_filter(ctx, pf_id), + mock.call.create_ofc_packet_filter(ctx, pf_id, pf_dict), + + mock.call.exists_ofc_packet_filter(ctx, pf_id), + mock.call.delete_ofc_packet_filter(ctx, pf_id), + + mock.call.exists_ofc_packet_filter(ctx, pf_id), + mock.call.delete_ofc_packet_filter(ctx, pf_id), + ] + self.ofc.assert_has_calls(expected) + self.assertEqual(self.ofc.delete_ofc_packet_filter.call_count, 2) + + def test_auto_delete_pf_in_network_deletion(self): + with self.packet_filter_on_network(admin_state_up=False, + do_delete=False) as pf: + pf_id = pf['packet_filter']['id'] + + self._show('packet_filters', pf_id, + expected_code=webob.exc.HTTPNotFound.code) + + def test_auto_delete_pf_in_port_deletion(self): + with self.port(no_delete=True) as port: + network = self._show('networks', port['port']['network_id']) + + with self.packet_filter_on_network(network=network) as pfn: + with self.packet_filter_on_port(port=port, do_delete=False, + set_portinfo=False) as pf: + pf_id = pf['packet_filter']['id'] + in_port_id = pf['packet_filter']['in_port'] + + self._delete('ports', in_port_id) + # Check the packet filter on the port is deleted. + self._show('packet_filters', pf_id, + expected_code=webob.exc.HTTPNotFound.code) + # Check the packet filter on the network is not deleted. 
+ self._show('packet_filters', pfn['packet_filter']['id']) + + def test_no_pf_activation_while_port_operations(self): + with self.packet_filter_on_port() as pf: + in_port_id = pf['packet_filter']['in_port'] + self.assertEqual(self.ofc.create_ofc_packet_filter.call_count, 1) + self.assertEqual(self.ofc.delete_ofc_packet_filter.call_count, 0) + + data = {'port': {'admin_state_up': False}} + self._update('ports', in_port_id, data) + self.assertEqual(self.ofc.create_ofc_packet_filter.call_count, 1) + self.assertEqual(self.ofc.delete_ofc_packet_filter.call_count, 0) + + data = {'port': {'admin_state_up': True}} + self._update('ports', in_port_id, data) + self.assertEqual(self.ofc.create_ofc_packet_filter.call_count, 1) + self.assertEqual(self.ofc.delete_ofc_packet_filter.call_count, 0) + + +class TestNecPluginPacketFilterWithValidate(TestNecPluginPacketFilterBase): + + def setUp(self): + super(TestNecPluginPacketFilterWithValidate, self).setUp() + # Remove attributes explicitly from mock object to check + # a case where there are no update_filter. 
+ del self.ofc.driver.update_filter + self.validate_create = self.ofc.driver.validate_filter_create + self.validate_update = self.ofc.driver.validate_filter_update + + def test_create_pf_on_network(self): + with self.packet_filter_on_network() as pf: + pf_id = pf['packet_filter']['id'] + self.assertEqual(pf['packet_filter']['status'], 'ACTIVE') + + ctx = mock.ANY + pf_dict = mock.ANY + expected = [ + mock.call.driver.validate_filter_create(ctx, pf_dict), + mock.call.exists_ofc_packet_filter(ctx, pf_id), + mock.call.create_ofc_packet_filter(ctx, pf_id, pf_dict), + mock.call.exists_ofc_packet_filter(ctx, pf_id), + mock.call.delete_ofc_packet_filter(ctx, pf_id), + ] + self.ofc.assert_has_calls(expected) + self.assertEqual(self.ofc.create_ofc_packet_filter.call_count, 1) + self.assertEqual(self.ofc.delete_ofc_packet_filter.call_count, 1) + + def test_update_pf_on_network(self): + ctx = mock.ANY + pf_dict = mock.ANY + with self.packet_filter_on_network(admin_state_up=False) as pf: + pf_id = pf['packet_filter']['id'] + + self.assertFalse(self.ofc.create_ofc_packet_filter.called) + data = {'packet_filter': {'admin_state_up': True}} + self._update('packet_filters', pf_id, data) + self.ofc.create_ofc_packet_filter.assert_called_once_with( + ctx, pf_id, pf_dict) + self.ofc.driver.validate_filter_update.assert_called_once_with( + ctx, data['packet_filter']) + + self.assertFalse(self.ofc.delete_ofc_packet_filter.called) + data = {'packet_filter': {'admin_state_up': False}} + self._update('packet_filters', pf_id, data) + self.ofc.delete_ofc_packet_filter.assert_called_once_with( + ctx, pf_id) + self.assertEqual( + 2, self.ofc.driver.validate_filter_update.call_count) + + def test_create_pf_on_network_with_validation_error(self): + self.validate_create.side_effect = ext_pf.PacketFilterInvalidPriority( + min=1, max=65535) + with self.network() as net: + net_id = net['network']['id'] + e = self.assertRaises(webob.exc.HTTPClientError, + self._make_packet_filter, + self.fmt, net_id, 
expected_res_status=400) + self.assertEqual(400, e.status_int) + + def test_update_pf_on_network_with_validation_error(self): + self.validate_update.side_effect = ( + ext_pf.PacketFilterUpdateNotSupported(field='priority')) + with self.packet_filter_on_network() as pf: + pf_id = pf['packet_filter']['id'] + pf_ref = self._show('packet_filters', pf_id) + self.assertEqual(pf_ref['packet_filter']['status'], 'ACTIVE') + data = {'packet_filter': {'priority': 1000}} + self._update('packet_filters', pf_id, data, + expected_code=400) + + +class TestNecPluginPacketFilterWithFilterUpdate(TestNecPluginPacketFilterBase): + + def setUp(self): + super(TestNecPluginPacketFilterWithFilterUpdate, self).setUp() + # Remove attributes explicitly from mock object to check + # a case where there are no update_filter and validate_*. + del self.ofc.driver.validate_filter_create + del self.ofc.driver.validate_filter_update + + def test_update_pf_toggle_admin_state(self): + ctx = mock.ANY + pf_dict = mock.ANY + with self.packet_filter_on_network(admin_state_up=False) as pf: + pf_id = pf['packet_filter']['id'] + + self.assertFalse(self.ofc.create_ofc_packet_filter.called) + data = {'packet_filter': {'admin_state_up': True}} + self._update('packet_filters', pf_id, data) + self.ofc.create_ofc_packet_filter.assert_called_once_with( + ctx, pf_id, pf_dict) + + self.assertFalse(self.ofc.delete_ofc_packet_filter.called) + data = {'packet_filter': {'admin_state_up': False}} + self._update('packet_filters', pf_id, data) + self.ofc.delete_ofc_packet_filter.assert_called_once_with( + ctx, pf_id) + + def test_update_pf_change_field(self): + ctx = mock.ANY + with self.packet_filter_on_network(admin_state_up=True) as pf: + pf_id = pf['packet_filter']['id'] + self.assertTrue(self.ofc.create_ofc_packet_filter.called) + + data = {'packet_filter': {'src_mac': '12:34:56:78:9a:bc'}} + self._update('packet_filters', pf_id, data) + self.ofc.update_ofc_packet_filter.assert_called_once_with( + ctx, pf_id, 
data['packet_filter']) + self.assertEqual(1, self.ofc.update_ofc_packet_filter.call_count) + + self.assertFalse(self.ofc.delete_ofc_packet_filter.called) + data = {'packet_filter': {'admin_state_up': False}} + self._update('packet_filters', pf_id, data) + self.ofc.delete_ofc_packet_filter.assert_called_once_with( + ctx, pf_id) + + data = {'packet_filter': {'src_mac': '11:22:33:44:55:66'}} + self._update('packet_filters', pf_id, data) + self.assertEqual(1, self.ofc.update_ofc_packet_filter.call_count) + + data = {'packet_filter': {'admin_state_up': True}} + self._update('packet_filters', pf_id, data) + + data = {'packet_filter': {'src_mac': '66:55:44:33:22:11'}} + self._update('packet_filters', pf_id, data) + self.assertEqual(2, self.ofc.update_ofc_packet_filter.call_count) diff --git a/neutron/tests/unit/nec/test_pfc_driver.py b/neutron/tests/unit/nec/test_pfc_driver.py new file mode 100644 index 000000000..c198800c4 --- /dev/null +++ b/neutron/tests/unit/nec/test_pfc_driver.py @@ -0,0 +1,705 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 NEC Corporation. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
# @author: Ryota MIBU
#
# Unit tests for the NEC PFC OFC drivers.  Every test stubs out
# OFCClient.do_request with a mock and verifies the REST call
# (method, path, body) each driver operation issues, plus the OFC
# resource path the driver returns.

import random
import string
import uuid

import mock
import netaddr

from neutron.common import constants
from neutron.openstack.common import uuidutils
from neutron.plugins.nec.common import ofc_client as ofc
from neutron.plugins.nec.db import models as nmodels
from neutron.plugins.nec import drivers
from neutron.plugins.nec.drivers import pfc
from neutron.plugins.nec.extensions import packetfilter as ext_pf
from neutron.tests import base


class TestConfig(object):
    """Configuration for this test."""
    host = '127.0.0.1'
    port = 8888
    use_ssl = False
    key_file = None
    cert_file = None
    insecure_ssl = False


def _ofc(id):
    """OFC ID converter."""
    return "ofc-%s" % id


class PFCDriverTestBase(base.BaseTestCase):
    """Common tests for all PFC driver versions.

    Subclasses override the ``driver`` alias (and ``filter_supported``)
    to exercise version-specific drivers against the same scenarios.
    """

    driver = 'neutron.plugins.nec.drivers.pfc.PFCDriverBase'
    filter_supported = False

    def setUp(self):
        super(PFCDriverTestBase, self).setUp()
        # Replace the class attribute (a driver alias string) with a
        # driver instance; all REST traffic goes through the mock below.
        self.driver = drivers.get_driver(self.driver)(TestConfig)
        self.do_request = mock.patch.object(ofc.OFCClient,
                                            'do_request').start()

    def get_ofc_item_random_params(self):
        """create random parameters for ofc_item test."""
        tenant_id = uuidutils.generate_uuid()
        network_id = uuidutils.generate_uuid()
        port_id = uuidutils.generate_uuid()
        portinfo = nmodels.PortInfo(id=port_id, datapath_id="0x123456789",
                                    port_no=1234, vlan_id=321,
                                    mac="11:22:33:44:55:66")
        return tenant_id, network_id, portinfo

    def _generate_ofc_tenant_id(self, tenant_id):
        # Mirrors the driver's PFC tenant-id generation for a UUID input.
        fields = tenant_id.split('-')
        # Strip 1st character (UUID version) of 3rd field
        fields[2] = fields[2][1:]
        return ''.join(fields)

    def get_ofc_description(self, desc):
        """OFC description consists of [A-Za-z0-9_]."""
        return desc.replace('-', '_').replace(' ', '_')

    def _create_tenant(self, t, ofc_t, post_id=False, post_desc=False):
        """Drive create_tenant and check the POST body and returned path.

        :param post_id: expect the driver to send 'id' in the POST body
            (and the OFC response to carry no body).
        :param post_desc: expect the driver to send 'description'.
        """
        tenant_path = '/tenants/%s' % ofc_t
        path = "/tenants"
        description = "desc of %s" % t
        body = {}
        if post_desc:
            ofc_description = self.get_ofc_description(description)
            body['description'] = ofc_description
        if post_id:
            body['id'] = ofc_t
            self.do_request.return_value = None
        else:
            self.do_request.return_value = {'id': ofc_t}

        ret = self.driver.create_tenant(description, t)
        self.do_request.assert_called_once_with("POST", path, body=body)
        self.assertEqual(ret, tenant_path)

    def testa_create_tenant(self):
        t, n, p = self.get_ofc_item_random_params()
        ofc_t = self._generate_ofc_tenant_id(t)
        self._create_tenant(t, ofc_t, post_id=True)

    def testc_delete_tenant(self):
        t, n, p = self.get_ofc_item_random_params()

        path = "/tenants/%s" % _ofc(t)

        self.driver.delete_tenant(path)
        self.do_request.assert_called_once_with("DELETE", path)

    def testd_create_network(self):
        t, n, p = self.get_ofc_item_random_params()
        description = "desc of %s" % n
        ofc_description = self.get_ofc_description(description)

        tenant_path = "/tenants/%s" % _ofc(t)
        post_path = "%s/networks" % tenant_path
        body = {'description': ofc_description}
        network = {'id': _ofc(n)}
        self.do_request.return_value = network

        ret = self.driver.create_network(tenant_path, description, n)
        self.do_request.assert_called_once_with("POST", post_path, body=body)
        net_path = "/tenants/%s/networks/%s" % (_ofc(t), _ofc(n))
        self.assertEqual(ret, net_path)

    def testf_delete_network(self):
        t, n, p = self.get_ofc_item_random_params()

        net_path = "/tenants/%s/networks/%s" % (_ofc(t), _ofc(n))

        self.driver.delete_network(net_path)
        self.do_request.assert_called_once_with("DELETE", net_path)

    def _test_create_port(self, call_filters_arg=None, send_filters_arg=None):
        """Drive create_port, optionally passing a filters argument.

        :param call_filters_arg: filters argument handed to the driver
            (omitted entirely when None).
        :param send_filters_arg: 'filters' value expected in the POST body
            (absent from the body when None).
        """
        t, n, p = self.get_ofc_item_random_params()

        net_path = "/tenants/%s/networks/%s" % (_ofc(t), _ofc(n))
        post_path = "%s/ports" % net_path
        port_path = "/tenants/%s/networks/%s/ports/%s" % (_ofc(t), _ofc(n),
                                                          _ofc(p.id))
        body = {'datapath_id': p.datapath_id,
                'port': str(p.port_no),
                'vid': str(p.vlan_id)}
        if send_filters_arg is not None:
            body['filters'] = send_filters_arg
        port = {'id': _ofc(p.id)}
        self.do_request.return_value = port

        if call_filters_arg is not None:
            ret = self.driver.create_port(net_path, p, p.id, call_filters_arg)
        else:
            ret = self.driver.create_port(net_path, p, p.id)
        self.do_request.assert_called_once_with("POST", post_path, body=body)
        self.assertEqual(ret, port_path)

    def testg_create_port(self):
        self._test_create_port()

    def test_create_port_with_filters_argument(self):
        # If no filter support, 'filters' argument is passed to OFC.
        # Note that it will be overridden in a test class with filter support.
        self._test_create_port(call_filters_arg=['dummy'],
                               send_filters_arg=None)

    def testh_delete_port(self):
        t, n, p = self.get_ofc_item_random_params()

        port_path = "/tenants/%s/networks/%s/ports/%s" % (_ofc(t), _ofc(n),
                                                          _ofc(p.id))

        self.driver.delete_port(port_path)
        self.do_request.assert_called_once_with("DELETE", port_path)

    def test_filter_supported(self):
        self.assertEqual(self.filter_supported, self.driver.filter_supported())


class PFCDriverBaseTest(PFCDriverTestBase):
    """Tests of the OFC-id parsing helpers of PFCDriverBase."""

    def test_extract_ofc_network_id(self):
        network_id = '/tenants/tenant-a/networks/network-a'
        self.assertEqual('network-a',
                         self.driver._extract_ofc_network_id(network_id))

    def test_extract_ofc_network_id_failure(self):
        network_id = '/tenants/tenant-a/networks/network-a/dummy'
        self.assertRaises(pfc.InvalidOFCIdFormat,
                          self.driver._extract_ofc_network_id, network_id)

    def test_extract_ofc_port_id(self):
        port_id = '/tenants/tenant-a/networks/network-a/ports/port-a'
        self.assertEqual({'tenant': 'tenant-a',
                          'network': 'network-a',
                          'port': 'port-a'},
                         self.driver._extract_ofc_port_id(port_id))

    def test_extract_ofc_port_id_failure(self):
        port_id = '/tenants/tenant-a/dummy/network-a/ports/port-a'
        self.assertRaises(pfc.InvalidOFCIdFormat,
                          self.driver._extract_ofc_port_id, port_id)


class PFCV3DriverTest(PFCDriverTestBase):
    """PFC V3 has no tenant resource, so tenant ops must not hit the OFC."""
    driver = 'pfc_v3'

    def testa_create_tenant(self):
        t, n, p = self.get_ofc_item_random_params()
        ret = self.driver.create_tenant('dummy_desc', t)
        self.assertEqual(0, self.do_request.call_count)
        ofc_t_path = "/tenants/" + self._generate_ofc_tenant_id(t)
        self.assertEqual(ofc_t_path, ret)

    def testc_delete_tenant(self):
        t, n, p = self.get_ofc_item_random_params()
        path = "/tenants/%s" % _ofc(t)
        self.driver.delete_tenant(path)
        self.assertEqual(0, self.do_request.call_count)


class PFCV4DriverTest(PFCDriverTestBase):
    driver = 'pfc_v4'


class PFCV5DriverTest(PFCDriverTestBase):
    """PFC V5 adds router resources on top of the base driver API."""
    driver = 'pfc_v5'

    def test_create_router(self):
        t = uuidutils.generate_uuid()
        r = uuidutils.generate_uuid()
        description = 'dummy_router_desc'

        tenant_path = "/tenants/%s" % _ofc(t)
        post_path = "%s/routers" % tenant_path
        router = {'id': _ofc(r)}
        self.do_request.return_value = router

        ret = self.driver.create_router(tenant_path, description, r)
        self.do_request.assert_called_once_with("POST", post_path, body=None)
        router_path = "/tenants/%s/routers/%s" % (_ofc(t), _ofc(r))
        self.assertEqual(ret, router_path)

    def test_delete_router(self):
        t = uuidutils.generate_uuid()
        r = uuidutils.generate_uuid()

        router_path = "/tenants/%s/routers/%s" % (_ofc(t), _ofc(r))

        self.driver.delete_router(router_path)
        self.do_request.assert_called_once_with("DELETE", router_path)

    def test_add_router_interface(self):
        t = uuidutils.generate_uuid()
        r = uuidutils.generate_uuid()
        n = uuidutils.generate_uuid()
        p = uuidutils.generate_uuid()

        router_path = "/tenants/%s/routers/%s" % (_ofc(t), _ofc(r))
        infs_path = router_path + "/interfaces"
        net_path = "/tenants/%s/networks/%s" % (_ofc(t), _ofc(n))
        ip_address = '10.1.1.1/24'
        mac_address = '11:22:33:44:55:66'
        body = {'net_id': _ofc(n),
                'ip_address': ip_address,
                'mac_address': mac_address}
        inf = {'id': _ofc(p)}
        self.do_request.return_value = inf

        ret = self.driver.add_router_interface(router_path, net_path,
                                               ip_address, mac_address)
        self.do_request.assert_called_once_with("POST", infs_path, body=body)

        inf_path = "%s/interfaces/%s" % (router_path, _ofc(p))
        self.assertEqual(ret, inf_path)

    def test_update_router_interface(self):
        t = uuidutils.generate_uuid()
        r = uuidutils.generate_uuid()
        p = uuidutils.generate_uuid()

        router_path = "/tenants/%s/routers/%s" % (_ofc(t), _ofc(r))
        inf_path = "%s/interfaces/%s" % (router_path, _ofc(p))
        ip_address = '10.1.1.1/24'
        mac_address = '11:22:33:44:55:66'

        # The PUT body must contain only the attributes actually passed.
        self.driver.update_router_interface(inf_path, ip_address, mac_address)
        self.driver.update_router_interface(inf_path, ip_address=ip_address)
        self.driver.update_router_interface(inf_path, mac_address=mac_address)

        self.do_request.assert_has_calls([
            mock.call("PUT", inf_path, body={'ip_address': ip_address,
                                             'mac_address': mac_address}),
            mock.call("PUT", inf_path, body={'ip_address': ip_address}),
            mock.call("PUT", inf_path, body={'mac_address': mac_address}),
        ])

    def test_delete_router_interface(self):
        t = uuidutils.generate_uuid()
        r = uuidutils.generate_uuid()
        p = uuidutils.generate_uuid()

        router_path = "/tenants/%s/routers/%s" % (_ofc(t), _ofc(r))
        inf_path = "%s/interfaces/%s" % (router_path, _ofc(p))

        self.driver.delete_router_interface(inf_path)
        self.do_request.assert_called_once_with("DELETE", inf_path)

    def _get_route_id(self, dest, nexthop):
        # Matches the route-id format returned by the (mocked) OFC.
        dest = netaddr.IPNetwork(dest)
        return '-'.join([str(dest.network), nexthop, str(dest.netmask)])

    def test_add_router_route(self):
        t = uuidutils.generate_uuid()
        r = uuidutils.generate_uuid()

        router_path = "/tenants/%s/routers/%s" % (_ofc(t), _ofc(r))
        routes_path = router_path + "/routes"
        dest = '10.1.1.0/24'
        nexthop = '192.168.100.10'
        body = {'destination': dest, 'nexthop': nexthop}
        route_id = self._get_route_id(dest, nexthop)
        self.do_request.return_value = {'id': route_id}

        ret = self.driver.add_router_route(router_path, '10.1.1.0/24',
                                           '192.168.100.10')
        self.do_request.assert_called_once_with("POST", routes_path, body=body)
        route_path = routes_path + '/' + route_id
        self.assertEqual(ret, route_path)

    def test_delete_router_route(self):
        t = uuidutils.generate_uuid()
        r = uuidutils.generate_uuid()

        router_path = "/tenants/%s/routers/%s" % (_ofc(t), _ofc(r))
        routes_path = router_path + "/routes"

        route_id = self._get_route_id('10.1.1.0/24', '192.168.100.10')
        route_path = routes_path + '/' + route_id

        self.driver.delete_router_route(route_path)
        self.do_request.assert_called_once_with("DELETE", route_path)

    def test_list_router_routes(self):
        t = uuidutils.generate_uuid()
        r = uuidutils.generate_uuid()

        router_path = "/tenants/%s/routers/%s" % (_ofc(t), _ofc(r))
        routes_path = router_path + "/routes"

        routes = [('10.1.1.0/24', '192.168.100.10'),
                  ('10.2.2.0/20', '192.168.100.20')]
        data = {'routes': [{'id': self._get_route_id(route[0], route[1]),
                            'destination': route[0], 'nexthop': route[1]}
                           for route in routes]}
        self.do_request.return_value = data

        ret = self.driver.list_router_routes(router_path)
        self.do_request.assert_called_once_with("GET", routes_path)

        expected = [{'id': (routes_path + "/" +
                            self._get_route_id(route[0], route[1])),
                     'destination': route[0], 'nexthop': route[1]}
                    for route in routes]
        self.assertEqual(len(routes), len(ret))
        # Check the driver's return value, not the mocked response:
        # the original asserted data['routes'] == expected, which never
        # inspected ret at all.
        self.assertEqual(expected, ret)


class PFCFilterDriverTestMixin:
    """Packet-filter scenarios shared by filter-capable driver tests."""

    def _test_create_filter(self, filter_dict=None, filter_post=None,
                            apply_ports=None):
        """Drive create_filter and check the translated POST body.

        :param filter_dict: extra attributes merged into the input filter.
        :param filter_post: attributes expected in the POST body on top of
            the mandatory action/priority pair.
        :param apply_ports: optional (neutron-id, ofc-path) pairs.
        """
        t, n, p = self.get_ofc_item_random_params()

        filter_id = uuidutils.generate_uuid()
        f = {'priority': 123, 'action': "ACCEPT"}
        if filter_dict:
            f.update(filter_dict)

        net_path = "/networks/%s" % n
        body = {'action': 'pass', 'priority': 123}
        if filter_post:
            body.update(filter_post)

        self.do_request.return_value = {'id': filter_id}
        if apply_ports is not None:
            ret = self.driver.create_filter(net_path, f, p,
                                            apply_ports=apply_ports)
        else:
            ret = self.driver.create_filter(net_path, f, p)
        self.do_request.assert_called_once_with("POST", "/filters",
                                                body=body)
        self.assertEqual(ret, '/filters/%s' % filter_id)

    def test_create_filter_accept(self):
        self._test_create_filter(filter_dict={'action': 'ACCEPT'})

    def test_create_filter_allow(self):
        self._test_create_filter(filter_dict={'action': 'ALLOW'})

    def test_create_filter_deny(self):
        self._test_create_filter(filter_dict={'action': 'DENY'},
                                 filter_post={'action': 'drop'})

    def test_create_filter_drop(self):
        self._test_create_filter(filter_dict={'action': 'DROP'},
                                 filter_post={'action': 'drop'})

    def test_create_filter_empty_field_not_post(self):
        filter_dict = {'src_mac': '', 'src_cidr': '', 'src_port': 0,
                       'dst_mac': '', 'dst_cidr': '', 'dst_port': 0,
                       'protocol': '', 'eth_type': 0}
        filter_post = {}
        self._test_create_filter(filter_dict=filter_dict,
                                 filter_post=filter_post)

    def test_create_filter_none_field_not_post(self):
        filter_dict = {'src_mac': None, 'src_cidr': None, 'src_port': None,
                       'dst_mac': None, 'dst_cidr': None, 'dst_port': None,
                       'protocol': None, 'eth_type': None}
        filter_post = {}
        self._test_create_filter(filter_dict=filter_dict,
                                 filter_post=filter_post)

    def test_create_filter_all_fields(self):
        filter_dict = {'src_mac': '11:22:33:44:55:66',
                       'dst_mac': '77:88:99:aa:bb:cc',
                       'src_cidr': '192.168.3.0/24',
                       'dst_cidr': '10.11.240.0/20',
                       'src_port': 12345,
                       'dst_port': 23456,
                       'protocol': '0x10',
                       'eth_type': 0x800}
        filter_post = filter_dict.copy()
        # protocol is sent as an int, eth_type as a hex string.
        filter_post['protocol'] = 16
        filter_post['eth_type'] = '0x800'
        self._test_create_filter(filter_dict=filter_dict,
                                 filter_post=filter_post)

    def test_create_filter_cidr_ip_addr_32(self):
        filter_dict = {'src_cidr': '192.168.3.1',
                       'dst_cidr': '10.11.240.2'}
        filter_post = {'src_cidr': '192.168.3.1/32',
                       'dst_cidr': '10.11.240.2/32'}
        self._test_create_filter(filter_dict=filter_dict,
                                 filter_post=filter_post)

    def test_create_filter_proto_tcp(self):
        filter_dict = {'protocol': 'TCP'}
        filter_post = {'protocol': constants.PROTO_NUM_TCP}
        self._test_create_filter(filter_dict=filter_dict,
                                 filter_post=filter_post)

    def test_create_filter_proto_udp(self):
        filter_dict = {'protocol': 'UDP'}
        filter_post = {'protocol': constants.PROTO_NUM_UDP}
        self._test_create_filter(filter_dict=filter_dict,
                                 filter_post=filter_post)

    def test_create_filter_proto_icmp(self):
        filter_dict = {'protocol': 'ICMP'}
        filter_post = {'protocol': constants.PROTO_NUM_ICMP}
        self._test_create_filter(filter_dict=filter_dict,
                                 filter_post=filter_post)

    def test_create_filter_proto_arp_not_proto_post(self):
        filter_dict = {'protocol': 'ARP'}
        filter_post = {}
        self._test_create_filter(filter_dict=filter_dict,
                                 filter_post=filter_post)

    def test_create_filter_apply_ports(self):
        apply_ports = [
            ('p1', '/tenants/tenant-1/networks/network-1/ports/port-1'),
            ('p2', '/tenants/tenant-2/networks/network-2/ports/port-2')]
        filter_post = {'apply_ports': [
            {'tenant': 'tenant-1', 'network': 'network-1', 'port': 'port-1'},
            {'tenant': 'tenant-2', 'network': 'network-2', 'port': 'port-2'}
        ]}
        self._test_create_filter(filter_dict={}, apply_ports=apply_ports,
                                 filter_post=filter_post)

    def _test_update_filter(self, filter_dict=None, filter_post=None):
        filter_id = uuidutils.generate_uuid()
        ofc_filter_id = '/filters/%s' % filter_id
        self.driver.update_filter(ofc_filter_id, filter_dict)
        self.do_request.assert_called_once_with("PUT", ofc_filter_id,
                                                body=filter_post)

    def test_update_filter_empty_fields(self):
        filter_dict = {'src_mac': '', 'src_cidr': '', 'src_port': 0,
                       'dst_mac': '', 'dst_cidr': '', 'dst_port': 0,
                       'protocol': '', 'eth_type': 0}
        filter_post = {'src_mac': '', 'src_cidr': '', 'src_port': '',
                       'dst_mac': '', 'dst_cidr': '', 'dst_port': '',
                       'protocol': '', 'eth_type': ''}
        self._test_update_filter(filter_dict=filter_dict,
                                 filter_post=filter_post)

    def test_update_filter_none_fields(self):
        filter_dict = {'src_mac': None, 'src_cidr': None, 'src_port': None,
                       'dst_mac': None, 'dst_cidr': None, 'dst_port': None,
                       'protocol': None, 'eth_type': None}
        filter_post = {'src_mac': '', 'src_cidr': '', 'src_port': '',
                       'dst_mac': '', 'dst_cidr': '', 'dst_port': '',
                       'protocol': '', 'eth_type': ''}
        self._test_update_filter(filter_dict=filter_dict,
                                 filter_post=filter_post)

    def test_update_filter_all_fields(self):
        filter_dict = {'src_mac': '11:22:33:44:55:66',
                       'dst_mac': '77:88:99:aa:bb:cc',
                       'src_cidr': '192.168.3.0/24',
                       'dst_cidr': '10.11.240.0/20',
                       'src_port': 12345,
                       'dst_port': 23456,
                       'protocol': '0x10',
                       'eth_type': 0x800}
        filter_post = filter_dict.copy()
        filter_post['protocol'] = 16
        filter_post['eth_type'] = '0x800'
        self._test_update_filter(filter_dict=filter_dict,
                                 filter_post=filter_post)

    def test_update_filter_cidr_ip_addr_32(self):
        filter_dict = {'src_cidr': '192.168.3.1',
                       'dst_cidr': '10.11.240.2'}
        filter_post = {'src_cidr': '192.168.3.1/32',
                       'dst_cidr': '10.11.240.2/32'}
        self._test_update_filter(filter_dict=filter_dict,
                                 filter_post=filter_post)

    def test_update_filter_proto_tcp(self):
        filter_dict = {'protocol': 'TCP'}
        filter_post = {'protocol': constants.PROTO_NUM_TCP}
        self._test_update_filter(filter_dict=filter_dict,
                                 filter_post=filter_post)

    def test_update_filter_proto_udp(self):
        filter_dict = {'protocol': 'UDP'}
        filter_post = {'protocol': constants.PROTO_NUM_UDP}
        self._test_update_filter(filter_dict=filter_dict,
                                 filter_post=filter_post)

    def test_update_filter_proto_icmp(self):
        filter_dict = {'protocol': 'ICMP'}
        filter_post = {'protocol': constants.PROTO_NUM_ICMP}
        self._test_update_filter(filter_dict=filter_dict,
                                 filter_post=filter_post)

    def test_update_filter_proto_arp_post_empty(self):
        filter_dict = {'protocol': 'ARP'}
        filter_post = {'protocol': ''}
        self._test_update_filter(filter_dict=filter_dict,
                                 filter_post=filter_post)

    def test_delete_filter(self):
        t, n, p = self.get_ofc_item_random_params()
        f_path = "/filters/%s" % uuidutils.generate_uuid()
        self.driver.delete_filter(f_path)
        self.do_request.assert_called_once_with("DELETE", f_path)

    def _test_validate_filter_duplicate_priority(self, method, found_dup):
        with mock.patch('neutron.manager.NeutronManager'
                        '.get_plugin') as get_plugin:
            plugin = get_plugin.return_value
            if found_dup:
                plugin.get_packet_filters.return_value = ['found']
            else:
                plugin.get_packet_filters.return_value = []
            network_id = str(uuid.uuid4())
            filter_dict = {'network_id': network_id,
                           'priority': 12}
            if found_dup:
                self.assertRaises(ext_pf.PacketFilterDuplicatedPriority,
                                  method, 'context', filter_dict)
            else:
                self.assertIsNone(method('context', filter_dict))
            plugin.get_packet_filters.assert_called_once_with(
                'context',
                filters={'network_id': [network_id],
                         'priority': [12]},
                fields=['id'])

    def test_validate_filter_create_no_duplicate_priority(self):
        self._test_validate_filter_duplicate_priority(
            self.driver.validate_filter_create,
            found_dup=False)

    def test_validate_filter_create_duplicate_priority(self):
        self._test_validate_filter_duplicate_priority(
            self.driver.validate_filter_create,
            found_dup=True)

    def test_validate_filter_update_action_raises_error(self):
        filter_dict = {'action': 'ALLOW'}
        self.assertRaises(ext_pf.PacketFilterUpdateNotSupported,
                          self.driver.validate_filter_update,
                          'context', filter_dict)

    def test_validate_filter_update_priority_raises_error(self):
        filter_dict = {'priority': '13'}
        self.assertRaises(ext_pf.PacketFilterUpdateNotSupported,
                          self.driver.validate_filter_update,
                          'context', filter_dict)

    def _test_validate_filter_ipv6_not_supported(self, field, create=True):
        if create:
            filter_dict = {'network_id': 'net1', 'priority': 12}
            method = self.driver.validate_filter_create
        else:
            filter_dict = {}
            method = self.driver.validate_filter_update
        filter_dict[field] = 'fe80::1'
        self.assertRaises(ext_pf.PacketFilterIpVersionNonSupported,
                          method, 'context', filter_dict)
        filter_dict[field] = '10.56.3.3'
        self.assertIsNone(method('context', filter_dict))

    def test_validate_filter_create_ipv6_not_supported(self):
        with mock.patch('neutron.manager.NeutronManager'
                        '.get_plugin') as get_plugin:
            plugin = get_plugin.return_value
            plugin.get_packet_filters.return_value = []
            self._test_validate_filter_ipv6_not_supported(
                'src_cidr', create=True)
            self._test_validate_filter_ipv6_not_supported(
                'dst_cidr', create=True)

    def test_validate_filter_update_ipv6_not_supported(self):
        self._test_validate_filter_ipv6_not_supported('src_cidr', create=False)
        self._test_validate_filter_ipv6_not_supported('dst_cidr', create=False)

    def _test_validate_filter_priority_range_one(self, method, priority, ok):
        filter_dict = {'priority': priority, 'network_id': 'net1'}
        if ok:
            self.assertIsNone(method('context', filter_dict))
        else:
            self.assertRaises(ext_pf.PacketFilterInvalidPriority,
                              method, 'context', filter_dict)

    def test_validate_filter_create_priority_range(self):
        with mock.patch('neutron.manager.NeutronManager'
                        '.get_plugin') as get_plugin:
            plugin = get_plugin.return_value
            plugin.get_packet_filters.return_value = []

            # Valid priorities are 1..32766 (boundaries exclusive of 0
            # and 32767).
            method = self.driver.validate_filter_create
            self._test_validate_filter_priority_range_one(method, 0, False)
            self._test_validate_filter_priority_range_one(method, 1, True)
            self._test_validate_filter_priority_range_one(method, 32766, True)
            self._test_validate_filter_priority_range_one(method, 32767, False)


class PFCV51DriverTest(PFCFilterDriverTestMixin, PFCV5DriverTest):
    """PFC V5.1 supports packet filters."""
    driver = 'pfc_v51'
    filter_supported = True

    def test_create_port_with_filters_argument(self):
        # With filter support, filter ids are extracted from the OFC
        # filter paths and sent to the OFC.
        self._test_create_port(
            call_filters_arg=[('neutron-id-1', '/filters/filter-1'),
                              ('neutron-id-2', '/filters/filter-2')],
            send_filters_arg=['filter-1', 'filter-2'])


class PFCDriverStringTest(base.BaseTestCase):
    """Tests of PFC id/description string generation."""

    driver = 'neutron.plugins.nec.drivers.pfc.PFCDriverBase'

    def setUp(self):
        super(PFCDriverStringTest, self).setUp()
        self.driver = drivers.get_driver(self.driver)(TestConfig)

    def test_generate_pfc_id_uuid(self):
        id_str = uuidutils.generate_uuid()
        # Drop the UUID version digit and all hyphens, truncate to 31.
        exp_str = (id_str[:14] + id_str[15:]).replace('-', '')[:31]

        ret_str = self.driver._generate_pfc_id(id_str)
        self.assertEqual(exp_str, ret_str)

    def test_generate_pfc_id_uuid_no_hyphen(self):
        # Keystone tenant_id style uuid
        id_str = uuidutils.generate_uuid()
        id_no_hyphen = id_str.replace('-', '')
        exp_str = (id_str[:14] + id_str[15:]).replace('-', '')[:31]

        ret_str = self.driver._generate_pfc_id(id_no_hyphen)
        self.assertEqual(exp_str, ret_str)

    def test_generate_pfc_id_string(self):
        id_str = uuidutils.generate_uuid() + 'x'
        exp_str = id_str[:31].replace('-', '_')

        ret_str = self.driver._generate_pfc_id(id_str)
        self.assertEqual(exp_str, ret_str)

    def test_generate_pfc_desc(self):
        random_list = [random.choice(string.printable) for x in range(128)]
        random_str = ''.join(random_list)

        # ascii_letters instead of the Python-2-only, locale-dependent
        # string.letters: the PFC description charset is [A-Za-z0-9_]
        # and the input here is ASCII printable, so behavior is identical.
        accept_letters = string.ascii_letters + string.digits
        exp_list = [x if x in accept_letters else '_' for x in random_list]
        exp_str = ''.join(exp_list)[:127]

        ret_str = self.driver._generate_pfc_description(random_str)
        self.assertEqual(exp_str, ret_str)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Akihiro Motoki, NEC Corporation
#
# Port-binding tests for the NEC plugin: the shared portbindings test
# mixins plus NEC-specific checks on the binding:profile portinfo
# (datapath_id / port_no) handling and its validation.

from oslo.config import cfg
from testtools import matchers
from webob import exc

from neutron.common import exceptions as n_exc
from neutron import context
from neutron.extensions import portbindings
from neutron.tests.unit import _test_extension_portbindings as test_bindings
from neutron.tests.unit.nec import test_nec_plugin
from neutron.tests.unit import test_security_groups_rpc as test_sg_rpc


class TestNecPortBinding(test_bindings.PortBindingsTestCase,
                         test_nec_plugin.NecPluginV2TestCase):
    """Standard portbindings tests run against the NEC plugin (OVS VIF,
    hybrid firewall, security groups enabled)."""
    VIF_TYPE = portbindings.VIF_TYPE_OVS
    HAS_PORT_FILTER = True
    ENABLE_SG = True
    FIREWALL_DRIVER = test_sg_rpc.FIREWALL_HYBRID_DRIVER

    def setUp(self):
        # Firewall driver and SG toggle must be configured before the
        # plugin is instantiated by the base setUp.
        test_sg_rpc.set_firewall_driver(self.FIREWALL_DRIVER)
        cfg.CONF.set_override(
            'enable_security_group', self.ENABLE_SG,
            group='SECURITYGROUP')
        super(TestNecPortBinding, self).setUp()


class TestNecPortBindingNoSG(TestNecPortBinding):
    """Same as TestNecPortBinding but with security groups disabled."""
    HAS_PORT_FILTER = False
    ENABLE_SG = False
    FIREWALL_DRIVER = test_sg_rpc.FIREWALL_NOOP_DRIVER


class TestNecPortBindingHost(
    test_bindings.PortBindingsHostTestCaseMixin,
    test_nec_plugin.NecPluginV2TestCase):
    """binding:host_id tests from the shared mixin against the NEC plugin."""
    pass


class TestNecPortBindingPortInfo(test_nec_plugin.NecPluginV2TestCase):
    """Tests of portinfo handling via the binding:profile attribute."""

    def _get_portinfo(self, datapath_id=None, port_no=None):
        # Default portinfo used when the caller does not care about values.
        if datapath_id is None:
            datapath_id = '0xabc'
        if port_no is None:
            port_no = 1
        return {'datapath_id': datapath_id,
                'port_no': port_no}

    def _check_response_portbinding_profile(self, port, datapath_id=None,
                                            port_no=None):
        # The profile must contain exactly the two portinfo keys.
        expected = self._get_portinfo(datapath_id, port_no)
        profile = port[portbindings.PROFILE]
        self.assertEqual(len(profile), 2)
        self.assertEqual(profile['datapath_id'],
                         expected['datapath_id'])
        self.assertEqual(profile['port_no'],
                         expected['port_no'])

    def _check_response_portbinding_no_profile(self, port):
        # 'status' is a sanity check that we got a real port dict back.
        self.assertIn('status', port)
        self.assertNotIn(portbindings.PROFILE, port)

    def _get_non_admin_context(self):
        return context.Context(user_id=None,
                               tenant_id=self._tenant_id,
                               is_admin=False,
                               read_deleted="no")

    def test_port_create_portinfo(self):
        profile_arg = {portbindings.PROFILE: self._get_portinfo()}
        with self.port(arg_list=(portbindings.PROFILE,),
                       **profile_arg) as port:
            port_id = port['port']['id']
            # Check a response of create_port
            self._check_response_portbinding_profile(port['port'])
            self.assertEqual(self.ofc.create_ofc_port.call_count, 1)
            # Check a response of get_port
            ctx = context.get_admin_context()
            port = self._show('ports', port_id, neutron_context=ctx)['port']
            self._check_response_portbinding_profile(port)
            # By default user is admin - now test non admin user
            ctx = self._get_non_admin_context()
            non_admin_port = self._show(
                'ports', port_id, neutron_context=ctx)['port']
            self._check_response_portbinding_no_profile(non_admin_port)
            # port-update with non admin user should fail
            self._update('ports', port_id,
                         {'port': profile_arg},
                         expected_code=404,
                         neutron_context=ctx)

    def test_port_update_portinfo(self):
        profile_arg = {portbindings.PROFILE: self._get_portinfo()}
        with self.port() as port:
            # No portinfo at creation time, so no OFC port yet.
            self.assertEqual(self.ofc.create_ofc_port.call_count, 0)
            port_id = port['port']['id']
            # Check a response of create_port
            self._check_response_portbinding_no_profile(port['port'])
            # Check a response of update_port
            ctx = context.get_admin_context()
            port = self._update('ports', port_id, {'port': profile_arg},
                                neutron_context=ctx)['port']
            self.assertEqual(self.ofc.create_ofc_port.call_count, 1)
            self._check_response_portbinding_profile(port)
            port = self._show('ports', port_id, neutron_context=ctx)['port']
            self._check_response_portbinding_profile(port)

    def test_port_update_portinfo_detail(self):
        # Walk through the portinfo lifecycle: add, no-op update, modify
        # (recreate on OFC), then delete via an empty profile dict.
        with self.port() as port:
            self.assertEqual(self.ofc.create_ofc_port.call_count, 0)
            self.assertEqual(self.ofc.delete_ofc_port.call_count, 0)
            port_id = port['port']['id']
            ctx = context.get_admin_context()

            # add portinfo
            profile_arg = {portbindings.PROFILE: self._get_portinfo()}
            port = self._update('ports', port_id, {'port': profile_arg},
                                neutron_context=ctx)['port']
            self.assertEqual(self.ofc.create_ofc_port.call_count, 1)
            self.assertEqual(self.ofc.delete_ofc_port.call_count, 0)

            # portinfo unchanged
            port = self._update('ports', port_id, {'port': profile_arg},
                                neutron_context=ctx)['port']
            self.assertEqual(self.ofc.create_ofc_port.call_count, 1)
            self.assertEqual(self.ofc.delete_ofc_port.call_count, 0)

            # modify portinfo
            profile_arg = {portbindings.PROFILE:
                           self._get_portinfo(datapath_id='0x1234567890',
                                              port_no=99)}
            port = self._update('ports', port_id, {'port': profile_arg},
                                neutron_context=ctx)['port']
            self.assertEqual(self.ofc.create_ofc_port.call_count, 2)
            self.assertEqual(self.ofc.delete_ofc_port.call_count, 1)

            # delete portinfo with an empty dict
            profile_arg = {portbindings.PROFILE: {}}
            port = self._update('ports', port_id, {'port': profile_arg},
                                neutron_context=ctx)['port']
            self.assertEqual(self.ofc.create_ofc_port.call_count, 2)
            self.assertEqual(self.ofc.delete_ofc_port.call_count, 2)

    def test_port_update_portinfo_detail_clear_with_none(self):
        # A None profile must clear portinfo, just like an empty dict.
        with self.port() as port:
            self.assertEqual(self.ofc.create_ofc_port.call_count, 0)
            self.assertEqual(self.ofc.delete_ofc_port.call_count, 0)
            port_id = port['port']['id']
            ctx = context.get_admin_context()

            # add portinfo
            profile_arg = {portbindings.PROFILE: self._get_portinfo()}
            port = self._update('ports', port_id, {'port': profile_arg},
                                neutron_context=ctx)['port']
            self.assertEqual(self.ofc.create_ofc_port.call_count, 1)
            self.assertEqual(self.ofc.delete_ofc_port.call_count, 0)

            # delete portinfo with None
            profile_arg = {portbindings.PROFILE: None}
            port = self._update('ports', port_id, {'port': profile_arg},
                                neutron_context=ctx)['port']
            self.assertEqual(self.ofc.create_ofc_port.call_count, 1)
            self.assertEqual(self.ofc.delete_ofc_port.call_count, 1)

    def test_port_create_portinfo_with_empty_dict(self):
        # Creating with an empty profile is accepted but creates no OFC
        # port; portinfo can be added afterwards.
        profile_arg = {portbindings.PROFILE: {}}
        with self.port(arg_list=(portbindings.PROFILE,),
                       **profile_arg) as port:
            port_id = port['port']['id']

            # Check a response of create_port
            self._check_response_portbinding_no_profile(port['port'])
            self.assertEqual(self.ofc.create_ofc_port.call_count, 0)
            # add portinfo
            ctx = context.get_admin_context()
            profile_arg = {portbindings.PROFILE: self._get_portinfo()}
            port = self._update('ports', port_id, {'port': profile_arg},
                                neutron_context=ctx)['port']
            self._check_response_portbinding_profile(port)
            self.assertEqual(self.ofc.create_ofc_port.call_count, 1)
            self.assertEqual(self.ofc.delete_ofc_port.call_count, 0)

    def test_port_create_portinfo_with_none(self):
        # Same as the empty-dict case but with an explicit None profile.
        profile_arg = {portbindings.PROFILE: None}
        with self.port(arg_list=(portbindings.PROFILE,),
                       **profile_arg) as port:
            port_id = port['port']['id']

            # Check a response of create_port
            self._check_response_portbinding_no_profile(port['port'])
            self.assertEqual(self.ofc.create_ofc_port.call_count, 0)
            # add portinfo
            ctx = context.get_admin_context()
            profile_arg = {portbindings.PROFILE: self._get_portinfo()}
            port = self._update('ports', port_id, {'port': profile_arg},
                                neutron_context=ctx)['port']
            self._check_response_portbinding_profile(port)
            self.assertEqual(self.ofc.create_ofc_port.call_count, 1)
            self.assertEqual(self.ofc.delete_ofc_port.call_count, 0)

    def test_port_update_for_existing_port_with_different_padding_dpid(self):
        # '0xabcd' and '0x000000000000abcd' are the same datapath: an
        # update with a differently-padded dpid must be treated as no-op.
        ctx = context.get_admin_context()
        with self.port() as port:
            port_id = port['port']['id']
            portinfo = {'id': port_id, 'port_no': 123}
            self.rpcapi_update_ports(datapath_id='0x000000000000abcd',
                                     added=[portinfo])
            self.assertEqual(1, self.ofc.create_ofc_port.call_count)
            self.assertEqual(0, self.ofc.delete_ofc_port.call_count)

            profile_arg = {portbindings.PROFILE:
                           self._get_portinfo(datapath_id='0xabcd',
                                              port_no=123)}
            self._update('ports', port_id, {'port': profile_arg},
                         neutron_context=ctx)
            # Check create_ofc_port/delete_ofc_port are not called.
            self.assertEqual(1, self.ofc.create_ofc_port.call_count)
            self.assertEqual(0, self.ofc.delete_ofc_port.call_count)

    def test_port_create_portinfo_non_admin(self):
        # Non-admin users may not set binding:profile at create time.
        with self.network(set_context=True, tenant_id='test') as net1:
            with self.subnet(network=net1) as subnet1:
                profile_arg = {portbindings.PROFILE: self._get_portinfo()}
                try:
                    with self.port(subnet=subnet1,
                                   expected_res_status=403,
                                   arg_list=(portbindings.PROFILE,),
                                   set_context=True, tenant_id='test',
                                   **profile_arg):
                        pass
                except exc.HTTPClientError:
                    pass
                self.assertEqual(self.ofc.create_ofc_port.call_count, 0)

    def test_port_update_portinfo_non_admin(self):
        profile_arg = {portbindings.PROFILE: self._get_portinfo()}
        with self.network() as net1:
            with self.subnet(network=net1) as subnet1:
                with self.port(subnet=subnet1) as port:
                    # By default user is admin - now test non admin user
                    # Note that 404 is returned when prohibit by policy.
                    # See comment for PolicyNotAuthorized except clause
                    # in update() in neutron.api.v2.base.Controller.
                    port_id = port['port']['id']
                    ctx = self._get_non_admin_context()
                    port = self._update('ports', port_id,
                                        {'port': profile_arg},
                                        expected_code=404,
                                        neutron_context=ctx)
        self.assertEqual(self.ofc.create_ofc_port.call_count, 0)

    def test_port_create_portinfo_validation_called(self):
        # Check validate_portinfo is called.
        profile_arg = {portbindings.PROFILE:
                       {'datapath_id': '0xabc',
                        'port_no': 0xffff + 1}}
        try:
            with self.port(arg_list=(portbindings.PROFILE,),
                           expected_res_status=400,
                           **profile_arg):
                pass
        except exc.HTTPClientError:
            pass


class TestNecPortBindingValidatePortInfo(test_nec_plugin.NecPluginV2TestCase):
    """Direct tests of NECPluginV2._validate_portinfo."""

    def test_validate_portinfo_ok(self):
        profile = {'datapath_id': '0x1234567890abcdef',
                   'port_no': 123}
        portinfo = self.plugin._validate_portinfo(profile)
        # NOTE(mriedem): Handle long integer conversion universally.
        self.assertEqual(
            0x1234567890abcdef,
            int(portinfo['datapath_id'].replace('L', ''), 16)
        )
        self.assertEqual(portinfo['port_no'], 123)

    def test_validate_portinfo_ok_without_0x(self):
        # The '0x' prefix on datapath_id is optional.
        profile = {'datapath_id': '1234567890abcdef',
                   'port_no': 123}
        portinfo = self.plugin._validate_portinfo(profile)
        # NOTE(mriedem): Handle long integer conversion universally.
        self.assertEqual(
            0x1234567890abcdef,
            int(portinfo['datapath_id'].replace('L', ''), 16)
        )
        self.assertEqual(portinfo['port_no'], 123)

    def _test_validate_exception(self, profile, expected_msg):
        # Invalid profiles must raise InvalidInput with a message that
        # starts with the expected validation text.
        e = self.assertRaises(n_exc.InvalidInput,
                              self.plugin._validate_portinfo, profile)
        self.assertThat(str(e), matchers.StartsWith(expected_msg))

    def test_validate_portinfo_dict_validation(self):
        expected_msg = ("Invalid input for operation: "
                        "Validation of dictionary's keys failed.")

        # Both keys are mandatory; each missing key fails validation.
        profile = {'port_no': 123}
        self._test_validate_exception(profile, expected_msg)

        profile = {'datapath_id': '0xabcdef'}
        self._test_validate_exception(profile, expected_msg)

    def test_validate_portinfo_negative_port_number(self):
        profile = {'datapath_id': '0x1234567890abcdef',
                   'port_no': -1}
        expected_msg = ("Invalid input for operation: "
                        "'-1' should be non-negative.")
        self._test_validate_exception(profile, expected_msg)

    def test_validate_portinfo_invalid_datapath_id(self):
        expected_msg = ("Invalid input for operation: "
                        "datapath_id should be a hex string")

        # non hexidecimal datapath_id
        profile = {'datapath_id': 'INVALID',
                   'port_no': 123}
        self._test_validate_exception(profile, expected_msg)

        # Too big datapath_id
        profile = {'datapath_id': '0x10000000000000000',
                   'port_no': 123}
        self._test_validate_exception(profile, expected_msg)

    def test_validate_portinfo_too_big_port_number(self):
        profile = {'datapath_id': '0x1234567890abcdef',
                   'port_no': 65536}
        expected_msg = ("Invalid input for operation: "
                        "port_no should be [0:65535]")
        self._test_validate_exception(profile, expected_msg)
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import mock + +from neutron import manager +from neutron.plugins.nec.common import config +from neutron.tests.unit.nec import test_nec_plugin +from neutron.tests.unit import test_extension_extraroute as test_ext_route + + +class NecRouterL3AgentTestCase(test_ext_route.ExtraRouteDBIntTestCase): + + _plugin_name = test_nec_plugin.PLUGIN_NAME + + def setUp(self): + mock.patch(test_nec_plugin.OFC_MANAGER).start() + super(NecRouterL3AgentTestCase, self).setUp(self._plugin_name) + + plugin = manager.NeutronManager.get_plugin() + plugin.network_scheduler = None + plugin.router_scheduler = None + + def test_floatingip_with_invalid_create_port(self): + self._test_floatingip_with_invalid_create_port(self._plugin_name) + + +class NecRouterOpenFlowTestCase(NecRouterL3AgentTestCase): + + def setUp(self): + config.CONF.set_override('default_router_provider', + 'openflow', 'PROVIDER') + super(NecRouterOpenFlowTestCase, self).setUp() diff --git a/neutron/tests/unit/nec/test_security_group.py b/neutron/tests/unit/nec/test_security_group.py new file mode 100644 index 000000000..5b2264596 --- /dev/null +++ b/neutron/tests/unit/nec/test_security_group.py @@ -0,0 +1,101 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013, NEC Corporation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import contextlib + +import mock + +from neutron.api.v2 import attributes +from neutron.extensions import securitygroup as ext_sg +from neutron import manager +from neutron.plugins.nec.db import api as ndb # noqa +from neutron.tests.unit.nec import test_nec_plugin +from neutron.tests.unit import test_extension_security_group as test_sg +from neutron.tests.unit import test_security_groups_rpc as test_sg_rpc + +PLUGIN_NAME = test_nec_plugin.PLUGIN_NAME +OFC_MANAGER = 'neutron.plugins.nec.nec_plugin.ofc_manager.OFCManager' +NOTIFIER = 'neutron.plugins.nec.nec_plugin.NECPluginV2AgentNotifierApi' + + +class NecSecurityGroupsTestCase(test_sg.SecurityGroupDBTestCase): + + def setUp(self, plugin=None): + test_sg_rpc.set_firewall_driver(test_sg_rpc.FIREWALL_HYBRID_DRIVER) + mock.patch(NOTIFIER).start() + mock.patch(OFC_MANAGER).start() + self._attribute_map_bk_ = {} + for item in attributes.RESOURCE_ATTRIBUTE_MAP: + self._attribute_map_bk_[item] = (attributes. + RESOURCE_ATTRIBUTE_MAP[item]. 
+ copy()) + super(NecSecurityGroupsTestCase, self).setUp(PLUGIN_NAME) + plugin = manager.NeutronManager.get_plugin() + self.notifier = plugin.notifier + self.rpc = plugin.callback_sg + + def tearDown(self): + super(NecSecurityGroupsTestCase, self).tearDown() + attributes.RESOURCE_ATTRIBUTE_MAP = self._attribute_map_bk_ + + +class TestNecSGServerRpcCallBack( + test_sg_rpc.SGServerRpcCallBackMixinTestCase, + NecSecurityGroupsTestCase): + pass + + +class TestNecSGServerRpcCallBackXML( + test_sg_rpc.SGServerRpcCallBackMixinTestCaseXML, + NecSecurityGroupsTestCase): + pass + + +class TestNecSecurityGroups(NecSecurityGroupsTestCase, + test_sg.TestSecurityGroups, + test_sg_rpc.SGNotificationTestMixin): + + def test_security_group_get_port_from_device(self): + with contextlib.nested(self.network(), + self.security_group()) as (n, sg): + with self.subnet(n): + res = self._create_port(self.fmt, n['network']['id']) + port = self.deserialize(self.fmt, res) + port_id = port['port']['id'] + sg_id = sg['security_group']['id'] + fixed_ips = port['port']['fixed_ips'] + + data = {'port': {'fixed_ips': fixed_ips, + 'name': port['port']['name'], + ext_sg.SECURITYGROUPS: [sg_id]}} + req = self.new_update_request('ports', data, port_id) + res = self.deserialize(self.fmt, + req.get_response(self.api)) + + plugin = manager.NeutronManager.get_plugin() + port_dict = plugin.callback_sg.get_port_from_device(port_id) + self.assertEqual(port_id, port_dict['id']) + self.assertEqual([sg_id], + port_dict[ext_sg.SECURITYGROUPS]) + self.assertEqual([], port_dict['security_group_rules']) + self.assertEqual([fixed_ips[0]['ip_address']], + port_dict['fixed_ips']) + self._delete('ports', port_id) + + +class TestNecSecurityGroupsXML(TestNecSecurityGroups): + fmt = 'xml' diff --git a/neutron/tests/unit/nec/test_trema_driver.py b/neutron/tests/unit/nec/test_trema_driver.py new file mode 100644 index 000000000..484d65dd9 --- /dev/null +++ b/neutron/tests/unit/nec/test_trema_driver.py @@ -0,0 +1,353 @@ +# 
vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 NEC Corporation. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# @author: Ryota MIBU + +import random + +import mock +from six import moves + +from neutron.openstack.common import uuidutils +from neutron.plugins.nec.common import ofc_client +from neutron.plugins.nec.db import models as nmodels +from neutron.plugins.nec import drivers +from neutron.tests import base + + +class TestConfig(object): + """Configuration for this test.""" + host = '127.0.0.1' + port = 8888 + + +class TremaDriverTestBase(base.BaseTestCase): + + driver_name = "trema" + + def setUp(self): + super(TremaDriverTestBase, self).setUp() + self.driver = drivers.get_driver(self.driver_name)(TestConfig) + self.do_request = mock.patch.object(ofc_client.OFCClient, + 'do_request').start() + + def get_ofc_item_random_params(self): + """create random parameters for ofc_item test.""" + tenant_id = uuidutils.generate_uuid() + network_id = uuidutils.generate_uuid() + port_id = uuidutils.generate_uuid() + mac = ':'.join(['%x' % random.randint(0, 255) + for i in moves.xrange(6)]) + portinfo = nmodels.PortInfo(id=port_id, datapath_id="0x123456789", + port_no=1234, vlan_id=321, + mac=mac) + return tenant_id, network_id, portinfo + + +class TremaDriverNetworkTestBase(TremaDriverTestBase): + + def test_create_tenant(self): + t, n, p = self.get_ofc_item_random_params() + ret = self.driver.create_tenant('dummy_desc', t) + ofc_t_path = 
"/tenants/%s" % t + self.assertEqual(ofc_t_path, ret) + # There is no API call. + self.assertEqual(0, self.do_request.call_count) + + def test_update_tenant(self): + t, n, p = self.get_ofc_item_random_params() + path = "/tenants/%s" % t + self.driver.update_tenant(path, 'dummy_desc') + # There is no API call. + self.assertEqual(0, self.do_request.call_count) + + def testc_delete_tenant(self): + t, n, p = self.get_ofc_item_random_params() + path = "/tenants/%s" % t + self.driver.delete_tenant(path) + # There is no API call. + self.assertEqual(0, self.do_request.call_count) + + def testa_create_network(self): + t, n, p = self.get_ofc_item_random_params() + description = "desc of %s" % n + body = {'id': n, 'description': description} + ret = self.driver.create_network(t, description, n) + self.do_request.assert_called_once_with("POST", "/networks", body=body) + self.assertEqual(ret, '/networks/%s' % n) + + def testc_delete_network(self): + t, n, p = self.get_ofc_item_random_params() + net_path = "/networks/%s" % n + self.driver.delete_network(net_path) + self.do_request.assert_called_once_with("DELETE", net_path) + + +class TremaPortBaseDriverTest(TremaDriverNetworkTestBase): + + driver_name = "trema_port" + + def test_filter_supported(self): + self.assertTrue(self.driver.filter_supported()) + + def testd_create_port(self): + _t, n, p = self.get_ofc_item_random_params() + net_path = "/networks/%s" % n + body = {'id': p.id, + 'datapath_id': p.datapath_id, + 'port': str(p.port_no), + 'vid': str(p.vlan_id)} + ret = self.driver.create_port(net_path, p, p.id) + self.do_request.assert_called_once_with( + "POST", "/networks/%s/ports" % n, body=body) + self.assertEqual(ret, '/networks/%s/ports/%s' % (n, p.id)) + + def testd_delete_port(self): + t, n, p = self.get_ofc_item_random_params() + p_path = "/networks/%s/ports/%s" % (n, p.id) + self.driver.delete_port(p_path) + self.do_request.assert_called_once_with("DELETE", p_path) + + +class 
TremaPortMACBaseDriverTest(TremaDriverNetworkTestBase): + + driver_name = "trema_portmac" + + def test_filter_supported(self): + self.assertTrue(self.driver.filter_supported()) + + def testd_create_port(self): + t, n, p = self.get_ofc_item_random_params() + dummy_port = "dummy-%s" % p.id + + net_path = "/networks/%s" % n + path_1 = "/networks/%s/ports" % n + body_1 = {'id': dummy_port, + 'datapath_id': p.datapath_id, + 'port': str(p.port_no), + 'vid': str(p.vlan_id)} + path_2 = "/networks/%s/ports/%s/attachments" % (n, dummy_port) + body_2 = {'id': p.id, 'mac': p.mac} + path_3 = "/networks/%s/ports/%s" % (n, dummy_port) + ret = self.driver.create_port(net_path, p, p.id) + + self.do_request.assert_has_calls([ + mock.call("POST", path_1, body=body_1), + mock.call("POST", path_2, body=body_2), + mock.call("DELETE", path_3) + ]) + port_path = "/networks/%s/ports/%s/attachments/%s" % (n, dummy_port, + p.id) + self.assertEqual(ret, port_path) + + def testd_delete_port(self): + t, n, p = self.get_ofc_item_random_params() + dummy_port = "dummy-%s" % p.id + path = "/networks/%s/ports/%s/attachments/%s" % (n, dummy_port, p.id) + self.driver.delete_port(path) + self.do_request.assert_called_once_with("DELETE", path) + + +class TremaMACBaseDriverTest(TremaDriverNetworkTestBase): + + driver_name = "trema_mac" + + def test_filter_supported(self): + self.assertFalse(self.driver.filter_supported()) + + def testd_create_port(self): + t, n, p = self.get_ofc_item_random_params() + net_path = "/networks/%s" % n + path = "/networks/%s/attachments" % n + body = {'id': p.id, 'mac': p.mac} + ret = self.driver.create_port(net_path, p, p.id) + self.do_request.assert_called_once_with("POST", path, body=body) + self.assertEqual(ret, '/networks/%s/attachments/%s' % (n, p.id)) + + def testd_delete_port(self): + t, n, p = self.get_ofc_item_random_params() + path = "/networks/%s/attachments/%s" % (n, p.id) + self.driver.delete_port(path) + self.do_request.assert_called_once_with("DELETE", path) + 
+ +class TremaFilterDriverTest(TremaDriverTestBase): + def _test_create_filter(self, filter_dict=None, filter_post=None, + filter_wildcards=None, no_portinfo=False): + t, n, p = self.get_ofc_item_random_params() + src_mac = ':'.join(['%x' % random.randint(0, 255) + for i in moves.xrange(6)]) + if filter_wildcards is None: + filter_wildcards = [] + + f = {'tenant_id': t, + 'id': uuidutils.generate_uuid(), + 'network_id': n, + 'priority': 123, + 'action': "ACCEPT", + 'in_port': p.id, + 'src_mac': src_mac, + 'dst_mac': "", + 'eth_type': 0, + 'src_cidr': "", + 'dst_cidr': "", + 'src_port': 0, + 'dst_port': 0, + 'protocol': "TCP", + 'admin_state_up': True, + 'status': "ACTIVE"} + if filter_dict: + f.update(filter_dict) + + net_path = "/networks/%s" % n + + all_wildcards_ofp = ['dl_vlan', 'dl_vlan_pcp', 'nw_tos', + 'in_port', 'dl_src', 'dl_dst', + 'nw_src', 'nw_dst', + 'dl_type', 'nw_proto', + 'tp_src', 'tp_dst'] + all_wildcards_non_ofp = ['in_datapath_id', 'slice'] + + body = {'id': f['id'], + 'action': 'ALLOW', + 'priority': 123, + 'slice': n, + 'in_datapath_id': '0x123456789', + 'in_port': 1234, + 'nw_proto': '0x6', + 'dl_type': '0x800', + 'dl_src': src_mac} + if filter_post: + body.update(filter_post) + + if no_portinfo: + filter_wildcards += ['in_datapath_id', 'in_port'] + p = None + + for field in filter_wildcards: + if field in body: + del body[field] + + ofp_wildcards = ["%s:32" % _f if _f in ['nw_src', 'nw_dst'] else _f + for _f in all_wildcards_ofp if _f not in body] + body['ofp_wildcards'] = set(ofp_wildcards) + + non_ofp_wildcards = [_f for _f in all_wildcards_non_ofp + if _f not in body] + if non_ofp_wildcards: + body['wildcards'] = set(non_ofp_wildcards) + + ret = self.driver.create_filter(net_path, f, p, f['id']) + # The content of 'body' is checked below. 
+ self.do_request.assert_called_once_with("POST", "/filters", + body=mock.ANY) + self.assertEqual(ret, '/filters/%s' % f['id']) + + # ofp_wildcards and wildcards in body are comma-separated + # string but the order of elements are not considered, + # so we check these fields as set. + actual_body = self.do_request.call_args[1]['body'] + if 'ofp_wildcards' in actual_body: + ofp_wildcards = actual_body['ofp_wildcards'].split(',') + actual_body['ofp_wildcards'] = set(ofp_wildcards) + if 'wildcards' in actual_body: + actual_body['wildcards'] = set(actual_body['wildcards'].split(',')) + self.assertEqual(body, actual_body) + + def test_create_filter_accept(self): + self._test_create_filter(filter_dict={'action': 'ACCEPT'}) + + def test_create_filter_allow(self): + self._test_create_filter(filter_dict={'action': 'ALLOW'}) + + def test_create_filter_deny(self): + self._test_create_filter(filter_dict={'action': 'DENY'}, + filter_post={'action': 'DENY'}) + + def test_create_filter_drop(self): + self._test_create_filter(filter_dict={'action': 'DROP'}, + filter_post={'action': 'DENY'}) + + def test_create_filter_no_port(self): + self._test_create_filter(no_portinfo=True) + + def test_create_filter_src_mac_wildcard(self): + self._test_create_filter(filter_dict={'src_mac': ''}, + filter_wildcards=['dl_src']) + + def test_create_filter_dst_mac(self): + dst_mac = ':'.join(['%x' % random.randint(0, 255) + for i in moves.xrange(6)]) + self._test_create_filter(filter_dict={'dst_mac': dst_mac}, + filter_post={'dl_dst': dst_mac}) + + def test_create_filter_src_cidr(self): + src_cidr = '10.2.0.0/24' + self._test_create_filter(filter_dict={'src_cidr': src_cidr}, + filter_post={'nw_src': src_cidr}) + + def test_create_filter_dst_cidr(self): + dst_cidr = '192.168.10.0/24' + self._test_create_filter(filter_dict={'dst_cidr': dst_cidr}, + filter_post={'nw_dst': dst_cidr}) + + def test_create_filter_proto_icmp(self): + self._test_create_filter( + filter_dict={'protocol': 'icmp'}, + 
filter_post={'dl_type': '0x800', 'nw_proto': '0x1'}) + + def test_create_filter_proto_tcp(self): + self._test_create_filter( + filter_dict={'protocol': 'tcp'}, + filter_post={'dl_type': '0x800', 'nw_proto': '0x6'}) + + def test_create_filter_proto_udp(self): + self._test_create_filter( + filter_dict={'protocol': 'udp'}, + filter_post={'dl_type': '0x800', 'nw_proto': '0x11'}) + + def test_create_filter_proto_arp(self): + self._test_create_filter( + filter_dict={'protocol': 'arp'}, + filter_post={'dl_type': '0x806'}, + filter_wildcards=['nw_proto']) + + def test_create_filter_proto_misc(self): + self._test_create_filter( + filter_dict={'protocol': '0x33', 'eth_type': '0x900'}, + filter_post={'dl_type': '0x900', 'nw_proto': '0x33'}) + + def test_create_filter_proto_misc_dl_type_wildcard(self): + self._test_create_filter( + filter_dict={'protocol': '0x33', 'ether_type': ''}, + filter_post={'nw_proto': '0x33'}, + filter_wildcards=['dl_type']) + + def test_create_filter_proto_wildcard(self): + self._test_create_filter( + filter_dict={'protocol': ''}, + filter_wildcards=['dl_type', 'nw_proto']) + + def test_create_filter_src_dst_port(self): + self._test_create_filter(filter_dict={'src_port': 8192, + 'dst_port': 4096}, + filter_post={'tp_src': '0x2000', + 'tp_dst': '0x1000'}) + + def testb_delete_filter(self): + t, n, p = self.get_ofc_item_random_params() + f_path = "/filters/%s" % uuidutils.generate_uuid() + self.driver.delete_filter(f_path) + self.do_request.assert_called_once_with("DELETE", f_path) diff --git a/neutron/tests/unit/nec/test_utils.py b/neutron/tests/unit/nec/test_utils.py new file mode 100644 index 000000000..c8a8ac7bc --- /dev/null +++ b/neutron/tests/unit/nec/test_utils.py @@ -0,0 +1,31 @@ +# Copyright 2014 NEC Corporation. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from neutron.plugins.nec.common import utils +from neutron.tests import base + + +class NecUtilsTest(base.BaseTestCase): + + def test_cmp_dpid(self): + self.assertTrue(utils.cmp_dpid('0xabcd', '0xabcd')) + self.assertTrue(utils.cmp_dpid('abcd', '0xabcd')) + self.assertTrue(utils.cmp_dpid('0x000000000000abcd', '0xabcd')) + self.assertTrue(utils.cmp_dpid('0x000000000000abcd', '0x00abcd')) + self.assertFalse(utils.cmp_dpid('0x000000000000abcd', '0xabc0')) + self.assertFalse(utils.cmp_dpid('0x000000000000abcd', '0x00abc0')) + + def test_cmp_dpid_with_exception(self): + self.assertFalse(utils.cmp_dpid('0xabcx', '0xabcx')) + self.assertFalse(utils.cmp_dpid(None, None)) diff --git a/neutron/tests/unit/notifiers/__init__.py b/neutron/tests/unit/notifiers/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/neutron/tests/unit/notifiers/test_notifiers_nova.py b/neutron/tests/unit/notifiers/test_notifiers_nova.py new file mode 100644 index 000000000..7972ebf55 --- /dev/null +++ b/neutron/tests/unit/notifiers/test_notifiers_nova.py @@ -0,0 +1,305 @@ +# Copyright 2014 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +import mock +from novaclient import exceptions as nova_exceptions +from sqlalchemy.orm import attributes as sql_attr + +from oslo.config import cfg + +from neutron.common import constants +from neutron.db import models_v2 +from neutron.notifiers import nova +from neutron.openstack.common import uuidutils +from neutron.tests import base + + +class TestNovaNotify(base.BaseTestCase): + def setUp(self, plugin=None): + super(TestNovaNotify, self).setUp() + + class FakePlugin(object): + def get_port(self, context, port_id): + device_id = '32102d7b-1cf4-404d-b50a-97aae1f55f87' + return {'device_id': device_id, + 'device_owner': 'compute:None'} + + self.nova_notifier = nova.Notifier() + self.nova_notifier._plugin_ref = FakePlugin() + + def test_notify_port_status_all_values(self): + states = [constants.PORT_STATUS_ACTIVE, constants.PORT_STATUS_DOWN, + constants.PORT_STATUS_ERROR, constants.PORT_STATUS_BUILD, + sql_attr.NO_VALUE] + device_id = '32102d7b-1cf4-404d-b50a-97aae1f55f87' + # test all combinations + for previous_port_status in states: + for current_port_status in states: + + port = models_v2.Port(id='port-uuid', device_id=device_id, + device_owner="compute:", + status=current_port_status) + self._record_port_status_changed_helper(current_port_status, + previous_port_status, + port) + + def test_port_without_uuid_device_id_no_notify(self): + port = models_v2.Port(id='port-uuid', device_id='compute_probe:', + device_owner='compute:', + status=constants.PORT_STATUS_ACTIVE) + self._record_port_status_changed_helper(constants.PORT_STATUS_ACTIVE, + 
sql_attr.NO_VALUE, + port) + + def test_port_without_device_owner_no_notify(self): + device_id = '32102d7b-1cf4-404d-b50a-97aae1f55f87' + port = models_v2.Port(id='port-uuid', device_id=device_id, + status=constants.PORT_STATUS_ACTIVE) + self._record_port_status_changed_helper(constants.PORT_STATUS_ACTIVE, + sql_attr.NO_VALUE, + port) + + def test_port_without_device_id_no_notify(self): + port = models_v2.Port(id='port-uuid', device_owner="network:dhcp", + status=constants.PORT_STATUS_ACTIVE) + self._record_port_status_changed_helper(constants.PORT_STATUS_ACTIVE, + sql_attr.NO_VALUE, + port) + + def test_port_without_id_no_notify(self): + device_id = '32102d7b-1cf4-404d-b50a-97aae1f55f87' + port = models_v2.Port(device_id=device_id, + device_owner="compute:", + status=constants.PORT_STATUS_ACTIVE) + self._record_port_status_changed_helper(constants.PORT_STATUS_ACTIVE, + sql_attr.NO_VALUE, + port) + + def test_non_compute_instances_no_notify(self): + device_id = '32102d7b-1cf4-404d-b50a-97aae1f55f87' + port = models_v2.Port(id='port-uuid', device_id=device_id, + device_owner="network:dhcp", + status=constants.PORT_STATUS_ACTIVE) + self._record_port_status_changed_helper(constants.PORT_STATUS_ACTIVE, + sql_attr.NO_VALUE, + port) + + def _record_port_status_changed_helper(self, current_port_status, + previous_port_status, port): + + if not (port.device_id and port.id and port.device_owner and + port.device_owner.startswith('compute:') and + uuidutils.is_uuid_like(port.device_id)): + return + + if (previous_port_status == constants.PORT_STATUS_ACTIVE and + current_port_status == constants.PORT_STATUS_DOWN): + event_name = nova.VIF_UNPLUGGED + + elif (previous_port_status in [sql_attr.NO_VALUE, + constants.PORT_STATUS_DOWN, + constants.PORT_STATUS_BUILD] + and current_port_status in [constants.PORT_STATUS_ACTIVE, + constants.PORT_STATUS_ERROR]): + event_name = nova.VIF_PLUGGED + + else: + return + + status = nova.NEUTRON_NOVA_EVENT_STATUS_MAP.get(current_port_status) + 
self.nova_notifier.record_port_status_changed(port, + current_port_status, + previous_port_status, + None) + + event = {'server_uuid': port.device_id, 'status': status, + 'name': event_name, 'tag': 'port-uuid'} + self.assertEqual(event, port._notify_event) + + def test_update_fixed_ip_changed(self): + device_id = '32102d7b-1cf4-404d-b50a-97aae1f55f87' + returned_obj = {'port': + {'device_owner': u'compute:dfd', + 'id': u'bee50827-bcee-4cc8-91c1-a27b0ce54222', + 'device_id': device_id}} + + expected_event = {'server_uuid': device_id, + 'name': 'network-changed'} + event = self.nova_notifier.create_port_changed_event('update_port', + {}, returned_obj) + self.assertEqual(event, expected_event) + + def test_create_floatingip_notify(self): + device_id = '32102d7b-1cf4-404d-b50a-97aae1f55f87' + returned_obj = {'floatingip': + {'port_id': u'bee50827-bcee-4cc8-91c1-a27b0ce54222'}} + + expected_event = {'server_uuid': device_id, + 'name': 'network-changed'} + event = self.nova_notifier.create_port_changed_event( + 'create_floatingip', {}, returned_obj) + self.assertEqual(event, expected_event) + + def test_create_floatingip_no_port_id_no_notify(self): + returned_obj = {'floatingip': + {'port_id': None}} + + event = self.nova_notifier.create_port_changed_event( + 'create_floatingip', {}, returned_obj) + self.assertFalse(event, None) + + def test_delete_floatingip_notify(self): + device_id = '32102d7b-1cf4-404d-b50a-97aae1f55f87' + returned_obj = {'floatingip': + {'port_id': u'bee50827-bcee-4cc8-91c1-a27b0ce54222'}} + + expected_event = {'server_uuid': device_id, + 'name': 'network-changed'} + event = self.nova_notifier.create_port_changed_event( + 'delete_floatingip', {}, returned_obj) + self.assertEqual(expected_event, event) + + def test_delete_floatingip_no_port_id_no_notify(self): + returned_obj = {'floatingip': + {'port_id': None}} + + event = self.nova_notifier.create_port_changed_event( + 'delete_floatingip', {}, returned_obj) + self.assertEqual(event, None) + + def 
test_associate_floatingip_notify(self): + device_id = '32102d7b-1cf4-404d-b50a-97aae1f55f87' + returned_obj = {'floatingip': + {'port_id': u'5a39def4-3d3f-473d-9ff4-8e90064b9cc1'}} + original_obj = {'port_id': None} + + expected_event = {'server_uuid': device_id, + 'name': 'network-changed'} + event = self.nova_notifier.create_port_changed_event( + 'update_floatingip', original_obj, returned_obj) + self.assertEqual(expected_event, event) + + def test_disassociate_floatingip_notify(self): + device_id = '32102d7b-1cf4-404d-b50a-97aae1f55f87' + returned_obj = {'floatingip': {'port_id': None}} + original_obj = {'port_id': '5a39def4-3d3f-473d-9ff4-8e90064b9cc1'} + + expected_event = {'server_uuid': device_id, + 'name': 'network-changed'} + + event = self.nova_notifier.create_port_changed_event( + 'update_floatingip', original_obj, returned_obj) + self.assertEqual(expected_event, event) + + def test_no_notification_notify_nova_on_port_data_changes_false(self): + cfg.CONF.set_override('notify_nova_on_port_data_changes', False) + + with mock.patch.object(self.nova_notifier, + 'send_events') as send_events: + self.nova_notifier.send_network_change('update_floatingip', + {}, {}) + self.assertFalse(send_events.called, False) + + def test_nova_send_events_returns_bad_list(self): + with mock.patch.object( + self.nova_notifier.nclient.server_external_events, + 'create') as nclient_create: + nclient_create.return_value = 'i am a string!' 
+ self.nova_notifier.send_events() + + def test_nova_send_event_rasies_404(self): + with mock.patch.object( + self.nova_notifier.nclient.server_external_events, + 'create') as nclient_create: + nclient_create.side_effect = nova_exceptions.NotFound + self.nova_notifier.send_events() + + def test_nova_send_events_raises(self): + with mock.patch.object( + self.nova_notifier.nclient.server_external_events, + 'create') as nclient_create: + nclient_create.side_effect = Exception + self.nova_notifier.send_events() + + def test_nova_send_events_returns_non_200(self): + device_id = '32102d7b-1cf4-404d-b50a-97aae1f55f87' + with mock.patch.object( + self.nova_notifier.nclient.server_external_events, + 'create') as nclient_create: + nclient_create.return_value = [{'code': 404, + 'name': 'network-changed', + 'server_uuid': device_id}] + self.nova_notifier.pending_events.append( + {'name': 'network-changed', 'server_uuid': device_id}) + self.nova_notifier.send_events() + + def test_nova_send_events_return_200(self): + device_id = '32102d7b-1cf4-404d-b50a-97aae1f55f87' + with mock.patch.object( + self.nova_notifier.nclient.server_external_events, + 'create') as nclient_create: + nclient_create.return_value = [{'code': 200, + 'name': 'network-changed', + 'server_uuid': device_id}] + self.nova_notifier.pending_events.append( + {'name': 'network-changed', 'server_uuid': device_id}) + self.nova_notifier.send_events() + + def test_nova_send_events_multiple(self): + device_id = '32102d7b-1cf4-404d-b50a-97aae1f55f87' + with mock.patch.object( + self.nova_notifier.nclient.server_external_events, + 'create') as nclient_create: + nclient_create.return_value = [{'code': 200, + 'name': 'network-changed', + 'server_uuid': device_id}, + {'code': 200, + 'name': 'network-changed', + 'server_uuid': device_id}] + self.nova_notifier.pending_events.append( + {'name': 'network-changed', 'server_uuid': device_id}) + self.nova_notifier.pending_events.append( + {'name': 'network-changed', 'server_uuid': 
device_id}) + self.nova_notifier.send_events() + + def test_queue_event_no_event(self): + with mock.patch('eventlet.spawn_n') as spawn_n: + self.nova_notifier.queue_event(None) + self.assertEqual(0, len(self.nova_notifier.pending_events)) + self.assertEqual(0, spawn_n.call_count) + + def test_queue_event_first_event(self): + with mock.patch('eventlet.spawn_n') as spawn_n: + self.nova_notifier.queue_event(mock.Mock()) + self.assertEqual(1, len(self.nova_notifier.pending_events)) + self.assertEqual(1, spawn_n.call_count) + + def test_queue_event_multiple_events(self): + with mock.patch('eventlet.spawn_n') as spawn_n: + events = 6 + for i in range(0, events): + self.nova_notifier.queue_event(mock.Mock()) + self.assertEqual(events, len(self.nova_notifier.pending_events)) + self.assertEqual(1, spawn_n.call_count) + + def test_queue_event_call_send_events(self): + with mock.patch.object(self.nova_notifier, + 'send_events') as send_events: + with mock.patch('eventlet.spawn_n') as spawn_n: + spawn_n.side_effect = lambda func: func() + self.nova_notifier.queue_event(mock.Mock()) + self.assertFalse(self.nova_notifier._waiting_to_send) + send_events.assert_called_once_with() diff --git a/neutron/tests/unit/nuage/__init__.py b/neutron/tests/unit/nuage/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/neutron/tests/unit/nuage/fake_nuageclient.py b/neutron/tests/unit/nuage/fake_nuageclient.py new file mode 100644 index 000000000..2abcd8ddc --- /dev/null +++ b/neutron/tests/unit/nuage/fake_nuageclient.py @@ -0,0 +1,115 @@ +# Copyright 2014 Alcatel-Lucent USA Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Ronak Shah, Aniket Dandekar, Nuage Networks, Alcatel-Lucent USA Inc. + +from neutron.openstack.common import uuidutils + + +class FakeNuageClient(object): + def __init__(self, server, base_uri, serverssl, + serverauth, auth_resource, organization): + pass + + def rest_call(self, action, resource, data, extra_headers=None): + pass + + def vms_on_l2domain(self, l2dom_id): + pass + + def create_subnet(self, neutron_subnet, params): + nuage_subnet = { + 'nuage_l2template_id': uuidutils.generate_uuid(), + 'nuage_userid': uuidutils.generate_uuid(), + 'nuage_groupid': uuidutils.generate_uuid(), + 'nuage_l2domain_id': uuidutils.generate_uuid() + } + return nuage_subnet + + def delete_subnet(self, id, template_id): + pass + + def create_router(self, neutron_router, router, params): + nuage_router = { + 'nuage_userid': uuidutils.generate_uuid(), + 'nuage_groupid': uuidutils.generate_uuid(), + 'nuage_domain_id': uuidutils.generate_uuid(), + 'nuage_def_zone_id': uuidutils.generate_uuid(), + } + return nuage_router + + def delete_router(self, id): + pass + + def delete_user(self, id): + pass + + def delete_group(self, id): + pass + + def create_domain_subnet(self, neutron_subnet, params): + pass + + def delete_domain_subnet(self, id): + pass + + def create_net_partition(self, params): + fake_net_partition = { + 'nuage_entid': uuidutils.generate_uuid(), + 'l3dom_id': uuidutils.generate_uuid(), + 'l2dom_id': uuidutils.generate_uuid(), + } + return fake_net_partition + + def get_def_netpartition_data(self, default_net_part): + if default_net_part == 
'default_test_np': + fake_defnetpart_data = { + 'np_id': uuidutils.generate_uuid(), + 'l3dom_tid': uuidutils.generate_uuid(), + 'l2dom_tid': uuidutils.generate_uuid(), + } + return fake_defnetpart_data + + def delete_net_partition(self, id, l3dom_id=None, l2dom_id=None): + pass + + def check_del_def_net_partition(self, ent_name): + pass + + def create_vms(self, params): + pass + + def delete_vms(self, params): + pass + + def create_nuage_staticroute(self, params): + return uuidutils.generate_uuid() + + def delete_nuage_staticroute(self, id): + pass + + def create_nuage_sharedresource(self, params): + return uuidutils.generate_uuid() + + def delete_nuage_sharedresource(self, id): + pass + + def create_nuage_floatingip(self, params): + return uuidutils.generate_uuid() + + def delete_nuage_floatingip(self, id): + pass + + def update_nuage_vm_vport(self, params): + pass diff --git a/neutron/tests/unit/nuage/test_netpartition.py b/neutron/tests/unit/nuage/test_netpartition.py new file mode 100644 index 000000000..bf51bc40a --- /dev/null +++ b/neutron/tests/unit/nuage/test_netpartition.py @@ -0,0 +1,100 @@ +# Copyright 2014 Alcatel-Lucent USA Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Ronak Shah, Nuage Networks, Alcatel-Lucent USA Inc. 
import contextlib
import uuid
import webob.exc

from neutron import manager
from neutron.plugins.nuage.extensions import netpartition as netpart_ext
from neutron.tests.unit.nuage import test_nuage_plugin
from neutron.tests.unit import test_extensions


class NetPartitionTestExtensionManager(object):
    # Minimal extension manager exposing only the net-partition
    # resources; no actions or request extensions are added.

    def get_resources(self):
        return netpart_ext.Netpartition.get_resources()

    def get_actions(self):
        return []

    def get_request_extensions(self):
        return []


class NetPartitionTestCase(test_nuage_plugin.NuagePluginV2TestCase):
    """CRUD tests for the Nuage-specific net-partition extension."""

    def setUp(self):
        ext_mgr = NetPartitionTestExtensionManager()
        super(NetPartitionTestCase, self).setUp()
        # The extension API is wired separately from the core plugin API
        # set up by the parent class.
        self.ext_api = test_extensions.setup_extensions_middleware(ext_mgr)

    def _make_netpartition(self, fmt, name):
        """Create a net-partition through the extension API.

        Returns the deserialized response body; raises
        webob.exc.HTTPClientError for any >= 400 response.
        """
        data = {
            'net_partition': {
                'name': name,
                # NOTE(review): uuid.uuid4() is a UUID object, not a
                # string; sibling tests pass string tenant ids -- confirm
                # the request serializer accepts the object form.
                'tenant_id': uuid.uuid4()
            }
        }
        netpart_req = self.new_create_request('net-partitions', data, fmt)
        resp = netpart_req.get_response(self.ext_api)
        if resp.status_int >= webob.exc.HTTPClientError.code:
            raise webob.exc.HTTPClientError(code=resp.status_int)
        return self.deserialize(fmt, resp)

    def _del_netpartition(self, id):
        self._delete('net-partitions', id)

    @contextlib.contextmanager
    def netpartition(self, name='netpartition1',
                     do_delete=True,
                     fmt=None,
                     **kwargs):
        # Yields a freshly created net-partition, deleting it on exit
        # unless do_delete is False.
        netpart = self._make_netpartition(fmt or self.fmt, name)

        yield netpart
        if do_delete:
            self._del_netpartition(netpart['net_partition']['id'])

    def test_create_netpartition(self):
        name = 'netpart1'
        keys = [('name', name)]
        with self.netpartition(name=name) as netpart:
            for k, v in keys:
                self.assertEqual(netpart['net_partition'][k], v)

    def test_delete_netpartition(self):
        name = 'netpart1'
        netpart = self._make_netpartition(self.fmt, name)
        req = self.new_delete_request('net-partitions',
                                      netpart['net_partition']['id'])
        res = req.get_response(self.ext_api)
        self.assertEqual(res.status_int, webob.exc.HTTPNoContent.code)

    def test_show_netpartition(self):
        with self.netpartition(name='netpart1') as npart:
            req = self.new_show_request('net-partitions',
                                        npart['net_partition']['id'])
            res = self.deserialize(self.fmt, req.get_response(self.ext_api))
            self.assertEqual(res['net_partition']['name'],
                             npart['net_partition']['name'])

    def test_create_existing_default_netpartition(self):
        # Creating the default partition when it already exists must be
        # idempotent: the plugin hands back the existing partition.
        name = 'default_test_np'
        netpart1 = self._make_netpartition(self.fmt, name)
        nuage_plugin = manager.NeutronManager.get_plugin()
        netpart2 = nuage_plugin._create_default_net_partition(name)
        self.assertEqual(netpart1['net_partition']['name'],
                         netpart2['name'])
import contextlib
import os

import mock
from oslo.config import cfg
from webob import exc

from neutron.extensions import external_net
from neutron.extensions import portbindings
from neutron.plugins.nuage import extensions
from neutron.plugins.nuage import plugin as nuage_plugin
from neutron.tests.unit import _test_extension_portbindings as test_bindings
from neutron.tests.unit.nuage import fake_nuageclient
from neutron.tests.unit import test_db_plugin
from neutron.tests.unit import test_extension_extraroute as extraroute_test
from neutron.tests.unit import test_l3_plugin

# Extension path and fake VSD connection parameters shared by the
# test fixtures below.
API_EXT_PATH = os.path.dirname(extensions.__file__)
FAKE_DEFAULT_ENT = 'default'
NUAGE_PLUGIN_PATH = 'neutron.plugins.nuage.plugin'
FAKE_SERVER = '1.1.1.1'
FAKE_SERVER_AUTH = 'user:pass'
FAKE_SERVER_SSL = False
FAKE_BASE_URI = '/base/'
FAKE_AUTH_RESOURCE = '/auth'
FAKE_ORGANIZATION = 'fake_org'

_plugin_name = ('%s.NuagePlugin' % NUAGE_PLUGIN_PATH)


class NuagePluginV2TestCase(test_db_plugin.NeutronDbPluginV2TestCase):
    """Runs the standard DB-plugin test suites against the Nuage plugin.

    The plugin's VSD client initialization is replaced so all tests talk
    to fake_nuageclient.FakeNuageClient instead of a live VSD.
    """

    def setUp(self, plugin=_plugin_name,
              ext_mgr=None, service_plugins=None):
        # NOTE(review): service_plugins is accepted but never forwarded
        # to the parent setUp -- confirm whether that is intended.
        def mock_nuageClient_init(self):
            server = FAKE_SERVER
            serverauth = FAKE_SERVER_AUTH
            serverssl = FAKE_SERVER_SSL
            base_uri = FAKE_BASE_URI
            auth_resource = FAKE_AUTH_RESOURCE
            organization = FAKE_ORGANIZATION
            self.nuageclient = None
            self.nuageclient = fake_nuageclient.FakeNuageClient(server,
                                                                base_uri,
                                                                serverssl,
                                                                serverauth,
                                                                auth_resource,
                                                                organization)

        # Patch must be active while the parent setUp instantiates the
        # plugin, which is when nuageclient_init runs.
        with mock.patch.object(nuage_plugin.NuagePlugin,
                               'nuageclient_init', new=mock_nuageClient_init):
            cfg.CONF.set_override('api_extensions_path',
                                  API_EXT_PATH)
            super(NuagePluginV2TestCase, self).setUp(plugin=plugin,
                                                     ext_mgr=ext_mgr)

    def _assert_no_assoc_fip(self, fip):
        # The floating IP exists but is not bound to any port.
        body = self._show('floatingips',
                          fip['floatingip']['id'])
        self.assertIsNone(body['floatingip']['port_id'])
        self.assertIsNone(
            body['floatingip']['fixed_ip_address'])

    def _associate_and_assert_fip(self, fip, port, allow=True):
        """Associate a floating IP with a port.

        When allow is True, asserts the association succeeded and
        returns the router id; otherwise expects the plugin to reject
        the update with a 500 and returns None.
        """
        port_id = port['port']['id']
        ip_address = (port['port']['fixed_ips']
                      [0]['ip_address'])
        if allow:
            body = self._update(
                'floatingips', fip['floatingip']['id'],
                {'floatingip': {'port_id': port_id}})
            self.assertEqual(
                body['floatingip']['port_id'], port_id)
            self.assertEqual(
                body['floatingip']['fixed_ip_address'],
                ip_address)
            return body['floatingip']['router_id']
        else:
            code = exc.HTTPInternalServerError.code
            self._update(
                'floatingips', fip['floatingip']['id'],
                {'floatingip': {'port_id': port_id}},
                expected_code=code)

    def _test_floatingip_update_different_router(self):
        # Two private subnets behind two routers sharing one external
        # network: a floating IP may only be re-associated after it is
        # first disassociated from its current router.
        with contextlib.nested(self.subnet(cidr='10.0.0.0/24'),
                               self.subnet(cidr='10.0.1.0/24')) as (
            s1, s2):
            with contextlib.nested(self.port(subnet=s1),
                                   self.port(subnet=s2)) as (p1, p2):
                private_sub1 = {'subnet':
                                {'id':
                                 p1['port']['fixed_ips'][0]['subnet_id']}}
                private_sub2 = {'subnet':
                                {'id':
                                 p2['port']['fixed_ips'][0]['subnet_id']}}
                with self.subnet(cidr='12.0.0.0/24') as public_sub:
                    with contextlib.nested(
                            self.floatingip_no_assoc_with_public_sub(
                                private_sub1, public_sub=public_sub),
                            self.floatingip_no_assoc_with_public_sub(
                                private_sub2, public_sub=public_sub)) as (
                                    (fip1, r1), (fip2, r2)):

                        self._assert_no_assoc_fip(fip1)
                        self._assert_no_assoc_fip(fip2)

                        fip1_r1_res = self._associate_and_assert_fip(fip1, p1)
                        self.assertEqual(fip1_r1_res, r1['router']['id'])
                        # The following operation will associate the floating
                        # ip to a different router and should fail
                        self._associate_and_assert_fip(fip1, p2, allow=False)
                        # disassociate fip1
                        self._update(
                            'floatingips', fip1['floatingip']['id'],
                            {'floatingip': {'port_id': None}})
                        fip2_r2_res = self._associate_and_assert_fip(fip2, p2)
                        self.assertEqual(fip2_r2_res, r2['router']['id'])

    def _test_network_update_external_failure(self):
        # Clearing router:external on a network still used as a router
        # gateway must be rejected (the plugin raises, surfacing as 500).
        with self.router() as r:
            with self.subnet() as s1:
                self._set_net_external(s1['subnet']['network_id'])
                self._add_external_gateway_to_router(
                    r['router']['id'],
                    s1['subnet']['network_id'])
                self._update('networks', s1['subnet']['network_id'],
                             {'network': {external_net.EXTERNAL: False}},
                             expected_code=exc.HTTPInternalServerError.code)
                self._remove_external_gateway_from_router(
                    r['router']['id'],
                    s1['subnet']['network_id'])


class TestNuageBasicGet(NuagePluginV2TestCase,
                        test_db_plugin.TestBasicGet):
    pass


class TestNuageV2HTTPResponse(NuagePluginV2TestCase,
                              test_db_plugin.TestV2HTTPResponse):
    pass


class TestNuageNetworksV2(NuagePluginV2TestCase,
                          test_db_plugin.TestNetworksV2):
    pass


class TestNuageSubnetsV2(NuagePluginV2TestCase,
                         test_db_plugin.TestSubnetsV2):
    # The Nuage plugin does not implement subnet host-routes or the
    # no-gateway option, so the corresponding inherited tests are
    # skipped wholesale.

    def test_create_subnet_bad_hostroutes(self):
        self.skipTest("Plugin does not support Neutron Subnet host-routes")

    def test_create_subnet_inconsistent_ipv4_hostroute_dst_v6(self):
        self.skipTest("Plugin does not support Neutron Subnet host-routes")

    def test_create_subnet_inconsistent_ipv4_hostroute_np_v6(self):
        self.skipTest("Plugin does not support Neutron Subnet host-routes")

    def test_update_subnet_adding_additional_host_routes_and_dns(self):
        self.skipTest("Plugin does not support Neutron Subnet host-routes")

    def test_update_subnet_inconsistent_ipv6_hostroute_dst_v4(self):
        self.skipTest("Plugin does not support Neutron Subnet host-routes")

    def test_update_subnet_inconsistent_ipv6_hostroute_np_v4(self):
        self.skipTest("Plugin does not support Neutron Subnet host-routes")

    def test_create_subnet_with_one_host_route(self):
        self.skipTest("Plugin does not support Neutron Subnet host-routes")

    def test_create_subnet_with_two_host_routes(self):
        self.skipTest("Plugin does not support Neutron Subnet host-routes")

    def test_create_subnet_with_too_many_routes(self):
        self.skipTest("Plugin does not support Neutron Subnet host-routes")

    def test_update_subnet_route(self):
        self.skipTest("Plugin does not support Neutron Subnet host-routes")

    def test_update_subnet_route_to_None(self):
        self.skipTest("Plugin does not support Neutron Subnet host-routes")

    def test_update_subnet_route_with_too_many_entries(self):
        self.skipTest("Plugin does not support Neutron Subnet host-routes")

    def test_delete_subnet_with_route(self):
        self.skipTest("Plugin does not support Neutron Subnet host-routes")

    def test_delete_subnet_with_dns_and_route(self):
        self.skipTest("Plugin does not support Neutron Subnet host-routes")

    def test_validate_subnet_host_routes_exhausted(self):
        self.skipTest("Plugin does not support Neutron Subnet host-routes")

    def test_validate_subnet_dns_nameservers_exhausted(self):
        self.skipTest("Plugin does not support Neutron Subnet host-routes")

    def test_create_subnet_with_none_gateway(self):
        self.skipTest("Plugin does not support "
                      "Neutron Subnet no-gateway option")

    def test_create_subnet_with_none_gateway_fully_allocated(self):
        self.skipTest("Plugin does not support Neutron "
                      "Subnet no-gateway option")

    def test_create_subnet_with_none_gateway_allocation_pool(self):
        self.skipTest("Plugin does not support Neutron "
                      "Subnet no-gateway option")


class TestNuagePluginPortBinding(NuagePluginV2TestCase,
                                 test_bindings.PortBindingsTestCase):
    VIF_TYPE = portbindings.VIF_TYPE_OVS

    def setUp(self):
        # NOTE(review): this override only delegates to the parent and
        # could be removed.
        super(TestNuagePluginPortBinding, self).setUp()


class TestNuagePortsV2(NuagePluginV2TestCase,
                       test_db_plugin.TestPortsV2):
    pass


class TestNuageL3NatTestCase(NuagePluginV2TestCase,
                             test_l3_plugin.L3NatDBIntTestCase):
    # Nuage-specific variants of the L3 scenarios defined on the base
    # fixture above.

    def test_floatingip_update_different_router(self):
        self._test_floatingip_update_different_router()

    def test_network_update_external_failure(self):
        self._test_network_update_external_failure()


class TestNuageExtrarouteTestCase(NuagePluginV2TestCase,
                                  extraroute_test.ExtraRouteDBIntTestCase):

    def test_router_update_with_dup_destination_address(self):
        # Two extra routes with the same destination CIDR but different
        # nexthops must be rejected as a bad request.
        with self.router() as r:
            with self.subnet(cidr='10.0.1.0/24') as s:
                with self.port(subnet=s, no_delete=True) as p:
                    self._router_interface_action('add',
                                                  r['router']['id'],
                                                  None,
                                                  p['port']['id'])

                    routes = [{'destination': '135.207.0.0/16',
                               'nexthop': '10.0.1.3'},
                              {'destination': '135.207.0.0/16',
                               'nexthop': '10.0.1.5'}]

                    self._update('routers', r['router']['id'],
                                 {'router': {'routes':
                                             routes}},
                                 expected_code=exc.HTTPBadRequest.code)

                    # clean-up
                    self._router_interface_action('remove',
                                                  r['router']['id'],
                                                  None,
                                                  p['port']['id'])

    def test_floatingip_update_different_router(self):
        self._test_floatingip_update_different_router()

    def test_network_update_external_failure(self):
        self._test_network_update_external_failure()
import mock


class _Eq(object):
    # Equality by repr: two independently built fake OpenFlow objects
    # compare equal when they describe the same construction.
    def __eq__(self, other):
        return repr(self) == repr(other)

    def __ne__(self, other):
        return not self.__eq__(other)


class _Value(_Eq):
    # Supports OR-ing, mirroring how ryu bitmask constants are combined;
    # the result records the expression symbolically via _Op.
    def __or__(self, b):
        return _Op('|', self, b)

    def __ror__(self, a):
        return _Op('|', a, self)


class _SimpleValue(_Value):
    # A named constant; its repr is just the fully qualified name.
    def __init__(self, name):
        self.name = name

    def __repr__(self):
        return self.name


class _Op(_Value):
    # A binary expression over values; repr reproduces the expression.
    def __init__(self, op, a, b):
        self.op = op
        self.a = a
        self.b = b

    def __repr__(self):
        return '%s%s%s' % (self.a, self.op, self.b)


def _mkcls(name):
    """Build a fake message/parser class for the given qualified name.

    Instances record their construction arguments and repr themselves as
    the constructing call (kwargs sorted for determinism), so tests can
    compare expected vs. actual messages via _Eq.
    """
    class Cls(_Eq):
        _name = name

        def __init__(self, *args, **kwargs):
            self._args = args
            self._kwargs = kwargs
            self._hist = []

        def __getattr__(self, name):
            # Any unknown attribute evaluates to its own name.
            return name

        def __repr__(self):
            args = map(repr, self._args)
            kwargs = sorted(['%s=%s' % (x, y) for x, y in
                             self._kwargs.items()])
            return '%s(%s)' % (self._name, ', '.join(args + kwargs))

    return Cls


class _Mod(object):
    # Fake module: attribute access yields either a constant
    # (_SimpleValue) or a class factory (_mkcls), decided by naming
    # convention.
    def __init__(self, name):
        self._name = name

    def __getattr__(self, name):
        fullname = '%s.%s' % (self._name, name)
        if '_' in name:  # constants are named like OFPxxx_yyy_zzz
            return _SimpleValue(fullname)
        return _mkcls(fullname)

    def __repr__(self):
        return 'Mod(%s)' % (self._name,)


def patch_fake_oflib_of():
    """Return a mock.patch.dict installing fake ryu modules.

    Registers fake 'ryu.*' entries in sys.modules so ofagent code can be
    imported and exercised without ryu installed; the OpenFlow 1.3
    protocol and parser modules are the symbolic _Mod fakes above.
    """
    ryu_mod = mock.Mock()
    ryu_base_mod = ryu_mod.base
    ryu_lib_mod = ryu_mod.lib
    ryu_lib_hub = ryu_lib_mod.hub
    ryu_ofproto_mod = ryu_mod.ofproto
    ofp = _Mod('ryu.ofproto.ofproto_v1_3')
    ofpp = _Mod('ryu.ofproto.ofproto_v1_3_parser')
    ryu_ofproto_mod.ofproto_v1_3 = ofp
    ryu_ofproto_mod.ofproto_v1_3_parser = ofpp
    ryu_app_mod = ryu_mod.app
    ryu_app_ofctl_mod = ryu_app_mod.ofctl
    ryu_ofctl_api = ryu_app_ofctl_mod.api
    modules = {'ryu': ryu_mod,
               'ryu.base': ryu_base_mod,
               'ryu.lib': ryu_lib_mod,
               'ryu.lib.hub': ryu_lib_hub,
               'ryu.ofproto': ryu_ofproto_mod,
               'ryu.ofproto.ofproto_v1_3': ofp,
               'ryu.ofproto.ofproto_v1_3_parser': ofpp,
               'ryu.app': ryu_app_mod,
               'ryu.app.ofctl': ryu_app_ofctl_mod,
               'ryu.app.ofctl.api': ryu_ofctl_api}
    return mock.patch.dict('sys.modules', modules)


# --- neutron/tests/unit/ofagent/test_ofa_defaults.py ---

from oslo.config import cfg

from neutron.plugins.ofagent.common import config  # noqa
from neutron.tests import base


class ConfigurationTest(base.BaseTestCase):
    """Configuration file Tests."""
    def test_ml2_defaults(self):
        # Importing ofagent common.config (above) registers the option;
        # its documented default is 60.
        self.assertEqual(60, cfg.CONF.AGENT.get_datapath_retry_times)
import contextlib

import mock
import netaddr
from oslo.config import cfg
import testtools

from neutron.agent.linux import ip_lib
from neutron.agent.linux import ovs_lib
from neutron.agent.linux import utils
from neutron.openstack.common import importutils
from neutron.plugins.common import constants as p_const
from neutron.plugins.openvswitch.common import constants
from neutron.tests import base
from neutron.tests.unit.ofagent import fake_oflib


# Dotted path of the ML2 agent notifier patched by the tests below.
NOTIFIER = ('neutron.plugins.ml2.rpc.AgentNotifierApi')
OVS_LINUX_KERN_VERS_WITHOUT_VXLAN = "3.12.0"


class OFAAgentTestCase(base.BaseTestCase):
    """Common fixture: imports the OFA agent against a faked ryu."""

    _AGENT_NAME = 'neutron.plugins.ofagent.agent.ofa_neutron_agent'

    def setUp(self):
        super(OFAAgentTestCase, self).setUp()
        # Fake ryu modules must be in sys.modules before the agent
        # module is imported, since it imports ryu at module scope.
        self.fake_oflib_of = fake_oflib.patch_fake_oflib_of().start()
        self.mod_agent = importutils.import_module(self._AGENT_NAME)
        cfg.CONF.set_default('firewall_driver',
                             'neutron.agent.firewall.NoopFirewallDriver',
                             group='SECURITYGROUP')
        self.ryuapp = mock.Mock()
        cfg.CONF.register_cli_opts([
            cfg.StrOpt('ofp-listen-host', default='',
                       help='openflow listen host'),
            cfg.IntOpt('ofp-tcp-listen-port', default=6633,
                       help='openflow tcp listen port')
        ])
        cfg.CONF.set_override('root_helper', 'fake_helper', group='AGENT')


class CreateAgentConfigMap(OFAAgentTestCase):

    def test_create_agent_config_map_succeeds(self):
        self.assertTrue(self.mod_agent.create_agent_config_map(cfg.CONF))
def test_create_agent_config_map_fails_for_invalid_tunnel_config(self): + # An ip address is required for tunneling but there is no default, + # verify this for both gre and vxlan tunnels. + cfg.CONF.set_override('tunnel_types', [p_const.TYPE_GRE], + group='AGENT') + with testtools.ExpectedException(ValueError): + self.mod_agent.create_agent_config_map(cfg.CONF) + cfg.CONF.set_override('tunnel_types', [p_const.TYPE_VXLAN], + group='AGENT') + with testtools.ExpectedException(ValueError): + self.mod_agent.create_agent_config_map(cfg.CONF) + + def test_create_agent_config_map_enable_tunneling(self): + # Verify setting only enable_tunneling will default tunnel_type to GRE + cfg.CONF.set_override('tunnel_types', None, group='AGENT') + cfg.CONF.set_override('enable_tunneling', True, group='OVS') + cfg.CONF.set_override('local_ip', '10.10.10.10', group='OVS') + cfgmap = self.mod_agent.create_agent_config_map(cfg.CONF) + self.assertEqual(cfgmap['tunnel_types'], [p_const.TYPE_GRE]) + + def test_create_agent_config_map_fails_no_local_ip(self): + # An ip address is required for tunneling but there is no default + cfg.CONF.set_override('enable_tunneling', True, group='OVS') + with testtools.ExpectedException(ValueError): + self.mod_agent.create_agent_config_map(cfg.CONF) + + def test_create_agent_config_map_fails_for_invalid_tunnel_type(self): + cfg.CONF.set_override('tunnel_types', ['foobar'], group='AGENT') + with testtools.ExpectedException(ValueError): + self.mod_agent.create_agent_config_map(cfg.CONF) + + def test_create_agent_config_map_multiple_tunnel_types(self): + cfg.CONF.set_override('local_ip', '10.10.10.10', group='OVS') + cfg.CONF.set_override('tunnel_types', [p_const.TYPE_GRE, + p_const.TYPE_VXLAN], group='AGENT') + cfgmap = self.mod_agent.create_agent_config_map(cfg.CONF) + self.assertEqual(cfgmap['tunnel_types'], + [p_const.TYPE_GRE, p_const.TYPE_VXLAN]) + + +class TestOFANeutronAgentOVSBridge(OFAAgentTestCase): + + def setUp(self): + 
super(TestOFANeutronAgentOVSBridge, self).setUp() + self.br_name = 'bridge1' + self.root_helper = 'fake_helper' + self.ovs = self.mod_agent.OVSBridge( + self.br_name, self.root_helper, self.ryuapp) + + def test_find_datapath_id(self): + with mock.patch.object(self.ovs, 'get_datapath_id', + return_value='12345'): + self.ovs.find_datapath_id() + self.assertEqual(self.ovs.datapath_id, '12345') + + def _fake_get_datapath(self, app, datapath_id): + if self.ovs.retry_count >= 2: + datapath = mock.Mock() + datapath.ofproto_parser = mock.Mock() + return datapath + self.ovs.retry_count += 1 + return None + + def test_get_datapath_normal(self): + self.ovs.retry_count = 0 + with mock.patch.object(self.mod_agent.ryu_api, 'get_datapath', + new=self._fake_get_datapath): + self.ovs.datapath_id = '0x64' + self.ovs.get_datapath(retry_max=4) + self.assertEqual(self.ovs.retry_count, 2) + + def test_get_datapath_retry_out_by_default_time(self): + cfg.CONF.set_override('get_datapath_retry_times', 3, group='AGENT') + with mock.patch.object(self.mod_agent.ryu_api, 'get_datapath', + return_value=None) as mock_get_datapath: + with testtools.ExpectedException(SystemExit): + self.ovs.datapath_id = '0x64' + self.ovs.get_datapath(retry_max=3) + self.assertEqual(mock_get_datapath.call_count, 3) + + def test_get_datapath_retry_out_by_specified_time(self): + with mock.patch.object(self.mod_agent.ryu_api, 'get_datapath', + return_value=None) as mock_get_datapath: + with testtools.ExpectedException(SystemExit): + self.ovs.datapath_id = '0x64' + self.ovs.get_datapath(retry_max=2) + self.assertEqual(mock_get_datapath.call_count, 2) + + def test_setup_ofp_default_par(self): + with contextlib.nested( + mock.patch.object(self.ovs, 'set_protocols'), + mock.patch.object(self.ovs, 'set_controller'), + mock.patch.object(self.ovs, 'find_datapath_id'), + mock.patch.object(self.ovs, 'get_datapath'), + ) as (mock_set_protocols, mock_set_controller, + mock_find_datapath_id, mock_get_datapath): + 
self.ovs.setup_ofp() + mock_set_protocols.assert_called_with('OpenFlow13') + mock_set_controller.assert_called_with(['tcp:127.0.0.1:6633']) + mock_get_datapath.assert_called_with( + cfg.CONF.AGENT.get_datapath_retry_times) + self.assertEqual(mock_find_datapath_id.call_count, 1) + + def test_setup_ofp_specify_par(self): + controller_names = ['tcp:192.168.10.10:1234', 'tcp:172.17.16.20:5555'] + with contextlib.nested( + mock.patch.object(self.ovs, 'set_protocols'), + mock.patch.object(self.ovs, 'set_controller'), + mock.patch.object(self.ovs, 'find_datapath_id'), + mock.patch.object(self.ovs, 'get_datapath'), + ) as (mock_set_protocols, mock_set_controller, + mock_find_datapath_id, mock_get_datapath): + self.ovs.setup_ofp(controller_names=controller_names, + protocols='OpenFlow133', + retry_max=11) + mock_set_protocols.assert_called_with('OpenFlow133') + mock_set_controller.assert_called_with(controller_names) + mock_get_datapath.assert_called_with(11) + self.assertEqual(mock_find_datapath_id.call_count, 1) + + def test_setup_ofp_with_except(self): + with contextlib.nested( + mock.patch.object(self.ovs, 'set_protocols', + side_effect=RuntimeError), + mock.patch.object(self.ovs, 'set_controller'), + mock.patch.object(self.ovs, 'find_datapath_id'), + mock.patch.object(self.ovs, 'get_datapath'), + ) as (mock_set_protocols, mock_set_controller, + mock_find_datapath_id, mock_get_datapath): + with testtools.ExpectedException(SystemExit): + self.ovs.setup_ofp() + + +class TestOFANeutronAgent(OFAAgentTestCase): + + def setUp(self): + super(TestOFANeutronAgent, self).setUp() + notifier_p = mock.patch(NOTIFIER) + notifier_cls = notifier_p.start() + self.notifier = mock.Mock() + notifier_cls.return_value = self.notifier + # Avoid rpc initialization for unit tests + cfg.CONF.set_override('rpc_backend', + 'neutron.openstack.common.rpc.impl_fake') + kwargs = self.mod_agent.create_agent_config_map(cfg.CONF) + + class MockFixedIntervalLoopingCall(object): + def __init__(self, f): + 
self.f = f + + def start(self, interval=0): + self.f() + + def _mk_test_dp(name): + ofp = importutils.import_module('ryu.ofproto.ofproto_v1_3') + ofpp = importutils.import_module('ryu.ofproto.ofproto_v1_3_parser') + dp = mock.Mock() + dp.ofproto = ofp + dp.ofproto_parser = ofpp + dp.__repr__ = lambda _self: name + return dp + + def _mk_test_br(name): + dp = _mk_test_dp(name) + br = mock.Mock() + br.datapath = dp + br.ofproto = dp.ofproto + br.ofparser = dp.ofproto_parser + return br + + with contextlib.nested( + mock.patch.object(self.mod_agent.OFANeutronAgent, + 'setup_integration_br', + return_value=mock.Mock()), + mock.patch.object(self.mod_agent.OFANeutronAgent, + 'setup_ancillary_bridges', + return_value=[]), + mock.patch.object(self.mod_agent.OVSBridge, + 'get_local_port_mac', + return_value='00:00:00:00:00:01'), + mock.patch('neutron.agent.linux.utils.get_interface_mac', + return_value='00:00:00:00:00:01'), + mock.patch('neutron.openstack.common.loopingcall.' + 'FixedIntervalLoopingCall', + new=MockFixedIntervalLoopingCall)): + self.agent = self.mod_agent.OFANeutronAgent(self.ryuapp, **kwargs) + self.agent.tun_br = _mk_test_br('tun_br') + self.datapath = mock.Mock() + self.ofparser = mock.Mock() + self.agent.phys_brs['phys-net1'] = _mk_test_br('phys_br1') + self.agent.phys_ofports['phys-net1'] = 777 + self.agent.int_ofports['phys-net1'] = 666 + self.datapath.ofparser = self.ofparser + self.ofparser.OFPMatch = mock.Mock() + self.ofparser.OFPMatch.return_value = mock.Mock() + self.ofparser.OFPFlowMod = mock.Mock() + self.ofparser.OFPFlowMod.return_value = mock.Mock() + self.agent.int_br.ofparser = self.ofparser + self.agent.int_br.datapath = _mk_test_dp('int_br') + + self.agent.sg_agent = mock.Mock() + + def _mock_port_bound(self, ofport=None, new_local_vlan=None, + old_local_vlan=None): + port = mock.Mock() + port.ofport = ofport + net_uuid = 'my-net-uuid' + if old_local_vlan is not None: + self.agent.local_vlan_map[net_uuid] = ( + 
self.mod_agent.LocalVLANMapping( + old_local_vlan, None, None, None)) + with contextlib.nested( + mock.patch.object(self.mod_agent.OVSBridge, + 'set_db_attribute', return_value=True), + mock.patch.object(self.mod_agent.OVSBridge, + 'db_get_val', return_value=str(old_local_vlan)), + mock.patch.object(self.agent, 'ryu_send_msg') + ) as (set_ovs_db_func, get_ovs_db_func, ryu_send_msg_func): + self.agent.port_bound(port, net_uuid, 'local', None, None) + get_ovs_db_func.assert_called_once_with("Port", mock.ANY, "tag") + if new_local_vlan != old_local_vlan: + set_ovs_db_func.assert_called_once_with( + "Port", mock.ANY, "tag", str(new_local_vlan)) + if ofport != -1: + ryu_send_msg_func.assert_called_once_with( + self.ofparser.OFPFlowMod.return_value) + else: + self.assertFalse(ryu_send_msg_func.called) + else: + self.assertFalse(set_ovs_db_func.called) + self.assertFalse(ryu_send_msg_func.called) + + def test_port_bound_deletes_flows_for_valid_ofport(self): + self._mock_port_bound(ofport=1, new_local_vlan=1) + + def test_port_bound_ignores_flows_for_invalid_ofport(self): + self._mock_port_bound(ofport=-1, new_local_vlan=1) + + def test_port_bound_does_not_rewire_if_already_bound(self): + self._mock_port_bound(ofport=-1, new_local_vlan=1, old_local_vlan=1) + + def _test_port_dead(self, cur_tag=None): + port = mock.Mock() + port.ofport = 1 + with contextlib.nested( + mock.patch.object(self.mod_agent.OVSBridge, + 'set_db_attribute', return_value=True), + mock.patch.object(self.mod_agent.OVSBridge, + 'db_get_val', return_value=cur_tag), + mock.patch.object(self.agent, 'ryu_send_msg') + ) as (set_ovs_db_func, get_ovs_db_func, ryu_send_msg_func): + self.agent.port_dead(port) + get_ovs_db_func.assert_called_once_with("Port", mock.ANY, "tag") + if cur_tag == self.mod_agent.DEAD_VLAN_TAG: + self.assertFalse(set_ovs_db_func.called) + self.assertFalse(ryu_send_msg_func.called) + else: + set_ovs_db_func.assert_called_once_with( + "Port", mock.ANY, "tag", 
str(self.mod_agent.DEAD_VLAN_TAG)) + ryu_send_msg_func.assert_called_once_with( + self.ofparser.OFPFlowMod.return_value) + + def test_port_dead(self): + self._test_port_dead() + + def test_port_dead_with_port_already_dead(self): + self._test_port_dead(self.mod_agent.DEAD_VLAN_TAG) + + def mock_scan_ports(self, vif_port_set=None, registered_ports=None, + updated_ports=None, port_tags_dict=None): + port_tags_dict = port_tags_dict or {} + with contextlib.nested( + mock.patch.object(self.agent.int_br, 'get_vif_port_set', + return_value=vif_port_set), + mock.patch.object(self.agent.int_br, 'get_port_tag_dict', + return_value=port_tags_dict) + ): + return self.agent.scan_ports(registered_ports, updated_ports) + + def test_scan_ports_returns_current_only_for_unchanged_ports(self): + vif_port_set = set([1, 3]) + registered_ports = set([1, 3]) + expected = {'current': vif_port_set} + actual = self.mock_scan_ports(vif_port_set, registered_ports) + self.assertEqual(expected, actual) + + def test_scan_ports_returns_port_changes(self): + vif_port_set = set([1, 3]) + registered_ports = set([1, 2]) + expected = dict(current=vif_port_set, added=set([3]), removed=set([2])) + actual = self.mock_scan_ports(vif_port_set, registered_ports) + self.assertEqual(expected, actual) + + def _test_scan_ports_with_updated_ports(self, updated_ports): + vif_port_set = set([1, 3, 4]) + registered_ports = set([1, 2, 4]) + expected = dict(current=vif_port_set, added=set([3]), + removed=set([2]), updated=set([4])) + actual = self.mock_scan_ports(vif_port_set, registered_ports, + updated_ports) + self.assertEqual(expected, actual) + + def test_scan_ports_finds_known_updated_ports(self): + self._test_scan_ports_with_updated_ports(set([4])) + + def test_scan_ports_ignores_unknown_updated_ports(self): + # the port '5' was not seen on current ports. 
Hence it has either + # never been wired or already removed and should be ignored + self._test_scan_ports_with_updated_ports(set([4, 5])) + + def test_scan_ports_ignores_updated_port_if_removed(self): + vif_port_set = set([1, 3]) + registered_ports = set([1, 2]) + updated_ports = set([1, 2]) + expected = dict(current=vif_port_set, added=set([3]), + removed=set([2]), updated=set([1])) + actual = self.mock_scan_ports(vif_port_set, registered_ports, + updated_ports) + self.assertEqual(expected, actual) + + def test_scan_ports_no_vif_changes_returns_updated_port_only(self): + vif_port_set = set([1, 2, 3]) + registered_ports = set([1, 2, 3]) + updated_ports = set([2]) + expected = dict(current=vif_port_set, updated=set([2])) + actual = self.mock_scan_ports(vif_port_set, registered_ports, + updated_ports) + self.assertEqual(expected, actual) + + def test_update_ports_returns_lost_vlan_port(self): + br = self.mod_agent.OVSBridge('br-int', 'fake_helper', self.ryuapp) + mac = "ca:fe:de:ad:be:ef" + port = ovs_lib.VifPort(1, 1, 1, mac, br) + lvm = self.mod_agent.LocalVLANMapping( + 1, '1', None, 1, {port.vif_id: port}) + local_vlan_map = {'1': lvm} + vif_port_set = set([1, 3]) + registered_ports = set([1, 2]) + port_tags_dict = {1: []} + expected = dict( + added=set([3]), current=vif_port_set, + removed=set([2]), updated=set([1]) + ) + with mock.patch.dict(self.agent.local_vlan_map, local_vlan_map): + actual = self.mock_scan_ports( + vif_port_set, registered_ports, port_tags_dict=port_tags_dict) + self.assertEqual(expected, actual) + + def test_treat_devices_added_returns_true_for_missing_device(self): + with contextlib.nested( + mock.patch.object(self.agent.plugin_rpc, 'get_device_details', + side_effect=Exception()), + mock.patch.object(self.agent.int_br, 'get_vif_port_by_id', + return_value=mock.Mock())): + self.assertTrue(self.agent.treat_devices_added_or_updated([{}])) + + def _mock_treat_devices_added_updated(self, details, port, func_name): + """Mock treat devices 
added or updated. + + :param details: the details to return for the device + :param port: the port that get_vif_port_by_id should return + :param func_name: the function that should be called + :returns: whether the named function was called + """ + with contextlib.nested( + mock.patch.object(self.agent.plugin_rpc, 'get_device_details', + return_value=details), + mock.patch.object(self.agent.int_br, 'get_vif_port_by_id', + return_value=port), + mock.patch.object(self.agent.plugin_rpc, 'update_device_up'), + mock.patch.object(self.agent.plugin_rpc, 'update_device_down'), + mock.patch.object(self.agent, func_name) + ) as (get_dev_fn, get_vif_func, upd_dev_up, upd_dev_down, func): + self.assertFalse(self.agent.treat_devices_added_or_updated([{}])) + return func.called + + def test_treat_devices_added_updated_ignores_invalid_ofport(self): + port = mock.Mock() + port.ofport = -1 + self.assertFalse(self._mock_treat_devices_added_updated( + mock.MagicMock(), port, 'port_dead')) + + def test_treat_devices_added_updated_marks_unknown_port_as_dead(self): + port = mock.Mock() + port.ofport = 1 + self.assertTrue(self._mock_treat_devices_added_updated( + mock.MagicMock(), port, 'port_dead')) + + def test_treat_devices_added_does_not_process_missing_port(self): + with contextlib.nested( + mock.patch.object(self.agent.plugin_rpc, 'get_device_details'), + mock.patch.object(self.agent.int_br, 'get_vif_port_by_id', + return_value=None) + ) as (get_dev_fn, get_vif_func): + self.assertFalse(get_dev_fn.called) + + def test_treat_devices_added_updated_updates_known_port(self): + details = mock.MagicMock() + details.__contains__.side_effect = lambda x: True + self.assertTrue(self._mock_treat_devices_added_updated( + details, mock.Mock(), 'treat_vif_port')) + + def test_treat_devices_added_updated_put_port_down(self): + fake_details_dict = {'admin_state_up': False, + 'port_id': 'xxx', + 'device': 'xxx', + 'network_id': 'yyy', + 'physical_network': 'foo', + 'segmentation_id': 'bar', + 
'network_type': 'baz'} + with contextlib.nested( + mock.patch.object(self.agent.plugin_rpc, 'get_device_details', + return_value=fake_details_dict), + mock.patch.object(self.agent.int_br, 'get_vif_port_by_id', + return_value=mock.MagicMock()), + mock.patch.object(self.agent.plugin_rpc, 'update_device_up'), + mock.patch.object(self.agent.plugin_rpc, 'update_device_down'), + mock.patch.object(self.agent, 'treat_vif_port') + ) as (get_dev_fn, get_vif_func, upd_dev_up, + upd_dev_down, treat_vif_port): + self.assertFalse(self.agent.treat_devices_added_or_updated([{}])) + self.assertTrue(treat_vif_port.called) + self.assertTrue(upd_dev_down.called) + + def test_treat_devices_removed_returns_true_for_missing_device(self): + with mock.patch.object(self.agent.plugin_rpc, 'update_device_down', + side_effect=Exception()): + self.assertTrue(self.agent.treat_devices_removed([{}])) + + def _mock_treat_devices_removed(self, port_exists): + details = dict(exists=port_exists) + with mock.patch.object(self.agent.plugin_rpc, 'update_device_down', + return_value=details): + with mock.patch.object(self.agent, 'port_unbound') as port_unbound: + self.assertFalse(self.agent.treat_devices_removed([{}])) + self.assertTrue(port_unbound.called) + + def test_treat_devices_removed_unbinds_port(self): + self._mock_treat_devices_removed(True) + + def test_treat_devices_removed_ignores_missing_port(self): + self._mock_treat_devices_removed(False) + + def _test_process_network_ports(self, port_info): + with contextlib.nested( + mock.patch.object(self.agent.sg_agent, "setup_port_filters"), + mock.patch.object(self.agent, "treat_devices_added_or_updated", + return_value=False), + mock.patch.object(self.agent, "treat_devices_removed", + return_value=False) + ) as (setup_port_filters, device_added_updated, device_removed): + self.assertFalse(self.agent.process_network_ports(port_info)) + setup_port_filters.assert_called_once_with( + port_info['added'], port_info.get('updated', set())) + 
device_added_updated.assert_called_once_with( + port_info['added'] | port_info.get('updated', set())) + device_removed.assert_called_once_with(port_info['removed']) + + def test_process_network_ports(self): + self._test_process_network_ports( + {'current': set(['tap0']), + 'removed': set(['eth0']), + 'added': set(['eth1'])}) + + def test_process_network_port_with_updated_ports(self): + self._test_process_network_ports( + {'current': set(['tap0', 'tap1']), + 'updated': set(['tap1', 'eth1']), + 'removed': set(['eth0']), + 'added': set(['eth1'])}) + + def test_report_state(self): + with mock.patch.object(self.agent.state_rpc, + "report_state") as report_st: + self.agent.int_br_device_count = 5 + self.agent._report_state() + report_st.assert_called_with(self.agent.context, + self.agent.agent_state) + self.assertNotIn("start_flag", self.agent.agent_state) + self.assertEqual( + self.agent.agent_state["configurations"]["devices"], + self.agent.int_br_device_count + ) + + def test_network_delete(self): + with mock.patch.object(self.agent, + "reclaim_local_vlan") as recl_fn: + self.agent.network_delete("unused_context", + network_id="123") + self.assertFalse(recl_fn.called) + self.agent.local_vlan_map["123"] = "LVM object" + self.agent.network_delete("unused_context", + network_id="123") + recl_fn.assert_called_with("123") + + def test_port_update(self): + port = {"id": "123", + "network_id": "124", + "admin_state_up": False} + self.agent.port_update("unused_context", + port=port, + network_type="vlan", + segmentation_id="1", + physical_network="physnet") + self.assertEqual(set(['123']), self.agent.updated_ports) + + def test_setup_physical_bridges(self): + with contextlib.nested( + mock.patch.object(ip_lib, "device_exists"), + mock.patch.object(utils, "execute"), + mock.patch.object(self.mod_agent.OVSBridge, "add_port"), + mock.patch.object(self.mod_agent.OVSBridge, "delete_port"), + mock.patch.object(self.mod_agent.OVSBridge, "set_protocols"), + 
mock.patch.object(self.mod_agent.OVSBridge, "set_controller"), + mock.patch.object(self.mod_agent.OVSBridge, "get_datapath_id", + return_value='0xa'), + mock.patch.object(self.agent.int_br, "add_port"), + mock.patch.object(self.agent.int_br, "delete_port"), + mock.patch.object(ip_lib.IPWrapper, "add_veth"), + mock.patch.object(ip_lib.IpLinkCommand, "delete"), + mock.patch.object(ip_lib.IpLinkCommand, "set_up"), + mock.patch.object(ip_lib.IpLinkCommand, "set_mtu"), + mock.patch.object(self.mod_agent.ryu_api, "get_datapath", + return_value=self.datapath) + ) as (devex_fn, utilsexec_fn, + ovs_addport_fn, ovs_delport_fn, ovs_set_protocols_fn, + ovs_set_controller_fn, ovs_datapath_id_fn, br_addport_fn, + br_delport_fn, addveth_fn, linkdel_fn, linkset_fn, linkmtu_fn, + ryu_api_fn): + devex_fn.return_value = True + parent = mock.MagicMock() + parent.attach_mock(utilsexec_fn, 'utils_execute') + parent.attach_mock(linkdel_fn, 'link_delete') + parent.attach_mock(addveth_fn, 'add_veth') + addveth_fn.return_value = (ip_lib.IPDevice("int-br-eth1"), + ip_lib.IPDevice("phy-br-eth1")) + ovs_addport_fn.return_value = "25" + br_addport_fn.return_value = "11" + self.agent.setup_physical_bridges({"physnet1": "br-eth"}) + expected_calls = [mock.call.link_delete(), + mock.call.utils_execute(['/sbin/udevadm', + 'settle', + '--timeout=10']), + mock.call.add_veth('int-br-eth', + 'phy-br-eth')] + parent.assert_has_calls(expected_calls, any_order=False) + self.assertEqual(self.agent.int_ofports["physnet1"], + "11") + self.assertEqual(self.agent.phys_ofports["physnet1"], + "25") + + def test_port_unbound(self): + with mock.patch.object(self.agent, "reclaim_local_vlan") as reclvl_fn: + self.agent.enable_tunneling = True + lvm = mock.Mock() + lvm.network_type = "gre" + lvm.vif_ports = {"vif1": mock.Mock()} + self.agent.local_vlan_map["netuid12345"] = lvm + self.agent.port_unbound("vif1", "netuid12345") + self.assertTrue(reclvl_fn.called) + reclvl_fn.called = False + + lvm.vif_ports = {} + 
self.agent.port_unbound("vif1", "netuid12345") + self.assertEqual(reclvl_fn.call_count, 2) + + lvm.vif_ports = {"vif1": mock.Mock()} + self.agent.port_unbound("vif3", "netuid12345") + self.assertEqual(reclvl_fn.call_count, 2) + + def test_daemon_loop_uses_polling_manager(self): + with mock.patch( + 'neutron.agent.linux.polling.get_polling_manager' + ) as mock_get_pm: + fake_pm = mock.Mock() + mock_get_pm.return_value = fake_pm + fake_pm.__enter__ = mock.Mock() + fake_pm.__exit__ = mock.Mock() + with mock.patch.object( + self.agent, 'ovsdb_monitor_loop' + ) as mock_loop: + self.agent.daemon_loop() + mock_get_pm.assert_called_once_with(True, 'fake_helper', + constants.DEFAULT_OVSDBMON_RESPAWN) + mock_loop.assert_called_once_with(polling_manager=fake_pm.__enter__()) + + def test_setup_tunnel_port_error_negative(self): + with contextlib.nested( + mock.patch.object(self.agent.tun_br, 'add_tunnel_port', + return_value='-1'), + mock.patch.object(self.mod_agent.LOG, 'error') + ) as (add_tunnel_port_fn, log_error_fn): + ofport = self.agent.setup_tunnel_port( + 'gre-1', 'remote_ip', p_const.TYPE_GRE) + add_tunnel_port_fn.assert_called_once_with( + 'gre-1', 'remote_ip', self.agent.local_ip, p_const.TYPE_GRE, + self.agent.vxlan_udp_port, self.agent.dont_fragment) + log_error_fn.assert_called_once_with( + _("Failed to set-up %(type)s tunnel port to %(ip)s"), + {'type': p_const.TYPE_GRE, 'ip': 'remote_ip'}) + self.assertEqual(ofport, 0) + + def test_setup_tunnel_port_error_not_int(self): + with contextlib.nested( + mock.patch.object(self.agent.tun_br, 'add_tunnel_port', + return_value=None), + mock.patch.object(self.mod_agent.LOG, 'exception'), + mock.patch.object(self.mod_agent.LOG, 'error') + ) as (add_tunnel_port_fn, log_exc_fn, log_error_fn): + ofport = self.agent.setup_tunnel_port( + 'gre-1', 'remote_ip', p_const.TYPE_GRE) + add_tunnel_port_fn.assert_called_once_with( + 'gre-1', 'remote_ip', self.agent.local_ip, p_const.TYPE_GRE, + self.agent.vxlan_udp_port, 
self.agent.dont_fragment) + log_exc_fn.assert_called_once_with( + _("ofport should have a value that can be " + "interpreted as an integer")) + log_error_fn.assert_called_once_with( + _("Failed to set-up %(type)s tunnel port to %(ip)s"), + {'type': p_const.TYPE_GRE, 'ip': 'remote_ip'}) + self.assertEqual(ofport, 0) + + def _create_tunnel_port_name(self, tunnel_ip, tunnel_type): + tunnel_ip_hex = '%08x' % netaddr.IPAddress(tunnel_ip, version=4) + return '%s-%s' % (tunnel_type, tunnel_ip_hex) + + def test_tunnel_sync_with_valid_ip_address_and_gre_type(self): + tunnel_ip = '100.101.102.103' + self.agent.tunnel_types = ['gre'] + tun_name = self._create_tunnel_port_name(tunnel_ip, + self.agent.tunnel_types[0]) + fake_tunnel_details = {'tunnels': [{'ip_address': tunnel_ip}]} + with contextlib.nested( + mock.patch.object(self.agent.plugin_rpc, 'tunnel_sync', + return_value=fake_tunnel_details), + mock.patch.object(self.agent, 'setup_tunnel_port') + ) as (tunnel_sync_rpc_fn, setup_tunnel_port_fn): + self.agent.tunnel_sync() + expected_calls = [mock.call(tun_name, tunnel_ip, + self.agent.tunnel_types[0])] + setup_tunnel_port_fn.assert_has_calls(expected_calls) + + def test_tunnel_sync_with_valid_ip_address_and_vxlan_type(self): + tunnel_ip = '100.101.31.15' + self.agent.tunnel_types = ['vxlan'] + tun_name = self._create_tunnel_port_name(tunnel_ip, + self.agent.tunnel_types[0]) + fake_tunnel_details = {'tunnels': [{'ip_address': tunnel_ip}]} + with contextlib.nested( + mock.patch.object(self.agent.plugin_rpc, 'tunnel_sync', + return_value=fake_tunnel_details), + mock.patch.object(self.agent, 'setup_tunnel_port') + ) as (tunnel_sync_rpc_fn, setup_tunnel_port_fn): + self.agent.tunnel_sync() + expected_calls = [mock.call(tun_name, tunnel_ip, + self.agent.tunnel_types[0])] + setup_tunnel_port_fn.assert_has_calls(expected_calls) + + def test_tunnel_sync_invalid_ip_address(self): + tunnel_ip = '100.100.100.100' + self.agent.tunnel_types = ['vxlan'] + tun_name = 
self._create_tunnel_port_name(tunnel_ip, + self.agent.tunnel_types[0]) + fake_tunnel_details = {'tunnels': [{'ip_address': '300.300.300.300'}, + {'ip_address': tunnel_ip}]} + with contextlib.nested( + mock.patch.object(self.agent.plugin_rpc, 'tunnel_sync', + return_value=fake_tunnel_details), + mock.patch.object(self.agent, 'setup_tunnel_port') + ) as (tunnel_sync_rpc_fn, setup_tunnel_port_fn): + self.agent.tunnel_sync() + setup_tunnel_port_fn.assert_called_once_with( + tun_name, tunnel_ip, self.agent.tunnel_types[0]) + + def test_tunnel_update(self): + tunnel_ip = '10.10.10.10' + self.agent.tunnel_types = ['gre'] + tun_name = self._create_tunnel_port_name(tunnel_ip, + self.agent.tunnel_types[0]) + kwargs = {'tunnel_ip': tunnel_ip, + 'tunnel_type': self.agent.tunnel_types[0]} + self.agent.setup_tunnel_port = mock.Mock() + self.agent.enable_tunneling = True + self.agent.l2_pop = False + self.agent.tunnel_update(context=None, **kwargs) + expected_calls = [mock.call(tun_name, tunnel_ip, + self.agent.tunnel_types[0])] + self.agent.setup_tunnel_port.assert_has_calls(expected_calls) + + def test__provision_local_vlan_inbound_for_tunnel(self): + with mock.patch.object(self.agent, 'ryu_send_msg') as sendmsg: + self.agent._provision_local_vlan_inbound_for_tunnel(1, 'gre', 3) + + ofp = importutils.import_module('ryu.ofproto.ofproto_v1_3') + ofpp = importutils.import_module('ryu.ofproto.ofproto_v1_3_parser') + expected_msg = ofpp.OFPFlowMod( + self.agent.tun_br.datapath, + instructions=[ + ofpp.OFPInstructionActions( + ofp.OFPIT_APPLY_ACTIONS, + [ + ofpp.OFPActionPushVlan(), + ofpp.OFPActionSetField(vlan_vid=1 | + ofp.OFPVID_PRESENT), + ]), + ofpp.OFPInstructionGotoTable(table_id=10), + ], + match=ofpp.OFPMatch(tunnel_id=3), + priority=1, + table_id=2) + sendmsg.assert_has_calls([mock.call(expected_msg)]) + + def test__provision_local_vlan_outbound(self): + with mock.patch.object(self.agent, 'ryu_send_msg') as sendmsg: + self.agent._provision_local_vlan_outbound(888, 999, 
'phys-net1') + + ofp = importutils.import_module('ryu.ofproto.ofproto_v1_3') + ofpp = importutils.import_module('ryu.ofproto.ofproto_v1_3_parser') + expected_msg = ofpp.OFPFlowMod( + self.agent.phys_brs['phys-net1'].datapath, + instructions=[ + ofpp.OFPInstructionActions( + ofp.OFPIT_APPLY_ACTIONS, + [ + ofpp.OFPActionSetField(vlan_vid=999), + ofpp.OFPActionOutput(ofp.OFPP_NORMAL, 0), + ] + ) + ], + match=ofpp.OFPMatch( + in_port=777, + vlan_vid=888 | ofp.OFPVID_PRESENT + ), + priority=4) + sendmsg.assert_has_calls([mock.call(expected_msg)]) + + def test__provision_local_vlan_inbound(self): + with mock.patch.object(self.agent, 'ryu_send_msg') as sendmsg: + self.agent._provision_local_vlan_inbound(888, 999, 'phys-net1') + + ofp = importutils.import_module('ryu.ofproto.ofproto_v1_3') + ofpp = importutils.import_module('ryu.ofproto.ofproto_v1_3_parser') + expected_msg = ofpp.OFPFlowMod( + self.agent.int_br.datapath, + instructions=[ + ofpp.OFPInstructionActions( + ofp.OFPIT_APPLY_ACTIONS, + [ + ofpp.OFPActionSetField( + vlan_vid=888 | ofp.OFPVID_PRESENT + ), + ofpp.OFPActionOutput(ofp.OFPP_NORMAL, 0), + ] + ) + ], + match=ofpp.OFPMatch(in_port=666, vlan_vid=999), + priority=3) + sendmsg.assert_has_calls([mock.call(expected_msg)]) + + def test__reclaim_local_vlan_outbound(self): + lvm = mock.Mock() + lvm.network_type = p_const.TYPE_VLAN + lvm.segmentation_id = 555 + lvm.vlan = 444 + lvm.physical_network = 'phys-net1' + with mock.patch.object(self.agent, 'ryu_send_msg') as sendmsg: + self.agent._reclaim_local_vlan_outbound(lvm) + + ofp = importutils.import_module('ryu.ofproto.ofproto_v1_3') + ofpp = importutils.import_module('ryu.ofproto.ofproto_v1_3_parser') + expected_msg = ofpp.OFPFlowMod( + self.agent.phys_brs['phys-net1'].datapath, + command=ofp.OFPFC_DELETE, + match=ofpp.OFPMatch( + in_port=777, + vlan_vid=444 | ofp.OFPVID_PRESENT + ), + out_group=ofp.OFPG_ANY, + out_port=ofp.OFPP_ANY, + table_id=ofp.OFPTT_ALL) + sendmsg.assert_has_calls([mock.call(expected_msg)]) 
+ + def test__reclaim_local_vlan_inbound(self): + lvm = mock.Mock() + lvm.network_type = p_const.TYPE_VLAN + lvm.segmentation_id = 555 + lvm.vlan = 444 + lvm.physical_network = 'phys-net1' + with mock.patch.object(self.agent, 'ryu_send_msg') as sendmsg: + self.agent._reclaim_local_vlan_inbound(lvm) + + ofp = importutils.import_module('ryu.ofproto.ofproto_v1_3') + ofpp = importutils.import_module('ryu.ofproto.ofproto_v1_3_parser') + expected_msg = ofpp.OFPFlowMod( + self.agent.int_br.datapath, + command=ofp.OFPFC_DELETE, + match=ofpp.OFPMatch( + in_port=666, + vlan_vid=555 | ofp.OFPVID_PRESENT + ), + out_group=ofp.OFPG_ANY, + out_port=ofp.OFPP_ANY, + table_id=ofp.OFPTT_ALL) + sendmsg.assert_has_calls([mock.call(expected_msg)]) + + def test__provision_local_vlan_outbound_flat(self): + ofp = importutils.import_module('ryu.ofproto.ofproto_v1_3') + ofpp = importutils.import_module('ryu.ofproto.ofproto_v1_3_parser') + with mock.patch.object(self.agent, 'ryu_send_msg') as sendmsg: + self.agent._provision_local_vlan_outbound(888, ofp.OFPVID_NONE, + 'phys-net1') + + expected_msg = ofpp.OFPFlowMod( + self.agent.phys_brs['phys-net1'].datapath, + instructions=[ + ofpp.OFPInstructionActions( + ofp.OFPIT_APPLY_ACTIONS, + [ + ofpp.OFPActionPopVlan(), + ofpp.OFPActionOutput(ofp.OFPP_NORMAL, 0), + ] + ) + ], + match=ofpp.OFPMatch( + in_port=777, + vlan_vid=888 | ofp.OFPVID_PRESENT + ), + priority=4) + sendmsg.assert_has_calls([mock.call(expected_msg)]) + + def test__provision_local_vlan_inbound_flat(self): + ofp = importutils.import_module('ryu.ofproto.ofproto_v1_3') + ofpp = importutils.import_module('ryu.ofproto.ofproto_v1_3_parser') + with mock.patch.object(self.agent, 'ryu_send_msg') as sendmsg: + self.agent._provision_local_vlan_inbound(888, ofp.OFPVID_NONE, + 'phys-net1') + + expected_msg = ofpp.OFPFlowMod( + self.agent.int_br.datapath, + instructions=[ + ofpp.OFPInstructionActions( + ofp.OFPIT_APPLY_ACTIONS, + [ + ofpp.OFPActionPushVlan(), + ofpp.OFPActionSetField( + 
vlan_vid=888 | ofp.OFPVID_PRESENT + ), + ofpp.OFPActionOutput(ofp.OFPP_NORMAL, 0), + ] + ) + ], + match=ofpp.OFPMatch(in_port=666, vlan_vid=ofp.OFPVID_NONE), + priority=3) + sendmsg.assert_has_calls([mock.call(expected_msg)]) + + def test__reclaim_local_vlan_outbound_flat(self): + lvm = mock.Mock() + lvm.network_type = p_const.TYPE_FLAT + lvm.segmentation_id = 555 + lvm.vlan = 444 + lvm.physical_network = 'phys-net1' + with mock.patch.object(self.agent, 'ryu_send_msg') as sendmsg: + self.agent._reclaim_local_vlan_outbound(lvm) + + ofp = importutils.import_module('ryu.ofproto.ofproto_v1_3') + ofpp = importutils.import_module('ryu.ofproto.ofproto_v1_3_parser') + expected_msg = ofpp.OFPFlowMod( + self.agent.phys_brs['phys-net1'].datapath, + command=ofp.OFPFC_DELETE, + match=ofpp.OFPMatch( + in_port=777, + vlan_vid=444 | ofp.OFPVID_PRESENT + ), + out_group=ofp.OFPG_ANY, + out_port=ofp.OFPP_ANY, + table_id=ofp.OFPTT_ALL) + sendmsg.assert_has_calls([mock.call(expected_msg)]) + + def test__reclaim_local_vlan_inbound_flat(self): + lvm = mock.Mock() + lvm.network_type = p_const.TYPE_FLAT + lvm.segmentation_id = 555 + lvm.vlan = 444 + lvm.physical_network = 'phys-net1' + with mock.patch.object(self.agent, 'ryu_send_msg') as sendmsg: + self.agent._reclaim_local_vlan_inbound(lvm) + + ofp = importutils.import_module('ryu.ofproto.ofproto_v1_3') + ofpp = importutils.import_module('ryu.ofproto.ofproto_v1_3_parser') + expected_msg = ofpp.OFPFlowMod( + self.agent.int_br.datapath, + command=ofp.OFPFC_DELETE, + match=ofpp.OFPMatch( + in_port=666, + vlan_vid=ofp.OFPVID_NONE + ), + out_group=ofp.OFPG_ANY, + out_port=ofp.OFPP_ANY, + table_id=ofp.OFPTT_ALL) + sendmsg.assert_has_calls([mock.call(expected_msg)]) + + +class AncillaryBridgesTest(OFAAgentTestCase): + + def setUp(self): + super(AncillaryBridgesTest, self).setUp() + notifier_p = mock.patch(NOTIFIER) + notifier_cls = notifier_p.start() + self.notifier = mock.Mock() + notifier_cls.return_value = self.notifier + # Avoid rpc 
initialization for unit tests + cfg.CONF.set_override('rpc_backend', + 'neutron.openstack.common.rpc.impl_fake') + cfg.CONF.set_override('report_interval', 0, 'AGENT') + self.kwargs = self.mod_agent.create_agent_config_map(cfg.CONF) + + def _test_ancillary_bridges(self, bridges, ancillary): + device_ids = ancillary[:] + + def pullup_side_effect(self, *args): + result = device_ids.pop(0) + return result + + with contextlib.nested( + mock.patch.object(self.mod_agent.OFANeutronAgent, + 'setup_integration_br', + return_value=mock.Mock()), + mock.patch('neutron.agent.linux.utils.get_interface_mac', + return_value='00:00:00:00:00:01'), + mock.patch.object(self.mod_agent.OVSBridge, + 'get_local_port_mac', + return_value='00:00:00:00:00:01'), + mock.patch('neutron.agent.linux.ovs_lib.get_bridges', + return_value=bridges), + mock.patch( + 'neutron.agent.linux.ovs_lib.get_bridge_external_bridge_id', + side_effect=pullup_side_effect)): + self.agent = self.mod_agent.OFANeutronAgent( + self.ryuapp, **self.kwargs) + self.assertEqual(len(ancillary), len(self.agent.ancillary_brs)) + if ancillary: + bridges = [br.br_name for br in self.agent.ancillary_brs] + for br in ancillary: + self.assertIn(br, bridges) + + def test_ancillary_bridges_single(self): + bridges = ['br-int', 'br-ex'] + self._test_ancillary_bridges(bridges, ['br-ex']) + + def test_ancillary_bridges_none(self): + bridges = ['br-int'] + self._test_ancillary_bridges(bridges, []) + + def test_ancillary_bridges_multiple(self): + bridges = ['br-int', 'br-ex1', 'br-ex2'] + self._test_ancillary_bridges(bridges, ['br-ex1', 'br-ex2']) diff --git a/neutron/tests/unit/oneconvergence/__init__.py b/neutron/tests/unit/oneconvergence/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/neutron/tests/unit/oneconvergence/test_nvsd_agent.py b/neutron/tests/unit/oneconvergence/test_nvsd_agent.py new file mode 100644 index 000000000..f04d2fecb --- /dev/null +++ b/neutron/tests/unit/oneconvergence/test_nvsd_agent.py @@ 
-0,0 +1,177 @@ +# Copyright 2014 OneConvergence, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Kedar Kulkarni, One Convergence, Inc. + +import contextlib +import time + +import mock +from oslo.config import cfg +import testtools + +from neutron.agent.linux import ovs_lib +from neutron.extensions import securitygroup as ext_sg +from neutron.plugins.oneconvergence.agent import nvsd_neutron_agent +from neutron.tests import base + +DAEMON_LOOP_COUNT = 5 + + +class TestOneConvergenceAgentBase(base.BaseTestCase): + + def setUp(self): + super(TestOneConvergenceAgentBase, self).setUp() + cfg.CONF.set_default('firewall_driver', + 'neutron.agent.firewall.NoopFirewallDriver', + group='SECURITYGROUP') + cfg.CONF.set_override('rpc_backend', + 'neutron.openstack.common.rpc.impl_fake') + with contextlib.nested( + mock.patch('neutron.openstack.common.loopingcall.' 
+ 'FixedIntervalLoopingCall'), + ) as (loopingcall): + kwargs = {'integ_br': 'integration_bridge', + 'root_helper': 'dummy_wrapper', + 'polling_interval': 5} + context = mock.Mock() + self.agent = nvsd_neutron_agent.NVSDNeutronAgent(**kwargs) + self.sg_agent = nvsd_neutron_agent.SecurityGroupAgentRpc( + context, 'dummy_wrapper') + self.callback_nvsd = nvsd_neutron_agent.NVSDAgentRpcCallback( + context, self.agent, self.sg_agent) + self.loopingcall = loopingcall + + +class TestOneConvergenceAgentCallback(TestOneConvergenceAgentBase): + + def test_port_update(self): + with contextlib.nested( + mock.patch.object(ovs_lib.OVSBridge, 'get_vif_port_by_id'), + mock.patch.object(self.sg_agent, 'refresh_firewall') + ) as (get_vif_port_by_id, refresh_firewall): + context = mock.Mock() + vifport = ovs_lib.VifPort('port1', '1', 'id-1', 'mac-1', + self.agent.int_br) + + # The OVS port does not exist. + get_vif_port_by_id.return_value = None + port = {'id': 'update-port-1'} + self.callback_nvsd.port_update(context, port=port) + self.assertEqual(get_vif_port_by_id.call_count, 1) + self.assertFalse(refresh_firewall.call_count) + + # The OVS port exists but no security group is associated. + get_vif_port_by_id.return_value = vifport + port = {'id': 'update-port-1'} + self.callback_nvsd.port_update(context, port=port) + self.assertEqual(get_vif_port_by_id.call_count, 2) + self.assertFalse(refresh_firewall.call_count) + + # The OVS port exists but a security group is associated. 
+ get_vif_port_by_id.return_value = vifport + port = {'id': 'update-port-1', + ext_sg.SECURITYGROUPS: ['default']} + self.callback_nvsd.port_update(context, port=port) + self.assertEqual(get_vif_port_by_id.call_count, 3) + self.assertEqual(refresh_firewall.call_count, 1) + + get_vif_port_by_id.return_value = None + port = {'id': 'update-port-1', + ext_sg.SECURITYGROUPS: ['default']} + self.callback_nvsd.port_update(context, port=port) + self.assertEqual(get_vif_port_by_id.call_count, 4) + self.assertEqual(refresh_firewall.call_count, 1) + + +class TestNVSDAgent(TestOneConvergenceAgentBase): + + def _setup_mock(self): + self.get_vif_ports = mock.patch.object( + ovs_lib.OVSBridge, 'get_vif_port_set', + return_value=set(['id-1', 'id-2'])).start() + self.prepare_devices_filter = mock.patch.object( + self.agent.sg_agent, 'prepare_devices_filter').start() + self.remove_devices_filter = mock.patch.object( + self.agent.sg_agent, 'remove_devices_filter').start() + + def test_daemon_loop(self): + + def state_check(index): + self.assertEqual(len(self.vif_ports_scenario[index]), + len(self.agent.ports)) + + # Fake time.sleep to stop the infinite loop in daemon_loop() + self.sleep_count = 0 + + def sleep_mock(*args, **kwargs): + state_check(self.sleep_count) + self.sleep_count += 1 + if self.sleep_count >= DAEMON_LOOP_COUNT: + raise RuntimeError() + + self.vif_ports_scenario = [set(), set(), set(), set(['id-1', 'id-2']), + set(['id-2', 'id-3'])] + + # Ensure vif_ports_scenario is longer than DAEMON_LOOP_COUNT + if len(self.vif_ports_scenario) < DAEMON_LOOP_COUNT: + self.vif_ports_scenario.extend( + [] for _i in xrange(DAEMON_LOOP_COUNT - + len(self.vif_ports_scenario))) + + with contextlib.nested( + mock.patch.object(time, 'sleep', side_effect=sleep_mock), + mock.patch.object(ovs_lib.OVSBridge, 'get_vif_port_set'), + mock.patch.object(self.agent.sg_agent, 'prepare_devices_filter'), + mock.patch.object(self.agent.sg_agent, 'remove_devices_filter') + ) as (sleep, 
get_vif_port_set, prepare_devices_filter, + remove_devices_filter): + get_vif_port_set.side_effect = self.vif_ports_scenario + + with testtools.ExpectedException(RuntimeError): + self.agent.daemon_loop() + self.assertEqual(sleep.call_count, DAEMON_LOOP_COUNT) + + expected = [mock.call(set(['id-1', 'id-2'])), + mock.call(set(['id-3']))] + + self.assertEqual(prepare_devices_filter.call_count, 2) + prepare_devices_filter.assert_has_calls(expected) + + expected = [mock.call(set([])), mock.call(set(['id-1']))] + + self.assertEqual(remove_devices_filter.call_count, 2) + remove_devices_filter.assert_has_calls(expected) + + sleep.assert_called_with(self.agent.polling_interval) + + +class TestOneConvergenceAgentMain(base.BaseTestCase): + def test_main(self): + with contextlib.nested( + mock.patch.object(nvsd_neutron_agent, 'NVSDNeutronAgent'), + mock.patch.object(nvsd_neutron_agent, 'common_config'), + mock.patch.object(nvsd_neutron_agent, 'config') + ) as (agent, common_config, config): + config.AGENT.integration_bridge = 'br-int-dummy' + config.AGENT.root_helper = 'root-helper' + config.AGENT.polling_interval = 5 + + nvsd_neutron_agent.main() + + self.assertTrue(common_config.setup_logging.called) + agent.assert_has_calls([ + mock.call('br-int-dummy', 'root-helper', 5), + mock.call().daemon_loop() + ]) diff --git a/neutron/tests/unit/oneconvergence/test_nvsd_plugin.py b/neutron/tests/unit/oneconvergence/test_nvsd_plugin.py new file mode 100644 index 000000000..564b8c56a --- /dev/null +++ b/neutron/tests/unit/oneconvergence/test_nvsd_plugin.py @@ -0,0 +1,152 @@ +# Copyright 2014 OneConvergence, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""Test Library for OneConvergencePlugin.""" + +import contextlib +import uuid + +import mock +from oslo.config import cfg + +from neutron import context +from neutron.extensions import portbindings +from neutron import manager +from neutron.plugins.oneconvergence import plugin as nvsd_plugin +from neutron.tests.unit import _test_extension_portbindings as test_bindings +from neutron.tests.unit import test_db_plugin as test_plugin +from neutron.tests.unit import test_l3_plugin + +PLUGIN_NAME = 'neutron.plugins.oneconvergence.plugin.OneConvergencePluginV2' + + +class OneConvergencePluginV2TestCase(test_plugin.NeutronDbPluginV2TestCase): + _plugin_name = PLUGIN_NAME + + def setUp(self): + def mocked_oneconvergence_init(self): + def side_effect(*args, **kwargs): + return {'id': str(uuid.uuid4())} + + self.nvsdlib = mock.Mock() + self.nvsdlib.create_network.side_effect = side_effect + + with mock.patch.object(nvsd_plugin.OneConvergencePluginV2, + 'oneconvergence_init', + new=mocked_oneconvergence_init): + super(OneConvergencePluginV2TestCase, + self).setUp(self._plugin_name) + + +class TestOneConvergencePluginNetworksV2(test_plugin.TestNetworksV2, + OneConvergencePluginV2TestCase): + pass + + +class TestOneConvergencePluginSubnetsV2(test_plugin.TestSubnetsV2, + OneConvergencePluginV2TestCase): + def test_update_subnet_inconsistent_ipv6_gatewayv4(self): + self.skipTest("NVSD Plugin does not support IPV6.") + + def test_create_subnet_with_v6_allocation_pool(self): + self.skipTest("NVSD Plugin does not support IPV6.") + + def 
test_update_subnet_inconsistent_ipv6_hostroute_dst_v4(self): + self.skipTest("NVSD Plugin does not support IPV6.") + + def test_update_subnet_inconsistent_ipv6_hostroute_np_v4(self): + self.skipTest("NVSD Plugin does not support IPV6.") + + def test_create_subnet_ipv6_attributes(self): + self.skipTest("NVSD Plugin does not support IPV6.") + + def test_create_subnet_ipv6_single_attribute_set(self): + self.skipTest("NVSD Plugin does not support IPV6.") + + def test_update_subnet_ipv6_attributes(self): + self.skipTest("NVSD Plugin does not support IPV6.") + + def test_update_subnet_ipv6_inconsistent_enable_dhcp(self): + self.skipTest("NVSD Plugin does not support IPV6.") + + def test_update_subnet_ipv6_inconsistent_ra_attribute(self): + self.skipTest("NVSD Plugin does not support IPV6.") + + def test_update_subnet_ipv6_inconsistent_address_attribute(self): + self.skipTest("NVSD Plugin does not support IPV6.") + + +class TestOneConvergencePluginPortsV2(test_plugin.TestPortsV2, + test_bindings.PortBindingsTestCase, + OneConvergencePluginV2TestCase): + VIF_TYPE = portbindings.VIF_TYPE_OVS + + def test_requested_subnet_id_v4_and_v6(self): + self.skipTest("NVSD Plugin does not support IPV6.") + + def test_port_vif_details(self): + plugin = manager.NeutronManager.get_plugin() + with self.port(name='name') as port1: + ctx = context.get_admin_context() + port = plugin.get_port(ctx, port1['port']['id']) + self.assertEqual(port['binding:vif_type'], + portbindings.VIF_TYPE_OVS) + + def test_ports_vif_details(self): + cfg.CONF.set_default('allow_overlapping_ips', True) + plugin = manager.NeutronManager.get_plugin() + with contextlib.nested(self.port(), self.port()) as (port1, port2): + ctx = context.get_admin_context() + ports = plugin.get_ports(ctx) + self.assertEqual(len(ports), 2) + for port in ports: + self.assertEqual(port['binding:vif_type'], + portbindings.VIF_TYPE_OVS) + + def test_ip_allocation_for_ipv6_subnet_slaac_adddress_mode(self): + self.skipTest("NVSD Plugin does 
not support IPV6.") + + +class TestOneConvergenceBasicGet(test_plugin.TestBasicGet, + OneConvergencePluginV2TestCase): + pass + + +class TestOneConvergenceV2HTTPResponse(test_plugin.TestV2HTTPResponse, + OneConvergencePluginV2TestCase): + pass + + +class TestOneConvergenceL3NatTestCase(test_l3_plugin.L3NatDBIntTestCase): + _plugin_name = PLUGIN_NAME + + def setUp(self): + def mocked_oneconvergence_init(self): + def side_effect(*args, **kwargs): + return {'id': str(uuid.uuid4())} + + self.nvsdlib = mock.Mock() + self.nvsdlib.create_network.side_effect = side_effect + + ext_mgr = test_l3_plugin.L3TestExtensionManager() + + with mock.patch.object(nvsd_plugin.OneConvergencePluginV2, + 'oneconvergence_init', + new=mocked_oneconvergence_init): + super(TestOneConvergenceL3NatTestCase, + self).setUp(plugin=self._plugin_name, ext_mgr=ext_mgr) + + def test_floatingip_with_invalid_create_port(self): + self._test_floatingip_with_invalid_create_port(self._plugin_name) diff --git a/neutron/tests/unit/oneconvergence/test_nvsdlib.py b/neutron/tests/unit/oneconvergence/test_nvsdlib.py new file mode 100644 index 000000000..5ee80ada6 --- /dev/null +++ b/neutron/tests/unit/oneconvergence/test_nvsdlib.py @@ -0,0 +1,261 @@ +# Copyright 2014 OneConvergence, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +import mock + +from neutron.openstack.common import jsonutils as json +from neutron.plugins.oneconvergence.lib import nvsdlib +from neutron.tests import base + +NETWORKS_URI = "/pluginhandler/ocplugin/tenant/%s/lnetwork/" +NETWORK_URI = NETWORKS_URI + "%s" +GET_ALL_NETWORKS = "/pluginhandler/ocplugin/tenant/getallnetworks" + +SUBNETS_URI = NETWORK_URI + "/lsubnet/" +SUBNET_URI = SUBNETS_URI + "%s" +GET_ALL_SUBNETS = "/pluginhandler/ocplugin/tenant/getallsubnets" + +PORTS_URI = NETWORK_URI + "/lport/" +PORT_URI = PORTS_URI + "%s" + +EXT_URI = "/pluginhandler/ocplugin/ext/tenant/%s" +FLOATING_IPS_URI = EXT_URI + "/floatingip/" +FLOATING_IP_URI = FLOATING_IPS_URI + "%s" + +ROUTERS_URI = EXT_URI + "/lrouter/" +ROUTER_URI = ROUTERS_URI + "%s" + +TEST_NET = 'test-network' +TEST_SUBNET = 'test-subnet' +TEST_PORT = 'test-port' +TEST_FIP = 'test-floatingip' +TEST_ROUTER = 'test-router' +TEST_TENANT = 'test-tenant' + + +class TestNVSDApi(base.BaseTestCase): + + def setUp(self): + super(TestNVSDApi, self).setUp() + self.nvsdlib = nvsdlib.NVSDApi() + + def test_create_network(self): + network_obj = { + "name": 'test-net', + "tenant_id": TEST_TENANT, + "shared": False, + "admin_state_up": True, + "router:external": False + } + resp = mock.Mock() + resp.json.return_value = {'id': 'uuid'} + with mock.patch.object(self.nvsdlib, 'send_request', + return_value=resp) as send_request: + uri = NETWORKS_URI % TEST_TENANT + net = self.nvsdlib.create_network(network_obj) + send_request.assert_called_once_with("POST", uri, + body=json.dumps(network_obj), + resource='network', + tenant_id=TEST_TENANT) + self.assertEqual(net, {'id': 'uuid'}) + + def test_update_network(self): + network = {'id': TEST_NET, + 'tenant_id': TEST_TENANT} + update_network = {'name': 'new_name'} + uri = NETWORK_URI % (TEST_TENANT, TEST_NET) + with mock.patch.object(self.nvsdlib, 'send_request') as send_request: + self.nvsdlib.update_network(network, update_network) + send_request.assert_called_once_with( + 
"PUT", uri, body=json.dumps(update_network), + resource='network', tenant_id=TEST_TENANT, + resource_id=TEST_NET) + + def test_delete_network(self): + network = {'id': TEST_NET, + 'tenant_id': TEST_TENANT} + + uri = NETWORK_URI % (TEST_TENANT, TEST_NET) + + with mock.patch.object(self.nvsdlib, 'send_request') as send_request: + with mock.patch.object(self.nvsdlib, '_get_ports'): + self.nvsdlib.delete_network(network) + send_request.assert_called_once_with( + "DELETE", uri, resource='network', + tenant_id=TEST_TENANT, resource_id=TEST_NET) + + def test_create_port(self): + path = PORTS_URI % (TEST_TENANT, TEST_NET) + with mock.patch.object(self.nvsdlib, 'send_request') as send_request: + fixed_ips = [{'ip_address': '10.0.0.2', + 'subnet_id': TEST_SUBNET}] + + lport = { + "id": TEST_PORT, + "name": 'test', + "device_id": "device_id", + "device_owner": "device_owner", + "mac_address": "mac_address", + "fixed_ips": fixed_ips, + "admin_state_up": True, + "network_id": TEST_NET, + "status": 'ACTIVE' + } + self.nvsdlib.create_port(TEST_TENANT, lport) + expected = {"id": TEST_PORT, "name": 'test', + "device_id": "device_id", + "device_owner": "device_owner", + "mac_address": "mac_address", + "ip_address": '10.0.0.2', + "subnet_id": TEST_SUBNET, + "admin_state_up": True, + "network_id": TEST_NET, + "status": 'ACTIVE'} + send_request.assert_called_once_with("POST", path, + body=json.dumps(expected), + resource='port', + tenant_id=TEST_TENANT) + + def test_update_port(self): + port = {'id': TEST_PORT, + 'network_id': TEST_NET} + + port_update = {'name': 'new-name'} + uri = PORT_URI % (TEST_TENANT, TEST_NET, TEST_PORT) + + with mock.patch.object(self.nvsdlib, 'send_request') as send_request: + self.nvsdlib.update_port(TEST_TENANT, port, port_update) + send_request.assert_called_once_with("PUT", uri, + body=json.dumps(port_update), + resource='port', + resource_id='test-port', + tenant_id=TEST_TENANT) + + def test_delete_port(self): + port = {'network_id': TEST_NET, + 
'tenant_id': TEST_TENANT} + uri = PORT_URI % (TEST_TENANT, TEST_NET, TEST_PORT) + + with mock.patch.object(self.nvsdlib, 'send_request') as send_request: + self.nvsdlib.delete_port(TEST_PORT, port) + send_request.assert_called_once_with("DELETE", uri, + resource='port', + tenant_id=TEST_TENANT, + resource_id=TEST_PORT) + + def test_create_subnet(self): + subnet = {'id': TEST_SUBNET, + 'tenant_id': TEST_TENANT, + 'network_id': TEST_NET} + uri = SUBNETS_URI % (TEST_TENANT, TEST_NET) + + with mock.patch.object(self.nvsdlib, 'send_request') as send_request: + self.nvsdlib.create_subnet(subnet) + send_request.assert_called_once_with("POST", uri, + body=json.dumps(subnet), + resource='subnet', + tenant_id=TEST_TENANT) + + def test_update_subnet(self): + subnet = {'id': TEST_SUBNET, + 'tenant_id': TEST_TENANT, + 'network_id': TEST_NET} + subnet_update = {'name': 'new-name'} + uri = SUBNET_URI % (TEST_TENANT, TEST_NET, TEST_SUBNET) + + with mock.patch.object(self.nvsdlib, 'send_request') as send_request: + self.nvsdlib.update_subnet(subnet, subnet_update) + send_request.assert_called_once_with( + "PUT", uri, body=json.dumps(subnet_update), resource='subnet', + tenant_id=TEST_TENANT, resource_id=TEST_SUBNET) + + def test_delete_subnet(self): + subnet = {'id': TEST_SUBNET, + 'tenant_id': TEST_TENANT, + 'network_id': TEST_NET} + uri = SUBNET_URI % (TEST_TENANT, TEST_NET, TEST_SUBNET) + + with mock.patch.object(self.nvsdlib, 'send_request') as send_request: + self.nvsdlib.delete_subnet(subnet) + send_request.assert_called_once_with("DELETE", uri, + resource='subnet', + tenant_id=TEST_TENANT, + resource_id=TEST_SUBNET) + + def test_create_floatingip(self): + floatingip = {'id': TEST_FIP, + 'tenant_id': TEST_TENANT} + uri = FLOATING_IPS_URI % TEST_TENANT + + with mock.patch.object(self.nvsdlib, 'send_request') as send_request: + self.nvsdlib.create_floatingip(floatingip) + send_request.assert_called_once_with("POST", uri, + body=json.dumps(floatingip), + resource='floating_ip', 
+ tenant_id=TEST_TENANT) + + def test_update_floatingip(self): + floatingip = {'id': TEST_FIP, + 'tenant_id': TEST_TENANT} + uri = FLOATING_IP_URI % (TEST_TENANT, TEST_FIP) + + floatingip_update = {'floatingip': {'router_id': TEST_ROUTER}} + with mock.patch.object(self.nvsdlib, 'send_request') as send_request: + self.nvsdlib.update_floatingip(floatingip, floatingip_update) + send_request.assert_called_once_with( + "PUT", uri, body=json.dumps(floatingip_update['floatingip']), + resource='floating_ip', tenant_id=TEST_TENANT, + resource_id=TEST_FIP) + + def test_delete_floatingip(self): + floatingip = {'id': TEST_FIP, + 'tenant_id': TEST_TENANT} + uri = FLOATING_IP_URI % (TEST_TENANT, TEST_FIP) + + with mock.patch.object(self.nvsdlib, 'send_request') as send_request: + self.nvsdlib.delete_floatingip(floatingip) + send_request.assert_called_once_with( + "DELETE", uri, resource='floating_ip', tenant_id=TEST_TENANT, + resource_id=TEST_FIP) + + def test_create_router(self): + router = {'id': TEST_ROUTER, 'tenant_id': TEST_TENANT} + uri = ROUTERS_URI % TEST_TENANT + + with mock.patch.object(self.nvsdlib, 'send_request') as send_request: + self.nvsdlib.create_router(router) + send_request.assert_called_once_with( + "POST", uri, body=json.dumps(router), resource='router', + tenant_id=TEST_TENANT) + + def test_update_router(self): + router = {'id': TEST_ROUTER, 'tenant_id': TEST_TENANT} + uri = ROUTER_URI % (TEST_TENANT, TEST_ROUTER) + + with mock.patch.object(self.nvsdlib, 'send_request') as send_request: + self.nvsdlib.update_router(router) + send_request.assert_called_once_with( + "PUT", uri, body=json.dumps(router), + resource='router', tenant_id=TEST_TENANT, + resource_id=TEST_ROUTER) + + def test_delete_router(self): + uri = ROUTER_URI % (TEST_TENANT, TEST_ROUTER) + + with mock.patch.object(self.nvsdlib, 'send_request') as send_request: + self.nvsdlib.delete_router(TEST_TENANT, TEST_ROUTER) + send_request.assert_called_once_with( + "DELETE", uri, resource='router', + 
tenant_id=TEST_TENANT, resource_id=TEST_ROUTER) diff --git a/neutron/tests/unit/oneconvergence/test_plugin_helper.py b/neutron/tests/unit/oneconvergence/test_plugin_helper.py new file mode 100644 index 000000000..21031a79c --- /dev/null +++ b/neutron/tests/unit/oneconvergence/test_plugin_helper.py @@ -0,0 +1,60 @@ +# Copyright 2014 OneConvergence, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Kedar Kulkarni, One Convergence, Inc. +import mock +import requests + +from neutron.openstack.common import jsonutils as json +from neutron.plugins.oneconvergence.lib import config # noqa +from neutron.plugins.oneconvergence.lib import plugin_helper as client +from neutron.tests import base + + +class TestPluginHelper(base.BaseTestCase): + def setUp(self): + super(TestPluginHelper, self).setUp() + self.nvsdcontroller = client.NVSDController() + + def get_response(self, *args, **kwargs): + response = mock.Mock() + response.status_code = requests.codes.ok + response.content = json.dumps({'session_uuid': 'new_auth_token'}) + return response + + def test_login(self): + login_url = ('http://127.0.0.1:8082/pluginhandler/ocplugin/' + 'authmgmt/login') + headers = {'Content-Type': 'application/json'} + data = json.dumps({"user_name": "ocplugin", "passwd": "oc123"}) + timeout = 30.0 + + with mock.patch.object(self.nvsdcontroller, 'do_request', + side_effect=self.get_response) as do_request: + self.nvsdcontroller.login() + 
do_request.assert_called_once_with('POST', url=login_url, + headers=headers, data=data, + timeout=timeout) + + def test_request(self): + with mock.patch.object(self.nvsdcontroller, 'do_request', + side_effect=self.get_response) as do_request: + self.nvsdcontroller.login() + self.nvsdcontroller.request("POST", "/some_url") + self.assertEqual(do_request.call_count, 2) + do_request.assert_called_with( + 'POST', + url='http://127.0.0.1:8082/some_url?authToken=new_auth_token', + headers={'Content-Type': 'application/json'}, data='', + timeout=30.0) diff --git a/neutron/tests/unit/oneconvergence/test_security_group.py b/neutron/tests/unit/oneconvergence/test_security_group.py new file mode 100644 index 000000000..af08132c5 --- /dev/null +++ b/neutron/tests/unit/oneconvergence/test_security_group.py @@ -0,0 +1,157 @@ +# Copyright 2014 OneConvergence, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Kedar Kulkarni, One Convergence, Inc. + +import uuid + +import mock + +from neutron.api.v2 import attributes +from neutron.extensions import securitygroup as ext_sg +from neutron import manager +from neutron.plugins.oneconvergence import plugin as nvsd_plugin +from neutron.tests.unit import test_extension_security_group as test_sg +from neutron.tests.unit import test_security_groups_rpc as test_sg_rpc + +PLUGIN_NAME = ('neutron.plugins.oneconvergence.' + 'plugin.OneConvergencePluginV2') +AGENTNOTIFIER = ('neutron.plugins.oneconvergence.' 
+ 'plugin.NVSDPluginV2AgentNotifierApi') +DUMMY_NVSD_LIB = ('neutron.tests.unit.oneconvergence.dummynvsdlib.NVSDApi') + + +class OneConvergenceSecurityGroupsTestCase(test_sg.SecurityGroupDBTestCase): + _plugin_name = PLUGIN_NAME + + def setUp(self): + def mocked_oneconvergence_init(self): + def side_effect(*args, **kwargs): + return {'id': str(uuid.uuid4())} + + self.nvsdlib = mock.Mock() + self.nvsdlib.create_network.side_effect = side_effect + + test_sg_rpc.set_firewall_driver(test_sg_rpc.FIREWALL_HYBRID_DRIVER) + notifier_cls = mock.patch(AGENTNOTIFIER).start() + self.notifier = mock.Mock() + notifier_cls.return_value = self.notifier + self._attribute_map_bk_ = {} + for item in attributes.RESOURCE_ATTRIBUTE_MAP: + self._attribute_map_bk_[item] = (attributes. + RESOURCE_ATTRIBUTE_MAP[item]. + copy()) + with mock.patch.object(nvsd_plugin.OneConvergencePluginV2, + 'oneconvergence_init', + new=mocked_oneconvergence_init): + super(OneConvergenceSecurityGroupsTestCase, + self).setUp(PLUGIN_NAME) + + def tearDown(self): + super(OneConvergenceSecurityGroupsTestCase, self).tearDown() + attributes.RESOURCE_ATTRIBUTE_MAP = self._attribute_map_bk_ + + +class TestOneConvergenceSGServerRpcCallBack( + OneConvergenceSecurityGroupsTestCase, + test_sg_rpc.SGServerRpcCallBackMixinTestCase): + def test_security_group_rules_for_devices_ipv6_egress(self): + self.skipTest("NVSD Plugin does not support IPV6.") + + def test_security_group_rules_for_devices_ipv6_ingress(self): + self.skipTest("NVSD Plugin does not support IPV6.") + + def test_security_group_rules_for_devices_ipv6_source_group(self): + self.skipTest("NVSD Plugin does not support IPV6.") + + def test_security_group_ra_rules_for_devices_ipv6_gateway_global(self): + self.skipTest("NVSD Plugin does not support IPV6.") + + def test_security_group_ra_rules_for_devices_ipv6_gateway_lla(self): + self.skipTest("NVSD Plugin does not support IPV6.") + + def test_security_group_ra_rules_for_devices_ipv6_no_gateway_port(self): + 
self.skipTest("NVSD Plugin does not support IPV6.") + + def test_security_group_rule_for_device_ipv6_multi_router_interfaces(self): + self.skipTest("NVSD Plugin does not support IPV6.") + + +class TestOneConvergenceSGServerRpcCallBackXML( + OneConvergenceSecurityGroupsTestCase, + test_sg_rpc.SGServerRpcCallBackMixinTestCaseXML): + def test_security_group_rules_for_devices_ipv6_egress(self): + self.skipTest("NVSD Plugin does not support IPV6.") + + def test_security_group_rules_for_devices_ipv6_ingress(self): + self.skipTest("NVSD Plugin does not support IPV6.") + + def test_security_group_rules_for_devices_ipv6_source_group(self): + self.skipTest("NVSD Plugin does not support IPV6.") + + def test_security_group_ra_rules_for_devices_ipv6_gateway_global(self): + self.skipTest("NVSD Plugin does not support IPV6.") + + def test_security_group_ra_rules_for_devices_ipv6_gateway_lla(self): + self.skipTest("NVSD Plugin does not support IPV6.") + + def test_security_group_ra_rules_for_devices_ipv6_no_gateway_port(self): + self.skipTest("NVSD Plugin does not support IPV6.") + + def test_security_group_rule_for_device_ipv6_multi_router_interfaces(self): + self.skipTest("NVSD Plugin does not support IPV6.") + + +class TestOneConvergenceSecurityGroups(OneConvergenceSecurityGroupsTestCase, + test_sg.TestSecurityGroups, + test_sg_rpc.SGNotificationTestMixin): + + def test_security_group_get_port_from_device(self): + with self.network() as n: + with self.subnet(n): + with self.security_group() as sg: + security_group_id = sg['security_group']['id'] + res = self._create_port(self.fmt, n['network']['id']) + port = self.deserialize(self.fmt, res) + fixed_ips = port['port']['fixed_ips'] + data = {'port': {'fixed_ips': fixed_ips, + 'name': port['port']['name'], + ext_sg.SECURITYGROUPS: + [security_group_id]}} + + req = self.new_update_request('ports', data, + port['port']['id']) + res = self.deserialize(self.fmt, + req.get_response(self.api)) + port_id = res['port']['id'] + plugin = 
manager.NeutronManager.get_plugin() + callbacks = plugin.endpoints[0] + port_dict = callbacks.get_port_from_device(port_id) + self.assertEqual(port_id, port_dict['id']) + self.assertEqual([security_group_id], + port_dict[ext_sg.SECURITYGROUPS]) + self.assertEqual([], port_dict['security_group_rules']) + self.assertEqual([fixed_ips[0]['ip_address']], + port_dict['fixed_ips']) + self._delete('ports', port_id) + + def test_security_group_get_port_from_device_with_no_port(self): + + plugin = manager.NeutronManager.get_plugin() + port_dict = plugin.endpoints[0].get_port_from_device('bad_device_id') + self.assertIsNone(port_dict) + + +class TestOneConvergenceSecurityGroupsXML(TestOneConvergenceSecurityGroups): + fmt = 'xml' diff --git a/neutron/tests/unit/openvswitch/__init__.py b/neutron/tests/unit/openvswitch/__init__.py new file mode 100644 index 000000000..7e503debd --- /dev/null +++ b/neutron/tests/unit/openvswitch/__init__.py @@ -0,0 +1,16 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/neutron/tests/unit/openvswitch/test_agent_scheduler.py b/neutron/tests/unit/openvswitch/test_agent_scheduler.py new file mode 100644 index 000000000..04ef593b5 --- /dev/null +++ b/neutron/tests/unit/openvswitch/test_agent_scheduler.py @@ -0,0 +1,1245 @@ +# Copyright (c) 2013 OpenStack Foundation. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import contextlib +import copy + +import mock +from oslo.config import cfg +from webob import exc + +from neutron.api import extensions +from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api +from neutron.api.v2 import attributes +from neutron.common import constants +from neutron import context +from neutron.db import agents_db +from neutron.db import dhcp_rpc_base +from neutron.db import l3_rpc_base +from neutron.extensions import agent +from neutron.extensions import dhcpagentscheduler +from neutron.extensions import l3agentscheduler +from neutron import manager +from neutron.openstack.common import timeutils +from neutron.openstack.common import uuidutils +from neutron.plugins.common import constants as service_constants +from neutron.tests.unit import test_agent_ext_plugin +from neutron.tests.unit import test_db_plugin as test_plugin +from neutron.tests.unit import test_extensions +from neutron.tests.unit import test_l3_plugin +from neutron.tests.unit import testlib_api +from neutron import wsgi + +L3_HOSTA = 'hosta' +DHCP_HOSTA = 'hosta' +L3_HOSTB = 'hostb' +DHCP_HOSTC = 'hostc' + + +class AgentSchedulerTestMixIn(object): + + def _request_list(self, path, admin_context=True, + expected_code=exc.HTTPOk.code): + req = self._path_req(path, admin_context=admin_context) + res = req.get_response(self.ext_api) + self.assertEqual(res.status_int, expected_code) + return self.deserialize(self.fmt, res) + + def 
_path_req(self, path, method='GET', data=None, + query_string=None, + admin_context=True): + content_type = 'application/%s' % self.fmt + body = None + if data is not None: # empty dict is valid + body = wsgi.Serializer().serialize(data, content_type) + if admin_context: + return testlib_api.create_request( + path, body, content_type, method, query_string=query_string) + else: + return testlib_api.create_request( + path, body, content_type, method, query_string=query_string, + context=context.Context('', 'tenant_id')) + + def _path_create_request(self, path, data, admin_context=True): + return self._path_req(path, method='POST', data=data, + admin_context=admin_context) + + def _path_show_request(self, path, admin_context=True): + return self._path_req(path, admin_context=admin_context) + + def _path_delete_request(self, path, admin_context=True): + return self._path_req(path, method='DELETE', + admin_context=admin_context) + + def _path_update_request(self, path, data, admin_context=True): + return self._path_req(path, method='PUT', data=data, + admin_context=admin_context) + + def _list_routers_hosted_by_l3_agent(self, agent_id, + expected_code=exc.HTTPOk.code, + admin_context=True): + path = "/agents/%s/%s.%s" % (agent_id, + l3agentscheduler.L3_ROUTERS, + self.fmt) + return self._request_list(path, expected_code=expected_code, + admin_context=admin_context) + + def _list_networks_hosted_by_dhcp_agent(self, agent_id, + expected_code=exc.HTTPOk.code, + admin_context=True): + path = "/agents/%s/%s.%s" % (agent_id, + dhcpagentscheduler.DHCP_NETS, + self.fmt) + return self._request_list(path, expected_code=expected_code, + admin_context=admin_context) + + def _list_l3_agents_hosting_router(self, router_id, + expected_code=exc.HTTPOk.code, + admin_context=True): + path = "/routers/%s/%s.%s" % (router_id, + l3agentscheduler.L3_AGENTS, + self.fmt) + return self._request_list(path, expected_code=expected_code, + admin_context=admin_context) + + def 
_list_dhcp_agents_hosting_network(self, network_id, + expected_code=exc.HTTPOk.code, + admin_context=True): + path = "/networks/%s/%s.%s" % (network_id, + dhcpagentscheduler.DHCP_AGENTS, + self.fmt) + return self._request_list(path, expected_code=expected_code, + admin_context=admin_context) + + def _add_router_to_l3_agent(self, id, router_id, + expected_code=exc.HTTPCreated.code, + admin_context=True): + path = "/agents/%s/%s.%s" % (id, + l3agentscheduler.L3_ROUTERS, + self.fmt) + req = self._path_create_request(path, + {'router_id': router_id}, + admin_context=admin_context) + res = req.get_response(self.ext_api) + self.assertEqual(res.status_int, expected_code) + + def _add_network_to_dhcp_agent(self, id, network_id, + expected_code=exc.HTTPCreated.code, + admin_context=True): + path = "/agents/%s/%s.%s" % (id, + dhcpagentscheduler.DHCP_NETS, + self.fmt) + req = self._path_create_request(path, + {'network_id': network_id}, + admin_context=admin_context) + res = req.get_response(self.ext_api) + self.assertEqual(res.status_int, expected_code) + + def _remove_network_from_dhcp_agent(self, id, network_id, + expected_code=exc.HTTPNoContent.code, + admin_context=True): + path = "/agents/%s/%s/%s.%s" % (id, + dhcpagentscheduler.DHCP_NETS, + network_id, + self.fmt) + req = self._path_delete_request(path, + admin_context=admin_context) + res = req.get_response(self.ext_api) + self.assertEqual(res.status_int, expected_code) + + def _remove_router_from_l3_agent(self, id, router_id, + expected_code=exc.HTTPNoContent.code, + admin_context=True): + path = "/agents/%s/%s/%s.%s" % (id, + l3agentscheduler.L3_ROUTERS, + router_id, + self.fmt) + req = self._path_delete_request(path, admin_context=admin_context) + res = req.get_response(self.ext_api) + self.assertEqual(res.status_int, expected_code) + + def _register_one_agent_state(self, agent_state): + callback = agents_db.AgentExtRpcCallback() + callback.report_state(self.adminContext, + agent_state={'agent_state': agent_state}, 
+ time=timeutils.strtime()) + + def _disable_agent(self, agent_id, admin_state_up=False): + new_agent = {} + new_agent['agent'] = {} + new_agent['agent']['admin_state_up'] = admin_state_up + self._update('agents', agent_id, new_agent) + + def _get_agent_id(self, agent_type, host): + agents = self._list_agents() + for agent_data in agents['agents']: + if (agent_data['agent_type'] == agent_type and + agent_data['host'] == host): + return agent_data['id'] + + +class OvsAgentSchedulerTestCaseBase(test_l3_plugin.L3NatTestCaseMixin, + test_agent_ext_plugin.AgentDBTestMixIn, + AgentSchedulerTestMixIn, + test_plugin.NeutronDbPluginV2TestCase): + fmt = 'json' + plugin_str = ('neutron.plugins.openvswitch.' + 'ovs_neutron_plugin.OVSNeutronPluginV2') + l3_plugin = None + + def setUp(self): + # Save the global RESOURCE_ATTRIBUTE_MAP before loading plugin + self.saved_attr_map = {} + for resource, attrs in attributes.RESOURCE_ATTRIBUTE_MAP.iteritems(): + self.saved_attr_map[resource] = attrs.copy() + if self.l3_plugin: + service_plugins = {'l3_plugin_name': self.l3_plugin} + else: + service_plugins = None + super(OvsAgentSchedulerTestCaseBase, self).setUp( + self.plugin_str, service_plugins=service_plugins) + ext_mgr = extensions.PluginAwareExtensionManager.get_instance() + self.ext_api = test_extensions.setup_extensions_middleware(ext_mgr) + self.adminContext = context.get_admin_context() + # Add the resources to the global attribute map + # This is done here as the setup process won't + # initialize the main API router which extends + # the global attribute map + attributes.RESOURCE_ATTRIBUTE_MAP.update( + agent.RESOURCE_ATTRIBUTE_MAP) + self.addCleanup(self.restore_attribute_map) + self.l3agentscheduler_dbMinxin = ( + manager.NeutronManager.get_service_plugins().get( + service_constants.L3_ROUTER_NAT)) + + def restore_attribute_map(self): + # Restore the original RESOURCE_ATTRIBUTE_MAP + attributes.RESOURCE_ATTRIBUTE_MAP = self.saved_attr_map + + +class 
OvsAgentSchedulerTestCase(OvsAgentSchedulerTestCaseBase): + + def test_report_states(self): + self._register_agent_states() + agents = self._list_agents() + self.assertEqual(4, len(agents['agents'])) + + def test_network_scheduling_on_network_creation(self): + self._register_agent_states() + with self.network() as net: + dhcp_agents = self._list_dhcp_agents_hosting_network( + net['network']['id']) + self.assertEqual(0, len(dhcp_agents['agents'])) + + def test_network_auto_schedule_with_disabled(self): + cfg.CONF.set_override('allow_overlapping_ips', True) + with contextlib.nested(self.subnet(), + self.subnet()): + dhcp_rpc = dhcp_rpc_base.DhcpRpcCallbackMixin() + self._register_agent_states() + hosta_id = self._get_agent_id(constants.AGENT_TYPE_DHCP, + DHCP_HOSTA) + hostc_id = self._get_agent_id(constants.AGENT_TYPE_DHCP, + DHCP_HOSTC) + self._disable_agent(hosta_id) + dhcp_rpc.get_active_networks(self.adminContext, host=DHCP_HOSTA) + # second agent will host all the networks since first is disabled. 
+ dhcp_rpc.get_active_networks(self.adminContext, host=DHCP_HOSTC) + networks = self._list_networks_hosted_by_dhcp_agent(hostc_id) + num_hostc_nets = len(networks['networks']) + networks = self._list_networks_hosted_by_dhcp_agent(hosta_id) + num_hosta_nets = len(networks['networks']) + self.assertEqual(0, num_hosta_nets) + self.assertEqual(2, num_hostc_nets) + + def test_network_auto_schedule_with_no_dhcp(self): + cfg.CONF.set_override('allow_overlapping_ips', True) + with contextlib.nested(self.subnet(enable_dhcp=False), + self.subnet(enable_dhcp=False)): + dhcp_rpc = dhcp_rpc_base.DhcpRpcCallbackMixin() + self._register_agent_states() + hosta_id = self._get_agent_id(constants.AGENT_TYPE_DHCP, + DHCP_HOSTA) + hostc_id = self._get_agent_id(constants.AGENT_TYPE_DHCP, + DHCP_HOSTC) + self._disable_agent(hosta_id) + dhcp_rpc.get_active_networks(self.adminContext, host=DHCP_HOSTA) + dhcp_rpc.get_active_networks(self.adminContext, host=DHCP_HOSTC) + networks = self._list_networks_hosted_by_dhcp_agent(hostc_id) + num_hostc_nets = len(networks['networks']) + networks = self._list_networks_hosted_by_dhcp_agent(hosta_id) + num_hosta_nets = len(networks['networks']) + self.assertEqual(0, num_hosta_nets) + self.assertEqual(0, num_hostc_nets) + + def test_network_auto_schedule_with_multiple_agents(self): + cfg.CONF.set_override('dhcp_agents_per_network', 2) + cfg.CONF.set_override('allow_overlapping_ips', True) + with contextlib.nested(self.subnet(), + self.subnet()): + dhcp_rpc = dhcp_rpc_base.DhcpRpcCallbackMixin() + self._register_agent_states() + hosta_id = self._get_agent_id(constants.AGENT_TYPE_DHCP, + DHCP_HOSTA) + hostc_id = self._get_agent_id(constants.AGENT_TYPE_DHCP, + DHCP_HOSTC) + dhcp_rpc.get_active_networks(self.adminContext, host=DHCP_HOSTA) + dhcp_rpc.get_active_networks(self.adminContext, host=DHCP_HOSTC) + networks = self._list_networks_hosted_by_dhcp_agent(hostc_id) + num_hostc_nets = len(networks['networks']) + networks = 
self._list_networks_hosted_by_dhcp_agent(hosta_id) + num_hosta_nets = len(networks['networks']) + self.assertEqual(2, num_hosta_nets) + self.assertEqual(2, num_hostc_nets) + + def test_network_auto_schedule_restart_dhcp_agent(self): + cfg.CONF.set_override('dhcp_agents_per_network', 2) + with self.subnet() as sub1: + dhcp_rpc = dhcp_rpc_base.DhcpRpcCallbackMixin() + self._register_agent_states() + dhcp_rpc.get_active_networks(self.adminContext, host=DHCP_HOSTA) + dhcp_rpc.get_active_networks(self.adminContext, host=DHCP_HOSTA) + dhcp_agents = self._list_dhcp_agents_hosting_network( + sub1['subnet']['network_id']) + self.assertEqual(1, len(dhcp_agents['agents'])) + + def test_network_auto_schedule_with_hosted(self): + # one agent hosts all the networks, other hosts none + cfg.CONF.set_override('allow_overlapping_ips', True) + with contextlib.nested(self.subnet(), + self.subnet()) as (sub1, sub2): + dhcp_rpc = dhcp_rpc_base.DhcpRpcCallbackMixin() + self._register_agent_states() + dhcp_rpc.get_active_networks(self.adminContext, host=DHCP_HOSTA) + # second agent will not host the network since first has got it. 
+ dhcp_rpc.get_active_networks(self.adminContext, host=DHCP_HOSTC) + dhcp_agents = self._list_dhcp_agents_hosting_network( + sub1['subnet']['network_id']) + hosta_id = self._get_agent_id(constants.AGENT_TYPE_DHCP, + DHCP_HOSTA) + hostc_id = self._get_agent_id(constants.AGENT_TYPE_DHCP, + DHCP_HOSTC) + hosta_nets = self._list_networks_hosted_by_dhcp_agent(hosta_id) + num_hosta_nets = len(hosta_nets['networks']) + hostc_nets = self._list_networks_hosted_by_dhcp_agent(hostc_id) + num_hostc_nets = len(hostc_nets['networks']) + + self.assertEqual(2, num_hosta_nets) + self.assertEqual(0, num_hostc_nets) + self.assertEqual(1, len(dhcp_agents['agents'])) + self.assertEqual(DHCP_HOSTA, dhcp_agents['agents'][0]['host']) + + def test_network_auto_schedule_with_hosted_2(self): + # one agent hosts one network + dhcp_rpc = dhcp_rpc_base.DhcpRpcCallbackMixin() + dhcp_hosta = { + 'binary': 'neutron-dhcp-agent', + 'host': DHCP_HOSTA, + 'topic': 'DHCP_AGENT', + 'configurations': {'dhcp_driver': 'dhcp_driver', + 'use_namespaces': True, + }, + 'agent_type': constants.AGENT_TYPE_DHCP} + dhcp_hostc = copy.deepcopy(dhcp_hosta) + dhcp_hostc['host'] = DHCP_HOSTC + cfg.CONF.set_override('allow_overlapping_ips', True) + with self.subnet() as sub1: + self._register_one_agent_state(dhcp_hosta) + dhcp_rpc.get_active_networks(self.adminContext, host=DHCP_HOSTA) + hosta_id = self._get_agent_id(constants.AGENT_TYPE_DHCP, + DHCP_HOSTA) + self._disable_agent(hosta_id, admin_state_up=False) + with self.subnet() as sub2: + self._register_one_agent_state(dhcp_hostc) + dhcp_rpc.get_active_networks(self.adminContext, + host=DHCP_HOSTC) + dhcp_agents_1 = self._list_dhcp_agents_hosting_network( + sub1['subnet']['network_id']) + dhcp_agents_2 = self._list_dhcp_agents_hosting_network( + sub2['subnet']['network_id']) + hosta_nets = self._list_networks_hosted_by_dhcp_agent(hosta_id) + num_hosta_nets = len(hosta_nets['networks']) + hostc_id = self._get_agent_id( + constants.AGENT_TYPE_DHCP, + DHCP_HOSTC) + 
hostc_nets = self._list_networks_hosted_by_dhcp_agent(hostc_id) + num_hostc_nets = len(hostc_nets['networks']) + + self.assertEqual(1, num_hosta_nets) + self.assertEqual(1, num_hostc_nets) + self.assertEqual(1, len(dhcp_agents_1['agents'])) + self.assertEqual(1, len(dhcp_agents_2['agents'])) + self.assertEqual(DHCP_HOSTA, dhcp_agents_1['agents'][0]['host']) + self.assertEqual(DHCP_HOSTC, dhcp_agents_2['agents'][0]['host']) + + def test_network_scheduling_on_port_creation(self): + with self.subnet() as subnet: + dhcp_agents = self._list_dhcp_agents_hosting_network( + subnet['subnet']['network_id']) + result0 = len(dhcp_agents['agents']) + self._register_agent_states() + with self.port(subnet=subnet, + device_owner="compute:test:" + DHCP_HOSTA) as port: + dhcp_agents = self._list_dhcp_agents_hosting_network( + port['port']['network_id']) + result1 = len(dhcp_agents['agents']) + self.assertEqual(0, result0) + self.assertEqual(1, result1) + + def test_network_ha_scheduling_on_port_creation(self): + cfg.CONF.set_override('dhcp_agents_per_network', 2) + with self.subnet() as subnet: + dhcp_agents = self._list_dhcp_agents_hosting_network( + subnet['subnet']['network_id']) + result0 = len(dhcp_agents['agents']) + self._register_agent_states() + with self.port(subnet=subnet, + device_owner="compute:test:" + DHCP_HOSTA) as port: + dhcp_agents = self._list_dhcp_agents_hosting_network( + port['port']['network_id']) + result1 = len(dhcp_agents['agents']) + self.assertEqual(0, result0) + self.assertEqual(2, result1) + + def test_network_ha_scheduling_on_port_creation_with_new_agent(self): + cfg.CONF.set_override('dhcp_agents_per_network', 3) + with self.subnet() as subnet: + dhcp_agents = self._list_dhcp_agents_hosting_network( + subnet['subnet']['network_id']) + result0 = len(dhcp_agents['agents']) + self._register_agent_states() + with self.port(subnet=subnet, + device_owner="compute:test:" + DHCP_HOSTA) as port: + dhcp_agents = self._list_dhcp_agents_hosting_network( + 
port['port']['network_id']) + result1 = len(dhcp_agents['agents']) + self._register_one_dhcp_agent() + with self.port(subnet=subnet, + device_owner="compute:test:" + DHCP_HOSTA) as port: + dhcp_agents = self._list_dhcp_agents_hosting_network( + port['port']['network_id']) + result2 = len(dhcp_agents['agents']) + self.assertEqual(0, result0) + self.assertEqual(2, result1) + self.assertEqual(3, result2) + + def test_network_scheduler_with_disabled_agent(self): + dhcp_hosta = { + 'binary': 'neutron-dhcp-agent', + 'host': DHCP_HOSTA, + 'topic': 'DHCP_AGENT', + 'configurations': {'dhcp_driver': 'dhcp_driver', + 'use_namespaces': True, + }, + 'agent_type': constants.AGENT_TYPE_DHCP} + self._register_one_agent_state(dhcp_hosta) + with self.port() as port1: + dhcp_agents = self._list_dhcp_agents_hosting_network( + port1['port']['network_id']) + self.assertEqual(1, len(dhcp_agents['agents'])) + agents = self._list_agents() + self._disable_agent(agents['agents'][0]['id']) + with self.port() as port2: + dhcp_agents = self._list_dhcp_agents_hosting_network( + port2['port']['network_id']) + self.assertEqual(0, len(dhcp_agents['agents'])) + + def test_network_scheduler_with_down_agent(self): + dhcp_hosta = { + 'binary': 'neutron-dhcp-agent', + 'host': DHCP_HOSTA, + 'topic': 'DHCP_AGENT', + 'configurations': {'dhcp_driver': 'dhcp_driver', + 'use_namespaces': True, + }, + 'agent_type': constants.AGENT_TYPE_DHCP} + self._register_one_agent_state(dhcp_hosta) + is_agent_down_str = 'neutron.db.agents_db.AgentDbMixin.is_agent_down' + with mock.patch(is_agent_down_str) as mock_is_agent_down: + mock_is_agent_down.return_value = False + with self.port() as port: + dhcp_agents = self._list_dhcp_agents_hosting_network( + port['port']['network_id']) + self.assertEqual(1, len(dhcp_agents['agents'])) + with mock.patch(is_agent_down_str) as mock_is_agent_down: + mock_is_agent_down.return_value = True + with self.port() as port: + dhcp_agents = self._list_dhcp_agents_hosting_network( + 
port['port']['network_id']) + self.assertEqual(0, len(dhcp_agents['agents'])) + + def test_network_scheduler_with_hosted_network(self): + plugin = manager.NeutronManager.get_plugin() + dhcp_hosta = { + 'binary': 'neutron-dhcp-agent', + 'host': DHCP_HOSTA, + 'topic': 'DHCP_AGENT', + 'configurations': {'dhcp_driver': 'dhcp_driver', + 'use_namespaces': True, + }, + 'agent_type': constants.AGENT_TYPE_DHCP} + self._register_one_agent_state(dhcp_hosta) + with self.port() as port1: + dhcp_agents = self._list_dhcp_agents_hosting_network( + port1['port']['network_id']) + self.assertEqual(1, len(dhcp_agents['agents'])) + with mock.patch.object(plugin, + 'get_dhcp_agents_hosting_networks', + autospec=True) as mock_hosting_agents: + + mock_hosting_agents.return_value = plugin.get_agents_db( + self.adminContext) + with self.network('test', do_delete=False) as net1: + pass + with self.subnet(network=net1, + cidr='10.0.1.0/24', + do_delete=False) as subnet1: + pass + with self.port(subnet=subnet1, no_delete=True) as port2: + pass + dhcp_agents = self._list_dhcp_agents_hosting_network( + port2['port']['network_id']) + self.assertEqual(0, len(dhcp_agents['agents'])) + + def test_network_policy(self): + with self.network() as net1: + self._register_agent_states() + hosta_id = self._get_agent_id(constants.AGENT_TYPE_DHCP, + DHCP_HOSTA) + self._list_networks_hosted_by_dhcp_agent( + hosta_id, expected_code=exc.HTTPForbidden.code, + admin_context=False) + self._add_network_to_dhcp_agent( + hosta_id, net1['network']['id'], + expected_code=exc.HTTPForbidden.code, + admin_context=False) + self._add_network_to_dhcp_agent(hosta_id, + net1['network']['id']) + self._remove_network_from_dhcp_agent( + hosta_id, net1['network']['id'], + expected_code=exc.HTTPForbidden.code, + admin_context=False) + self._list_dhcp_agents_hosting_network( + net1['network']['id'], + expected_code=exc.HTTPForbidden.code, + admin_context=False) + + def test_network_add_to_dhcp_agent(self): + with self.network() as 
net1: + self._register_agent_states() + hosta_id = self._get_agent_id(constants.AGENT_TYPE_DHCP, + DHCP_HOSTA) + num_before_add = len( + self._list_networks_hosted_by_dhcp_agent( + hosta_id)['networks']) + self._add_network_to_dhcp_agent(hosta_id, + net1['network']['id']) + num_after_add = len( + self._list_networks_hosted_by_dhcp_agent( + hosta_id)['networks']) + self.assertEqual(0, num_before_add) + self.assertEqual(1, num_after_add) + + def test_network_remove_from_dhcp_agent(self): + dhcp_hosta = { + 'binary': 'neutron-dhcp-agent', + 'host': DHCP_HOSTA, + 'topic': 'DHCP_AGENT', + 'configurations': {'dhcp_driver': 'dhcp_driver', + 'use_namespaces': True, + }, + 'agent_type': constants.AGENT_TYPE_DHCP} + self._register_one_agent_state(dhcp_hosta) + hosta_id = self._get_agent_id(constants.AGENT_TYPE_DHCP, + DHCP_HOSTA) + with self.port() as port1: + num_before_remove = len( + self._list_networks_hosted_by_dhcp_agent( + hosta_id)['networks']) + self._remove_network_from_dhcp_agent(hosta_id, + port1['port']['network_id']) + num_after_remove = len( + self._list_networks_hosted_by_dhcp_agent( + hosta_id)['networks']) + self.assertEqual(1, num_before_remove) + self.assertEqual(0, num_after_remove) + + def test_list_active_networks_on_not_registered_yet_dhcp_agent(self): + plugin = manager.NeutronManager.get_plugin() + nets = plugin.list_active_networks_on_active_dhcp_agent( + self.adminContext, host=DHCP_HOSTA) + self.assertEqual([], nets) + + def test_reserved_port_after_network_remove_from_dhcp_agent(self): + dhcp_hosta = { + 'binary': 'neutron-dhcp-agent', + 'host': DHCP_HOSTA, + 'topic': 'DHCP_AGENT', + 'configurations': {'dhcp_driver': 'dhcp_driver', + 'use_namespaces': True, + }, + 'agent_type': constants.AGENT_TYPE_DHCP} + self._register_one_agent_state(dhcp_hosta) + hosta_id = self._get_agent_id(constants.AGENT_TYPE_DHCP, + DHCP_HOSTA) + with self.port(device_owner=constants.DEVICE_OWNER_DHCP, + host=DHCP_HOSTA) as port1: + 
self._remove_network_from_dhcp_agent(hosta_id, + port1['port']['network_id']) + port_res = self._list_ports( + 'json', + 200, + network_id=port1['port']['network_id']) + port_list = self.deserialize('json', port_res) + self.assertEqual(port_list['ports'][0]['device_id'], + constants.DEVICE_ID_RESERVED_DHCP_PORT) + + def test_router_auto_schedule_with_invalid_router(self): + with self.router() as router: + l3_rpc = l3_rpc_base.L3RpcCallbackMixin() + self._register_agent_states() + # deleted router + ret_a = l3_rpc.sync_routers(self.adminContext, host=L3_HOSTA, + router_ids=[router['router']['id']]) + self.assertFalse(ret_a) + # non-existent router + ret_a = l3_rpc.sync_routers(self.adminContext, host=L3_HOSTA, + router_ids=[uuidutils.generate_uuid()]) + self.assertFalse(ret_a) + + def test_router_auto_schedule_with_hosted(self): + with self.router() as router: + l3_rpc = l3_rpc_base.L3RpcCallbackMixin() + self._register_agent_states() + ret_a = l3_rpc.sync_routers(self.adminContext, host=L3_HOSTA) + ret_b = l3_rpc.sync_routers(self.adminContext, host=L3_HOSTB) + l3_agents = self._list_l3_agents_hosting_router( + router['router']['id']) + self.assertEqual(1, len(ret_a)) + self.assertIn(router['router']['id'], [r['id'] for r in ret_a]) + self.assertFalse(len(ret_b)) + self.assertEqual(1, len(l3_agents['agents'])) + self.assertEqual(L3_HOSTA, l3_agents['agents'][0]['host']) + + def test_router_auto_schedule_restart_l3_agent(self): + with self.router(): + l3_rpc = l3_rpc_base.L3RpcCallbackMixin() + self._register_agent_states() + l3_rpc.sync_routers(self.adminContext, host=L3_HOSTA) + l3_rpc.sync_routers(self.adminContext, host=L3_HOSTA) + + def test_router_auto_schedule_with_hosted_2(self): + # one agent hosts one router + l3_rpc = l3_rpc_base.L3RpcCallbackMixin() + l3_hosta = { + 'binary': 'neutron-l3-agent', + 'host': L3_HOSTA, + 'topic': 'L3_AGENT', + 'configurations': {'use_namespaces': True, + 'router_id': None, + 'handle_internal_only_routers': + True, + 
'gateway_external_network_id': + None, + 'interface_driver': 'interface_driver', + }, + 'agent_type': constants.AGENT_TYPE_L3} + l3_hostb = copy.deepcopy(l3_hosta) + l3_hostb['host'] = L3_HOSTB + with self.router() as router1: + self._register_one_agent_state(l3_hosta) + l3_rpc.sync_routers(self.adminContext, host=L3_HOSTA) + hosta_id = self._get_agent_id(constants.AGENT_TYPE_L3, + L3_HOSTA) + self._disable_agent(hosta_id, admin_state_up=False) + with self.router() as router2: + self._register_one_agent_state(l3_hostb) + l3_rpc.sync_routers(self.adminContext, host=L3_HOSTB) + l3_agents_1 = self._list_l3_agents_hosting_router( + router1['router']['id']) + l3_agents_2 = self._list_l3_agents_hosting_router( + router2['router']['id']) + hosta_routers = self._list_routers_hosted_by_l3_agent(hosta_id) + num_hosta_routers = len(hosta_routers['routers']) + hostb_id = self._get_agent_id( + constants.AGENT_TYPE_L3, + L3_HOSTB) + hostb_routers = self._list_routers_hosted_by_l3_agent(hostb_id) + num_hostc_routers = len(hostb_routers['routers']) + + self.assertEqual(1, num_hosta_routers) + self.assertEqual(1, num_hostc_routers) + self.assertEqual(1, len(l3_agents_1['agents'])) + self.assertEqual(1, len(l3_agents_2['agents'])) + self.assertEqual(L3_HOSTA, l3_agents_1['agents'][0]['host']) + self.assertEqual(L3_HOSTB, l3_agents_2['agents'][0]['host']) + + def test_router_auto_schedule_with_disabled(self): + with contextlib.nested(self.router(), + self.router()): + l3_rpc = l3_rpc_base.L3RpcCallbackMixin() + self._register_agent_states() + hosta_id = self._get_agent_id(constants.AGENT_TYPE_L3, + L3_HOSTA) + hostb_id = self._get_agent_id(constants.AGENT_TYPE_L3, + L3_HOSTB) + self._disable_agent(hosta_id) + # first agent will not host router since it is disabled + l3_rpc.sync_routers(self.adminContext, host=L3_HOSTA) + # second agent will host all the routers since first is disabled. 
+ l3_rpc.sync_routers(self.adminContext, host=L3_HOSTB) + hostb_routers = self._list_routers_hosted_by_l3_agent(hostb_id) + num_hostb_routers = len(hostb_routers['routers']) + hosta_routers = self._list_routers_hosted_by_l3_agent(hosta_id) + num_hosta_routers = len(hosta_routers['routers']) + self.assertEqual(2, num_hostb_routers) + self.assertEqual(0, num_hosta_routers) + + def test_router_auto_schedule_with_candidates(self): + l3_hosta = { + 'binary': 'neutron-l3-agent', + 'host': L3_HOSTA, + 'topic': 'L3_AGENT', + 'configurations': {'use_namespaces': False, + 'router_id': None, + 'handle_internal_only_routers': + True, + 'gateway_external_network_id': + None, + 'interface_driver': 'interface_driver', + }, + 'agent_type': constants.AGENT_TYPE_L3} + with contextlib.nested(self.router(), + self.router()) as (router1, router2): + l3_rpc = l3_rpc_base.L3RpcCallbackMixin() + l3_hosta['configurations']['router_id'] = router1['router']['id'] + self._register_one_agent_state(l3_hosta) + hosta_id = self._get_agent_id(constants.AGENT_TYPE_L3, + L3_HOSTA) + l3_rpc.sync_routers(self.adminContext, host=L3_HOSTA) + hosta_routers = self._list_routers_hosted_by_l3_agent(hosta_id) + num_hosta_routers = len(hosta_routers['routers']) + l3_agents_1 = self._list_l3_agents_hosting_router( + router1['router']['id']) + l3_agents_2 = self._list_l3_agents_hosting_router( + router2['router']['id']) + # L3 agent will host only the compatible router. 
+ self.assertEqual(1, num_hosta_routers) + self.assertEqual(1, len(l3_agents_1['agents'])) + self.assertEqual(0, len(l3_agents_2['agents'])) + + def test_rpc_sync_routers(self): + l3_rpc = l3_rpc_base.L3RpcCallbackMixin() + self._register_agent_states() + + # No routers + ret_a = l3_rpc.sync_routers(self.adminContext, host=L3_HOSTA) + self.assertEqual(0, len(ret_a)) + + with contextlib.nested(self.router(), + self.router(), + self.router()) as routers: + router_ids = [r['router']['id'] for r in routers] + + # Get all routers + ret_a = l3_rpc.sync_routers(self.adminContext, host=L3_HOSTA) + self.assertEqual(3, len(ret_a)) + self.assertEqual(set(router_ids), set([r['id'] for r in ret_a])) + + # Get all routers (router_ids=None) + ret_a = l3_rpc.sync_routers(self.adminContext, host=L3_HOSTA, + router_ids=None) + self.assertEqual(3, len(ret_a)) + self.assertEqual(set(router_ids), set([r['id'] for r in ret_a])) + + # Get router2 only + ret_a = l3_rpc.sync_routers(self.adminContext, host=L3_HOSTA, + router_ids=[router_ids[1]]) + self.assertEqual(1, len(ret_a)) + self.assertIn(router_ids[1], [r['id'] for r in ret_a]) + + # Get router1 and router3 + ret_a = l3_rpc.sync_routers(self.adminContext, host=L3_HOSTA, + router_ids=[router_ids[0], + router_ids[2]]) + self.assertEqual(2, len(ret_a)) + self.assertIn(router_ids[0], [r['id'] for r in ret_a]) + self.assertIn(router_ids[2], [r['id'] for r in ret_a]) + + def test_router_auto_schedule_for_specified_routers(self): + + def _sync_router_with_ids(router_ids, exp_synced, exp_hosted, host_id): + ret_a = l3_rpc.sync_routers(self.adminContext, host=L3_HOSTA, + router_ids=router_ids) + self.assertEqual(exp_synced, len(ret_a)) + for r in router_ids: + self.assertIn(r, [r['id'] for r in ret_a]) + host_routers = self._list_routers_hosted_by_l3_agent(host_id) + num_host_routers = len(host_routers['routers']) + self.assertEqual(exp_hosted, num_host_routers) + + l3_rpc = l3_rpc_base.L3RpcCallbackMixin() + self._register_agent_states() + 
hosta_id = self._get_agent_id(constants.AGENT_TYPE_L3, L3_HOSTA) + + with contextlib.nested(self.router(), self.router(), + self.router(), self.router()) as routers: + router_ids = [r['router']['id'] for r in routers] + # Sync router1 (router1 is scheduled) + _sync_router_with_ids([router_ids[0]], 1, 1, hosta_id) + # Sync router1 only (no router is scheduled) + _sync_router_with_ids([router_ids[0]], 1, 1, hosta_id) + # Schedule router2 + _sync_router_with_ids([router_ids[1]], 1, 2, hosta_id) + # Sync router2 and router4 (router4 is scheduled) + _sync_router_with_ids([router_ids[1], router_ids[3]], + 2, 3, hosta_id) + # Sync all routers (router3 is scheduled) + _sync_router_with_ids(router_ids, 4, 4, hosta_id) + + def test_router_schedule_with_candidates(self): + l3_hosta = { + 'binary': 'neutron-l3-agent', + 'host': L3_HOSTA, + 'topic': 'L3_AGENT', + 'configurations': {'use_namespaces': False, + 'router_id': None, + 'handle_internal_only_routers': + True, + 'gateway_external_network_id': + None, + 'interface_driver': 'interface_driver', + }, + 'agent_type': constants.AGENT_TYPE_L3} + with contextlib.nested(self.router(), + self.router(), + self.subnet(), + self.subnet(cidr='10.0.3.0/24')) as (router1, + router2, + subnet1, + subnet2): + l3_hosta['configurations']['router_id'] = router1['router']['id'] + self._register_one_agent_state(l3_hosta) + hosta_id = self._get_agent_id(constants.AGENT_TYPE_L3, + L3_HOSTA) + self._router_interface_action('add', + router1['router']['id'], + subnet1['subnet']['id'], + None) + self._router_interface_action('add', + router2['router']['id'], + subnet2['subnet']['id'], + None) + hosta_routers = self._list_routers_hosted_by_l3_agent(hosta_id) + num_hosta_routers = len(hosta_routers['routers']) + l3_agents_1 = self._list_l3_agents_hosting_router( + router1['router']['id']) + l3_agents_2 = self._list_l3_agents_hosting_router( + router2['router']['id']) + # safe cleanup + self._router_interface_action('remove', + 
router1['router']['id'], + subnet1['subnet']['id'], + None) + self._router_interface_action('remove', + router2['router']['id'], + subnet2['subnet']['id'], + None) + + # L3 agent will host only the compatible router. + self.assertEqual(1, num_hosta_routers) + self.assertEqual(1, len(l3_agents_1['agents'])) + self.assertEqual(0, len(l3_agents_2['agents'])) + + def test_router_without_l3_agents(self): + with self.subnet() as s: + self._set_net_external(s['subnet']['network_id']) + data = {'router': {'tenant_id': uuidutils.generate_uuid()}} + data['router']['name'] = 'router1' + data['router']['external_gateway_info'] = { + 'network_id': s['subnet']['network_id']} + router_req = self.new_create_request('routers', data, self.fmt) + res = router_req.get_response(self.ext_api) + router = self.deserialize(self.fmt, res) + l3agents = ( + self.l3agentscheduler_dbMinxin.get_l3_agents_hosting_routers( + self.adminContext, [router['router']['id']])) + self._delete('routers', router['router']['id']) + self.assertEqual(0, len(l3agents)) + + def test_router_sync_data(self): + with contextlib.nested( + self.subnet(), + self.subnet(cidr='10.0.2.0/24'), + self.subnet(cidr='10.0.3.0/24') + ) as (s1, s2, s3): + self._register_agent_states() + self._set_net_external(s1['subnet']['network_id']) + data = {'router': {'tenant_id': uuidutils.generate_uuid()}} + data['router']['name'] = 'router1' + data['router']['external_gateway_info'] = { + 'network_id': s1['subnet']['network_id']} + router_req = self.new_create_request('routers', data, self.fmt) + res = router_req.get_response(self.ext_api) + router = self.deserialize(self.fmt, res) + self._router_interface_action('add', + router['router']['id'], + s2['subnet']['id'], + None) + self._router_interface_action('add', + router['router']['id'], + s3['subnet']['id'], + None) + l3agents = self._list_l3_agents_hosting_router( + router['router']['id']) + self.assertEqual(1, len(l3agents['agents'])) + agents = self._list_agents() + 
another_l3_agent_id = None + another_l3_agent_host = None + default = l3agents['agents'][0]['id'] + for com in agents['agents']: + if (com['id'] != default and + com['agent_type'] == constants.AGENT_TYPE_L3): + another_l3_agent_id = com['id'] + another_l3_agent_host = com['host'] + break + self.assertIsNotNone(another_l3_agent_id) + self._add_router_to_l3_agent(another_l3_agent_id, + router['router']['id'], + expected_code=exc.HTTPConflict.code) + self._remove_router_from_l3_agent(default, + router['router']['id']) + self._add_router_to_l3_agent(another_l3_agent_id, + router['router']['id']) + l3agents = self._list_l3_agents_hosting_router( + router['router']['id']) + self.assertEqual(another_l3_agent_host, + l3agents['agents'][0]['host']) + self._remove_router_from_l3_agent(another_l3_agent_id, + router['router']['id']) + self._router_interface_action('remove', + router['router']['id'], + s2['subnet']['id'], + None) + l3agents = self._list_l3_agents_hosting_router( + router['router']['id']) + self.assertEqual(1, + len(l3agents['agents'])) + self._router_interface_action('remove', + router['router']['id'], + s3['subnet']['id'], + None) + self._delete('routers', router['router']['id']) + + def test_router_add_to_l3_agent(self): + with self.router() as router1: + self._register_agent_states() + hosta_id = self._get_agent_id(constants.AGENT_TYPE_L3, + L3_HOSTA) + num_before_add = len( + self._list_routers_hosted_by_l3_agent( + hosta_id)['routers']) + self._add_router_to_l3_agent(hosta_id, + router1['router']['id']) + hostb_id = self._get_agent_id(constants.AGENT_TYPE_L3, + L3_HOSTB) + self._add_router_to_l3_agent(hostb_id, + router1['router']['id'], + expected_code=exc.HTTPConflict.code) + num_after_add = len( + self._list_routers_hosted_by_l3_agent( + hosta_id)['routers']) + self.assertEqual(0, num_before_add) + self.assertEqual(1, num_after_add) + + def test_router_add_to_l3_agent_two_times(self): + with self.router() as router1: + self._register_agent_states() + 
hosta_id = self._get_agent_id(constants.AGENT_TYPE_L3, + L3_HOSTA) + self._add_router_to_l3_agent(hosta_id, + router1['router']['id']) + self._add_router_to_l3_agent(hosta_id, + router1['router']['id'], + expected_code=exc.HTTPConflict.code) + + def test_router_add_to_two_l3_agents(self): + with self.router() as router1: + self._register_agent_states() + hosta_id = self._get_agent_id(constants.AGENT_TYPE_L3, + L3_HOSTA) + hostb_id = self._get_agent_id(constants.AGENT_TYPE_L3, + L3_HOSTB) + self._add_router_to_l3_agent(hosta_id, + router1['router']['id']) + self._add_router_to_l3_agent(hostb_id, + router1['router']['id'], + expected_code=exc.HTTPConflict.code) + + def test_router_policy(self): + with self.router() as router1: + self._register_agent_states() + hosta_id = self._get_agent_id(constants.AGENT_TYPE_L3, + L3_HOSTA) + self._list_routers_hosted_by_l3_agent( + hosta_id, expected_code=exc.HTTPForbidden.code, + admin_context=False) + self._add_router_to_l3_agent( + hosta_id, router1['router']['id'], + expected_code=exc.HTTPForbidden.code, + admin_context=False) + self._add_router_to_l3_agent( + hosta_id, router1['router']['id']) + self._remove_router_from_l3_agent( + hosta_id, router1['router']['id'], + expected_code=exc.HTTPForbidden.code, + admin_context=False) + self._list_l3_agents_hosting_router( + router1['router']['id'], + expected_code=exc.HTTPForbidden.code, + admin_context=False) + + +class OvsDhcpAgentNotifierTestCase(test_l3_plugin.L3NatTestCaseMixin, + test_agent_ext_plugin.AgentDBTestMixIn, + AgentSchedulerTestMixIn, + test_plugin.NeutronDbPluginV2TestCase): + plugin_str = ('neutron.plugins.openvswitch.' 
+ 'ovs_neutron_plugin.OVSNeutronPluginV2') + + def setUp(self): + # Save the global RESOURCE_ATTRIBUTE_MAP before loading plugin + self.saved_attr_map = {} + for resource, attrs in attributes.RESOURCE_ATTRIBUTE_MAP.iteritems(): + self.saved_attr_map[resource] = attrs.copy() + super(OvsDhcpAgentNotifierTestCase, self).setUp(self.plugin_str) + # the notifier is used to get access to make_msg() method only + self.dhcp_notifier = dhcp_rpc_agent_api.DhcpAgentNotifyAPI() + self.dhcp_notifier_cast = mock.patch( + 'neutron.api.rpc.agentnotifiers.dhcp_rpc_agent_api.' + 'DhcpAgentNotifyAPI.cast').start() + ext_mgr = extensions.PluginAwareExtensionManager.get_instance() + self.ext_api = test_extensions.setup_extensions_middleware(ext_mgr) + self.adminContext = context.get_admin_context() + # Add the resources to the global attribute map + # This is done here as the setup process won't + # initialize the main API router which extends + # the global attribute map + attributes.RESOURCE_ATTRIBUTE_MAP.update( + agent.RESOURCE_ATTRIBUTE_MAP) + self.addCleanup(self.restore_attribute_map) + + def restore_attribute_map(self): + # Restore the original RESOURCE_ATTRIBUTE_MAP + attributes.RESOURCE_ATTRIBUTE_MAP = self.saved_attr_map + + def test_network_add_to_dhcp_agent_notification(self): + with self.network() as net1: + network_id = net1['network']['id'] + self._register_agent_states() + hosta_id = self._get_agent_id(constants.AGENT_TYPE_DHCP, + DHCP_HOSTA) + self._add_network_to_dhcp_agent(hosta_id, + network_id) + self.dhcp_notifier_cast.assert_called_with( + mock.ANY, + self.dhcp_notifier.make_msg( + 'network_create_end', + payload={'network': {'id': network_id}}), + topic='dhcp_agent.' 
+ DHCP_HOSTA) + + def test_network_remove_from_dhcp_agent_notification(self): + with self.network(do_delete=False) as net1: + network_id = net1['network']['id'] + self._register_agent_states() + hosta_id = self._get_agent_id(constants.AGENT_TYPE_DHCP, + DHCP_HOSTA) + self._add_network_to_dhcp_agent(hosta_id, + network_id) + + self._remove_network_from_dhcp_agent(hosta_id, + network_id) + self.dhcp_notifier_cast.assert_called_with( + mock.ANY, + self.dhcp_notifier.make_msg( + 'network_delete_end', + payload={'network_id': network_id}), + topic='dhcp_agent.' + DHCP_HOSTA) + + def test_agent_updated_dhcp_agent_notification(self): + self._register_agent_states() + hosta_id = self._get_agent_id(constants.AGENT_TYPE_DHCP, + DHCP_HOSTA) + self._disable_agent(hosta_id, admin_state_up=False) + + self.dhcp_notifier_cast.assert_called_with( + mock.ANY, self.dhcp_notifier.make_msg( + 'agent_updated', + payload={'admin_state_up': False}), + topic='dhcp_agent.' + DHCP_HOSTA) + + def _network_port_create( + self, hosts, gateway=attributes.ATTR_NOT_SPECIFIED, owner=None): + for host in hosts: + self._register_one_agent_state( + {'binary': 'neutron-dhcp-agent', + 'host': host, + 'topic': 'dhcp_agent', + 'configurations': {'dhcp_driver': 'dhcp_driver', + 'use_namespaces': True, }, + 'agent_type': constants.AGENT_TYPE_DHCP}) + with self.network(do_delete=False) as net1: + with self.subnet(network=net1, + gateway_ip=gateway, + do_delete=False) as subnet1: + if owner: + with self.port(subnet=subnet1, + no_delete=True, + device_owner=owner) as port: + return [net1, subnet1, port] + else: + with self.port(subnet=subnet1, + no_delete=True) as port: + return [net1, subnet1, port] + + def _notification_mocks(self, hosts, net, subnet, port): + host_calls = {} + for host in hosts: + expected_calls = [ + mock.call( + mock.ANY, + self.dhcp_notifier.make_msg( + 'network_create_end', + payload={'network': {'id': net['network']['id']}}), + topic='dhcp_agent.' 
+ host), + mock.call( + mock.ANY, + self.dhcp_notifier.make_msg( + 'port_create_end', + payload={'port': port['port']}), + topic='dhcp_agent.' + host)] + host_calls[host] = expected_calls + return host_calls + + def test_network_port_create_notification(self): + hosts = [DHCP_HOSTA] + net, subnet, port = self._network_port_create(hosts) + expected_calls = self._notification_mocks(hosts, net, subnet, port) + self.assertEqual( + expected_calls[DHCP_HOSTA], self.dhcp_notifier_cast.call_args_list) + + def test_network_ha_port_create_notification(self): + cfg.CONF.set_override('dhcp_agents_per_network', 2) + hosts = [DHCP_HOSTA, DHCP_HOSTC] + net, subnet, port = self._network_port_create(hosts) + expected_calls = self._notification_mocks(hosts, net, subnet, port) + for expected in expected_calls[DHCP_HOSTA]: + self.assertIn(expected, self.dhcp_notifier_cast.call_args_list) + for expected in expected_calls[DHCP_HOSTC]: + self.assertIn(expected, self.dhcp_notifier_cast.call_args_list) + + +class OvsL3AgentNotifierTestCase(test_l3_plugin.L3NatTestCaseMixin, + test_agent_ext_plugin.AgentDBTestMixIn, + AgentSchedulerTestMixIn, + test_plugin.NeutronDbPluginV2TestCase): + plugin_str = ('neutron.plugins.openvswitch.' + 'ovs_neutron_plugin.OVSNeutronPluginV2') + l3_plugin = None + + def setUp(self): + self.dhcp_notifier_cls_p = mock.patch( + 'neutron.api.rpc.agentnotifiers.dhcp_rpc_agent_api.' 
+ 'DhcpAgentNotifyAPI') + self.dhcp_notifier = mock.Mock(name='dhcp_notifier') + self.dhcp_notifier_cls = self.dhcp_notifier_cls_p.start() + self.dhcp_notifier_cls.return_value = self.dhcp_notifier + # Save the global RESOURCE_ATTRIBUTE_MAP + self.saved_attr_map = {} + for resource, attrs in attributes.RESOURCE_ATTRIBUTE_MAP.iteritems(): + self.saved_attr_map[resource] = attrs.copy() + if self.l3_plugin: + service_plugins = {'l3_plugin_name': self.l3_plugin} + else: + service_plugins = None + super(OvsL3AgentNotifierTestCase, self).setUp( + self.plugin_str, service_plugins=service_plugins) + ext_mgr = extensions.PluginAwareExtensionManager.get_instance() + self.ext_api = test_extensions.setup_extensions_middleware(ext_mgr) + self.adminContext = context.get_admin_context() + # Add the resources to the global attribute map + # This is done here as the setup process won't + # initialize the main API router which extends + # the global attribute map + attributes.RESOURCE_ATTRIBUTE_MAP.update( + agent.RESOURCE_ATTRIBUTE_MAP) + self.addCleanup(self.restore_attribute_map) + + def restore_attribute_map(self): + # Restore the original RESOURCE_ATTRIBUTE_MAP + attributes.RESOURCE_ATTRIBUTE_MAP = self.saved_attr_map + + def test_router_add_to_l3_agent_notification(self): + plugin = manager.NeutronManager.get_plugin() + l3_notifier = plugin.agent_notifiers[constants.AGENT_TYPE_L3] + with mock.patch.object(l3_notifier, 'cast') as mock_l3: + with self.router() as router1: + self._register_agent_states() + hosta_id = self._get_agent_id(constants.AGENT_TYPE_L3, + L3_HOSTA) + self._add_router_to_l3_agent(hosta_id, + router1['router']['id']) + routers = [router1['router']['id']] + mock_l3.assert_called_with( + mock.ANY, + l3_notifier.make_msg( + 'router_added_to_agent', + payload=routers), + topic='l3_agent.hosta') + + def test_router_remove_from_l3_agent_notification(self): + plugin = manager.NeutronManager.get_plugin() + l3_notifier = 
plugin.agent_notifiers[constants.AGENT_TYPE_L3] + with mock.patch.object(l3_notifier, 'cast') as mock_l3: + with self.router() as router1: + self._register_agent_states() + hosta_id = self._get_agent_id(constants.AGENT_TYPE_L3, + L3_HOSTA) + self._add_router_to_l3_agent(hosta_id, + router1['router']['id']) + self._remove_router_from_l3_agent(hosta_id, + router1['router']['id']) + mock_l3.assert_called_with( + mock.ANY, l3_notifier.make_msg( + 'router_removed_from_agent', + payload={'router_id': router1['router']['id']}), + topic='l3_agent.hosta') + + def test_agent_updated_l3_agent_notification(self): + plugin = manager.NeutronManager.get_plugin() + l3_notifier = plugin.agent_notifiers[constants.AGENT_TYPE_L3] + with mock.patch.object(l3_notifier, 'cast') as mock_l3: + self._register_agent_states() + hosta_id = self._get_agent_id(constants.AGENT_TYPE_L3, + L3_HOSTA) + self._disable_agent(hosta_id, admin_state_up=False) + mock_l3.assert_called_with( + mock.ANY, l3_notifier.make_msg( + 'agent_updated', payload={'admin_state_up': False}), + topic='l3_agent.hosta') + + +class OvsAgentSchedulerTestCaseXML(OvsAgentSchedulerTestCase): + fmt = 'xml' diff --git a/neutron/tests/unit/openvswitch/test_openvswitch_plugin.py b/neutron/tests/unit/openvswitch/test_openvswitch_plugin.py new file mode 100644 index 000000000..234a8feb8 --- /dev/null +++ b/neutron/tests/unit/openvswitch/test_openvswitch_plugin.py @@ -0,0 +1,88 @@ +# Copyright (c) 2012 OpenStack Foundation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +from oslo.config import cfg + +from neutron.extensions import portbindings +from neutron.tests.unit import _test_extension_portbindings as test_bindings +from neutron.tests.unit import test_db_plugin as test_plugin +from neutron.tests.unit import test_extension_allowedaddresspairs as test_pair +from neutron.tests.unit import test_security_groups_rpc as test_sg_rpc + + +class OpenvswitchPluginV2TestCase(test_plugin.NeutronDbPluginV2TestCase): + + _plugin_name = ('neutron.plugins.openvswitch.' + 'ovs_neutron_plugin.OVSNeutronPluginV2') + + def setUp(self): + super(OpenvswitchPluginV2TestCase, self).setUp(self._plugin_name) + self.port_create_status = 'DOWN' + + +class TestOpenvswitchBasicGet(test_plugin.TestBasicGet, + OpenvswitchPluginV2TestCase): + pass + + +class TestOpenvswitchV2HTTPResponse(test_plugin.TestV2HTTPResponse, + OpenvswitchPluginV2TestCase): + pass + + +class TestOpenvswitchPortsV2(test_plugin.TestPortsV2, + OpenvswitchPluginV2TestCase): + + def test_update_port_status_build(self): + with self.port() as port: + self.assertEqual(port['port']['status'], 'DOWN') + self.assertEqual(self.port_create_status, 'DOWN') + + +class TestOpenvswitchNetworksV2(test_plugin.TestNetworksV2, + OpenvswitchPluginV2TestCase): + pass + + +class TestOpenvswitchPortBinding(OpenvswitchPluginV2TestCase, + test_bindings.PortBindingsTestCase): + VIF_TYPE = portbindings.VIF_TYPE_OVS + HAS_PORT_FILTER = True + ENABLE_SG = True + FIREWALL_DRIVER = test_sg_rpc.FIREWALL_HYBRID_DRIVER + + def setUp(self, firewall_driver=None): + test_sg_rpc.set_firewall_driver(self.FIREWALL_DRIVER) + cfg.CONF.set_override( + 'enable_security_group', self.ENABLE_SG, + group='SECURITYGROUP') + super(TestOpenvswitchPortBinding, self).setUp() + + +class TestOpenvswitchPortBindingNoSG(TestOpenvswitchPortBinding): + HAS_PORT_FILTER = False + ENABLE_SG = False + FIREWALL_DRIVER = 
test_sg_rpc.FIREWALL_NOOP_DRIVER + + +class TestOpenvswitchPortBindingHost( + OpenvswitchPluginV2TestCase, + test_bindings.PortBindingsHostTestCaseMixin): + pass + + +class TestOpenvswitchAllowedAddressPairs(OpenvswitchPluginV2TestCase, + test_pair.TestAllowedAddressPairs): + pass diff --git a/neutron/tests/unit/openvswitch/test_ovs_db.py b/neutron/tests/unit/openvswitch/test_ovs_db.py new file mode 100644 index 000000000..528f894a6 --- /dev/null +++ b/neutron/tests/unit/openvswitch/test_ovs_db.py @@ -0,0 +1,322 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright (c) 2012 OpenStack Foundation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import mock +from oslo.config import cfg +from six import moves +import testtools +from testtools import matchers + +from neutron.common import exceptions as n_exc +from neutron.db import api as db +from neutron.openstack.common.db import exception as db_exc +from neutron.openstack.common.db.sqlalchemy import session +from neutron.plugins.openvswitch import ovs_db_v2 +from neutron.plugins.openvswitch import ovs_models_v2 as ovs_models +from neutron.tests import base +from neutron.tests.unit import test_db_plugin as test_plugin + +PHYS_NET = 'physnet1' +PHYS_NET_2 = 'physnet2' +VLAN_MIN = 10 +VLAN_MAX = 19 +VLAN_RANGES = {PHYS_NET: [(VLAN_MIN, VLAN_MAX)]} +UPDATED_VLAN_RANGES = {PHYS_NET: [(VLAN_MIN + 5, VLAN_MAX + 5)], + PHYS_NET_2: [(VLAN_MIN + 20, VLAN_MAX + 20)]} +TUN_MIN = 100 +TUN_MAX = 109 +TUNNEL_RANGES = [(TUN_MIN, TUN_MAX)] +UPDATED_TUNNEL_RANGES = [(TUN_MIN + 5, TUN_MAX + 5)] + +PLUGIN_NAME = ('neutron.plugins.openvswitch.' + 'ovs_neutron_plugin.OVSNeutronPluginV2') + + +class VlanAllocationsTest(base.BaseTestCase): + def setUp(self): + super(VlanAllocationsTest, self).setUp() + db.configure_db() + ovs_db_v2.sync_vlan_allocations(VLAN_RANGES) + self.session = db.get_session() + self.addCleanup(db.clear_db) + + def test_sync_vlan_allocations(self): + self.assertIsNone(ovs_db_v2.get_vlan_allocation(PHYS_NET, + VLAN_MIN - 1)) + self.assertFalse(ovs_db_v2.get_vlan_allocation(PHYS_NET, + VLAN_MIN).allocated) + self.assertFalse(ovs_db_v2.get_vlan_allocation(PHYS_NET, + VLAN_MIN + 1).allocated) + self.assertFalse(ovs_db_v2.get_vlan_allocation(PHYS_NET, + VLAN_MAX - 1).allocated) + self.assertFalse(ovs_db_v2.get_vlan_allocation(PHYS_NET, + VLAN_MAX).allocated) + self.assertIsNone(ovs_db_v2.get_vlan_allocation(PHYS_NET, + VLAN_MAX + 1)) + + ovs_db_v2.sync_vlan_allocations(UPDATED_VLAN_RANGES) + + self.assertIsNone(ovs_db_v2.get_vlan_allocation(PHYS_NET, + VLAN_MIN + 5 - 1)) + self.assertFalse(ovs_db_v2.get_vlan_allocation(PHYS_NET, + VLAN_MIN + 5). 
+ allocated) + self.assertFalse(ovs_db_v2.get_vlan_allocation(PHYS_NET, + VLAN_MIN + 5 + 1). + allocated) + self.assertFalse(ovs_db_v2.get_vlan_allocation(PHYS_NET, + VLAN_MAX + 5 - 1). + allocated) + self.assertFalse(ovs_db_v2.get_vlan_allocation(PHYS_NET, + VLAN_MAX + 5). + allocated) + self.assertIsNone(ovs_db_v2.get_vlan_allocation(PHYS_NET, + VLAN_MAX + 5 + 1)) + + self.assertIsNone(ovs_db_v2.get_vlan_allocation(PHYS_NET_2, + VLAN_MIN + 20 - 1)) + self.assertFalse(ovs_db_v2.get_vlan_allocation(PHYS_NET_2, + VLAN_MIN + 20). + allocated) + self.assertFalse(ovs_db_v2.get_vlan_allocation(PHYS_NET_2, + VLAN_MIN + 20 + 1). + allocated) + self.assertFalse(ovs_db_v2.get_vlan_allocation(PHYS_NET_2, + VLAN_MAX + 20 - 1). + allocated) + self.assertFalse(ovs_db_v2.get_vlan_allocation(PHYS_NET_2, + VLAN_MAX + 20). + allocated) + self.assertIsNone(ovs_db_v2.get_vlan_allocation(PHYS_NET_2, + VLAN_MAX + 20 + 1)) + + ovs_db_v2.sync_vlan_allocations(VLAN_RANGES) + + self.assertIsNone(ovs_db_v2.get_vlan_allocation(PHYS_NET, + VLAN_MIN - 1)) + self.assertFalse(ovs_db_v2.get_vlan_allocation(PHYS_NET, + VLAN_MIN).allocated) + self.assertFalse(ovs_db_v2.get_vlan_allocation(PHYS_NET, + VLAN_MIN + 1).allocated) + self.assertFalse(ovs_db_v2.get_vlan_allocation(PHYS_NET, + VLAN_MAX - 1).allocated) + self.assertFalse(ovs_db_v2.get_vlan_allocation(PHYS_NET, + VLAN_MAX).allocated) + self.assertIsNone(ovs_db_v2.get_vlan_allocation(PHYS_NET, + VLAN_MAX + 1)) + + self.assertIsNone(ovs_db_v2.get_vlan_allocation(PHYS_NET_2, + VLAN_MIN + 20)) + self.assertIsNone(ovs_db_v2.get_vlan_allocation(PHYS_NET_2, + VLAN_MAX + 20)) + + def test_vlan_pool(self): + vlan_ids = set() + for x in moves.xrange(VLAN_MIN, VLAN_MAX + 1): + physical_network, vlan_id = ovs_db_v2.reserve_vlan(self.session) + self.assertEqual(physical_network, PHYS_NET) + self.assertThat(vlan_id, matchers.GreaterThan(VLAN_MIN - 1)) + self.assertThat(vlan_id, matchers.LessThan(VLAN_MAX + 1)) + vlan_ids.add(vlan_id) + + with 
testtools.ExpectedException(n_exc.NoNetworkAvailable): + physical_network, vlan_id = ovs_db_v2.reserve_vlan(self.session) + + ovs_db_v2.release_vlan(self.session, PHYS_NET, vlan_ids.pop(), + VLAN_RANGES) + physical_network, vlan_id = ovs_db_v2.reserve_vlan(self.session) + self.assertEqual(physical_network, PHYS_NET) + self.assertThat(vlan_id, matchers.GreaterThan(VLAN_MIN - 1)) + self.assertThat(vlan_id, matchers.LessThan(VLAN_MAX + 1)) + vlan_ids.add(vlan_id) + + for vlan_id in vlan_ids: + ovs_db_v2.release_vlan(self.session, PHYS_NET, vlan_id, + VLAN_RANGES) + + def test_specific_vlan_inside_pool(self): + vlan_id = VLAN_MIN + 5 + self.assertFalse(ovs_db_v2.get_vlan_allocation(PHYS_NET, + vlan_id).allocated) + ovs_db_v2.reserve_specific_vlan(self.session, PHYS_NET, vlan_id) + self.assertTrue(ovs_db_v2.get_vlan_allocation(PHYS_NET, + vlan_id).allocated) + + with testtools.ExpectedException(n_exc.VlanIdInUse): + ovs_db_v2.reserve_specific_vlan(self.session, PHYS_NET, vlan_id) + + ovs_db_v2.release_vlan(self.session, PHYS_NET, vlan_id, VLAN_RANGES) + self.assertFalse(ovs_db_v2.get_vlan_allocation(PHYS_NET, + vlan_id).allocated) + + def test_specific_vlan_outside_pool(self): + vlan_id = VLAN_MAX + 5 + self.assertIsNone(ovs_db_v2.get_vlan_allocation(PHYS_NET, vlan_id)) + ovs_db_v2.reserve_specific_vlan(self.session, PHYS_NET, vlan_id) + self.assertTrue(ovs_db_v2.get_vlan_allocation(PHYS_NET, + vlan_id).allocated) + + with testtools.ExpectedException(n_exc.VlanIdInUse): + ovs_db_v2.reserve_specific_vlan(self.session, PHYS_NET, vlan_id) + + ovs_db_v2.release_vlan(self.session, PHYS_NET, vlan_id, VLAN_RANGES) + self.assertIsNone(ovs_db_v2.get_vlan_allocation(PHYS_NET, vlan_id)) + + def test_sync_with_allocated_false(self): + vlan_ids = set() + for x in moves.xrange(VLAN_MIN, VLAN_MAX + 1): + physical_network, vlan_id = ovs_db_v2.reserve_vlan(self.session) + self.assertEqual(physical_network, PHYS_NET) + self.assertThat(vlan_id, matchers.GreaterThan(VLAN_MIN - 1)) + 
self.assertThat(vlan_id, matchers.LessThan(VLAN_MAX + 1)) + vlan_ids.add(vlan_id) + + ovs_db_v2.release_vlan(self.session, PHYS_NET, vlan_ids.pop(), + VLAN_RANGES) + ovs_db_v2.sync_vlan_allocations({}) + + +class TunnelAllocationsTest(base.BaseTestCase): + def setUp(self): + super(TunnelAllocationsTest, self).setUp() + db.configure_db() + ovs_db_v2.sync_tunnel_allocations(TUNNEL_RANGES) + self.session = db.get_session() + self.addCleanup(db.clear_db) + + def test_sync_tunnel_allocations(self): + self.assertIsNone(ovs_db_v2.get_tunnel_allocation(TUN_MIN - 1)) + self.assertFalse(ovs_db_v2.get_tunnel_allocation(TUN_MIN).allocated) + self.assertFalse(ovs_db_v2.get_tunnel_allocation(TUN_MIN + 1). + allocated) + self.assertFalse(ovs_db_v2.get_tunnel_allocation(TUN_MAX - 1). + allocated) + self.assertFalse(ovs_db_v2.get_tunnel_allocation(TUN_MAX).allocated) + self.assertIsNone(ovs_db_v2.get_tunnel_allocation(TUN_MAX + 1)) + + ovs_db_v2.sync_tunnel_allocations(UPDATED_TUNNEL_RANGES) + + self.assertIsNone(ovs_db_v2.get_tunnel_allocation(TUN_MIN + 5 - 1)) + self.assertFalse(ovs_db_v2.get_tunnel_allocation(TUN_MIN + 5). + allocated) + self.assertFalse(ovs_db_v2.get_tunnel_allocation(TUN_MIN + 5 + 1). + allocated) + self.assertFalse(ovs_db_v2.get_tunnel_allocation(TUN_MAX + 5 - 1). + allocated) + self.assertFalse(ovs_db_v2.get_tunnel_allocation(TUN_MAX + 5). 
+ allocated) + self.assertIsNone(ovs_db_v2.get_tunnel_allocation(TUN_MAX + 5 + 1)) + + def test_tunnel_pool(self): + tunnel_ids = set() + for x in moves.xrange(TUN_MIN, TUN_MAX + 1): + tunnel_id = ovs_db_v2.reserve_tunnel(self.session) + self.assertThat(tunnel_id, matchers.GreaterThan(TUN_MIN - 1)) + self.assertThat(tunnel_id, matchers.LessThan(TUN_MAX + 1)) + tunnel_ids.add(tunnel_id) + + with testtools.ExpectedException(n_exc.NoNetworkAvailable): + tunnel_id = ovs_db_v2.reserve_tunnel(self.session) + + ovs_db_v2.release_tunnel(self.session, tunnel_ids.pop(), TUNNEL_RANGES) + tunnel_id = ovs_db_v2.reserve_tunnel(self.session) + self.assertThat(tunnel_id, matchers.GreaterThan(TUN_MIN - 1)) + self.assertThat(tunnel_id, matchers.LessThan(TUN_MAX + 1)) + tunnel_ids.add(tunnel_id) + + for tunnel_id in tunnel_ids: + ovs_db_v2.release_tunnel(self.session, tunnel_id, TUNNEL_RANGES) + + def test_add_tunnel_endpoints(self): + tun_1 = ovs_db_v2.add_tunnel_endpoint('192.168.0.1') + tun_2 = ovs_db_v2.add_tunnel_endpoint('192.168.0.2') + self.assertEqual(1, tun_1.id) + self.assertEqual('192.168.0.1', tun_1.ip_address) + self.assertEqual(2, tun_2.id) + self.assertEqual('192.168.0.2', tun_2.ip_address) + + def test_specific_tunnel_inside_pool(self): + tunnel_id = TUN_MIN + 5 + self.assertFalse(ovs_db_v2.get_tunnel_allocation(tunnel_id).allocated) + ovs_db_v2.reserve_specific_tunnel(self.session, tunnel_id) + self.assertTrue(ovs_db_v2.get_tunnel_allocation(tunnel_id).allocated) + + with testtools.ExpectedException(n_exc.TunnelIdInUse): + ovs_db_v2.reserve_specific_tunnel(self.session, tunnel_id) + + ovs_db_v2.release_tunnel(self.session, tunnel_id, TUNNEL_RANGES) + self.assertFalse(ovs_db_v2.get_tunnel_allocation(tunnel_id).allocated) + + def test_specific_tunnel_outside_pool(self): + tunnel_id = TUN_MAX + 5 + self.assertIsNone(ovs_db_v2.get_tunnel_allocation(tunnel_id)) + ovs_db_v2.reserve_specific_tunnel(self.session, tunnel_id) + 
self.assertTrue(ovs_db_v2.get_tunnel_allocation(tunnel_id).allocated) + + with testtools.ExpectedException(n_exc.TunnelIdInUse): + ovs_db_v2.reserve_specific_tunnel(self.session, tunnel_id) + + ovs_db_v2.release_tunnel(self.session, tunnel_id, TUNNEL_RANGES) + self.assertIsNone(ovs_db_v2.get_tunnel_allocation(tunnel_id)) + + def test_add_tunnel_endpoint_create_new_endpoint(self): + addr = '10.0.0.1' + ovs_db_v2.add_tunnel_endpoint(addr) + self.assertIsNotNone(self.session.query(ovs_models.TunnelEndpoint). + filter_by(ip_address=addr).first()) + + def test_add_tunnel_endpoint_retrieve_an_existing_endpoint(self): + addr = '10.0.0.1' + self.session.add(ovs_models.TunnelEndpoint(ip_address=addr, id=1)) + self.session.flush() + + tunnel = ovs_db_v2.add_tunnel_endpoint(addr) + self.assertEqual(tunnel.id, 1) + self.assertEqual(tunnel.ip_address, addr) + + def test_add_tunnel_endpoint_handle_duplicate_error(self): + with mock.patch.object(session.Session, 'query') as query_mock: + error = db_exc.DBDuplicateEntry(['id']) + query_mock.side_effect = error + + with testtools.ExpectedException(n_exc.NeutronException): + ovs_db_v2.add_tunnel_endpoint('10.0.0.1', 5) + self.assertEqual(query_mock.call_count, 5) + + +class NetworkBindingsTest(test_plugin.NeutronDbPluginV2TestCase): + def setUp(self): + cfg.CONF.set_override('network_vlan_ranges', ['physnet1:1000:2999'], + group='OVS') + super(NetworkBindingsTest, self).setUp(plugin=PLUGIN_NAME) + db.configure_db() + self.session = db.get_session() + + def test_add_network_binding(self): + params = {'provider:network_type': 'vlan', + 'provider:physical_network': PHYS_NET, + 'provider:segmentation_id': 1234} + params['arg_list'] = tuple(params.keys()) + with self.network(**params) as network: + TEST_NETWORK_ID = network['network']['id'] + binding = ovs_db_v2.get_network_binding(self.session, + TEST_NETWORK_ID) + self.assertIsNotNone(binding) + self.assertEqual(binding.network_id, TEST_NETWORK_ID) + 
self.assertEqual(binding.network_type, 'vlan') + self.assertEqual(binding.physical_network, PHYS_NET) + self.assertEqual(binding.segmentation_id, 1234) diff --git a/neutron/tests/unit/openvswitch/test_ovs_defaults.py b/neutron/tests/unit/openvswitch/test_ovs_defaults.py new file mode 100644 index 000000000..0d5c00f73 --- /dev/null +++ b/neutron/tests/unit/openvswitch/test_ovs_defaults.py @@ -0,0 +1,35 @@ +# Copyright (c) 2012 OpenStack Foundation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from oslo.config import cfg + +from neutron.plugins.openvswitch.common import config # noqa +from neutron.tests import base + + +class ConfigurationTest(base.BaseTestCase): + + def test_defaults(self): + self.assertEqual('br-int', cfg.CONF.OVS.integration_bridge) + self.assertFalse(cfg.CONF.OVS.enable_tunneling) + self.assertEqual('br-tun', cfg.CONF.OVS.tunnel_bridge) + self.assertEqual(2, cfg.CONF.AGENT.polling_interval) + self.assertEqual('sudo', cfg.CONF.AGENT.root_helper) + self.assertEqual('local', cfg.CONF.OVS.tenant_network_type) + self.assertEqual(0, len(cfg.CONF.OVS.bridge_mappings)) + self.assertEqual(0, len(cfg.CONF.OVS.network_vlan_ranges)) + self.assertEqual(0, len(cfg.CONF.OVS.tunnel_id_ranges)) + self.assertFalse(cfg.CONF.AGENT.l2_population) + self.assertFalse(cfg.CONF.AGENT.arp_responder) diff --git a/neutron/tests/unit/openvswitch/test_ovs_neutron_agent.py b/neutron/tests/unit/openvswitch/test_ovs_neutron_agent.py new file mode 100644 index 000000000..b1d2371b3 --- /dev/null +++ b/neutron/tests/unit/openvswitch/test_ovs_neutron_agent.py @@ -0,0 +1,954 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2012 OpenStack Foundation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import contextlib +import sys + +import mock +import netaddr +from oslo.config import cfg +import testtools + +from neutron.agent.linux import async_process +from neutron.agent.linux import ip_lib +from neutron.agent.linux import ovs_lib +from neutron.agent.linux import utils +from neutron.common import constants as n_const +from neutron.openstack.common import log +from neutron.plugins.common import constants as p_const +from neutron.plugins.openvswitch.agent import ovs_neutron_agent +from neutron.plugins.openvswitch.common import constants +from neutron.tests import base + + +NOTIFIER = ('neutron.plugins.openvswitch.' + 'ovs_neutron_plugin.AgentNotifierApi') +OVS_LINUX_KERN_VERS_WITHOUT_VXLAN = "3.12.0" + +FAKE_MAC = '00:11:22:33:44:55' +FAKE_IP1 = '10.0.0.1' +FAKE_IP2 = '10.0.0.2' + + +class CreateAgentConfigMap(base.BaseTestCase): + + def test_create_agent_config_map_succeeds(self): + self.assertTrue(ovs_neutron_agent.create_agent_config_map(cfg.CONF)) + + def test_create_agent_config_map_fails_for_invalid_tunnel_config(self): + # An ip address is required for tunneling but there is no default, + # verify this for both gre and vxlan tunnels. 
+ cfg.CONF.set_override('tunnel_types', [p_const.TYPE_GRE], + group='AGENT') + with testtools.ExpectedException(ValueError): + ovs_neutron_agent.create_agent_config_map(cfg.CONF) + cfg.CONF.set_override('tunnel_types', [p_const.TYPE_VXLAN], + group='AGENT') + with testtools.ExpectedException(ValueError): + ovs_neutron_agent.create_agent_config_map(cfg.CONF) + + def test_create_agent_config_map_enable_tunneling(self): + # Verify setting only enable_tunneling will default tunnel_type to GRE + cfg.CONF.set_override('tunnel_types', None, group='AGENT') + cfg.CONF.set_override('enable_tunneling', True, group='OVS') + cfg.CONF.set_override('local_ip', '10.10.10.10', group='OVS') + cfgmap = ovs_neutron_agent.create_agent_config_map(cfg.CONF) + self.assertEqual(cfgmap['tunnel_types'], [p_const.TYPE_GRE]) + + def test_create_agent_config_map_fails_no_local_ip(self): + # An ip address is required for tunneling but there is no default + cfg.CONF.set_override('enable_tunneling', True, group='OVS') + with testtools.ExpectedException(ValueError): + ovs_neutron_agent.create_agent_config_map(cfg.CONF) + + def test_create_agent_config_map_fails_for_invalid_tunnel_type(self): + cfg.CONF.set_override('tunnel_types', ['foobar'], group='AGENT') + with testtools.ExpectedException(ValueError): + ovs_neutron_agent.create_agent_config_map(cfg.CONF) + + def test_create_agent_config_map_multiple_tunnel_types(self): + cfg.CONF.set_override('local_ip', '10.10.10.10', group='OVS') + cfg.CONF.set_override('tunnel_types', [p_const.TYPE_GRE, + p_const.TYPE_VXLAN], group='AGENT') + cfgmap = ovs_neutron_agent.create_agent_config_map(cfg.CONF) + self.assertEqual(cfgmap['tunnel_types'], + [p_const.TYPE_GRE, p_const.TYPE_VXLAN]) + + +class TestOvsNeutronAgent(base.BaseTestCase): + + def setUp(self): + super(TestOvsNeutronAgent, self).setUp() + notifier_p = mock.patch(NOTIFIER) + notifier_cls = notifier_p.start() + self.notifier = mock.Mock() + notifier_cls.return_value = self.notifier + 
cfg.CONF.set_default('firewall_driver', + 'neutron.agent.firewall.NoopFirewallDriver', + group='SECURITYGROUP') + # Avoid rpc initialization for unit tests + cfg.CONF.set_override('rpc_backend', + 'neutron.openstack.common.rpc.impl_fake') + kwargs = ovs_neutron_agent.create_agent_config_map(cfg.CONF) + + class MockFixedIntervalLoopingCall(object): + def __init__(self, f): + self.f = f + + def start(self, interval=0): + self.f() + + with contextlib.nested( + mock.patch('neutron.plugins.openvswitch.agent.ovs_neutron_agent.' + 'OVSNeutronAgent.setup_integration_br', + return_value=mock.Mock()), + mock.patch('neutron.plugins.openvswitch.agent.ovs_neutron_agent.' + 'OVSNeutronAgent.setup_ancillary_bridges', + return_value=[]), + mock.patch('neutron.agent.linux.ovs_lib.OVSBridge.' + 'set_secure_mode'), + mock.patch('neutron.agent.linux.ovs_lib.OVSBridge.' + 'get_local_port_mac', + return_value='00:00:00:00:00:01'), + mock.patch('neutron.agent.linux.utils.get_interface_mac', + return_value='00:00:00:00:00:01'), + mock.patch('neutron.openstack.common.loopingcall.' + 'FixedIntervalLoopingCall', + new=MockFixedIntervalLoopingCall), + mock.patch('neutron.plugins.openvswitch.agent.ovs_neutron_agent.' + 'OVSNeutronAgent._check_arp_responder_support', + return_value=True)): + self.agent = ovs_neutron_agent.OVSNeutronAgent(**kwargs) + self.agent.tun_br = mock.Mock() + self.agent.sg_agent = mock.Mock() + + def _mock_port_bound(self, ofport=None, new_local_vlan=None, + old_local_vlan=None): + port = mock.Mock() + port.ofport = ofport + net_uuid = 'my-net-uuid' + if old_local_vlan is not None: + self.agent.local_vlan_map[net_uuid] = ( + ovs_neutron_agent.LocalVLANMapping( + old_local_vlan, None, None, None)) + with contextlib.nested( + mock.patch('neutron.agent.linux.ovs_lib.OVSBridge.' + 'set_db_attribute', return_value=True), + mock.patch('neutron.agent.linux.ovs_lib.OVSBridge.' 
+ 'db_get_val', return_value=str(old_local_vlan)), + mock.patch.object(self.agent.int_br, 'delete_flows') + ) as (set_ovs_db_func, get_ovs_db_func, delete_flows_func): + self.agent.port_bound(port, net_uuid, 'local', None, None, False) + get_ovs_db_func.assert_called_once_with("Port", mock.ANY, "tag") + if new_local_vlan != old_local_vlan: + set_ovs_db_func.assert_called_once_with( + "Port", mock.ANY, "tag", str(new_local_vlan)) + if ofport != -1: + delete_flows_func.assert_called_once_with(in_port=port.ofport) + else: + self.assertFalse(delete_flows_func.called) + else: + self.assertFalse(set_ovs_db_func.called) + self.assertFalse(delete_flows_func.called) + + def test_port_bound_deletes_flows_for_valid_ofport(self): + self._mock_port_bound(ofport=1, new_local_vlan=1) + + def test_port_bound_ignores_flows_for_invalid_ofport(self): + self._mock_port_bound(ofport=-1, new_local_vlan=1) + + def test_port_bound_does_not_rewire_if_already_bound(self): + self._mock_port_bound(ofport=-1, new_local_vlan=1, old_local_vlan=1) + + def _test_port_dead(self, cur_tag=None): + port = mock.Mock() + port.ofport = 1 + with contextlib.nested( + mock.patch('neutron.agent.linux.ovs_lib.OVSBridge.' + 'set_db_attribute', return_value=True), + mock.patch('neutron.agent.linux.ovs_lib.OVSBridge.' 
+ 'db_get_val', return_value=cur_tag), + mock.patch.object(self.agent.int_br, 'add_flow') + ) as (set_ovs_db_func, get_ovs_db_func, add_flow_func): + self.agent.port_dead(port) + get_ovs_db_func.assert_called_once_with("Port", mock.ANY, "tag") + if cur_tag == ovs_neutron_agent.DEAD_VLAN_TAG: + self.assertFalse(set_ovs_db_func.called) + self.assertFalse(add_flow_func.called) + else: + set_ovs_db_func.assert_called_once_with( + "Port", mock.ANY, "tag", str(ovs_neutron_agent.DEAD_VLAN_TAG)) + add_flow_func.assert_called_once_with( + priority=2, in_port=port.ofport, actions="drop") + + def test_port_dead(self): + self._test_port_dead() + + def test_port_dead_with_port_already_dead(self): + self._test_port_dead(ovs_neutron_agent.DEAD_VLAN_TAG) + + def mock_scan_ports(self, vif_port_set=None, registered_ports=None, + updated_ports=None, port_tags_dict=None): + if port_tags_dict is None: # Because empty dicts evaluate as False. + port_tags_dict = {} + with contextlib.nested( + mock.patch.object(self.agent.int_br, 'get_vif_port_set', + return_value=vif_port_set), + mock.patch.object(self.agent.int_br, 'get_port_tag_dict', + return_value=port_tags_dict) + ): + return self.agent.scan_ports(registered_ports, updated_ports) + + def test_scan_ports_returns_current_only_for_unchanged_ports(self): + vif_port_set = set([1, 3]) + registered_ports = set([1, 3]) + expected = {'current': vif_port_set} + actual = self.mock_scan_ports(vif_port_set, registered_ports) + self.assertEqual(expected, actual) + + def test_scan_ports_returns_port_changes(self): + vif_port_set = set([1, 3]) + registered_ports = set([1, 2]) + expected = dict(current=vif_port_set, added=set([3]), removed=set([2])) + actual = self.mock_scan_ports(vif_port_set, registered_ports) + self.assertEqual(expected, actual) + + def _test_scan_ports_with_updated_ports(self, updated_ports): + vif_port_set = set([1, 3, 4]) + registered_ports = set([1, 2, 4]) + expected = dict(current=vif_port_set, added=set([3]), + 
removed=set([2]), updated=set([4])) + actual = self.mock_scan_ports(vif_port_set, registered_ports, + updated_ports) + self.assertEqual(expected, actual) + + def test_scan_ports_finds_known_updated_ports(self): + self._test_scan_ports_with_updated_ports(set([4])) + + def test_scan_ports_ignores_unknown_updated_ports(self): + # the port '5' was not seen on current ports. Hence it has either + # never been wired or already removed and should be ignored + self._test_scan_ports_with_updated_ports(set([4, 5])) + + def test_scan_ports_ignores_updated_port_if_removed(self): + vif_port_set = set([1, 3]) + registered_ports = set([1, 2]) + updated_ports = set([1, 2]) + expected = dict(current=vif_port_set, added=set([3]), + removed=set([2]), updated=set([1])) + actual = self.mock_scan_ports(vif_port_set, registered_ports, + updated_ports) + self.assertEqual(expected, actual) + + def test_scan_ports_no_vif_changes_returns_updated_port_only(self): + vif_port_set = set([1, 2, 3]) + registered_ports = set([1, 2, 3]) + updated_ports = set([2]) + expected = dict(current=vif_port_set, updated=set([2])) + actual = self.mock_scan_ports(vif_port_set, registered_ports, + updated_ports) + self.assertEqual(expected, actual) + + def test_update_ports_returns_changed_vlan(self): + br = ovs_lib.OVSBridge('br-int', 'sudo') + mac = "ca:fe:de:ad:be:ef" + port = ovs_lib.VifPort(1, 1, 1, mac, br) + lvm = ovs_neutron_agent.LocalVLANMapping( + 1, '1', None, 1, {port.vif_id: port}) + local_vlan_map = {'1': lvm} + vif_port_set = set([1, 3]) + registered_ports = set([1, 2]) + port_tags_dict = {1: []} + expected = dict( + added=set([3]), current=vif_port_set, + removed=set([2]), updated=set([1]) + ) + with mock.patch.dict(self.agent.local_vlan_map, local_vlan_map): + actual = self.mock_scan_ports( + vif_port_set, registered_ports, port_tags_dict=port_tags_dict) + self.assertEqual(expected, actual) + + def test_treat_devices_added_returns_true_for_missing_device(self): + with contextlib.nested( + 
mock.patch.object(self.agent.plugin_rpc, 'get_device_details', + side_effect=Exception()), + mock.patch.object(self.agent.int_br, 'get_vif_port_by_id', + return_value=mock.Mock())): + self.assertTrue(self.agent.treat_devices_added_or_updated([{}], + False)) + + def _mock_treat_devices_added_updated(self, details, port, func_name): + """Mock treat devices added or updated. + + :param details: the details to return for the device + :param port: the port that get_vif_port_by_id should return + :param func_name: the function that should be called + :returns: whether the named function was called + """ + with contextlib.nested( + mock.patch.object(self.agent.plugin_rpc, 'get_device_details', + return_value=details), + mock.patch.object(self.agent.int_br, 'get_vif_port_by_id', + return_value=port), + mock.patch.object(self.agent.plugin_rpc, 'update_device_up'), + mock.patch.object(self.agent.plugin_rpc, 'update_device_down'), + mock.patch.object(self.agent, func_name) + ) as (get_dev_fn, get_vif_func, upd_dev_up, upd_dev_down, func): + self.assertFalse(self.agent.treat_devices_added_or_updated([{}], + False)) + return func.called + + def test_treat_devices_added_updated_ignores_invalid_ofport(self): + port = mock.Mock() + port.ofport = -1 + self.assertFalse(self._mock_treat_devices_added_updated( + mock.MagicMock(), port, 'port_dead')) + + def test_treat_devices_added_updated_marks_unknown_port_as_dead(self): + port = mock.Mock() + port.ofport = 1 + self.assertTrue(self._mock_treat_devices_added_updated( + mock.MagicMock(), port, 'port_dead')) + + def test_treat_devices_added_does_not_process_missing_port(self): + with contextlib.nested( + mock.patch.object(self.agent.plugin_rpc, 'get_device_details'), + mock.patch.object(self.agent.int_br, 'get_vif_port_by_id', + return_value=None) + ) as (get_dev_fn, get_vif_func): + self.assertFalse(get_dev_fn.called) + + def test_treat_devices_added__updated_updates_known_port(self): + details = mock.MagicMock() + 
details.__contains__.side_effect = lambda x: True + self.assertTrue(self._mock_treat_devices_added_updated( + details, mock.Mock(), 'treat_vif_port')) + + def test_treat_devices_added_updated_put_port_down(self): + fake_details_dict = {'admin_state_up': False, + 'port_id': 'xxx', + 'device': 'xxx', + 'network_id': 'yyy', + 'physical_network': 'foo', + 'segmentation_id': 'bar', + 'network_type': 'baz'} + with contextlib.nested( + mock.patch.object(self.agent.plugin_rpc, 'get_device_details', + return_value=fake_details_dict), + mock.patch.object(self.agent.int_br, 'get_vif_port_by_id', + return_value=mock.MagicMock()), + mock.patch.object(self.agent.plugin_rpc, 'update_device_up'), + mock.patch.object(self.agent.plugin_rpc, 'update_device_down'), + mock.patch.object(self.agent, 'treat_vif_port') + ) as (get_dev_fn, get_vif_func, upd_dev_up, + upd_dev_down, treat_vif_port): + self.assertFalse(self.agent.treat_devices_added_or_updated([{}], + False)) + self.assertTrue(treat_vif_port.called) + self.assertTrue(upd_dev_down.called) + + def test_treat_devices_removed_returns_true_for_missing_device(self): + with mock.patch.object(self.agent.plugin_rpc, 'update_device_down', + side_effect=Exception()): + self.assertTrue(self.agent.treat_devices_removed([{}])) + + def _mock_treat_devices_removed(self, port_exists): + details = dict(exists=port_exists) + with mock.patch.object(self.agent.plugin_rpc, 'update_device_down', + return_value=details): + with mock.patch.object(self.agent, 'port_unbound') as port_unbound: + self.assertFalse(self.agent.treat_devices_removed([{}])) + self.assertTrue(port_unbound.called) + + def test_treat_devices_removed_unbinds_port(self): + self._mock_treat_devices_removed(True) + + def test_treat_devices_removed_ignores_missing_port(self): + self._mock_treat_devices_removed(False) + + def _test_process_network_ports(self, port_info): + with contextlib.nested( + mock.patch.object(self.agent.sg_agent, "setup_port_filters"), + 
mock.patch.object(self.agent, "treat_devices_added_or_updated", + return_value=False), + mock.patch.object(self.agent, "treat_devices_removed", + return_value=False) + ) as (setup_port_filters, device_added_updated, device_removed): + self.assertFalse(self.agent.process_network_ports(port_info, + False)) + setup_port_filters.assert_called_once_with( + port_info['added'], port_info.get('updated', set())) + device_added_updated.assert_called_once_with( + port_info['added'] | port_info.get('updated', set()), False) + device_removed.assert_called_once_with(port_info['removed']) + + def test_process_network_ports(self): + self._test_process_network_ports( + {'current': set(['tap0']), + 'removed': set(['eth0']), + 'added': set(['eth1'])}) + + def test_process_network_port_with_updated_ports(self): + self._test_process_network_ports( + {'current': set(['tap0', 'tap1']), + 'updated': set(['tap1', 'eth1']), + 'removed': set(['eth0']), + 'added': set(['eth1'])}) + + def test_report_state(self): + with mock.patch.object(self.agent.state_rpc, + "report_state") as report_st: + self.agent.int_br_device_count = 5 + self.agent._report_state() + report_st.assert_called_with(self.agent.context, + self.agent.agent_state) + self.assertNotIn("start_flag", self.agent.agent_state) + self.assertEqual( + self.agent.agent_state["configurations"]["devices"], + self.agent.int_br_device_count + ) + + def test_network_delete(self): + with contextlib.nested( + mock.patch.object(self.agent, "reclaim_local_vlan"), + mock.patch.object(self.agent.tun_br, "cleanup_tunnel_port") + ) as (recl_fn, clean_tun_fn): + self.agent.network_delete("unused_context", + network_id="123") + self.assertFalse(recl_fn.called) + self.agent.local_vlan_map["123"] = "LVM object" + self.agent.network_delete("unused_context", + network_id="123") + self.assertFalse(clean_tun_fn.called) + recl_fn.assert_called_with("123") + + def test_port_update(self): + port = {"id": "123", + "network_id": "124", + "admin_state_up": False} 
+ self.agent.port_update("unused_context", + port=port, + network_type="vlan", + segmentation_id="1", + physical_network="physnet") + self.assertEqual(set(['123']), self.agent.updated_ports) + + def test_setup_physical_bridges(self): + with contextlib.nested( + mock.patch.object(ip_lib, "device_exists"), + mock.patch.object(sys, "exit"), + mock.patch.object(utils, "execute"), + mock.patch.object(ovs_lib.OVSBridge, "remove_all_flows"), + mock.patch.object(ovs_lib.OVSBridge, "add_flow"), + mock.patch.object(ovs_lib.OVSBridge, "add_port"), + mock.patch.object(ovs_lib.OVSBridge, "delete_port"), + mock.patch.object(self.agent.int_br, "add_port"), + mock.patch.object(self.agent.int_br, "delete_port"), + mock.patch.object(ip_lib.IPWrapper, "add_veth"), + mock.patch.object(ip_lib.IpLinkCommand, "delete"), + mock.patch.object(ip_lib.IpLinkCommand, "set_up"), + mock.patch.object(ip_lib.IpLinkCommand, "set_mtu") + ) as (devex_fn, sysexit_fn, utilsexec_fn, remflows_fn, ovs_addfl_fn, + ovs_addport_fn, ovs_delport_fn, br_addport_fn, + br_delport_fn, addveth_fn, linkdel_fn, linkset_fn, linkmtu_fn): + devex_fn.return_value = True + parent = mock.MagicMock() + parent.attach_mock(utilsexec_fn, 'utils_execute') + parent.attach_mock(linkdel_fn, 'link_delete') + parent.attach_mock(addveth_fn, 'add_veth') + addveth_fn.return_value = (ip_lib.IPDevice("int-br-eth1"), + ip_lib.IPDevice("phy-br-eth1")) + ovs_addport_fn.return_value = "int_ofport" + br_addport_fn.return_value = "phys_veth" + self.agent.setup_physical_bridges({"physnet1": "br-eth"}) + expected_calls = [mock.call.link_delete(), + mock.call.utils_execute(['/sbin/udevadm', + 'settle', + '--timeout=10']), + mock.call.add_veth('int-br-eth', + 'phy-br-eth')] + parent.assert_has_calls(expected_calls, any_order=False) + self.assertEqual(self.agent.int_ofports["physnet1"], + "phys_veth") + self.assertEqual(self.agent.phys_ofports["physnet1"], + "int_ofport") + + def test_get_veth_name(self): + bridge1 = "A_REALLY_LONG_BRIDGE_NAME1" + 
bridge2 = "A_REALLY_LONG_BRIDGE_NAME2" + self.assertEqual(len(self.agent.get_veth_name('int-', bridge1)), + ip_lib.VETH_MAX_NAME_LENGTH) + self.assertEqual(len(self.agent.get_veth_name('int-', bridge2)), + ip_lib.VETH_MAX_NAME_LENGTH) + self.assertNotEqual(self.agent.get_veth_name('int-', bridge1), + self.agent.get_veth_name('int-', bridge2)) + + def test_port_unbound(self): + with mock.patch.object(self.agent, "reclaim_local_vlan") as reclvl_fn: + self.agent.enable_tunneling = True + lvm = mock.Mock() + lvm.network_type = "gre" + lvm.vif_ports = {"vif1": mock.Mock()} + self.agent.local_vlan_map["netuid12345"] = lvm + self.agent.port_unbound("vif1", "netuid12345") + self.assertTrue(reclvl_fn.called) + reclvl_fn.called = False + + lvm.vif_ports = {} + self.agent.port_unbound("vif1", "netuid12345") + self.assertEqual(reclvl_fn.call_count, 2) + + lvm.vif_ports = {"vif1": mock.Mock()} + self.agent.port_unbound("vif3", "netuid12345") + self.assertEqual(reclvl_fn.call_count, 2) + + def _prepare_l2_pop_ofports(self): + lvm1 = mock.Mock() + lvm1.network_type = 'gre' + lvm1.vlan = 'vlan1' + lvm1.segmentation_id = 'seg1' + lvm1.tun_ofports = set(['1']) + lvm2 = mock.Mock() + lvm2.network_type = 'gre' + lvm2.vlan = 'vlan2' + lvm2.segmentation_id = 'seg2' + lvm2.tun_ofports = set(['1', '2']) + self.agent.local_vlan_map = {'net1': lvm1, 'net2': lvm2} + self.agent.tun_br_ofports = {'gre': + {'1.1.1.1': '1', '2.2.2.2': '2'}} + self.agent.arp_responder_enabled = True + + def test_fdb_ignore_network(self): + self._prepare_l2_pop_ofports() + fdb_entry = {'net3': {}} + with contextlib.nested( + mock.patch.object(self.agent.tun_br, 'add_flow'), + mock.patch.object(self.agent.tun_br, 'delete_flows'), + mock.patch.object(self.agent, 'setup_tunnel_port'), + mock.patch.object(self.agent, 'cleanup_tunnel_port') + ) as (add_flow_fn, del_flow_fn, add_tun_fn, clean_tun_fn): + self.agent.fdb_add(None, fdb_entry) + self.assertFalse(add_flow_fn.called) + self.assertFalse(add_tun_fn.called) + 
self.agent.fdb_remove(None, fdb_entry) + self.assertFalse(del_flow_fn.called) + self.assertFalse(clean_tun_fn.called) + + def test_fdb_ignore_self(self): + self._prepare_l2_pop_ofports() + self.agent.local_ip = 'agent_ip' + fdb_entry = {'net2': + {'network_type': 'gre', + 'segment_id': 'tun2', + 'ports': + {'agent_ip': + [[FAKE_MAC, FAKE_IP1], + n_const.FLOODING_ENTRY]}}} + with mock.patch.object(self.agent.tun_br, + "defer_apply_on") as defer_fn: + self.agent.fdb_add(None, fdb_entry) + self.assertFalse(defer_fn.called) + + self.agent.fdb_remove(None, fdb_entry) + self.assertFalse(defer_fn.called) + + def test_fdb_add_flows(self): + self._prepare_l2_pop_ofports() + fdb_entry = {'net1': + {'network_type': 'gre', + 'segment_id': 'tun1', + 'ports': + {'2.2.2.2': + [[FAKE_MAC, FAKE_IP1], + n_const.FLOODING_ENTRY]}}} + with contextlib.nested( + mock.patch.object(self.agent.tun_br, 'add_flow'), + mock.patch.object(self.agent.tun_br, 'mod_flow'), + mock.patch.object(self.agent, 'setup_tunnel_port'), + ) as (add_flow_fn, mod_flow_fn, add_tun_fn): + self.agent.fdb_add(None, fdb_entry) + self.assertFalse(add_tun_fn.called) + actions = ('move:NXM_OF_ETH_SRC[]->NXM_OF_ETH_DST[],' + 'mod_dl_src:%(mac)s,' + 'load:0x2->NXM_OF_ARP_OP[],' + 'move:NXM_NX_ARP_SHA[]->NXM_NX_ARP_THA[],' + 'move:NXM_OF_ARP_SPA[]->NXM_OF_ARP_TPA[],' + 'load:%(mac)#x->NXM_NX_ARP_SHA[],' + 'load:%(ip)#x->NXM_OF_ARP_SPA[],' + 'in_port' % + {'mac': netaddr.EUI(FAKE_MAC, dialect=netaddr.mac_unix), + 'ip': netaddr.IPAddress(FAKE_IP1)}) + add_flow_fn.assert_has_calls([ + mock.call(table=constants.ARP_RESPONDER, + priority=1, + proto='arp', + dl_vlan='vlan1', + nw_dst=FAKE_IP1, + actions=actions), + mock.call(table=constants.UCAST_TO_TUN, + priority=2, + dl_vlan='vlan1', + dl_dst=FAKE_MAC, + actions='strip_vlan,' + 'set_tunnel:seg1,output:2') + ]) + mod_flow_fn.assert_called_with(table=constants.FLOOD_TO_TUN, + dl_vlan='vlan1', + actions='strip_vlan,' + 'set_tunnel:seg1,output:1,2') + + def 
test_fdb_del_flows(self): + self._prepare_l2_pop_ofports() + fdb_entry = {'net2': + {'network_type': 'gre', + 'segment_id': 'tun2', + 'ports': + {'2.2.2.2': + [[FAKE_MAC, FAKE_IP1], + n_const.FLOODING_ENTRY]}}} + with contextlib.nested( + mock.patch.object(self.agent.tun_br, 'mod_flow'), + mock.patch.object(self.agent.tun_br, 'delete_flows'), + ) as (mod_flow_fn, del_flow_fn): + self.agent.fdb_remove(None, fdb_entry) + mod_flow_fn.assert_called_with(table=constants.FLOOD_TO_TUN, + dl_vlan='vlan2', + actions='strip_vlan,' + 'set_tunnel:seg2,output:1') + expected = [mock.call(table=constants.ARP_RESPONDER, + proto='arp', + dl_vlan='vlan2', + nw_dst=FAKE_IP1), + mock.call(table=constants.UCAST_TO_TUN, + dl_vlan='vlan2', + dl_dst=FAKE_MAC), + mock.call(in_port='2')] + del_flow_fn.assert_has_calls(expected) + + def test_fdb_add_port(self): + self._prepare_l2_pop_ofports() + fdb_entry = {'net1': + {'network_type': 'gre', + 'segment_id': 'tun1', + 'ports': {'1.1.1.1': [[FAKE_MAC, FAKE_IP1]]}}} + with contextlib.nested( + mock.patch.object(self.agent.tun_br, 'add_flow'), + mock.patch.object(self.agent.tun_br, 'mod_flow'), + mock.patch.object(self.agent, 'setup_tunnel_port') + ) as (add_flow_fn, mod_flow_fn, add_tun_fn): + self.agent.fdb_add(None, fdb_entry) + self.assertFalse(add_tun_fn.called) + fdb_entry['net1']['ports']['10.10.10.10'] = [[FAKE_MAC, FAKE_IP1]] + self.agent.fdb_add(None, fdb_entry) + add_tun_fn.assert_called_with('gre-0a0a0a0a', '10.10.10.10', 'gre') + + def test_fdb_del_port(self): + self._prepare_l2_pop_ofports() + fdb_entry = {'net2': + {'network_type': 'gre', + 'segment_id': 'tun2', + 'ports': {'2.2.2.2': [n_const.FLOODING_ENTRY]}}} + with contextlib.nested( + mock.patch.object(self.agent.tun_br, 'delete_flows'), + mock.patch.object(self.agent.tun_br, 'delete_port') + ) as (del_flow_fn, del_port_fn): + self.agent.fdb_remove(None, fdb_entry) + del_port_fn.assert_called_once_with('gre-02020202') + + def test_fdb_update_chg_ip(self): + 
self._prepare_l2_pop_ofports() + fdb_entries = {'chg_ip': + {'net1': + {'agent_ip': + {'before': [[FAKE_MAC, FAKE_IP1]], + 'after': [[FAKE_MAC, FAKE_IP2]]}}}} + with contextlib.nested( + mock.patch.object(self.agent.tun_br, 'add_flow'), + mock.patch.object(self.agent.tun_br, 'delete_flows') + ) as (add_flow_fn, del_flow_fn): + self.agent.fdb_update(None, fdb_entries) + actions = ('move:NXM_OF_ETH_SRC[]->NXM_OF_ETH_DST[],' + 'mod_dl_src:%(mac)s,' + 'load:0x2->NXM_OF_ARP_OP[],' + 'move:NXM_NX_ARP_SHA[]->NXM_NX_ARP_THA[],' + 'move:NXM_OF_ARP_SPA[]->NXM_OF_ARP_TPA[],' + 'load:%(mac)#x->NXM_NX_ARP_SHA[],' + 'load:%(ip)#x->NXM_OF_ARP_SPA[],' + 'in_port' % + {'mac': netaddr.EUI(FAKE_MAC, dialect=netaddr.mac_unix), + 'ip': netaddr.IPAddress(FAKE_IP2)}) + add_flow_fn.assert_called_once_with(table=constants.ARP_RESPONDER, + priority=1, + proto='arp', + dl_vlan='vlan1', + nw_dst=FAKE_IP2, + actions=actions) + del_flow_fn.assert_called_once_with(table=constants.ARP_RESPONDER, + proto='arp', + dl_vlan='vlan1', + nw_dst=FAKE_IP1) + + def test_recl_lv_port_to_preserve(self): + self._prepare_l2_pop_ofports() + self.agent.l2_pop = True + self.agent.enable_tunneling = True + with mock.patch.object( + self.agent.tun_br, 'cleanup_tunnel_port' + ) as clean_tun_fn: + self.agent.reclaim_local_vlan('net1') + self.assertFalse(clean_tun_fn.called) + + def test_recl_lv_port_to_remove(self): + self._prepare_l2_pop_ofports() + self.agent.l2_pop = True + self.agent.enable_tunneling = True + with contextlib.nested( + mock.patch.object(self.agent.tun_br, 'delete_port'), + mock.patch.object(self.agent.tun_br, 'delete_flows') + ) as (del_port_fn, del_flow_fn): + self.agent.reclaim_local_vlan('net2') + del_port_fn.assert_called_once_with('gre-02020202') + + def test_daemon_loop_uses_polling_manager(self): + with mock.patch( + 'neutron.agent.linux.polling.get_polling_manager') as mock_get_pm: + with mock.patch.object(self.agent, 'rpc_loop') as mock_loop: + self.agent.daemon_loop() + 
mock_get_pm.assert_called_with(True, 'sudo', + constants.DEFAULT_OVSDBMON_RESPAWN) + mock_loop.assert_called_once_with(polling_manager=mock.ANY) + + def test_setup_tunnel_port_error_negative(self): + with contextlib.nested( + mock.patch.object(self.agent.tun_br, 'add_tunnel_port', + return_value='-1'), + mock.patch.object(ovs_neutron_agent.LOG, 'error') + ) as (add_tunnel_port_fn, log_error_fn): + ofport = self.agent.setup_tunnel_port( + 'gre-1', 'remote_ip', p_const.TYPE_GRE) + add_tunnel_port_fn.assert_called_once_with( + 'gre-1', 'remote_ip', self.agent.local_ip, p_const.TYPE_GRE, + self.agent.vxlan_udp_port, self.agent.dont_fragment) + log_error_fn.assert_called_once_with( + _("Failed to set-up %(type)s tunnel port to %(ip)s"), + {'type': p_const.TYPE_GRE, 'ip': 'remote_ip'}) + self.assertEqual(ofport, 0) + + def test_setup_tunnel_port_error_not_int(self): + with contextlib.nested( + mock.patch.object(self.agent.tun_br, 'add_tunnel_port', + return_value=None), + mock.patch.object(ovs_neutron_agent.LOG, 'exception'), + mock.patch.object(ovs_neutron_agent.LOG, 'error') + ) as (add_tunnel_port_fn, log_exc_fn, log_error_fn): + ofport = self.agent.setup_tunnel_port( + 'gre-1', 'remote_ip', p_const.TYPE_GRE) + add_tunnel_port_fn.assert_called_once_with( + 'gre-1', 'remote_ip', self.agent.local_ip, p_const.TYPE_GRE, + self.agent.vxlan_udp_port, self.agent.dont_fragment) + log_exc_fn.assert_called_once_with( + _("ofport should have a value that can be " + "interpreted as an integer")) + log_error_fn.assert_called_once_with( + _("Failed to set-up %(type)s tunnel port to %(ip)s"), + {'type': p_const.TYPE_GRE, 'ip': 'remote_ip'}) + self.assertEqual(ofport, 0) + + def test_setup_tunnel_port_error_negative_df_disabled(self): + with contextlib.nested( + mock.patch.object(self.agent.tun_br, 'add_tunnel_port', + return_value='-1'), + mock.patch.object(ovs_neutron_agent.LOG, 'error') + ) as (add_tunnel_port_fn, log_error_fn): + self.agent.dont_fragment = False + ofport = 
self.agent.setup_tunnel_port( + 'gre-1', 'remote_ip', p_const.TYPE_GRE) + add_tunnel_port_fn.assert_called_once_with( + 'gre-1', 'remote_ip', self.agent.local_ip, p_const.TYPE_GRE, + self.agent.vxlan_udp_port, self.agent.dont_fragment) + log_error_fn.assert_called_once_with( + _("Failed to set-up %(type)s tunnel port to %(ip)s"), + {'type': p_const.TYPE_GRE, 'ip': 'remote_ip'}) + self.assertEqual(ofport, 0) + + def test_tunnel_sync_with_ovs_plugin(self): + fake_tunnel_details = {'tunnels': [{'id': '42', + 'ip_address': '100.101.102.103'}]} + with contextlib.nested( + mock.patch.object(self.agent.plugin_rpc, 'tunnel_sync', + return_value=fake_tunnel_details), + mock.patch.object(self.agent, 'setup_tunnel_port') + ) as (tunnel_sync_rpc_fn, setup_tunnel_port_fn): + self.agent.tunnel_types = ['gre'] + self.agent.tunnel_sync() + expected_calls = [mock.call('gre-42', '100.101.102.103', 'gre')] + setup_tunnel_port_fn.assert_has_calls(expected_calls) + + def test_tunnel_sync_with_ml2_plugin(self): + fake_tunnel_details = {'tunnels': [{'ip_address': '100.101.31.15'}]} + with contextlib.nested( + mock.patch.object(self.agent.plugin_rpc, 'tunnel_sync', + return_value=fake_tunnel_details), + mock.patch.object(self.agent, 'setup_tunnel_port') + ) as (tunnel_sync_rpc_fn, setup_tunnel_port_fn): + self.agent.tunnel_types = ['vxlan'] + self.agent.tunnel_sync() + expected_calls = [mock.call('vxlan-64651f0f', + '100.101.31.15', 'vxlan')] + setup_tunnel_port_fn.assert_has_calls(expected_calls) + + def test_tunnel_sync_invalid_ip_address(self): + fake_tunnel_details = {'tunnels': [{'ip_address': '300.300.300.300'}, + {'ip_address': '100.100.100.100'}]} + with contextlib.nested( + mock.patch.object(self.agent.plugin_rpc, 'tunnel_sync', + return_value=fake_tunnel_details), + mock.patch.object(self.agent, 'setup_tunnel_port') + ) as (tunnel_sync_rpc_fn, setup_tunnel_port_fn): + self.agent.tunnel_types = ['vxlan'] + self.agent.tunnel_sync() + 
setup_tunnel_port_fn.assert_called_once_with('vxlan-64646464', + '100.100.100.100', + 'vxlan') + + def test_tunnel_update(self): + kwargs = {'tunnel_ip': '10.10.10.10', + 'tunnel_type': 'gre'} + self.agent.setup_tunnel_port = mock.Mock() + self.agent.enable_tunneling = True + self.agent.tunnel_types = ['gre'] + self.agent.l2_pop = False + self.agent.tunnel_update(context=None, **kwargs) + expected_calls = [mock.call('gre-0a0a0a0a', '10.10.10.10', 'gre')] + self.agent.setup_tunnel_port.assert_has_calls(expected_calls) + + def test_ovs_restart(self): + reply2 = {'current': set(['tap0']), + 'added': set(['tap2']), + 'removed': set([])} + + reply3 = {'current': set(['tap2']), + 'added': set([]), + 'removed': set(['tap0'])} + + with contextlib.nested( + mock.patch.object(async_process.AsyncProcess, "_spawn"), + mock.patch.object(log.ContextAdapter, 'exception'), + mock.patch.object(ovs_neutron_agent.OVSNeutronAgent, + 'scan_ports'), + mock.patch.object(ovs_neutron_agent.OVSNeutronAgent, + 'process_network_ports'), + mock.patch.object(ovs_neutron_agent.OVSNeutronAgent, + 'check_ovs_restart'), + mock.patch.object(ovs_neutron_agent.OVSNeutronAgent, + 'setup_integration_br'), + mock.patch.object(ovs_neutron_agent.OVSNeutronAgent, + 'setup_physical_bridges') + ) as (spawn_fn, log_exception, scan_ports, process_network_ports, + check_ovs_restart, setup_int_br, setup_phys_br): + log_exception.side_effect = Exception( + 'Fake exception to get out of the loop') + scan_ports.side_effect = [reply2, reply3] + process_network_ports.side_effect = [ + False, Exception('Fake exception to get out of the loop')] + check_ovs_restart.side_effect = [False, True] + + # This will exit after the second loop + try: + self.agent.daemon_loop() + except Exception: + pass + + scan_ports.assert_has_calls([ + mock.call(set(), set()), + mock.call(set(), set()) + ]) + process_network_ports.assert_has_calls([ + mock.call({'current': set(['tap0']), + 'removed': set([]), + 'added': set(['tap2'])}, False), 
+ mock.call({'current': set(['tap2']), + 'removed': set(['tap0']), + 'added': set([])}, True) + ]) + + # Verify the second time through the loop we triggered an + # OVS restart and re-setup the bridges + setup_int_br.assert_has_calls([mock.call()]) + setup_phys_br.assert_has_calls([mock.call({})]) + + +class AncillaryBridgesTest(base.BaseTestCase): + + def setUp(self): + super(AncillaryBridgesTest, self).setUp() + notifier_p = mock.patch(NOTIFIER) + notifier_cls = notifier_p.start() + self.notifier = mock.Mock() + notifier_cls.return_value = self.notifier + cfg.CONF.set_default('firewall_driver', + 'neutron.agent.firewall.NoopFirewallDriver', + group='SECURITYGROUP') + # Avoid rpc initialization for unit tests + cfg.CONF.set_override('rpc_backend', + 'neutron.openstack.common.rpc.impl_fake') + cfg.CONF.set_override('report_interval', 0, 'AGENT') + self.kwargs = ovs_neutron_agent.create_agent_config_map(cfg.CONF) + + def _test_ancillary_bridges(self, bridges, ancillary): + device_ids = ancillary[:] + + def pullup_side_effect(self, *args): + result = device_ids.pop(0) + return result + + with contextlib.nested( + mock.patch('neutron.plugins.openvswitch.agent.ovs_neutron_agent.' + 'OVSNeutronAgent.setup_integration_br', + return_value=mock.Mock()), + mock.patch('neutron.agent.linux.utils.get_interface_mac', + return_value='00:00:00:00:00:01'), + mock.patch('neutron.agent.linux.ovs_lib.OVSBridge.' + 'get_local_port_mac', + return_value='00:00:00:00:00:01'), + mock.patch('neutron.agent.linux.ovs_lib.OVSBridge.' + 'set_secure_mode'), + mock.patch('neutron.agent.linux.ovs_lib.get_bridges', + return_value=bridges), + mock.patch( + 'neutron.agent.linux.ovs_lib.get_bridge_external_bridge_id', + side_effect=pullup_side_effect), + mock.patch('neutron.plugins.openvswitch.agent.ovs_neutron_agent.' 
+ 'OVSNeutronAgent._check_arp_responder_support', + return_value=True)): + self.agent = ovs_neutron_agent.OVSNeutronAgent(**self.kwargs) + self.assertEqual(len(ancillary), len(self.agent.ancillary_brs)) + if ancillary: + bridges = [br.br_name for br in self.agent.ancillary_brs] + for br in ancillary: + self.assertIn(br, bridges) + + def test_ancillary_bridges_single(self): + bridges = ['br-int', 'br-ex'] + self._test_ancillary_bridges(bridges, ['br-ex']) + + def test_ancillary_bridges_none(self): + bridges = ['br-int'] + self._test_ancillary_bridges(bridges, []) + + def test_ancillary_bridges_multiple(self): + bridges = ['br-int', 'br-ex1', 'br-ex2'] + self._test_ancillary_bridges(bridges, ['br-ex1', 'br-ex2']) diff --git a/neutron/tests/unit/openvswitch/test_ovs_rpcapi.py b/neutron/tests/unit/openvswitch/test_ovs_rpcapi.py new file mode 100644 index 000000000..e8f75b9f4 --- /dev/null +++ b/neutron/tests/unit/openvswitch/test_ovs_rpcapi.py @@ -0,0 +1,123 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012, Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +""" +Unit Tests for openvswitch rpc +""" + +import fixtures + +from neutron.agent import rpc as agent_rpc +from neutron.common import topics +from neutron.openstack.common import context +from neutron.plugins.openvswitch.common import constants +from neutron.plugins.openvswitch import ovs_neutron_plugin as povs +from neutron.tests import base + + +class rpcApiTestCase(base.BaseTestCase): + + def _test_ovs_api(self, rpcapi, topic, method, rpc_method, **kwargs): + ctxt = context.RequestContext('fake_user', 'fake_project') + expected_retval = 'foo' if method == 'call' else None + expected_msg = rpcapi.make_msg(method, **kwargs) + if rpc_method == 'cast' and method == 'run_instance': + kwargs['call'] = False + + self.fake_args = None + self.fake_kwargs = None + + def _fake_rpc_method(*args, **kwargs): + self.fake_args = args + self.fake_kwargs = kwargs + if expected_retval: + return expected_retval + + self.useFixture(fixtures.MonkeyPatch( + 'neutron.common.rpc_compat.RpcProxy.' + rpc_method, + _fake_rpc_method)) + + retval = getattr(rpcapi, method)(ctxt, **kwargs) + + self.assertEqual(retval, expected_retval) + expected_args = [ctxt, expected_msg] + expected_kwargs = {'topic': topic} + + # skip the first argument which is 'self' + for arg, expected_arg in zip(self.fake_args[1:], expected_args): + self.assertEqual(arg, expected_arg) + self.assertEqual(expected_kwargs, self.fake_kwargs) + + def test_delete_network(self): + rpcapi = povs.AgentNotifierApi(topics.AGENT) + self._test_ovs_api(rpcapi, + topics.get_topic_name(topics.AGENT, + topics.NETWORK, + topics.DELETE), + 'network_delete', rpc_method='fanout_cast', + network_id='fake_request_spec') + + def test_port_update(self): + rpcapi = povs.AgentNotifierApi(topics.AGENT) + self._test_ovs_api(rpcapi, + topics.get_topic_name(topics.AGENT, + topics.PORT, + topics.UPDATE), + 'port_update', rpc_method='fanout_cast', + port='fake_port', + network_type='fake_network_type', + segmentation_id='fake_segmentation_id', + 
physical_network='fake_physical_network') + + def test_tunnel_update(self): + rpcapi = povs.AgentNotifierApi(topics.AGENT) + self._test_ovs_api(rpcapi, + topics.get_topic_name(topics.AGENT, + constants.TUNNEL, + topics.UPDATE), + 'tunnel_update', rpc_method='fanout_cast', + tunnel_ip='fake_ip', tunnel_id='fake_id', + tunnel_type=None) + + def test_device_details(self): + rpcapi = agent_rpc.PluginApi(topics.PLUGIN) + self._test_ovs_api(rpcapi, topics.PLUGIN, + 'get_device_details', rpc_method='call', + device='fake_device', + agent_id='fake_agent_id') + + def test_update_device_down(self): + rpcapi = agent_rpc.PluginApi(topics.PLUGIN) + self._test_ovs_api(rpcapi, topics.PLUGIN, + 'update_device_down', rpc_method='call', + device='fake_device', + agent_id='fake_agent_id', + host='fake_host') + + def test_tunnel_sync(self): + rpcapi = agent_rpc.PluginApi(topics.PLUGIN) + self._test_ovs_api(rpcapi, topics.PLUGIN, + 'tunnel_sync', rpc_method='call', + tunnel_ip='fake_tunnel_ip', + tunnel_type=None) + + def test_update_device_up(self): + rpcapi = agent_rpc.PluginApi(topics.PLUGIN) + self._test_ovs_api(rpcapi, topics.PLUGIN, + 'update_device_up', rpc_method='call', + device='fake_device', + agent_id='fake_agent_id', + host='fake_host') diff --git a/neutron/tests/unit/openvswitch/test_ovs_security_group.py b/neutron/tests/unit/openvswitch/test_ovs_security_group.py new file mode 100644 index 000000000..50e2caf27 --- /dev/null +++ b/neutron/tests/unit/openvswitch/test_ovs_security_group.py @@ -0,0 +1,104 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013, Nachi Ueno, NTT MCL, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import mock + +from neutron.api.v2 import attributes +from neutron.extensions import securitygroup as ext_sg +from neutron import manager +from neutron.tests.unit import test_extension_security_group as test_sg +from neutron.tests.unit import test_security_groups_rpc as test_sg_rpc + +PLUGIN_NAME = ('neutron.plugins.openvswitch.' + 'ovs_neutron_plugin.OVSNeutronPluginV2') +NOTIFIER = ('neutron.plugins.openvswitch.' + 'ovs_neutron_plugin.AgentNotifierApi') + + +class OpenvswitchSecurityGroupsTestCase(test_sg.SecurityGroupDBTestCase): + _plugin_name = PLUGIN_NAME + + def setUp(self, plugin=None): + test_sg_rpc.set_firewall_driver(test_sg_rpc.FIREWALL_HYBRID_DRIVER) + notifier_p = mock.patch(NOTIFIER) + notifier_cls = notifier_p.start() + self.notifier = mock.Mock() + notifier_cls.return_value = self.notifier + self._attribute_map_bk_ = {} + for item in attributes.RESOURCE_ATTRIBUTE_MAP: + self._attribute_map_bk_[item] = (attributes. + RESOURCE_ATTRIBUTE_MAP[item]. 
+ copy()) + super(OpenvswitchSecurityGroupsTestCase, self).setUp(PLUGIN_NAME) + + def tearDown(self): + super(OpenvswitchSecurityGroupsTestCase, self).tearDown() + attributes.RESOURCE_ATTRIBUTE_MAP = self._attribute_map_bk_ + + +class TestOpenvswitchSGServerRpcCallBack( + OpenvswitchSecurityGroupsTestCase, + test_sg_rpc.SGServerRpcCallBackMixinTestCase): + pass + + +class TestOpenvswitchSGServerRpcCallBackXML( + OpenvswitchSecurityGroupsTestCase, + test_sg_rpc.SGServerRpcCallBackMixinTestCaseXML): + pass + + +class TestOpenvswitchSecurityGroups(OpenvswitchSecurityGroupsTestCase, + test_sg.TestSecurityGroups, + test_sg_rpc.SGNotificationTestMixin): + def test_security_group_get_port_from_device(self): + with self.network() as n: + with self.subnet(n): + with self.security_group() as sg: + security_group_id = sg['security_group']['id'] + res = self._create_port(self.fmt, n['network']['id']) + port = self.deserialize(self.fmt, res) + fixed_ips = port['port']['fixed_ips'] + data = {'port': {'fixed_ips': fixed_ips, + 'name': port['port']['name'], + ext_sg.SECURITYGROUPS: + [security_group_id]}} + + req = self.new_update_request('ports', data, + port['port']['id']) + res = self.deserialize(self.fmt, + req.get_response(self.api)) + port_id = res['port']['id'] + plugin = manager.NeutronManager.get_plugin() + callbacks = plugin.endpoints[0] + port_dict = callbacks.get_port_from_device(port_id) + self.assertEqual(port_id, port_dict['id']) + self.assertEqual([security_group_id], + port_dict[ext_sg.SECURITYGROUPS]) + self.assertEqual([], port_dict['security_group_rules']) + self.assertEqual([fixed_ips[0]['ip_address']], + port_dict['fixed_ips']) + self._delete('ports', port_id) + + def test_security_group_get_port_from_device_with_no_port(self): + plugin = manager.NeutronManager.get_plugin() + port_dict = plugin.endpoints[0].get_port_from_device('bad_device_id') + self.assertIsNone(port_dict) + + +class TestOpenvswitchSecurityGroupsXML(TestOpenvswitchSecurityGroups): + fmt = 
'xml' diff --git a/neutron/tests/unit/openvswitch/test_ovs_tunnel.py b/neutron/tests/unit/openvswitch/test_ovs_tunnel.py new file mode 100644 index 000000000..642659037 --- /dev/null +++ b/neutron/tests/unit/openvswitch/test_ovs_tunnel.py @@ -0,0 +1,603 @@ +# Copyright 2012 VMware, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +import contextlib + +import mock +from oslo.config import cfg + +from neutron.agent.linux import ip_lib +from neutron.agent.linux import ovs_lib +from neutron.openstack.common import log +from neutron.plugins.common import constants as p_const +from neutron.plugins.openvswitch.agent import ovs_neutron_agent +from neutron.plugins.openvswitch.common import constants +from neutron.tests import base + + +# Useful global dummy variables. 
+NET_UUID = '3faeebfe-5d37-11e1-a64b-000c29d5f0a7' +LS_ID = 42 +LV_ID = 42 +LV_IDS = [42, 43] +VIF_ID = '404deaec-5d37-11e1-a64b-000c29d5f0a8' +VIF_MAC = '3c:09:24:1e:78:23' +OFPORT_NUM = 1 +VIF_PORT = ovs_lib.VifPort('port', OFPORT_NUM, + VIF_ID, VIF_MAC, 'switch') +VIF_PORTS = {VIF_ID: VIF_PORT} +LVM = ovs_neutron_agent.LocalVLANMapping(LV_ID, 'gre', None, LS_ID, VIF_PORTS) +LVM_FLAT = ovs_neutron_agent.LocalVLANMapping( + LV_ID, 'flat', 'net1', LS_ID, VIF_PORTS) +LVM_VLAN = ovs_neutron_agent.LocalVLANMapping( + LV_ID, 'vlan', 'net1', LS_ID, VIF_PORTS) + +TUN_OFPORTS = {p_const.TYPE_GRE: {'ip1': '11', 'ip2': '12'}} + +BCAST_MAC = "01:00:00:00:00:00/01:00:00:00:00:00" +UCAST_MAC = "00:00:00:00:00:00/01:00:00:00:00:00" + + +class DummyPort: + def __init__(self, interface_id): + self.interface_id = interface_id + + +class DummyVlanBinding: + def __init__(self, network_id, vlan_id): + self.network_id = network_id + self.vlan_id = vlan_id + + +class TunnelTest(base.BaseTestCase): + + def setUp(self): + super(TunnelTest, self).setUp() + cfg.CONF.set_default('firewall_driver', + 'neutron.agent.firewall.NoopFirewallDriver', + group='SECURITYGROUP') + cfg.CONF.set_override('rpc_backend', + 'neutron.openstack.common.rpc.impl_fake') + cfg.CONF.set_override('report_interval', 0, 'AGENT') + + check_arp_responder_str = ('neutron.plugins.openvswitch.agent.' + 'ovs_neutron_agent.OVSNeutronAgent.' 
+ '_check_arp_responder_support') + self.mock_check_arp_resp = mock.patch(check_arp_responder_str).start() + self.mock_check_arp_resp.return_value = True + + self.INT_BRIDGE = 'integration_bridge' + self.TUN_BRIDGE = 'tunnel_bridge' + self.MAP_TUN_BRIDGE = 'tun_br_map' + self.NET_MAPPING = {'net1': self.MAP_TUN_BRIDGE} + self.INT_OFPORT = 11111 + self.TUN_OFPORT = 22222 + self.MAP_TUN_OFPORT = 33333 + self.VETH_MTU = None + self.inta = mock.Mock() + self.intb = mock.Mock() + + self.ovs_bridges = {self.INT_BRIDGE: mock.Mock(), + self.TUN_BRIDGE: mock.Mock(), + self.MAP_TUN_BRIDGE: mock.Mock(), + } + + self.mock_bridge = mock.patch.object(ovs_lib, 'OVSBridge').start() + self.mock_bridge.side_effect = (lambda br_name, root_helper: + self.ovs_bridges[br_name]) + self.mock_bridge_expected = [ + mock.call(self.INT_BRIDGE, 'sudo'), + mock.call(self.MAP_TUN_BRIDGE, 'sudo'), + mock.call(self.TUN_BRIDGE, 'sudo'), + ] + + self.mock_int_bridge = self.ovs_bridges[self.INT_BRIDGE] + self.mock_int_bridge.get_local_port_mac.return_value = '000000000001' + self.mock_int_bridge_expected = [ + mock.call.set_secure_mode(), + mock.call.get_local_port_mac(), + mock.call.delete_port('patch-tun'), + mock.call.remove_all_flows(), + mock.call.add_flow(priority=1, actions='normal'), + mock.call.add_flow(priority=0, table=constants.CANARY_TABLE, + actions='drop') + ] + + self.mock_map_tun_bridge = self.ovs_bridges[self.MAP_TUN_BRIDGE] + self.mock_map_tun_bridge.br_name = self.MAP_TUN_BRIDGE + self.mock_map_tun_bridge.add_port.return_value = None + self.mock_map_tun_bridge_expected = [ + mock.call.remove_all_flows(), + mock.call.add_flow(priority=1, actions='normal'), + mock.call.delete_port('phy-%s' % self.MAP_TUN_BRIDGE), + mock.call.add_port(self.intb), + ] + self.mock_int_bridge.add_port.return_value = None + self.mock_int_bridge_expected += [ + mock.call.delete_port('int-%s' % self.MAP_TUN_BRIDGE), + mock.call.add_port(self.inta) + ] + self.inta_expected = [mock.call.link.set_up()] + 
self.intb_expected = [mock.call.link.set_up()] + + self.mock_int_bridge_expected += [ + mock.call.add_flow(priority=2, in_port=None, actions='drop') + ] + self.mock_map_tun_bridge_expected += [ + mock.call.add_flow(priority=2, in_port=None, actions='drop') + ] + + self.mock_tun_bridge = self.ovs_bridges[self.TUN_BRIDGE] + self.mock_tun_bridge_expected = [ + mock.call.reset_bridge(), + mock.call.add_patch_port('patch-int', 'patch-tun'), + ] + self.mock_int_bridge_expected += [ + mock.call.add_patch_port('patch-tun', 'patch-int') + ] + self.mock_int_bridge.add_patch_port.return_value = self.TUN_OFPORT + self.mock_tun_bridge.add_patch_port.return_value = self.INT_OFPORT + + self.mock_tun_bridge_expected += [ + mock.call.remove_all_flows(), + mock.call.add_flow(priority=1, + in_port=self.INT_OFPORT, + actions="resubmit(,%s)" % + constants.PATCH_LV_TO_TUN), + mock.call.add_flow(priority=0, actions="drop"), + mock.call.add_flow(priority=0, table=constants.PATCH_LV_TO_TUN, + dl_dst=UCAST_MAC, + actions="resubmit(,%s)" % + constants.UCAST_TO_TUN), + mock.call.add_flow(priority=0, table=constants.PATCH_LV_TO_TUN, + dl_dst=BCAST_MAC, + actions="resubmit(,%s)" % + constants.FLOOD_TO_TUN), + ] + for tunnel_type in constants.TUNNEL_NETWORK_TYPES: + self.mock_tun_bridge_expected.append( + mock.call.add_flow( + table=constants.TUN_TABLE[tunnel_type], + priority=0, + actions="drop")) + learned_flow = ("table=%s," + "priority=1," + "hard_timeout=300," + "NXM_OF_VLAN_TCI[0..11]," + "NXM_OF_ETH_DST[]=NXM_OF_ETH_SRC[]," + "load:0->NXM_OF_VLAN_TCI[]," + "load:NXM_NX_TUN_ID[]->NXM_NX_TUN_ID[]," + "output:NXM_OF_IN_PORT[]" % + constants.UCAST_TO_TUN) + self.mock_tun_bridge_expected += [ + mock.call.add_flow(table=constants.LEARN_FROM_TUN, + priority=1, + actions="learn(%s),output:%s" % + (learned_flow, self.INT_OFPORT)), + mock.call.add_flow(table=constants.UCAST_TO_TUN, + priority=0, + actions="resubmit(,%s)" % + constants.FLOOD_TO_TUN), + 
mock.call.add_flow(table=constants.FLOOD_TO_TUN, + priority=0, + actions="drop") + ] + + self.device_exists = mock.patch.object(ip_lib, 'device_exists').start() + self.device_exists.return_value = True + self.device_exists_expected = [ + mock.call(self.MAP_TUN_BRIDGE, 'sudo'), + mock.call('int-%s' % self.MAP_TUN_BRIDGE, 'sudo'), + ] + + self.ipdevice = mock.patch.object(ip_lib, 'IPDevice').start() + self.ipdevice_expected = [ + mock.call('int-%s' % self.MAP_TUN_BRIDGE, 'sudo'), + mock.call().link.delete() + ] + self.ipwrapper = mock.patch.object(ip_lib, 'IPWrapper').start() + add_veth = self.ipwrapper.return_value.add_veth + add_veth.return_value = [self.inta, self.intb] + self.ipwrapper_expected = [ + mock.call('sudo'), + mock.call().add_veth('int-%s' % self.MAP_TUN_BRIDGE, + 'phy-%s' % self.MAP_TUN_BRIDGE) + ] + + self.get_bridges = mock.patch.object(ovs_lib, 'get_bridges').start() + self.get_bridges.return_value = [self.INT_BRIDGE, + self.TUN_BRIDGE, + self.MAP_TUN_BRIDGE] + self.get_bridges_expected = [ + mock.call('sudo') + ] + self.execute = mock.patch('neutron.agent.linux.utils.execute').start() + self.execute_expected = [mock.call(['/sbin/udevadm', 'settle', + '--timeout=10'])] + + def _verify_mock_call(self, mock_obj, expected): + mock_obj.assert_has_calls(expected) + self.assertEqual(len(mock_obj.mock_calls), len(expected)) + + def _verify_mock_calls(self): + self._verify_mock_call(self.mock_bridge, self.mock_bridge_expected) + self._verify_mock_call(self.mock_int_bridge, + self.mock_int_bridge_expected) + self._verify_mock_call(self.mock_map_tun_bridge, + self.mock_map_tun_bridge_expected) + self._verify_mock_call(self.mock_tun_bridge, + self.mock_tun_bridge_expected) + self._verify_mock_call(self.device_exists, self.device_exists_expected) + self._verify_mock_call(self.ipdevice, self.ipdevice_expected) + self._verify_mock_call(self.ipwrapper, self.ipwrapper_expected) + self._verify_mock_call(self.get_bridges, self.get_bridges_expected) + 
self._verify_mock_call(self.inta, self.inta_expected) + self._verify_mock_call(self.intb, self.intb_expected) + self._verify_mock_call(self.execute, self.execute_expected) + + def test_construct(self): + ovs_neutron_agent.OVSNeutronAgent(self.INT_BRIDGE, + self.TUN_BRIDGE, + '10.0.0.1', self.NET_MAPPING, + 'sudo', 2, ['gre'], + self.VETH_MTU) + self._verify_mock_calls() + + # TODO(ethuleau): Initially, local ARP responder is be dependent to the + # ML2 l2 population mechanism driver. + # The next two tests use l2_pop flag to test ARP responder + def test_construct_with_arp_responder(self): + ovs_neutron_agent.OVSNeutronAgent(self.INT_BRIDGE, + self.TUN_BRIDGE, + '10.0.0.1', self.NET_MAPPING, + 'sudo', 2, ['gre'], + self.VETH_MTU, l2_population=True, + arp_responder=True) + self.mock_tun_bridge_expected.insert( + 5, mock.call.add_flow(table=constants.PATCH_LV_TO_TUN, + priority=1, + proto="arp", + dl_dst="ff:ff:ff:ff:ff:ff", + actions="resubmit(,%s)" % + constants.ARP_RESPONDER) + ) + self.mock_tun_bridge_expected.insert( + 12, mock.call.add_flow(table=constants.ARP_RESPONDER, + priority=0, + actions="resubmit(,%s)" % + constants.FLOOD_TO_TUN) + ) + self._verify_mock_calls() + + def test_construct_without_arp_responder(self): + ovs_neutron_agent.OVSNeutronAgent(self.INT_BRIDGE, + self.TUN_BRIDGE, + '10.0.0.1', self.NET_MAPPING, + 'sudo', 2, ['gre'], + self.VETH_MTU, l2_population=False, + arp_responder=True) + self._verify_mock_calls() + + def test_construct_vxlan(self): + ovs_neutron_agent.OVSNeutronAgent(self.INT_BRIDGE, + self.TUN_BRIDGE, + '10.0.0.1', + self.NET_MAPPING, + 'sudo', 2, ['vxlan'], + self.VETH_MTU) + self._verify_mock_calls() + + def test_provision_local_vlan(self): + ofports = ','.join(TUN_OFPORTS[p_const.TYPE_GRE].values()) + self.mock_tun_bridge_expected += [ + mock.call.mod_flow(table=constants.FLOOD_TO_TUN, + dl_vlan=LV_ID, + actions="strip_vlan," + "set_tunnel:%s,output:%s" % + (LS_ID, ofports)), + 
mock.call.add_flow(table=constants.TUN_TABLE['gre'], + priority=1, + tun_id=LS_ID, + actions="mod_vlan_vid:%s,resubmit(,%s)" % + (LV_ID, constants.LEARN_FROM_TUN)), + ] + + a = ovs_neutron_agent.OVSNeutronAgent(self.INT_BRIDGE, + self.TUN_BRIDGE, + '10.0.0.1', self.NET_MAPPING, + 'sudo', 2, ['gre'], + self.VETH_MTU) + a.available_local_vlans = set([LV_ID]) + a.tun_br_ofports = TUN_OFPORTS + a.provision_local_vlan(NET_UUID, p_const.TYPE_GRE, None, LS_ID) + self._verify_mock_calls() + + def test_provision_local_vlan_flat(self): + action_string = 'strip_vlan,normal' + self.mock_map_tun_bridge_expected.append( + mock.call.add_flow(priority=4, in_port=self.MAP_TUN_OFPORT, + dl_vlan=LV_ID, actions=action_string)) + + action_string = 'mod_vlan_vid:%s,normal' % LV_ID + self.mock_int_bridge_expected.append( + mock.call.add_flow(priority=3, in_port=self.INT_OFPORT, + dl_vlan=65535, actions=action_string)) + + a = ovs_neutron_agent.OVSNeutronAgent(self.INT_BRIDGE, + self.TUN_BRIDGE, + '10.0.0.1', self.NET_MAPPING, + 'sudo', 2, ['gre'], + self.VETH_MTU) + a.available_local_vlans = set([LV_ID]) + a.phys_brs['net1'] = self.mock_map_tun_bridge + a.phys_ofports['net1'] = self.MAP_TUN_OFPORT + a.int_ofports['net1'] = self.INT_OFPORT + a.provision_local_vlan(NET_UUID, p_const.TYPE_FLAT, 'net1', LS_ID) + self._verify_mock_calls() + + def test_provision_local_vlan_flat_fail(self): + a = ovs_neutron_agent.OVSNeutronAgent(self.INT_BRIDGE, + self.TUN_BRIDGE, + '10.0.0.1', self.NET_MAPPING, + 'sudo', 2, ['gre'], + self.VETH_MTU) + a.provision_local_vlan(NET_UUID, p_const.TYPE_FLAT, 'net2', LS_ID) + self._verify_mock_calls() + + def test_provision_local_vlan_vlan(self): + action_string = 'mod_vlan_vid:%s,normal' % LS_ID + self.mock_map_tun_bridge_expected.append( + mock.call.add_flow(priority=4, in_port=self.MAP_TUN_OFPORT, + dl_vlan=LV_ID, actions=action_string)) + + action_string = 'mod_vlan_vid:%s,normal' % LS_ID + self.mock_int_bridge_expected.append( + mock.call.add_flow(priority=3, 
in_port=self.INT_OFPORT, + dl_vlan=LV_ID, actions=action_string)) + + a = ovs_neutron_agent.OVSNeutronAgent(self.INT_BRIDGE, + self.TUN_BRIDGE, + '10.0.0.1', self.NET_MAPPING, + 'sudo', 2, ['gre'], + self.VETH_MTU) + a.available_local_vlans = set([LV_ID]) + a.phys_brs['net1'] = self.mock_map_tun_bridge + a.phys_ofports['net1'] = self.MAP_TUN_OFPORT + a.int_ofports['net1'] = self.INT_OFPORT + a.provision_local_vlan(NET_UUID, p_const.TYPE_VLAN, 'net1', LS_ID) + self._verify_mock_calls() + + def test_provision_local_vlan_vlan_fail(self): + a = ovs_neutron_agent.OVSNeutronAgent(self.INT_BRIDGE, + self.TUN_BRIDGE, + '10.0.0.1', self.NET_MAPPING, + 'sudo', 2, ['gre'], + self.VETH_MTU) + a.provision_local_vlan(NET_UUID, p_const.TYPE_VLAN, 'net2', LS_ID) + self._verify_mock_calls() + + def test_reclaim_local_vlan(self): + self.mock_tun_bridge_expected += [ + mock.call.delete_flows( + table=constants.TUN_TABLE['gre'], tun_id=LS_ID), + mock.call.delete_flows(dl_vlan=LVM.vlan) + ] + + a = ovs_neutron_agent.OVSNeutronAgent(self.INT_BRIDGE, + self.TUN_BRIDGE, + '10.0.0.1', self.NET_MAPPING, + 'sudo', 2, ['gre'], + self.VETH_MTU) + a.available_local_vlans = set() + a.local_vlan_map[NET_UUID] = LVM + a.reclaim_local_vlan(NET_UUID) + self.assertIn(LVM.vlan, a.available_local_vlans) + self._verify_mock_calls() + + def test_reclaim_local_vlan_flat(self): + self.mock_map_tun_bridge_expected.append( + mock.call.delete_flows( + in_port=self.MAP_TUN_OFPORT, dl_vlan=LVM_FLAT.vlan)) + self.mock_int_bridge_expected.append( + mock.call.delete_flows( + dl_vlan=65535, in_port=self.INT_OFPORT)) + + a = ovs_neutron_agent.OVSNeutronAgent(self.INT_BRIDGE, + self.TUN_BRIDGE, + '10.0.0.1', self.NET_MAPPING, + 'sudo', 2, ['gre'], + self.VETH_MTU) + a.phys_brs['net1'] = self.mock_map_tun_bridge + a.phys_ofports['net1'] = self.MAP_TUN_OFPORT + a.int_ofports['net1'] = self.INT_OFPORT + + a.available_local_vlans = set() + a.local_vlan_map[NET_UUID] = LVM_FLAT + a.reclaim_local_vlan(NET_UUID) + 
self.assertIn(LVM_FLAT.vlan, a.available_local_vlans) + self._verify_mock_calls() + + def test_reclaim_local_vlan_vlan(self): + self.mock_map_tun_bridge_expected.append( + mock.call.delete_flows( + in_port=self.MAP_TUN_OFPORT, dl_vlan=LVM_VLAN.vlan)) + self.mock_int_bridge_expected.append( + mock.call.delete_flows( + dl_vlan=LV_ID, in_port=self.INT_OFPORT)) + + a = ovs_neutron_agent.OVSNeutronAgent(self.INT_BRIDGE, + self.TUN_BRIDGE, + '10.0.0.1', self.NET_MAPPING, + 'sudo', 2, ['gre'], + self.VETH_MTU) + a.phys_brs['net1'] = self.mock_map_tun_bridge + a.phys_ofports['net1'] = self.MAP_TUN_OFPORT + a.int_ofports['net1'] = self.INT_OFPORT + + a.available_local_vlans = set() + a.local_vlan_map[NET_UUID] = LVM_VLAN + a.reclaim_local_vlan(NET_UUID) + self.assertIn(LVM_VLAN.vlan, a.available_local_vlans) + self._verify_mock_calls() + + def test_port_bound(self): + self.mock_int_bridge_expected += [ + mock.call.db_get_val('Port', VIF_PORT.port_name, 'tag'), + mock.call.set_db_attribute('Port', VIF_PORT.port_name, + 'tag', str(LVM.vlan)), + mock.call.delete_flows(in_port=VIF_PORT.ofport) + ] + + a = ovs_neutron_agent.OVSNeutronAgent(self.INT_BRIDGE, + self.TUN_BRIDGE, + '10.0.0.1', self.NET_MAPPING, + 'sudo', 2, ['gre'], + self.VETH_MTU) + a.local_vlan_map[NET_UUID] = LVM + a.port_bound(VIF_PORT, NET_UUID, 'gre', None, LS_ID, False) + self._verify_mock_calls() + + def test_port_unbound(self): + with mock.patch.object(ovs_neutron_agent.OVSNeutronAgent, + 'reclaim_local_vlan') as reclaim_local_vlan: + a = ovs_neutron_agent.OVSNeutronAgent(self.INT_BRIDGE, + self.TUN_BRIDGE, + '10.0.0.1', self.NET_MAPPING, + 'sudo', 2, ['gre'], + self.VETH_MTU) + a.local_vlan_map[NET_UUID] = LVM + a.port_unbound(VIF_ID, NET_UUID) + + reclaim_local_vlan.assert_called_once_with(NET_UUID) + self._verify_mock_calls() + + def test_port_dead(self): + self.mock_int_bridge_expected += [ + mock.call.db_get_val('Port', VIF_PORT.port_name, 'tag'), + mock.call.set_db_attribute( + 'Port', 
VIF_PORT.port_name, + 'tag', ovs_neutron_agent.DEAD_VLAN_TAG), + mock.call.add_flow(priority=2, in_port=VIF_PORT.ofport, + actions='drop') + ] + + a = ovs_neutron_agent.OVSNeutronAgent(self.INT_BRIDGE, + self.TUN_BRIDGE, + '10.0.0.1', self.NET_MAPPING, + 'sudo', 2, ['gre'], + self.VETH_MTU) + a.available_local_vlans = set([LV_ID]) + a.local_vlan_map[NET_UUID] = LVM + a.port_dead(VIF_PORT) + self._verify_mock_calls() + + def test_tunnel_update(self): + tunnel_port = '9999' + self.mock_tun_bridge.add_tunnel_port.return_value = tunnel_port + self.mock_tun_bridge_expected += [ + mock.call.add_tunnel_port('gre-1', '10.0.10.1', '10.0.0.1', + 'gre', 4789, True), + mock.call.add_flow(priority=1, in_port=tunnel_port, + actions='resubmit(,2)') + ] + + a = ovs_neutron_agent.OVSNeutronAgent(self.INT_BRIDGE, + self.TUN_BRIDGE, + '10.0.0.1', self.NET_MAPPING, + 'sudo', 2, ['gre'], + self.VETH_MTU) + a.tunnel_update( + mock.sentinel.ctx, tunnel_id='1', tunnel_ip='10.0.10.1', + tunnel_type=p_const.TYPE_GRE) + self._verify_mock_calls() + + def test_tunnel_update_self(self): + a = ovs_neutron_agent.OVSNeutronAgent(self.INT_BRIDGE, + self.TUN_BRIDGE, + '10.0.0.1', self.NET_MAPPING, + 'sudo', 2, ['gre'], + self.VETH_MTU) + a.tunnel_update( + mock.sentinel.ctx, tunnel_id='1', tunnel_ip='10.0.0.1') + self._verify_mock_calls() + + def test_daemon_loop(self): + reply2 = {'current': set(['tap0']), + 'added': set(['tap2']), + 'removed': set([])} + + reply3 = {'current': set(['tap2']), + 'added': set([]), + 'removed': set(['tap0'])} + + self.mock_int_bridge_expected += [ + mock.call.dump_flows_for_table(constants.CANARY_TABLE), + mock.call.dump_flows_for_table(constants.CANARY_TABLE) + ] + + with contextlib.nested( + mock.patch.object(log.ContextAdapter, 'exception'), + mock.patch.object(ovs_neutron_agent.OVSNeutronAgent, + 'scan_ports'), + mock.patch.object(ovs_neutron_agent.OVSNeutronAgent, + 'process_network_ports') + ) as (log_exception, scan_ports, process_network_ports): + 
log_exception.side_effect = Exception( + 'Fake exception to get out of the loop') + scan_ports.side_effect = [reply2, reply3] + process_network_ports.side_effect = [ + False, Exception('Fake exception to get out of the loop')] + + q_agent = ovs_neutron_agent.OVSNeutronAgent(self.INT_BRIDGE, + self.TUN_BRIDGE, + '10.0.0.1', + self.NET_MAPPING, + 'sudo', 2, ['gre'], + self.VETH_MTU) + + # Hack to test loop + # We start method and expect it will raise after 2nd loop + # If something goes wrong, assert_has_calls below will catch it + try: + q_agent.daemon_loop() + except Exception: + pass + + # FIXME(salv-orlando): There should not be assertions on log messages + log_exception.assert_called_once_with( + "Error while processing VIF ports") + scan_ports.assert_has_calls([ + mock.call(set(), set()), + mock.call(set(['tap0']), set()) + ]) + process_network_ports.assert_has_calls([ + mock.call({'current': set(['tap0']), + 'removed': set([]), + 'added': set(['tap2'])}, False), + mock.call({'current': set(['tap2']), + 'removed': set(['tap0']), + 'added': set([])}, False) + ]) + self._verify_mock_calls() + + +class TunnelTestWithMTU(TunnelTest): + + def setUp(self): + super(TunnelTestWithMTU, self).setUp() + self.VETH_MTU = 1500 + self.inta_expected.append(mock.call.link.set_mtu(self.VETH_MTU)) + self.intb_expected.append(mock.call.link.set_mtu(self.VETH_MTU)) diff --git a/neutron/tests/unit/plumgrid/__init__.py b/neutron/tests/unit/plumgrid/__init__.py new file mode 100644 index 000000000..39e9b8d13 --- /dev/null +++ b/neutron/tests/unit/plumgrid/__init__.py @@ -0,0 +1,17 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013 PLUMgrid, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Edgar Magana, emagana@plumgrid.com, PLUMgrid, Inc. diff --git a/neutron/tests/unit/plumgrid/test_plumgrid_plugin.py b/neutron/tests/unit/plumgrid/test_plumgrid_plugin.py new file mode 100644 index 000000000..92fa937c5 --- /dev/null +++ b/neutron/tests/unit/plumgrid/test_plumgrid_plugin.py @@ -0,0 +1,171 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# Copyright 2013 PLUMgrid, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Edgar Magana, emagana@plumgrid.com, PLUMgrid, Inc. 
+ +""" +Test cases for Neutron PLUMgrid Plug-in +""" + +import mock + +from neutron.extensions import portbindings +from neutron.extensions import providernet as provider +from neutron import manager +from neutron.openstack.common import importutils +from neutron.plugins.plumgrid.plumgrid_plugin import plumgrid_plugin +from neutron.tests.unit import _test_extension_portbindings as test_bindings +from neutron.tests.unit import test_db_plugin as test_plugin + + +PLUM_DRIVER = ('neutron.plugins.plumgrid.drivers.fake_plumlib.Plumlib') +FAKE_DIRECTOR = '1.1.1.1' +FAKE_PORT = '1234' +FAKE_USERNAME = 'fake_admin' +FAKE_PASSWORD = 'fake_password' +FAKE_TIMEOUT = '0' + + +class PLUMgridPluginV2TestCase(test_plugin.NeutronDbPluginV2TestCase): + _plugin_name = ('neutron.plugins.plumgrid.plumgrid_plugin.' + 'plumgrid_plugin.NeutronPluginPLUMgridV2') + + def setUp(self): + def mocked_plumlib_init(self): + director_plumgrid = FAKE_DIRECTOR + director_port = FAKE_PORT + director_username = FAKE_USERNAME + director_password = FAKE_PASSWORD + timeout = FAKE_TIMEOUT + self._plumlib = importutils.import_object(PLUM_DRIVER) + self._plumlib.director_conn(director_plumgrid, + director_port, timeout, + director_username, + director_password) + + with mock.patch.object(plumgrid_plugin.NeutronPluginPLUMgridV2, + 'plumgrid_init', new=mocked_plumlib_init): + super(PLUMgridPluginV2TestCase, self).setUp(self._plugin_name) + + def tearDown(self): + super(PLUMgridPluginV2TestCase, self).tearDown() + + +class TestPlumgridPluginNetworksV2(test_plugin.TestNetworksV2, + PLUMgridPluginV2TestCase): + pass + + +class TestPlumgridV2HTTPResponse(test_plugin.TestV2HTTPResponse, + PLUMgridPluginV2TestCase): + pass + + +class TestPlumgridPluginPortsV2(test_plugin.TestPortsV2, + PLUMgridPluginV2TestCase): + def test_range_allocation(self): + self.skipTest("Plugin does not support Neutron allocation process") + + +class TestPlumgridPluginSubnetsV2(test_plugin.TestSubnetsV2, + PLUMgridPluginV2TestCase): + 
_unsupported = ( + 'test_create_subnet_default_gw_conflict_allocation_pool_returns_409', + 'test_create_subnet_defaults', 'test_create_subnet_gw_values', + 'test_update_subnet_gateway_in_allocation_pool_returns_409', + 'test_update_subnet_allocation_pools', + 'test_update_subnet_allocation_pools_invalid_pool_for_cidr') + + def setUp(self): + if self._testMethodName in self._unsupported: + self.skipTest("Plugin does not support Neutron allocation process") + super(TestPlumgridPluginSubnetsV2, self).setUp() + + +class TestPlumgridPluginPortBinding(PLUMgridPluginV2TestCase, + test_bindings.PortBindingsTestCase): + VIF_TYPE = portbindings.VIF_TYPE_IOVISOR + + def setUp(self): + super(TestPlumgridPluginPortBinding, self).setUp() + + +class TestPlumgridNetworkAdminState(PLUMgridPluginV2TestCase): + def test_network_admin_state(self): + name = 'network_test' + admin_status_up = False + tenant_id = 'tenant_test' + network = {'network': {'name': name, + 'admin_state_up': admin_status_up, + 'tenant_id': tenant_id}} + plugin = manager.NeutronManager.get_plugin() + self.assertEqual(plugin._network_admin_state(network), network) + + +class TestPlumgridAllocationPool(PLUMgridPluginV2TestCase): + def test_allocate_pools_for_subnet(self): + cidr = '10.0.0.0/24' + gateway_ip = '10.0.0.254' + subnet = {'gateway_ip': gateway_ip, + 'cidr': cidr, + 'ip_version': 4} + allocation_pool = [{"start": '10.0.0.2', + "end": '10.0.0.253'}] + context = None + plugin = manager.NeutronManager.get_plugin() + pool = plugin._allocate_pools_for_subnet(context, subnet) + self.assertEqual(allocation_pool, pool) + + +class TestPlumgridProvidernet(PLUMgridPluginV2TestCase): + + def test_create_provider_network(self): + tenant_id = 'admin' + data = {'network': {'name': 'net1', + 'admin_state_up': True, + 'tenant_id': tenant_id, + provider.NETWORK_TYPE: 'vlan', + provider.SEGMENTATION_ID: 3333, + provider.PHYSICAL_NETWORK: 'phy3333'}} + + network_req = self.new_create_request('networks', data, self.fmt) + 
net = self.deserialize(self.fmt, network_req.get_response(self.api)) + plumlib = importutils.import_object(PLUM_DRIVER) + plumlib.create_network(tenant_id, net, data) + self.assertEqual(net['network'][provider.NETWORK_TYPE], 'vlan') + self.assertEqual(net['network'][provider.SEGMENTATION_ID], 3333) + self.assertEqual(net['network'][provider.PHYSICAL_NETWORK], 'phy3333') + + +class TestDisassociateFloatingIP(PLUMgridPluginV2TestCase): + + def test_disassociate_floating_ip(self): + port_id = "abcdefgh" + tenant_id = "94eb42de4e331" + fip_net_id = "b843d18245678" + fip_addr = "10.0.3.44" + fip_id = "e623679734051" + fip = {"router_id": "94eb42de4e331", + "tenant_id": tenant_id, + "floating_network_id": fip_net_id, + "fixed_ip_address": "192.168.8.2", + "floating_ip_address": fip_addr, + "port_id": port_id, + "id": fip_id} + plumlib = importutils.import_object(PLUM_DRIVER) + fip_res = plumlib.disassociate_floatingips(fip, port_id) + self.assertEqual(fip_res["id"], fip_id) + self.assertEqual(fip_res["floating_ip_address"], fip_addr) + self.assertEqual(fip_res["floating_network_id"], fip_net_id) diff --git a/neutron/tests/unit/ryu/__init__.py b/neutron/tests/unit/ryu/__init__.py new file mode 100644 index 000000000..7e503debd --- /dev/null +++ b/neutron/tests/unit/ryu/__init__.py @@ -0,0 +1,16 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
diff --git a/neutron/tests/unit/ryu/fake_ryu.py b/neutron/tests/unit/ryu/fake_ryu.py new file mode 100644 index 000000000..994748f80 --- /dev/null +++ b/neutron/tests/unit/ryu/fake_ryu.py @@ -0,0 +1,42 @@ +# Copyright (c) 2013 OpenStack Foundation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import mock + + +def patch_fake_ryu_client(): + ryu_mod = mock.Mock() + ryu_app_mod = ryu_mod.app + ryu_app_client = ryu_app_mod.client + conf_switch_key = ryu_app_mod.conf_switch_key + conf_switch_key.OVSDB_ADDR = 'ovsdb_addr' + conf_switch_key.OVS_TUNNEL_ADDR = 'ovs_tunnel_addr' + rest_nw_id = ryu_app_mod.rest_nw_id + rest_nw_id.NW_ID_EXTERNAL = '__NW_ID_EXTERNAL__' + rest_nw_id.NW_ID_RESERVED = '__NW_ID_RESERVED__' + rest_nw_id.NW_ID_VPORT_GRE = '__NW_ID_VPORT_GRE__' + rest_nw_id.NW_ID_UNKNOWN = '__NW_ID_UNKNOWN__' + rest_nw_id.RESERVED_NETWORK_IDS = [ + rest_nw_id.NW_ID_EXTERNAL, + rest_nw_id.NW_ID_RESERVED, + rest_nw_id.NW_ID_VPORT_GRE, + rest_nw_id.NW_ID_UNKNOWN, + ] + return mock.patch.dict('sys.modules', + {'ryu': ryu_mod, + 'ryu.app': ryu_app_mod, + 'ryu.app.client': ryu_app_client, + 'ryu.app.conf_switch_key': conf_switch_key, + 'ryu.app.rest_nw_id': rest_nw_id}) diff --git a/neutron/tests/unit/ryu/test_defaults.py b/neutron/tests/unit/ryu/test_defaults.py new file mode 100644 index 000000000..8e840f48a --- /dev/null +++ b/neutron/tests/unit/ryu/test_defaults.py @@ -0,0 +1,33 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2012 Isaku Yamahata +# 
All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo.config import cfg + +from neutron.plugins.ryu.common import config # noqa +from neutron.tests import base + + +class ConfigurationTest(base.BaseTestCase): + """Configuration file Tests.""" + def test_defaults(self): + self.assertEqual('br-int', cfg.CONF.OVS.integration_bridge) + self.assertEqual(2, cfg.CONF.AGENT.polling_interval) + self.assertEqual('sudo', cfg.CONF.AGENT.root_helper) + self.assertEqual('127.0.0.1:8080', cfg.CONF.OVS.openflow_rest_api) + self.assertEqual(1, cfg.CONF.OVS.tunnel_key_min) + self.assertEqual(0xffffff, cfg.CONF.OVS.tunnel_key_max) + self.assertEqual(6634, cfg.CONF.OVS.ovsdb_port) diff --git a/neutron/tests/unit/ryu/test_ryu_agent.py b/neutron/tests/unit/ryu/test_ryu_agent.py new file mode 100644 index 000000000..756b39d04 --- /dev/null +++ b/neutron/tests/unit/ryu/test_ryu_agent.py @@ -0,0 +1,651 @@ +# Copyright (c) 2013 OpenStack Foundation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +import contextlib +import httplib + +import mock + +from neutron.openstack.common import importutils +from neutron.tests import base +from neutron.tests.unit.ryu import fake_ryu + + +class RyuAgentTestCase(base.BaseTestCase): + + _AGENT_NAME = 'neutron.plugins.ryu.agent.ryu_neutron_agent' + + def setUp(self): + super(RyuAgentTestCase, self).setUp() + self.fake_ryu = fake_ryu.patch_fake_ryu_client().start() + self.mod_agent = importutils.import_module(self._AGENT_NAME) + + +class TestOVSNeutronOFPRyuAgent(RyuAgentTestCase): + def setUp(self): + super(TestOVSNeutronOFPRyuAgent, self).setUp() + self.plugin_api = mock.patch( + self._AGENT_NAME + '.RyuPluginApi').start() + self.ovsbridge = mock.patch( + self._AGENT_NAME + '.OVSBridge').start() + self.vifportset = mock.patch( + self._AGENT_NAME + '.VifPortSet').start() + self.q_ctx = mock.patch( + self._AGENT_NAME + '.q_context').start() + self.agent_rpc = mock.patch( + self._AGENT_NAME + '.agent_rpc.create_consumers').start() + self.sg_rpc = mock.patch( + self._AGENT_NAME + '.sg_rpc').start() + self.sg_agent = mock.patch( + self._AGENT_NAME + '.RyuSecurityGroupAgent').start() + + def mock_rest_addr(self, rest_addr): + integ_br = 'integ_br' + tunnel_ip = '192.168.0.1' + ovsdb_ip = '172.16.0.1' + ovsdb_port = 16634 + interval = 2 + root_helper = 'helper' + + self.mod_agent.OVSBridge.return_value.datapath_id = '1234' + + mock_context = mock.Mock(return_value='abc') + self.q_ctx.get_admin_context_without_session = mock_context + + mock_rest_addr = mock.Mock(return_value=rest_addr) + self.plugin_api.return_value.get_ofp_rest_api_addr = mock_rest_addr + + # Instantiate OVSNeutronOFPRyuAgent + return self.mod_agent.OVSNeutronOFPRyuAgent( + integ_br, tunnel_ip, ovsdb_ip, ovsdb_port, interval, root_helper) + + def test_valid_rest_addr(self): + self.mock_rest_addr('192.168.0.1:8080') + + # OVSBridge + 
self.ovsbridge.assert_has_calls([ + mock.call('integ_br', 'helper'), + mock.call().find_datapath_id() + ]) + + # RyuPluginRpc + self.plugin_api.assert_has_calls([ + mock.call('q-plugin'), + mock.call().get_ofp_rest_api_addr('abc') + ]) + + # Agent RPC + self.agent_rpc.assert_has_calls([ + mock.call(mock.ANY, 'q-agent-notifier', mock.ANY) + ]) + + # OFPClient + self.mod_agent.client.OFPClient.assert_has_calls([ + mock.call('192.168.0.1:8080') + ]) + + # VifPortSet + self.vifportset.assert_has_calls([ + mock.call( + self.ovsbridge.return_value, + self.mod_agent.client.OFPClient.return_value), + mock.call().setup() + ]) + + # SwitchConfClient + self.mod_agent.client.SwitchConfClient.assert_has_calls([ + mock.call('192.168.0.1:8080'), + mock.call().set_key('1234', 'ovs_tunnel_addr', '192.168.0.1'), + mock.call().set_key('1234', 'ovsdb_addr', + 'tcp:%s:%d' % ('172.16.0.1', 16634)) + ]) + + # OVSBridge + self.ovsbridge.return_value.set_manager.assert_has_calls([ + mock.call('ptcp:%d' % 16634) + ]) + + def test_invalid_rest_addr(self): + self.assertRaises(self.mod_agent.n_exc.Invalid, + self.mock_rest_addr, ('')) + + def mock_port_update(self, **kwargs): + agent = self.mock_rest_addr('192.168.0.1:8080') + agent.port_update(mock.Mock(), **kwargs) + + def test_port_update(self, **kwargs): + port = {'id': 1, 'security_groups': 'default'} + + with mock.patch.object(self.ovsbridge.return_value, + 'get_vif_port_by_id', + return_value=1) as get_vif: + self.mock_port_update(port=port) + + get_vif.assert_called_once_with(1) + self.sg_agent.assert_has_calls([ + mock.call().refresh_firewall() + ]) + + def test_port_update_not_vifport(self, **kwargs): + port = {'id': 1, 'security_groups': 'default'} + + with mock.patch.object(self.ovsbridge.return_value, + 'get_vif_port_by_id', + return_value=0) as get_vif: + self.mock_port_update(port=port) + + get_vif.assert_called_once_with(1) + self.assertFalse(self.sg_agent.return_value.refresh_firewall.called) + + def 
test_port_update_without_secgroup(self, **kwargs): + port = {'id': 1} + + with mock.patch.object(self.ovsbridge.return_value, + 'get_vif_port_by_id', + return_value=1) as get_vif: + self.mock_port_update(port=port) + + get_vif.assert_called_once_with(1) + self.assertFalse(self.sg_agent.return_value.refresh_firewall.called) + + def mock_update_ports(self, vif_port_set=None, registered_ports=None): + with mock.patch.object(self.ovsbridge.return_value, + 'get_vif_port_set', + return_value=vif_port_set): + agent = self.mock_rest_addr('192.168.0.1:8080') + return agent._update_ports(registered_ports) + + def test_update_ports_unchanged(self): + self.assertIsNone(self.mock_update_ports()) + + def test_update_ports_changed(self): + vif_port_set = set([1, 3]) + registered_ports = set([1, 2]) + expected = dict(current=vif_port_set, + added=set([3]), + removed=set([2])) + + actual = self.mock_update_ports(vif_port_set, registered_ports) + + self.assertEqual(expected, actual) + + def mock_process_devices_filter(self, port_info): + agent = self.mock_rest_addr('192.168.0.1:8080') + agent._process_devices_filter(port_info) + + def test_process_devices_filter_add(self): + port_info = {'added': 1} + + self.mock_process_devices_filter(port_info) + + self.sg_agent.assert_has_calls([ + mock.call().prepare_devices_filter(1) + ]) + + def test_process_devices_filter_remove(self): + port_info = {'removed': 2} + + self.mock_process_devices_filter(port_info) + + self.sg_agent.assert_has_calls([ + mock.call().remove_devices_filter(2) + ]) + + def test_process_devices_filter_both(self): + port_info = {'added': 1, 'removed': 2} + + self.mock_process_devices_filter(port_info) + + self.sg_agent.assert_has_calls([ + mock.call().prepare_devices_filter(1), + mock.call().remove_devices_filter(2) + ]) + + def test_process_devices_filter_none(self): + port_info = {} + + self.mock_process_devices_filter(port_info) + + self.assertFalse( + self.sg_agent.return_value.prepare_devices_filter.called) + 
self.assertFalse( + self.sg_agent.return_value.remove_devices_filter.called) + + +class TestRyuPluginApi(RyuAgentTestCase): + def test_get_ofp_rest_api_addr(self): + with contextlib.nested( + mock.patch(self._AGENT_NAME + '.RyuPluginApi.make_msg', + return_value='msg'), + mock.patch(self._AGENT_NAME + '.RyuPluginApi.call', + return_value='10.0.0.1') + ) as (mock_msg, mock_call): + api = self.mod_agent.RyuPluginApi('topics') + addr = api.get_ofp_rest_api_addr('context') + + self.assertEqual(addr, '10.0.0.1') + mock_msg.assert_has_calls([ + mock.call('get_ofp_rest_api') + ]) + mock_call.assert_has_calls([ + mock.call('context', 'msg', topic='topics') + ]) + + +class TestVifPortSet(RyuAgentTestCase): + def test_setup(self): + attrs = {'switch.datapath_id': 'dp1', 'ofport': 'p1'} + p1 = mock.Mock(**attrs) + attrs = {'switch.datapath_id': 'dp2', 'ofport': 'p2'} + p2 = mock.Mock(**attrs) + attrs = {'get_external_ports.return_value': [p1, p2]} + int_br = mock.Mock(**attrs) + with mock.patch(self._AGENT_NAME + '.client.OFPClient') as client: + api = client() + vif = self.mod_agent.VifPortSet(int_br, api) + vif.setup() + + client.assert_has_calls([ + mock.call().update_port('__NW_ID_EXTERNAL__', 'dp1', 'p1'), + mock.call().update_port('__NW_ID_EXTERNAL__', 'dp2', 'p2') + ]) + + def test_setup_empty(self): + attrs = {'get_external_ports.return_value': []} + int_br = mock.Mock(**attrs) + api = mock.Mock() + + vif = self.mod_agent.VifPortSet(int_br, api) + vif.setup() + + self.assertEqual(api.update_port.call_count, 0) + + +class TestOVSBridge(RyuAgentTestCase): + def setUp(self): + super(TestOVSBridge, self).setUp() + self.lib_ovs = mock.patch( + 'neutron.agent.linux.ovs_lib.OVSBridge').start() + + def test_find_datapath_id(self): + with mock.patch(self._AGENT_NAME + '.OVSBridge.get_datapath_id', + return_value='1234') as mock_get_dpid: + br = self.mod_agent.OVSBridge('br_name', 'helper') + br.find_datapath_id() + + mock_get_dpid.assert_has_calls([ + mock.call() + ]) + 
self.assertEqual(br.datapath_id, '1234') + + def test_set_manager(self): + with mock.patch( + self._AGENT_NAME + '.OVSBridge.run_vsctl') as mock_vsctl: + br = self.mod_agent.OVSBridge('br_name', 'helper') + br.set_manager('target') + + mock_vsctl.assert_has_calls([ + mock.call(['set-manager', 'target']) + ]) + + def test_get_ofport(self): + with mock.patch( + self._AGENT_NAME + '.OVSBridge.db_get_val', + return_value=1) as mock_db: + br = self.mod_agent.OVSBridge('br_name', 'helper') + ofport = br.get_ofport('name') + + mock_db.assert_has_calls([ + mock.call('Interface', 'name', 'ofport') + ]) + self.assertEqual(ofport, 1) + + def test_get_ports(self): + with contextlib.nested( + mock.patch(self._AGENT_NAME + '.OVSBridge.get_port_name_list', + return_value=['p1', 'p2']), + mock.patch(self._AGENT_NAME + '.OVSBridge.get_ofport', + return_value=1) + ) as (mock_name, mock_ofport): + get_port = mock.Mock(side_effect=['port1', 'port2']) + br = self.mod_agent.OVSBridge('br_name', 'helper') + ports = br._get_ports(get_port) + + mock_name.assert_has_calls([ + mock.call() + ]) + mock_ofport.assert_has_calls([ + mock.call('p1'), + mock.call('p2') + ]) + get_port.assert_has_calls([ + mock.call('p1'), + mock.call('p2') + ]) + self.assertEqual(len(ports), 2) + self.assertEqual(ports, ['port1', 'port2']) + + def test_get_ports_empty(self): + with contextlib.nested( + mock.patch(self._AGENT_NAME + '.OVSBridge.get_port_name_list', + return_value=[]), + mock.patch(self._AGENT_NAME + '.OVSBridge.get_ofport', + return_value=1) + ) as (mock_name, mock_ofport): + get_port = mock.Mock(side_effect=['port1', 'port2']) + br = self.mod_agent.OVSBridge('br_name', 'helper') + ports = br._get_ports(get_port) + + mock_name.assert_has_calls([ + mock.call() + ]) + self.assertEqual(mock_ofport.call_count, 0) + self.assertEqual(get_port.call_count, 0) + self.assertEqual(len(ports), 0) + + def test_get_ports_invalid_ofport(self): + with contextlib.nested( + mock.patch(self._AGENT_NAME + 
'.OVSBridge.get_port_name_list', + return_value=['p1', 'p2']), + mock.patch(self._AGENT_NAME + '.OVSBridge.get_ofport', + side_effect=[-1, 1]) + ) as (mock_name, mock_ofport): + get_port = mock.Mock(side_effect=['port1', 'port2']) + br = self.mod_agent.OVSBridge('br_name', 'helper') + ports = br._get_ports(get_port) + + mock_name.assert_has_calls([ + mock.call() + ]) + mock_ofport.assert_has_calls([ + mock.call('p1'), + mock.call('p2') + ]) + get_port.assert_has_calls([ + mock.call('p2') + ]) + self.assertEqual(len(ports), 1) + self.assertEqual(ports, ['port1']) + + def test_get_ports_invalid_port(self): + with contextlib.nested( + mock.patch(self._AGENT_NAME + '.OVSBridge.get_port_name_list', + return_value=['p1', 'p2']), + mock.patch(self._AGENT_NAME + '.OVSBridge.get_ofport', + side_effect=[1, 2]) + ) as (mock_name, mock_ofport): + get_port = mock.Mock(side_effect=[None, 'port2']) + br = self.mod_agent.OVSBridge('br_name', 'helper') + ports = br._get_ports(get_port) + + mock_name.assert_has_calls([ + mock.call() + ]) + mock_ofport.assert_has_calls([ + mock.call('p1'), + mock.call('p2') + ]) + get_port.assert_has_calls([ + mock.call('p1'), + mock.call('p2') + ]) + self.assertEqual(len(ports), 1) + self.assertEqual(ports, ['port2']) + + def test_get_external_port(self): + with contextlib.nested( + mock.patch(self._AGENT_NAME + '.OVSBridge.db_get_map', + side_effect=[None, {'opts': 'opts_val'}]), + mock.patch(self._AGENT_NAME + '.OVSBridge.get_ofport', + return_value=1), + mock.patch('neutron.agent.linux.ovs_lib.VifPort') + ) as (mock_db, mock_ofport, mock_vif): + br = self.mod_agent.OVSBridge('br_name', 'helper') + vifport = br._get_external_port('iface') + + mock_db.assert_has_calls([ + mock.call('Interface', 'iface', 'external_ids'), + mock.call('Interface', 'iface', 'options'), + ]) + mock_ofport.assert_has_calls([ + mock.call('iface') + ]) + mock_vif.assert_has_calls([ + mock.call('iface', 1, None, None, br) + ]) + self.assertEqual(vifport, 
mock_vif.return_value) + + def test_get_external_port_vmport(self): + with contextlib.nested( + mock.patch(self._AGENT_NAME + '.OVSBridge.db_get_map', + side_effect=[{'extids': 'extid_val'}, + {'opts': 'opts_val'}]), + mock.patch(self._AGENT_NAME + '.OVSBridge.get_ofport', + return_value=1), + mock.patch('neutron.agent.linux.ovs_lib.VifPort') + ) as (mock_db, mock_ofport, mock_vif): + br = self.mod_agent.OVSBridge('br_name', 'helper') + vifport = br._get_external_port('iface') + + mock_db.assert_has_calls([ + mock.call('Interface', 'iface', 'external_ids'), + ]) + self.assertEqual(mock_ofport.call_count, 0) + self.assertEqual(mock_vif.call_count, 0) + self.assertIsNone(vifport) + + def test_get_external_port_tunnel(self): + with contextlib.nested( + mock.patch(self._AGENT_NAME + '.OVSBridge.db_get_map', + side_effect=[None, {'remote_ip': '0.0.0.0'}]), + mock.patch(self._AGENT_NAME + '.OVSBridge.get_ofport', + return_value=1), + mock.patch('neutron.agent.linux.ovs_lib.VifPort') + ) as (mock_db, mock_ofport, mock_vif): + br = self.mod_agent.OVSBridge('br_name', 'helper') + vifport = br._get_external_port('iface') + + mock_db.assert_has_calls([ + mock.call('Interface', 'iface', 'external_ids'), + mock.call('Interface', 'iface', 'options'), + ]) + self.assertEqual(mock_ofport.call_count, 0) + self.assertEqual(mock_vif.call_count, 0) + self.assertIsNone(vifport) + + def test_get_external_ports(self): + with contextlib.nested( + mock.patch(self._AGENT_NAME + '.OVSBridge._get_external_port'), + mock.patch(self._AGENT_NAME + '.OVSBridge._get_ports') + ) as (mock_extport, mock_port): + br = self.mod_agent.OVSBridge('br_name', 'helper') + br.get_external_ports() + + mock_port.assert_has_calls([ + mock.call(mock_extport) + ]) + + +class TestRyuNeutronAgent(RyuAgentTestCase): + def test_get_my_ip(self): + sock_attrs = { + 'return_value.getsockname.return_value': ['1.2.3.4', '']} + with mock.patch('socket.socket', **sock_attrs): + addr = self.mod_agent._get_my_ip() + + 
self.assertEqual(addr, '1.2.3.4') + + def test_get_ip_from_nic(self): + mock_device = mock.Mock() + mock_device.addr.list = mock.Mock( + return_value=[{'ip_version': 6, 'cidr': '::ffff:1.2.3.4'}, + {'ip_version': 4, 'cidr': '1.2.3.4/8'}]) + mock_ip_wrapper = mock.Mock() + mock_ip_wrapper.device = mock.Mock(return_value=mock_device) + with mock.patch(self._AGENT_NAME + '.ip_lib.IPWrapper', + return_value=mock_ip_wrapper): + addr = self.mod_agent._get_ip_from_nic('eth0') + + self.assertEqual(addr, '1.2.3.4') + + def test_get_ip_from_nic_empty(self): + mock_device = mock.Mock() + mock_device.addr.list = mock.Mock(return_value=[]) + mock_ip_wrapper = mock.Mock() + mock_ip_wrapper.device = mock.Mock(return_value=mock_device) + with mock.patch(self._AGENT_NAME + '.ip_lib.IPWrapper', + return_value=mock_ip_wrapper): + addr = self.mod_agent._get_ip_from_nic('eth0') + + self.assertIsNone(addr) + + def test_get_ip_ip(self): + cfg_attrs = {'CONF.OVS.cfg_ip': '1.2.3.4', + 'CONF.OVS.cfg_iface': 'eth0'} + with contextlib.nested( + mock.patch(self._AGENT_NAME + '.cfg', **cfg_attrs), + mock.patch(self._AGENT_NAME + '._get_ip_from_nic', + return_value='10.0.0.1'), + mock.patch(self._AGENT_NAME + '._get_my_ip', + return_value='172.16.0.1') + ) as (_cfg, mock_nicip, mock_myip): + ip = self.mod_agent._get_ip('cfg_ip', 'cfg_iface') + + self.assertEqual(mock_nicip.call_count, 0) + self.assertEqual(mock_myip.call_count, 0) + self.assertEqual(ip, '1.2.3.4') + + def test_get_ip_nic(self): + cfg_attrs = {'CONF.OVS.cfg_ip': None, + 'CONF.OVS.cfg_iface': 'eth0'} + with contextlib.nested( + mock.patch(self._AGENT_NAME + '.cfg', **cfg_attrs), + mock.patch(self._AGENT_NAME + '._get_ip_from_nic', + return_value='10.0.0.1'), + mock.patch(self._AGENT_NAME + '._get_my_ip', + return_value='172.16.0.1') + ) as (_cfg, mock_nicip, mock_myip): + ip = self.mod_agent._get_ip('cfg_ip', 'cfg_iface') + + mock_nicip.assert_has_calls([ + mock.call('eth0') + ]) + self.assertEqual(mock_myip.call_count, 0) + 
self.assertEqual(ip, '10.0.0.1') + + def test_get_ip_myip(self): + cfg_attrs = {'CONF.OVS.cfg_ip': None, + 'CONF.OVS.cfg_iface': None} + with contextlib.nested( + mock.patch(self._AGENT_NAME + '.cfg', **cfg_attrs), + mock.patch(self._AGENT_NAME + '._get_ip_from_nic', + return_value='10.0.0.1'), + mock.patch(self._AGENT_NAME + '._get_my_ip', + return_value='172.16.0.1') + ) as (_cfg, mock_nicip, mock_myip): + ip = self.mod_agent._get_ip('cfg_ip', 'cfg_iface') + + self.assertEqual(mock_nicip.call_count, 0) + mock_myip.assert_has_calls([ + mock.call() + ]) + self.assertEqual(ip, '172.16.0.1') + + def test_get_ip_nic_myip(self): + cfg_attrs = {'CONF.OVS.cfg_ip': None, + 'CONF.OVS.cfg_iface': 'eth0'} + with contextlib.nested( + mock.patch(self._AGENT_NAME + '.cfg', **cfg_attrs), + mock.patch(self._AGENT_NAME + '._get_ip_from_nic', + return_value=None), + mock.patch(self._AGENT_NAME + '._get_my_ip', + return_value='172.16.0.1') + ) as (_cfg, mock_nicip, mock_myip): + ip = self.mod_agent._get_ip('cfg_ip', 'cfg_iface') + + mock_nicip.assert_has_calls([ + mock.call('eth0') + ]) + mock_myip.assert_has_calls([ + mock.call() + ]) + self.assertEqual(ip, '172.16.0.1') + + def test_get_tunnel_ip(self): + with mock.patch(self._AGENT_NAME + '._get_ip', + return_value='1.2.3.4') as mock_getip: + ip = self.mod_agent._get_tunnel_ip() + + mock_getip.assert_has_calls([ + mock.call('tunnel_ip', 'tunnel_interface') + ]) + self.assertEqual(ip, '1.2.3.4') + + def test_get_ovsdb_ip(self): + with mock.patch(self._AGENT_NAME + '._get_ip', + return_value='1.2.3.4') as mock_getip: + ip = self.mod_agent._get_ovsdb_ip() + + mock_getip.assert_has_calls([ + mock.call('ovsdb_ip', 'ovsdb_interface') + ]) + self.assertEqual(ip, '1.2.3.4') + + def mock_main(self): + cfg_attrs = {'CONF.OVS.integration_bridge': 'integ_br', + 'CONF.OVS.ovsdb_port': 16634, + 'CONF.AGENT.polling_interval': 2, + 'CONF.AGENT.root_helper': 'helper'} + with contextlib.nested( + mock.patch(self._AGENT_NAME + '.cfg', **cfg_attrs), 
+ mock.patch(self._AGENT_NAME + '.common_config'), + mock.patch(self._AGENT_NAME + '._get_tunnel_ip', + return_value='10.0.0.1'), + mock.patch(self._AGENT_NAME + '._get_ovsdb_ip', + return_value='172.16.0.1'), + ) as (mock_conf, mock_common_conf, _tun, _ovsdb): + self.mod_agent.main() + + mock_common_conf.assert_has_calls([ + mock.call(mock_conf) + ]) + + def test_main(self): + agent_attrs = {'daemon_loop.side_effect': SystemExit(0)} + with mock.patch(self._AGENT_NAME + '.OVSNeutronOFPRyuAgent', + **agent_attrs) as mock_agent: + self.assertRaises(SystemExit, self.mock_main) + + mock_agent.assert_has_calls([ + mock.call('integ_br', '10.0.0.1', '172.16.0.1', 16634, 2, + 'helper'), + mock.call().daemon_loop() + ]) + + def test_main_raise(self): + with contextlib.nested( + mock.patch(self._AGENT_NAME + '.OVSNeutronOFPRyuAgent', + side_effect=httplib.HTTPException('boom')), + mock.patch('sys.exit', side_effect=SystemExit(0)) + ) as (mock_agent, mock_exit): + self.assertRaises(SystemExit, self.mock_main) + + mock_agent.assert_has_calls([ + mock.call('integ_br', '10.0.0.1', '172.16.0.1', 16634, 2, + 'helper') + ]) + mock_exit.assert_has_calls([ + mock.call(1) + ]) diff --git a/neutron/tests/unit/ryu/test_ryu_db.py b/neutron/tests/unit/ryu/test_ryu_db.py new file mode 100644 index 000000000..3dd49fcc8 --- /dev/null +++ b/neutron/tests/unit/ryu/test_ryu_db.py @@ -0,0 +1,57 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2012 Isaku Yamahata +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +import contextlib +import operator + +from neutron.db import api as db +from neutron.plugins.ryu.common import config # noqa +from neutron.plugins.ryu.db import api_v2 as db_api_v2 +from neutron.plugins.ryu.db import models_v2 as ryu_models_v2 # noqa +from neutron.tests.unit import test_db_plugin as test_plugin + + +class RyuDBTest(test_plugin.NeutronDbPluginV2TestCase): + @staticmethod + def _tunnel_key_sort(key_list): + key_list.sort(key=operator.attrgetter('tunnel_key')) + return [(key.network_id, key.tunnel_key) for key in key_list] + + def test_key_allocation(self): + tunnel_key = db_api_v2.TunnelKey() + session = db.get_session() + with contextlib.nested(self.network('network-0'), + self.network('network-1') + ) as (network_0, network_1): + network_id0 = network_0['network']['id'] + key0 = tunnel_key.allocate(session, network_id0) + network_id1 = network_1['network']['id'] + key1 = tunnel_key.allocate(session, network_id1) + key_list = tunnel_key.all_list() + self.assertEqual(len(key_list), 2) + + expected_list = [(network_id0, key0), (network_id1, key1)] + self.assertEqual(self._tunnel_key_sort(key_list), + expected_list) + + tunnel_key.delete(session, network_id0) + key_list = tunnel_key.all_list() + self.assertEqual(self._tunnel_key_sort(key_list), + [(network_id1, key1)]) + + tunnel_key.delete(session, network_id1) + self.assertEqual(tunnel_key.all_list(), []) diff --git a/neutron/tests/unit/ryu/test_ryu_plugin.py b/neutron/tests/unit/ryu/test_ryu_plugin.py new file mode 100644 index 000000000..e0dfe283e --- /dev/null +++ b/neutron/tests/unit/ryu/test_ryu_plugin.py @@ -0,0 +1,51 @@ +# Copyright (c) 2012 OpenStack Foundation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import mock + +from neutron import manager +from neutron.plugins.ryu.db import models_v2 as ryu_models_v2 # noqa +from neutron.tests.unit.ryu import fake_ryu +from neutron.tests.unit import test_db_plugin as test_plugin + + +class RyuPluginV2TestCase(test_plugin.NeutronDbPluginV2TestCase): + + _plugin_name = 'neutron.plugins.ryu.ryu_neutron_plugin.RyuNeutronPluginV2' + + def setUp(self): + self.ryu_patcher = fake_ryu.patch_fake_ryu_client() + self.ryu_patcher.start() + super(RyuPluginV2TestCase, self).setUp(self._plugin_name) + self.addCleanup(self.ryu_patcher.stop) + plugin = manager.NeutronManager.get_plugin() + plugin.notifier = mock.Mock() + + +class TestRyuBasicGet(test_plugin.TestBasicGet, RyuPluginV2TestCase): + pass + + +class TestRyuV2HTTPResponse(test_plugin.TestV2HTTPResponse, + RyuPluginV2TestCase): + pass + + +class TestRyuPortsV2(test_plugin.TestPortsV2, RyuPluginV2TestCase): + pass + + +class TestRyuNetworksV2(test_plugin.TestNetworksV2, RyuPluginV2TestCase): + pass diff --git a/neutron/tests/unit/ryu/test_ryu_security_group.py b/neutron/tests/unit/ryu/test_ryu_security_group.py new file mode 100644 index 000000000..a023136ef --- /dev/null +++ b/neutron/tests/unit/ryu/test_ryu_security_group.py @@ -0,0 +1,92 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2012, Nachi Ueno, NTT MCL, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import contextlib + +import mock + +from neutron.api.v2 import attributes +from neutron.extensions import securitygroup as ext_sg +from neutron import manager +from neutron.tests.unit.ryu import fake_ryu +from neutron.tests.unit import test_extension_security_group as test_sg +from neutron.tests.unit import test_security_groups_rpc as test_sg_rpc + +PLUGIN_NAME = ('neutron.plugins.ryu.' + 'ryu_neutron_plugin.RyuNeutronPluginV2') +NOTIFIER = ('neutron.plugins.ryu.' + 'ryu_neutron_plugin.AgentNotifierApi') + + +class RyuSecurityGroupsTestCase(test_sg.SecurityGroupDBTestCase): + _plugin_name = PLUGIN_NAME + + def setUp(self, plugin=None): + test_sg_rpc.set_firewall_driver(test_sg_rpc.FIREWALL_HYBRID_DRIVER) + self.fake_ryu = fake_ryu.patch_fake_ryu_client().start() + self.notifier = mock.patch(NOTIFIER).start().return_value + self._attribute_map_bk_ = {} + for item in attributes.RESOURCE_ATTRIBUTE_MAP: + self._attribute_map_bk_[item] = (attributes. + RESOURCE_ATTRIBUTE_MAP[item]. 
+ copy()) + super(RyuSecurityGroupsTestCase, self).setUp(PLUGIN_NAME) + + def tearDown(self): + super(RyuSecurityGroupsTestCase, self).tearDown() + attributes.RESOURCE_ATTRIBUTE_MAP = self._attribute_map_bk_ + + +class TestRyuSecurityGroups(RyuSecurityGroupsTestCase, + test_sg.TestSecurityGroups, + test_sg_rpc.SGNotificationTestMixin): + def test_security_group_get_port_from_device(self): + with contextlib.nested(self.network(), + self.security_group()) as (n, sg): + with self.subnet(n): + security_group_id = sg['security_group']['id'] + res = self._create_port(self.fmt, n['network']['id']) + port = self.deserialize(self.fmt, res) + fixed_ips = port['port']['fixed_ips'] + data = {'port': {'fixed_ips': fixed_ips, + 'name': port['port']['name'], + ext_sg.SECURITYGROUPS: + [security_group_id]}} + + req = self.new_update_request('ports', data, + port['port']['id']) + res = self.deserialize(self.fmt, + req.get_response(self.api)) + port_id = res['port']['id'] + plugin = manager.NeutronManager.get_plugin() + port_dict = plugin.endpoints[0].get_port_from_device(port_id) + self.assertEqual(port_id, port_dict['id']) + self.assertEqual([security_group_id], + port_dict[ext_sg.SECURITYGROUPS]) + self.assertEqual([], port_dict['security_group_rules']) + self.assertEqual([fixed_ips[0]['ip_address']], + port_dict['fixed_ips']) + self._delete('ports', port_id) + + def test_security_group_get_port_from_device_with_no_port(self): + plugin = manager.NeutronManager.get_plugin() + port_dict = plugin.endpoints[0].get_port_from_device('bad_device_id') + self.assertIsNone(port_dict) + + +class TestRyuSecurityGroupsXML(TestRyuSecurityGroups): + fmt = 'xml' diff --git a/neutron/tests/unit/services/__init__.py b/neutron/tests/unit/services/__init__.py new file mode 100644 index 000000000..ce18bf6d6 --- /dev/null +++ b/neutron/tests/unit/services/__init__.py @@ -0,0 +1,17 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013 New Dream Network, LLC (DreamHost) +# +# Licensed 
under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Mark McClain, DreamHost diff --git a/neutron/tests/unit/services/firewall/__init__.py b/neutron/tests/unit/services/firewall/__init__.py new file mode 100644 index 000000000..cae279d0a --- /dev/null +++ b/neutron/tests/unit/services/firewall/__init__.py @@ -0,0 +1,15 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/neutron/tests/unit/services/firewall/agents/__init__.py b/neutron/tests/unit/services/firewall/agents/__init__.py new file mode 100644 index 000000000..cae279d0a --- /dev/null +++ b/neutron/tests/unit/services/firewall/agents/__init__.py @@ -0,0 +1,15 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/neutron/tests/unit/services/firewall/agents/l3reference/__init__.py b/neutron/tests/unit/services/firewall/agents/l3reference/__init__.py new file mode 100644 index 000000000..cae279d0a --- /dev/null +++ b/neutron/tests/unit/services/firewall/agents/l3reference/__init__.py @@ -0,0 +1,15 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/neutron/tests/unit/services/firewall/agents/l3reference/test_firewall_l3_agent.py b/neutron/tests/unit/services/firewall/agents/l3reference/test_firewall_l3_agent.py new file mode 100644 index 000000000..7daffce19 --- /dev/null +++ b/neutron/tests/unit/services/firewall/agents/l3reference/test_firewall_l3_agent.py @@ -0,0 +1,391 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright (c) 2013 OpenStack Foundation +# All Rights Reserved. +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Sumit Naiksatam, sumitnaiksatam@gmail.com, Big Switch Networks, Inc. +# @author: Sridar Kandaswamy, skandasw@cisco.com, Cisco Systems, Inc. +# @author: Dan Florea, dflorea@cisco.com, Cisco Systems, Inc. + +import contextlib +import uuid + +import mock +from oslo.config import cfg + +from neutron.agent.common import config as agent_config +from neutron.agent import l3_agent +from neutron.agent.linux import ip_lib +from neutron.common import config as base_config +from neutron import context +from neutron.plugins.common import constants +from neutron.services.firewall.agents.l3reference import firewall_l3_agent +from neutron.tests import base +from neutron.tests.unit.services.firewall.agents import test_firewall_agent_api + + +class FWaasHelper(object): + def __init__(self, host): + pass + + +class FWaasAgent(firewall_l3_agent.FWaaSL3AgentRpcCallback, FWaasHelper): + def __init__(self, conf=None): + super(FWaasAgent, self).__init__(conf) + + +class TestFwaasL3AgentRpcCallback(base.BaseTestCase): + def setUp(self): + super(TestFwaasL3AgentRpcCallback, self).setUp() + + self.conf = cfg.ConfigOpts() + self.conf.register_opts(base_config.core_opts) + self.conf.register_opts(l3_agent.L3NATAgent.OPTS) + agent_config.register_use_namespaces_opts_helper(self.conf) + agent_config.register_root_helper(self.conf) + self.conf.root_helper = 'sudo' + self.api = FWaasAgent(self.conf) + self.api.fwaas_driver = test_firewall_agent_api.NoopFwaasDriver() + + def test_create_firewall(self): + fake_firewall = {'id': 0} + with mock.patch.object( + self.api, + 
'_invoke_driver_for_plugin_api' + ) as mock_driver: + self.assertEqual( + self.api.create_firewall( + mock.sentinel.context, + fake_firewall, + 'host'), + mock_driver.return_value) + + def test_update_firewall(self): + fake_firewall = {'id': 0} + with mock.patch.object( + self.api, + '_invoke_driver_for_plugin_api' + ) as mock_driver: + self.assertEqual( + self.api.update_firewall( + mock.sentinel.context, + fake_firewall, + 'host'), + mock_driver.return_value) + + def test_delete_firewall(self): + fake_firewall = {'id': 0} + with mock.patch.object( + self.api, + '_invoke_driver_for_plugin_api' + ) as mock_driver: + self.assertEqual( + self.api.delete_firewall( + mock.sentinel.context, + fake_firewall, + 'host'), + mock_driver.return_value) + + def test_invoke_driver_for_plugin_api(self): + fake_firewall = {'id': 0, 'tenant_id': 1, + 'admin_state_up': True} + self.api.plugin_rpc = mock.Mock() + with contextlib.nested( + mock.patch.object(self.api.plugin_rpc, 'get_routers'), + mock.patch.object(self.api, '_get_router_info_list_for_tenant'), + mock.patch.object(self.api.fwaas_driver, 'create_firewall'), + mock.patch.object(self.api.fwplugin_rpc, 'set_firewall_status') + ) as ( + mock_get_routers, + mock_get_router_info_list_for_tenant, + mock_driver_create_firewall, + mock_set_firewall_status): + + mock_driver_create_firewall.return_value = True + self.api.create_firewall( + context=mock.sentinel.context, + firewall=fake_firewall, host='host') + + mock_get_routers.assert_called_once_with( + mock.sentinel.context) + + mock_get_router_info_list_for_tenant.assert_called_once_with( + mock_get_routers.return_value, fake_firewall['tenant_id']) + + mock_set_firewall_status.assert_called_once_with( + mock.sentinel.context, + fake_firewall['id'], + 'ACTIVE') + + def test_invoke_driver_for_plugin_api_admin_state_down(self): + fake_firewall = {'id': 0, 'tenant_id': 1, + 'admin_state_up': False} + self.api.plugin_rpc = mock.Mock() + with contextlib.nested( + 
mock.patch.object(self.api.plugin_rpc, 'get_routers'), + mock.patch.object(self.api, '_get_router_info_list_for_tenant'), + mock.patch.object(self.api.fwaas_driver, 'update_firewall'), + mock.patch.object(self.api.fwplugin_rpc, + 'get_firewalls_for_tenant'), + mock.patch.object(self.api.fwplugin_rpc, 'set_firewall_status') + ) as ( + mock_get_routers, + mock_get_router_info_list_for_tenant, + mock_driver_update_firewall, + mock_get_firewalls_for_tenant, + mock_set_firewall_status): + + mock_driver_update_firewall.return_value = True + self.api.update_firewall( + context=mock.sentinel.context, + firewall=fake_firewall, host='host') + + mock_get_routers.assert_called_once_with( + mock.sentinel.context) + + mock_get_router_info_list_for_tenant.assert_called_once_with( + mock_get_routers.return_value, fake_firewall['tenant_id']) + + mock_set_firewall_status.assert_called_once_with( + mock.sentinel.context, + fake_firewall['id'], + 'DOWN') + + def test_invoke_driver_for_plugin_api_delete(self): + fake_firewall = {'id': 0, 'tenant_id': 1, + 'admin_state_up': True} + self.api.plugin_rpc = mock.Mock() + with contextlib.nested( + mock.patch.object(self.api.plugin_rpc, 'get_routers'), + mock.patch.object(self.api, '_get_router_info_list_for_tenant'), + mock.patch.object(self.api.fwaas_driver, 'delete_firewall'), + mock.patch.object(self.api.fwplugin_rpc, 'firewall_deleted') + ) as ( + mock_get_routers, + mock_get_router_info_list_for_tenant, + mock_driver_delete_firewall, + mock_firewall_deleted): + + mock_driver_delete_firewall.return_value = True + self.api.delete_firewall( + context=mock.sentinel.context, + firewall=fake_firewall, host='host') + + mock_get_routers.assert_called_once_with( + mock.sentinel.context) + + mock_get_router_info_list_for_tenant.assert_called_once_with( + mock_get_routers.return_value, fake_firewall['tenant_id']) + + mock_firewall_deleted.assert_called_once_with( + mock.sentinel.context, + fake_firewall['id']) + + def 
test_delete_firewall_no_router(self): + fake_firewall = {'id': 0, 'tenant_id': 1} + self.api.plugin_rpc = mock.Mock() + with contextlib.nested( + mock.patch.object(self.api.plugin_rpc, 'get_routers'), + mock.patch.object(self.api, '_get_router_info_list_for_tenant'), + mock.patch.object(self.api.fwplugin_rpc, 'firewall_deleted') + ) as ( + mock_get_routers, + mock_get_router_info_list_for_tenant, + mock_firewall_deleted): + + mock_get_router_info_list_for_tenant.return_value = [] + self.api.delete_firewall( + context=mock.sentinel.context, + firewall=fake_firewall, host='host') + + mock_get_routers.assert_called_once_with( + mock.sentinel.context) + + mock_get_router_info_list_for_tenant.assert_called_once_with( + mock_get_routers.return_value, fake_firewall['tenant_id']) + + mock_firewall_deleted.assert_called_once_with( + mock.sentinel.context, + fake_firewall['id']) + + def test_process_router_add_fw_update(self): + fake_firewall_list = [{'id': 0, 'tenant_id': 1, + 'status': constants.PENDING_UPDATE, + 'admin_state_up': True}] + fake_router = {'id': 1111, 'tenant_id': 2} + self.api.plugin_rpc = mock.Mock() + ri = mock.Mock() + ri.router = fake_router + routers = [ri.router] + with contextlib.nested( + mock.patch.object(self.api.plugin_rpc, 'get_routers'), + mock.patch.object(self.api, '_get_router_info_list_for_tenant'), + mock.patch.object(self.api.fwaas_driver, 'update_firewall'), + mock.patch.object(self.api.fwplugin_rpc, 'set_firewall_status'), + mock.patch.object(self.api.fwplugin_rpc, + 'get_firewalls_for_tenant'), + mock.patch.object(context, 'Context') + ) as ( + mock_get_routers, + mock_get_router_info_list_for_tenant, + mock_driver_update_firewall, + mock_set_firewall_status, + mock_get_firewalls_for_tenant, + mock_Context): + + mock_driver_update_firewall.return_value = True + ctx = mock.sentinel.context + mock_Context.return_value = ctx + mock_get_router_info_list_for_tenant.return_value = routers + mock_get_firewalls_for_tenant.return_value = 
fake_firewall_list + + self.api._process_router_add(ri) + mock_get_router_info_list_for_tenant.assert_called_with( + routers, + ri.router['tenant_id']) + mock_get_firewalls_for_tenant.assert_called_once_with(ctx) + mock_driver_update_firewall.assert_called_once_with( + routers, + fake_firewall_list[0]) + + mock_set_firewall_status.assert_called_once_with( + ctx, + fake_firewall_list[0]['id'], + constants.ACTIVE) + + def test_process_router_add_fw_delete(self): + fake_firewall_list = [{'id': 0, 'tenant_id': 1, + 'status': constants.PENDING_DELETE}] + fake_router = {'id': 1111, 'tenant_id': 2} + self.api.plugin_rpc = mock.Mock() + ri = mock.Mock() + ri.router = fake_router + routers = [ri.router] + with contextlib.nested( + mock.patch.object(self.api.plugin_rpc, 'get_routers'), + mock.patch.object(self.api, '_get_router_info_list_for_tenant'), + mock.patch.object(self.api.fwaas_driver, 'delete_firewall'), + mock.patch.object(self.api.fwplugin_rpc, 'firewall_deleted'), + mock.patch.object(self.api.fwplugin_rpc, + 'get_firewalls_for_tenant'), + mock.patch.object(context, 'Context') + ) as ( + mock_get_routers, + mock_get_router_info_list_for_tenant, + mock_driver_delete_firewall, + mock_firewall_deleted, + mock_get_firewalls_for_tenant, + mock_Context): + + mock_driver_delete_firewall.return_value = True + ctx = mock.sentinel.context + mock_Context.return_value = ctx + mock_get_router_info_list_for_tenant.return_value = routers + mock_get_firewalls_for_tenant.return_value = fake_firewall_list + + self.api._process_router_add(ri) + mock_get_router_info_list_for_tenant.assert_called_with( + routers, + ri.router['tenant_id']) + mock_get_firewalls_for_tenant.assert_called_once_with(ctx) + mock_driver_delete_firewall.assert_called_once_with( + routers, + fake_firewall_list[0]) + + mock_firewall_deleted.assert_called_once_with( + ctx, + fake_firewall_list[0]['id']) + + def _prepare_router_data(self, use_namespaces): + router = {'id': str(uuid.uuid4()), 'tenant_id': 
str(uuid.uuid4())} + return l3_agent.RouterInfo(router['id'], self.conf.root_helper, + use_namespaces, router=router) + + def _get_router_info_list_with_namespace_helper(self, + router_use_namespaces): + self.conf.set_override('use_namespaces', True) + ri = self._prepare_router_data( + use_namespaces=router_use_namespaces) + routers = [ri.router] + self.api.router_info = {ri.router_id: ri} + with mock.patch.object(ip_lib.IPWrapper, + 'get_namespaces') as mock_get_namespaces: + mock_get_namespaces.return_value = ri.ns_name + router_info_list = self.api._get_router_info_list_for_tenant( + routers, + ri.router['tenant_id']) + self.assertEqual([ri], router_info_list) + mock_get_namespaces.assert_called_once_with( + self.conf.root_helper) + + def _get_router_info_list_without_namespace_helper(self, + router_use_namespaces): + self.conf.set_override('use_namespaces', False) + ri = self._prepare_router_data( + use_namespaces=router_use_namespaces) + routers = [ri.router] + self.api.router_info = {ri.router_id: ri} + router_info_list = self.api._get_router_info_list_for_tenant( + routers, + ri.router['tenant_id']) + if router_use_namespaces: + self.assertFalse(router_info_list) + else: + self.assertEqual([ri], router_info_list) + + def test_get_router_info_list_for_tenant_for_namespaces_enabled(self): + self._get_router_info_list_with_namespace_helper( + router_use_namespaces=True) + + def test_get_router_info_list_for_tenant_for_namespaces_disabled(self): + self._get_router_info_list_without_namespace_helper( + router_use_namespaces=False) + + def test_get_router_info_list_tenant_with_namespace_router_without(self): + self._get_router_info_list_with_namespace_helper( + router_use_namespaces=False) + + def test_get_router_info_list_tenant_without_namespace_router_with(self): + self._get_router_info_list_without_namespace_helper( + router_use_namespaces=True) + + def _get_router_info_list_router_without_router_info_helper(self, + rtr_with_ri): + 
self.conf.set_override('use_namespaces', True) + # ri.router with associated router_info (ri) + # rtr2 has no router_info + ri = self._prepare_router_data(use_namespaces=True) + rtr2 = {'id': str(uuid.uuid4()), 'tenant_id': ri.router['tenant_id']} + routers = [rtr2] + self.api.router_info = {} + ri_expected = [] + if rtr_with_ri: + self.api.router_info[ri.router_id] = ri + routers.append(ri.router) + ri_expected.append(ri) + with mock.patch.object(ip_lib.IPWrapper, + 'get_namespaces') as mock_get_namespaces: + mock_get_namespaces.return_value = ri.ns_name + router_info_list = self.api._get_router_info_list_for_tenant( + routers, + ri.router['tenant_id']) + self.assertEqual(ri_expected, router_info_list) + + def test_get_router_info_list_router_without_router_info(self): + self._get_router_info_list_router_without_router_info_helper( + rtr_with_ri=False) + + def test_get_router_info_list_two_routers_one_without_router_info(self): + self._get_router_info_list_router_without_router_info_helper( + rtr_with_ri=True) diff --git a/neutron/tests/unit/services/firewall/agents/test_firewall_agent_api.py b/neutron/tests/unit/services/firewall/agents/test_firewall_agent_api.py new file mode 100644 index 000000000..3b76c5af3 --- /dev/null +++ b/neutron/tests/unit/services/firewall/agents/test_firewall_agent_api.py @@ -0,0 +1,105 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright (c) 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Sumit Naiksatam, sumitnaiksatam@gmail.com, Big Switch Networks, Inc. +# @author: Sridar Kandaswamy, skandasw@cisco.com, Cisco Systems, Inc. +# @author: Dan Florea, dflorea@cisco.com, Cisco Systems, Inc. + +import contextlib +import mock + +from neutron.services.firewall.agents import firewall_agent_api as api +from neutron.services.firewall.drivers import fwaas_base as base_driver +from neutron.tests import base + + +class NoopFwaasDriver(base_driver.FwaasDriverBase): + """Noop Fwaas Driver. + + Firewall driver which does nothing. + This driver is for disabling Fwaas functionality. + """ + + def create_firewall(self, apply_list, firewall): + pass + + def delete_firewall(self, apply_list, firewall): + pass + + def update_firewall(self, apply_list, firewall): + pass + + def apply_default_policy(self, apply_list, firewall): + pass + + +class TestFWaaSAgentApi(base.BaseTestCase): + def setUp(self): + super(TestFWaaSAgentApi, self).setUp() + + self.api = api.FWaaSPluginApiMixin( + 'topic', + 'host') + + def test_init(self): + self.assertEqual(self.api.host, 'host') + + def test_set_firewall_status(self): + with contextlib.nested( + mock.patch.object(self.api, 'make_msg'), + mock.patch.object(self.api, 'call') + ) as (mock_make_msg, mock_call): + + self.assertEqual( + self.api.set_firewall_status( + mock.sentinel.context, + 'firewall_id', + 'status'), + mock_call.return_value) + + mock_make_msg.assert_called_once_with( + 'set_firewall_status', + host='host', + firewall_id='firewall_id', + status='status') + + mock_call.assert_called_once_with( + mock.sentinel.context, + mock_make_msg.return_value, + topic='topic') + + def test_firewall_deleted(self): + with contextlib.nested( + mock.patch.object(self.api, 'make_msg'), + mock.patch.object(self.api, 'call') + ) as (mock_make_msg, mock_call): + + self.assertEqual( + self.api.firewall_deleted( + 
mock.sentinel.context, + 'firewall_id'), + mock_call.return_value) + + mock_make_msg.assert_called_once_with( + 'firewall_deleted', + host='host', + firewall_id='firewall_id') + + mock_call.assert_called_once_with( + mock.sentinel.context, + mock_make_msg.return_value, + topic='topic') diff --git a/neutron/tests/unit/services/firewall/agents/varmour/__init__.py b/neutron/tests/unit/services/firewall/agents/varmour/__init__.py new file mode 100755 index 000000000..5e8da711f --- /dev/null +++ b/neutron/tests/unit/services/firewall/agents/varmour/__init__.py @@ -0,0 +1,16 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/neutron/tests/unit/services/firewall/agents/varmour/test_varmour_router.py b/neutron/tests/unit/services/firewall/agents/varmour/test_varmour_router.py new file mode 100644 index 000000000..6a08e34b2 --- /dev/null +++ b/neutron/tests/unit/services/firewall/agents/varmour/test_varmour_router.py @@ -0,0 +1,322 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013 vArmour Networks Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Gary Duan, vArmour Networks Inc. +# + + +import mock +from oslo.config import cfg + +from neutron.agent.common import config as agent_config +from neutron.agent import l3_agent +from neutron.agent.linux import interface +from neutron.common import config as base_config +from neutron.common import constants as l3_constants +from neutron.openstack.common import uuidutils +from neutron.services.firewall.agents.varmour import varmour_router +from neutron.services.firewall.agents.varmour import varmour_utils +from neutron.tests import base + +_uuid = uuidutils.generate_uuid +HOSTNAME = 'myhost' +FAKE_DIRECTOR = '1.1.1.1' + + +class TestVarmourRouter(base.BaseTestCase): + + def setUp(self): + super(TestVarmourRouter, self).setUp() + self.conf = cfg.ConfigOpts() + self.conf.register_opts(base_config.core_opts) + self.conf.register_opts(varmour_router.vArmourL3NATAgent.OPTS) + agent_config.register_interface_driver_opts_helper(self.conf) + agent_config.register_use_namespaces_opts_helper(self.conf) + agent_config.register_root_helper(self.conf) + self.conf.register_opts(interface.OPTS) + self.conf.set_override('interface_driver', + 'neutron.agent.linux.interface.NullDriver') + self.conf.root_helper = 'sudo' + + self.device_exists_p = mock.patch( + 'neutron.agent.linux.ip_lib.device_exists') + self.device_exists = self.device_exists_p.start() + + self.utils_exec_p = mock.patch( + 'neutron.agent.linux.utils.execute') + self.utils_exec = self.utils_exec_p.start() + + self.external_process_p = mock.patch( + 
'neutron.agent.linux.external_process.ProcessManager') + self.external_process = self.external_process_p.start() + + self.dvr_cls_p = mock.patch('neutron.agent.linux.interface.NullDriver') + driver_cls = self.dvr_cls_p.start() + self.mock_driver = mock.MagicMock() + self.mock_driver.DEV_NAME_LEN = ( + interface.LinuxInterfaceDriver.DEV_NAME_LEN) + driver_cls.return_value = self.mock_driver + + self.ip_cls_p = mock.patch('neutron.agent.linux.ip_lib.IPWrapper') + ip_cls = self.ip_cls_p.start() + self.mock_ip = mock.MagicMock() + ip_cls.return_value = self.mock_ip + + self.looping_call_p = mock.patch( + 'neutron.openstack.common.loopingcall.FixedIntervalLoopingCall') + self.looping_call_p.start() + + def _create_router(self): + router = varmour_router.vArmourL3NATAgent(HOSTNAME, self.conf) + router.rest.server = FAKE_DIRECTOR + router.rest.user = 'varmour' + router.rest.passwd = 'varmour' + return router + + def _del_all_internal_ports(self, router): + router[l3_constants.INTERFACE_KEY] = [] + + def _del_internal_ports(self, router, port_idx): + del router[l3_constants.INTERFACE_KEY][port_idx] + + def _add_internal_ports(self, router, port_count=1): + self._del_all_internal_ports(router) + for i in range(port_count): + port = {'id': _uuid(), + 'network_id': _uuid(), + 'admin_state_up': True, + 'fixed_ips': [{'ip_address': '10.0.%s.4' % i, + 'subnet_id': _uuid()}], + 'mac_address': 'ca:fe:de:ad:be:ef', + 'subnet': {'cidr': '10.0.%s.0/24' % i, + 'gateway_ip': '10.0.%s.1' % i}} + router[l3_constants.INTERFACE_KEY].append(port) + + def _del_all_floating_ips(self, router): + router[l3_constants.FLOATINGIP_KEY] = [] + + def _del_floating_ips(self, router, port_idx): + del router[l3_constants.FLOATINGIP_KEY][port_idx] + + def _add_floating_ips(self, router, port_count=1): + self._del_all_floating_ips(router) + for i in range(port_count): + fip = {'id': _uuid(), + 'port_id': router['gw_port']['id'], + 'floating_ip_address': '172.24.4.%s' % (100 + i), + 'fixed_ip_address': 
'10.0.0.%s' % (100 + i)} + router[l3_constants.FLOATINGIP_KEY].append(fip) + + def _prepare_router_data(self, enable_snat=None): + router_id = _uuid() + ex_gw_port = {'id': _uuid(), + 'network_id': _uuid(), + 'fixed_ips': [{'ip_address': '172.24.4.2', + 'subnet_id': _uuid()}], + 'subnet': {'cidr': '172.24.4.0/24', + 'gateway_ip': '172.24.4.1'}, + 'ip_cidr': '172.24.4.226/28'} + int_ports = [] + + router = { + 'id': router_id, + l3_constants.INTERFACE_KEY: int_ports, + 'routes': [], + 'gw_port': ex_gw_port} + if enable_snat is not None: + router['enable_snat'] = enable_snat + + ri = l3_agent.RouterInfo(router['id'], self.conf.root_helper, + self.conf.use_namespaces, router=router) + return ri + + def test_agent_add_internal_network(self): + router = self._create_router() + try: + router.rest.auth() + except Exception: + # skip the test, firewall is not deployed + return + + ri = self._prepare_router_data(enable_snat=True) + router._router_added(ri.router['id'], ri.router) + + url = varmour_utils.REST_URL_CONF_NAT_RULE + prefix = varmour_utils.get_snat_rule_name(ri) + + router.process_router(ri) + n = router.rest.count_cfg_objs(url, prefix) + self.assertEqual(n, 0, 'prefix %s' % prefix) + + self._add_internal_ports(ri.router, port_count=1) + router.process_router(ri) + n = router.rest.count_cfg_objs(url, prefix) + self.assertEqual(n, 1, 'prefix %s' % prefix) + + router._router_removed(ri.router['id']) + n = router.rest.count_cfg_objs(url, prefix) + self.assertEqual(n, 0, 'prefix %s' % prefix) + + def test_agent_remove_internal_network(self): + router = self._create_router() + try: + router.rest.auth() + except Exception: + # skip the test, firewall is not deployed + return + + ri = self._prepare_router_data(enable_snat=True) + router._router_added(ri.router['id'], ri.router) + + url = varmour_utils.REST_URL_CONF_NAT_RULE + prefix = varmour_utils.get_snat_rule_name(ri) + + self._add_internal_ports(ri.router, port_count=2) + router.process_router(ri) + n = 
router.rest.count_cfg_objs(url, prefix) + self.assertEqual(n, 2, 'prefix %s' % prefix) + + self._del_internal_ports(ri.router, 0) + router.process_router(ri) + n = router.rest.count_cfg_objs(url, prefix) + self.assertEqual(n, 1, 'prefix %s' % prefix) + + self._del_all_internal_ports(ri.router) + router.process_router(ri) + n = router.rest.count_cfg_objs(url, prefix) + self.assertEqual(n, 0, 'prefix %s' % prefix) + + router._router_removed(ri.router['id']) + n = router.rest.count_cfg_objs(url, prefix) + self.assertEqual(n, 0, 'prefix %s' % prefix) + + def test_agent_add_floating_ips(self): + router = self._create_router() + try: + router.rest.auth() + except Exception: + # skip the test, firewall is not deployed + return + + ri = self._prepare_router_data(enable_snat=True) + self._add_internal_ports(ri.router, port_count=1) + router._router_added(ri.router['id'], ri.router) + + url = varmour_utils.REST_URL_CONF_NAT_RULE + prefix = varmour_utils.get_dnat_rule_name(ri) + + self._add_floating_ips(ri.router, port_count=1) + router.process_router(ri) + n = router.rest.count_cfg_objs(url, prefix) + self.assertEqual(n, 1, 'prefix %s' % prefix) + + self._add_floating_ips(ri.router, port_count=2) + router.process_router(ri) + n = router.rest.count_cfg_objs(url, prefix) + self.assertEqual(n, 2, 'prefix %s' % prefix) + + router._router_removed(ri.router['id']) + n = router.rest.count_cfg_objs(url, prefix) + self.assertEqual(n, 0, 'prefix %s' % prefix) + + def test_agent_remove_floating_ips(self): + router = self._create_router() + try: + router.rest.auth() + except Exception: + # skip the test, firewall is not deployed + return + + ri = self._prepare_router_data(enable_snat=True) + self._add_internal_ports(ri.router, port_count=1) + self._add_floating_ips(ri.router, port_count=2) + router._router_added(ri.router['id'], ri.router) + + url = varmour_utils.REST_URL_CONF_NAT_RULE + prefix = varmour_utils.get_dnat_rule_name(ri) + + router.process_router(ri) + n = 
router.rest.count_cfg_objs(url, prefix) + self.assertEqual(n, 2, 'prefix %s' % prefix) + + self._del_floating_ips(ri.router, 0) + router.process_router(ri) + n = router.rest.count_cfg_objs(url, prefix) + self.assertEqual(n, 1, 'prefix %s' % prefix) + + self._del_all_floating_ips(ri.router) + router.process_router(ri) + n = router.rest.count_cfg_objs(url, prefix) + self.assertEqual(n, 0, 'prefix %s' % prefix) + + router._router_removed(ri.router['id']) + n = router.rest.count_cfg_objs(url, prefix) + self.assertEqual(n, 0, 'prefix %s' % prefix) + + def test_agent_external_gateway(self): + router = self._create_router() + try: + router.rest.auth() + except Exception: + # skip the test, firewall is not deployed + return + + ri = self._prepare_router_data(enable_snat=True) + router._router_added(ri.router['id'], ri.router) + + url = varmour_utils.REST_URL_CONF_ZONE + prefix = varmour_utils.get_untrusted_zone_name(ri) + + router.process_router(ri) + n = router.rest.count_cfg_objs(url, prefix) + self.assertEqual(n, 1, 'prefix %s' % prefix) + + del ri.router['gw_port'] + router.process_router(ri) + n = router.rest.count_cfg_objs(url, prefix) + self.assertEqual(n, 1, 'prefix %s' % prefix) + + router._router_removed(ri.router['id']) + n = router.rest.count_cfg_objs(url, prefix) + self.assertEqual(n, 0, 'prefix %s' % prefix) + + def test_agent_snat_enable(self): + router = self._create_router() + try: + router.rest.auth() + except Exception: + # skip the test, firewall is not deployed + return + + ri = self._prepare_router_data(enable_snat=True) + router._router_added(ri.router['id'], ri.router) + + url = varmour_utils.REST_URL_CONF_NAT_RULE + prefix = varmour_utils.get_snat_rule_name(ri) + + router.process_router(ri) + n = router.rest.count_cfg_objs(url, prefix) + self.assertEqual(n, 0, 'prefix %s' % prefix) + + ri.router['enable_snat'] = False + router.process_router(ri) + n = router.rest.count_cfg_objs(url, prefix) + self.assertEqual(n, 0, 'prefix %s' % prefix) + + 
router._router_removed(ri.router['id']) + n = router.rest.count_cfg_objs(url, prefix) + self.assertEqual(n, 0, 'prefix %s' % prefix) diff --git a/neutron/tests/unit/services/firewall/drivers/__init__.py b/neutron/tests/unit/services/firewall/drivers/__init__.py new file mode 100644 index 000000000..cae279d0a --- /dev/null +++ b/neutron/tests/unit/services/firewall/drivers/__init__.py @@ -0,0 +1,15 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/neutron/tests/unit/services/firewall/drivers/linux/__init__.py b/neutron/tests/unit/services/firewall/drivers/linux/__init__.py new file mode 100644 index 000000000..cae279d0a --- /dev/null +++ b/neutron/tests/unit/services/firewall/drivers/linux/__init__.py @@ -0,0 +1,15 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
diff --git a/neutron/tests/unit/services/firewall/drivers/linux/test_iptables_fwaas.py b/neutron/tests/unit/services/firewall/drivers/linux/test_iptables_fwaas.py new file mode 100644 index 000000000..4bcef20e6 --- /dev/null +++ b/neutron/tests/unit/services/firewall/drivers/linux/test_iptables_fwaas.py @@ -0,0 +1,218 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013 Dell Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Rajesh Mohan, Rajesh_Mohan3@Dell.com, DELL Inc. 
+ +import mock +from oslo.config import cfg + +from neutron.agent.common import config as a_cfg +import neutron.services.firewall.drivers.linux.iptables_fwaas as fwaas +from neutron.tests import base +from neutron.tests.unit import test_api_v2 + + +_uuid = test_api_v2._uuid +FAKE_SRC_PREFIX = '10.0.0.0/24' +FAKE_DST_PREFIX = '20.0.0.0/24' +FAKE_PROTOCOL = 'tcp' +FAKE_SRC_PORT = 5000 +FAKE_DST_PORT = 22 +FAKE_FW_ID = 'fake-fw-uuid' + + +class IptablesFwaasTestCase(base.BaseTestCase): + def setUp(self): + super(IptablesFwaasTestCase, self).setUp() + cfg.CONF.register_opts(a_cfg.ROOT_HELPER_OPTS, 'AGENT') + self.utils_exec_p = mock.patch( + 'neutron.agent.linux.utils.execute') + self.utils_exec = self.utils_exec_p.start() + self.iptables_cls_p = mock.patch( + 'neutron.agent.linux.iptables_manager.IptablesManager') + self.iptables_cls_p.start() + self.firewall = fwaas.IptablesFwaasDriver() + + def _fake_rules_v4(self, fwid, apply_list): + rule_list = [] + rule1 = {'enabled': True, + 'action': 'allow', + 'ip_version': 4, + 'protocol': 'tcp', + 'destination_port': '80', + 'source_ip_address': '10.24.4.2'} + rule2 = {'enabled': True, + 'action': 'deny', + 'ip_version': 4, + 'protocol': 'tcp', + 'destination_port': '22'} + ingress_chain = ('iv4%s' % fwid)[:11] + egress_chain = ('ov4%s' % fwid)[:11] + for router_info_inst in apply_list: + v4filter_inst = router_info_inst.iptables_manager.ipv4['filter'] + v4filter_inst.chains.append(ingress_chain) + v4filter_inst.chains.append(egress_chain) + rule_list.append(rule1) + rule_list.append(rule2) + return rule_list + + def _fake_firewall_no_rule(self): + rule_list = [] + fw_inst = {'id': FAKE_FW_ID, + 'admin_state_up': True, + 'tenant_id': 'tenant-uuid', + 'firewall_rule_list': rule_list} + return fw_inst + + def _fake_firewall(self, rule_list): + fw_inst = {'id': FAKE_FW_ID, + 'admin_state_up': True, + 'tenant_id': 'tenant-uuid', + 'firewall_rule_list': rule_list} + return fw_inst + + def _fake_firewall_with_admin_down(self, 
rule_list): + fw_inst = {'id': FAKE_FW_ID, + 'admin_state_up': False, + 'tenant_id': 'tenant-uuid', + 'firewall_rule_list': rule_list} + return fw_inst + + def _fake_apply_list(self, router_count=1): + apply_list = [] + while router_count > 0: + iptables_inst = mock.Mock() + v4filter_inst = mock.Mock() + v6filter_inst = mock.Mock() + v4filter_inst.chains = [] + v6filter_inst.chains = [] + iptables_inst.ipv4 = {'filter': v4filter_inst} + iptables_inst.ipv6 = {'filter': v6filter_inst} + router_info_inst = mock.Mock() + router_info_inst.iptables_manager = iptables_inst + apply_list.append(router_info_inst) + router_count -= 1 + return apply_list + + def _setup_firewall_with_rules(self, func, router_count=1): + apply_list = self._fake_apply_list(router_count=router_count) + rule_list = self._fake_rules_v4(FAKE_FW_ID, apply_list) + firewall = self._fake_firewall(rule_list) + func(apply_list, firewall) + invalid_rule = '-m state --state INVALID -j DROP' + est_rule = '-m state --state ESTABLISHED,RELATED -j ACCEPT' + rule1 = '-p tcp --dport 80 -s 10.24.4.2 -j ACCEPT' + rule2 = '-p tcp --dport 22 -j DROP' + ingress_chain = 'iv4%s' % firewall['id'] + egress_chain = 'ov4%s' % firewall['id'] + bname = fwaas.iptables_manager.binary_name + ipt_mgr_ichain = '%s-%s' % (bname, ingress_chain[:11]) + ipt_mgr_echain = '%s-%s' % (bname, egress_chain[:11]) + for router_info_inst in apply_list: + v4filter_inst = router_info_inst.iptables_manager.ipv4['filter'] + calls = [mock.call.ensure_remove_chain('iv4fake-fw-uuid'), + mock.call.ensure_remove_chain('ov4fake-fw-uuid'), + mock.call.ensure_remove_chain('fwaas-default-policy'), + mock.call.add_chain('fwaas-default-policy'), + mock.call.add_rule('fwaas-default-policy', '-j DROP'), + mock.call.add_chain(ingress_chain), + mock.call.add_rule(ingress_chain, invalid_rule), + mock.call.add_rule(ingress_chain, est_rule), + mock.call.add_chain(egress_chain), + mock.call.add_rule(egress_chain, invalid_rule), + mock.call.add_rule(egress_chain, 
est_rule), + mock.call.add_rule(ingress_chain, rule1), + mock.call.add_rule(egress_chain, rule1), + mock.call.add_rule(ingress_chain, rule2), + mock.call.add_rule(egress_chain, rule2), + mock.call.add_rule('FORWARD', + '-o qr-+ -j %s' % ipt_mgr_ichain), + mock.call.add_rule('FORWARD', + '-i qr-+ -j %s' % ipt_mgr_echain), + mock.call.add_rule('FORWARD', + '-o qr-+ -j %s-fwaas-defau' % bname), + mock.call.add_rule('FORWARD', + '-i qr-+ -j %s-fwaas-defau' % bname)] + v4filter_inst.assert_has_calls(calls) + + def test_create_firewall_no_rules(self): + apply_list = self._fake_apply_list() + firewall = self._fake_firewall_no_rule() + self.firewall.create_firewall(apply_list, firewall) + invalid_rule = '-m state --state INVALID -j DROP' + est_rule = '-m state --state ESTABLISHED,RELATED -j ACCEPT' + bname = fwaas.iptables_manager.binary_name + + for ip_version in (4, 6): + ingress_chain = ('iv%s%s' % (ip_version, firewall['id'])) + egress_chain = ('ov%s%s' % (ip_version, firewall['id'])) + calls = [mock.call.ensure_remove_chain( + 'iv%sfake-fw-uuid' % ip_version), + mock.call.ensure_remove_chain( + 'ov%sfake-fw-uuid' % ip_version), + mock.call.ensure_remove_chain('fwaas-default-policy'), + mock.call.add_chain('fwaas-default-policy'), + mock.call.add_rule('fwaas-default-policy', '-j DROP'), + mock.call.add_chain(ingress_chain), + mock.call.add_rule(ingress_chain, invalid_rule), + mock.call.add_rule(ingress_chain, est_rule), + mock.call.add_chain(egress_chain), + mock.call.add_rule(egress_chain, invalid_rule), + mock.call.add_rule(egress_chain, est_rule), + mock.call.add_rule('FORWARD', + '-o qr-+ -j %s-fwaas-defau' % bname), + mock.call.add_rule('FORWARD', + '-i qr-+ -j %s-fwaas-defau' % bname)] + if ip_version == 4: + v4filter_inst = apply_list[0].iptables_manager.ipv4['filter'] + v4filter_inst.assert_has_calls(calls) + else: + v6filter_inst = apply_list[0].iptables_manager.ipv6['filter'] + v6filter_inst.assert_has_calls(calls) + + def 
test_create_firewall_with_rules(self): + self._setup_firewall_with_rules(self.firewall.create_firewall) + + def test_create_firewall_with_rules_two_routers(self): + self._setup_firewall_with_rules(self.firewall.create_firewall, + router_count=2) + + def test_update_firewall_with_rules(self): + self._setup_firewall_with_rules(self.firewall.update_firewall) + + def test_delete_firewall(self): + apply_list = self._fake_apply_list() + firewall = self._fake_firewall_no_rule() + self.firewall.delete_firewall(apply_list, firewall) + ingress_chain = 'iv4%s' % firewall['id'] + egress_chain = 'ov4%s' % firewall['id'] + calls = [mock.call.ensure_remove_chain(ingress_chain), + mock.call.ensure_remove_chain(egress_chain), + mock.call.ensure_remove_chain('fwaas-default-policy')] + apply_list[0].iptables_manager.ipv4['filter'].assert_has_calls(calls) + + def test_create_firewall_with_admin_down(self): + apply_list = self._fake_apply_list() + rule_list = self._fake_rules_v4(FAKE_FW_ID, apply_list) + firewall = self._fake_firewall_with_admin_down(rule_list) + self.firewall.create_firewall(apply_list, firewall) + calls = [mock.call.ensure_remove_chain('iv4fake-fw-uuid'), + mock.call.ensure_remove_chain('ov4fake-fw-uuid'), + mock.call.ensure_remove_chain('fwaas-default-policy'), + mock.call.add_chain('fwaas-default-policy'), + mock.call.add_rule('fwaas-default-policy', '-j DROP')] + apply_list[0].iptables_manager.ipv4['filter'].assert_has_calls(calls) diff --git a/neutron/tests/unit/services/firewall/drivers/varmour/__init__.py b/neutron/tests/unit/services/firewall/drivers/varmour/__init__.py new file mode 100755 index 000000000..5e8da711f --- /dev/null +++ b/neutron/tests/unit/services/firewall/drivers/varmour/__init__.py @@ -0,0 +1,16 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 OpenStack Foundation. +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/neutron/tests/unit/services/firewall/drivers/varmour/test_varmour_fwaas.py b/neutron/tests/unit/services/firewall/drivers/varmour/test_varmour_fwaas.py new file mode 100644 index 000000000..c65af6bc5 --- /dev/null +++ b/neutron/tests/unit/services/firewall/drivers/varmour/test_varmour_fwaas.py @@ -0,0 +1,290 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013 vArmour Networks Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Gary Duan, vArmour Networks Inc. 
+# + + +import mock +from oslo.config import cfg + +from neutron.agent.common import config as agent_config +from neutron.agent import l3_agent +from neutron.agent.linux import interface +from neutron.common import config as base_config +from neutron.common import constants as l3_constants +from neutron.openstack.common import uuidutils +from neutron.services.firewall.agents.varmour import varmour_router +from neutron.services.firewall.agents.varmour import varmour_utils +from neutron.services.firewall.drivers.varmour import varmour_fwaas +from neutron.tests import base + +_uuid = uuidutils.generate_uuid +HOSTNAME = 'myhost' +FAKE_DIRECTOR = '1.1.1.1' + + +class TestBasicRouterOperations(base.BaseTestCase): + + def setUp(self): + super(TestBasicRouterOperations, self).setUp() + self.conf = cfg.ConfigOpts() + self.conf.register_opts(base_config.core_opts) + self.conf.register_opts(varmour_router.vArmourL3NATAgent.OPTS) + agent_config.register_interface_driver_opts_helper(self.conf) + agent_config.register_use_namespaces_opts_helper(self.conf) + agent_config.register_root_helper(self.conf) + self.conf.register_opts(interface.OPTS) + self.conf.set_override('interface_driver', + 'neutron.agent.linux.interface.NullDriver') + self.conf.root_helper = 'sudo' + + self.device_exists_p = mock.patch( + 'neutron.agent.linux.ip_lib.device_exists') + self.device_exists = self.device_exists_p.start() + + self.utils_exec_p = mock.patch( + 'neutron.agent.linux.utils.execute') + self.utils_exec = self.utils_exec_p.start() + + self.external_process_p = mock.patch( + 'neutron.agent.linux.external_process.ProcessManager') + self.external_process = self.external_process_p.start() + + self.dvr_cls_p = mock.patch('neutron.agent.linux.interface.NullDriver') + driver_cls = self.dvr_cls_p.start() + self.mock_driver = mock.MagicMock() + self.mock_driver.DEV_NAME_LEN = ( + interface.LinuxInterfaceDriver.DEV_NAME_LEN) + driver_cls.return_value = self.mock_driver + + self.ip_cls_p = 
mock.patch('neutron.agent.linux.ip_lib.IPWrapper') + ip_cls = self.ip_cls_p.start() + self.mock_ip = mock.MagicMock() + ip_cls.return_value = self.mock_ip + + self.looping_call_p = mock.patch( + 'neutron.openstack.common.loopingcall.FixedIntervalLoopingCall') + self.looping_call_p.start() + + def _create_router(self): + router = varmour_router.vArmourL3NATAgent(HOSTNAME, self.conf) + router.rest.server = FAKE_DIRECTOR + router.rest.user = 'varmour' + router.rest.passwd = 'varmour' + return router + + def _create_fwaas(self): + fwaas = varmour_fwaas.vArmourFwaasDriver() + fwaas.rest.server = FAKE_DIRECTOR + fwaas.rest.user = 'varmour' + fwaas.rest.passwd = 'varmour' + return fwaas + + def _del_all_internal_ports(self, router): + router[l3_constants.INTERFACE_KEY] = [] + + def _del_internal_ports(self, router, port_idx): + del router[l3_constants.INTERFACE_KEY][port_idx] + + def _add_internal_ports(self, router, port_count=1): + self._del_all_internal_ports(router) + for i in range(port_count): + port = {'id': _uuid(), + 'network_id': _uuid(), + 'admin_state_up': True, + 'fixed_ips': [{'ip_address': '10.0.%s.4' % i, + 'subnet_id': _uuid()}], + 'mac_address': 'ca:fe:de:ad:be:ef', + 'subnet': {'cidr': '10.0.%s.0/24' % i, + 'gateway_ip': '10.0.%s.1' % i}} + router[l3_constants.INTERFACE_KEY].append(port) + + def _del_all_floating_ips(self, router): + router[l3_constants.FLOATINGIP_KEY] = [] + + def _del_floating_ips(self, router, port_idx): + del router[l3_constants.FLOATINGIP_KEY][port_idx] + + def _add_floating_ips(self, router, port_count=1): + self._del_all_floating_ips(router) + for i in range(port_count): + fip = {'id': _uuid(), + 'port_id': router['gw_port']['id'], + 'floating_ip_address': '172.24.4.%s' % (100 + i), + 'fixed_ip_address': '10.0.0.%s' % (100 + i)} + router[l3_constants.FLOATINGIP_KEY].append(fip) + + def _prepare_router_data(self, enable_snat=None): + router_id = _uuid() + ex_gw_port = {'id': _uuid(), + 'network_id': _uuid(), + 'fixed_ips': 
[{'ip_address': '172.24.4.2', + 'subnet_id': _uuid()}], + 'subnet': {'cidr': '172.24.4.0/24', + 'gateway_ip': '172.24.4.1'}, + 'ip_cidr': '172.24.4.226/28'} + int_ports = [] + + router = { + 'id': router_id, + l3_constants.INTERFACE_KEY: int_ports, + 'routes': [], + 'gw_port': ex_gw_port} + if enable_snat is not None: + router['enable_snat'] = enable_snat + + ri = l3_agent.RouterInfo(router['id'], self.conf.root_helper, + self.conf.use_namespaces, router=router) + return ri + + def _add_firewall_rules(self, fw, rule_count=1): + rules = [] + for i in range(rule_count): + rule = {'id': _uuid(), + 'enabled': True, + 'action': 'deny' if (i % 2 == 0) else 'allow', + 'ip_version': 4, + 'protocol': 'tcp', + 'source_ip_address': '10.0.0.%s/24' % (100 + i), + 'destination_port': '%s' % (100 + i)} + rules.append(rule) + fw['firewall_rule_list'] = rules + + def _prepare_firewall_data(self): + fw = {'id': _uuid(), + 'admin_state_up': True, + 'firewall_rule_list': []} + return fw + + def test_firewall_without_rule(self): + router = self._create_router() + fwaas = self._create_fwaas() + try: + router.rest.auth() + except Exception: + # skip the test, firewall is not deployed + return + + ri = self._prepare_router_data(enable_snat=True) + self._add_internal_ports(ri.router, port_count=1) + self._add_floating_ips(ri.router, port_count=1) + router._router_added(ri.router['id'], ri.router) + + rl = [ri] + + fw = self._prepare_firewall_data() + fwaas.create_firewall(rl, fw) + + url = varmour_utils.REST_URL_CONF_POLICY + prefix = varmour_utils.get_firewall_object_prefix(ri, fw) + + n = fwaas.rest.count_cfg_objs(url, prefix) + self.assertEqual(n, 0) + + fwaas.delete_firewall(rl, fw) + n = fwaas.rest.count_cfg_objs(url, prefix) + self.assertEqual(n, 0) + + router._router_removed(ri.router['id']) + + def test_firewall_with_rules(self): + router = self._create_router() + fwaas = self._create_fwaas() + try: + router.rest.auth() + except Exception: + # skip the test, firewall is not 
deployed + return + + ri = self._prepare_router_data(enable_snat=True) + self._add_internal_ports(ri.router, port_count=1) + self._add_floating_ips(ri.router, port_count=1) + router._router_added(ri.router['id'], ri.router) + + rl = [ri] + + fw = self._prepare_firewall_data() + self._add_firewall_rules(fw, 2) + fwaas.create_firewall(rl, fw) + + prefix = varmour_utils.get_firewall_object_prefix(ri, fw) + pol_url = varmour_utils.REST_URL_CONF_POLICY + serv_url = varmour_utils.REST_URL_CONF_SERVICE + addr_url = varmour_utils.REST_URL_CONF_ADDR + + # 3x number of policies + n = fwaas.rest.count_cfg_objs(pol_url, prefix) + self.assertEqual(n, 6) + n = fwaas.rest.count_cfg_objs(addr_url, prefix) + self.assertEqual(n, 2) + n = fwaas.rest.count_cfg_objs(serv_url, prefix) + self.assertEqual(n, 2) + + fwaas.delete_firewall(rl, fw) + n = fwaas.rest.count_cfg_objs(pol_url, prefix) + self.assertEqual(n, 0) + + router._router_removed(ri.router['id']) + + def test_firewall_add_remove_rules(self): + router = self._create_router() + fwaas = self._create_fwaas() + try: + router.rest.auth() + except Exception: + # skip the test, firewall is not deployed + return + + ri = self._prepare_router_data(enable_snat=True) + self._add_internal_ports(ri.router, port_count=1) + self._add_floating_ips(ri.router, port_count=1) + router._router_added(ri.router['id'], ri.router) + + rl = [ri] + + fw = self._prepare_firewall_data() + self._add_firewall_rules(fw, 2) + fwaas.create_firewall(rl, fw) + + prefix = varmour_utils.get_firewall_object_prefix(ri, fw) + pol_url = varmour_utils.REST_URL_CONF_POLICY + serv_url = varmour_utils.REST_URL_CONF_SERVICE + addr_url = varmour_utils.REST_URL_CONF_ADDR + + # 3x number of policies + n = fwaas.rest.count_cfg_objs(pol_url, prefix) + self.assertEqual(n, 6) + n = fwaas.rest.count_cfg_objs(addr_url, prefix) + self.assertEqual(n, 2) + n = fwaas.rest.count_cfg_objs(serv_url, prefix) + self.assertEqual(n, 2) + + self._add_firewall_rules(fw, 1) + 
fwaas.create_firewall(rl, fw) + n = fwaas.rest.count_cfg_objs(pol_url, prefix) + self.assertEqual(n, 3) + n = fwaas.rest.count_cfg_objs(addr_url, prefix) + self.assertEqual(n, 1) + n = fwaas.rest.count_cfg_objs(serv_url, prefix) + self.assertEqual(n, 1) + + fwaas.delete_firewall(rl, fw) + n = fwaas.rest.count_cfg_objs(pol_url, prefix) + self.assertEqual(n, 0) + + router._router_removed(ri.router['id']) diff --git a/neutron/tests/unit/services/firewall/test_fwaas_plugin.py b/neutron/tests/unit/services/firewall/test_fwaas_plugin.py new file mode 100644 index 000000000..2430d69c6 --- /dev/null +++ b/neutron/tests/unit/services/firewall/test_fwaas_plugin.py @@ -0,0 +1,401 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013 Big Switch Networks, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Sumit Naiksatam, sumitnaiksatam@gmail.com, Big Switch Networks, Inc. 
+ + +import contextlib + +import mock +from webob import exc + +from neutron import context +from neutron.extensions import firewall +from neutron.plugins.common import constants as const +from neutron.services.firewall import fwaas_plugin +from neutron.tests import base +from neutron.tests.unit.db.firewall import test_db_firewall + + +FW_PLUGIN_KLASS = ( + "neutron.services.firewall.fwaas_plugin.FirewallPlugin" +) + + +class TestFirewallCallbacks(test_db_firewall.FirewallPluginDbTestCase): + + def setUp(self): + super(TestFirewallCallbacks, + self).setUp(fw_plugin=FW_PLUGIN_KLASS) + self.callbacks = self.plugin.endpoints[0] + + def test_set_firewall_status(self): + ctx = context.get_admin_context() + with self.firewall_policy() as fwp: + fwp_id = fwp['firewall_policy']['id'] + with self.firewall(firewall_policy_id=fwp_id, + admin_state_up= + test_db_firewall.ADMIN_STATE_UP) as fw: + fw_id = fw['firewall']['id'] + res = self.callbacks.set_firewall_status(ctx, fw_id, + const.ACTIVE, + host='dummy') + fw_db = self.plugin.get_firewall(ctx, fw_id) + self.assertEqual(fw_db['status'], const.ACTIVE) + self.assertTrue(res) + res = self.callbacks.set_firewall_status(ctx, fw_id, + const.ERROR) + fw_db = self.plugin.get_firewall(ctx, fw_id) + self.assertEqual(fw_db['status'], const.ERROR) + self.assertFalse(res) + + def test_set_firewall_status_pending_delete(self): + ctx = context.get_admin_context() + with self.firewall_policy() as fwp: + fwp_id = fwp['firewall_policy']['id'] + with self.firewall(firewall_policy_id=fwp_id, + admin_state_up= + test_db_firewall.ADMIN_STATE_UP) as fw: + fw_id = fw['firewall']['id'] + fw_db = self.plugin._get_firewall(ctx, fw_id) + fw_db['status'] = const.PENDING_DELETE + ctx.session.flush() + res = self.callbacks.set_firewall_status(ctx, fw_id, + const.ACTIVE, + host='dummy') + fw_db = self.plugin.get_firewall(ctx, fw_id) + self.assertEqual(fw_db['status'], const.PENDING_DELETE) + self.assertFalse(res) + + def test_firewall_deleted(self): + 
ctx = context.get_admin_context() + with self.firewall_policy() as fwp: + fwp_id = fwp['firewall_policy']['id'] + with self.firewall(firewall_policy_id=fwp_id, + admin_state_up=test_db_firewall.ADMIN_STATE_UP, + no_delete=True) as fw: + fw_id = fw['firewall']['id'] + with ctx.session.begin(subtransactions=True): + fw_db = self.plugin._get_firewall(ctx, fw_id) + fw_db['status'] = const.PENDING_DELETE + ctx.session.flush() + res = self.callbacks.firewall_deleted(ctx, fw_id, + host='dummy') + self.assertTrue(res) + self.assertRaises(firewall.FirewallNotFound, + self.plugin.get_firewall, + ctx, fw_id) + + def test_firewall_deleted_error(self): + ctx = context.get_admin_context() + with self.firewall_policy() as fwp: + fwp_id = fwp['firewall_policy']['id'] + with self.firewall( + firewall_policy_id=fwp_id, + admin_state_up=test_db_firewall.ADMIN_STATE_UP, + ) as fw: + fw_id = fw['firewall']['id'] + res = self.callbacks.firewall_deleted(ctx, fw_id, + host='dummy') + self.assertFalse(res) + fw_db = self.plugin._get_firewall(ctx, fw_id) + self.assertEqual(fw_db['status'], const.ERROR) + + def test_get_firewall_for_tenant(self): + tenant_id = 'test-tenant' + ctx = context.Context('', tenant_id) + with contextlib.nested(self.firewall_rule(name='fwr1', + tenant_id=tenant_id), + self.firewall_rule(name='fwr2', + tenant_id=tenant_id), + self.firewall_rule(name='fwr3', + tenant_id=tenant_id) + ) as fr: + with self.firewall_policy(tenant_id=tenant_id) as fwp: + fwp_id = fwp['firewall_policy']['id'] + fw_rule_ids = [r['firewall_rule']['id'] for r in fr] + data = {'firewall_policy': + {'firewall_rules': fw_rule_ids}} + req = self.new_update_request('firewall_policies', data, + fwp_id) + res = req.get_response(self.ext_api) + attrs = self._get_test_firewall_attrs() + attrs['firewall_policy_id'] = fwp_id + with self.firewall(firewall_policy_id=fwp_id, + tenant_id=tenant_id, + admin_state_up= + test_db_firewall.ADMIN_STATE_UP) as fw: + fw_id = fw['firewall']['id'] + res = 
self.callbacks.get_firewalls_for_tenant(ctx, + host='dummy') + fw_rules = ( + self.plugin._make_firewall_dict_with_rules(ctx, + fw_id) + ) + self.assertEqual(res[0], fw_rules) + self._compare_firewall_rule_lists( + fwp_id, fr, res[0]['firewall_rule_list']) + + def test_get_firewall_for_tenant_without_rules(self): + tenant_id = 'test-tenant' + ctx = context.Context('', tenant_id) + with self.firewall_policy(tenant_id=tenant_id) as fwp: + fwp_id = fwp['firewall_policy']['id'] + attrs = self._get_test_firewall_attrs() + attrs['firewall_policy_id'] = fwp_id + with self.firewall(firewall_policy_id=fwp_id, tenant_id=tenant_id, + admin_state_up=test_db_firewall.ADMIN_STATE_UP + ) as fw: + fw_list = [fw['firewall']] + f = self.callbacks.get_firewalls_for_tenant_without_rules + res = f(ctx, host='dummy') + for fw in res: + del fw['shared'] + self.assertEqual(res, fw_list) + + +class TestFirewallAgentApi(base.BaseTestCase): + def setUp(self): + super(TestFirewallAgentApi, self).setUp() + + self.api = fwaas_plugin.FirewallAgentApi('topic', 'host') + self.mock_fanoutcast = mock.patch.object(self.api, + 'fanout_cast').start() + self.mock_msg = mock.patch.object(self.api, 'make_msg').start() + + def test_init(self): + self.assertEqual(self.api.topic, 'topic') + self.assertEqual(self.api.host, 'host') + + def _call_test_helper(self, method_name): + rv = getattr(self.api, method_name)(mock.sentinel.context, 'test') + self.assertEqual(rv, self.mock_fanoutcast.return_value) + self.mock_fanoutcast.assert_called_once_with( + mock.sentinel.context, + self.mock_msg.return_value, + topic='topic' + ) + + self.mock_msg.assert_called_once_with( + method_name, + firewall='test', + host='host' + ) + + def test_create_firewall(self): + self._call_test_helper('create_firewall') + + def test_update_firewall(self): + self._call_test_helper('update_firewall') + + def test_delete_firewall(self): + self._call_test_helper('delete_firewall') + + +class 
TestFirewallPluginBase(test_db_firewall.TestFirewallDBPlugin): + + def setUp(self): + super(TestFirewallPluginBase, self).setUp(fw_plugin=FW_PLUGIN_KLASS) + self.callbacks = self.plugin.endpoints[0] + + def test_create_second_firewall_not_permitted(self): + with self.firewall(): + res = self._create_firewall( + None, 'firewall2', description='test', + firewall_policy_id=None, admin_state_up=True) + self.assertEqual(res.status_int, exc.HTTPConflict.code) + + def test_create_firewall_admin_not_affected_by_other_tenant(self): + # Create fw with admin after creating fw with other tenant + with self.firewall(tenant_id='other-tenant') as fw1: + with self.firewall() as fw2: + self.assertEqual('other-tenant', fw1['firewall']['tenant_id']) + self.assertEqual(self._tenant_id, fw2['firewall']['tenant_id']) + + def test_update_firewall(self): + ctx = context.get_admin_context() + name = "new_firewall1" + attrs = self._get_test_firewall_attrs(name) + + with self.firewall_policy() as fwp: + fwp_id = fwp['firewall_policy']['id'] + attrs['firewall_policy_id'] = fwp_id + with self.firewall(firewall_policy_id=fwp_id, + admin_state_up= + test_db_firewall.ADMIN_STATE_UP) as firewall: + fw_id = firewall['firewall']['id'] + res = self.callbacks.set_firewall_status(ctx, fw_id, + const.ACTIVE) + data = {'firewall': {'name': name}} + req = self.new_update_request('firewalls', data, fw_id) + res = self.deserialize(self.fmt, + req.get_response(self.ext_api)) + attrs = self._replace_firewall_status(attrs, + const.PENDING_CREATE, + const.PENDING_UPDATE) + for k, v in attrs.iteritems(): + self.assertEqual(res['firewall'][k], v) + + def test_update_firewall_fails_when_firewall_pending(self): + name = "new_firewall1" + attrs = self._get_test_firewall_attrs(name) + + with self.firewall_policy() as fwp: + fwp_id = fwp['firewall_policy']['id'] + attrs['firewall_policy_id'] = fwp_id + with self.firewall(firewall_policy_id=fwp_id, + admin_state_up= + test_db_firewall.ADMIN_STATE_UP) as firewall: + 
fw_id = firewall['firewall']['id'] + data = {'firewall': {'name': name}} + req = self.new_update_request('firewalls', data, fw_id) + res = req.get_response(self.ext_api) + self.assertEqual(res.status_int, exc.HTTPConflict.code) + + def test_update_firewall_shared_fails_for_non_admin(self): + ctx = context.get_admin_context() + with self.firewall_policy() as fwp: + fwp_id = fwp['firewall_policy']['id'] + with self.firewall(firewall_policy_id=fwp_id, + admin_state_up= + test_db_firewall.ADMIN_STATE_UP, + tenant_id='noadmin') as firewall: + fw_id = firewall['firewall']['id'] + self.callbacks.set_firewall_status(ctx, fw_id, + const.ACTIVE) + data = {'firewall': {'shared': True}} + req = self.new_update_request( + 'firewalls', data, fw_id, + context=context.Context('', 'noadmin')) + res = req.get_response(self.ext_api) + # returns 404 due to security reasons + self.assertEqual(res.status_int, exc.HTTPNotFound.code) + + def test_update_firewall_policy_fails_when_firewall_pending(self): + name = "new_firewall1" + attrs = self._get_test_firewall_attrs(name) + + with self.firewall_policy() as fwp: + fwp_id = fwp['firewall_policy']['id'] + attrs['firewall_policy_id'] = fwp_id + with self.firewall(firewall_policy_id=fwp_id, + admin_state_up= + test_db_firewall.ADMIN_STATE_UP): + data = {'firewall_policy': {'name': name}} + req = self.new_update_request('firewall_policies', + data, fwp_id) + res = req.get_response(self.ext_api) + self.assertEqual(res.status_int, exc.HTTPConflict.code) + + def test_update_firewall_rule_fails_when_firewall_pending(self): + with self.firewall_rule(name='fwr1') as fr: + with self.firewall_policy() as fwp: + fwp_id = fwp['firewall_policy']['id'] + fr_id = fr['firewall_rule']['id'] + fw_rule_ids = [fr_id] + data = {'firewall_policy': + {'firewall_rules': fw_rule_ids}} + req = self.new_update_request('firewall_policies', data, + fwp_id) + req.get_response(self.ext_api) + with self.firewall(firewall_policy_id=fwp_id, + admin_state_up= + 
test_db_firewall.ADMIN_STATE_UP): + data = {'firewall_rule': {'protocol': 'udp'}} + req = self.new_update_request('firewall_rules', + data, fr_id) + res = req.get_response(self.ext_api) + self.assertEqual(res.status_int, exc.HTTPConflict.code) + + def test_delete_firewall(self): + ctx = context.get_admin_context() + attrs = self._get_test_firewall_attrs() + # stop the AgentRPC patch for this one to test pending states + self.agentapi_delf_p.stop() + with self.firewall_policy() as fwp: + fwp_id = fwp['firewall_policy']['id'] + attrs['firewall_policy_id'] = fwp_id + with self.firewall(firewall_policy_id=fwp_id, + admin_state_up= + test_db_firewall.ADMIN_STATE_UP) as firewall: + fw_id = firewall['firewall']['id'] + attrs = self._replace_firewall_status(attrs, + const.PENDING_CREATE, + const.PENDING_DELETE) + req = self.new_delete_request('firewalls', fw_id) + req.get_response(self.ext_api) + fw_db = self.plugin._get_firewall(ctx, fw_id) + for k, v in attrs.iteritems(): + self.assertEqual(fw_db[k], v) + # cleanup the pending firewall + self.plugin.endpoints[0].firewall_deleted(ctx, fw_id) + + def test_delete_firewall_after_agent_delete(self): + ctx = context.get_admin_context() + with self.firewall_policy() as fwp: + fwp_id = fwp['firewall_policy']['id'] + with self.firewall(firewall_policy_id=fwp_id, + no_delete=True) as fw: + fw_id = fw['firewall']['id'] + req = self.new_delete_request('firewalls', fw_id) + res = req.get_response(self.ext_api) + self.assertEqual(res.status_int, exc.HTTPNoContent.code) + self.assertRaises(firewall.FirewallNotFound, + self.plugin.get_firewall, + ctx, fw_id) + + def test_make_firewall_dict_with_in_place_rules(self): + ctx = context.get_admin_context() + with contextlib.nested(self.firewall_rule(name='fwr1'), + self.firewall_rule(name='fwr2'), + self.firewall_rule(name='fwr3')) as fr: + with self.firewall_policy() as fwp: + fwp_id = fwp['firewall_policy']['id'] + fw_rule_ids = [r['firewall_rule']['id'] for r in fr] + data = 
{'firewall_policy': + {'firewall_rules': fw_rule_ids}} + req = self.new_update_request('firewall_policies', data, + fwp_id) + req.get_response(self.ext_api) + attrs = self._get_test_firewall_attrs() + attrs['firewall_policy_id'] = fwp_id + with self.firewall(firewall_policy_id=fwp_id, + admin_state_up= + test_db_firewall.ADMIN_STATE_UP) as fw: + fw_id = fw['firewall']['id'] + fw_rules = ( + self.plugin._make_firewall_dict_with_rules(ctx, + fw_id) + ) + self.assertEqual(fw_rules['id'], fw_id) + self._compare_firewall_rule_lists( + fwp_id, fr, fw_rules['firewall_rule_list']) + + def test_make_firewall_dict_with_in_place_rules_no_policy(self): + ctx = context.get_admin_context() + with self.firewall() as fw: + fw_id = fw['firewall']['id'] + fw_rules = self.plugin._make_firewall_dict_with_rules(ctx, fw_id) + self.assertEqual(fw_rules['firewall_rule_list'], []) + + def test_list_firewalls(self): + with self.firewall_policy() as fwp: + fwp_id = fwp['firewall_policy']['id'] + with self.firewall(name='fw1', firewall_policy_id=fwp_id, + description='fw') as fwalls: + self._test_list_resources('firewall', [fwalls], + query_params='description=fw') diff --git a/neutron/tests/unit/services/l3_router/__init__.py b/neutron/tests/unit/services/l3_router/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/neutron/tests/unit/services/l3_router/test_l3_apic_plugin.py b/neutron/tests/unit/services/l3_router/test_l3_apic_plugin.py new file mode 100644 index 000000000..6bc33ef28 --- /dev/null +++ b/neutron/tests/unit/services/l3_router/test_l3_apic_plugin.py @@ -0,0 +1,134 @@ +# Copyright (c) 2014 Cisco Systems +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Arvind Somya (asomya@cisco.com), Cisco Systems + +import mock + +from neutron.services.l3_router import l3_apic +from neutron.tests import base + +TENANT = 'tenant1' +TENANT_CONTRACT = 'abcd' +ROUTER = 'router1' +SUBNET = 'subnet1' +NETWORK = 'network1' +NETWORK_NAME = 'one_network' +NETWORK_EPG = 'one_network-epg' +TEST_SEGMENT1 = 'test-segment1' +SUBNET_GATEWAY = '10.3.2.1' +SUBNET_CIDR = '10.3.1.0/24' +SUBNET_NETMASK = '24' + + +class FakeContext(object): + def __init__(self): + self.tenant_id = None + + +class FakeContract(object): + def __init__(self): + self.contract_id = '123' + + +class FakeEpg(object): + def __init__(self): + self.epg_id = 'abcd_epg' + + +class FakePort(object): + def __init__(self): + self.id = 'Fake_port_id' + self.network_id = NETWORK + self.subnet_id = SUBNET + + +class TestCiscoApicL3Plugin(base.BaseTestCase): + + def setUp(self): + super(TestCiscoApicL3Plugin, self).setUp() + mock.patch('neutron.plugins.ml2.drivers.cisco.apic.apic_manager.' 
+ 'APICManager').start() + self.plugin = l3_apic.ApicL3ServicePlugin() + self.context = FakeContext() + self.context.tenant_id = TENANT + self.interface_info = {'subnet_id': SUBNET, 'network_id': NETWORK, + 'name': NETWORK_NAME} + + self.contract = FakeContract() + self.plugin.manager.create_tenant_contract = mock.Mock() + ctmk = mock.PropertyMock(return_value=self.contract.contract_id) + type(self.plugin.manager.create_tenant_contract).contract_id = ctmk + self.epg = FakeEpg() + self.plugin.manager.ensure_epg_created_for_network = mock.Mock() + epmk = mock.PropertyMock(return_value=self.epg.epg_id) + type(self.plugin.manager.ensure_epg_created_for_network).epg_id = epmk + + self.plugin.manager.db.get_provider_contract = mock.Mock( + return_value=None) + self.plugin.manager.set_contract_for_epg = mock.Mock( + return_value=True) + + self.plugin.get_subnet = mock.Mock(return_value=self.interface_info) + self.plugin.get_network = mock.Mock(return_value=self.interface_info) + mock.patch('neutron.db.l3_gwmode_db.L3_NAT_db_mixin.' + '_core_plugin').start() + mock.patch('neutron.db.l3_gwmode_db.L3_NAT_db_mixin.' + 'add_router_interface').start() + mock.patch('neutron.db.l3_gwmode_db.L3_NAT_db_mixin.' + 'remove_router_interface').start() + mock.patch('neutron.openstack.common.excutils.' 
+ 'save_and_reraise_exception').start() + + def test_add_router_interface(self): + mgr = self.plugin.manager + self.plugin.add_router_interface(self.context, ROUTER, + self.interface_info) + mgr.create_tenant_contract.assert_called_once_with(TENANT) + mgr.create_tenant_contract.assertEqual(TENANT_CONTRACT) + mgr.ensure_epg_created_for_network.assert_called_once_with( + TENANT, NETWORK, NETWORK_NAME) + mgr.ensure_epg_created_for_network.assertEqual(NETWORK_EPG) + mgr.db.get_provider_contract.assert_called_once() + mgr.db.get_provider_contract.assertEqual(None) + mgr.set_contract_for_epg.assert_called_once() + + def test_remove_router_interface(self): + mgr = self.plugin.manager + self.plugin.remove_router_interface(self.context, ROUTER, + self.interface_info) + mgr.create_tenant_contract.assert_called_once_with(TENANT) + mgr.ensure_epg_created_for_network.assert_called_once_with( + TENANT, NETWORK, NETWORK_NAME) + mgr.ensure_epg_created_for_network.assertEqual(NETWORK_EPG) + mgr.delete_contract_for_epg.assert_called_once() + + def test_add_router_interface_fail_contract_delete(self): + mgr = self.plugin.manager + with mock.patch('neutron.db.l3_gwmode_db.L3_NAT_db_mixin.' + 'add_router_interface', + side_effect=KeyError()): + self.plugin.add_router_interface(self.context, ROUTER, + self.interface_info) + mgr.delete_contract_for_epg.assert_called_once() + + def test_delete_router_interface_fail_contract_create(self): + mgr = self.plugin.manager + with mock.patch('neutron.db.l3_gwmode_db.L3_NAT_db_mixin.' 
+ 'remove_router_interface', + side_effect=KeyError()): + self.plugin.remove_router_interface(self.context, ROUTER, + self.interface_info) + mgr.set_contract_for_epg.assert_called_once() diff --git a/neutron/tests/unit/services/loadbalancer/__init__.py b/neutron/tests/unit/services/loadbalancer/__init__.py new file mode 100644 index 000000000..ce18bf6d6 --- /dev/null +++ b/neutron/tests/unit/services/loadbalancer/__init__.py @@ -0,0 +1,17 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013 New Dream Network, LLC (DreamHost) +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Mark McClain, DreamHost diff --git a/neutron/tests/unit/services/loadbalancer/agent/__init__.py b/neutron/tests/unit/services/loadbalancer/agent/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/neutron/tests/unit/services/loadbalancer/agent/test_agent.py b/neutron/tests/unit/services/loadbalancer/agent/test_agent.py new file mode 100644 index 000000000..955d6e1a8 --- /dev/null +++ b/neutron/tests/unit/services/loadbalancer/agent/test_agent.py @@ -0,0 +1,51 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013 New Dream Network, LLC (DreamHost) +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Mark McClain, DreamHost + +import contextlib +import mock +from oslo.config import cfg + +from neutron.services.loadbalancer.agent import agent +from neutron.tests import base + + +class TestLbaasService(base.BaseTestCase): + def test_start(self): + with mock.patch.object( + agent.rpc_compat.Service, 'start' + ) as mock_start: + + mgr = mock.Mock() + cfg.CONF.periodic_interval = mock.Mock(return_value=10) + agent_service = agent.LbaasAgentService('host', 'topic', mgr) + agent_service.start() + + self.assertTrue(mock_start.called) + + def test_main(self): + logging_str = 'neutron.agent.common.config.setup_logging' + with contextlib.nested( + mock.patch(logging_str), + mock.patch.object(agent.service, 'launch'), + mock.patch('sys.argv'), + mock.patch.object(agent.manager, 'LbaasAgentManager'), + mock.patch.object(cfg.CONF, 'register_opts') + ) as (mock_logging, mock_launch, sys_argv, mgr_cls, ro): + agent.main() + + mock_launch.assert_called_once_with(mock.ANY) diff --git a/neutron/tests/unit/services/loadbalancer/agent/test_agent_manager.py b/neutron/tests/unit/services/loadbalancer/agent/test_agent_manager.py new file mode 100644 index 000000000..6a593b367 --- /dev/null +++ b/neutron/tests/unit/services/loadbalancer/agent/test_agent_manager.py @@ -0,0 +1,371 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013 New Dream Network, LLC (DreamHost) +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Mark McClain, DreamHost + +import contextlib + +import mock + +from neutron.plugins.common import constants +from neutron.services.loadbalancer.agent import agent_manager as manager +from neutron.tests import base + + +class TestManager(base.BaseTestCase): + def setUp(self): + super(TestManager, self).setUp() + + mock_conf = mock.Mock() + mock_conf.device_driver = ['devdriver'] + + self.mock_importer = mock.patch.object(manager, 'importutils').start() + + rpc_mock_cls = mock.patch( + 'neutron.services.loadbalancer.agent.agent_api.LbaasAgentApi' + ).start() + + # disable setting up periodic state reporting + mock_conf.AGENT.report_interval = 0 + + self.mgr = manager.LbaasAgentManager(mock_conf) + self.rpc_mock = rpc_mock_cls.return_value + self.log = mock.patch.object(manager, 'LOG').start() + self.driver_mock = mock.Mock() + self.mgr.device_drivers = {'devdriver': self.driver_mock} + self.mgr.instance_mapping = {'1': 'devdriver', '2': 'devdriver'} + self.mgr.needs_resync = False + + def test_initialize_service_hook(self): + with mock.patch.object(self.mgr, 'sync_state') as sync: + self.mgr.initialize_service_hook(mock.Mock()) + sync.assert_called_once_with() + + def test_periodic_resync_needs_sync(self): + with mock.patch.object(self.mgr, 'sync_state') as sync: + self.mgr.needs_resync = True + self.mgr.periodic_resync(mock.Mock()) + sync.assert_called_once_with() + + def test_periodic_resync_no_sync(self): + with mock.patch.object(self.mgr, 'sync_state') as sync: + self.mgr.needs_resync = False + self.mgr.periodic_resync(mock.Mock()) + 
self.assertFalse(sync.called) + + def test_collect_stats(self): + self.mgr.collect_stats(mock.Mock()) + self.rpc_mock.update_pool_stats.assert_has_calls([ + mock.call('1', mock.ANY), + mock.call('2', mock.ANY) + ]) + + def test_collect_stats_exception(self): + self.driver_mock.get_stats.side_effect = Exception + + self.mgr.collect_stats(mock.Mock()) + + self.assertFalse(self.rpc_mock.called) + self.assertTrue(self.mgr.needs_resync) + self.assertTrue(self.log.exception.called) + + def _sync_state_helper(self, ready, reloaded, destroyed): + with contextlib.nested( + mock.patch.object(self.mgr, '_reload_pool'), + mock.patch.object(self.mgr, '_destroy_pool') + ) as (reload, destroy): + + self.rpc_mock.get_ready_devices.return_value = ready + + self.mgr.sync_state() + + self.assertEqual(len(reloaded), len(reload.mock_calls)) + self.assertEqual(len(destroyed), len(destroy.mock_calls)) + + reload.assert_has_calls([mock.call(i) for i in reloaded]) + destroy.assert_has_calls([mock.call(i) for i in destroyed]) + self.assertFalse(self.mgr.needs_resync) + + def test_sync_state_all_known(self): + self._sync_state_helper(['1', '2'], ['1', '2'], []) + + def test_sync_state_all_unknown(self): + self.mgr.instance_mapping = {} + self._sync_state_helper(['1', '2'], ['1', '2'], []) + + def test_sync_state_destroy_all(self): + self._sync_state_helper([], [], ['1', '2']) + + def test_sync_state_both(self): + self.mgr.instance_mapping = {'1': 'devdriver'} + self._sync_state_helper(['2'], ['2'], ['1']) + + def test_sync_state_exception(self): + self.rpc_mock.get_ready_devices.side_effect = Exception + + self.mgr.sync_state() + + self.assertTrue(self.log.exception.called) + self.assertTrue(self.mgr.needs_resync) + + def test_reload_pool(self): + config = {'driver': 'devdriver'} + self.rpc_mock.get_logical_device.return_value = config + pool_id = 'new_id' + self.assertNotIn(pool_id, self.mgr.instance_mapping) + + self.mgr._reload_pool(pool_id) + + 
self.driver_mock.deploy_instance.assert_called_once_with(config) + self.assertIn(pool_id, self.mgr.instance_mapping) + self.rpc_mock.pool_deployed.assert_called_once_with(pool_id) + + def test_reload_pool_driver_not_found(self): + config = {'driver': 'unknown_driver'} + self.rpc_mock.get_logical_device.return_value = config + pool_id = 'new_id' + self.assertNotIn(pool_id, self.mgr.instance_mapping) + + self.mgr._reload_pool(pool_id) + + self.assertTrue(self.log.error.called) + self.assertFalse(self.driver_mock.deploy_instance.called) + self.assertNotIn(pool_id, self.mgr.instance_mapping) + self.assertFalse(self.rpc_mock.pool_deployed.called) + + def test_reload_pool_exception_on_driver(self): + config = {'driver': 'devdriver'} + self.rpc_mock.get_logical_device.return_value = config + self.driver_mock.deploy_instance.side_effect = Exception + pool_id = 'new_id' + self.assertNotIn(pool_id, self.mgr.instance_mapping) + + self.mgr._reload_pool(pool_id) + + self.driver_mock.deploy_instance.assert_called_once_with(config) + self.assertNotIn(pool_id, self.mgr.instance_mapping) + self.assertFalse(self.rpc_mock.pool_deployed.called) + self.assertTrue(self.log.exception.called) + self.assertTrue(self.mgr.needs_resync) + + def test_destroy_pool(self): + pool_id = '1' + self.assertIn(pool_id, self.mgr.instance_mapping) + + self.mgr._destroy_pool(pool_id) + + self.driver_mock.undeploy_instance.assert_called_once_with(pool_id) + self.assertNotIn(pool_id, self.mgr.instance_mapping) + self.rpc_mock.pool_destroyed.assert_called_once_with(pool_id) + self.assertFalse(self.mgr.needs_resync) + + def test_destroy_pool_exception_on_driver(self): + pool_id = '1' + self.assertIn(pool_id, self.mgr.instance_mapping) + self.driver_mock.undeploy_instance.side_effect = Exception + + self.mgr._destroy_pool(pool_id) + + self.driver_mock.undeploy_instance.assert_called_once_with(pool_id) + self.assertIn(pool_id, self.mgr.instance_mapping) + self.assertFalse(self.rpc_mock.pool_destroyed.called) + 
self.assertTrue(self.log.exception.called) + self.assertTrue(self.mgr.needs_resync) + + def test_get_driver_unknown_device(self): + self.assertRaises(manager.DeviceNotFoundOnAgent, + self.mgr._get_driver, 'unknown') + + def test_remove_orphans(self): + self.mgr.remove_orphans() + self.driver_mock.remove_orphans.assert_called_once_with(['1', '2']) + + def test_create_vip(self): + vip = {'id': 'id1', 'pool_id': '1'} + self.mgr.create_vip(mock.Mock(), vip) + self.driver_mock.create_vip.assert_called_once_with(vip) + self.rpc_mock.update_status.assert_called_once_with('vip', vip['id'], + constants.ACTIVE) + + def test_create_vip_failed(self): + vip = {'id': 'id1', 'pool_id': '1'} + self.driver_mock.create_vip.side_effect = Exception + self.mgr.create_vip(mock.Mock(), vip) + self.driver_mock.create_vip.assert_called_once_with(vip) + self.rpc_mock.update_status.assert_called_once_with('vip', vip['id'], + constants.ERROR) + + def test_update_vip(self): + old_vip = {'id': 'id1'} + vip = {'id': 'id1', 'pool_id': '1'} + self.mgr.update_vip(mock.Mock(), old_vip, vip) + self.driver_mock.update_vip.assert_called_once_with(old_vip, vip) + self.rpc_mock.update_status.assert_called_once_with('vip', vip['id'], + constants.ACTIVE) + + def test_update_vip_failed(self): + old_vip = {'id': 'id1'} + vip = {'id': 'id1', 'pool_id': '1'} + self.driver_mock.update_vip.side_effect = Exception + self.mgr.update_vip(mock.Mock(), old_vip, vip) + self.driver_mock.update_vip.assert_called_once_with(old_vip, vip) + self.rpc_mock.update_status.assert_called_once_with('vip', vip['id'], + constants.ERROR) + + def test_delete_vip(self): + vip = {'id': 'id1', 'pool_id': '1'} + self.mgr.delete_vip(mock.Mock(), vip) + self.driver_mock.delete_vip.assert_called_once_with(vip) + + def test_create_pool(self): + pool = {'id': 'id1'} + self.assertNotIn(pool['id'], self.mgr.instance_mapping) + self.mgr.create_pool(mock.Mock(), pool, 'devdriver') + self.driver_mock.create_pool.assert_called_once_with(pool) + 
self.rpc_mock.update_status.assert_called_once_with('pool', pool['id'], + constants.ACTIVE) + self.assertIn(pool['id'], self.mgr.instance_mapping) + + def test_create_pool_failed(self): + pool = {'id': 'id1'} + self.assertNotIn(pool['id'], self.mgr.instance_mapping) + self.driver_mock.create_pool.side_effect = Exception + self.mgr.create_pool(mock.Mock(), pool, 'devdriver') + self.driver_mock.create_pool.assert_called_once_with(pool) + self.rpc_mock.update_status.assert_called_once_with('pool', pool['id'], + constants.ERROR) + self.assertNotIn(pool['id'], self.mgr.instance_mapping) + + def test_update_pool(self): + old_pool = {'id': '1'} + pool = {'id': '1'} + self.mgr.update_pool(mock.Mock(), old_pool, pool) + self.driver_mock.update_pool.assert_called_once_with(old_pool, pool) + self.rpc_mock.update_status.assert_called_once_with('pool', pool['id'], + constants.ACTIVE) + + def test_update_pool_failed(self): + old_pool = {'id': '1'} + pool = {'id': '1'} + self.driver_mock.update_pool.side_effect = Exception + self.mgr.update_pool(mock.Mock(), old_pool, pool) + self.driver_mock.update_pool.assert_called_once_with(old_pool, pool) + self.rpc_mock.update_status.assert_called_once_with('pool', pool['id'], + constants.ERROR) + + def test_delete_pool(self): + pool = {'id': '1'} + self.assertIn(pool['id'], self.mgr.instance_mapping) + self.mgr.delete_pool(mock.Mock(), pool) + self.driver_mock.delete_pool.assert_called_once_with(pool) + self.assertNotIn(pool['id'], self.mgr.instance_mapping) + + def test_create_member(self): + member = {'id': 'id1', 'pool_id': '1'} + self.mgr.create_member(mock.Mock(), member) + self.driver_mock.create_member.assert_called_once_with(member) + self.rpc_mock.update_status.assert_called_once_with('member', + member['id'], + constants.ACTIVE) + + def test_create_member_failed(self): + member = {'id': 'id1', 'pool_id': '1'} + self.driver_mock.create_member.side_effect = Exception + self.mgr.create_member(mock.Mock(), member) + 
self.driver_mock.create_member.assert_called_once_with(member) + self.rpc_mock.update_status.assert_called_once_with('member', + member['id'], + constants.ERROR) + + def test_update_member(self): + old_member = {'id': 'id1'} + member = {'id': 'id1', 'pool_id': '1'} + self.mgr.update_member(mock.Mock(), old_member, member) + self.driver_mock.update_member.assert_called_once_with(old_member, + member) + self.rpc_mock.update_status.assert_called_once_with('member', + member['id'], + constants.ACTIVE) + + def test_update_member_failed(self): + old_member = {'id': 'id1'} + member = {'id': 'id1', 'pool_id': '1'} + self.driver_mock.update_member.side_effect = Exception + self.mgr.update_member(mock.Mock(), old_member, member) + self.driver_mock.update_member.assert_called_once_with(old_member, + member) + self.rpc_mock.update_status.assert_called_once_with('member', + member['id'], + constants.ERROR) + + def test_delete_member(self): + member = {'id': 'id1', 'pool_id': '1'} + self.mgr.delete_member(mock.Mock(), member) + self.driver_mock.delete_member.assert_called_once_with(member) + + def test_create_monitor(self): + monitor = {'id': 'id1'} + assoc_id = {'monitor_id': monitor['id'], 'pool_id': '1'} + self.mgr.create_pool_health_monitor(mock.Mock(), monitor, '1') + self.driver_mock.create_pool_health_monitor.assert_called_once_with( + monitor, '1') + self.rpc_mock.update_status.assert_called_once_with('health_monitor', + assoc_id, + constants.ACTIVE) + + def test_create_monitor_failed(self): + monitor = {'id': 'id1'} + assoc_id = {'monitor_id': monitor['id'], 'pool_id': '1'} + self.driver_mock.create_pool_health_monitor.side_effect = Exception + self.mgr.create_pool_health_monitor(mock.Mock(), monitor, '1') + self.driver_mock.create_pool_health_monitor.assert_called_once_with( + monitor, '1') + self.rpc_mock.update_status.assert_called_once_with('health_monitor', + assoc_id, + constants.ERROR) + + def test_update_monitor(self): + monitor = {'id': 'id1'} + assoc_id = 
{'monitor_id': monitor['id'], 'pool_id': '1'} + self.mgr.update_pool_health_monitor(mock.Mock(), monitor, monitor, '1') + self.driver_mock.update_pool_health_monitor.assert_called_once_with( + monitor, monitor, '1') + self.rpc_mock.update_status.assert_called_once_with('health_monitor', + assoc_id, + constants.ACTIVE) + + def test_update_monitor_failed(self): + monitor = {'id': 'id1'} + assoc_id = {'monitor_id': monitor['id'], 'pool_id': '1'} + self.driver_mock.update_pool_health_monitor.side_effect = Exception + self.mgr.update_pool_health_monitor(mock.Mock(), monitor, monitor, '1') + self.driver_mock.update_pool_health_monitor.assert_called_once_with( + monitor, monitor, '1') + self.rpc_mock.update_status.assert_called_once_with('health_monitor', + assoc_id, + constants.ERROR) + + def test_delete_monitor(self): + monitor = {'id': 'id1'} + self.mgr.delete_pool_health_monitor(mock.Mock(), monitor, '1') + self.driver_mock.delete_pool_health_monitor.assert_called_once_with( + monitor, '1') + + def test_agent_disabled(self): + payload = {'admin_state_up': False} + self.mgr.agent_updated(mock.Mock(), payload) + self.driver_mock.undeploy_instance.assert_has_calls( + [mock.call('1'), mock.call('2')]) diff --git a/neutron/tests/unit/services/loadbalancer/agent/test_api.py b/neutron/tests/unit/services/loadbalancer/agent/test_api.py new file mode 100644 index 000000000..94513ee9d --- /dev/null +++ b/neutron/tests/unit/services/loadbalancer/agent/test_api.py @@ -0,0 +1,166 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013 New Dream Network, LLC (DreamHost) +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
# NOTE(review): tail of the Apache-2.0 license header started in the
# previous span.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Mark McClain, DreamHost

import mock

from neutron.services.loadbalancer.agent import agent_api as api
from neutron.tests import base


class TestApiCache(base.BaseTestCase):
    """Unit tests for the LBaaS agent-side RPC API wrapper.

    make_msg/call are patched out, so each test only verifies the message
    that would be built and the topic it would be sent on.
    """

    def setUp(self):
        super(TestApiCache, self).setUp()

        self.api = api.LbaasAgentApi('topic', mock.sentinel.context, 'host')
        # NOTE(review): patches are started without an explicit stop;
        # presumably BaseTestCase stops them on cleanup -- confirm.
        self.make_msg = mock.patch.object(self.api, 'make_msg').start()
        self.mock_call = mock.patch.object(self.api, 'call').start()

    def test_init(self):
        self.assertEqual(self.api.host, 'host')
        self.assertEqual(self.api.context, mock.sentinel.context)

    def test_get_ready_devices(self):
        self.assertEqual(
            self.api.get_ready_devices(),
            self.mock_call.return_value
        )

        self.make_msg.assert_called_once_with('get_ready_devices', host='host')
        self.mock_call.assert_called_once_with(
            mock.sentinel.context,
            self.make_msg.return_value,
            topic='topic'
        )

    def test_get_logical_device(self):
        self.assertEqual(
            self.api.get_logical_device('pool_id'),
            self.mock_call.return_value
        )

        self.make_msg.assert_called_once_with(
            'get_logical_device',
            pool_id='pool_id')

        self.mock_call.assert_called_once_with(
            mock.sentinel.context,
            self.make_msg.return_value,
            topic='topic'
        )

    def test_pool_destroyed(self):
        self.assertEqual(
            self.api.pool_destroyed('pool_id'),
            self.mock_call.return_value
        )

        self.make_msg.assert_called_once_with(
            'pool_destroyed',
            pool_id='pool_id')

        self.mock_call.assert_called_once_with(
            mock.sentinel.context,
            self.make_msg.return_value,
            topic='topic'
        )

    def test_pool_deployed(self):
        self.assertEqual(
            self.api.pool_deployed('pool_id'),
            self.mock_call.return_value
        )

        self.make_msg.assert_called_once_with(
            'pool_deployed',
            pool_id='pool_id')

        self.mock_call.assert_called_once_with(
            mock.sentinel.context,
            self.make_msg.return_value,
            topic='topic'
        )

    def test_update_status(self):
        self.assertEqual(
            self.api.update_status('pool', 'pool_id', 'ACTIVE'),
            self.mock_call.return_value
        )

        self.make_msg.assert_called_once_with(
            'update_status',
            obj_type='pool',
            obj_id='pool_id',
            status='ACTIVE')

        self.mock_call.assert_called_once_with(
            mock.sentinel.context,
            self.make_msg.return_value,
            topic='topic'
        )

    def test_plug_vip_port(self):
        self.assertEqual(
            self.api.plug_vip_port('port_id'),
            self.mock_call.return_value
        )

        self.make_msg.assert_called_once_with(
            'plug_vip_port',
            port_id='port_id',
            host='host')

        self.mock_call.assert_called_once_with(
            mock.sentinel.context,
            self.make_msg.return_value,
            topic='topic'
        )

    def test_unplug_vip_port(self):
        self.assertEqual(
            self.api.unplug_vip_port('port_id'),
            self.mock_call.return_value
        )

        self.make_msg.assert_called_once_with(
            'unplug_vip_port',
            port_id='port_id',
            host='host')

        self.mock_call.assert_called_once_with(
            mock.sentinel.context,
            self.make_msg.return_value,
            topic='topic'
        )

    def test_update_pool_stats(self):
        self.assertEqual(
            self.api.update_pool_stats('pool_id', {'stat': 'stat'}),
            self.mock_call.return_value
        )

        self.make_msg.assert_called_once_with(
            'update_pool_stats',
            pool_id='pool_id',
            stats={'stat': 'stat'},
            host='host')

        self.mock_call.assert_called_once_with(
            mock.sentinel.context,
            self.make_msg.return_value,
            topic='topic'
        )

# --- patch metadata preserved from the mangled diff ---
# diff --git a/neutron/tests/unit/services/loadbalancer/drivers/__init__.py
#          b/neutron/tests/unit/services/loadbalancer/drivers/__init__.py
# new file mode 100644, index 000000000..ce18bf6d6, @@ -0,0 +1,17 @@
# (license-only __init__.py; its text continues in the next span)
b/neutron/tests/unit/services/loadbalancer/drivers/__init__.py @@ -0,0 +1,17 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013 New Dream Network, LLC (DreamHost) +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Mark McClain, DreamHost diff --git a/neutron/tests/unit/services/loadbalancer/drivers/embrane/__init__.py b/neutron/tests/unit/services/loadbalancer/drivers/embrane/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/neutron/tests/unit/services/loadbalancer/drivers/embrane/test_embrane_defaults.py b/neutron/tests/unit/services/loadbalancer/drivers/embrane/test_embrane_defaults.py new file mode 100644 index 000000000..cffb2ae37 --- /dev/null +++ b/neutron/tests/unit/services/loadbalancer/drivers/embrane/test_embrane_defaults.py @@ -0,0 +1,30 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 Embrane, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# +# @author: Ivar Lazzaro, Embrane, Inc. + +from oslo.config import cfg + +from neutron.services.loadbalancer.drivers.embrane import config # noqa +from neutron.tests import base + + +class ConfigurationTest(base.BaseTestCase): + + def test_defaults(self): + self.assertEqual('small', cfg.CONF.heleoslb.lb_flavor) + self.assertEqual(60, cfg.CONF.heleoslb.sync_interval) diff --git a/neutron/tests/unit/services/loadbalancer/drivers/embrane/test_plugin_driver.py b/neutron/tests/unit/services/loadbalancer/drivers/embrane/test_plugin_driver.py new file mode 100644 index 000000000..56a02a208 --- /dev/null +++ b/neutron/tests/unit/services/loadbalancer/drivers/embrane/test_plugin_driver.py @@ -0,0 +1,93 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 Embrane, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Ivar Lazzaro, Embrane, Inc. 
# @author: Ivar Lazzaro, Embrane, Inc. ivar@embrane.com

import sys

import mock
from oslo.config import cfg

from neutron import context
from neutron.openstack.common.db import exception as n_exc
from neutron.tests.unit.db.loadbalancer import test_db_loadbalancer

# The embrane driver imports 'heleosapi' at module scope, so a mock module
# must be registered before those imports and removed right after.
HELEOSAPIMOCK = mock.Mock()
sys.modules["heleosapi"] = HELEOSAPIMOCK
from neutron.services.loadbalancer.drivers.embrane import config  # noqa
from neutron.services.loadbalancer.drivers.embrane import constants as h_con
from neutron.services.loadbalancer.drivers.embrane import db as h_db
# Stop the mock from persisting indefinitely in the global modules space
del sys.modules["heleosapi"]

EMBRANE_PROVIDER = ('LOADBALANCER:lbaas:neutron.services.'
                    'loadbalancer.drivers.embrane.driver.'
                    'EmbraneLbaas:default')


class TestLoadBalancerPluginBase(
        test_db_loadbalancer.LoadBalancerPluginDbTestCase):
    """Base fixture: LBaaS plugin DB tests wired to the Embrane provider."""

    def setUp(self):
        cfg.CONF.set_override('admin_password', "admin123", 'heleoslb')
        cfg.CONF.set_override('sync_interval', 0, 'heleoslb')
        mock.patch.dict(sys.modules, {'heleosapi': HELEOSAPIMOCK}).start()
        super(TestLoadBalancerPluginBase, self).setUp(
            lbaas_provider=EMBRANE_PROVIDER)
        self.driver = self.plugin.drivers['lbaas']
        # prevent module mock from saving calls between tests
        self.addCleanup(HELEOSAPIMOCK.reset_mock)


class TestLoadBalancerPlugin(test_db_loadbalancer.TestLoadBalancer,
                             TestLoadBalancerPluginBase):
    """Runs the generic LBaaS test suite against the Embrane driver."""

    def test_create_vip_with_session_persistence_with_app_cookie(self):
        self.skip("App cookie persistence not supported.")

    def test_pool_port(self):
        with self.port(no_delete=True) as port:
            with self.pool() as pool:
                h_db.add_pool_port(context.get_admin_context(),
                                   pool['pool']['id'], port['port']['id'])
                pool_port = h_db.get_pool_port(context.get_admin_context(),
                                               pool['pool']['id'])
                self.assertIsNotNone(pool_port)
            # NOTE(review): indentation reconstructed from a mangled diff --
            # the second lookup presumably runs after the pool context exits
            # (pool deleted), which is what makes assertIsNone meaningful;
            # confirm against upstream neutron.
            pool_port = h_db.get_pool_port(context.get_admin_context(),
                                           pool['pool']['id'])
            self.assertIsNone(pool_port)

    def test_create_pool_port_no_port(self):
        with self.pool() as pool:
            self.assertRaises(n_exc.DBError,
                              h_db.add_pool_port,
                              context.get_admin_context(),
                              pool['pool']['id'], None)

    def test_lb_operations_handlers(self):
        h = self.driver._dispatcher.handlers
        self.assertIsNotNone(h[h_con.Events.ADD_OR_UPDATE_MEMBER])
        self.assertIsNotNone(h[h_con.Events.CREATE_VIP])
        self.assertIsNotNone(h[h_con.Events.DELETE_MEMBER])
        self.assertIsNotNone(h[h_con.Events.DELETE_VIP])
        self.assertIsNotNone(h[h_con.Events.POLL_GRAPH])
        self.assertIsNotNone(h[h_con.Events.REMOVE_MEMBER])
        self.assertIsNotNone(h[h_con.Events.UPDATE_POOL])
        self.assertIsNotNone(h[h_con.Events.UPDATE_VIP])
        self.assertIsNotNone(h[h_con.Events.UPDATE_POOL_HM])
        self.assertIsNotNone(h[h_con.Events.DELETE_POOL_HM])
        self.assertIsNotNone(h[h_con.Events.ADD_POOL_HM])

# --- patch metadata preserved from the mangled diff ---
# diff --git a/neutron/tests/unit/services/loadbalancer/drivers/haproxy/
#          __init__.py (new file mode 100644, index 000000000..ce18bf6d6,
#          @@ -0,0 +1,17 @@) -- license-only __init__.py; its text
#          continues in the next span.
# @author: Mark McClain, DreamHost  (tail of the haproxy/__init__.py header)
#
# --- patch metadata preserved from the mangled diff ---
# diff --git a/neutron/tests/unit/services/loadbalancer/drivers/haproxy/
#          test_cfg.py (new file mode 100644, index 000000000..a35e15936,
#          @@ -0,0 +1,228 @@)
#
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 Mirantis, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Oleg Bondarev (obondarev@mirantis.com)

import contextlib

import mock

from neutron.services.loadbalancer.drivers.haproxy import cfg
from neutron.tests import base


class TestHaproxyCfg(base.BaseTestCase):
    """Unit tests for the haproxy configuration-file builder helpers."""

    def test_save_config(self):
        # Each section builder is stubbed; save_config must join their
        # output with newlines and hand it to replace_file.
        with contextlib.nested(
            mock.patch('neutron.services.loadbalancer.'
                       'drivers.haproxy.cfg._build_global'),
            mock.patch('neutron.services.loadbalancer.'
                       'drivers.haproxy.cfg._build_defaults'),
            mock.patch('neutron.services.loadbalancer.'
                       'drivers.haproxy.cfg._build_frontend'),
            mock.patch('neutron.services.loadbalancer.'
                       'drivers.haproxy.cfg._build_backend'),
            mock.patch('neutron.agent.linux.utils.replace_file')
        ) as (b_g, b_d, b_f, b_b, replace):
            test_config = ['globals', 'defaults', 'frontend', 'backend']
            b_g.return_value = [test_config[0]]
            b_d.return_value = [test_config[1]]
            b_f.return_value = [test_config[2]]
            b_b.return_value = [test_config[3]]

            cfg.save_config('test_path', mock.Mock())
            replace.assert_called_once_with('test_path',
                                            '\n'.join(test_config))

    def test_build_global(self):
        expected_opts = ['global',
                         '\tdaemon',
                         '\tuser nobody',
                         '\tgroup test_group',
                         '\tlog /dev/log local0',
                         '\tlog /dev/log local1 notice',
                         '\tstats socket test_path mode 0666 level user']
        opts = cfg._build_global(mock.Mock(), 'test_path', 'test_group')
        self.assertEqual(expected_opts, list(opts))

    def test_build_defaults(self):
        expected_opts = ['defaults',
                         '\tlog global',
                         '\tretries 3',
                         '\toption redispatch',
                         '\ttimeout connect 5000',
                         '\ttimeout client 50000',
                         '\ttimeout server 50000']
        opts = cfg._build_defaults(mock.Mock())
        self.assertEqual(expected_opts, list(opts))

    def test_build_frontend(self):
        test_config = {'vip': {'id': 'vip_id',
                               'protocol': 'HTTP',
                               'port': {'fixed_ips': [
                                   {'ip_address': '10.0.0.2'}]
                               },
                               'protocol_port': 80,
                               'connection_limit': 2000,
                               },
                       'pool': {'id': 'pool_id'}}
        expected_opts = ['frontend vip_id',
                         '\toption tcplog',
                         '\tbind 10.0.0.2:80',
                         '\tmode http',
                         '\tdefault_backend pool_id',
                         '\tmaxconn 2000',
                         '\toption forwardfor']
        opts = cfg._build_frontend(test_config)
        self.assertEqual(expected_opts, list(opts))

        # A negative connection_limit must drop the maxconn option.
        test_config['vip']['connection_limit'] = -1
        expected_opts.remove('\tmaxconn 2000')
        opts = cfg._build_frontend(test_config)
        self.assertEqual(expected_opts, list(opts))

    def test_build_backend(self):
        test_config = {'pool': {'id': 'pool_id',
                                'protocol': 'HTTP',
                                'lb_method': 'ROUND_ROBIN'},
                       'members': [{'status': 'ACTIVE',
                                    'admin_state_up': True,
                                    'id': 'member1_id',
                                    'address': '10.0.0.3',
                                    'protocol_port': 80,
                                    'weight': 1},
                                   {'status': 'INACTIVE',
                                    'admin_state_up': True,
                                    'id': 'member2_id',
                                    'address': '10.0.0.4',
                                    'protocol_port': 80,
                                    'weight': 1},
                                   {'status': 'PENDING_CREATE',
                                    'admin_state_up': True,
                                    'id': 'member3_id',
                                    'address': '10.0.0.5',
                                    'protocol_port': 80,
                                    'weight': 1}],
                       'healthmonitors': [{'admin_state_up': True,
                                           'delay': 3,
                                           'max_retries': 4,
                                           'timeout': 2,
                                           'type': 'TCP'}],
                       'vip': {'session_persistence': {'type': 'HTTP_COOKIE'}}}
        expected_opts = ['backend pool_id',
                         '\tmode http',
                         '\tbalance roundrobin',
                         '\toption forwardfor',
                         '\ttimeout check 2s',
                         '\tcookie SRV insert indirect nocache',
                         '\tserver member1_id 10.0.0.3:80 weight 1 '
                         'check inter 3s fall 4 cookie 0',
                         '\tserver member2_id 10.0.0.4:80 weight 1 '
                         'check inter 3s fall 4 cookie 1',
                         '\tserver member3_id 10.0.0.5:80 weight 1 '
                         'check inter 3s fall 4 cookie 2']
        opts = cfg._build_backend(test_config)
        self.assertEqual(expected_opts, list(opts))

    def test_get_server_health_option(self):
        test_config = {'healthmonitors': [{'admin_state_up': False,
                                           'delay': 3,
                                           'max_retries': 4,
                                           'timeout': 2,
                                           'type': 'TCP',
                                           'http_method': 'GET',
                                           'url_path': '/',
                                           'expected_codes': '200'}]}
        self.assertEqual(('', []), cfg._get_server_health_option(test_config))

        # NOTE(review): duplicated assertion kept verbatim from the
        # original source.
        self.assertEqual(('', []), cfg._get_server_health_option(test_config))

        test_config['healthmonitors'][0]['admin_state_up'] = True
        expected = (' check inter 3s fall 4', ['timeout check 2s'])
        self.assertEqual(expected, cfg._get_server_health_option(test_config))

        test_config['healthmonitors'][0]['type'] = 'HTTPS'
        expected = (' check inter 3s fall 4',
                    ['timeout check 2s',
                     'option httpchk GET /',
                     'http-check expect rstatus 200',
                     'option ssl-hello-chk'])
        self.assertEqual(expected, cfg._get_server_health_option(test_config))

    def test_has_http_cookie_persistence(self):
        config = {'vip': {'session_persistence': {'type': 'HTTP_COOKIE'}}}
        self.assertTrue(cfg._has_http_cookie_persistence(config))

        config = {'vip': {'session_persistence': {'type': 'SOURCE_IP'}}}
        self.assertFalse(cfg._has_http_cookie_persistence(config))

        config = {'vip': {'session_persistence': {}}}
        self.assertFalse(cfg._has_http_cookie_persistence(config))

    def test_get_session_persistence(self):
        config = {'vip': {'session_persistence': {'type': 'SOURCE_IP'}}}
        self.assertEqual(cfg._get_session_persistence(config),
                         ['stick-table type ip size 10k', 'stick on src'])

        # HTTP_COOKIE persistence needs at least one member to emit options.
        config = {'vip': {'session_persistence': {'type': 'HTTP_COOKIE'}},
                  'members': []}
        self.assertEqual([], cfg._get_session_persistence(config))

        config = {'vip': {'session_persistence': {'type': 'HTTP_COOKIE'}}}
        self.assertEqual([], cfg._get_session_persistence(config))

        config = {'vip': {'session_persistence': {'type': 'HTTP_COOKIE'}},
                  'members': [{'id': 'member1_id'}]}
        self.assertEqual(cfg._get_session_persistence(config),
                         ['cookie SRV insert indirect nocache'])

        config = {'vip': {'session_persistence': {'type': 'APP_COOKIE',
                                                  'cookie_name': 'test'}}}
        self.assertEqual(cfg._get_session_persistence(config),
                         ['appsession test len 56 timeout 3h'])

        # APP_COOKIE without a cookie name, and unknown types, emit nothing.
        config = {'vip': {'session_persistence': {'type': 'APP_COOKIE'}}}
        self.assertEqual(cfg._get_session_persistence(config), [])

        config = {'vip': {'session_persistence': {'type': 'UNSUPPORTED'}}}
        self.assertEqual(cfg._get_session_persistence(config), [])

    def test_expand_expected_codes(self):
        exp_codes = ''
        self.assertEqual(cfg._expand_expected_codes(exp_codes), set([]))
        exp_codes = '200'
        self.assertEqual(cfg._expand_expected_codes(exp_codes), set(['200']))
        exp_codes = '200, 201'
        self.assertEqual(cfg._expand_expected_codes(exp_codes),
                         set(['200', '201']))
        exp_codes = '200, 201,202'
        self.assertEqual(cfg._expand_expected_codes(exp_codes),
                         set(['200', '201', '202']))
        exp_codes = '200-202'
        self.assertEqual(cfg._expand_expected_codes(exp_codes),
                         set(['200', '201', '202']))
        exp_codes = '200-202, 205'
        self.assertEqual(cfg._expand_expected_codes(exp_codes),
                         set(['200', '201', '202', '205']))
        exp_codes = '200, 201-203'
        self.assertEqual(cfg._expand_expected_codes(exp_codes),
                         set(['200', '201', '202', '203']))
        exp_codes = '200, 201-203, 205'
        self.assertEqual(cfg._expand_expected_codes(exp_codes),
                         set(['200', '201', '202', '203', '205']))
        # An inverted range contributes nothing.
        exp_codes = '201-200, 205'
        self.assertEqual(cfg._expand_expected_codes(exp_codes), set(['205']))

# --- patch metadata preserved from the mangled diff ---
# diff --git a/neutron/tests/unit/services/loadbalancer/drivers/haproxy/
#          test_namespace_driver.py (new file mode 100644,
#          index 000000000..450727bc2, @@ -0,0 +1,550 @@)
# The new file's Apache-2.0 license header (comment lines only) begins here
# and continues in the next span.
+# +# @author: Mark McClain, DreamHost + +import contextlib + +import mock + +from neutron.common import exceptions +from neutron.services.loadbalancer.drivers.haproxy import namespace_driver +from neutron.tests import base + + +class TestHaproxyNSDriver(base.BaseTestCase): + def setUp(self): + super(TestHaproxyNSDriver, self).setUp() + + conf = mock.Mock() + conf.haproxy.loadbalancer_state_path = '/the/path' + conf.interface_driver = 'intdriver' + conf.haproxy.user_group = 'test_group' + conf.haproxy.send_gratuitous_arp = 3 + conf.AGENT.root_helper = 'sudo_test' + self.conf = conf + self.mock_importer = mock.patch.object(namespace_driver, + 'importutils').start() + + self.rpc_mock = mock.Mock() + self.driver = namespace_driver.HaproxyNSDriver( + conf, + self.rpc_mock + ) + self.vif_driver = mock.Mock() + self.driver.vif_driver = self.vif_driver + + self.fake_config = { + 'pool': {'id': 'pool_id', 'status': 'ACTIVE', + 'admin_state_up': True}, + 'vip': {'id': 'vip_id', 'port': {'id': 'port_id'}, + 'status': 'ACTIVE', 'admin_state_up': True} + } + + def test_get_name(self): + self.assertEqual(self.driver.get_name(), namespace_driver.DRIVER_NAME) + + def test_create(self): + with mock.patch.object(self.driver, '_plug') as plug: + with mock.patch.object(self.driver, '_spawn') as spawn: + self.driver.create(self.fake_config) + + plug.assert_called_once_with( + 'qlbaas-pool_id', {'id': 'port_id'} + ) + spawn.assert_called_once_with(self.fake_config) + + def test_update(self): + with contextlib.nested( + mock.patch.object(self.driver, '_get_state_file_path'), + mock.patch.object(self.driver, '_spawn'), + mock.patch('__builtin__.open') + ) as (gsp, spawn, mock_open): + mock_open.return_value = ['5'] + + self.driver.update(self.fake_config) + + mock_open.assert_called_once_with(gsp.return_value, 'r') + spawn.assert_called_once_with(self.fake_config, ['-sf', '5']) + + def test_spawn(self): + with contextlib.nested( + mock.patch.object(namespace_driver.hacfg, 'save_config'), 
+ mock.patch.object(self.driver, '_get_state_file_path'), + mock.patch('neutron.agent.linux.ip_lib.IPWrapper') + ) as (mock_save, gsp, ip_wrap): + gsp.side_effect = lambda x, y: y + + self.driver._spawn(self.fake_config) + + mock_save.assert_called_once_with('conf', self.fake_config, + 'sock', 'test_group') + cmd = ['haproxy', '-f', 'conf', '-p', 'pid'] + ip_wrap.assert_has_calls([ + mock.call('sudo_test', 'qlbaas-pool_id'), + mock.call().netns.execute(cmd) + ]) + + def test_undeploy_instance(self): + with contextlib.nested( + mock.patch.object(self.driver, '_get_state_file_path'), + mock.patch.object(namespace_driver, 'kill_pids_in_file'), + mock.patch.object(self.driver, '_unplug'), + mock.patch('neutron.agent.linux.ip_lib.IPWrapper'), + mock.patch('os.path.isdir'), + mock.patch('shutil.rmtree') + ) as (gsp, kill, unplug, ip_wrap, isdir, rmtree): + gsp.side_effect = lambda x, y: '/pool/' + y + + self.driver.pool_to_port_id['pool_id'] = 'port_id' + isdir.return_value = True + + self.driver.undeploy_instance('pool_id') + + kill.assert_called_once_with('sudo_test', '/pool/pid') + unplug.assert_called_once_with('qlbaas-pool_id', 'port_id') + isdir.assert_called_once_with('/pool') + rmtree.assert_called_once_with('/pool') + ip_wrap.assert_has_calls([ + mock.call('sudo_test', 'qlbaas-pool_id'), + mock.call().garbage_collect_namespace() + ]) + + def test_undeploy_instance_with_ns_cleanup(self): + with contextlib.nested( + mock.patch.object(self.driver, '_get_state_file_path'), + mock.patch.object(self.driver, 'vif_driver'), + mock.patch.object(namespace_driver, 'kill_pids_in_file'), + mock.patch('neutron.agent.linux.ip_lib.IPWrapper'), + mock.patch('os.path.isdir'), + mock.patch('shutil.rmtree') + ) as (gsp, vif, kill, ip_wrap, isdir, rmtree): + device = mock.Mock() + device_name = 'port_device' + device.name = device_name + ip_wrap.return_value.get_devices.return_value = [device] + + self.driver.undeploy_instance('pool_id', cleanup_namespace=True) + 
vif.unplug.assert_called_once_with(device_name, + namespace='qlbaas-pool_id') + + def test_remove_orphans(self): + with contextlib.nested( + mock.patch.object(self.driver, 'exists'), + mock.patch.object(self.driver, 'undeploy_instance'), + mock.patch('os.listdir'), + mock.patch('os.path.exists') + ) as (exists, undeploy, listdir, path_exists): + known = ['known1', 'known2'] + unknown = ['unknown1', 'unknown2'] + listdir.return_value = known + unknown + exists.side_effect = lambda x: x == 'unknown2' + + self.driver.remove_orphans(known) + + undeploy.assert_called_once_with('unknown2', + cleanup_namespace=True) + + def test_exists(self): + with contextlib.nested( + mock.patch.object(self.driver, '_get_state_file_path'), + mock.patch('neutron.agent.linux.ip_lib.IPWrapper'), + mock.patch('socket.socket'), + mock.patch('os.path.exists'), + ) as (gsp, ip_wrap, socket, path_exists): + gsp.side_effect = lambda x, y: '/pool/' + y + + ip_wrap.return_value.netns.exists.return_value = True + path_exists.return_value = True + + self.driver.exists('pool_id') + + ip_wrap.assert_has_calls([ + mock.call('sudo_test'), + mock.call().netns.exists('qlbaas-pool_id') + ]) + + self.assertTrue(self.driver.exists('pool_id')) + + def test_get_stats(self): + raw_stats = ('# pxname,svname,qcur,qmax,scur,smax,slim,stot,bin,bout,' + 'dreq,dresp,ereq,econ,eresp,wretr,wredis,status,weight,' + 'act,bck,chkfail,chkdown,lastchg,downtime,qlimit,pid,iid,' + 'sid,throttle,lbtot,tracked,type,rate,rate_lim,rate_max,' + 'check_status,check_code,check_duration,hrsp_1xx,' + 'hrsp_2xx,hrsp_3xx,hrsp_4xx,hrsp_5xx,hrsp_other,hanafail,' + 'req_rate,req_rate_max,req_tot,cli_abrt,srv_abrt,\n' + '8e271901-69ed-403e-a59b-f53cf77ef208,BACKEND,1,2,3,4,0,' + '10,7764,2365,0,0,,0,0,0,0,UP,1,1,0,,0,103780,0,,1,2,0,,0' + ',,1,0,,0,,,,0,0,0,0,0,0,,,,,0,0,\n\n' + 'a557019b-dc07-4688-9af4-f5cf02bb6d4b,' + '32a6c2a3-420a-44c3-955d-86bd2fc6871e,0,0,0,1,,7,1120,' + '224,,0,,0,0,0,0,UP,1,1,0,0,1,2623,303,,1,2,1,,7,,2,0,,' + 
'1,L7OK,200,98,0,7,0,0,0,0,0,,,,0,0,\n' + 'a557019b-dc07-4688-9af4-f5cf02bb6d4b,' + 'd9aea044-8867-4e80-9875-16fb808fa0f9,0,0,0,2,,12,0,0,,' + '0,,0,0,8,4,DOWN,1,1,0,9,2,308,675,,1,2,2,,4,,2,0,,2,' + 'L4CON,,2999,0,0,0,0,0,0,0,,,,0,0,\n') + raw_stats_empty = ('# pxname,svname,qcur,qmax,scur,smax,slim,stot,bin,' + 'bout,dreq,dresp,ereq,econ,eresp,wretr,wredis,' + 'status,weight,act,bck,chkfail,chkdown,lastchg,' + 'downtime,qlimit,pid,iid,sid,throttle,lbtot,' + 'tracked,type,rate,rate_lim,rate_max,check_status,' + 'check_code,check_duration,hrsp_1xx,hrsp_2xx,' + 'hrsp_3xx,hrsp_4xx,hrsp_5xx,hrsp_other,hanafail,' + 'req_rate,req_rate_max,req_tot,cli_abrt,srv_abrt,' + '\n') + with contextlib.nested( + mock.patch.object(self.driver, '_get_state_file_path'), + mock.patch('socket.socket'), + mock.patch('os.path.exists'), + ) as (gsp, socket, path_exists): + gsp.side_effect = lambda x, y: '/pool/' + y + path_exists.return_value = True + socket.return_value = socket + socket.recv.return_value = raw_stats + + exp_stats = {'connection_errors': '0', + 'active_connections': '3', + 'current_sessions': '3', + 'bytes_in': '7764', + 'max_connections': '4', + 'max_sessions': '4', + 'bytes_out': '2365', + 'response_errors': '0', + 'total_sessions': '10', + 'total_connections': '10', + 'members': { + '32a6c2a3-420a-44c3-955d-86bd2fc6871e': { + 'status': 'ACTIVE', + 'health': 'L7OK', + 'failed_checks': '0' + }, + 'd9aea044-8867-4e80-9875-16fb808fa0f9': { + 'status': 'INACTIVE', + 'health': 'L4CON', + 'failed_checks': '9' + } + } + } + stats = self.driver.get_stats('pool_id') + self.assertEqual(exp_stats, stats) + + socket.recv.return_value = raw_stats_empty + self.assertEqual({'members': {}}, self.driver.get_stats('pool_id')) + + path_exists.return_value = False + socket.reset_mock() + self.assertEqual({}, self.driver.get_stats('pool_id')) + self.assertFalse(socket.called) + + def test_plug(self): + test_port = {'id': 'port_id', + 'network_id': 'net_id', + 'mac_address': 'mac_addr', + 
'fixed_ips': [{'ip_address': '10.0.0.2', + 'subnet': {'cidr': '10.0.0.0/24', + 'gateway_ip': '10.0.0.1'}}]} + with contextlib.nested( + mock.patch('neutron.agent.linux.ip_lib.device_exists'), + mock.patch('netaddr.IPNetwork'), + mock.patch('neutron.agent.linux.ip_lib.IPWrapper'), + ) as (dev_exists, ip_net, ip_wrap): + self.vif_driver.get_device_name.return_value = 'test_interface' + dev_exists.return_value = False + ip_net.return_value = ip_net + ip_net.prefixlen = 24 + + self.driver._plug('test_ns', test_port) + self.rpc_mock.plug_vip_port.assert_called_once_with( + test_port['id']) + self.assertTrue(dev_exists.called) + self.vif_driver.plug.assert_called_once_with('net_id', 'port_id', + 'test_interface', + 'mac_addr', + namespace='test_ns') + self.vif_driver.init_l3.assert_called_once_with('test_interface', + ['10.0.0.2/24'], + namespace= + 'test_ns') + cmd = ['route', 'add', 'default', 'gw', '10.0.0.1'] + cmd_arping = ['arping', '-U', '-I', + 'test_interface', '-c', + self.conf.haproxy.send_gratuitous_arp, '10.0.0.2'] + ip_wrap.assert_has_calls([ + mock.call('sudo_test', namespace='test_ns'), + mock.call().netns.execute(cmd, check_exit_code=False), + mock.call().netns.execute(cmd_arping, check_exit_code=False), + ]) + + dev_exists.return_value = True + self.assertRaises(exceptions.PreexistingDeviceFailure, + self.driver._plug, 'test_ns', test_port, False) + + def test_plug_not_send_gratuitous_arp(self): + self.conf.haproxy.send_gratuitous_arp = 0 + test_port = {'id': 'port_id', + 'network_id': 'net_id', + 'mac_address': 'mac_addr', + 'fixed_ips': [{'ip_address': '10.0.0.2', + 'subnet': {'cidr': '10.0.0.0/24', + 'gateway_ip': '10.0.0.1'}}]} + with contextlib.nested( + mock.patch('neutron.agent.linux.ip_lib.device_exists'), + mock.patch('netaddr.IPNetwork'), + mock.patch('neutron.agent.linux.ip_lib.IPWrapper'), + ) as (dev_exists, ip_net, ip_wrap): + self.vif_driver.get_device_name.return_value = 'test_interface' + dev_exists.return_value = False + 
ip_net.return_value = ip_net + ip_net.prefixlen = 24 + + self.driver._plug('test_ns', test_port) + cmd = ['route', 'add', 'default', 'gw', '10.0.0.1'] + expected = [ + mock.call('sudo_test', namespace='test_ns'), + mock.call().netns.execute(cmd, check_exit_code=False)] + self.assertEqual(expected, ip_wrap.mock_calls) + + def test_plug_no_gw(self): + test_port = {'id': 'port_id', + 'network_id': 'net_id', + 'mac_address': 'mac_addr', + 'fixed_ips': [{'ip_address': '10.0.0.2', + 'subnet': {'cidr': '10.0.0.0/24'}}]} + with contextlib.nested( + mock.patch('neutron.agent.linux.ip_lib.device_exists'), + mock.patch('netaddr.IPNetwork'), + mock.patch('neutron.agent.linux.ip_lib.IPWrapper'), + ) as (dev_exists, ip_net, ip_wrap): + self.vif_driver.get_device_name.return_value = 'test_interface' + dev_exists.return_value = False + ip_net.return_value = ip_net + ip_net.prefixlen = 24 + + self.driver._plug('test_ns', test_port) + self.rpc_mock.plug_vip_port.assert_called_once_with( + test_port['id']) + self.assertTrue(dev_exists.called) + self.vif_driver.plug.assert_called_once_with('net_id', 'port_id', + 'test_interface', + 'mac_addr', + namespace='test_ns') + self.vif_driver.init_l3.assert_called_once_with('test_interface', + ['10.0.0.2/24'], + namespace= + 'test_ns') + self.assertFalse(ip_wrap.called) + dev_exists.return_value = True + self.assertRaises(exceptions.PreexistingDeviceFailure, + self.driver._plug, 'test_ns', test_port, False) + + def test_plug_gw_in_host_routes(self): + test_port = {'id': 'port_id', + 'network_id': 'net_id', + 'mac_address': 'mac_addr', + 'fixed_ips': [{'ip_address': '10.0.0.2', + 'subnet': {'cidr': '10.0.0.0/24', + 'host_routes': + [{'destination': '0.0.0.0/0', + 'nexthop': '10.0.0.1'}]}}]} + with contextlib.nested( + mock.patch('neutron.agent.linux.ip_lib.device_exists'), + mock.patch('netaddr.IPNetwork'), + mock.patch('neutron.agent.linux.ip_lib.IPWrapper'), + ) as (dev_exists, ip_net, ip_wrap): + self.vif_driver.get_device_name.return_value 
= 'test_interface' + dev_exists.return_value = False + ip_net.return_value = ip_net + ip_net.prefixlen = 24 + + self.driver._plug('test_ns', test_port) + self.rpc_mock.plug_vip_port.assert_called_once_with( + test_port['id']) + self.assertTrue(dev_exists.called) + self.vif_driver.plug.assert_called_once_with('net_id', 'port_id', + 'test_interface', + 'mac_addr', + namespace='test_ns') + self.vif_driver.init_l3.assert_called_once_with('test_interface', + ['10.0.0.2/24'], + namespace= + 'test_ns') + cmd = ['route', 'add', 'default', 'gw', '10.0.0.1'] + ip_wrap.assert_has_calls([ + mock.call('sudo_test', namespace='test_ns'), + mock.call().netns.execute(cmd, check_exit_code=False), + ]) + + def test_unplug(self): + self.vif_driver.get_device_name.return_value = 'test_interface' + + self.driver._unplug('test_ns', 'port_id') + self.rpc_mock.unplug_vip_port.assert_called_once_with('port_id') + self.vif_driver.unplug('test_interface', namespace='test_ns') + + def test_kill_pids_in_file(self): + with contextlib.nested( + mock.patch('os.path.exists'), + mock.patch('__builtin__.open'), + mock.patch('neutron.agent.linux.utils.execute'), + mock.patch.object(namespace_driver.LOG, 'exception'), + ) as (path_exists, mock_open, mock_execute, mock_log): + file_mock = mock.MagicMock() + mock_open.return_value = file_mock + file_mock.__enter__.return_value = file_mock + file_mock.__iter__.return_value = iter(['123']) + + path_exists.return_value = False + namespace_driver.kill_pids_in_file('sudo_test', 'test_path') + path_exists.assert_called_once_with('test_path') + self.assertFalse(mock_open.called) + self.assertFalse(mock_execute.called) + + path_exists.return_value = True + mock_execute.side_effect = RuntimeError + namespace_driver.kill_pids_in_file('sudo_test', 'test_path') + self.assertTrue(mock_log.called) + mock_execute.assert_called_once_with( + ['kill', '-9', '123'], 'sudo_test') + + def test_get_state_file_path(self): + with mock.patch('os.makedirs') as mkdir: + path = 
self.driver._get_state_file_path('pool_id', 'conf') + self.assertEqual('/the/path/pool_id/conf', path) + mkdir.assert_called_once_with('/the/path/pool_id', 0o755) + + def test_deploy_instance(self): + with mock.patch.object(self.driver, 'exists') as exists: + with mock.patch.object(self.driver, 'update') as update: + self.driver.deploy_instance(self.fake_config) + exists.assert_called_once_with(self.fake_config['pool']['id']) + update.assert_called_once_with(self.fake_config) + + def test_deploy_instance_non_existing(self): + with mock.patch.object(self.driver, 'exists') as exists: + with mock.patch.object(self.driver, 'create') as create: + exists.return_value = False + self.driver.deploy_instance(self.fake_config) + exists.assert_called_once_with(self.fake_config['pool']['id']) + create.assert_called_once_with(self.fake_config) + + def test_deploy_instance_vip_status_non_active(self): + with mock.patch.object(self.driver, 'exists') as exists: + self.fake_config['vip']['status'] = 'NON_ACTIVE' + self.driver.deploy_instance(self.fake_config) + self.assertFalse(exists.called) + + def test_deploy_instance_vip_admin_state_down(self): + with mock.patch.object(self.driver, 'exists') as exists: + self.fake_config['vip']['admin_state_up'] = False + self.driver.deploy_instance(self.fake_config) + self.assertFalse(exists.called) + + def test_deploy_instance_no_vip(self): + with mock.patch.object(self.driver, 'exists') as exists: + del self.fake_config['vip'] + self.driver.deploy_instance(self.fake_config) + self.assertFalse(exists.called) + + def test_deploy_instance_pool_status_non_active(self): + with mock.patch.object(self.driver, 'exists') as exists: + self.fake_config['pool']['status'] = 'NON_ACTIVE' + self.driver.deploy_instance(self.fake_config) + self.assertFalse(exists.called) + + def test_deploy_instance_pool_admin_state_down(self): + with mock.patch.object(self.driver, 'exists') as exists: + self.fake_config['pool']['admin_state_up'] = False + 
self.driver.deploy_instance(self.fake_config) + self.assertFalse(exists.called) + + def test_refresh_device(self): + with mock.patch.object(self.driver, 'deploy_instance') as deploy: + pool_id = 'pool_id1' + self.driver._refresh_device(pool_id) + self.rpc_mock.get_logical_device.assert_called_once_with(pool_id) + deploy.assert_called_once_with( + self.rpc_mock.get_logical_device.return_value) + + def test_create_vip(self): + with mock.patch.object(self.driver, '_refresh_device') as refresh: + self.driver.create_vip({'pool_id': '1'}) + refresh.assert_called_once_with('1') + + def test_update_vip(self): + with mock.patch.object(self.driver, '_refresh_device') as refresh: + self.driver.update_vip({}, {'pool_id': '1'}) + refresh.assert_called_once_with('1') + + def test_delete_vip(self): + with mock.patch.object(self.driver, 'undeploy_instance') as undeploy: + self.driver.delete_vip({'pool_id': '1'}) + undeploy.assert_called_once_with('1') + + def test_create_pool(self): + with mock.patch.object(self.driver, '_refresh_device') as refresh: + self.driver.create_pool({'id': '1'}) + self.assertFalse(refresh.called) + + def test_update_pool(self): + with mock.patch.object(self.driver, '_refresh_device') as refresh: + self.driver.update_pool({}, {'id': '1'}) + refresh.assert_called_once_with('1') + + def test_delete_pool_existing(self): + with mock.patch.object(self.driver, 'undeploy_instance') as undeploy: + with mock.patch.object(self.driver, 'exists') as exists: + exists.return_value = True + self.driver.delete_pool({'id': '1'}) + undeploy.assert_called_once_with('1') + + def test_delete_pool_non_existing(self): + with mock.patch.object(self.driver, 'undeploy_instance') as undeploy: + with mock.patch.object(self.driver, 'exists') as exists: + exists.return_value = False + self.driver.delete_pool({'id': '1'}) + self.assertFalse(undeploy.called) + + def test_create_member(self): + with mock.patch.object(self.driver, '_refresh_device') as refresh: + 
self.driver.create_member({'pool_id': '1'}) + refresh.assert_called_once_with('1') + + def test_update_member(self): + with mock.patch.object(self.driver, '_refresh_device') as refresh: + self.driver.update_member({}, {'pool_id': '1'}) + refresh.assert_called_once_with('1') + + def test_delete_member(self): + with mock.patch.object(self.driver, '_refresh_device') as refresh: + self.driver.delete_member({'pool_id': '1'}) + refresh.assert_called_once_with('1') + + def test_create_pool_health_monitor(self): + with mock.patch.object(self.driver, '_refresh_device') as refresh: + self.driver.create_pool_health_monitor('', '1') + refresh.assert_called_once_with('1') + + def test_update_pool_health_monitor(self): + with mock.patch.object(self.driver, '_refresh_device') as refresh: + self.driver.update_pool_health_monitor('', '', '1') + refresh.assert_called_once_with('1') + + def test_delete_pool_health_monitor(self): + with mock.patch.object(self.driver, '_refresh_device') as refresh: + self.driver.delete_pool_health_monitor('', '1') + refresh.assert_called_once_with('1') diff --git a/neutron/tests/unit/services/loadbalancer/drivers/netscaler/__init__.py b/neutron/tests/unit/services/loadbalancer/drivers/netscaler/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/neutron/tests/unit/services/loadbalancer/drivers/netscaler/test_ncc_client.py b/neutron/tests/unit/services/loadbalancer/drivers/netscaler/test_ncc_client.py new file mode 100644 index 000000000..6585e60e4 --- /dev/null +++ b/neutron/tests/unit/services/loadbalancer/drivers/netscaler/test_ncc_client.py @@ -0,0 +1,204 @@ +# Copyright 2014 Citrix Systems +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import mock +import requests + +from neutron.services.loadbalancer.drivers.netscaler import ncc_client +from neutron.services.loadbalancer.drivers.netscaler import netscaler_driver +from neutron.tests.unit import testlib_api + +NCC_CLIENT_CLASS = ('neutron.services.loadbalancer.drivers' + '.netscaler.ncc_client.NSClient') + +TESTURI_SCHEME = 'http' +TESTURI_HOSTNAME = '1.1.1.1' +TESTURI_PORT = 4433 +TESTURI_PATH = '/ncc_service/1.0' +TESTURI = '%s://%s:%s%s' % (TESTURI_SCHEME, TESTURI_HOSTNAME, + TESTURI_PORT, TESTURI_PATH) +TEST_USERNAME = 'user211' +TEST_PASSWORD = '@30xHl5cT' +TEST_TENANT_ID = '9c5245a2-0432-9d4c-4829-9bd7028603a1' +TESTVIP_ID = '52ab5d71-6bb2-457f-8414-22a4ba55efec' + + +class TestNSClient(testlib_api.WebTestCase): + + """A Unit test for the NetScaler NCC client module.""" + + def setUp(self): + self.log = mock.patch.object(ncc_client, 'LOG').start() + super(TestNSClient, self).setUp() + # mock the requests.request function call + self.request_method_mock = mock.Mock() + requests.request = self.request_method_mock + self.testclient = self._get_nsclient() + + def test_instantiate_nsclient_with_empty_uri(self): + """Asserts that a call with empty URI will raise an exception.""" + self.assertRaises(ncc_client.NCCException, ncc_client.NSClient, + '', TEST_USERNAME, TEST_PASSWORD) + + def test_create_resource_with_no_connection(self): + """Asserts that a call with no connection will raise an exception.""" + # mock a connection object that fails to establish a connection + self.request_method_mock.side_effect = ( + 
requests.exceptions.ConnectionError()) + resource_path = netscaler_driver.VIPS_RESOURCE + resource_name = netscaler_driver.VIP_RESOURCE + resource_body = self._get_testvip_httpbody_for_create() + # call method under test: create_resource() and assert that + # it raises an exception + self.assertRaises(ncc_client.NCCException, + self.testclient.create_resource, + TEST_TENANT_ID, resource_path, + resource_name, resource_body) + + def test_create_resource_with_error(self): + """Asserts that a failed create call raises an exception.""" + # create a mock object to represent a valid http response + # with a failure status code. + fake_response = requests.Response() + fake_response.status_code = requests.codes.unauthorized + fake_response.headers = [] + requests.request.return_value = fake_response + resource_path = netscaler_driver.VIPS_RESOURCE + resource_name = netscaler_driver.VIP_RESOURCE + resource_body = self._get_testvip_httpbody_for_create() + # call method under test: create_resource + # and assert that it raises the expected exception. + self.assertRaises(ncc_client.NCCException, + self.testclient.create_resource, + TEST_TENANT_ID, resource_path, + resource_name, resource_body) + + def test_create_resource(self): + """Asserts that a correct call will succeed.""" + # obtain the mock object that corresponds to the call of request() + fake_response = requests.Response() + fake_response.status_code = requests.codes.created + fake_response.headers = [] + self.request_method_mock.return_value = fake_response + resource_path = netscaler_driver.VIPS_RESOURCE + resource_name = netscaler_driver.VIP_RESOURCE + resource_body = self._get_testvip_httpbody_for_create() + # call method under test: create_resource() + self.testclient.create_resource(TEST_TENANT_ID, resource_path, + resource_name, resource_body) + # assert that request() was called + # with the expected params. 
+ resource_url = "%s/%s" % (self.testclient.service_uri, resource_path) + self.request_method_mock.assert_called_once_with( + 'POST', + url=resource_url, + headers=mock.ANY, + data=mock.ANY) + + def test_update_resource_with_error(self): + """Asserts that a failed update call raises an exception.""" + # create a valid http response with a failure status code. + fake_response = requests.Response() + fake_response.status_code = requests.codes.unauthorized + fake_response.headers = [] + # obtain the mock object that corresponds to the call of request() + self.request_method_mock.return_value = fake_response + resource_path = "%s/%s" % (netscaler_driver.VIPS_RESOURCE, + TESTVIP_ID) + resource_name = netscaler_driver.VIP_RESOURCE + resource_body = self._get_testvip_httpbody_for_update() + # call method under test: update_resource() and + # assert that it raises the expected exception. + self.assertRaises(ncc_client.NCCException, + self.testclient.update_resource, + TEST_TENANT_ID, resource_path, + resource_name, resource_body) + + def test_update_resource(self): + """Asserts that a correct update call will succeed.""" + # create a valid http response with a successful status code. + fake_response = requests.Response() + fake_response.status_code = requests.codes.ok + fake_response.headers = [] + # obtain the mock object that corresponds to the call of request() + self.request_method_mock.return_value = fake_response + resource_path = "%s/%s" % (netscaler_driver.VIPS_RESOURCE, + TESTVIP_ID) + resource_name = netscaler_driver.VIP_RESOURCE + resource_body = self._get_testvip_httpbody_for_update() + # call method under test: update_resource. + self.testclient.update_resource(TEST_TENANT_ID, resource_path, + resource_name, resource_body) + resource_url = "%s/%s" % (self.testclient.service_uri, resource_path) + # assert that requests.request() was called with the + # expected params. 
+ self.request_method_mock.assert_called_once_with( + 'PUT', + url=resource_url, + headers=mock.ANY, + data=mock.ANY) + + def test_delete_resource_with_error(self): + """Asserts that a failed delete call raises an exception.""" + # create a valid http response with a failure status code. + fake_response = requests.Response() + fake_response.status_code = requests.codes.unauthorized + fake_response.headers = [] + resource_path = "%s/%s" % (netscaler_driver.VIPS_RESOURCE, + TESTVIP_ID) + # call method under test: create_resource + self.assertRaises(ncc_client.NCCException, + self.testclient.remove_resource, + TEST_TENANT_ID, resource_path) + + def test_delete_resource(self): + """Asserts that a correct delete call will succeed.""" + # create a valid http response with a failure status code. + fake_response = requests.Response() + fake_response.status_code = requests.codes.ok + fake_response.headers = [] + # obtain the mock object that corresponds to the call of request() + self.request_method_mock.return_value = fake_response + resource_path = "%s/%s" % (netscaler_driver.VIPS_RESOURCE, + TESTVIP_ID) + resource_url = "%s/%s" % (self.testclient.service_uri, resource_path) + # call method under test: create_resource + self.testclient.remove_resource(TEST_TENANT_ID, resource_path) + # assert that httplib.HTTPConnection request() was called with the + # expected params + self.request_method_mock.assert_called_once_with( + 'DELETE', + url=resource_url, + headers=mock.ANY, + data=mock.ANY) + + def _get_nsclient(self): + return ncc_client.NSClient(TESTURI, TEST_USERNAME, TEST_PASSWORD) + + def _get_testvip_httpbody_for_create(self): + body = { + 'name': 'vip1', + 'address': '10.0.0.3', + 'pool_id': 'da477c13-24cd-4c9f-8c19-757a61ef3b9d', + 'protocol': 'HTTP', + 'protocol_port': 80, + 'admin_state_up': True, + } + return body + + def _get_testvip_httpbody_for_update(self): + body = {} + body['name'] = 'updated vip1' + body['admin_state_up'] = False + return body diff --git 
a/neutron/tests/unit/services/loadbalancer/drivers/netscaler/test_netscaler_driver.py b/neutron/tests/unit/services/loadbalancer/drivers/netscaler/test_netscaler_driver.py new file mode 100644 index 000000000..e10c1a3d7 --- /dev/null +++ b/neutron/tests/unit/services/loadbalancer/drivers/netscaler/test_netscaler_driver.py @@ -0,0 +1,802 @@ +# Copyright 2014 Citrix Systems +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import contextlib + +import mock + +from neutron.common import exceptions +from neutron import context +from neutron.db.loadbalancer import loadbalancer_db +from neutron import manager +from neutron.plugins.common import constants +from neutron.services.loadbalancer.drivers.netscaler import ncc_client +from neutron.services.loadbalancer.drivers.netscaler import netscaler_driver +from neutron.tests.unit.db.loadbalancer import test_db_loadbalancer + + +LBAAS_DRIVER_CLASS = ('neutron.services.loadbalancer.drivers' + '.netscaler.netscaler_driver' + '.NetScalerPluginDriver') + +NCC_CLIENT_CLASS = ('neutron.services.loadbalancer.drivers' + '.netscaler.ncc_client' + '.NSClient') + +LBAAS_PROVIDER_NAME = 'netscaler' +LBAAS_PROVIDER = ('LOADBALANCER:%s:%s:default' % + (LBAAS_PROVIDER_NAME, LBAAS_DRIVER_CLASS)) + +#Test data +TESTVIP_ID = '52ab5d71-6bb2-457f-8414-22a4ba55efec' +TESTPOOL_ID = 'da477c13-24cd-4c9f-8c19-757a61ef3b9d' +TESTMEMBER_ID = '84dea8bc-3416-4fb0-83f9-2ca6e7173bee' +TESTMONITOR_ID = '9b9245a2-0413-4f15-87ef-9a41ef66048c' + +TESTVIP_PORT_ID = 
'327d9662-ade9-4c74-aaf6-c76f145c1180' +TESTPOOL_PORT_ID = '132c1dbb-d3d8-45aa-96e3-71f2ea51651e' +TESTPOOL_SNATIP_ADDRESS = '10.0.0.50' +TESTPOOL_SNAT_PORT = { + 'id': TESTPOOL_PORT_ID, + 'fixed_ips': [{'ip_address': TESTPOOL_SNATIP_ADDRESS}] +} +TESTVIP_IP = '10.0.1.100' +TESTMEMBER_IP = '10.0.0.5' + + +class TestLoadBalancerPluginBase(test_db_loadbalancer + .LoadBalancerPluginDbTestCase): + + def setUp(self): + super(TestLoadBalancerPluginBase, self).setUp( + lbaas_provider=LBAAS_PROVIDER) + loaded_plugins = manager.NeutronManager().get_service_plugins() + self.plugin_instance = loaded_plugins[constants.LOADBALANCER] + + +class TestNetScalerPluginDriver(TestLoadBalancerPluginBase): + + """Unit tests for the NetScaler LBaaS driver module.""" + + def setUp(self): + mock.patch.object(netscaler_driver, 'LOG').start() + + # mock the NSClient class (REST client) + client_mock_cls = mock.patch(NCC_CLIENT_CLASS).start() + + #mock the REST methods of the NSClient class + self.client_mock_instance = client_mock_cls.return_value + self.create_resource_mock = self.client_mock_instance.create_resource + self.create_resource_mock.side_effect = mock_create_resource_func + self.update_resource_mock = self.client_mock_instance.update_resource + self.update_resource_mock.side_effect = mock_update_resource_func + self.retrieve_resource_mock = (self.client_mock_instance + .retrieve_resource) + self.retrieve_resource_mock.side_effect = mock_retrieve_resource_func + self.remove_resource_mock = self.client_mock_instance.remove_resource + self.remove_resource_mock.side_effect = mock_remove_resource_func + super(TestNetScalerPluginDriver, self).setUp() + self.plugin_instance.drivers[LBAAS_PROVIDER_NAME] = ( + netscaler_driver.NetScalerPluginDriver(self.plugin_instance)) + self.driver = self.plugin_instance.drivers[LBAAS_PROVIDER_NAME] + self.context = context.get_admin_context() + + def test_create_vip(self): + with contextlib.nested( + self.subnet(), + 
mock.patch.object(self.driver.plugin._core_plugin, 'get_subnet') + ) as (subnet, mock_get_subnet): + mock_get_subnet.return_value = subnet['subnet'] + with self.pool(provider=LBAAS_PROVIDER_NAME) as pool: + testvip = self._build_testvip_contents(subnet['subnet'], + pool['pool']) + expectedvip = self._build_expectedvip_contents( + testvip, + subnet['subnet']) + # mock the LBaaS plugin update_status(). + self._mock_update_status() + # reset the create_resource() mock + self.create_resource_mock.reset_mock() + # execute the method under test + self.driver.create_vip(self.context, testvip) + # First, assert that create_resource was called once + # with expected params. + self.create_resource_mock.assert_called_once_with( + None, + netscaler_driver.VIPS_RESOURCE, + netscaler_driver.VIP_RESOURCE, + expectedvip) + #Finally, assert that the vip object is now ACTIVE + self.mock_update_status_obj.assert_called_once_with( + mock.ANY, + loadbalancer_db.Vip, + expectedvip['id'], + constants.ACTIVE) + + def test_create_vip_without_connection(self): + with contextlib.nested( + self.subnet(), + mock.patch.object(self.driver.plugin._core_plugin, 'get_subnet') + ) as (subnet, mock_get_subnet): + mock_get_subnet.return_value = subnet['subnet'] + with self.pool(provider=LBAAS_PROVIDER_NAME) as pool: + testvip = self._build_testvip_contents(subnet['subnet'], + pool['pool']) + expectedvip = self._build_expectedvip_contents( + testvip, + subnet['subnet']) + errorcode = ncc_client.NCCException.CONNECTION_ERROR + self.create_resource_mock.side_effect = ( + ncc_client.NCCException(errorcode)) + # mock the plugin's update_status() + self._mock_update_status() + # reset the create_resource() mock + self.create_resource_mock.reset_mock() + # execute the method under test. + self.driver.create_vip(self.context, testvip) + # First, assert that update_resource was called once + # with expected params. 
+ self.create_resource_mock.assert_called_once_with( + None, + netscaler_driver.VIPS_RESOURCE, + netscaler_driver.VIP_RESOURCE, + expectedvip) + #Finally, assert that the vip object is in ERROR state + self.mock_update_status_obj.assert_called_once_with( + mock.ANY, + loadbalancer_db.Vip, + testvip['id'], + constants.ERROR) + + def test_update_vip(self): + with contextlib.nested( + self.subnet(), + mock.patch.object(self.driver.plugin._core_plugin, 'get_subnet') + ) as (subnet, mock_get_subnet): + mock_get_subnet.return_value = subnet['subnet'] + with self.pool(provider=LBAAS_PROVIDER_NAME) as pool: + with self.vip(pool=pool, subnet=subnet) as vip: + updated_vip = self._build_updated_testvip_contents( + vip['vip'], + subnet['subnet'], + pool['pool']) + expectedvip = self._build_updated_expectedvip_contents( + updated_vip, + subnet['subnet'], + pool['pool']) + # mock the plugin's update_status() + self._mock_update_status() + # reset the update_resource() mock + self.update_resource_mock.reset_mock() + # execute the method under test + self.driver.update_vip(self.context, updated_vip, + updated_vip) + vip_resource_path = "%s/%s" % ( + (netscaler_driver.VIPS_RESOURCE, + vip['vip']['id'])) + # First, assert that update_resource was called once + # with expected params. 
+ (self.update_resource_mock + .assert_called_once_with( + None, + vip_resource_path, + netscaler_driver.VIP_RESOURCE, + expectedvip)) + #Finally, assert that the vip object is now ACTIVE + self.mock_update_status_obj.assert_called_once_with( + mock.ANY, + loadbalancer_db.Vip, + vip['vip']['id'], + constants.ACTIVE) + + def test_delete_vip(self): + with contextlib.nested( + self.subnet(), + mock.patch.object(self.driver.plugin._core_plugin, 'get_subnet') + ) as (subnet, mock_get_subnet): + mock_get_subnet.return_value = subnet['subnet'] + with self.pool(provider=LBAAS_PROVIDER_NAME) as pool: + with contextlib.nested( + self.vip(pool=pool, subnet=subnet), + mock.patch.object(self.driver.plugin, '_delete_db_vip') + ) as (vip, mock_delete_db_vip): + mock_delete_db_vip.return_value = None + #reset the remove_resource() mock + self.remove_resource_mock.reset_mock() + # execute the method under test + self.driver.delete_vip(self.context, vip['vip']) + vip_resource_path = "%s/%s" % ( + (netscaler_driver.VIPS_RESOURCE, + vip['vip']['id'])) + # Assert that remove_resource() was called once + # with expected params. 
+ (self.remove_resource_mock + .assert_called_once_with(None, vip_resource_path)) + + def test_create_pool(self): + with contextlib.nested( + self.subnet(), + mock.patch.object(self.driver.plugin._core_plugin, 'get_subnet'), + mock.patch.object(self.driver.plugin._core_plugin, 'get_ports'), + mock.patch.object(self.driver.plugin._core_plugin, 'create_port') + ) as (subnet, mock_get_subnet, mock_get_ports, mock_create_port): + mock_get_subnet.return_value = subnet['subnet'] + mock_get_ports.return_value = None + mock_create_port.return_value = TESTPOOL_SNAT_PORT + testpool = self._build_testpool_contents(subnet['subnet']) + expectedpool = self._build_expectedpool_contents(testpool, + subnet['subnet']) + #reset the create_resource() mock + self.create_resource_mock.reset_mock() + # mock the plugin's update_status() + self._mock_update_status() + # execute the method under test + self.driver.create_pool(self.context, testpool) + # First, assert that create_resource was called once + # with expected params. 
+ (self.create_resource_mock + .assert_called_once_with(None, + netscaler_driver.POOLS_RESOURCE, + netscaler_driver.POOL_RESOURCE, + expectedpool)) + #Finally, assert that the pool object is now ACTIVE + self.mock_update_status_obj.assert_called_once_with( + mock.ANY, + loadbalancer_db.Pool, + expectedpool['id'], + constants.ACTIVE) + + def test_create_pool_with_error(self): + with contextlib.nested( + self.subnet(), + mock.patch.object(self.driver.plugin._core_plugin, 'get_subnet'), + mock.patch.object(self.driver.plugin._core_plugin, 'get_ports'), + mock.patch.object(self.driver.plugin._core_plugin, 'create_port') + ) as (subnet, mock_get_subnet, mock_get_ports, mock_create_port): + mock_get_subnet.return_value = subnet['subnet'] + mock_get_ports.return_value = None + mock_create_port.return_value = TESTPOOL_SNAT_PORT + errorcode = ncc_client.NCCException.CONNECTION_ERROR + self.create_resource_mock.side_effect = (ncc_client + .NCCException(errorcode)) + testpool = self._build_testpool_contents(subnet['subnet']) + expectedpool = self._build_expectedpool_contents(testpool, + subnet['subnet']) + # mock the plugin's update_status() + self._mock_update_status() + #reset the create_resource() mock + self.create_resource_mock.reset_mock() + # execute the method under test. + self.driver.create_pool(self.context, testpool) + # Also assert that create_resource was called once + # with expected params. 
+ (self.create_resource_mock + .assert_called_once_with(None, + netscaler_driver.POOLS_RESOURCE, + netscaler_driver.POOL_RESOURCE, + expectedpool)) + #Finally, assert that the pool object is in ERROR state + self.mock_update_status_obj.assert_called_once_with( + mock.ANY, + loadbalancer_db.Pool, + expectedpool['id'], + constants.ERROR) + + def test_create_pool_with_snatportcreate_failure(self): + with contextlib.nested( + self.subnet(), + mock.patch.object(self.driver.plugin._core_plugin, 'get_subnet'), + mock.patch.object(self.driver.plugin._core_plugin, 'get_ports'), + mock.patch.object(self.driver.plugin._core_plugin, 'create_port') + ) as (subnet, mock_get_subnet, mock_get_ports, mock_create_port): + mock_get_subnet.return_value = subnet['subnet'] + mock_get_ports.return_value = None + mock_create_port.side_effect = exceptions.NeutronException() + testpool = self._build_testpool_contents(subnet['subnet']) + #reset the create_resource() mock + self.create_resource_mock.reset_mock() + # execute the method under test. + self.assertRaises(exceptions.NeutronException, + self.driver.create_pool, + self.context, testpool) + + def test_update_pool(self): + with contextlib.nested( + self.subnet(), + mock.patch.object(self.driver.plugin._core_plugin, 'get_subnet') + ) as (subnet, mock_get_subnet): + mock_get_subnet.return_value = subnet['subnet'] + with self.pool(provider=LBAAS_PROVIDER_NAME) as pool: + updated_pool = self._build_updated_testpool_contents( + pool['pool'], + subnet['subnet']) + expectedpool = self._build_updated_expectedpool_contents( + updated_pool, + subnet['subnet']) + # mock the plugin's update_status() + self._mock_update_status() + # reset the update_resource() mock + self.update_resource_mock.reset_mock() + # execute the method under test. 
+ self.driver.update_pool(self.context, pool['pool'], + updated_pool) + pool_resource_path = "%s/%s" % ( + (netscaler_driver.POOLS_RESOURCE, + pool['pool']['id'])) + # First, assert that update_resource was called once + # with expected params. + (self.update_resource_mock + .assert_called_once_with(None, + pool_resource_path, + netscaler_driver.POOL_RESOURCE, + expectedpool)) + #Finally, assert that the pool object is now ACTIVE + self.mock_update_status_obj.assert_called_once_with( + mock.ANY, + loadbalancer_db.Pool, + pool['pool']['id'], + constants.ACTIVE) + + def test_delete_pool(self): + with contextlib.nested( + self.subnet(), + mock.patch.object(self.driver.plugin._core_plugin, 'get_subnet') + ) as (subnet, mock_get_subnet): + mock_get_subnet.return_value = subnet['subnet'] + with contextlib.nested( + self.pool(provider=LBAAS_PROVIDER_NAME), + mock.patch.object(self.driver.plugin._core_plugin, + 'delete_port'), + mock.patch.object(self.driver.plugin._core_plugin, + 'get_ports'), + mock.patch.object(self.driver.plugin, + 'get_pools'), + mock.patch.object(self.driver.plugin, + '_delete_db_pool') + ) as (pool, mock_delete_port, mock_get_ports, mock_get_pools, + mock_delete_db_pool): + mock_delete_port.return_value = None + mock_get_ports.return_value = [{'id': TESTPOOL_PORT_ID}] + mock_get_pools.return_value = [] + mock_delete_db_pool.return_value = None + #reset the remove_resource() mock + self.remove_resource_mock.reset_mock() + # execute the method under test. + self.driver.delete_pool(self.context, pool['pool']) + pool_resource_path = "%s/%s" % ( + (netscaler_driver.POOLS_RESOURCE, + pool['pool']['id'])) + # Assert that delete_resource was called + # once with expected params. 
+ (self.remove_resource_mock + .assert_called_once_with(None, pool_resource_path)) + + def test_create_member(self): + with contextlib.nested( + self.subnet(), + mock.patch.object(self.driver.plugin._core_plugin, + 'get_subnet') + ) as (subnet, mock_get_subnet): + mock_get_subnet.return_value = subnet['subnet'] + with self.pool(provider=LBAAS_PROVIDER_NAME) as pool: + testmember = self._build_testmember_contents(pool['pool']) + expectedmember = self._build_expectedmember_contents( + testmember) + # mock the plugin's update_status() + self._mock_update_status() + #reset the create_resource() mock + self.create_resource_mock.reset_mock() + # execute the method under test. + self.driver.create_member(self.context, testmember) + # First, assert that create_resource was called once + # with expected params. + (self.create_resource_mock + .assert_called_once_with( + None, + netscaler_driver.POOLMEMBERS_RESOURCE, + netscaler_driver.POOLMEMBER_RESOURCE, + expectedmember)) + #Finally, assert that the member object is now ACTIVE + self.mock_update_status_obj.assert_called_once_with( + mock.ANY, + loadbalancer_db.Member, + expectedmember['id'], + constants.ACTIVE) + + def test_update_member(self): + with contextlib.nested( + self.subnet(), + mock.patch.object(self.driver.plugin._core_plugin, 'get_subnet') + ) as (subnet, mock_get_subnet): + mock_get_subnet.return_value = subnet['subnet'] + with self.pool(provider=LBAAS_PROVIDER_NAME) as pool: + with self.member(pool_id=pool['pool']['id']) as member: + updatedmember = (self._build_updated_testmember_contents( + member['member'])) + expectedmember = (self + ._build_updated_expectedmember_contents( + updatedmember)) + # mock the plugin's update_status() + self._mock_update_status() + # reset the update_resource() mock + self.update_resource_mock.reset_mock() + # execute the method under test + self.driver.update_member(self.context, + member['member'], + updatedmember) + member_resource_path = "%s/%s" % ( + 
(netscaler_driver.POOLMEMBERS_RESOURCE, + member['member']['id'])) + # First, assert that update_resource was called once + # with expected params. + (self.update_resource_mock + .assert_called_once_with( + None, + member_resource_path, + netscaler_driver.POOLMEMBER_RESOURCE, + expectedmember)) + #Finally, assert that the member object is now ACTIVE + self.mock_update_status_obj.assert_called_once_with( + mock.ANY, + loadbalancer_db.Member, + member['member']['id'], + constants.ACTIVE) + + def test_delete_member(self): + with contextlib.nested( + self.subnet(), + mock.patch.object(self.driver.plugin._core_plugin, 'get_subnet') + ) as (subnet, mock_get_subnet): + mock_get_subnet.return_value = subnet['subnet'] + with self.pool(provider=LBAAS_PROVIDER_NAME) as pool: + with contextlib.nested( + self.member(pool_id=pool['pool']['id']), + mock.patch.object(self.driver.plugin, '_delete_db_member') + ) as (member, mock_delete_db_member): + mock_delete_db_member.return_value = None + # reset the remove_resource() mock + self.remove_resource_mock.reset_mock() + # execute the method under test + self.driver.delete_member(self.context, + member['member']) + member_resource_path = "%s/%s" % ( + (netscaler_driver.POOLMEMBERS_RESOURCE, + member['member']['id'])) + # Assert that delete_resource was called once + # with expected params. 
+ (self.remove_resource_mock + .assert_called_once_with(None, member_resource_path)) + + def test_create_pool_health_monitor(self): + with contextlib.nested( + self.subnet(), + mock.patch.object(self.driver.plugin._core_plugin, 'get_subnet') + ) as (subnet, mock_get_subnet): + mock_get_subnet.return_value = subnet['subnet'] + with self.pool(provider=LBAAS_PROVIDER_NAME) as pool: + testhealthmonitor = self._build_testhealthmonitor_contents( + pool['pool']) + expectedhealthmonitor = ( + self._build_expectedhealthmonitor_contents( + testhealthmonitor)) + with mock.patch.object(self.driver.plugin, + 'update_pool_health_monitor') as mhm: + # reset the create_resource() mock + self.create_resource_mock.reset_mock() + # execute the method under test. + self.driver.create_pool_health_monitor(self.context, + testhealthmonitor, + pool['pool']['id']) + # First, assert that create_resource was called once + # with expected params. + resource_path = "%s/%s/%s" % ( + netscaler_driver.POOLS_RESOURCE, + pool['pool']['id'], + netscaler_driver.MONITORS_RESOURCE) + (self.create_resource_mock + .assert_called_once_with( + None, + resource_path, + netscaler_driver.MONITOR_RESOURCE, + expectedhealthmonitor)) + # Finally, assert that the healthmonitor object is + # now ACTIVE. 
+ (mhm.assert_called_once_with( + mock.ANY, + expectedhealthmonitor['id'], + pool['pool']['id'], + constants.ACTIVE, "")) + + def test_update_pool_health_monitor(self): + with contextlib.nested( + self.subnet(), + mock.patch.object(self.driver.plugin._core_plugin, 'get_subnet') + ) as (subnet, mock_get_subnet): + mock_get_subnet.return_value = subnet['subnet'] + with self.pool(provider=LBAAS_PROVIDER_NAME) as pool: + with self.health_monitor( + pool_id=pool['pool']['id'] + ) as (health_monitor): + updatedhealthmonitor = ( + self._build_updated_testhealthmonitor_contents( + health_monitor['health_monitor'])) + expectedhealthmonitor = ( + self._build_updated_expectedhealthmonitor_contents( + updatedhealthmonitor)) + with mock.patch.object(self.driver.plugin, + 'update_pool_health_monitor')as mhm: + # reset the update_resource() mock + self.update_resource_mock.reset_mock() + # execute the method under test. + self.driver.update_pool_health_monitor( + self.context, + health_monitor['health_monitor'], + updatedhealthmonitor, + pool['pool']['id']) + monitor_resource_path = "%s/%s" % ( + (netscaler_driver.MONITORS_RESOURCE, + health_monitor['health_monitor']['id'])) + # First, assert that update_resource was called once + # with expected params. 
+ self.update_resource_mock.assert_called_once_with( + None, + monitor_resource_path, + netscaler_driver.MONITOR_RESOURCE, + expectedhealthmonitor) + #Finally, assert that the member object is now ACTIVE + (mhm.assert_called_once_with( + mock.ANY, + health_monitor['health_monitor']['id'], + pool['pool']['id'], + constants.ACTIVE, "")) + + def test_delete_pool_health_monitor(self): + with contextlib.nested( + self.subnet(), + mock.patch.object(self.driver.plugin._core_plugin, 'get_subnet') + ) as (subnet, mock_get_subnet): + mock_get_subnet.return_value = subnet['subnet'] + with self.pool(provider=LBAAS_PROVIDER_NAME) as pool: + with contextlib.nested( + self.health_monitor(pool_id=pool['pool']['id']), + mock.patch.object(self.driver.plugin, + '_delete_db_pool_health_monitor') + ) as (health_monitor, mock_delete_db_monitor): + mock_delete_db_monitor.return_value = None + # reset the remove_resource() mock + self.remove_resource_mock.reset_mock() + # execute the method under test. + self.driver.delete_pool_health_monitor( + self.context, + health_monitor['health_monitor'], + pool['pool']['id']) + monitor_resource_path = "%s/%s/%s/%s" % ( + netscaler_driver.POOLS_RESOURCE, + pool['pool']['id'], + netscaler_driver.MONITORS_RESOURCE, + health_monitor['health_monitor']['id']) + # Assert that delete_resource was called once + # with expected params. 
+ self.remove_resource_mock.assert_called_once_with( + None, + monitor_resource_path) + + def _build_testvip_contents(self, subnet, pool): + vip_obj = dict(id=TESTVIP_ID, + name='testvip', + description='a test vip', + tenant_id=self._tenant_id, + subnet_id=subnet['id'], + address=TESTVIP_IP, + port_id=TESTVIP_PORT_ID, + pool_id=pool['id'], + protocol='HTTP', + protocol_port=80, + connection_limit=1000, + admin_state_up=True, + status='PENDING_CREATE', + status_description='') + return vip_obj + + def _build_expectedvip_contents(self, testvip, subnet): + expectedvip = dict(id=testvip['id'], + name=testvip['name'], + description=testvip['description'], + tenant_id=testvip['tenant_id'], + subnet_id=testvip['subnet_id'], + address=testvip['address'], + network_id=subnet['network_id'], + port_id=testvip['port_id'], + pool_id=testvip['pool_id'], + protocol=testvip['protocol'], + protocol_port=testvip['protocol_port'], + connection_limit=testvip['connection_limit'], + admin_state_up=testvip['admin_state_up']) + return expectedvip + + def _build_updated_testvip_contents(self, testvip, subnet, pool): + #update some updateable fields of the vip + testvip['name'] = 'udpated testvip' + testvip['description'] = 'An updated version of test vip' + testvip['connection_limit'] = 2000 + return testvip + + def _build_updated_expectedvip_contents(self, testvip, subnet, pool): + expectedvip = dict(name=testvip['name'], + description=testvip['description'], + connection_limit=testvip['connection_limit'], + admin_state_up=testvip['admin_state_up'], + pool_id=testvip['pool_id']) + return expectedvip + + def _build_testpool_contents(self, subnet): + pool_obj = dict(id=TESTPOOL_ID, + name='testpool', + description='a test pool', + tenant_id=self._tenant_id, + subnet_id=subnet['id'], + protocol='HTTP', + vip_id=None, + admin_state_up=True, + lb_method='ROUND_ROBIN', + status='PENDING_CREATE', + status_description='', + members=[], + health_monitors=[], + health_monitors_status=None, + 
provider=LBAAS_PROVIDER_NAME) + return pool_obj + + def _build_expectedpool_contents(self, testpool, subnet): + expectedpool = dict(id=testpool['id'], + name=testpool['name'], + description=testpool['description'], + tenant_id=testpool['tenant_id'], + subnet_id=testpool['subnet_id'], + network_id=subnet['network_id'], + protocol=testpool['protocol'], + vip_id=testpool['vip_id'], + lb_method=testpool['lb_method'], + snat_ip=TESTPOOL_SNATIP_ADDRESS, + port_id=TESTPOOL_PORT_ID, + admin_state_up=testpool['admin_state_up']) + return expectedpool + + def _build_updated_testpool_contents(self, testpool, subnet): + updated_pool = dict(testpool.items()) + updated_pool['name'] = 'udpated testpool' + updated_pool['description'] = 'An updated version of test pool' + updated_pool['lb_method'] = 'LEAST_CONNECTIONS' + updated_pool['admin_state_up'] = True + updated_pool['provider'] = LBAAS_PROVIDER_NAME + updated_pool['status'] = 'PENDING_UPDATE' + updated_pool['status_description'] = '' + updated_pool['members'] = [] + updated_pool["health_monitors"] = [] + updated_pool["health_monitors_status"] = None + return updated_pool + + def _build_updated_expectedpool_contents(self, testpool, subnet): + expectedpool = dict(name=testpool['name'], + description=testpool['description'], + lb_method=testpool['lb_method'], + admin_state_up=testpool['admin_state_up']) + return expectedpool + + def _build_testmember_contents(self, pool): + member_obj = dict( + id=TESTMEMBER_ID, + tenant_id=self._tenant_id, + pool_id=pool['id'], + address=TESTMEMBER_IP, + protocol_port=8080, + weight=2, + admin_state_up=True, + status='PENDING_CREATE', + status_description='') + return member_obj + + def _build_expectedmember_contents(self, testmember): + expectedmember = dict( + id=testmember['id'], + tenant_id=testmember['tenant_id'], + pool_id=testmember['pool_id'], + address=testmember['address'], + protocol_port=testmember['protocol_port'], + weight=testmember['weight'], + 
admin_state_up=testmember['admin_state_up']) + return expectedmember + + def _build_updated_testmember_contents(self, testmember): + updated_member = dict(testmember.items()) + updated_member.update( + weight=3, + admin_state_up=True, + status='PENDING_CREATE', + status_description='' + ) + return updated_member + + def _build_updated_expectedmember_contents(self, testmember): + expectedmember = dict(weight=testmember['weight'], + pool_id=testmember['pool_id'], + admin_state_up=testmember['admin_state_up']) + return expectedmember + + def _build_testhealthmonitor_contents(self, pool): + monitor_obj = dict( + id=TESTMONITOR_ID, + tenant_id=self._tenant_id, + type='TCP', + delay=10, + timeout=5, + max_retries=3, + admin_state_up=True, + pools=[]) + pool_obj = dict(status='PENDING_CREATE', + status_description=None, + pool_id=pool['id']) + monitor_obj['pools'].append(pool_obj) + return monitor_obj + + def _build_expectedhealthmonitor_contents(self, testhealthmonitor): + expectedmonitor = dict(id=testhealthmonitor['id'], + tenant_id=testhealthmonitor['tenant_id'], + type=testhealthmonitor['type'], + delay=testhealthmonitor['delay'], + timeout=testhealthmonitor['timeout'], + max_retries=testhealthmonitor['max_retries'], + admin_state_up=( + testhealthmonitor['admin_state_up'])) + return expectedmonitor + + def _build_updated_testhealthmonitor_contents(self, testmonitor): + updated_monitor = dict(testmonitor.items()) + updated_monitor.update( + delay=30, + timeout=3, + max_retries=5, + admin_state_up=True + ) + return updated_monitor + + def _build_updated_expectedhealthmonitor_contents(self, testmonitor): + expectedmonitor = dict(delay=testmonitor['delay'], + timeout=testmonitor['timeout'], + max_retries=testmonitor['max_retries'], + admin_state_up=testmonitor['admin_state_up']) + return expectedmonitor + + def _mock_update_status(self): + #patch the plugin's update_status() method with a mock object + self.mock_update_status_patcher = mock.patch.object( + 
self.driver.plugin, + 'update_status') + self.mock_update_status_obj = self.mock_update_status_patcher.start() + + +def mock_create_resource_func(*args, **kwargs): + return 201, {} + + +def mock_update_resource_func(*args, **kwargs): + return 202, {} + + +def mock_retrieve_resource_func(*args, **kwargs): + return 200, {} + + +def mock_remove_resource_func(*args, **kwargs): + return 200, {} diff --git a/neutron/tests/unit/services/loadbalancer/drivers/radware/__init__.py b/neutron/tests/unit/services/loadbalancer/drivers/radware/__init__.py new file mode 100644 index 000000000..e708e47db --- /dev/null +++ b/neutron/tests/unit/services/loadbalancer/drivers/radware/__init__.py @@ -0,0 +1,15 @@ +# copyright 2013 Radware LTD. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Avishay Balderman, Radware diff --git a/neutron/tests/unit/services/loadbalancer/drivers/radware/test_plugin_driver.py b/neutron/tests/unit/services/loadbalancer/drivers/radware/test_plugin_driver.py new file mode 100644 index 000000000..ca6080e1f --- /dev/null +++ b/neutron/tests/unit/services/loadbalancer/drivers/radware/test_plugin_driver.py @@ -0,0 +1,961 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013 Radware LTD. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Avishay Balderman, Radware + +import re + +import contextlib +import mock +from oslo.config import cfg +from six.moves import queue as Queue + +from neutron import context +from neutron.extensions import loadbalancer +from neutron import manager +from neutron.openstack.common import jsonutils as json +from neutron.plugins.common import constants +from neutron.services.loadbalancer.drivers.radware import driver +from neutron.services.loadbalancer.drivers.radware import exceptions as r_exc +from neutron.tests.unit.db.loadbalancer import test_db_loadbalancer + +GET_200 = ('/api/workflow/', '/api/service/', '/api/workflowTemplate') +SERVER_DOWN_CODES = (-1, 301, 307) + + +class QueueMock(Queue.Queue): + def __init__(self, completion_handler): + self.completion_handler = completion_handler + super(QueueMock, self).__init__() + + def put_nowait(self, oper): + self.completion_handler(oper) + + +def _recover_function_mock(action, resource, data, headers, binary=False): + pass + + +def rest_call_function_mock(action, resource, data, headers, binary=False): + if rest_call_function_mock.RESPOND_WITH_ERROR: + return 400, 'error_status', 'error_description', None + if rest_call_function_mock.RESPOND_WITH_SERVER_DOWN in SERVER_DOWN_CODES: + val = rest_call_function_mock.RESPOND_WITH_SERVER_DOWN + return val, 'error_status', 'error_description', None + if action == 'GET': + return _get_handler(resource) + elif action == 'DELETE': + return _delete_handler(resource) + elif action == 'POST': + return _post_handler(resource, binary) + else: + return 0, None, 
None, None + + +def _get_handler(resource): + if resource == GET_200[2]: + if rest_call_function_mock.TEMPLATES_MISSING: + data = json.loads('[]') + else: + data = json.loads( + '[{"name":"openstack_l2_l3"},{"name":"openstack_l4"}]' + ) + return 200, '', '', data + + if resource in GET_200: + return 200, '', '', '' + else: + data = json.loads('{"complete":"True", "success": "True"}') + return 202, '', '', data + + +def _delete_handler(resource): + return 404, '', '', {'message': 'Not Found'} + + +def _post_handler(resource, binary): + if re.search(r'/api/workflow/.+/action/.+', resource): + data = json.loads('{"uri":"some_uri"}') + return 202, '', '', data + elif re.search(r'/api/service\?name=.+', resource): + data = json.loads('{"links":{"actions":{"provision":"someuri"}}}') + return 201, '', '', data + elif binary: + return 201, '', '', '' + else: + return 202, '', '', '' + +RADWARE_PROVIDER = ('LOADBALANCER:radware:neutron.services.' + 'loadbalancer.drivers.radware.driver.' + 'LoadBalancerDriver:default') + + +class TestLoadBalancerPluginBase( + test_db_loadbalancer.LoadBalancerPluginDbTestCase): + + def setUp(self): + super(TestLoadBalancerPluginBase, self).setUp( + lbaas_provider=RADWARE_PROVIDER) + + loaded_plugins = manager.NeutronManager().get_service_plugins() + self.plugin_instance = loaded_plugins[constants.LOADBALANCER] + + +class TestLoadBalancerPlugin(TestLoadBalancerPluginBase): + def setUp(self): + super(TestLoadBalancerPlugin, self).setUp() + + rest_call_function_mock.__dict__.update( + {'RESPOND_WITH_ERROR': False}) + rest_call_function_mock.__dict__.update( + {'TEMPLATES_MISSING': False}) + rest_call_function_mock.__dict__.update( + {'RESPOND_WITH_SERVER_DOWN': 200}) + + self.operation_completer_start_mock = mock.Mock( + return_value=None) + self.operation_completer_join_mock = mock.Mock( + return_value=None) + self.driver_rest_call_mock = mock.Mock( + side_effect=rest_call_function_mock) + self.flip_servers_mock = mock.Mock( + 
return_value=None) + self.recover_mock = mock.Mock( + side_effect=_recover_function_mock) + + radware_driver = self.plugin_instance.drivers['radware'] + radware_driver.completion_handler.start = ( + self.operation_completer_start_mock) + radware_driver.completion_handler.join = ( + self.operation_completer_join_mock) + self.orig_call = radware_driver.rest_client.call + self.orig__call = radware_driver.rest_client._call + radware_driver.rest_client.call = self.driver_rest_call_mock + radware_driver.rest_client._call = self.driver_rest_call_mock + radware_driver.rest_client._flip_servers = self.flip_servers_mock + radware_driver.rest_client._recover = self.recover_mock + radware_driver.completion_handler.rest_client.call = ( + self.driver_rest_call_mock) + + radware_driver.queue = QueueMock( + radware_driver.completion_handler.handle_operation_completion) + + self.addCleanup(radware_driver.completion_handler.join) + + def test_rest_client_recover_was_called(self): + """Call the real REST client and verify _recover is called.""" + radware_driver = self.plugin_instance.drivers['radware'] + radware_driver.rest_client.call = self.orig_call + radware_driver.rest_client._call = self.orig__call + self.assertRaises(r_exc.RESTRequestFailure, + radware_driver._verify_workflow_templates) + self.recover_mock.assert_called_once() + + def test_rest_client_flip_servers(self): + radware_driver = self.plugin_instance.drivers['radware'] + server = radware_driver.rest_client.server + sec_server = radware_driver.rest_client.secondary_server + radware_driver.rest_client._flip_servers() + self.assertEqual(server, + radware_driver.rest_client.secondary_server) + self.assertEqual(sec_server, + radware_driver.rest_client.server) + + def test_verify_workflow_templates_server_down(self): + """Test the rest call failure when backend is down.""" + for value in SERVER_DOWN_CODES: + rest_call_function_mock.__dict__.update( + {'RESPOND_WITH_SERVER_DOWN': value}) + 
self.assertRaises(r_exc.RESTRequestFailure, + self.plugin_instance.drivers['radware']. + _verify_workflow_templates) + + def test_verify_workflow_templates(self): + """Test the rest call failure handling by Exception raising.""" + rest_call_function_mock.__dict__.update( + {'TEMPLATES_MISSING': True}) + + self.assertRaises(r_exc.WorkflowMissing, + self.plugin_instance.drivers['radware']. + _verify_workflow_templates) + + def test_create_vip_failure(self): + """Test the rest call failure handling by Exception raising.""" + with self.network(do_delete=False) as network: + with self.subnet(network=network, do_delete=False) as subnet: + with self.pool(no_delete=True, + provider='radware', + subnet_id=subnet['subnet']['id']) as pool: + vip_data = { + 'name': 'vip1', + 'subnet_id': subnet['subnet']['id'], + 'pool_id': pool['pool']['id'], + 'description': '', + 'protocol_port': 80, + 'protocol': 'HTTP', + 'connection_limit': -1, + 'admin_state_up': True, + 'status': constants.PENDING_CREATE, + 'tenant_id': self._tenant_id, + 'session_persistence': '' + } + + rest_call_function_mock.__dict__.update( + {'RESPOND_WITH_ERROR': True}) + + self.assertRaises(r_exc.RESTRequestFailure, + self.plugin_instance.create_vip, + context.get_admin_context(), + {'vip': vip_data}) + + def test_create_vip(self): + with self.subnet() as subnet: + with self.pool(provider='radware', + subnet_id=subnet['subnet']['id']) as pool: + vip_data = { + 'name': 'vip1', + 'subnet_id': subnet['subnet']['id'], + 'pool_id': pool['pool']['id'], + 'description': '', + 'protocol_port': 80, + 'protocol': 'HTTP', + 'connection_limit': -1, + 'admin_state_up': True, + 'status': constants.PENDING_CREATE, + 'tenant_id': self._tenant_id, + 'session_persistence': '' + } + + vip = self.plugin_instance.create_vip( + context.get_admin_context(), {'vip': vip_data}) + + # Test creation REST calls + calls = [ + mock.call('GET', u'/api/service/srv_' + + subnet['subnet']['network_id'], None, None), + mock.call('POST', 
u'/api/service?name=srv_' + + subnet['subnet']['network_id'] + '&tenant=' + + vip['tenant_id'], mock.ANY, + driver.CREATE_SERVICE_HEADER), + mock.call('GET', u'/api/workflow/l2_l3_' + + subnet['subnet']['network_id'], None, None), + mock.call('POST', '/api/workflow/l2_l3_' + + subnet['subnet']['network_id'] + + '/action/setup_l2_l3', + mock.ANY, driver.TEMPLATE_HEADER), + mock.call('POST', 'someuri', + None, driver.PROVISION_HEADER), + + + mock.call('POST', '/api/workflowTemplate/' + + 'openstack_l4' + + '?name=' + pool['pool']['id'], + mock.ANY, + driver.TEMPLATE_HEADER), + mock.call('POST', '/api/workflowTemplate/' + + 'openstack_l2_l3' + + '?name=l2_l3_' + subnet['subnet']['network_id'], + mock.ANY, + driver.TEMPLATE_HEADER), + + mock.call('POST', '/api/workflow/' + pool['pool']['id'] + + '/action/BaseCreate', + mock.ANY, driver.TEMPLATE_HEADER), + mock.call('GET', '/api/workflow/' + + pool['pool']['id'], None, None) + ] + self.driver_rest_call_mock.assert_has_calls(calls, + any_order=True) + + #Test DB + new_vip = self.plugin_instance.get_vip( + context.get_admin_context(), + vip['id'] + ) + self.assertEqual(new_vip['status'], constants.ACTIVE) + + # Delete VIP + self.plugin_instance.delete_vip( + context.get_admin_context(), vip['id']) + + # Test deletion REST calls + calls = [ + mock.call('DELETE', u'/api/workflow/' + pool['pool']['id'], + None, None) + ] + self.driver_rest_call_mock.assert_has_calls( + calls, any_order=True) + + def test_create_vip_2_leg(self): + """Test creation of a VIP where Alteon VIP and PIP are different.""" + + with self.subnet(cidr='10.0.0.0/24') as subnet: + with self.subnet(cidr='10.0.1.0/24') as pool_sub: + with self.pool(provider='radware', + subnet_id=pool_sub['subnet']['id']) as pool: + vip_data = { + 'name': 'vip1', + 'subnet_id': subnet['subnet']['id'], + 'pool_id': pool['pool']['id'], + 'description': '', + 'protocol_port': 80, + 'protocol': 'HTTP', + 'connection_limit': -1, + 'admin_state_up': True, + 'status': 
constants.PENDING_CREATE, + 'tenant_id': self._tenant_id, + 'session_persistence': '' + } + + vip = self.plugin_instance.create_vip( + context.get_admin_context(), {'vip': vip_data}) + name_suffix = '%s_%s' % (subnet['subnet']['network_id'], + pool_sub['subnet']['network_id']) + # Test creation REST calls + calls = [ + mock.call('GET', '/api/workflowTemplate', None, None), + mock.call('GET', '/api/service/srv_' + name_suffix, + None, None), + mock.call('POST', '/api/service?name=srv_' + + name_suffix + '&tenant=' + vip['tenant_id'], + mock.ANY, driver.CREATE_SERVICE_HEADER), + mock.call('POST', 'someuri', + None, driver.PROVISION_HEADER), + mock.call('GET', '/api/workflow/l2_l3_' + name_suffix, + None, None), + mock.call('POST', '/api/workflowTemplate/' + + 'openstack_l2_l3' + + '?name=l2_l3_' + name_suffix, + mock.ANY, + driver.TEMPLATE_HEADER), + mock.call('POST', '/api/workflow/l2_l3_' + + name_suffix + '/action/setup_l2_l3', + mock.ANY, driver.TEMPLATE_HEADER), + mock.call('GET', '/api/workflow/' + + pool['pool']['id'], None, None), + mock.call('POST', '/api/workflowTemplate/' + + 'openstack_l4' + + '?name=' + pool['pool']['id'], + mock.ANY, + driver.TEMPLATE_HEADER), + mock.call('POST', '/api/workflow/' + + pool['pool']['id'] + '/action/BaseCreate', + mock.ANY, driver.TEMPLATE_HEADER) + ] + self.driver_rest_call_mock.assert_has_calls(calls) + #Test DB + new_vip = self.plugin_instance.get_vip( + context.get_admin_context(), + vip['id'] + ) + self.assertEqual(new_vip['status'], constants.ACTIVE) + + # Test that PIP neutron port was created + pip_port_filter = { + 'name': ['pip_' + vip['id']], + } + plugin = manager.NeutronManager.get_plugin() + num_ports = plugin.get_ports_count( + context.get_admin_context(), filters=pip_port_filter) + self.assertTrue(num_ports > 0) + + # Delete VIP + self.plugin_instance.delete_vip( + context.get_admin_context(), vip['id']) + + # Test deletion REST calls + calls = [ + mock.call('DELETE', u'/api/workflow/' + + 
pool['pool']['id'], None, None) + ] + self.driver_rest_call_mock.assert_has_calls(calls) + + def test_update_vip(self): + with self.subnet() as subnet: + with self.pool(provider='radware', + no_delete=True, + subnet_id=subnet['subnet']['id']) as pool: + vip_data = { + 'name': 'vip1', + 'subnet_id': subnet['subnet']['id'], + 'pool_id': pool['pool']['id'], + 'description': '', + 'protocol_port': 80, + 'protocol': 'HTTP', + 'connection_limit': -1, + 'admin_state_up': True, + 'status': constants.PENDING_CREATE, + 'tenant_id': self._tenant_id, + 'session_persistence': '' + } + + vip = self.plugin_instance.create_vip( + context.get_admin_context(), {'vip': vip_data}) + + vip_data['status'] = constants.PENDING_UPDATE + self.plugin_instance.update_vip( + context.get_admin_context(), + vip['id'], {'vip': vip_data}) + + # Test REST calls + calls = [ + mock.call('POST', '/api/workflow/' + pool['pool']['id'] + + '/action/BaseCreate', + mock.ANY, driver.TEMPLATE_HEADER), + ] + self.driver_rest_call_mock.assert_has_calls( + calls, any_order=True) + + updated_vip = self.plugin_instance.get_vip( + context.get_admin_context(), vip['id']) + self.assertEqual(updated_vip['status'], constants.ACTIVE) + + # delete VIP + self.plugin_instance.delete_vip( + context.get_admin_context(), vip['id']) + + def test_update_vip_2_leg(self): + """Test update of a VIP where Alteon VIP and PIP are different.""" + + with self.subnet(cidr='10.0.0.0/24') as subnet: + with self.subnet(cidr='10.0.1.0/24') as pool_subnet: + with self.pool(provider='radware', + subnet_id=pool_subnet['subnet']['id']) as pool: + vip_data = { + 'name': 'vip1', + 'subnet_id': subnet['subnet']['id'], + 'pool_id': pool['pool']['id'], + 'description': '', + 'protocol_port': 80, + 'protocol': 'HTTP', + 'connection_limit': -1, + 'admin_state_up': True, + 'status': constants.PENDING_CREATE, + 'tenant_id': self._tenant_id, + 'session_persistence': '' + } + + vip = self.plugin_instance.create_vip( + context.get_admin_context(), {'vip': 
vip_data}) + + self.plugin_instance.update_vip( + context.get_admin_context(), + vip['id'], {'vip': vip_data}) + + # Test REST calls + calls = [ + mock.call('POST', '/api/workflow/' + + pool['pool']['id'] + '/action/BaseCreate', + mock.ANY, driver.TEMPLATE_HEADER), + ] + self.driver_rest_call_mock.assert_has_calls(calls) + + updated_vip = self.plugin_instance.get_vip( + context.get_admin_context(), vip['id']) + self.assertEqual(updated_vip['status'], constants.ACTIVE) + + # delete VIP + self.plugin_instance.delete_vip( + context.get_admin_context(), vip['id']) + + def test_delete_vip_failure(self): + plugin = self.plugin_instance + + with self.network(do_delete=False) as network: + with self.subnet(network=network, do_delete=False) as subnet: + with self.pool(no_delete=True, + provider='radware', + subnet_id=subnet['subnet']['id']) as pool: + with contextlib.nested( + self.member(pool_id=pool['pool']['id'], + no_delete=True), + self.member(pool_id=pool['pool']['id'], + address='192.168.1.101', + no_delete=True), + self.health_monitor(no_delete=True), + self.vip(pool=pool, subnet=subnet, no_delete=True) + ) as (mem1, mem2, hm, vip): + + plugin.create_pool_health_monitor( + context.get_admin_context(), hm, pool['pool']['id'] + ) + + rest_call_function_mock.__dict__.update( + {'RESPOND_WITH_ERROR': True}) + + plugin.delete_vip( + context.get_admin_context(), vip['vip']['id']) + + u_vip = plugin.get_vip( + context.get_admin_context(), vip['vip']['id']) + u_pool = plugin.get_pool( + context.get_admin_context(), pool['pool']['id']) + u_mem1 = plugin.get_member( + context.get_admin_context(), mem1['member']['id']) + u_mem2 = plugin.get_member( + context.get_admin_context(), mem2['member']['id']) + u_phm = plugin.get_pool_health_monitor( + context.get_admin_context(), + hm['health_monitor']['id'], pool['pool']['id']) + + self.assertEqual(u_vip['status'], constants.ERROR) + self.assertEqual(u_pool['status'], constants.ACTIVE) + self.assertEqual(u_mem1['status'], 
constants.ACTIVE) + self.assertEqual(u_mem2['status'], constants.ACTIVE) + self.assertEqual(u_phm['status'], constants.ACTIVE) + + def test_delete_vip(self): + with self.subnet() as subnet: + with self.pool(provider='radware', + no_delete=True, + subnet_id=subnet['subnet']['id']) as pool: + vip_data = { + 'name': 'vip1', + 'subnet_id': subnet['subnet']['id'], + 'pool_id': pool['pool']['id'], + 'description': '', + 'protocol_port': 80, + 'protocol': 'HTTP', + 'connection_limit': -1, + 'admin_state_up': True, + 'status': constants.PENDING_CREATE, + 'tenant_id': self._tenant_id, + 'session_persistence': '' + } + + vip = self.plugin_instance.create_vip( + context.get_admin_context(), {'vip': vip_data}) + + self.plugin_instance.delete_vip( + context.get_admin_context(), vip['id']) + + calls = [ + mock.call('DELETE', '/api/workflow/' + pool['pool']['id'], + None, None) + ] + self.driver_rest_call_mock.assert_has_calls( + calls, any_order=True) + + self.assertRaises(loadbalancer.VipNotFound, + self.plugin_instance.get_vip, + context.get_admin_context(), vip['id']) + + def test_delete_vip_2_leg(self): + """Test deletion of a VIP where Alteon VIP and PIP are different.""" + + self.driver_rest_call_mock.reset_mock() + with self.subnet(cidr='10.0.0.0/24') as subnet: + with self.subnet(cidr='10.0.1.0/24') as pool_subnet: + with self.pool(provider='radware', + no_delete=True, + subnet_id=pool_subnet['subnet']['id']) as pool: + vip_data = { + 'name': 'vip1', + 'subnet_id': subnet['subnet']['id'], + 'pool_id': pool['pool']['id'], + 'description': '', + 'protocol_port': 80, + 'protocol': 'HTTP', + 'connection_limit': -1, + 'admin_state_up': True, + 'status': constants.PENDING_CREATE, + 'tenant_id': self._tenant_id, + 'session_persistence': '' + } + + vip = self.plugin_instance.create_vip( + context.get_admin_context(), {'vip': vip_data}) + + self.plugin_instance.delete_vip( + context.get_admin_context(), vip['id']) + + calls = [ + mock.call('DELETE', '/api/workflow/' + + 
pool['pool']['id'], None, None) + ] + self.driver_rest_call_mock.assert_has_calls(calls) + + # Test that PIP neutron port was deleted + pip_port_filter = { + 'name': ['pip_' + vip['id']], + } + plugin = manager.NeutronManager.get_plugin() + num_ports = plugin.get_ports_count( + context.get_admin_context(), filters=pip_port_filter) + self.assertTrue(num_ports == 0) + + self.assertRaises(loadbalancer.VipNotFound, + self.plugin_instance.get_vip, + context.get_admin_context(), vip['id']) + + def test_update_pool(self): + with self.subnet(): + with self.pool() as pool: + del pool['pool']['provider'] + del pool['pool']['status'] + self.plugin_instance.update_pool( + context.get_admin_context(), + pool['pool']['id'], pool) + pool_db = self.plugin_instance.get_pool( + context.get_admin_context(), pool['pool']['id']) + self.assertEqual(pool_db['status'], constants.PENDING_UPDATE) + + def test_delete_pool_with_vip(self): + with self.subnet() as subnet: + with self.pool(provider='radware', + no_delete=True, + subnet_id=subnet['subnet']['id']) as pool: + with self.vip(pool=pool, subnet=subnet): + self.assertRaises(loadbalancer.PoolInUse, + self.plugin_instance.delete_pool, + context.get_admin_context(), + pool['pool']['id']) + + def test_create_member_with_vip(self): + with self.subnet() as subnet: + with self.pool(provider='radware', + subnet_id=subnet['subnet']['id']) as p: + with self.vip(pool=p, subnet=subnet): + with self.member(pool_id=p['pool']['id']): + calls = [ + mock.call( + 'POST', '/api/workflow/' + p['pool']['id'] + + '/action/BaseCreate', + mock.ANY, driver.TEMPLATE_HEADER + ), + mock.call( + 'POST', '/api/workflow/' + p['pool']['id'] + + '/action/BaseCreate', + mock.ANY, driver.TEMPLATE_HEADER + ) + ] + self.driver_rest_call_mock.assert_has_calls( + calls, any_order=True) + + def test_create_member_on_different_subnets(self): + with contextlib.nested( + self.subnet(), + self.subnet(cidr='20.0.0.0/24'), + self.subnet(cidr='30.0.0.0/24') + ) as (vip_sub, 
pool_sub, member_sub): + with self.pool(provider='radware', + subnet_id=pool_sub['subnet']['id']) as pool: + with contextlib.nested( + self.port(subnet=vip_sub, + fixed_ips=[{'ip_address': '10.0.0.2'}]), + self.port(subnet=pool_sub, + fixed_ips=[{'ip_address': '20.0.0.2'}]), + self.port(subnet=member_sub, + fixed_ips=[{'ip_address': '30.0.0.2'}]) + ): + with contextlib.nested( + self.member(pool_id=pool['pool']['id'], + address='10.0.0.2'), + self.member(pool_id=pool['pool']['id'], + address='20.0.0.2'), + self.member(pool_id=pool['pool']['id'], + address='30.0.0.2') + ) as (member_vip, member_pool, member_out): + with self.vip(pool=pool, subnet=vip_sub): + calls = [ + mock.call( + 'POST', '/api/workflow/' + + pool['pool']['id'] + + '/action/BaseCreate', + mock.ANY, driver.TEMPLATE_HEADER + ) + ] + self.driver_rest_call_mock.assert_has_calls( + calls, any_order=True) + + mock_calls = self.driver_rest_call_mock.mock_calls + params = mock_calls[-2][1][2]['parameters'] + member_subnet_array = params['member_subnet_array'] + member_mask_array = params['member_mask_array'] + member_gw_array = params['member_gw_array'] + self.assertEqual(member_subnet_array, + ['10.0.0.0', + '255.255.255.255', + '30.0.0.0']) + self.assertEqual(member_mask_array, + ['255.255.255.0', + '255.255.255.255', + '255.255.255.0']) + self.assertEqual( + member_gw_array, + [pool_sub['subnet']['gateway_ip'], + '255.255.255.255', + pool_sub['subnet']['gateway_ip']]) + + def test_create_member_on_different_subnet_no_port(self): + with contextlib.nested( + self.subnet(), + self.subnet(cidr='20.0.0.0/24'), + self.subnet(cidr='30.0.0.0/24') + ) as (vip_sub, pool_sub, member_sub): + with self.pool(provider='radware', + subnet_id=pool_sub['subnet']['id']) as pool: + with self.member(pool_id=pool['pool']['id'], + address='30.0.0.2'): + with self.vip(pool=pool, subnet=vip_sub): + calls = [ + mock.call( + 'POST', '/api/workflow/' + + pool['pool']['id'] + + '/action/BaseCreate', + mock.ANY, 
driver.TEMPLATE_HEADER + ) + ] + self.driver_rest_call_mock.assert_has_calls( + calls, any_order=True) + + mock_calls = self.driver_rest_call_mock.mock_calls + params = mock_calls[-2][1][2]['parameters'] + member_subnet_array = params['member_subnet_array'] + member_mask_array = params['member_mask_array'] + member_gw_array = params['member_gw_array'] + self.assertEqual(member_subnet_array, + ['30.0.0.2']) + self.assertEqual(member_mask_array, + ['255.255.255.255']) + self.assertEqual(member_gw_array, + [pool_sub['subnet']['gateway_ip']]) + + def test_create_member_on_different_subnet_multiple_ports(self): + cfg.CONF.set_override("allow_overlapping_ips", 'true') + with self.network() as other_net: + with contextlib.nested( + self.subnet(), + self.subnet(cidr='20.0.0.0/24'), + self.subnet(cidr='30.0.0.0/24'), + self.subnet(network=other_net, cidr='30.0.0.0/24') + ) as (vip_sub, pool_sub, member_sub1, member_sub2): + with self.pool(provider='radware', + subnet_id=pool_sub['subnet']['id']) as pool: + with contextlib.nested( + self.port(subnet=member_sub1, + fixed_ips=[{'ip_address': '30.0.0.2'}]), + self.port(subnet=member_sub2, + fixed_ips=[{'ip_address': '30.0.0.2'}])): + with self.member(pool_id=pool['pool']['id'], + address='30.0.0.2'): + with self.vip(pool=pool, subnet=vip_sub): + calls = [ + mock.call( + 'POST', '/api/workflow/' + + pool['pool']['id'] + + '/action/BaseCreate', + mock.ANY, driver.TEMPLATE_HEADER + ) + ] + self.driver_rest_call_mock.assert_has_calls( + calls, any_order=True) + + calls = self.driver_rest_call_mock.mock_calls + params = calls[-2][1][2]['parameters'] + m_sub_array = params['member_subnet_array'] + m_mask_array = params['member_mask_array'] + m_gw_array = params['member_gw_array'] + self.assertEqual(m_sub_array, + ['30.0.0.2']) + self.assertEqual(m_mask_array, + ['255.255.255.255']) + self.assertEqual( + m_gw_array, + [pool_sub['subnet']['gateway_ip']]) + + def test_update_member_with_vip(self): + with self.subnet() as subnet: + with 
self.pool(provider='radware', + subnet_id=subnet['subnet']['id']) as p: + with self.member(pool_id=p['pool']['id']) as member: + with self.vip(pool=p, subnet=subnet): + self.plugin_instance.update_member( + context.get_admin_context(), + member['member']['id'], member + ) + calls = [ + mock.call( + 'POST', '/api/workflow/' + p['pool']['id'] + + '/action/BaseCreate', + mock.ANY, driver.TEMPLATE_HEADER + ), + mock.call( + 'POST', '/api/workflow/' + p['pool']['id'] + + '/action/BaseCreate', + mock.ANY, driver.TEMPLATE_HEADER + ) + ] + self.driver_rest_call_mock.assert_has_calls( + calls, any_order=True) + + updated_member = self.plugin_instance.get_member( + context.get_admin_context(), + member['member']['id'] + ) + + updated_member = self.plugin_instance.get_member( + context.get_admin_context(), + member['member']['id'] + ) + self.assertEqual(updated_member['status'], + constants.ACTIVE) + + def test_update_member_without_vip(self): + with self.subnet(): + with self.pool(provider='radware') as pool: + with self.member(pool_id=pool['pool']['id']) as member: + member['member']['status'] = constants.PENDING_UPDATE + updated_member = self.plugin_instance.update_member( + context.get_admin_context(), + member['member']['id'], member + ) + self.assertEqual(updated_member['status'], + constants.PENDING_UPDATE) + + def test_delete_member_with_vip(self): + with self.subnet() as subnet: + with self.pool(provider='radware', + subnet_id=subnet['subnet']['id']) as p: + with self.member(pool_id=p['pool']['id'], + no_delete=True) as m: + with self.vip(pool=p, subnet=subnet): + + # Reset mock and + # wait for being sure the member + # Changed status from PENDING-CREATE + # to ACTIVE + + self.plugin_instance.delete_member( + context.get_admin_context(), + m['member']['id'] + ) + + name, args, kwargs = ( + self.driver_rest_call_mock.mock_calls[-2] + ) + deletion_post_graph = str(args[2]) + + self.assertTrue(re.search( + r'.*\'member_address_array\': \[\].*', + deletion_post_graph + 
)) + + calls = [ + mock.call( + 'POST', '/api/workflow/' + p['pool']['id'] + + '/action/BaseCreate', + mock.ANY, driver.TEMPLATE_HEADER + ) + ] + self.driver_rest_call_mock.assert_has_calls( + calls, any_order=True) + + self.assertRaises(loadbalancer.MemberNotFound, + self.plugin_instance.get_member, + context.get_admin_context(), + m['member']['id']) + + def test_delete_member_without_vip(self): + with self.subnet(): + with self.pool(provider='radware') as p: + with self.member(pool_id=p['pool']['id'], no_delete=True) as m: + self.plugin_instance.delete_member( + context.get_admin_context(), m['member']['id'] + ) + self.assertRaises(loadbalancer.MemberNotFound, + self.plugin_instance.get_member, + context.get_admin_context(), + m['member']['id']) + + def test_create_hm_with_vip(self): + with self.subnet() as subnet: + with self.health_monitor() as hm: + with self.pool(provider='radware', + subnet_id=subnet['subnet']['id']) as pool: + with self.vip(pool=pool, subnet=subnet): + + self.plugin_instance.create_pool_health_monitor( + context.get_admin_context(), + hm, pool['pool']['id'] + ) + + # Test REST calls + calls = [ + mock.call( + 'POST', '/api/workflow/' + pool['pool']['id'] + + '/action/BaseCreate', + mock.ANY, driver.TEMPLATE_HEADER + ), + mock.call( + 'POST', '/api/workflow/' + pool['pool']['id'] + + '/action/BaseCreate', + mock.ANY, driver.TEMPLATE_HEADER + ) + ] + self.driver_rest_call_mock.assert_has_calls( + calls, any_order=True) + + phm = self.plugin_instance.get_pool_health_monitor( + context.get_admin_context(), + hm['health_monitor']['id'], pool['pool']['id'] + ) + self.assertEqual(phm['status'], constants.ACTIVE) + + def test_delete_pool_hm_with_vip(self): + with self.subnet() as subnet: + with self.health_monitor(no_delete=True) as hm: + with self.pool(provider='radware', + subnet_id=subnet['subnet']['id']) as pool: + with self.vip(pool=pool, subnet=subnet): + self.plugin_instance.create_pool_health_monitor( + context.get_admin_context(), + hm, 
pool['pool']['id'] + ) + + self.plugin_instance.delete_pool_health_monitor( + context.get_admin_context(), + hm['health_monitor']['id'], + pool['pool']['id'] + ) + + name, args, kwargs = ( + self.driver_rest_call_mock.mock_calls[-2] + ) + deletion_post_graph = str(args[2]) + + self.assertTrue(re.search( + r'.*\'hm_uuid_array\': \[\].*', + deletion_post_graph + )) + + calls = [ + mock.call( + 'POST', '/api/workflow/' + pool['pool']['id'] + + '/action/BaseCreate', + mock.ANY, driver.TEMPLATE_HEADER + ) + ] + self.driver_rest_call_mock.assert_has_calls( + calls, any_order=True) + + self.assertRaises( + loadbalancer.PoolMonitorAssociationNotFound, + self.plugin_instance.get_pool_health_monitor, + context.get_admin_context(), + hm['health_monitor']['id'], + pool['pool']['id'] + ) diff --git a/neutron/tests/unit/services/loadbalancer/drivers/test_agent_driver_base.py b/neutron/tests/unit/services/loadbalancer/drivers/test_agent_driver_base.py new file mode 100644 index 000000000..6a9221306 --- /dev/null +++ b/neutron/tests/unit/services/loadbalancer/drivers/test_agent_driver_base.py @@ -0,0 +1,753 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013 New Dream Network, LLC (DreamHost) +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# +# @author: Mark McClain, DreamHost + +import contextlib + +import mock +from six import moves +from webob import exc + +from neutron import context +from neutron.db.loadbalancer import loadbalancer_db as ldb +from neutron.db import servicetype_db as st_db +from neutron.extensions import loadbalancer +from neutron.extensions import portbindings +from neutron import manager +from neutron.openstack.common import uuidutils +from neutron.plugins.common import constants +from neutron.services.loadbalancer.drivers.common import agent_driver_base +from neutron.tests import base +from neutron.tests.unit.db.loadbalancer import test_db_loadbalancer +from neutron.tests.unit import testlib_api + + +class TestLoadBalancerPluginBase( + test_db_loadbalancer.LoadBalancerPluginDbTestCase): + + def setUp(self): + def reset_device_driver(): + agent_driver_base.AgentDriverBase.device_driver = None + self.addCleanup(reset_device_driver) + + self.mock_importer = mock.patch.object( + agent_driver_base, 'importutils').start() + + # needed to reload provider configuration + st_db.ServiceTypeManager._instance = None + agent_driver_base.AgentDriverBase.device_driver = 'dummy' + super(TestLoadBalancerPluginBase, self).setUp( + lbaas_provider=('LOADBALANCER:lbaas:neutron.services.' + 'loadbalancer.drivers.common.agent_driver_base.' 
+ 'AgentDriverBase:default')) + + # we need access to loaded plugins to modify models + loaded_plugins = manager.NeutronManager().get_service_plugins() + + self.plugin_instance = loaded_plugins[constants.LOADBALANCER] + + +class TestLoadBalancerCallbacks(TestLoadBalancerPluginBase): + def setUp(self): + super(TestLoadBalancerCallbacks, self).setUp() + + self.callbacks = agent_driver_base.LoadBalancerCallbacks( + self.plugin_instance + ) + get_lbaas_agents_patcher = mock.patch( + 'neutron.services.loadbalancer.agent_scheduler' + '.LbaasAgentSchedulerDbMixin.get_lbaas_agents') + get_lbaas_agents_patcher.start() + + def test_get_ready_devices(self): + with self.vip() as vip: + with mock.patch('neutron.services.loadbalancer.agent_scheduler' + '.LbaasAgentSchedulerDbMixin.' + 'list_pools_on_lbaas_agent') as mock_agent_pools: + mock_agent_pools.return_value = { + 'pools': [{'id': vip['vip']['pool_id']}]} + ready = self.callbacks.get_ready_devices( + context.get_admin_context(), + ) + self.assertEqual(ready, [vip['vip']['pool_id']]) + + def test_get_ready_devices_multiple_vips_and_pools(self): + ctx = context.get_admin_context() + + # add 3 pools and 2 vips directly to DB + # to create 2 "ready" devices and one pool without vip + pools = [] + for i in moves.xrange(3): + pools.append(ldb.Pool(id=uuidutils.generate_uuid(), + subnet_id=self._subnet_id, + protocol="HTTP", + lb_method="ROUND_ROBIN", + status=constants.ACTIVE, + admin_state_up=True)) + ctx.session.add(pools[i]) + + vip0 = ldb.Vip(id=uuidutils.generate_uuid(), + protocol_port=80, + protocol="HTTP", + pool_id=pools[0].id, + status=constants.ACTIVE, + admin_state_up=True, + connection_limit=3) + ctx.session.add(vip0) + pools[0].vip_id = vip0.id + + vip1 = ldb.Vip(id=uuidutils.generate_uuid(), + protocol_port=80, + protocol="HTTP", + pool_id=pools[1].id, + status=constants.ACTIVE, + admin_state_up=True, + connection_limit=3) + ctx.session.add(vip1) + pools[1].vip_id = vip1.id + + ctx.session.flush() + + 
self.assertEqual(ctx.session.query(ldb.Pool).count(), 3) + self.assertEqual(ctx.session.query(ldb.Vip).count(), 2) + with mock.patch('neutron.services.loadbalancer.agent_scheduler' + '.LbaasAgentSchedulerDbMixin' + '.list_pools_on_lbaas_agent') as mock_agent_pools: + mock_agent_pools.return_value = {'pools': [{'id': pools[0].id}, + {'id': pools[1].id}, + {'id': pools[2].id}]} + ready = self.callbacks.get_ready_devices(ctx) + self.assertEqual(len(ready), 3) + self.assertIn(pools[0].id, ready) + self.assertIn(pools[1].id, ready) + self.assertIn(pools[2].id, ready) + # cleanup + ctx.session.query(ldb.Pool).delete() + ctx.session.query(ldb.Vip).delete() + + def test_get_ready_devices_inactive_vip(self): + with self.vip() as vip: + + # set the vip inactive need to use plugin directly since + # status is not tenant mutable + self.plugin_instance.update_vip( + context.get_admin_context(), + vip['vip']['id'], + {'vip': {'status': constants.INACTIVE}} + ) + with mock.patch('neutron.services.loadbalancer.agent_scheduler' + '.LbaasAgentSchedulerDbMixin.' + 'list_pools_on_lbaas_agent') as mock_agent_pools: + mock_agent_pools.return_value = { + 'pools': [{'id': vip['vip']['pool_id']}]} + ready = self.callbacks.get_ready_devices( + context.get_admin_context(), + ) + self.assertEqual([vip['vip']['pool_id']], ready) + + def test_get_ready_devices_inactive_pool(self): + with self.vip() as vip: + + # set the pool inactive need to use plugin directly since + # status is not tenant mutable + self.plugin_instance.update_pool( + context.get_admin_context(), + vip['vip']['pool_id'], + {'pool': {'status': constants.INACTIVE}} + ) + with mock.patch('neutron.services.loadbalancer.agent_scheduler' + '.LbaasAgentSchedulerDbMixin.' 
+ 'list_pools_on_lbaas_agent') as mock_agent_pools: + mock_agent_pools.return_value = { + 'pools': [{'id': vip['vip']['pool_id']}]} + ready = self.callbacks.get_ready_devices( + context.get_admin_context(), + ) + self.assertFalse(ready) + + def test_get_logical_device_non_active(self): + with self.pool() as pool: + ctx = context.get_admin_context() + for status in ('INACTIVE', 'PENDING_CREATE', 'PENDING_UPDATE'): + self.plugin_instance.update_status( + ctx, ldb.Pool, pool['pool']['id'], status) + pool['pool']['status'] = status + expected = { + 'pool': pool['pool'], + 'members': [], + 'healthmonitors': [], + 'driver': 'dummy' + } + + logical_config = self.callbacks.get_logical_device( + ctx, pool['pool']['id'] + ) + + self.assertEqual(expected, logical_config) + + def test_get_logical_device_active(self): + with self.pool() as pool: + with self.vip(pool=pool) as vip: + with self.member(pool_id=vip['vip']['pool_id']) as member: + ctx = context.get_admin_context() + # activate objects + self.plugin_instance.update_status( + ctx, ldb.Pool, pool['pool']['id'], 'ACTIVE') + self.plugin_instance.update_status( + ctx, ldb.Member, member['member']['id'], 'ACTIVE') + self.plugin_instance.update_status( + ctx, ldb.Vip, vip['vip']['id'], 'ACTIVE') + + # build the expected + port = self.plugin_instance._core_plugin.get_port( + ctx, vip['vip']['port_id'] + ) + subnet = self.plugin_instance._core_plugin.get_subnet( + ctx, vip['vip']['subnet_id'] + ) + port['fixed_ips'][0]['subnet'] = subnet + + # reload pool to add members and vip + pool = self.plugin_instance.get_pool( + ctx, pool['pool']['id'] + ) + + pool['status'] = constants.ACTIVE + vip['vip']['status'] = constants.ACTIVE + vip['vip']['port'] = port + member['member']['status'] = constants.ACTIVE + + expected = { + 'pool': pool, + 'vip': vip['vip'], + 'members': [member['member']], + 'healthmonitors': [], + 'driver': 'dummy' + } + + logical_config = self.callbacks.get_logical_device( + ctx, pool['id'] + ) + + 
self.assertEqual(logical_config, expected) + + def test_get_logical_device_inactive_member(self): + with self.pool() as pool: + with self.vip(pool=pool) as vip: + with self.member(pool_id=vip['vip']['pool_id']) as member: + ctx = context.get_admin_context() + self.plugin_instance.update_status(ctx, ldb.Pool, + pool['pool']['id'], + 'ACTIVE') + self.plugin_instance.update_status(ctx, ldb.Vip, + vip['vip']['id'], + 'ACTIVE') + self.plugin_instance.update_status(ctx, ldb.Member, + member['member']['id'], + 'INACTIVE') + + logical_config = self.callbacks.get_logical_device( + ctx, pool['pool']['id']) + + member['member']['status'] = constants.INACTIVE + self.assertEqual([member['member']], + logical_config['members']) + + def test_get_logical_device_pending_create_member(self): + with self.pool() as pool: + with self.vip(pool=pool) as vip: + with self.member(pool_id=vip['vip']['pool_id']) as member: + ctx = context.get_admin_context() + self.plugin_instance.update_status(ctx, ldb.Pool, + pool['pool']['id'], + 'ACTIVE') + self.plugin_instance.update_status(ctx, ldb.Vip, + vip['vip']['id'], + 'ACTIVE') + + member = self.plugin_instance.get_member( + ctx, member['member']['id']) + self.assertEqual('PENDING_CREATE', + member['status']) + logical_config = self.callbacks.get_logical_device( + ctx, pool['pool']['id']) + + self.assertEqual([member], logical_config['members']) + + def test_get_logical_device_pending_create_health_monitor(self): + with self.health_monitor() as monitor: + with self.pool() as pool: + with self.vip(pool=pool) as vip: + ctx = context.get_admin_context() + self.plugin_instance.update_status(ctx, ldb.Pool, + pool['pool']['id'], + 'ACTIVE') + self.plugin_instance.update_status(ctx, ldb.Vip, + vip['vip']['id'], + 'ACTIVE') + self.plugin_instance.create_pool_health_monitor( + ctx, monitor, pool['pool']['id']) + pool = self.plugin_instance.get_pool( + ctx, pool['pool']['id']) + monitor = self.plugin_instance.get_health_monitor( + ctx, 
monitor['health_monitor']['id']) + + self.assertEqual( + 'PENDING_CREATE', + pool['health_monitors_status'][0]['status']) + logical_config = self.callbacks.get_logical_device( + ctx, pool['id']) + + self.assertEqual([monitor], + logical_config['healthmonitors']) + + def _update_port_test_helper(self, expected, func, **kwargs): + core = self.plugin_instance._core_plugin + + with self.pool() as pool: + with self.vip(pool=pool) as vip: + with self.member(pool_id=vip['vip']['pool_id']): + ctx = context.get_admin_context() + func(ctx, port_id=vip['vip']['port_id'], **kwargs) + + db_port = core.get_port(ctx, vip['vip']['port_id']) + + for k, v in expected.iteritems(): + self.assertEqual(db_port[k], v) + + def test_plug_vip_port(self): + exp = { + 'device_owner': 'neutron:' + constants.LOADBALANCER, + 'device_id': 'c596ce11-db30-5c72-8243-15acaae8690f', + 'admin_state_up': True + } + self._update_port_test_helper( + exp, + self.callbacks.plug_vip_port, + host='host' + ) + + def test_plug_vip_port_mock_with_host(self): + exp = { + 'device_owner': 'neutron:' + constants.LOADBALANCER, + 'device_id': 'c596ce11-db30-5c72-8243-15acaae8690f', + 'admin_state_up': True, + portbindings.HOST_ID: 'host' + } + with mock.patch.object( + self.plugin._core_plugin, 'update_port') as mock_update_port: + with self.pool() as pool: + with self.vip(pool=pool) as vip: + ctx = context.get_admin_context() + self.callbacks.plug_vip_port( + ctx, port_id=vip['vip']['port_id'], host='host') + mock_update_port.assert_called_once_with( + ctx, vip['vip']['port_id'], + {'port': testlib_api.SubDictMatch(exp)}) + + def test_unplug_vip_port(self): + exp = { + 'device_owner': '', + 'device_id': '', + 'admin_state_up': False + } + self._update_port_test_helper( + exp, + self.callbacks.unplug_vip_port, + host='host' + ) + + def test_pool_deployed(self): + with self.pool() as pool: + with self.vip(pool=pool) as vip: + with self.member(pool_id=vip['vip']['pool_id']) as member: + ctx = context.get_admin_context() 
+ p = self.plugin_instance.get_pool(ctx, pool['pool']['id']) + self.assertEqual('PENDING_CREATE', p['status']) + v = self.plugin_instance.get_vip(ctx, vip['vip']['id']) + self.assertEqual('PENDING_CREATE', v['status']) + m = self.plugin_instance.get_member( + ctx, member['member']['id']) + self.assertEqual('PENDING_CREATE', m['status']) + + self.callbacks.pool_deployed(ctx, pool['pool']['id']) + + p = self.plugin_instance.get_pool(ctx, pool['pool']['id']) + self.assertEqual('ACTIVE', p['status']) + v = self.plugin_instance.get_vip(ctx, vip['vip']['id']) + self.assertEqual('ACTIVE', v['status']) + m = self.plugin_instance.get_member( + ctx, member['member']['id']) + self.assertEqual('ACTIVE', m['status']) + + def test_update_status_pool(self): + with self.pool() as pool: + pool_id = pool['pool']['id'] + ctx = context.get_admin_context() + p = self.plugin_instance.get_pool(ctx, pool_id) + self.assertEqual('PENDING_CREATE', p['status']) + self.callbacks.update_status(ctx, 'pool', pool_id, 'ACTIVE') + p = self.plugin_instance.get_pool(ctx, pool_id) + self.assertEqual('ACTIVE', p['status']) + + def test_update_status_pool_deleted_already(self): + with mock.patch.object(agent_driver_base, 'LOG') as mock_log: + pool_id = 'deleted_pool' + ctx = context.get_admin_context() + self.assertRaises(loadbalancer.PoolNotFound, + self.plugin_instance.get_pool, ctx, pool_id) + self.callbacks.update_status(ctx, 'pool', pool_id, 'ACTIVE') + self.assertTrue(mock_log.warning.called) + + def test_update_status_health_monitor(self): + with contextlib.nested( + self.health_monitor(), + self.pool() + ) as (hm, pool): + pool_id = pool['pool']['id'] + ctx = context.get_admin_context() + self.plugin_instance.create_pool_health_monitor(ctx, hm, pool_id) + hm_id = hm['health_monitor']['id'] + h = self.plugin_instance.get_pool_health_monitor(ctx, hm_id, + pool_id) + self.assertEqual('PENDING_CREATE', h['status']) + self.callbacks.update_status( + ctx, 'health_monitor', + {'monitor_id': hm_id, 
'pool_id': pool_id}, 'ACTIVE') + h = self.plugin_instance.get_pool_health_monitor(ctx, hm_id, + pool_id) + self.assertEqual('ACTIVE', h['status']) + + +class TestLoadBalancerAgentApi(base.BaseTestCase): + def setUp(self): + super(TestLoadBalancerAgentApi, self).setUp() + + self.api = agent_driver_base.LoadBalancerAgentApi('topic') + self.mock_cast = mock.patch.object(self.api, 'cast').start() + self.mock_msg = mock.patch.object(self.api, 'make_msg').start() + + def test_init(self): + self.assertEqual(self.api.topic, 'topic') + + def _call_test_helper(self, method_name, method_args): + rv = getattr(self.api, method_name)(mock.sentinel.context, + host='host', + **method_args) + self.assertEqual(rv, self.mock_cast.return_value) + self.mock_cast.assert_called_once_with( + mock.sentinel.context, + self.mock_msg.return_value, + topic='topic.host', + version=None + ) + + if method_name == 'agent_updated': + method_args = {'payload': method_args} + self.mock_msg.assert_called_once_with( + method_name, + **method_args + ) + + def test_agent_updated(self): + self._call_test_helper('agent_updated', {'admin_state_up': 'test'}) + + def test_create_pool(self): + self._call_test_helper('create_pool', {'pool': 'test', + 'driver_name': 'dummy'}) + + def test_update_pool(self): + self._call_test_helper('update_pool', {'old_pool': 'test', + 'pool': 'test'}) + + def test_delete_pool(self): + self._call_test_helper('delete_pool', {'pool': 'test'}) + + def test_create_vip(self): + self._call_test_helper('create_vip', {'vip': 'test'}) + + def test_update_vip(self): + self._call_test_helper('update_vip', {'old_vip': 'test', + 'vip': 'test'}) + + def test_delete_vip(self): + self._call_test_helper('delete_vip', {'vip': 'test'}) + + def test_create_member(self): + self._call_test_helper('create_member', {'member': 'test'}) + + def test_update_member(self): + self._call_test_helper('update_member', {'old_member': 'test', + 'member': 'test'}) + + def test_delete_member(self): + 
self._call_test_helper('delete_member', {'member': 'test'}) + + def test_create_monitor(self): + self._call_test_helper('create_pool_health_monitor', + {'health_monitor': 'test', 'pool_id': 'test'}) + + def test_update_monitor(self): + self._call_test_helper('update_pool_health_monitor', + {'old_health_monitor': 'test', + 'health_monitor': 'test', + 'pool_id': 'test'}) + + def test_delete_monitor(self): + self._call_test_helper('delete_pool_health_monitor', + {'health_monitor': 'test', 'pool_id': 'test'}) + + +class TestLoadBalancerPluginNotificationWrapper(TestLoadBalancerPluginBase): + def setUp(self): + self.log = mock.patch.object(agent_driver_base, 'LOG') + api_cls = mock.patch.object(agent_driver_base, + 'LoadBalancerAgentApi').start() + super(TestLoadBalancerPluginNotificationWrapper, self).setUp() + self.mock_api = api_cls.return_value + + self.mock_get_driver = mock.patch.object(self.plugin_instance, + '_get_driver') + self.mock_get_driver.return_value = (agent_driver_base. + AgentDriverBase( + self.plugin_instance + )) + + def test_create_vip(self): + with self.subnet() as subnet: + with self.pool(subnet=subnet) as pool: + with self.vip(pool=pool, subnet=subnet) as vip: + self.mock_api.create_vip.assert_called_once_with( + mock.ANY, + vip['vip'], + 'host' + ) + + def test_update_vip(self): + with self.subnet() as subnet: + with self.pool(subnet=subnet) as pool: + with self.vip(pool=pool, subnet=subnet) as vip: + ctx = context.get_admin_context() + old_vip = vip['vip'].copy() + vip['vip'].pop('status') + new_vip = self.plugin_instance.update_vip( + ctx, + vip['vip']['id'], + vip + ) + + self.mock_api.update_vip.assert_called_once_with( + mock.ANY, + old_vip, + new_vip, + 'host' + ) + + self.assertEqual( + new_vip['status'], + constants.PENDING_UPDATE + ) + + def test_delete_vip(self): + with self.subnet() as subnet: + with self.pool(subnet=subnet) as pool: + with self.vip(pool=pool, subnet=subnet, no_delete=True) as vip: + ctx = context.get_admin_context() 
+ self.plugin_instance.delete_vip(ctx, vip['vip']['id']) + vip['vip']['status'] = 'PENDING_DELETE' + self.mock_api.delete_vip.assert_called_once_with( + mock.ANY, + vip['vip'], + 'host' + ) + + def test_create_pool(self): + with self.pool() as pool: + self.mock_api.create_pool.assert_called_once_with( + mock.ANY, + pool['pool'], + mock.ANY, + 'dummy' + ) + + def test_update_pool_non_active(self): + with self.pool() as pool: + pool['pool']['status'] = 'INACTIVE' + ctx = context.get_admin_context() + orig_pool = pool['pool'].copy() + del pool['pool']['provider'] + self.plugin_instance.update_pool(ctx, pool['pool']['id'], pool) + self.mock_api.delete_pool.assert_called_once_with( + mock.ANY, orig_pool, 'host') + + def test_update_pool_no_vip_id(self): + with self.pool() as pool: + ctx = context.get_admin_context() + orig_pool = pool['pool'].copy() + del pool['pool']['provider'] + updated = self.plugin_instance.update_pool( + ctx, pool['pool']['id'], pool) + self.mock_api.update_pool.assert_called_once_with( + mock.ANY, orig_pool, updated, 'host') + + def test_update_pool_with_vip_id(self): + with self.pool() as pool: + with self.vip(pool=pool) as vip: + ctx = context.get_admin_context() + old_pool = pool['pool'].copy() + old_pool['vip_id'] = vip['vip']['id'] + del pool['pool']['provider'] + updated = self.plugin_instance.update_pool( + ctx, pool['pool']['id'], pool) + self.mock_api.update_pool.assert_called_once_with( + mock.ANY, old_pool, updated, 'host') + + def test_delete_pool(self): + with self.pool(no_delete=True) as pool: + req = self.new_delete_request('pools', + pool['pool']['id']) + res = req.get_response(self.ext_api) + self.assertEqual(res.status_int, exc.HTTPNoContent.code) + pool['pool']['status'] = 'PENDING_DELETE' + self.mock_api.delete_pool.assert_called_once_with( + mock.ANY, pool['pool'], 'host') + + def test_create_member(self): + with self.pool() as pool: + pool_id = pool['pool']['id'] + with self.member(pool_id=pool_id) as member: + 
self.mock_api.create_member.assert_called_once_with( + mock.ANY, member['member'], 'host') + + def test_update_member(self): + with self.pool() as pool: + pool_id = pool['pool']['id'] + with self.member(pool_id=pool_id) as member: + ctx = context.get_admin_context() + updated = self.plugin_instance.update_member( + ctx, member['member']['id'], member) + self.mock_api.update_member.assert_called_once_with( + mock.ANY, member['member'], updated, 'host') + + def test_update_member_new_pool(self): + with self.pool() as pool1: + pool1_id = pool1['pool']['id'] + with self.pool() as pool2: + pool2_id = pool2['pool']['id'] + with self.member(pool_id=pool1_id) as member: + self.mock_api.create_member.reset_mock() + ctx = context.get_admin_context() + old_member = member['member'].copy() + member['member']['pool_id'] = pool2_id + updated = self.plugin_instance.update_member( + ctx, member['member']['id'], member) + self.mock_api.delete_member.assert_called_once_with( + mock.ANY, old_member, 'host') + self.mock_api.create_member.assert_called_once_with( + mock.ANY, updated, 'host') + + def test_delete_member(self): + with self.pool() as pool: + pool_id = pool['pool']['id'] + with self.member(pool_id=pool_id, + no_delete=True) as member: + req = self.new_delete_request('members', + member['member']['id']) + res = req.get_response(self.ext_api) + self.assertEqual(res.status_int, exc.HTTPNoContent.code) + member['member']['status'] = 'PENDING_DELETE' + self.mock_api.delete_member.assert_called_once_with( + mock.ANY, member['member'], 'host') + + def test_create_pool_health_monitor(self): + with contextlib.nested( + self.health_monitor(), + self.pool(), + ) as (hm, pool): + pool_id = pool['pool']['id'] + ctx = context.get_admin_context() + self.plugin_instance.create_pool_health_monitor(ctx, hm, pool_id) + # hm now has a ref to the pool with which it is associated + hm = self.plugin.get_health_monitor( + ctx, hm['health_monitor']['id']) + 
self.mock_api.create_pool_health_monitor.assert_called_once_with( + mock.ANY, hm, pool_id, 'host') + + def test_delete_pool_health_monitor(self): + with contextlib.nested( + self.pool(), + self.health_monitor() + ) as (pool, hm): + pool_id = pool['pool']['id'] + ctx = context.get_admin_context() + self.plugin_instance.create_pool_health_monitor(ctx, hm, pool_id) + # hm now has a ref to the pool with which it is associated + hm = self.plugin.get_health_monitor( + ctx, hm['health_monitor']['id']) + hm['pools'][0]['status'] = 'PENDING_DELETE' + self.plugin_instance.delete_pool_health_monitor( + ctx, hm['id'], pool_id) + self.mock_api.delete_pool_health_monitor.assert_called_once_with( + mock.ANY, hm, pool_id, 'host') + + def test_update_health_monitor_associated_with_pool(self): + with contextlib.nested( + self.health_monitor(type='HTTP'), + self.pool() + ) as (monitor, pool): + data = { + 'health_monitor': { + 'id': monitor['health_monitor']['id'], + 'tenant_id': self._tenant_id + } + } + req = self.new_create_request( + 'pools', + data, + fmt=self.fmt, + id=pool['pool']['id'], + subresource='health_monitors') + res = req.get_response(self.ext_api) + self.assertEqual(res.status_int, exc.HTTPCreated.code) + # hm now has a ref to the pool with which it is associated + ctx = context.get_admin_context() + hm = self.plugin.get_health_monitor( + ctx, monitor['health_monitor']['id']) + self.mock_api.create_pool_health_monitor.assert_called_once_with( + mock.ANY, + hm, + pool['pool']['id'], + 'host' + ) + + self.mock_api.reset_mock() + data = {'health_monitor': {'delay': 20, + 'timeout': 20, + 'max_retries': 2, + 'admin_state_up': False}} + updated = hm.copy() + updated.update(data['health_monitor']) + req = self.new_update_request("health_monitors", + data, + monitor['health_monitor']['id']) + req.get_response(self.ext_api) + self.mock_api.update_pool_health_monitor.assert_called_once_with( + mock.ANY, + hm, + updated, + pool['pool']['id'], + 'host') diff --git 
a/neutron/tests/unit/services/loadbalancer/test_agent_scheduler.py b/neutron/tests/unit/services/loadbalancer/test_agent_scheduler.py new file mode 100644 index 000000000..5ee947944 --- /dev/null +++ b/neutron/tests/unit/services/loadbalancer/test_agent_scheduler.py @@ -0,0 +1,222 @@ +# Copyright (c) 2013 OpenStack Foundation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import mock +from oslo.config import cfg +from webob import exc + +from neutron.api import extensions +from neutron.api.v2 import attributes +from neutron.common import constants +from neutron import context +from neutron.db import servicetype_db as st_db +from neutron.extensions import agent +from neutron.extensions import lbaas_agentscheduler +from neutron.extensions import loadbalancer +from neutron import manager +from neutron.plugins.common import constants as plugin_const +from neutron.tests.unit.db.loadbalancer import test_db_loadbalancer +from neutron.tests.unit.openvswitch import test_agent_scheduler +from neutron.tests.unit import test_agent_ext_plugin +from neutron.tests.unit import test_db_plugin as test_plugin +from neutron.tests.unit import test_extensions + +LBAAS_HOSTA = 'hosta' + + +class AgentSchedulerTestMixIn(test_agent_scheduler.AgentSchedulerTestMixIn): + def _list_pools_hosted_by_lbaas_agent(self, agent_id, + expected_code=exc.HTTPOk.code, + admin_context=True): + path = "/agents/%s/%s.%s" % (agent_id, + lbaas_agentscheduler.LOADBALANCER_POOLS, + self.fmt) + return 
self._request_list(path, expected_code=expected_code, + admin_context=admin_context) + + def _get_lbaas_agent_hosting_pool(self, pool_id, + expected_code=exc.HTTPOk.code, + admin_context=True): + path = "/lb/pools/%s/%s.%s" % (pool_id, + lbaas_agentscheduler.LOADBALANCER_AGENT, + self.fmt) + return self._request_list(path, expected_code=expected_code, + admin_context=admin_context) + + +class LBaaSAgentSchedulerTestCase(test_agent_ext_plugin.AgentDBTestMixIn, + AgentSchedulerTestMixIn, + test_db_loadbalancer.LoadBalancerTestMixin, + test_plugin.NeutronDbPluginV2TestCase): + fmt = 'json' + plugin_str = ('neutron.plugins.openvswitch.' + 'ovs_neutron_plugin.OVSNeutronPluginV2') + + def setUp(self): + # Save the global RESOURCE_ATTRIBUTE_MAP + self.saved_attr_map = {} + for resource, attrs in attributes.RESOURCE_ATTRIBUTE_MAP.iteritems(): + self.saved_attr_map[resource] = attrs.copy() + service_plugins = { + 'lb_plugin_name': test_db_loadbalancer.DB_LB_PLUGIN_KLASS} + + #default provider should support agent scheduling + cfg.CONF.set_override( + 'service_provider', + [('LOADBALANCER:lbaas:neutron.services.' + 'loadbalancer.drivers.haproxy.plugin_driver.' 
+ 'HaproxyOnHostPluginDriver:default')], + 'service_providers') + + # need to reload provider configuration + st_db.ServiceTypeManager._instance = None + + super(LBaaSAgentSchedulerTestCase, self).setUp( + self.plugin_str, service_plugins=service_plugins) + ext_mgr = extensions.PluginAwareExtensionManager.get_instance() + self.ext_api = test_extensions.setup_extensions_middleware(ext_mgr) + self.adminContext = context.get_admin_context() + # Add the resources to the global attribute map + # This is done here as the setup process won't + # initialize the main API router which extends + # the global attribute map + attributes.RESOURCE_ATTRIBUTE_MAP.update( + agent.RESOURCE_ATTRIBUTE_MAP) + self.addCleanup(self.restore_attribute_map) + + def restore_attribute_map(self): + # Restore the original RESOURCE_ATTRIBUTE_MAP + attributes.RESOURCE_ATTRIBUTE_MAP = self.saved_attr_map + + def test_report_states(self): + self._register_agent_states(lbaas_agents=True) + agents = self._list_agents() + self.assertEqual(6, len(agents['agents'])) + + def test_pool_scheduling_on_pool_creation(self): + self._register_agent_states(lbaas_agents=True) + with self.pool() as pool: + lbaas_agent = self._get_lbaas_agent_hosting_pool( + pool['pool']['id']) + self.assertIsNotNone(lbaas_agent) + self.assertEqual(lbaas_agent['agent']['agent_type'], + constants.AGENT_TYPE_LOADBALANCER) + pools = self._list_pools_hosted_by_lbaas_agent( + lbaas_agent['agent']['id']) + self.assertEqual(1, len(pools['pools'])) + self.assertEqual(pool['pool'], pools['pools'][0]) + + def test_schedule_pool_with_disabled_agent(self): + lbaas_hosta = { + 'binary': 'neutron-loadbalancer-agent', + 'host': LBAAS_HOSTA, + 'topic': 'LOADBALANCER_AGENT', + 'configurations': {'device_drivers': ['haproxy_ns']}, + 'agent_type': constants.AGENT_TYPE_LOADBALANCER} + self._register_one_agent_state(lbaas_hosta) + with self.pool() as pool: + lbaas_agent = self._get_lbaas_agent_hosting_pool( + pool['pool']['id']) + 
self.assertIsNotNone(lbaas_agent) + + agents = self._list_agents() + self._disable_agent(agents['agents'][0]['id']) + pool = {'pool': {'name': 'test', + 'subnet_id': 'test', + 'lb_method': 'ROUND_ROBIN', + 'protocol': 'HTTP', + 'admin_state_up': True, + 'tenant_id': 'test', + 'description': 'test'}} + lbaas_plugin = manager.NeutronManager.get_service_plugins()[ + plugin_const.LOADBALANCER] + self.assertRaises(loadbalancer.NoEligibleBackend, + lbaas_plugin.create_pool, self.adminContext, pool) + pools = lbaas_plugin.get_pools(self.adminContext) + self.assertEqual('ERROR', pools[0]['status']) + self.assertEqual('No eligible backend', + pools[0]['status_description']) + + def test_schedule_pool_with_down_agent(self): + lbaas_hosta = { + 'binary': 'neutron-loadbalancer-agent', + 'host': LBAAS_HOSTA, + 'topic': 'LOADBALANCER_AGENT', + 'configurations': {'device_drivers': ['haproxy_ns']}, + 'agent_type': constants.AGENT_TYPE_LOADBALANCER} + self._register_one_agent_state(lbaas_hosta) + is_agent_down_str = 'neutron.db.agents_db.AgentDbMixin.is_agent_down' + with mock.patch(is_agent_down_str) as mock_is_agent_down: + mock_is_agent_down.return_value = False + with self.pool() as pool: + lbaas_agent = self._get_lbaas_agent_hosting_pool( + pool['pool']['id']) + self.assertIsNotNone(lbaas_agent) + with mock.patch(is_agent_down_str) as mock_is_agent_down: + mock_is_agent_down.return_value = True + pool = {'pool': {'name': 'test', + 'subnet_id': 'test', + 'lb_method': 'ROUND_ROBIN', + 'protocol': 'HTTP', + 'provider': 'lbaas', + 'admin_state_up': True, + 'tenant_id': 'test', + 'description': 'test'}} + lbaas_plugin = manager.NeutronManager.get_service_plugins()[ + plugin_const.LOADBALANCER] + self.assertRaises(loadbalancer.NoEligibleBackend, + lbaas_plugin.create_pool, + self.adminContext, pool) + pools = lbaas_plugin.get_pools(self.adminContext) + self.assertEqual('ERROR', pools[0]['status']) + self.assertEqual('No eligible backend', + pools[0]['status_description']) + + def 
test_pool_unscheduling_on_pool_deletion(self): + self._register_agent_states(lbaas_agents=True) + with self.pool(no_delete=True) as pool: + lbaas_agent = self._get_lbaas_agent_hosting_pool( + pool['pool']['id']) + self.assertIsNotNone(lbaas_agent) + self.assertEqual(lbaas_agent['agent']['agent_type'], + constants.AGENT_TYPE_LOADBALANCER) + pools = self._list_pools_hosted_by_lbaas_agent( + lbaas_agent['agent']['id']) + self.assertEqual(1, len(pools['pools'])) + self.assertEqual(pool['pool'], pools['pools'][0]) + + req = self.new_delete_request('pools', + pool['pool']['id']) + res = req.get_response(self.ext_api) + self.assertEqual(res.status_int, exc.HTTPNoContent.code) + pools = self._list_pools_hosted_by_lbaas_agent( + lbaas_agent['agent']['id']) + self.assertEqual(0, len(pools['pools'])) + + def test_pool_scheduling_non_admin_access(self): + self._register_agent_states(lbaas_agents=True) + with self.pool() as pool: + self._get_lbaas_agent_hosting_pool( + pool['pool']['id'], + expected_code=exc.HTTPForbidden.code, + admin_context=False) + self._list_pools_hosted_by_lbaas_agent( + 'fake_id', + expected_code=exc.HTTPForbidden.code, + admin_context=False) + + +class LBaaSAgentSchedulerTestCaseXML(LBaaSAgentSchedulerTestCase): + fmt = 'xml' diff --git a/neutron/tests/unit/services/loadbalancer/test_loadbalancer_plugin.py b/neutron/tests/unit/services/loadbalancer/test_loadbalancer_plugin.py new file mode 100644 index 000000000..95fef74dd --- /dev/null +++ b/neutron/tests/unit/services/loadbalancer/test_loadbalancer_plugin.py @@ -0,0 +1,464 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import copy + +import mock +from webob import exc + +from neutron.api.v2 import attributes as attr +from neutron.extensions import loadbalancer +from neutron.openstack.common import uuidutils +from neutron.plugins.common import constants +from neutron.tests.unit import test_api_v2 +from neutron.tests.unit import test_api_v2_extension + + +_uuid = uuidutils.generate_uuid +_get_path = test_api_v2._get_path + + +class LoadBalancerExtensionTestCase(test_api_v2_extension.ExtensionTestCase): + fmt = 'json' + + def setUp(self): + super(LoadBalancerExtensionTestCase, self).setUp() + self._setUpExtension( + 'neutron.extensions.loadbalancer.LoadBalancerPluginBase', + constants.LOADBALANCER, loadbalancer.RESOURCE_ATTRIBUTE_MAP, + loadbalancer.Loadbalancer, 'lb', use_quota=True) + + def test_vip_create(self): + vip_id = _uuid() + data = {'vip': {'name': 'vip1', + 'description': 'descr_vip1', + 'subnet_id': _uuid(), + 'address': '127.0.0.1', + 'protocol_port': 80, + 'protocol': 'HTTP', + 'pool_id': _uuid(), + 'session_persistence': {'type': 'HTTP_COOKIE'}, + 'connection_limit': 100, + 'admin_state_up': True, + 'tenant_id': _uuid()}} + return_value = copy.copy(data['vip']) + return_value.update({'status': "ACTIVE", 'id': vip_id}) + + instance = self.plugin.return_value + instance.create_vip.return_value = return_value + res = self.api.post(_get_path('lb/vips', fmt=self.fmt), + self.serialize(data), + content_type='application/%s' % self.fmt) + instance.create_vip.assert_called_with(mock.ANY, + vip=data) + self.assertEqual(res.status_int, exc.HTTPCreated.code) + res = 
self.deserialize(res) + self.assertIn('vip', res) + self.assertEqual(res['vip'], return_value) + + def test_vip_list(self): + vip_id = _uuid() + return_value = [{'name': 'vip1', + 'admin_state_up': True, + 'tenant_id': _uuid(), + 'id': vip_id}] + + instance = self.plugin.return_value + instance.get_vips.return_value = return_value + + res = self.api.get(_get_path('lb/vips', fmt=self.fmt)) + + instance.get_vips.assert_called_with(mock.ANY, fields=mock.ANY, + filters=mock.ANY) + self.assertEqual(res.status_int, exc.HTTPOk.code) + + def test_vip_update(self): + vip_id = _uuid() + update_data = {'vip': {'admin_state_up': False}} + return_value = {'name': 'vip1', + 'admin_state_up': False, + 'tenant_id': _uuid(), + 'status': "ACTIVE", + 'id': vip_id} + + instance = self.plugin.return_value + instance.update_vip.return_value = return_value + + res = self.api.put(_get_path('lb/vips', id=vip_id, fmt=self.fmt), + self.serialize(update_data)) + + instance.update_vip.assert_called_with(mock.ANY, vip_id, + vip=update_data) + self.assertEqual(res.status_int, exc.HTTPOk.code) + res = self.deserialize(res) + self.assertIn('vip', res) + self.assertEqual(res['vip'], return_value) + + def test_vip_get(self): + vip_id = _uuid() + return_value = {'name': 'vip1', + 'admin_state_up': False, + 'tenant_id': _uuid(), + 'status': "ACTIVE", + 'id': vip_id} + + instance = self.plugin.return_value + instance.get_vip.return_value = return_value + + res = self.api.get(_get_path('lb/vips', id=vip_id, fmt=self.fmt)) + + instance.get_vip.assert_called_with(mock.ANY, vip_id, + fields=mock.ANY) + self.assertEqual(res.status_int, exc.HTTPOk.code) + res = self.deserialize(res) + self.assertIn('vip', res) + self.assertEqual(res['vip'], return_value) + + def test_vip_delete(self): + self._test_entity_delete('vip') + + def test_pool_create(self): + pool_id = _uuid() + hm_id = _uuid() + data = {'pool': {'name': 'pool1', + 'description': 'descr_pool1', + 'subnet_id': _uuid(), + 'protocol': 'HTTP', + 
'lb_method': 'ROUND_ROBIN', + 'health_monitors': [hm_id], + 'admin_state_up': True, + 'tenant_id': _uuid()}} + return_value = copy.copy(data['pool']) + return_value['provider'] = 'lbaas' + return_value.update({'status': "ACTIVE", 'id': pool_id}) + + instance = self.plugin.return_value + instance.create_pool.return_value = return_value + res = self.api.post(_get_path('lb/pools', fmt=self.fmt), + self.serialize(data), + content_type='application/%s' % self.fmt) + data['pool']['provider'] = attr.ATTR_NOT_SPECIFIED + instance.create_pool.assert_called_with(mock.ANY, + pool=data) + self.assertEqual(res.status_int, exc.HTTPCreated.code) + res = self.deserialize(res) + self.assertIn('pool', res) + self.assertEqual(res['pool'], return_value) + + def test_pool_list(self): + pool_id = _uuid() + return_value = [{'name': 'pool1', + 'admin_state_up': True, + 'tenant_id': _uuid(), + 'id': pool_id}] + + instance = self.plugin.return_value + instance.get_pools.return_value = return_value + + res = self.api.get(_get_path('lb/pools', fmt=self.fmt)) + + instance.get_pools.assert_called_with(mock.ANY, fields=mock.ANY, + filters=mock.ANY) + self.assertEqual(res.status_int, exc.HTTPOk.code) + + def test_pool_update(self): + pool_id = _uuid() + update_data = {'pool': {'admin_state_up': False}} + return_value = {'name': 'pool1', + 'admin_state_up': False, + 'tenant_id': _uuid(), + 'status': "ACTIVE", + 'id': pool_id} + + instance = self.plugin.return_value + instance.update_pool.return_value = return_value + + res = self.api.put(_get_path('lb/pools', id=pool_id, fmt=self.fmt), + self.serialize(update_data)) + + instance.update_pool.assert_called_with(mock.ANY, pool_id, + pool=update_data) + self.assertEqual(res.status_int, exc.HTTPOk.code) + res = self.deserialize(res) + self.assertIn('pool', res) + self.assertEqual(res['pool'], return_value) + + def test_pool_get(self): + pool_id = _uuid() + return_value = {'name': 'pool1', + 'admin_state_up': False, + 'tenant_id': _uuid(), + 'status': 
"ACTIVE", + 'id': pool_id} + + instance = self.plugin.return_value + instance.get_pool.return_value = return_value + + res = self.api.get(_get_path('lb/pools', id=pool_id, fmt=self.fmt)) + + instance.get_pool.assert_called_with(mock.ANY, pool_id, + fields=mock.ANY) + self.assertEqual(res.status_int, exc.HTTPOk.code) + res = self.deserialize(res) + self.assertIn('pool', res) + self.assertEqual(res['pool'], return_value) + + def test_pool_delete(self): + self._test_entity_delete('pool') + + def test_pool_stats(self): + pool_id = _uuid() + + stats = {'stats': 'dummy'} + instance = self.plugin.return_value + instance.stats.return_value = stats + + path = _get_path('lb/pools', id=pool_id, + action="stats", fmt=self.fmt) + res = self.api.get(path) + + instance.stats.assert_called_with(mock.ANY, pool_id) + self.assertEqual(res.status_int, exc.HTTPOk.code) + res = self.deserialize(res) + self.assertIn('stats', res) + self.assertEqual(res['stats'], stats['stats']) + + def test_member_create(self): + member_id = _uuid() + data = {'member': {'pool_id': _uuid(), + 'address': '127.0.0.1', + 'protocol_port': 80, + 'weight': 1, + 'admin_state_up': True, + 'tenant_id': _uuid()}} + return_value = copy.copy(data['member']) + return_value.update({'status': "ACTIVE", 'id': member_id}) + + instance = self.plugin.return_value + instance.create_member.return_value = return_value + res = self.api.post(_get_path('lb/members', fmt=self.fmt), + self.serialize(data), + content_type='application/%s' % self.fmt) + instance.create_member.assert_called_with(mock.ANY, + member=data) + self.assertEqual(res.status_int, exc.HTTPCreated.code) + res = self.deserialize(res) + self.assertIn('member', res) + self.assertEqual(res['member'], return_value) + + def test_member_list(self): + member_id = _uuid() + return_value = [{'name': 'member1', + 'admin_state_up': True, + 'tenant_id': _uuid(), + 'id': member_id}] + + instance = self.plugin.return_value + instance.get_members.return_value = return_value + + 
res = self.api.get(_get_path('lb/members', fmt=self.fmt)) + + instance.get_members.assert_called_with(mock.ANY, fields=mock.ANY, + filters=mock.ANY) + self.assertEqual(res.status_int, exc.HTTPOk.code) + + def test_member_update(self): + member_id = _uuid() + update_data = {'member': {'admin_state_up': False}} + return_value = {'admin_state_up': False, + 'tenant_id': _uuid(), + 'status': "ACTIVE", + 'id': member_id} + + instance = self.plugin.return_value + instance.update_member.return_value = return_value + + res = self.api.put(_get_path('lb/members', id=member_id, + fmt=self.fmt), + self.serialize(update_data)) + + instance.update_member.assert_called_with(mock.ANY, member_id, + member=update_data) + self.assertEqual(res.status_int, exc.HTTPOk.code) + res = self.deserialize(res) + self.assertIn('member', res) + self.assertEqual(res['member'], return_value) + + def test_member_get(self): + member_id = _uuid() + return_value = {'admin_state_up': False, + 'tenant_id': _uuid(), + 'status': "ACTIVE", + 'id': member_id} + + instance = self.plugin.return_value + instance.get_member.return_value = return_value + + res = self.api.get(_get_path('lb/members', id=member_id, + fmt=self.fmt)) + + instance.get_member.assert_called_with(mock.ANY, member_id, + fields=mock.ANY) + self.assertEqual(res.status_int, exc.HTTPOk.code) + res = self.deserialize(res) + self.assertIn('member', res) + self.assertEqual(res['member'], return_value) + + def test_member_delete(self): + self._test_entity_delete('member') + + def test_health_monitor_create(self): + health_monitor_id = _uuid() + data = {'health_monitor': {'type': 'HTTP', + 'delay': 2, + 'timeout': 1, + 'max_retries': 3, + 'http_method': 'GET', + 'url_path': '/path', + 'expected_codes': '200-300', + 'admin_state_up': True, + 'tenant_id': _uuid()}} + return_value = copy.copy(data['health_monitor']) + return_value.update({'status': "ACTIVE", 'id': health_monitor_id}) + + instance = self.plugin.return_value + 
instance.create_health_monitor.return_value = return_value + res = self.api.post(_get_path('lb/health_monitors', + fmt=self.fmt), + self.serialize(data), + content_type='application/%s' % self.fmt) + instance.create_health_monitor.assert_called_with(mock.ANY, + health_monitor=data) + self.assertEqual(res.status_int, exc.HTTPCreated.code) + res = self.deserialize(res) + self.assertIn('health_monitor', res) + self.assertEqual(res['health_monitor'], return_value) + + def test_health_monitor_create_with_timeout_negative(self): + data = {'health_monitor': {'type': 'HTTP', + 'delay': 2, + 'timeout': -1, + 'max_retries': 3, + 'http_method': 'GET', + 'url_path': '/path', + 'expected_codes': '200-300', + 'admin_state_up': True, + 'tenant_id': _uuid()}} + res = self.api.post(_get_path('lb/health_monitors', + fmt=self.fmt), + self.serialize(data), + content_type='application/%s' % self.fmt, + expect_errors=True) + self.assertEqual(400, res.status_int) + + def test_health_monitor_list(self): + health_monitor_id = _uuid() + return_value = [{'type': 'HTTP', + 'admin_state_up': True, + 'tenant_id': _uuid(), + 'id': health_monitor_id}] + + instance = self.plugin.return_value + instance.get_health_monitors.return_value = return_value + + res = self.api.get(_get_path('lb/health_monitors', fmt=self.fmt)) + + instance.get_health_monitors.assert_called_with( + mock.ANY, fields=mock.ANY, filters=mock.ANY) + self.assertEqual(res.status_int, exc.HTTPOk.code) + + def test_health_monitor_update(self): + health_monitor_id = _uuid() + update_data = {'health_monitor': {'admin_state_up': False}} + return_value = {'type': 'HTTP', + 'admin_state_up': False, + 'tenant_id': _uuid(), + 'status': "ACTIVE", + 'id': health_monitor_id} + + instance = self.plugin.return_value + instance.update_health_monitor.return_value = return_value + + res = self.api.put(_get_path('lb/health_monitors', + id=health_monitor_id, + fmt=self.fmt), + self.serialize(update_data)) + + 
instance.update_health_monitor.assert_called_with( + mock.ANY, health_monitor_id, health_monitor=update_data) + self.assertEqual(res.status_int, exc.HTTPOk.code) + res = self.deserialize(res) + self.assertIn('health_monitor', res) + self.assertEqual(res['health_monitor'], return_value) + + def test_health_monitor_get(self): + health_monitor_id = _uuid() + return_value = {'type': 'HTTP', + 'admin_state_up': False, + 'tenant_id': _uuid(), + 'status': "ACTIVE", + 'id': health_monitor_id} + + instance = self.plugin.return_value + instance.get_health_monitor.return_value = return_value + + res = self.api.get(_get_path('lb/health_monitors', + id=health_monitor_id, + fmt=self.fmt)) + + instance.get_health_monitor.assert_called_with( + mock.ANY, health_monitor_id, fields=mock.ANY) + self.assertEqual(res.status_int, exc.HTTPOk.code) + res = self.deserialize(res) + self.assertIn('health_monitor', res) + self.assertEqual(res['health_monitor'], return_value) + + def test_health_monitor_delete(self): + self._test_entity_delete('health_monitor') + + def test_create_pool_health_monitor(self): + health_monitor_id = _uuid() + data = {'health_monitor': {'id': health_monitor_id, + 'tenant_id': _uuid()}} + + return_value = copy.copy(data['health_monitor']) + instance = self.plugin.return_value + instance.create_pool_health_monitor.return_value = return_value + res = self.api.post('/lb/pools/id1/health_monitors', + self.serialize(data), + content_type='application/%s' % self.fmt) + instance.create_pool_health_monitor.assert_called_with( + mock.ANY, pool_id='id1', health_monitor=data) + self.assertEqual(res.status_int, exc.HTTPCreated.code) + res = self.deserialize(res) + self.assertIn('health_monitor', res) + self.assertEqual(res['health_monitor'], return_value) + + def test_delete_pool_health_monitor(self): + health_monitor_id = _uuid() + + res = self.api.delete('/lb/pools/id1/health_monitors/%s' % + health_monitor_id) + + instance = self.plugin.return_value + 
instance.delete_pool_health_monitor.assert_called_with( + mock.ANY, health_monitor_id, pool_id='id1') + self.assertEqual(res.status_int, exc.HTTPNoContent.code) + + +class LoadBalancerExtensionTestCaseXML(LoadBalancerExtensionTestCase): + fmt = 'xml' diff --git a/neutron/tests/unit/services/loadbalancer/test_loadbalancer_quota_ext.py b/neutron/tests/unit/services/loadbalancer/test_loadbalancer_quota_ext.py new file mode 100644 index 000000000..fb5ebba54 --- /dev/null +++ b/neutron/tests/unit/services/loadbalancer/test_loadbalancer_quota_ext.py @@ -0,0 +1,168 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2014 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from oslo.config import cfg + +from neutron import context +from neutron import quota +from neutron.tests.unit import test_api_v2 +from neutron.tests.unit import test_quota_ext + +_get_path = test_api_v2._get_path + + +class LBaaSQuotaExtensionTestCase( + test_quota_ext.QuotaExtensionTestCase): + + def setUp(self): + super(LBaaSQuotaExtensionTestCase, self).setUp() + cfg.CONF.set_override( + 'quota_items', + ['vip', 'pool', 'member', 'health_monitor', 'extra1'], + group='QUOTAS') + quota.register_resources_from_config() + + +class LBaaSQuotaExtensionDbTestCase(LBaaSQuotaExtensionTestCase): + fmt = 'json' + + def setUp(self): + cfg.CONF.set_override( + 'quota_driver', + 'neutron.db.quota_db.DbQuotaDriver', + group='QUOTAS') + super(LBaaSQuotaExtensionDbTestCase, self).setUp() + + def test_quotas_loaded_right(self): + res = self.api.get(_get_path('quotas', fmt=self.fmt)) + quota = self.deserialize(res) + self.assertEqual([], quota['quotas']) + self.assertEqual(200, res.status_int) + + def test_quotas_default_values(self): + tenant_id = 'tenant_id1' + env = {'neutron.context': context.Context('', tenant_id)} + res = self.api.get(_get_path('quotas', id=tenant_id, fmt=self.fmt), + extra_environ=env) + quota = self.deserialize(res) + self.assertEqual(10, quota['quota']['vip']) + self.assertEqual(10, quota['quota']['pool']) + self.assertEqual(-1, quota['quota']['member']) + self.assertEqual(-1, quota['quota']['health_monitor']) + self.assertEqual(-1, quota['quota']['extra1']) + + def test_show_quotas_with_admin(self): + tenant_id = 'tenant_id1' + env = {'neutron.context': context.Context('', tenant_id + '2', + is_admin=True)} + res = self.api.get(_get_path('quotas', id=tenant_id, fmt=self.fmt), + extra_environ=env) + self.assertEqual(200, res.status_int) + quota = self.deserialize(res) + self.assertEqual(10, quota['quota']['vip']) + self.assertEqual(10, quota['quota']['pool']) + self.assertEqual(-1, quota['quota']['member']) + self.assertEqual(-1, 
quota['quota']['health_monitor']) + + def test_show_quotas_with_owner_tenant(self): + tenant_id = 'tenant_id1' + env = {'neutron.context': context.Context('', tenant_id, + is_admin=False)} + res = self.api.get(_get_path('quotas', id=tenant_id, fmt=self.fmt), + extra_environ=env) + self.assertEqual(200, res.status_int) + quota = self.deserialize(res) + self.assertEqual(10, quota['quota']['vip']) + self.assertEqual(10, quota['quota']['pool']) + self.assertEqual(-1, quota['quota']['member']) + self.assertEqual(-1, quota['quota']['health_monitor']) + + def test_update_quotas_to_unlimited(self): + tenant_id = 'tenant_id1' + env = {'neutron.context': context.Context('', tenant_id, + is_admin=True)} + quotas = {'quota': {'pool': -1}} + res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt), + self.serialize(quotas), extra_environ=env, + expect_errors=False) + self.assertEqual(200, res.status_int) + + def test_update_quotas_exceeding_current_limit(self): + tenant_id = 'tenant_id1' + env = {'neutron.context': context.Context('', tenant_id, + is_admin=True)} + quotas = {'quota': {'pool': 120}} + res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt), + self.serialize(quotas), extra_environ=env, + expect_errors=False) + self.assertEqual(200, res.status_int) + + def test_update_quotas_with_admin(self): + tenant_id = 'tenant_id1' + env = {'neutron.context': context.Context('', tenant_id + '2', + is_admin=True)} + quotas = {'quota': {'pool': 100}} + res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt), + self.serialize(quotas), extra_environ=env) + self.assertEqual(200, res.status_int) + env2 = {'neutron.context': context.Context('', tenant_id)} + res = self.api.get(_get_path('quotas', id=tenant_id, fmt=self.fmt), + extra_environ=env2) + quota = self.deserialize(res) + self.assertEqual(10, quota['quota']['vip']) + self.assertEqual(100, quota['quota']['pool']) + self.assertEqual(-1, quota['quota']['member']) + self.assertEqual(-1, 
quota['quota']['health_monitor']) + + +class LBaaSQuotaExtensionDbTestCaseXML(LBaaSQuotaExtensionDbTestCase): + fmt = 'xml' + + +class LBaaSQuotaExtensionCfgTestCase( + LBaaSQuotaExtensionTestCase): + + def setUp(self): + cfg.CONF.set_override( + 'quota_driver', + 'neutron.quota.ConfDriver', + group='QUOTAS') + super(LBaaSQuotaExtensionCfgTestCase, self).setUp() + + def test_quotas_default_values(self): + tenant_id = 'tenant_id1' + env = {'neutron.context': context.Context('', tenant_id)} + res = self.api.get(_get_path('quotas', id=tenant_id, fmt=self.fmt), + extra_environ=env) + quota = self.deserialize(res) + self.assertEqual(10, quota['quota']['vip']) + self.assertEqual(10, quota['quota']['pool']) + self.assertEqual(-1, quota['quota']['member']) + self.assertEqual(-1, quota['quota']['health_monitor']) + self.assertEqual(-1, quota['quota']['extra1']) + + def test_update_quotas_forbidden(self): + tenant_id = 'tenant_id1' + quotas = {'quota': {'pool': 100}} + res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt), + self.serialize(quotas), + expect_errors=True) + self.assertEqual(403, res.status_int) + + +class LBaaSQuotaExtensionCfgTestCaseXML(LBaaSQuotaExtensionCfgTestCase): + fmt = 'xml' diff --git a/neutron/tests/unit/services/metering/__init__.py b/neutron/tests/unit/services/metering/__init__.py new file mode 100644 index 000000000..82a447213 --- /dev/null +++ b/neutron/tests/unit/services/metering/__init__.py @@ -0,0 +1,15 @@ +# Copyright (C) 2013 eNovance SAS +# +# Author: Sylvain Afchain +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/neutron/tests/unit/services/metering/drivers/__init__.py b/neutron/tests/unit/services/metering/drivers/__init__.py new file mode 100644 index 000000000..82a447213 --- /dev/null +++ b/neutron/tests/unit/services/metering/drivers/__init__.py @@ -0,0 +1,15 @@ +# Copyright (C) 2013 eNovance SAS +# +# Author: Sylvain Afchain +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/neutron/tests/unit/services/metering/drivers/test_iptables_driver.py b/neutron/tests/unit/services/metering/drivers/test_iptables_driver.py new file mode 100644 index 000000000..ad056f401 --- /dev/null +++ b/neutron/tests/unit/services/metering/drivers/test_iptables_driver.py @@ -0,0 +1,408 @@ +# Copyright (C) 2013 eNovance SAS +# +# Author: Sylvain Afchain +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import copy + +import mock +from oslo.config import cfg + +from neutron.services.metering.drivers.iptables import iptables_driver +from neutron.tests import base +from neutron.tests.unit import test_api_v2 + +_uuid = test_api_v2._uuid + + +class IptablesDriverTestCase(base.BaseTestCase): + def setUp(self): + super(IptablesDriverTestCase, self).setUp() + self.utils_exec_p = mock.patch( + 'neutron.agent.linux.utils.execute') + self.utils_exec = self.utils_exec_p.start() + self.iptables_cls_p = mock.patch( + 'neutron.agent.linux.iptables_manager.IptablesManager') + self.iptables_cls = self.iptables_cls_p.start() + self.iptables_inst = mock.Mock() + self.v4filter_inst = mock.Mock() + self.v6filter_inst = mock.Mock() + self.v4filter_inst.chains = [] + self.v6filter_inst.chains = [] + self.iptables_inst.ipv4 = {'filter': self.v4filter_inst} + self.iptables_inst.ipv6 = {'filter': self.v6filter_inst} + self.iptables_cls.return_value = self.iptables_inst + cfg.CONF.set_override('interface_driver', + 'neutron.agent.linux.interface.NullDriver') + cfg.CONF.set_override('root_helper', + 'fake_sudo', + 'AGENT') + self.metering = iptables_driver.IptablesMeteringDriver('metering', + cfg.CONF) + + def test_root_helper(self): + routers = [{'_metering_labels': [ + {'id': 'c5df2fe5-c600-4a2a-b2f4-c0fb6df73c83', + 'rules': []}], + 'admin_state_up': True, + 'gw_port_id': '7d411f48-ecc7-45e0-9ece-3b5bdb54fcee', + 'id': '473ec392-1711-44e3-b008-3251ccfc5099', + 'name': 'router1', + 'status': 'ACTIVE', + 'tenant_id': '6c5f5d2a1fa2441e88e35422926f48e8'}] + self.metering.add_metering_label(None, routers) + + self.iptables_cls.assert_called_with(root_helper='fake_sudo', + namespace=mock.ANY, + binary_name=mock.ANY) + + def test_add_metering_label(self): + routers = [{'_metering_labels': [ + {'id': 'c5df2fe5-c600-4a2a-b2f4-c0fb6df73c83', + 'rules': []}], + 'admin_state_up': True, + 'gw_port_id': '7d411f48-ecc7-45e0-9ece-3b5bdb54fcee', + 'id': '473ec392-1711-44e3-b008-3251ccfc5099', + 
'name': 'router1', + 'status': 'ACTIVE', + 'tenant_id': '6c5f5d2a1fa2441e88e35422926f48e8'}] + + self.metering.add_metering_label(None, routers) + calls = [mock.call.add_chain('neutron-meter-l-c5df2fe5-c60', + wrap=False), + mock.call.add_chain('neutron-meter-r-c5df2fe5-c60', + wrap=False), + mock.call.add_rule('neutron-meter-FORWARD', '-j ' + 'neutron-meter-r-c5df2fe5-c60', + wrap=False), + mock.call.add_rule('neutron-meter-l-c5df2fe5-c60', + '', + wrap=False)] + + self.v4filter_inst.assert_has_calls(calls) + + def test_add_metering_label_with_rules(self): + routers = [{'_metering_labels': [ + {'id': 'c5df2fe5-c600-4a2a-b2f4-c0fb6df73c83', + 'rules': [{ + 'direction': 'ingress', + 'excluded': False, + 'id': '7f1a261f-2489-4ed1-870c-a62754501379', + 'metering_label_id': 'c5df2fe5-c600-4a2a-b2f4-c0fb6df73c83', + 'remote_ip_prefix': '10.0.0.0/24'}]}], + 'admin_state_up': True, + 'gw_port_id': '6d411f48-ecc7-45e0-9ece-3b5bdb54fcee', + 'id': '473ec392-1711-44e3-b008-3251ccfc5099', + 'name': 'router1', + 'status': 'ACTIVE', + 'tenant_id': '6c5f5d2a1fa2441e88e35422926f48e8'}, + {'_metering_labels': [ + {'id': 'eeef45da-c600-4a2a-b2f4-c0fb6df73c83', + 'rules': [{ + 'direction': 'ingress', + 'excluded': True, + 'id': 'fa2441e8-2489-4ed1-870c-a62754501379', + 'metering_label_id': 'eeef45da-c600-4a2a-b2f4-c0fb6df73c83', + 'remote_ip_prefix': '20.0.0.0/24'}]}], + 'admin_state_up': True, + 'gw_port_id': '7d411f48-ecc7-45e0-9ece-3b5bdb54fcee', + 'id': '373ec392-1711-44e3-b008-3251ccfc5099', + 'name': 'router2', + 'status': 'ACTIVE', + 'tenant_id': '6c5f5d2a1fa2441e88e35422926f48e8'}] + + self.metering.add_metering_label(None, routers) + calls = [mock.call.add_chain('neutron-meter-l-c5df2fe5-c60', + wrap=False), + mock.call.add_chain('neutron-meter-r-c5df2fe5-c60', + wrap=False), + mock.call.add_rule('neutron-meter-FORWARD', '-j ' + 'neutron-meter-r-c5df2fe5-c60', + wrap=False), + mock.call.add_rule('neutron-meter-l-c5df2fe5-c60', + '', + wrap=False), + 
mock.call.add_rule('neutron-meter-r-c5df2fe5-c60', + '-i qg-6d411f48-ec -d 10.0.0.0/24' + ' -j neutron-meter-l-c5df2fe5-c60', + wrap=False, top=False), + mock.call.add_chain('neutron-meter-l-eeef45da-c60', + wrap=False), + mock.call.add_chain('neutron-meter-r-eeef45da-c60', + wrap=False), + mock.call.add_rule('neutron-meter-FORWARD', '-j ' + 'neutron-meter-r-eeef45da-c60', + wrap=False), + mock.call.add_rule('neutron-meter-l-eeef45da-c60', + '', + wrap=False), + mock.call.add_rule('neutron-meter-r-eeef45da-c60', + '-i qg-7d411f48-ec -d 20.0.0.0/24' + ' -j RETURN', + wrap=False, top=True)] + + self.v4filter_inst.assert_has_calls(calls) + + def test_update_metering_label_rules(self): + routers = [{'_metering_labels': [ + {'id': 'c5df2fe5-c600-4a2a-b2f4-c0fb6df73c83', + 'rules': [{ + 'direction': 'ingress', + 'excluded': False, + 'id': '7f1a261f-2489-4ed1-870c-a62754501379', + 'metering_label_id': 'c5df2fe5-c600-4a2a-b2f4-c0fb6df73c83', + 'remote_ip_prefix': '10.0.0.0/24'}]}], + 'admin_state_up': True, + 'gw_port_id': '6d411f48-ecc7-45e0-9ece-3b5bdb54fcee', + 'id': '473ec392-1711-44e3-b008-3251ccfc5099', + 'name': 'router1', + 'status': 'ACTIVE', + 'tenant_id': '6c5f5d2a1fa2441e88e35422926f48e8'}] + + self.metering.add_metering_label(None, routers) + + updates = copy.deepcopy(routers) + updates[0]['_metering_labels'][0]['rules'] = [{ + 'direction': 'egress', + 'excluded': True, + 'id': '7f1a261f-2489-4ed1-870c-a62754501379', + 'metering_label_id': 'c5df2fe5-c600-4a2a-b2f4-c0fb6df73c83', + 'remote_ip_prefix': '10.0.0.0/24'}, + {'direction': 'ingress', + 'excluded': False, + 'id': '6f1a261f-2489-4ed1-870c-a62754501379', + 'metering_label_id': 'c5df2fe5-c600-4a2a-b2f4-c0fb6df73c83', + 'remote_ip_prefix': '20.0.0.0/24'}] + + self.metering.update_metering_label_rules(None, updates) + + calls = [mock.call.add_chain('neutron-meter-l-c5df2fe5-c60', + wrap=False), + mock.call.add_chain('neutron-meter-r-c5df2fe5-c60', + wrap=False), + mock.call.add_rule('neutron-meter-FORWARD', 
'-j ' + 'neutron-meter-r-c5df2fe5-c60', + wrap=False), + mock.call.add_rule('neutron-meter-l-c5df2fe5-c60', + '', + wrap=False), + mock.call.add_rule('neutron-meter-r-c5df2fe5-c60', + '-i qg-6d411f48-ec -d 10.0.0.0/24' + ' -j neutron-meter-l-c5df2fe5-c60', + wrap=False, top=False), + mock.call.empty_chain('neutron-meter-r-c5df2fe5-c60', + wrap=False), + mock.call.add_rule('neutron-meter-r-c5df2fe5-c60', + '-o qg-6d411f48-ec -d 10.0.0.0/24' + ' -j RETURN', + wrap=False, top=True), + mock.call.add_rule('neutron-meter-r-c5df2fe5-c60', + '-i qg-6d411f48-ec -d 20.0.0.0/24 -j ' + 'neutron-meter-l-c5df2fe5-c60', + wrap=False, top=False)] + + self.v4filter_inst.assert_has_calls(calls) + + def test_remove_metering_label_rule(self): + routers = [{'_metering_labels': [ + {'id': 'c5df2fe5-c600-4a2a-b2f4-c0fb6df73c83', + 'rules': [{ + 'direction': 'ingress', + 'excluded': False, + 'id': '7f1a261f-2489-4ed1-870c-a62754501379', + 'metering_label_id': 'c5df2fe5-c600-4a2a-b2f4-c0fb6df73c83', + 'remote_ip_prefix': '10.0.0.0/24'}, + {'direction': 'ingress', + 'excluded': False, + 'id': 'aaaa261f-2489-4ed1-870c-a62754501379', + 'metering_label_id': 'c5df2fe5-c600-4a2a-b2f4-c0fb6df73c83', + 'remote_ip_prefix': '20.0.0.0/24'}] + }], + 'admin_state_up': True, + 'gw_port_id': '7d411f48-ecc7-45e0-9ece-3b5bdb54fcee', + 'id': '473ec392-1711-44e3-b008-3251ccfc5099', + 'name': 'router1', + 'status': 'ACTIVE', + 'tenant_id': '6c5f5d2a1fa2441e88e35422926f48e8'}] + + self.metering.add_metering_label(None, routers) + + routers = [{'_metering_labels': [ + {'id': 'c5df2fe5-c600-4a2a-b2f4-c0fb6df73c83', + 'rules': [{ + 'direction': 'ingress', + 'excluded': False, + 'id': '7f1a261f-2489-4ed1-870c-a62754501379', + 'metering_label_id': 'c5df2fe5-c600-4a2a-b2f4-c0fb6df73c83', + 'remote_ip_prefix': '10.0.0.0/24'}] + }], + 'admin_state_up': True, + 'gw_port_id': '7d411f48-ecc7-45e0-9ece-3b5bdb54fcee', + 'id': '473ec392-1711-44e3-b008-3251ccfc5099', + 'name': 'router1', + 'status': 'ACTIVE', + 'tenant_id': 
'6c5f5d2a1fa2441e88e35422926f48e8'}] + + self.metering.update_metering_label_rules(None, routers) + calls = [mock.call.add_chain('neutron-meter-l-c5df2fe5-c60', + wrap=False), + mock.call.add_chain('neutron-meter-r-c5df2fe5-c60', + wrap=False), + mock.call.add_rule('neutron-meter-FORWARD', '-j ' + 'neutron-meter-r-c5df2fe5-c60', + wrap=False), + mock.call.add_rule('neutron-meter-l-c5df2fe5-c60', + '', + wrap=False), + mock.call.add_rule('neutron-meter-r-c5df2fe5-c60', + '-i qg-7d411f48-ec -d 10.0.0.0/24' + ' -j neutron-meter-l-c5df2fe5-c60', + wrap=False, top=False), + mock.call.add_rule('neutron-meter-r-c5df2fe5-c60', + '-i qg-7d411f48-ec -d 20.0.0.0/24' + ' -j neutron-meter-l-c5df2fe5-c60', + wrap=False, top=False), + mock.call.empty_chain('neutron-meter-r-c5df2fe5-c60', + wrap=False), + mock.call.add_rule('neutron-meter-r-c5df2fe5-c60', + '-i qg-7d411f48-ec -d 10.0.0.0/24' + ' -j neutron-meter-l-c5df2fe5-c60', + wrap=False, top=False)] + + self.v4filter_inst.assert_has_calls(calls) + + def test_remove_metering_label(self): + routers = [{'_metering_labels': [ + {'id': 'c5df2fe5-c600-4a2a-b2f4-c0fb6df73c83', + 'rules': [{ + 'direction': 'ingress', + 'excluded': False, + 'id': '7f1a261f-2489-4ed1-870c-a62754501379', + 'metering_label_id': 'c5df2fe5-c600-4a2a-b2f4-c0fb6df73c83', + 'remote_ip_prefix': '10.0.0.0/24'}] + }], + 'admin_state_up': True, + 'gw_port_id': '7d411f48-ecc7-45e0-9ece-3b5bdb54fcee', + 'id': '473ec392-1711-44e3-b008-3251ccfc5099', + 'name': 'router1', + 'status': 'ACTIVE', + 'tenant_id': '6c5f5d2a1fa2441e88e35422926f48e8'}] + + self.metering.add_metering_label(None, routers) + self.metering.remove_metering_label(None, routers) + calls = [mock.call.add_chain('neutron-meter-l-c5df2fe5-c60', + wrap=False), + mock.call.add_chain('neutron-meter-r-c5df2fe5-c60', + wrap=False), + mock.call.add_rule('neutron-meter-FORWARD', '-j ' + 'neutron-meter-r-c5df2fe5-c60', + wrap=False), + mock.call.add_rule('neutron-meter-l-c5df2fe5-c60', + '', + wrap=False), + 
mock.call.add_rule('neutron-meter-r-c5df2fe5-c60', + '-i qg-7d411f48-ec -d 10.0.0.0/24' + ' -j neutron-meter-l-c5df2fe5-c60', + wrap=False, top=False), + mock.call.remove_chain('neutron-meter-l-c5df2fe5-c60', + wrap=False), + mock.call.remove_chain('neutron-meter-r-c5df2fe5-c60', + wrap=False)] + + self.v4filter_inst.assert_has_calls(calls) + + def test_update_routers(self): + routers = [{'_metering_labels': [ + {'id': 'c5df2fe5-c600-4a2a-b2f4-c0fb6df73c83', + 'rules': [{ + 'direction': 'ingress', + 'excluded': False, + 'id': '7f1a261f-2489-4ed1-870c-a62754501379', + 'metering_label_id': 'c5df2fe5-c600-4a2a-b2f4-c0fb6df73c83', + 'remote_ip_prefix': '10.0.0.0/24'}]}], + 'admin_state_up': True, + 'gw_port_id': '6d411f48-ecc7-45e0-9ece-3b5bdb54fcee', + 'id': '473ec392-1711-44e3-b008-3251ccfc5099', + 'name': 'router1', + 'status': 'ACTIVE', + 'tenant_id': '6c5f5d2a1fa2441e88e35422926f48e8'}, + {'_metering_labels': [ + {'id': 'eeef45da-c600-4a2a-b2f4-c0fb6df73c83', + 'rules': [{ + 'direction': 'ingress', + 'excluded': True, + 'id': 'fa2441e8-2489-4ed1-870c-a62754501379', + 'metering_label_id': 'eeef45da-c600-4a2a-b2f4-c0fb6df73c83', + 'remote_ip_prefix': '20.0.0.0/24'}]}], + 'admin_state_up': True, + 'gw_port_id': '7d411f48-ecc7-45e0-9ece-3b5bdb54fcee', + 'id': '373ec392-1711-44e3-b008-3251ccfc5099', + 'name': 'router2', + 'status': 'ACTIVE', + 'tenant_id': '6c5f5d2a1fa2441e88e35422926f48e8'}] + + self.metering.add_metering_label(None, routers) + + updates = copy.deepcopy(routers) + updates[0]['gw_port_id'] = '587b63c1-22a3-40b3-9834-486d1fb215a5' + + self.metering.update_routers(None, updates) + calls = [mock.call.add_chain('neutron-meter-l-c5df2fe5-c60', + wrap=False), + mock.call.add_chain('neutron-meter-r-c5df2fe5-c60', + wrap=False), + mock.call.add_rule('neutron-meter-FORWARD', '-j ' + 'neutron-meter-r-c5df2fe5-c60', + wrap=False), + mock.call.add_rule('neutron-meter-l-c5df2fe5-c60', + '', + wrap=False), + mock.call.add_rule('neutron-meter-r-c5df2fe5-c60', + '-i 
qg-6d411f48-ec -d 10.0.0.0/24' + ' -j neutron-meter-l-c5df2fe5-c60', + wrap=False, top=False), + mock.call.add_chain('neutron-meter-l-eeef45da-c60', + wrap=False), + mock.call.add_chain('neutron-meter-r-eeef45da-c60', + wrap=False), + mock.call.add_rule('neutron-meter-FORWARD', '-j ' + 'neutron-meter-r-eeef45da-c60', + wrap=False), + mock.call.add_rule('neutron-meter-l-eeef45da-c60', + '', + wrap=False), + mock.call.add_rule('neutron-meter-r-eeef45da-c60', + '-i qg-7d411f48-ec -d 20.0.0.0/24' + ' -j RETURN', + wrap=False, top=True), + mock.call.remove_chain('neutron-meter-l-c5df2fe5-c60', + wrap=False), + mock.call.remove_chain('neutron-meter-r-c5df2fe5-c60', + wrap=False), + mock.call.add_chain('neutron-meter-l-c5df2fe5-c60', + wrap=False), + mock.call.add_chain('neutron-meter-r-c5df2fe5-c60', + wrap=False), + mock.call.add_rule('neutron-meter-FORWARD', '-j ' + 'neutron-meter-r-c5df2fe5-c60', + wrap=False), + mock.call.add_rule('neutron-meter-l-c5df2fe5-c60', + '', + wrap=False), + mock.call.add_rule('neutron-meter-r-c5df2fe5-c60', + '-i qg-587b63c1-22 -d 10.0.0.0/24' + ' -j neutron-meter-l-c5df2fe5-c60', + wrap=False, top=False)] + + self.v4filter_inst.assert_has_calls(calls) diff --git a/neutron/tests/unit/services/metering/test_metering_agent.py b/neutron/tests/unit/services/metering/test_metering_agent.py new file mode 100644 index 000000000..b3e3511fe --- /dev/null +++ b/neutron/tests/unit/services/metering/test_metering_agent.py @@ -0,0 +1,160 @@ +# Copyright (C) 2013 eNovance SAS +# +# Author: Sylvain Afchain +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +import mock +from oslo.config import cfg + +from neutron.agent.common import config +from neutron.openstack.common import uuidutils +from neutron.services.metering.agents import metering_agent +from neutron.tests import base +from neutron.tests import fake_notifier + + +_uuid = uuidutils.generate_uuid + +TENANT_ID = _uuid() +LABEL_ID = _uuid() +ROUTERS = [{'status': 'ACTIVE', + 'name': 'router1', + 'gw_port_id': None, + 'admin_state_up': True, + 'tenant_id': TENANT_ID, + '_metering_labels': [{'rules': [], + 'id': LABEL_ID}], + 'id': _uuid()}] + + +class TestMeteringOperations(base.BaseTestCase): + + def setUp(self): + super(TestMeteringOperations, self).setUp() + cfg.CONF.register_opts(metering_agent.MeteringAgent.Opts) + config.register_root_helper(cfg.CONF) + + self.noop_driver = ('neutron.services.metering.drivers.noop.' + 'noop_driver.NoopMeteringDriver') + cfg.CONF.set_override('driver', self.noop_driver) + cfg.CONF.set_override('measure_interval', 0) + cfg.CONF.set_override('report_interval', 0) + + self.setup_notification_driver() + + metering_rpc = ('neutron.services.metering.agents.metering_agent.' 
+ 'MeteringPluginRpc._get_sync_data_metering') + self.metering_rpc_patch = mock.patch(metering_rpc, return_value=[]) + self.metering_rpc_patch.start() + + self.driver_patch = mock.patch(self.noop_driver, autospec=True) + self.driver_patch.start() + + loopingcall_patch = mock.patch( + 'neutron.openstack.common.loopingcall.FixedIntervalLoopingCall') + loopingcall_patch.start() + + self.agent = metering_agent.MeteringAgent('my agent', cfg.CONF) + self.driver = self.agent.metering_driver + + def test_add_metering_label(self): + self.agent.add_metering_label(None, ROUTERS) + self.assertEqual(self.driver.add_metering_label.call_count, 1) + + def test_remove_metering_label(self): + self.agent.remove_metering_label(None, ROUTERS) + self.assertEqual(self.driver.remove_metering_label.call_count, 1) + + def test_update_metering_label_rule(self): + self.agent.update_metering_label_rules(None, ROUTERS) + self.assertEqual(self.driver.update_metering_label_rules.call_count, 1) + + def test_routers_updated(self): + self.agent.routers_updated(None, ROUTERS) + self.assertEqual(self.driver.update_routers.call_count, 1) + + def test_get_traffic_counters(self): + self.agent._get_traffic_counters(None, ROUTERS) + self.assertEqual(self.driver.get_traffic_counters.call_count, 1) + + def test_notification_report(self): + self.agent.routers_updated(None, ROUTERS) + + self.driver.get_traffic_counters.return_value = {LABEL_ID: + {'pkts': 88, + 'bytes': 444}} + self.agent._metering_loop() + + self.assertNotEqual(len(fake_notifier.NOTIFICATIONS), 0) + for n in fake_notifier.NOTIFICATIONS: + if n['event_type'] == 'l3.meter': + break + + self.assertEqual(n['event_type'], 'l3.meter') + + payload = n['payload'] + self.assertEqual(payload['tenant_id'], TENANT_ID) + self.assertEqual(payload['label_id'], LABEL_ID) + self.assertEqual(payload['pkts'], 88) + self.assertEqual(payload['bytes'], 444) + + def test_router_deleted(self): + label_id = _uuid() + self.driver.get_traffic_counters = 
mock.MagicMock() + self.driver.get_traffic_counters.return_value = {label_id: + {'pkts': 44, + 'bytes': 222}} + self.agent._add_metering_info = mock.MagicMock() + + self.agent.routers_updated(None, ROUTERS) + self.agent.router_deleted(None, ROUTERS[0]['id']) + + self.assertEqual(self.agent._add_metering_info.call_count, 1) + self.assertEqual(self.driver.remove_router.call_count, 1) + + self.agent._add_metering_info.assert_called_with(label_id, 44, 222) + + +class TestMeteringDriver(base.BaseTestCase): + def setUp(self): + super(TestMeteringDriver, self).setUp() + cfg.CONF.register_opts(metering_agent.MeteringAgent.Opts) + config.register_root_helper(cfg.CONF) + + self.noop_driver = ('neutron.services.metering.drivers.noop.' + 'noop_driver.NoopMeteringDriver') + cfg.CONF.set_override('driver', self.noop_driver) + + self.agent = metering_agent.MeteringAgent('my agent', cfg.CONF) + self.driver = mock.Mock() + self.agent.metering_driver = self.driver + + def test_add_metering_label_with_bad_driver_impl(self): + del self.driver.add_metering_label + + with mock.patch.object(metering_agent, 'LOG') as log: + self.agent.add_metering_label(None, ROUTERS) + log.exception.assert_called_with(mock.ANY, + {'driver': self.noop_driver, + 'func': 'add_metering_label'}) + + def test_add_metering_label_runtime_error(self): + self.driver.add_metering_label.side_effect = RuntimeError + + with mock.patch.object(metering_agent, 'LOG') as log: + self.agent.add_metering_label(None, ROUTERS) + log.exception.assert_called_with(mock.ANY, + {'driver': self.noop_driver, + 'func': + 'add_metering_label'}) diff --git a/neutron/tests/unit/services/metering/test_metering_plugin.py b/neutron/tests/unit/services/metering/test_metering_plugin.py new file mode 100644 index 000000000..7d7c4c025 --- /dev/null +++ b/neutron/tests/unit/services/metering/test_metering_plugin.py @@ -0,0 +1,448 @@ +# Copyright (C) 2013 eNovance SAS +# +# Author: Sylvain Afchain +# +# Licensed under the Apache License, Version 
2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import mock + +from neutron.api.v2 import attributes as attr +from neutron.common import constants as n_constants +from neutron.common import topics +from neutron import context +from neutron.db import agents_db +from neutron.db import l3_agentschedulers_db +from neutron.db.metering import metering_rpc +from neutron.extensions import l3 as ext_l3 +from neutron.extensions import metering as ext_metering +from neutron import manager +from neutron.openstack.common import timeutils +from neutron.openstack.common import uuidutils +from neutron.plugins.common import constants +from neutron.tests.unit.db.metering import test_db_metering +from neutron.tests.unit import test_db_plugin +from neutron.tests.unit import test_l3_plugin + + +_uuid = uuidutils.generate_uuid + +METERING_SERVICE_PLUGIN_KLASS = ( + "neutron.services.metering." 
+ "metering_plugin.MeteringPlugin" +) + + +class MeteringTestExtensionManager(object): + + def get_resources(self): + attr.RESOURCE_ATTRIBUTE_MAP.update(ext_metering.RESOURCE_ATTRIBUTE_MAP) + attr.RESOURCE_ATTRIBUTE_MAP.update(ext_l3.RESOURCE_ATTRIBUTE_MAP) + + l3_res = ext_l3.L3.get_resources() + metering_res = ext_metering.Metering.get_resources() + + return l3_res + metering_res + + def get_actions(self): + return [] + + def get_request_extensions(self): + return [] + + +class TestMeteringPlugin(test_db_plugin.NeutronDbPluginV2TestCase, + test_l3_plugin.L3NatTestCaseMixin, + test_db_metering.MeteringPluginDbTestCaseMixin): + + resource_prefix_map = dict( + (k.replace('_', '-'), constants.COMMON_PREFIXES[constants.METERING]) + for k in ext_metering.RESOURCE_ATTRIBUTE_MAP.keys() + ) + + def setUp(self): + plugin = 'neutron.tests.unit.test_l3_plugin.TestL3NatIntPlugin' + service_plugins = {'metering_plugin_name': + METERING_SERVICE_PLUGIN_KLASS} + ext_mgr = MeteringTestExtensionManager() + super(TestMeteringPlugin, self).setUp(plugin=plugin, ext_mgr=ext_mgr, + service_plugins=service_plugins) + + self.uuid = '654f6b9d-0f36-4ae5-bd1b-01616794ca60' + + uuid = 'neutron.openstack.common.uuidutils.generate_uuid' + self.uuid_patch = mock.patch(uuid, return_value=self.uuid) + self.mock_uuid = self.uuid_patch.start() + + fanout = ('neutron.common.rpc_compat.RpcProxy.fanout_cast') + self.fanout_patch = mock.patch(fanout) + self.mock_fanout = self.fanout_patch.start() + + self.tenant_id = 'a7e61382-47b8-4d40-bae3-f95981b5637b' + self.ctx = context.Context('', self.tenant_id, is_admin=True) + self.context_patch = mock.patch('neutron.context.Context', + return_value=self.ctx) + self.mock_context = self.context_patch.start() + + self.topic = 'metering_agent' + + def test_add_metering_label_rpc_call(self): + second_uuid = 'e27fe2df-376e-4ac7-ae13-92f050a21f84' + expected = {'args': {'routers': [{'status': 'ACTIVE', + 'name': 'router1', + 'gw_port_id': None, + 'admin_state_up': 
True, + 'tenant_id': self.tenant_id, + '_metering_labels': [ + {'rules': [], + 'id': self.uuid}], + 'id': self.uuid}]}, + 'namespace': None, + 'method': 'add_metering_label'} + + tenant_id_2 = '8a268a58-1610-4890-87e0-07abb8231206' + self.mock_uuid.return_value = second_uuid + with self.router(name='router2', tenant_id=tenant_id_2, + set_context=True): + self.mock_uuid.return_value = self.uuid + with self.router(name='router1', tenant_id=self.tenant_id, + set_context=True): + with self.metering_label(tenant_id=self.tenant_id, + set_context=True): + self.mock_fanout.assert_called_with(self.ctx, expected, + topic=self.topic) + + def test_remove_metering_label_rpc_call(self): + expected = {'args': + {'routers': [{'status': 'ACTIVE', + 'name': 'router1', + 'gw_port_id': None, + 'admin_state_up': True, + 'tenant_id': self.tenant_id, + '_metering_labels': [ + {'rules': [], + 'id': self.uuid}], + 'id': self.uuid}]}, + 'namespace': None, + 'method': 'add_metering_label'} + + with self.router(tenant_id=self.tenant_id, set_context=True): + with self.metering_label(tenant_id=self.tenant_id, + set_context=True): + self.mock_fanout.assert_called_with(self.ctx, expected, + topic=self.topic) + expected['method'] = 'remove_metering_label' + self.mock_fanout.assert_called_with(self.ctx, expected, + topic=self.topic) + + def test_remove_one_metering_label_rpc_call(self): + second_uuid = 'e27fe2df-376e-4ac7-ae13-92f050a21f84' + expected_add = {'args': + {'routers': [{'status': 'ACTIVE', + 'name': 'router1', + 'gw_port_id': None, + 'admin_state_up': True, + 'tenant_id': self.tenant_id, + '_metering_labels': [ + {'rules': [], + 'id': self.uuid}, + {'rules': [], + 'id': second_uuid}], + 'id': self.uuid}]}, + 'namespace': None, + 'method': 'add_metering_label'} + expected_remove = {'args': + {'routers': [{'status': 'ACTIVE', + 'name': 'router1', + 'gw_port_id': None, + 'admin_state_up': True, + 'tenant_id': self.tenant_id, + '_metering_labels': [ + {'rules': [], + 'id': second_uuid}], + 
'id': self.uuid}]}, + 'namespace': None, + 'method': 'remove_metering_label'} + + with self.router(tenant_id=self.tenant_id, set_context=True): + with self.metering_label(tenant_id=self.tenant_id, + set_context=True): + self.mock_uuid.return_value = second_uuid + with self.metering_label(tenant_id=self.tenant_id, + set_context=True): + self.mock_fanout.assert_called_with(self.ctx, expected_add, + topic=self.topic) + self.mock_fanout.assert_called_with(self.ctx, expected_remove, + topic=self.topic) + + def test_update_metering_label_rules_rpc_call(self): + second_uuid = 'e27fe2df-376e-4ac7-ae13-92f050a21f84' + expected_add = {'args': + {'routers': [ + {'status': 'ACTIVE', + 'name': 'router1', + 'gw_port_id': None, + 'admin_state_up': True, + 'tenant_id': self.tenant_id, + '_metering_labels': [ + {'rules': [ + {'remote_ip_prefix': '10.0.0.0/24', + 'direction': 'ingress', + 'metering_label_id': self.uuid, + 'excluded': False, + 'id': self.uuid}, + {'remote_ip_prefix': '10.0.0.0/24', + 'direction': 'egress', + 'metering_label_id': self.uuid, + 'excluded': False, + 'id': second_uuid}], + 'id': self.uuid}], + 'id': self.uuid}]}, + 'namespace': None, + 'method': 'update_metering_label_rules'} + + expected_del = {'args': + {'routers': [ + {'status': 'ACTIVE', + 'name': 'router1', + 'gw_port_id': None, + 'admin_state_up': True, + 'tenant_id': self.tenant_id, + '_metering_labels': [ + {'rules': [ + {'remote_ip_prefix': '10.0.0.0/24', + 'direction': 'ingress', + 'metering_label_id': self.uuid, + 'excluded': False, + 'id': self.uuid}], + 'id': self.uuid}], + 'id': self.uuid}]}, + 'namespace': None, + 'method': 'update_metering_label_rules'} + + with self.router(tenant_id=self.tenant_id, set_context=True): + with self.metering_label(tenant_id=self.tenant_id, + set_context=True) as label: + l = label['metering_label'] + with self.metering_label_rule(l['id']): + self.mock_uuid.return_value = second_uuid + with self.metering_label_rule(l['id'], direction='egress'): + 
self.mock_fanout.assert_called_with(self.ctx, + expected_add, + topic=self.topic) + self.mock_fanout.assert_called_with(self.ctx, + expected_del, + topic=self.topic) + + def test_delete_metering_label_does_not_clear_router_tenant_id(self): + tenant_id = '654f6b9d-0f36-4ae5-bd1b-01616794ca60' + with self.metering_label(tenant_id=tenant_id, + no_delete=True) as metering_label: + with self.router(tenant_id=tenant_id, set_context=True) as r: + router = self._show('routers', r['router']['id']) + self.assertEqual(tenant_id, router['router']['tenant_id']) + metering_label_id = metering_label['metering_label']['id'] + self._delete('metering-labels', metering_label_id, 204) + router = self._show('routers', r['router']['id']) + self.assertEqual(tenant_id, router['router']['tenant_id']) + + +class TestMeteringPluginL3AgentScheduler( + l3_agentschedulers_db.L3AgentSchedulerDbMixin, + test_db_plugin.NeutronDbPluginV2TestCase, + test_l3_plugin.L3NatTestCaseMixin, + test_db_metering.MeteringPluginDbTestCaseMixin): + + resource_prefix_map = dict( + (k.replace('_', '-'), constants.COMMON_PREFIXES[constants.METERING]) + for k in ext_metering.RESOURCE_ATTRIBUTE_MAP.keys() + ) + + def setUp(self, plugin_str=None, service_plugins=None, scheduler=None): + if not plugin_str: + plugin_str = ('neutron.tests.unit.test_l3_plugin.' 
+ 'TestL3NatIntAgentSchedulingPlugin') + + if not service_plugins: + service_plugins = {'metering_plugin_name': + METERING_SERVICE_PLUGIN_KLASS} + + if not scheduler: + scheduler = plugin_str + + ext_mgr = MeteringTestExtensionManager() + super(TestMeteringPluginL3AgentScheduler, + self).setUp(plugin=plugin_str, ext_mgr=ext_mgr, + service_plugins=service_plugins) + + self.uuid = '654f6b9d-0f36-4ae5-bd1b-01616794ca60' + + uuid = 'neutron.openstack.common.uuidutils.generate_uuid' + self.uuid_patch = mock.patch(uuid, return_value=self.uuid) + self.mock_uuid = self.uuid_patch.start() + + cast = 'neutron.common.rpc_compat.RpcProxy.cast' + self.cast_patch = mock.patch(cast) + self.mock_cast = self.cast_patch.start() + + self.tenant_id = 'a7e61382-47b8-4d40-bae3-f95981b5637b' + self.ctx = context.Context('', self.tenant_id, is_admin=True) + self.context_patch = mock.patch('neutron.context.Context', + return_value=self.ctx) + self.mock_context = self.context_patch.start() + + self.l3routers_patch = mock.patch(scheduler + + '.get_l3_agents_hosting_routers') + self.l3routers_mock = self.l3routers_patch.start() + + self.topic = 'metering_agent' + + def test_add_metering_label_rpc_call(self): + second_uuid = 'e27fe2df-376e-4ac7-ae13-92f050a21f84' + expected1 = {'args': {'routers': [{'status': 'ACTIVE', + 'name': 'router1', + 'gw_port_id': None, + 'admin_state_up': True, + 'tenant_id': self.tenant_id, + '_metering_labels': [ + {'rules': [], + 'id': second_uuid}], + 'id': self.uuid}]}, + 'namespace': None, + 'method': 'add_metering_label'} + expected2 = {'args': {'routers': [{'status': 'ACTIVE', + 'name': 'router2', + 'gw_port_id': None, + 'admin_state_up': True, + 'tenant_id': self.tenant_id, + '_metering_labels': [ + {'rules': [], + 'id': second_uuid}], + 'id': second_uuid}]}, + 'namespace': None, + 'method': 'add_metering_label'} + + # bind each router to a specific agent + agent1 = agents_db.Agent(host='agent1') + agent2 = agents_db.Agent(host='agent2') + + agents = 
{self.uuid: agent1, + second_uuid: agent2} + + def side_effect(context, routers, admin_state_up, active): + return [agents[routers[0]]] + + self.l3routers_mock.side_effect = side_effect + + with self.router(name='router1', tenant_id=self.tenant_id, + set_context=True): + self.mock_uuid.return_value = second_uuid + with self.router(name='router2', tenant_id=self.tenant_id, + set_context=True): + with self.metering_label(tenant_id=self.tenant_id, + set_context=True): + + topic1 = "%s.%s" % (self.topic, 'agent1') + topic2 = "%s.%s" % (self.topic, 'agent2') + + # check if there is a call per agent + expected = [mock.call(self.ctx, expected1, topic=topic1), + mock.call(self.ctx, expected2, topic=topic2)] + + self.mock_cast.assert_has_calls(expected, any_order=True) + + +class TestMeteringPluginL3AgentSchedulerServicePlugin( + TestMeteringPluginL3AgentScheduler): + + """Unit tests for the case where separate service plugin + implements L3 routing. + """ + + def setUp(self): + l3_plugin = ('neutron.tests.unit.test_l3_plugin.' + 'TestL3NatAgentSchedulingServicePlugin') + service_plugins = {'metering_plugin_name': + METERING_SERVICE_PLUGIN_KLASS, + 'l3_plugin_name': l3_plugin} + + plugin_str = ('neutron.tests.unit.test_l3_plugin.' + 'TestNoL3NatPlugin') + + super(TestMeteringPluginL3AgentSchedulerServicePlugin, self).setUp( + plugin_str=plugin_str, service_plugins=service_plugins, + scheduler=l3_plugin) + + +class TestMeteringPluginRpcFromL3Agent( + test_db_plugin.NeutronDbPluginV2TestCase, + test_l3_plugin.L3NatTestCaseMixin, + test_db_metering.MeteringPluginDbTestCaseMixin): + + resource_prefix_map = dict( + (k.replace('_', '-'), constants.COMMON_PREFIXES[constants.METERING]) + for k in ext_metering.RESOURCE_ATTRIBUTE_MAP + ) + + def setUp(self): + service_plugins = {'metering_plugin_name': + METERING_SERVICE_PLUGIN_KLASS} + + plugin = ('neutron.tests.unit.test_l3_plugin.' 
+ 'TestL3NatIntAgentSchedulingPlugin') + + ext_mgr = MeteringTestExtensionManager() + super(TestMeteringPluginRpcFromL3Agent, + self).setUp(plugin=plugin, service_plugins=service_plugins, + ext_mgr=ext_mgr) + + self.meter_plugin = manager.NeutronManager.get_service_plugins().get( + constants.METERING) + + self.adminContext = context.get_admin_context() + self._register_l3_agent('agent1') + + def _register_l3_agent(self, host): + agent = { + 'binary': 'neutron-l3-agent', + 'host': host, + 'topic': topics.L3_AGENT, + 'configurations': {}, + 'agent_type': n_constants.AGENT_TYPE_L3, + 'start_flag': True + } + callback = agents_db.AgentExtRpcCallback() + callback.report_state(self.adminContext, + agent_state={'agent_state': agent}, + time=timeutils.strtime()) + + def test_get_sync_data_metering(self): + with self.subnet() as subnet: + s = subnet['subnet'] + self._set_net_external(s['network_id']) + with self.router(name='router1', subnet=subnet) as router: + r = router['router'] + self._add_external_gateway_to_router(r['id'], s['network_id']) + with self.metering_label(tenant_id=r['tenant_id']): + callbacks = metering_rpc.MeteringRpcCallbacks( + self.meter_plugin) + data = callbacks.get_sync_data_metering(self.adminContext, + host='agent1') + self.assertEqual('router1', data[0]['name']) + + self._register_l3_agent('agent2') + data = callbacks.get_sync_data_metering(self.adminContext, + host='agent2') + self.assertFalse(data) + + self._remove_external_gateway_from_router( + r['id'], s['network_id']) diff --git a/neutron/tests/unit/services/vpn/__init__.py b/neutron/tests/unit/services/vpn/__init__.py new file mode 100644 index 000000000..b936bbcb8 --- /dev/null +++ b/neutron/tests/unit/services/vpn/__init__.py @@ -0,0 +1,17 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# (c) Copyright 2013 Hewlett-Packard Development Company, L.P. +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# @author: Swaminathan Vasudevan, Hewlett-Packard. diff --git a/neutron/tests/unit/services/vpn/device_drivers/__init__.py b/neutron/tests/unit/services/vpn/device_drivers/__init__.py new file mode 100644 index 000000000..9b27a7520 --- /dev/null +++ b/neutron/tests/unit/services/vpn/device_drivers/__init__.py @@ -0,0 +1,16 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013, Nachi Ueno, NTT I3, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/neutron/tests/unit/services/vpn/device_drivers/cisco_csr_mock.py b/neutron/tests/unit/services/vpn/device_drivers/cisco_csr_mock.py new file mode 100644 index 000000000..fe2223a74 --- /dev/null +++ b/neutron/tests/unit/services/vpn/device_drivers/cisco_csr_mock.py @@ -0,0 +1,579 @@ +# Copyright 2014 Cisco Systems, Inc. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Paul Michali, Cisco Systems, Inc. + +"""Mock REST requests to Cisco Cloud Services Router.""" + +import re + +import functools +# import httmock +import requests +from requests import exceptions as r_exc + +from neutron.openstack.common import log as logging +# TODO(pcm) Remove once httmock package is added to test-requirements. For +# now, uncomment and include httmock source to UT +from neutron.tests.unit.services.vpn import device_drivers + +LOG = logging.getLogger(__name__) + +httmock = device_drivers.httmock + + +def repeat(n): + """Decorator to limit the number of times a handler is called. + + Will allow the wrapped function (handler) to be called 'n' times. + After that, this will return None for any additional calls, + allowing other handlers, if any, to be invoked. + """ + + class static: + retries = n + + def decorator(func): + @functools.wraps(func) + def wrapped(*args, **kwargs): + if static.retries == 0: + return None + static.retries -= 1 + return func(*args, **kwargs) + return wrapped + return decorator + + +def filter_request(methods, resource): + """Decorator to invoke handler once for a specific resource. + + This will call the handler only for a specific resource using + a specific method(s). Any other resource request or method will + return None, allowing other handlers, if any, to be invoked. 
+ """ + + class static: + target_methods = [m.upper() for m in methods] + target_resource = resource + + def decorator(func): + @functools.wraps(func) + def wrapped(*args, **kwargs): + if (args[1].method in static.target_methods and + static.target_resource in args[0].path): + return func(*args, **kwargs) + else: + return None # Not for this resource + return wrapped + return decorator + + +@httmock.urlmatch(netloc=r'localhost') +def token(url, request): + if 'auth/token-services' in url.path: + return {'status_code': requests.codes.OK, + 'content': {'token-id': 'dummy-token'}} + + +@httmock.urlmatch(netloc=r'localhost') +def token_unauthorized(url, request): + if 'auth/token-services' in url.path: + return {'status_code': requests.codes.UNAUTHORIZED} + + +@httmock.urlmatch(netloc=r'wrong-host') +def token_wrong_host(url, request): + raise r_exc.ConnectionError() + + +@httmock.all_requests +def token_timeout(url, request): + raise r_exc.Timeout() + + +@filter_request(['get'], 'global/host-name') +@httmock.all_requests +def timeout(url, request): + """Simulated timeout of a normal request.""" + + if not request.headers.get('X-auth-token', None): + return {'status_code': requests.codes.UNAUTHORIZED} + raise r_exc.Timeout() + + +@httmock.urlmatch(netloc=r'localhost') +def no_such_resource(url, request): + """Indicate not found error, when invalid resource requested.""" + return {'status_code': requests.codes.NOT_FOUND} + + +@filter_request(['get'], 'global/host-name') +@repeat(1) +@httmock.urlmatch(netloc=r'localhost') +def expired_request(url, request): + """Simulate access denied failure on first request for this resource. + + Intent here is to simulate that the token has expired, by failing + the first request to the resource. Because of the repeat=1, this + will only be called once, and subsequent calls will not be handled + by this function, but instead will access the normal handler and + will pass. 
Currently configured for a GET request, but will work + with POST and PUT as well. For DELETE, would need to filter_request on a + different resource (e.g. 'global/local-users') + """ + + return {'status_code': requests.codes.UNAUTHORIZED} + + +@httmock.urlmatch(netloc=r'localhost') +def normal_get(url, request): + if request.method != 'GET': + return + LOG.debug("GET mock for %s", url) + if not request.headers.get('X-auth-token', None): + return {'status_code': requests.codes.UNAUTHORIZED} + if 'global/host-name' in url.path: + content = {u'kind': u'object#host-name', + u'host-name': u'Router'} + return httmock.response(requests.codes.OK, content=content) + if 'global/local-users' in url.path: + content = {u'kind': u'collection#local-user', + u'users': ['peter', 'paul', 'mary']} + return httmock.response(requests.codes.OK, content=content) + if 'interfaces/GigabitEthernet' in url.path: + actual_interface = url.path.split('/')[-1] + ip = actual_interface[-1] + content = {u'kind': u'object#interface', + u'description': u'Changed description', + u'if-name': actual_interface, + u'proxy-arp': True, + u'subnet-mask': u'255.255.255.0', + u'icmp-unreachable': True, + u'nat-direction': u'', + u'icmp-redirects': True, + u'ip-address': u'192.168.200.%s' % ip, + u'verify-unicast-source': False, + u'type': u'ethernet'} + return httmock.response(requests.codes.OK, content=content) + if 'vpn-svc/ike/policies/2' in url.path: + content = {u'kind': u'object#ike-policy', + u'priority-id': u'2', + u'version': u'v1', + u'local-auth-method': u'pre-share', + u'encryption': u'aes256', + u'hash': u'sha', + u'dhGroup': 5, + u'lifetime': 3600} + return httmock.response(requests.codes.OK, content=content) + if 'vpn-svc/ike/keyrings' in url.path: + content = {u'kind': u'object#ike-keyring', + u'keyring-name': u'5', + u'pre-shared-key-list': [ + {u'key': u'super-secret', + u'encrypted': False, + u'peer-address': u'10.10.10.20 255.255.255.0'} + ]} + return httmock.response(requests.codes.OK, 
content=content) + if 'vpn-svc/ipsec/policies/' in url.path: + ipsec_policy_id = url.path.split('/')[-1] + content = {u'kind': u'object#ipsec-policy', + u'mode': u'tunnel', + u'policy-id': u'%s' % ipsec_policy_id, + u'protection-suite': { + u'esp-encryption': u'esp-256-aes', + u'esp-authentication': u'esp-sha-hmac', + u'ah': u'ah-sha-hmac', + }, + u'anti-replay-window-size': u'Disable', + u'lifetime-sec': 120, + u'pfs': u'group5', + u'lifetime-kb': 4608000, + u'idle-time': None} + return httmock.response(requests.codes.OK, content=content) + if 'vpn-svc/site-to-site/Tunnel' in url.path: + tunnel = url.path.split('/')[-1] + # Use same number, to allow mock to generate IPSec policy ID + ipsec_policy_id = tunnel[6:] + content = {u'kind': u'object#vpn-site-to-site', + u'vpn-interface-name': u'%s' % tunnel, + u'ip-version': u'ipv4', + u'vpn-type': u'site-to-site', + u'ipsec-policy-id': u'%s' % ipsec_policy_id, + u'ike-profile-id': None, + u'mtu': 1500, + u'local-device': { + u'ip-address': '10.3.0.1/24', + u'tunnel-ip-address': '10.10.10.10' + }, + u'remote-device': { + u'tunnel-ip-address': '10.10.10.20' + }} + return httmock.response(requests.codes.OK, content=content) + if 'vpn-svc/ike/keepalive' in url.path: + content = {u'interval': 60, + u'retry': 4, + u'periodic': True} + return httmock.response(requests.codes.OK, content=content) + if 'routing-svc/static-routes' in url.path: + content = {u'destination-network': u'10.1.0.0/24', + u'kind': u'object#static-route', + u'next-hop-router': None, + u'outgoing-interface': u'GigabitEthernet1', + u'admin-distance': 1} + return httmock.response(requests.codes.OK, content=content) + if 'vpn-svc/site-to-site/active/sessions' in url.path: + # Only including needed fields for mock + content = {u'kind': u'collection#vpn-active-sessions', + u'items': [{u'status': u'DOWN-NEGOTIATING', + u'vpn-interface-name': u'Tunnel123'}, ]} + return httmock.response(requests.codes.OK, content=content) + + +@filter_request(['get'], 
'vpn-svc/ike/keyrings') +@httmock.urlmatch(netloc=r'localhost') +def get_fqdn(url, request): + LOG.debug("GET FQDN mock for %s", url) + if not request.headers.get('X-auth-token', None): + return {'status_code': requests.codes.UNAUTHORIZED} + content = {u'kind': u'object#ike-keyring', + u'keyring-name': u'5', + u'pre-shared-key-list': [ + {u'key': u'super-secret', + u'encrypted': False, + u'peer-address': u'cisco.com'} + ]} + return httmock.response(requests.codes.OK, content=content) + + +@filter_request(['get'], 'vpn-svc/ipsec/policies/') +@httmock.urlmatch(netloc=r'localhost') +def get_no_ah(url, request): + LOG.debug("GET No AH mock for %s", url) + if not request.headers.get('X-auth-token', None): + return {'status_code': requests.codes.UNAUTHORIZED} + ipsec_policy_id = url.path.split('/')[-1] + content = {u'kind': u'object#ipsec-policy', + u'mode': u'tunnel', + u'anti-replay-window-size': u'128', + u'policy-id': u'%s' % ipsec_policy_id, + u'protection-suite': { + u'esp-encryption': u'esp-aes', + u'esp-authentication': u'esp-sha-hmac', + }, + u'lifetime-sec': 120, + u'pfs': u'group5', + u'lifetime-kb': 4608000, + u'idle-time': None} + return httmock.response(requests.codes.OK, content=content) + + +@httmock.urlmatch(netloc=r'localhost') +def get_defaults(url, request): + if request.method != 'GET': + return + LOG.debug("GET mock for %s", url) + if not request.headers.get('X-auth-token', None): + return {'status_code': requests.codes.UNAUTHORIZED} + if 'vpn-svc/ike/policies/2' in url.path: + content = {u'kind': u'object#ike-policy', + u'priority-id': u'2', + u'version': u'v1', + u'local-auth-method': u'pre-share', + u'encryption': u'des', + u'hash': u'sha', + u'dhGroup': 1, + u'lifetime': 86400} + return httmock.response(requests.codes.OK, content=content) + if 'vpn-svc/ipsec/policies/' in url.path: + ipsec_policy_id = url.path.split('/')[-1] + content = {u'kind': u'object#ipsec-policy', + u'mode': u'tunnel', + u'policy-id': u'%s' % ipsec_policy_id, + 
u'protection-suite': {}, + u'lifetime-sec': 3600, + u'pfs': u'Disable', + u'anti-replay-window-size': u'None', + u'lifetime-kb': 4608000, + u'idle-time': None} + return httmock.response(requests.codes.OK, content=content) + + +@filter_request(['get'], 'vpn-svc/site-to-site') +@httmock.urlmatch(netloc=r'localhost') +def get_unnumbered(url, request): + if not request.headers.get('X-auth-token', None): + return {'status_code': requests.codes.UNAUTHORIZED} + tunnel = url.path.split('/')[-1] + ipsec_policy_id = tunnel[6:] + content = {u'kind': u'object#vpn-site-to-site', + u'vpn-interface-name': u'%s' % tunnel, + u'ip-version': u'ipv4', + u'vpn-type': u'site-to-site', + u'ipsec-policy-id': u'%s' % ipsec_policy_id, + u'ike-profile-id': None, + u'mtu': 1500, + u'local-device': { + u'ip-address': u'GigabitEthernet3', + u'tunnel-ip-address': u'10.10.10.10' + }, + u'remote-device': { + u'tunnel-ip-address': u'10.10.10.20' + }} + return httmock.response(requests.codes.OK, content=content) + + +@filter_request(['get'], 'vpn-svc/site-to-site/Tunnel') +@httmock.urlmatch(netloc=r'localhost') +def get_admin_down(url, request): + if not request.headers.get('X-auth-token', None): + return {'status_code': requests.codes.UNAUTHORIZED} + # URI has .../Tunnel#/state, so get number from 2nd to last element + tunnel = url.path.split('/')[-2] + content = {u'kind': u'object#vpn-site-to-site-state', + u'vpn-interface-name': u'%s' % tunnel, + u'line-protocol-state': u'down', + u'enabled': False} + return httmock.response(requests.codes.OK, content=content) + + +@filter_request(['get'], 'vpn-svc/site-to-site/Tunnel') +@httmock.urlmatch(netloc=r'localhost') +def get_admin_up(url, request): + if not request.headers.get('X-auth-token', None): + return {'status_code': requests.codes.UNAUTHORIZED} + # URI has .../Tunnel#/state, so get number from 2nd to last element + tunnel = url.path.split('/')[-2] + content = {u'kind': u'object#vpn-site-to-site-state', + u'vpn-interface-name': u'%s' % tunnel, + 
u'line-protocol-state': u'down', + u'enabled': True} + return httmock.response(requests.codes.OK, content=content) + + +@filter_request(['get'], 'vpn-svc/site-to-site') +@httmock.urlmatch(netloc=r'localhost') +def get_mtu(url, request): + if not request.headers.get('X-auth-token', None): + return {'status_code': requests.codes.UNAUTHORIZED} + tunnel = url.path.split('/')[-1] + ipsec_policy_id = tunnel[6:] + content = {u'kind': u'object#vpn-site-to-site', + u'vpn-interface-name': u'%s' % tunnel, + u'ip-version': u'ipv4', + u'vpn-type': u'site-to-site', + u'ipsec-policy-id': u'%s' % ipsec_policy_id, + u'ike-profile-id': None, + u'mtu': 9192, + u'local-device': { + u'ip-address': u'10.3.0.1/24', + u'tunnel-ip-address': u'10.10.10.10' + }, + u'remote-device': { + u'tunnel-ip-address': u'10.10.10.20' + }} + return httmock.response(requests.codes.OK, content=content) + + +@filter_request(['get'], 'vpn-svc/ike/keepalive') +@httmock.urlmatch(netloc=r'localhost') +def get_not_configured(url, request): + if not request.headers.get('X-auth-token', None): + return {'status_code': requests.codes.UNAUTHORIZED} + return {'status_code': requests.codes.NOT_FOUND} + + +@filter_request(['get'], 'vpn-svc/site-to-site/active/sessions') +@httmock.urlmatch(netloc=r'localhost') +def get_none(url, request): + if not request.headers.get('X-auth-token', None): + return {'status_code': requests.codes.UNAUTHORIZED} + content = {u'kind': u'collection#vpn-active-sessions', + u'items': []} + return httmock.response(requests.codes.OK, content=content) + + +@filter_request(['get'], 'interfaces/GigabitEthernet3') +@httmock.urlmatch(netloc=r'localhost') +def get_local_ip(url, request): + if not request.headers.get('X-auth-token', None): + return {'status_code': requests.codes.UNAUTHORIZED} + content = {u'kind': u'object#interface', + u'subnet-mask': u'255.255.255.0', + u'ip-address': u'10.5.0.2'} + return httmock.response(requests.codes.OK, content=content) + + +@httmock.urlmatch(netloc=r'localhost') 
+def post(url, request): + if request.method != 'POST': + return + LOG.debug("POST mock for %s", url) + if not request.headers.get('X-auth-token', None): + return {'status_code': requests.codes.UNAUTHORIZED} + if 'interfaces/GigabitEthernet' in url.path: + return {'status_code': requests.codes.NO_CONTENT} + if 'global/local-users' in url.path: + if 'username' not in request.body: + return {'status_code': requests.codes.BAD_REQUEST} + if '"privilege": 20' in request.body: + return {'status_code': requests.codes.BAD_REQUEST} + headers = {'location': '%s/test-user' % url.geturl()} + return httmock.response(requests.codes.CREATED, headers=headers) + if 'vpn-svc/ike/policies' in url.path: + headers = {'location': "%s/2" % url.geturl()} + return httmock.response(requests.codes.CREATED, headers=headers) + if 'vpn-svc/ipsec/policies' in url.path: + m = re.search(r'"policy-id": "(\S+)"', request.body) + if m: + headers = {'location': "%s/%s" % (url.geturl(), m.group(1))} + return httmock.response(requests.codes.CREATED, headers=headers) + return {'status_code': requests.codes.BAD_REQUEST} + if 'vpn-svc/ike/keyrings' in url.path: + headers = {'location': "%s/5" % url.geturl()} + return httmock.response(requests.codes.CREATED, headers=headers) + if 'vpn-svc/site-to-site' in url.path: + m = re.search(r'"vpn-interface-name": "(\S+)"', request.body) + if m: + headers = {'location': "%s/%s" % (url.geturl(), m.group(1))} + return httmock.response(requests.codes.CREATED, headers=headers) + return {'status_code': requests.codes.BAD_REQUEST} + if 'routing-svc/static-routes' in url.path: + headers = {'location': + "%s/10.1.0.0_24_GigabitEthernet1" % url.geturl()} + return httmock.response(requests.codes.CREATED, headers=headers) + + +@filter_request(['post'], 'global/local-users') +@httmock.urlmatch(netloc=r'localhost') +def post_change_attempt(url, request): + LOG.debug("POST change value mock for %s", url) + if not request.headers.get('X-auth-token', None): + return {'status_code': 
requests.codes.UNAUTHORIZED} + return {'status_code': requests.codes.NOT_FOUND, + 'content': { + u'error-code': -1, + u'error-message': u'user test-user already exists'}} + + +@httmock.urlmatch(netloc=r'localhost') +def post_duplicate(url, request): + LOG.debug("POST duplicate mock for %s", url) + if not request.headers.get('X-auth-token', None): + return {'status_code': requests.codes.UNAUTHORIZED} + return {'status_code': requests.codes.BAD_REQUEST, + 'content': { + u'error-code': -1, + u'error-message': u'policy 2 exist, not allow to ' + u'update policy using POST method'}} + + +@filter_request(['post'], 'vpn-svc/site-to-site') +@httmock.urlmatch(netloc=r'localhost') +def post_missing_ipsec_policy(url, request): + LOG.debug("POST missing ipsec policy mock for %s", url) + if not request.headers.get('X-auth-token', None): + return {'status_code': requests.codes.UNAUTHORIZED} + return {'status_code': requests.codes.BAD_REQUEST} + + +@filter_request(['post'], 'vpn-svc/site-to-site') +@httmock.urlmatch(netloc=r'localhost') +def post_missing_ike_policy(url, request): + LOG.debug("POST missing ike policy mock for %s", url) + if not request.headers.get('X-auth-token', None): + return {'status_code': requests.codes.UNAUTHORIZED} + return {'status_code': requests.codes.BAD_REQUEST} + + +@filter_request(['post'], 'vpn-svc/site-to-site') +@httmock.urlmatch(netloc=r'localhost') +def post_bad_ip(url, request): + LOG.debug("POST bad IP mock for %s", url) + if not request.headers.get('X-auth-token', None): + return {'status_code': requests.codes.UNAUTHORIZED} + return {'status_code': requests.codes.BAD_REQUEST} + + +@filter_request(['post'], 'vpn-svc/site-to-site') +@httmock.urlmatch(netloc=r'localhost') +def post_bad_mtu(url, request): + LOG.debug("POST bad mtu mock for %s", url) + if not request.headers.get('X-auth-token', None): + return {'status_code': requests.codes.UNAUTHORIZED} + return {'status_code': requests.codes.BAD_REQUEST} + + +@filter_request(['post'], 
'vpn-svc/ipsec/policies') +@httmock.urlmatch(netloc=r'localhost') +def post_bad_lifetime(url, request): + LOG.debug("POST bad lifetime mock for %s", url) + if not request.headers.get('X-auth-token', None): + return {'status_code': requests.codes.UNAUTHORIZED} + return {'status_code': requests.codes.BAD_REQUEST} + + +@filter_request(['post'], 'vpn-svc/ipsec/policies') +@httmock.urlmatch(netloc=r'localhost') +def post_bad_name(url, request): + LOG.debug("POST bad IPSec policy name for %s", url) + if not request.headers.get('X-auth-token', None): + return {'status_code': requests.codes.UNAUTHORIZED} + return {'status_code': requests.codes.BAD_REQUEST} + + +@httmock.urlmatch(netloc=r'localhost') +def put(url, request): + if request.method != 'PUT': + return + LOG.debug("PUT mock for %s", url) + if not request.headers.get('X-auth-token', None): + return {'status_code': requests.codes.UNAUTHORIZED} + # Any resource + return {'status_code': requests.codes.NO_CONTENT} + + +@httmock.urlmatch(netloc=r'localhost') +def delete(url, request): + if request.method != 'DELETE': + return + LOG.debug("DELETE mock for %s", url) + if not request.headers.get('X-auth-token', None): + return {'status_code': requests.codes.UNAUTHORIZED} + # Any resource + return {'status_code': requests.codes.NO_CONTENT} + + +@httmock.urlmatch(netloc=r'localhost') +def delete_unknown(url, request): + if request.method != 'DELETE': + return + LOG.debug("DELETE unknown mock for %s", url) + if not request.headers.get('X-auth-token', None): + return {'status_code': requests.codes.UNAUTHORIZED} + # Any resource + return {'status_code': requests.codes.NOT_FOUND, + 'content': { + u'error-code': -1, + u'error-message': 'user unknown not found'}} + + +@httmock.urlmatch(netloc=r'localhost') +def delete_not_allowed(url, request): + if request.method != 'DELETE': + return + LOG.debug("DELETE not allowed mock for %s", url) + if not request.headers.get('X-auth-token', None): + return {'status_code': 
requests.codes.UNAUTHORIZED} + # Any resource + return {'status_code': requests.codes.METHOD_NOT_ALLOWED} diff --git a/neutron/tests/unit/services/vpn/device_drivers/notest_cisco_csr_rest.py b/neutron/tests/unit/services/vpn/device_drivers/notest_cisco_csr_rest.py new file mode 100644 index 000000000..45bf97238 --- /dev/null +++ b/neutron/tests/unit/services/vpn/device_drivers/notest_cisco_csr_rest.py @@ -0,0 +1,1346 @@ +# Copyright 2014 Cisco Systems, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Paul Michali, Cisco Systems, Inc. + +#TODO(pcm): Rename this file to remove the "no" prefix, once httmock is +# approved and added to requirements.txt + +import random + +# TODO(pcm) Uncomment when httmock is added to test requirements. +# import httmock +import requests + +from neutron.openstack.common import log as logging +from neutron.services.vpn.device_drivers import ( + cisco_csr_rest_client as csr_client) +from neutron.tests import base +from neutron.tests.unit.services.vpn import device_drivers +# TODO(pcm) Remove once httmock is available. In the meantime, use +# temporary local copy of httmock source to run UT + + +LOG = logging.getLogger(__name__) +# Enables debug logging to console +if True: + logging.CONF.set_override('debug', True) + logging.setup('neutron') + +dummy_policy_id = 'dummy-ipsec-policy-id-name' + +httmock = device_drivers.httmock + + +# Note: Helper functions to test reuse of IDs. 
+def generate_pre_shared_key_id(): + return random.randint(100, 200) + + +def generate_ike_policy_id(): + return random.randint(200, 300) + + +def generate_ipsec_policy_id(): + return random.randint(300, 400) + + +class TestCsrLoginRestApi(base.BaseTestCase): + + """Test logging into CSR to obtain token-id.""" + + def setUp(self, host='localhost', tunnel_ip='10.10.10.10', timeout=None): + super(TestCsrLoginRestApi, self).setUp() + self.csr = csr_client.CsrRestClient(host, tunnel_ip, 'stack', 'cisco', + timeout) + + def test_get_token(self): + """Obtain the token and its expiration time.""" + with httmock.HTTMock(device_drivers.csr_request.token): + self.assertTrue(self.csr.authenticate()) + self.assertEqual(requests.codes.OK, self.csr.status) + self.assertIsNotNone(self.csr.token) + + def test_unauthorized_token_request(self): + """Negative test of invalid user/password.""" + self.csr.auth = ('stack', 'bogus') + with httmock.HTTMock(device_drivers.csr_request.token_unauthorized): + self.assertIsNone(self.csr.authenticate()) + self.assertEqual(requests.codes.UNAUTHORIZED, self.csr.status) + + def test_non_existent_host(self): + """Negative test of request to non-existent host.""" + self.csr.host = 'wrong-host' + self.csr.token = 'Set by some previously successful access' + with httmock.HTTMock(device_drivers.csr_request.token_wrong_host): + self.assertIsNone(self.csr.authenticate()) + self.assertEqual(requests.codes.NOT_FOUND, self.csr.status) + self.assertIsNone(self.csr.token) + + def test_timeout_on_token_access(self): + """Negative test of a timeout on a request.""" + with httmock.HTTMock(device_drivers.csr_request.token_timeout): + self.assertIsNone(self.csr.authenticate()) + self.assertEqual(requests.codes.REQUEST_TIMEOUT, self.csr.status) + self.assertIsNone(self.csr.token) + + +class TestCsrGetRestApi(base.BaseTestCase): + + """Test CSR GET REST API.""" + + def setUp(self, host='localhost', tunnel_ip='10.10.10.10', timeout=None): + super(TestCsrGetRestApi, 
self).setUp() + self.csr = csr_client.CsrRestClient(host, tunnel_ip, 'stack', 'cisco', + timeout) + + def test_valid_rest_gets(self): + """Simple GET requests. + + First request will do a post to get token (login). Assumes + that there are two interfaces on the CSR. + """ + + with httmock.HTTMock(device_drivers.csr_request.token, + device_drivers.csr_request.normal_get): + content = self.csr.get_request('global/host-name') + self.assertEqual(requests.codes.OK, self.csr.status) + self.assertIn('host-name', content) + self.assertNotEqual(None, content['host-name']) + + content = self.csr.get_request('global/local-users') + self.assertEqual(requests.codes.OK, self.csr.status) + self.assertIn('users', content) + + +class TestCsrPostRestApi(base.BaseTestCase): + + """Test CSR POST REST API.""" + + def setUp(self, host='localhost', tunnel_ip='10.10.10.10', timeout=None): + super(TestCsrPostRestApi, self).setUp() + self.csr = csr_client.CsrRestClient(host, tunnel_ip, 'stack', 'cisco', + timeout) + + def test_post_requests(self): + """Simple POST requests (repeatable). + + First request will do a post to get token (login). Assumes + that there are two interfaces (Ge1 and Ge2) on the CSR. 
+ """ + + with httmock.HTTMock(device_drivers.csr_request.token, + device_drivers.csr_request.post): + content = self.csr.post_request( + 'interfaces/GigabitEthernet1/statistics', + payload={'action': 'clear'}) + self.assertEqual(requests.codes.NO_CONTENT, self.csr.status) + self.assertIsNone(content) + content = self.csr.post_request( + 'interfaces/GigabitEthernet2/statistics', + payload={'action': 'clear'}) + self.assertEqual(requests.codes.NO_CONTENT, self.csr.status) + self.assertIsNone(content) + + def test_post_with_location(self): + """Create a user and verify that location returned.""" + with httmock.HTTMock(device_drivers.csr_request.token, + device_drivers.csr_request.post): + location = self.csr.post_request( + 'global/local-users', + payload={'username': 'test-user', + 'password': 'pass12345', + 'privilege': 15}) + self.assertEqual(requests.codes.CREATED, self.csr.status) + self.assertIn('global/local-users/test-user', location) + + def test_post_missing_required_attribute(self): + """Negative test of POST with missing mandatory info.""" + with httmock.HTTMock(device_drivers.csr_request.token, + device_drivers.csr_request.post): + self.csr.post_request('global/local-users', + payload={'password': 'pass12345', + 'privilege': 15}) + self.assertEqual(requests.codes.BAD_REQUEST, self.csr.status) + + def test_post_invalid_attribute(self): + """Negative test of POST with invalid info.""" + with httmock.HTTMock(device_drivers.csr_request.token, + device_drivers.csr_request.post): + self.csr.post_request('global/local-users', + payload={'username': 'test-user', + 'password': 'pass12345', + 'privilege': 20}) + self.assertEqual(requests.codes.BAD_REQUEST, self.csr.status) + + def test_post_already_exists(self): + """Negative test of a duplicate POST. + + Uses the lower level _do_request() API to just perform the POST and + obtain the response, without any error processing. 
+ """ + with httmock.HTTMock(device_drivers.csr_request.token, + device_drivers.csr_request.post): + location = self.csr._do_request( + 'POST', + 'global/local-users', + payload={'username': 'test-user', + 'password': 'pass12345', + 'privilege': 15}, + more_headers=csr_client.HEADER_CONTENT_TYPE_JSON) + self.assertEqual(requests.codes.CREATED, self.csr.status) + self.assertIn('global/local-users/test-user', location) + with httmock.HTTMock(device_drivers.csr_request.token, + device_drivers.csr_request.post_change_attempt): + self.csr._do_request( + 'POST', + 'global/local-users', + payload={'username': 'test-user', + 'password': 'pass12345', + 'privilege': 15}, + more_headers=csr_client.HEADER_CONTENT_TYPE_JSON) + # Note: For local-user, a 404 error is returned. For + # site-to-site connection a 400 is returned. + self.assertEqual(requests.codes.NOT_FOUND, self.csr.status) + + def test_post_changing_value(self): + """Negative test of a POST trying to change a value.""" + with httmock.HTTMock(device_drivers.csr_request.token, + device_drivers.csr_request.post): + location = self.csr.post_request( + 'global/local-users', + payload={'username': 'test-user', + 'password': 'pass12345', + 'privilege': 15}) + self.assertEqual(requests.codes.CREATED, self.csr.status) + self.assertIn('global/local-users/test-user', location) + with httmock.HTTMock(device_drivers.csr_request.token, + device_drivers.csr_request.post_change_attempt): + content = self.csr.post_request('global/local-users', + payload={'username': 'test-user', + 'password': 'changed', + 'privilege': 15}) + self.assertEqual(requests.codes.NOT_FOUND, self.csr.status) + expected = {u'error-code': -1, + u'error-message': u'user test-user already exists'} + self.assertDictContainsSubset(expected, content) + + +class TestCsrPutRestApi(base.BaseTestCase): + + """Test CSR PUT REST API.""" + + def _save_resources(self): + with httmock.HTTMock(device_drivers.csr_request.token, + device_drivers.csr_request.normal_get): + 
details = self.csr.get_request('global/host-name') + if self.csr.status != requests.codes.OK: + self.fail("Unable to save original host name") + self.original_host = details['host-name'] + details = self.csr.get_request('interfaces/GigabitEthernet1') + if self.csr.status != requests.codes.OK: + self.fail("Unable to save interface Ge1 description") + self.original_if = details + if details.get('description', ''): + self.original_if['description'] = '' + self.csr.token = None + + def _restore_resources(self, user, password): + """Restore the host name and itnerface description. + + Must restore the user and password, so that authentication + token can be obtained (as some tests corrupt auth info). + Will also clear token, so that it gets a fresh token. + """ + + self.csr.auth = (user, password) + self.csr.token = None + with httmock.HTTMock(device_drivers.csr_request.token, + device_drivers.csr_request.put): + payload = {'host-name': self.original_host} + self.csr.put_request('global/host-name', payload=payload) + if self.csr.status != requests.codes.NO_CONTENT: + self.fail("Unable to restore host name after test") + payload = {'description': self.original_if['description'], + 'if-name': self.original_if['if-name'], + 'ip-address': self.original_if['ip-address'], + 'subnet-mask': self.original_if['subnet-mask'], + 'type': self.original_if['type']} + self.csr.put_request('interfaces/GigabitEthernet1', + payload=payload) + if self.csr.status != requests.codes.NO_CONTENT: + self.fail("Unable to restore I/F Ge1 description after test") + + def setUp(self, host='localhost', tunnel_ip='10.10.10.10', timeout=None): + """Prepare for PUT API tests.""" + super(TestCsrPutRestApi, self).setUp() + self.csr = csr_client.CsrRestClient(host, tunnel_ip, 'stack', 'cisco', + timeout) + self._save_resources() + self.addCleanup(self._restore_resources, 'stack', 'cisco') + + def test_put_requests(self): + """Simple PUT requests (repeatable). 
+ + First request will do a post to get token (login). Assumes + that there are two interfaces on the CSR (Ge1 and Ge2). + """ + + with httmock.HTTMock(device_drivers.csr_request.token, + device_drivers.csr_request.put, + device_drivers.csr_request.normal_get): + payload = {'host-name': 'TestHost'} + content = self.csr.put_request('global/host-name', + payload=payload) + self.assertEqual(requests.codes.NO_CONTENT, self.csr.status) + self.assertIsNone(content) + + payload = {'host-name': 'TestHost2'} + content = self.csr.put_request('global/host-name', + payload=payload) + self.assertEqual(requests.codes.NO_CONTENT, self.csr.status) + self.assertIsNone(content) + + def test_change_interface_description(self): + """Test that interface description can be changed. + + This was a problem with an earlier version of the CSR image and is + here to prevent regression. + """ + with httmock.HTTMock(device_drivers.csr_request.token, + device_drivers.csr_request.put, + device_drivers.csr_request.normal_get): + payload = {'description': u'Changed description', + 'if-name': self.original_if['if-name'], + 'ip-address': self.original_if['ip-address'], + 'subnet-mask': self.original_if['subnet-mask'], + 'type': self.original_if['type']} + content = self.csr.put_request( + 'interfaces/GigabitEthernet1', payload=payload) + self.assertEqual(requests.codes.NO_CONTENT, self.csr.status) + self.assertIsNone(content) + content = self.csr.get_request('interfaces/GigabitEthernet1') + self.assertEqual(requests.codes.OK, self.csr.status) + self.assertIn('description', content) + self.assertEqual(u'Changed description', + content['description']) + + def ignore_test_change_to_empty_interface_description(self): + """Test that interface description can be changed to empty string. + + This is a problem in the current version of the CSR image, which + rejects the change with a 400 error. 
This test is here to prevent + a regression (once it is fixed) Note that there is code in the + test setup to change the description to a non-empty string to + avoid failures in other tests. + """ + with httmock.HTTMock(device_drivers.csr_request.token, + device_drivers.csr_request.put, + device_drivers.csr_request.normal_get): + payload = {'description': '', + 'if-name': self.original_if['if-name'], + 'ip-address': self.original_if['ip-address'], + 'subnet-mask': self.original_if['subnet-mask'], + 'type': self.original_if['type']} + content = self.csr.put_request( + 'interfaces/GigabitEthernet1', payload=payload) + self.assertEqual(requests.codes.NO_CONTENT, self.csr.status) + self.assertIsNone(content) + content = self.csr.get_request('interfaces/GigabitEthernet1') + self.assertEqual(requests.codes.OK, self.csr.status) + self.assertIn('description', content) + self.assertEqual('', content['description']) + + +class TestCsrDeleteRestApi(base.BaseTestCase): + + """Test CSR DELETE REST API.""" + + def setUp(self, host='localhost', tunnel_ip='10.10.10.10', timeout=None): + super(TestCsrDeleteRestApi, self).setUp() + self.csr = csr_client.CsrRestClient(host, tunnel_ip, 'stack', 'cisco', + timeout) + + def _make_dummy_user(self): + """Create a user that will be later deleted.""" + self.csr.post_request('global/local-users', + payload={'username': 'dummy', + 'password': 'dummy', + 'privilege': 15}) + self.assertEqual(requests.codes.CREATED, self.csr.status) + + def test_delete_requests(self): + """Simple DELETE requests (creating entry first).""" + with httmock.HTTMock(device_drivers.csr_request.token, + device_drivers.csr_request.post, + device_drivers.csr_request.delete): + self._make_dummy_user() + self.csr.token = None # Force login + self.csr.delete_request('global/local-users/dummy') + self.assertEqual(requests.codes.NO_CONTENT, self.csr.status) + # Delete again, but without logging in this time + self._make_dummy_user() + 
self.csr.delete_request('global/local-users/dummy') + self.assertEqual(requests.codes.NO_CONTENT, self.csr.status) + + def test_delete_non_existent_entry(self): + """Negative test of trying to delete a non-existent user.""" + with httmock.HTTMock(device_drivers.csr_request.token, + device_drivers.csr_request.delete_unknown): + content = self.csr.delete_request('global/local-users/unknown') + self.assertEqual(requests.codes.NOT_FOUND, self.csr.status) + expected = {u'error-code': -1, + u'error-message': u'user unknown not found'} + self.assertDictContainsSubset(expected, content) + + def test_delete_not_allowed(self): + """Negative test of trying to delete the host-name.""" + with httmock.HTTMock(device_drivers.csr_request.token, + device_drivers.csr_request.delete_not_allowed): + self.csr.delete_request('global/host-name') + self.assertEqual(requests.codes.METHOD_NOT_ALLOWED, + self.csr.status) + + +class TestCsrRestApiFailures(base.BaseTestCase): + + """Test failure cases common for all REST APIs. + + Uses the lower level _do_request() to just perform the operation and get + the result, without any error handling. 
+ """ + + def setUp(self, host='localhost', tunnel_ip='10.10.10.10', timeout=0.1): + super(TestCsrRestApiFailures, self).setUp() + self.csr = csr_client.CsrRestClient(host, tunnel_ip, 'stack', 'cisco', + timeout) + + def test_request_for_non_existent_resource(self): + """Negative test of non-existent resource on REST request.""" + with httmock.HTTMock(device_drivers.csr_request.token, + device_drivers.csr_request.no_such_resource): + self.csr.post_request('no/such/request') + self.assertEqual(requests.codes.NOT_FOUND, self.csr.status) + # The result is HTTP 404 message, so no error content to check + + def test_timeout_during_request(self): + """Negative test of timeout during REST request.""" + with httmock.HTTMock(device_drivers.csr_request.token, + device_drivers.csr_request.timeout): + self.csr._do_request('GET', 'global/host-name') + self.assertEqual(requests.codes.REQUEST_TIMEOUT, self.csr.status) + + def test_token_expired_on_request(self): + """Token expired before trying a REST request. + + The mock is configured to return a 401 error on the first + attempt to reference the host name. Simulate expiration of + token by changing it. 
+ """ + + with httmock.HTTMock(device_drivers.csr_request.token, + device_drivers.csr_request.expired_request, + device_drivers.csr_request.normal_get): + self.csr.token = '123' # These are 44 characters, so won't match + content = self.csr._do_request('GET', 'global/host-name') + self.assertEqual(requests.codes.OK, self.csr.status) + self.assertIn('host-name', content) + self.assertNotEqual(None, content['host-name']) + + def test_failed_to_obtain_token_for_request(self): + """Negative test of unauthorized user for REST request.""" + self.csr.auth = ('stack', 'bogus') + with httmock.HTTMock(device_drivers.csr_request.token_unauthorized): + self.csr._do_request('GET', 'global/host-name') + self.assertEqual(requests.codes.UNAUTHORIZED, self.csr.status) + + +class TestCsrRestIkePolicyCreate(base.BaseTestCase): + + """Test IKE policy create REST requests.""" + + def setUp(self, host='localhost', tunnel_ip='10.10.10.10', timeout=None): + super(TestCsrRestIkePolicyCreate, self).setUp() + self.csr = csr_client.CsrRestClient(host, tunnel_ip, 'stack', 'cisco', + timeout) + + def test_create_delete_ike_policy(self): + """Create and then delete IKE policy.""" + with httmock.HTTMock(device_drivers.csr_request.token, + device_drivers.csr_request.post, + device_drivers.csr_request.normal_get): + policy_id = '2' + policy_info = {u'priority-id': u'%s' % policy_id, + u'encryption': u'aes256', + u'hash': u'sha', + u'dhGroup': 5, + u'lifetime': 3600} + location = self.csr.create_ike_policy(policy_info) + self.assertEqual(requests.codes.CREATED, self.csr.status) + self.assertIn('vpn-svc/ike/policies/%s' % policy_id, location) + # Check the hard-coded items that get set as well... 
+ content = self.csr.get_request(location, full_url=True) + self.assertEqual(requests.codes.OK, self.csr.status) + expected_policy = {u'kind': u'object#ike-policy', + u'version': u'v1', + u'local-auth-method': u'pre-share'} + expected_policy.update(policy_info) + self.assertEqual(expected_policy, content) + # Now delete and verify the IKE policy is gone + with httmock.HTTMock(device_drivers.csr_request.token, + device_drivers.csr_request.delete, + device_drivers.csr_request.no_such_resource): + self.csr.delete_ike_policy(policy_id) + self.assertEqual(requests.codes.NO_CONTENT, self.csr.status) + content = self.csr.get_request(location, full_url=True) + self.assertEqual(requests.codes.NOT_FOUND, self.csr.status) + + def test_create_ike_policy_with_defaults(self): + """Create IKE policy using defaults for all optional values.""" + with httmock.HTTMock(device_drivers.csr_request.token, + device_drivers.csr_request.post, + device_drivers.csr_request.get_defaults): + policy_id = '2' + policy_info = {u'priority-id': u'%s' % policy_id} + location = self.csr.create_ike_policy(policy_info) + self.assertEqual(requests.codes.CREATED, self.csr.status) + self.assertIn('vpn-svc/ike/policies/%s' % policy_id, location) + # Check the hard-coded items that get set as well... 
+ content = self.csr.get_request(location, full_url=True) + self.assertEqual(requests.codes.OK, self.csr.status) + expected_policy = {u'kind': u'object#ike-policy', + u'version': u'v1', + u'encryption': u'des', + u'hash': u'sha', + u'dhGroup': 1, + u'lifetime': 86400, + # Lower level sets this, but it is the default + u'local-auth-method': u'pre-share'} + expected_policy.update(policy_info) + self.assertEqual(expected_policy, content) + + def test_create_duplicate_ike_policy(self): + """Negative test of trying to create a duplicate IKE policy.""" + with httmock.HTTMock(device_drivers.csr_request.token, + device_drivers.csr_request.post, + device_drivers.csr_request.normal_get): + policy_id = '2' + policy_info = {u'priority-id': u'%s' % policy_id, + u'encryption': u'aes', + u'hash': u'sha', + u'dhGroup': 5, + u'lifetime': 3600} + location = self.csr.create_ike_policy(policy_info) + self.assertEqual(requests.codes.CREATED, self.csr.status) + self.assertIn('vpn-svc/ike/policies/%s' % policy_id, location) + with httmock.HTTMock(device_drivers.csr_request.token, + device_drivers.csr_request.post_duplicate): + location = self.csr.create_ike_policy(policy_info) + self.assertEqual(requests.codes.BAD_REQUEST, self.csr.status) + expected = {u'error-code': -1, + u'error-message': u'policy 2 exist, not allow to ' + u'update policy using POST method'} + self.assertDictContainsSubset(expected, location) + + +class TestCsrRestIPSecPolicyCreate(base.BaseTestCase): + + """Test IPSec policy create REST requests.""" + + def setUp(self, host='localhost', tunnel_ip='10.10.10.10', timeout=None): + super(TestCsrRestIPSecPolicyCreate, self).setUp() + self.csr = csr_client.CsrRestClient(host, tunnel_ip, 'stack', 'cisco', + timeout) + + def test_create_delete_ipsec_policy(self): + """Create and then delete IPSec policy.""" + with httmock.HTTMock(device_drivers.csr_request.token, + device_drivers.csr_request.post, + device_drivers.csr_request.normal_get): + policy_id = '123' + policy_info = 
{ + u'policy-id': u'%s' % policy_id, + u'protection-suite': { + u'esp-encryption': u'esp-256-aes', + u'esp-authentication': u'esp-sha-hmac', + u'ah': u'ah-sha-hmac', + }, + u'lifetime-sec': 120, + u'pfs': u'group5', + u'anti-replay-window-size': u'disable' + } + location = self.csr.create_ipsec_policy(policy_info) + self.assertEqual(requests.codes.CREATED, self.csr.status) + self.assertIn('vpn-svc/ipsec/policies/%s' % policy_id, location) + # Check the hard-coded items that get set as well... + content = self.csr.get_request(location, full_url=True) + self.assertEqual(requests.codes.OK, self.csr.status) + expected_policy = {u'kind': u'object#ipsec-policy', + u'mode': u'tunnel', + u'lifetime-kb': 4608000, + u'idle-time': None} + expected_policy.update(policy_info) + # CSR will respond with capitalized value + expected_policy[u'anti-replay-window-size'] = u'Disable' + self.assertEqual(expected_policy, content) + # Now delete and verify the IPSec policy is gone + with httmock.HTTMock(device_drivers.csr_request.token, + device_drivers.csr_request.delete, + device_drivers.csr_request.no_such_resource): + self.csr.delete_ipsec_policy(policy_id) + self.assertEqual(requests.codes.NO_CONTENT, self.csr.status) + content = self.csr.get_request(location, full_url=True) + self.assertEqual(requests.codes.NOT_FOUND, self.csr.status) + + def test_create_ipsec_policy_with_defaults(self): + """Create IPSec policy with default for all optional values.""" + with httmock.HTTMock(device_drivers.csr_request.token, + device_drivers.csr_request.post, + device_drivers.csr_request.get_defaults): + policy_id = '123' + policy_info = { + u'policy-id': u'%s' % policy_id, + } + location = self.csr.create_ipsec_policy(policy_info) + self.assertEqual(requests.codes.CREATED, self.csr.status) + self.assertIn('vpn-svc/ipsec/policies/%s' % policy_id, location) + # Check the hard-coded items that get set as well... 
+ content = self.csr.get_request(location, full_url=True) + self.assertEqual(requests.codes.OK, self.csr.status) + expected_policy = {u'kind': u'object#ipsec-policy', + u'mode': u'tunnel', + u'protection-suite': {}, + u'lifetime-sec': 3600, + u'pfs': u'Disable', + u'anti-replay-window-size': u'None', + u'lifetime-kb': 4608000, + u'idle-time': None} + expected_policy.update(policy_info) + self.assertEqual(expected_policy, content) + + def test_create_ipsec_policy_with_uuid(self): + """Create IPSec policy using UUID for id.""" + with httmock.HTTMock(device_drivers.csr_request.token, + device_drivers.csr_request.post, + device_drivers.csr_request.normal_get): + policy_info = { + u'policy-id': u'%s' % dummy_policy_id, + u'protection-suite': { + u'esp-encryption': u'esp-256-aes', + u'esp-authentication': u'esp-sha-hmac', + u'ah': u'ah-sha-hmac', + }, + u'lifetime-sec': 120, + u'pfs': u'group5', + u'anti-replay-window-size': u'disable' + } + location = self.csr.create_ipsec_policy(policy_info) + self.assertEqual(requests.codes.CREATED, self.csr.status) + self.assertIn('vpn-svc/ipsec/policies/%s' % dummy_policy_id, + location) + # Check the hard-coded items that get set as well... 
+ content = self.csr.get_request(location, full_url=True) + self.assertEqual(requests.codes.OK, self.csr.status) + expected_policy = {u'kind': u'object#ipsec-policy', + u'mode': u'tunnel', + u'lifetime-kb': 4608000, + u'idle-time': None} + expected_policy.update(policy_info) + # CSR will respond with capitalized value + expected_policy[u'anti-replay-window-size'] = u'Disable' + self.assertEqual(expected_policy, content) + + def test_create_ipsec_policy_without_ah(self): + """Create IPSec policy.""" + with httmock.HTTMock(device_drivers.csr_request.token, + device_drivers.csr_request.post, + device_drivers.csr_request.get_no_ah): + policy_id = '10' + policy_info = { + u'policy-id': u'%s' % policy_id, + u'protection-suite': { + u'esp-encryption': u'esp-aes', + u'esp-authentication': u'esp-sha-hmac', + }, + u'lifetime-sec': 120, + u'pfs': u'group5', + u'anti-replay-window-size': u'128' + } + location = self.csr.create_ipsec_policy(policy_info) + self.assertEqual(requests.codes.CREATED, self.csr.status) + self.assertIn('vpn-svc/ipsec/policies/%s' % policy_id, location) + # Check the hard-coded items that get set as well... 
+ content = self.csr.get_request(location, full_url=True) + self.assertEqual(requests.codes.OK, self.csr.status) + expected_policy = {u'kind': u'object#ipsec-policy', + u'mode': u'tunnel', + u'lifetime-kb': 4608000, + u'idle-time': None} + expected_policy.update(policy_info) + self.assertEqual(expected_policy, content) + + def test_invalid_ipsec_policy_lifetime(self): + """Failure test of IPSec policy with unsupported lifetime.""" + with httmock.HTTMock(device_drivers.csr_request.token, + device_drivers.csr_request.post_bad_lifetime): + policy_id = '123' + policy_info = { + u'policy-id': u'%s' % policy_id, + u'protection-suite': { + u'esp-encryption': u'esp-aes', + u'esp-authentication': u'esp-sha-hmac', + u'ah': u'ah-sha-hmac', + }, + u'lifetime-sec': 119, + u'pfs': u'group5', + u'anti-replay-window-size': u'128' + } + self.csr.create_ipsec_policy(policy_info) + self.assertEqual(requests.codes.BAD_REQUEST, self.csr.status) + + def test_create_ipsec_policy_with_invalid_name(self): + """Failure test of creating IPSec policy with name too long.""" + with httmock.HTTMock(device_drivers.csr_request.token, + device_drivers.csr_request.post_bad_name, + device_drivers.csr_request.get_defaults): + policy_id = 'policy-name-is-too-long-32-chars' + policy_info = { + u'policy-id': u'%s' % policy_id, + } + self.csr.create_ipsec_policy(policy_info) + self.assertEqual(requests.codes.BAD_REQUEST, self.csr.status) + + +class TestCsrRestPreSharedKeyCreate(base.BaseTestCase): + + """Test Pre-shared key (PSK) create REST requests.""" + + def setUp(self, host='localhost', tunnel_ip='10.10.10.10', timeout=None): + super(TestCsrRestPreSharedKeyCreate, self).setUp() + self.csr = csr_client.CsrRestClient(host, tunnel_ip, 'stack', 'cisco', + timeout) + + def test_create_delete_pre_shared_key(self): + """Create and then delete a keyring entry for pre-shared key.""" + with httmock.HTTMock(device_drivers.csr_request.token, + device_drivers.csr_request.post, + 
device_drivers.csr_request.normal_get): + psk_id = '5' + psk_info = {u'keyring-name': u'%s' % psk_id, + u'pre-shared-key-list': [ + {u'key': u'super-secret', + u'encrypted': False, + u'peer-address': u'10.10.10.20/24'} + ]} + location = self.csr.create_pre_shared_key(psk_info) + self.assertEqual(requests.codes.CREATED, self.csr.status) + self.assertIn('vpn-svc/ike/keyrings/%s' % psk_id, location) + # Check the hard-coded items that get set as well... + content = self.csr.get_request(location, full_url=True) + self.assertEqual(requests.codes.OK, self.csr.status) + expected_policy = {u'kind': u'object#ike-keyring'} + expected_policy.update(psk_info) + # Note: the peer CIDR is returned as an IP and mask + expected_policy[u'pre-shared-key-list'][0][u'peer-address'] = ( + u'10.10.10.20 255.255.255.0') + self.assertEqual(expected_policy, content) + # Now delete and verify pre-shared key is gone + with httmock.HTTMock(device_drivers.csr_request.token, + device_drivers.csr_request.delete, + device_drivers.csr_request.no_such_resource): + self.csr.delete_pre_shared_key(psk_id) + self.assertEqual(requests.codes.NO_CONTENT, self.csr.status) + content = self.csr.get_request(location, full_url=True) + self.assertEqual(requests.codes.NOT_FOUND, self.csr.status) + + def test_create_pre_shared_key_with_fqdn_peer(self): + """Create pre-shared key using FQDN for peer address.""" + with httmock.HTTMock(device_drivers.csr_request.token, + device_drivers.csr_request.post, + device_drivers.csr_request.get_fqdn): + psk_id = '5' + psk_info = {u'keyring-name': u'%s' % psk_id, + u'pre-shared-key-list': [ + {u'key': u'super-secret', + u'encrypted': False, + u'peer-address': u'cisco.com'} + ]} + location = self.csr.create_pre_shared_key(psk_info) + self.assertEqual(requests.codes.CREATED, self.csr.status) + self.assertIn('vpn-svc/ike/keyrings/%s' % psk_id, location) + # Check the hard-coded items that get set as well... 
+ content = self.csr.get_request(location, full_url=True) + self.assertEqual(requests.codes.OK, self.csr.status) + expected_policy = {u'kind': u'object#ike-keyring'} + expected_policy.update(psk_info) + self.assertEqual(expected_policy, content) + + def test_create_pre_shared_key_with_duplicate_peer_address(self): + """Negative test of creating a second pre-shared key with same peer.""" + with httmock.HTTMock(device_drivers.csr_request.token, + device_drivers.csr_request.post, + device_drivers.csr_request.normal_get): + psk_id = '5' + psk_info = {u'keyring-name': u'%s' % psk_id, + u'pre-shared-key-list': [ + {u'key': u'super-secret', + u'encrypted': False, + u'peer-address': u'10.10.10.20/24'} + ]} + location = self.csr.create_pre_shared_key(psk_info) + self.assertEqual(requests.codes.CREATED, self.csr.status) + self.assertIn('vpn-svc/ike/keyrings/%s' % psk_id, location) + with httmock.HTTMock(device_drivers.csr_request.token, + device_drivers.csr_request.post_duplicate): + psk_id = u'6' + another_psk_info = {u'keyring-name': psk_id, + u'pre-shared-key-list': [ + {u'key': u'abc123def', + u'encrypted': False, + u'peer-address': u'10.10.10.20/24'} + ]} + self.csr.create_ike_policy(another_psk_info) + self.assertEqual(requests.codes.BAD_REQUEST, self.csr.status) + + +class TestCsrRestIPSecConnectionCreate(base.BaseTestCase): + + """Test IPSec site-to-site connection REST requests. + + This requires us to have first created an IKE policy, IPSec policy, + and pre-shared key, so it's more of an itegration test, when used + with a real CSR (as we can't mock out these pre-conditions. 
+ """ + + def setUp(self, host='localhost', tunnel_ip='10.10.10.10', timeout=None): + super(TestCsrRestIPSecConnectionCreate, self).setUp() + self.csr = csr_client.CsrRestClient(host, tunnel_ip, 'stack', 'cisco', + timeout) + + def _make_psk_for_test(self): + psk_id = generate_pre_shared_key_id() + self._remove_resource_for_test(self.csr.delete_pre_shared_key, + psk_id) + with httmock.HTTMock(device_drivers.csr_request.token, + device_drivers.csr_request.post): + psk_info = {u'keyring-name': u'%d' % psk_id, + u'pre-shared-key-list': [ + {u'key': u'super-secret', + u'encrypted': False, + u'peer-address': u'10.10.10.20/24'} + ]} + self.csr.create_pre_shared_key(psk_info) + if self.csr.status != requests.codes.CREATED: + self.fail("Unable to create PSK for test case") + self.addCleanup(self._remove_resource_for_test, + self.csr.delete_pre_shared_key, psk_id) + return psk_id + + def _make_ike_policy_for_test(self): + policy_id = generate_ike_policy_id() + self._remove_resource_for_test(self.csr.delete_ike_policy, + policy_id) + with httmock.HTTMock(device_drivers.csr_request.token, + device_drivers.csr_request.post): + policy_info = {u'priority-id': u'%d' % policy_id, + u'encryption': u'aes', + u'hash': u'sha', + u'dhGroup': 5, + u'lifetime': 3600} + self.csr.create_ike_policy(policy_info) + if self.csr.status != requests.codes.CREATED: + self.fail("Unable to create IKE policy for test case") + self.addCleanup(self._remove_resource_for_test, + self.csr.delete_ike_policy, policy_id) + return policy_id + + def _make_ipsec_policy_for_test(self): + policy_id = generate_ipsec_policy_id() + self._remove_resource_for_test(self.csr.delete_ipsec_policy, + policy_id) + with httmock.HTTMock(device_drivers.csr_request.token, + device_drivers.csr_request.post): + policy_info = { + u'policy-id': u'%d' % policy_id, + u'protection-suite': { + u'esp-encryption': u'esp-aes', + u'esp-authentication': u'esp-sha-hmac', + u'ah': u'ah-sha-hmac', + }, + u'lifetime-sec': 120, + u'pfs': 
u'group5', + u'anti-replay-window-size': u'disable' + } + self.csr.create_ipsec_policy(policy_info) + if self.csr.status != requests.codes.CREATED: + self.fail("Unable to create IPSec policy for test case") + self.addCleanup(self._remove_resource_for_test, + self.csr.delete_ipsec_policy, policy_id) + return policy_id + + def _remove_resource_for_test(self, delete_resource, resource_id): + with httmock.HTTMock(device_drivers.csr_request.token, + device_drivers.csr_request.delete): + delete_resource(resource_id) + + def _prepare_for_site_conn_create(self, skip_psk=False, skip_ike=False, + skip_ipsec=False): + """Create the policies and PSK so can then create site conn.""" + if not skip_psk: + self._make_psk_for_test() + if not skip_ike: + self._make_ike_policy_for_test() + if not skip_ipsec: + ipsec_policy_id = self._make_ipsec_policy_for_test() + else: + ipsec_policy_id = generate_ipsec_policy_id() + # Note: Use same ID number for tunnel and IPSec policy, so that when + # GET tunnel info, the mocks can infer the IPSec policy ID from the + # tunnel number. 
+ return (ipsec_policy_id, ipsec_policy_id) + + def test_create_delete_ipsec_connection(self): + """Create and then delete an IPSec connection.""" + tunnel_id, ipsec_policy_id = self._prepare_for_site_conn_create() + with httmock.HTTMock(device_drivers.csr_request.token, + device_drivers.csr_request.post, + device_drivers.csr_request.normal_get): + connection_info = { + u'vpn-interface-name': u'Tunnel%d' % tunnel_id, + u'ipsec-policy-id': u'%d' % ipsec_policy_id, + u'mtu': 1500, + u'local-device': {u'ip-address': u'10.3.0.1/24', + u'tunnel-ip-address': u'10.10.10.10'}, + u'remote-device': {u'tunnel-ip-address': u'10.10.10.20'} + } + location = self.csr.create_ipsec_connection(connection_info) + self.addCleanup(self._remove_resource_for_test, + self.csr.delete_ipsec_connection, + 'Tunnel%d' % tunnel_id) + self.assertEqual(requests.codes.CREATED, self.csr.status) + self.assertIn('vpn-svc/site-to-site/Tunnel%d' % tunnel_id, + location) + # Check the hard-coded items that get set as well... + content = self.csr.get_request(location, full_url=True) + self.assertEqual(requests.codes.OK, self.csr.status) + expected_connection = {u'kind': u'object#vpn-site-to-site', + u'ike-profile-id': None, + u'mtu': 1500, + u'ip-version': u'ipv4'} + expected_connection.update(connection_info) + self.assertEqual(expected_connection, content) + # Now delete and verify that site-to-site connection is gone + with httmock.HTTMock(device_drivers.csr_request.token, + device_drivers.csr_request.delete, + device_drivers.csr_request.no_such_resource): + # Only delete connection. 
Cleanup will take care of prerequisites + self.csr.delete_ipsec_connection('Tunnel%d' % tunnel_id) + self.assertEqual(requests.codes.NO_CONTENT, self.csr.status) + content = self.csr.get_request(location, full_url=True) + self.assertEqual(requests.codes.NOT_FOUND, self.csr.status) + + def test_create_ipsec_connection_with_no_tunnel_subnet(self): + """Create an IPSec connection without an IP address on tunnel.""" + tunnel_id, ipsec_policy_id = self._prepare_for_site_conn_create() + with httmock.HTTMock(device_drivers.csr_request.token, + device_drivers.csr_request.post, + device_drivers.csr_request.get_unnumbered): + connection_info = { + u'vpn-interface-name': u'Tunnel%d' % tunnel_id, + u'ipsec-policy-id': u'%d' % ipsec_policy_id, + u'local-device': {u'ip-address': u'GigabitEthernet3', + u'tunnel-ip-address': u'10.10.10.10'}, + u'remote-device': {u'tunnel-ip-address': u'10.10.10.20'} + } + location = self.csr.create_ipsec_connection(connection_info) + self.addCleanup(self._remove_resource_for_test, + self.csr.delete_ipsec_connection, + 'Tunnel%d' % tunnel_id) + self.assertEqual(requests.codes.CREATED, self.csr.status) + self.assertIn('vpn-svc/site-to-site/Tunnel%d' % tunnel_id, + location) + # Check the hard-coded items that get set as well... + content = self.csr.get_request(location, full_url=True) + self.assertEqual(requests.codes.OK, self.csr.status) + expected_connection = {u'kind': u'object#vpn-site-to-site', + u'ike-profile-id': None, + u'mtu': 1500, + u'ip-version': u'ipv4'} + expected_connection.update(connection_info) + self.assertEqual(expected_connection, content) + + def test_create_ipsec_connection_no_pre_shared_key(self): + """Test of connection create without associated pre-shared key. + + The CSR will create the connection, but will not be able to pass + traffic without the pre-shared key. 
+ """ + + tunnel_id, ipsec_policy_id = self._prepare_for_site_conn_create( + skip_psk=True) + with httmock.HTTMock(device_drivers.csr_request.token, + device_drivers.csr_request.post, + device_drivers.csr_request.normal_get): + connection_info = { + u'vpn-interface-name': u'Tunnel%d' % tunnel_id, + u'ipsec-policy-id': u'%d' % ipsec_policy_id, + u'mtu': 1500, + u'local-device': {u'ip-address': u'10.3.0.1/24', + u'tunnel-ip-address': u'10.10.10.10'}, + u'remote-device': {u'tunnel-ip-address': u'10.10.10.20'} + } + location = self.csr.create_ipsec_connection(connection_info) + self.addCleanup(self._remove_resource_for_test, + self.csr.delete_ipsec_connection, + 'Tunnel%d' % tunnel_id) + self.assertEqual(requests.codes.CREATED, self.csr.status) + self.assertIn('vpn-svc/site-to-site/Tunnel%d' % tunnel_id, + location) + # Check the hard-coded items that get set as well... + content = self.csr.get_request(location, full_url=True) + self.assertEqual(requests.codes.OK, self.csr.status) + expected_connection = {u'kind': u'object#vpn-site-to-site', + u'ike-profile-id': None, + u'mtu': 1500, + u'ip-version': u'ipv4'} + expected_connection.update(connection_info) + self.assertEqual(expected_connection, content) + + def test_create_ipsec_connection_with_default_ike_policy(self): + """Test of connection create without IKE policy (uses default). + + Without an IKE policy, the CSR will use a built-in default IKE + policy setting for the connection. 
+ """ + + tunnel_id, ipsec_policy_id = self._prepare_for_site_conn_create( + skip_ike=True) + with httmock.HTTMock(device_drivers.csr_request.token, + device_drivers.csr_request.post, + device_drivers.csr_request.normal_get): + connection_info = { + u'vpn-interface-name': u'Tunnel%d' % tunnel_id, + u'ipsec-policy-id': u'%d' % ipsec_policy_id, + u'mtu': 1500, + u'local-device': {u'ip-address': u'10.3.0.1/24', + u'tunnel-ip-address': u'10.10.10.10'}, + u'remote-device': {u'tunnel-ip-address': u'10.10.10.20'} + } + location = self.csr.create_ipsec_connection(connection_info) + self.addCleanup(self._remove_resource_for_test, + self.csr.delete_ipsec_connection, + 'Tunnel%d' % tunnel_id) + self.assertEqual(requests.codes.CREATED, self.csr.status) + self.assertIn('vpn-svc/site-to-site/Tunnel%d' % tunnel_id, + location) + # Check the hard-coded items that get set as well... + content = self.csr.get_request(location, full_url=True) + self.assertEqual(requests.codes.OK, self.csr.status) + expected_connection = {u'kind': u'object#vpn-site-to-site', + u'ike-profile-id': None, + u'mtu': 1500, + u'ip-version': u'ipv4'} + expected_connection.update(connection_info) + self.assertEqual(expected_connection, content) + + def test_set_ipsec_connection_admin_state_changes(self): + """Create IPSec connection in admin down state.""" + tunnel_id, ipsec_policy_id = self._prepare_for_site_conn_create() + tunnel = u'Tunnel%d' % tunnel_id + with httmock.HTTMock(device_drivers.csr_request.token, + device_drivers.csr_request.post): + connection_info = { + u'vpn-interface-name': tunnel, + u'ipsec-policy-id': u'%d' % ipsec_policy_id, + u'mtu': 1500, + u'local-device': {u'ip-address': u'10.3.0.1/24', + u'tunnel-ip-address': u'10.10.10.10'}, + u'remote-device': {u'tunnel-ip-address': u'10.10.10.20'} + } + location = self.csr.create_ipsec_connection(connection_info) + self.addCleanup(self._remove_resource_for_test, + self.csr.delete_ipsec_connection, + tunnel) + 
self.assertEqual(requests.codes.CREATED, self.csr.status) + self.assertIn('vpn-svc/site-to-site/%s' % tunnel, location) + state_uri = location + "/state" + # Note: When created, the tunnel will be in admin 'up' state + # Note: Line protocol state will be down, unless have an active conn. + expected_state = {u'kind': u'object#vpn-site-to-site-state', + u'vpn-interface-name': tunnel, + u'line-protocol-state': u'down', + u'enabled': False} + with httmock.HTTMock(device_drivers.csr_request.put, + device_drivers.csr_request.get_admin_down): + self.csr.set_ipsec_connection_state(tunnel, admin_up=False) + self.assertEqual(requests.codes.NO_CONTENT, self.csr.status) + content = self.csr.get_request(state_uri, full_url=True) + self.assertEqual(requests.codes.OK, self.csr.status) + self.assertEqual(expected_state, content) + + with httmock.HTTMock(device_drivers.csr_request.put, + device_drivers.csr_request.get_admin_up): + self.csr.set_ipsec_connection_state(tunnel, admin_up=True) + self.assertEqual(requests.codes.NO_CONTENT, self.csr.status) + content = self.csr.get_request(state_uri, full_url=True) + self.assertEqual(requests.codes.OK, self.csr.status) + expected_state[u'enabled'] = True + self.assertEqual(expected_state, content) + + def test_create_ipsec_connection_missing_ipsec_policy(self): + """Negative test of connection create without IPSec policy.""" + tunnel_id, ipsec_policy_id = self._prepare_for_site_conn_create( + skip_ipsec=True) + with httmock.HTTMock( + device_drivers.csr_request.token, + device_drivers.csr_request.post_missing_ipsec_policy): + connection_info = { + u'vpn-interface-name': u'Tunnel%d' % tunnel_id, + u'ipsec-policy-id': u'%d' % ipsec_policy_id, + u'local-device': {u'ip-address': u'10.3.0.1/24', + u'tunnel-ip-address': u'10.10.10.10'}, + u'remote-device': {u'tunnel-ip-address': u'10.10.10.20'} + } + self.csr.create_ipsec_connection(connection_info) + self.addCleanup(self._remove_resource_for_test, + self.csr.delete_ipsec_connection, + 
'Tunnel%d' % tunnel_id) + self.assertEqual(requests.codes.BAD_REQUEST, self.csr.status) + + def _determine_conflicting_ip(self): + with httmock.HTTMock(device_drivers.csr_request.token, + device_drivers.csr_request.get_local_ip): + details = self.csr.get_request('interfaces/GigabitEthernet3') + if self.csr.status != requests.codes.OK: + self.fail("Unable to obtain interface GigabitEthernet3's IP") + if_ip = details.get('ip-address') + if not if_ip: + self.fail("No IP address for GigabitEthernet3 interface") + return '.'.join(if_ip.split('.')[:3]) + '.10' + + def test_create_ipsec_connection_conficting_tunnel_ip(self): + """Negative test of connection create with conflicting tunnel IP. + + Find out the IP of a local interface (GigabitEthernet3) and create an + IP that is on the same subnet. Note: this interface needs to be up. + """ + + conflicting_ip = self._determine_conflicting_ip() + tunnel_id, ipsec_policy_id = self._prepare_for_site_conn_create() + with httmock.HTTMock(device_drivers.csr_request.token, + device_drivers.csr_request.post_bad_ip): + connection_info = { + u'vpn-interface-name': u'Tunnel%d' % tunnel_id, + u'ipsec-policy-id': u'%d' % ipsec_policy_id, + u'local-device': {u'ip-address': u'%s/24' % conflicting_ip, + u'tunnel-ip-address': u'10.10.10.10'}, + u'remote-device': {u'tunnel-ip-address': u'10.10.10.20'} + } + self.csr.create_ipsec_connection(connection_info) + self.addCleanup(self._remove_resource_for_test, + self.csr.delete_ipsec_connection, + 'Tunnel%d' % tunnel_id) + self.assertEqual(requests.codes.BAD_REQUEST, self.csr.status) + + def test_create_ipsec_connection_with_max_mtu(self): + """Create an IPSec connection with max MTU value.""" + tunnel_id, ipsec_policy_id = self._prepare_for_site_conn_create() + with httmock.HTTMock(device_drivers.csr_request.token, + device_drivers.csr_request.post, + device_drivers.csr_request.get_mtu): + connection_info = { + u'vpn-interface-name': u'Tunnel%d' % tunnel_id, + u'ipsec-policy-id': u'%d' % 
ipsec_policy_id, + u'mtu': 9192, + u'local-device': {u'ip-address': u'10.3.0.1/24', + u'tunnel-ip-address': u'10.10.10.10'}, + u'remote-device': {u'tunnel-ip-address': u'10.10.10.20'} + } + location = self.csr.create_ipsec_connection(connection_info) + self.addCleanup(self._remove_resource_for_test, + self.csr.delete_ipsec_connection, + 'Tunnel%d' % tunnel_id) + self.assertEqual(requests.codes.CREATED, self.csr.status) + self.assertIn('vpn-svc/site-to-site/Tunnel%d' % tunnel_id, + location) + # Check the hard-coded items that get set as well... + content = self.csr.get_request(location, full_url=True) + self.assertEqual(requests.codes.OK, self.csr.status) + expected_connection = {u'kind': u'object#vpn-site-to-site', + u'ike-profile-id': None, + u'ip-version': u'ipv4'} + expected_connection.update(connection_info) + self.assertEqual(expected_connection, content) + + def test_create_ipsec_connection_with_bad_mtu(self): + """Negative test of connection create with unsupported MTU value.""" + tunnel_id, ipsec_policy_id = self._prepare_for_site_conn_create() + with httmock.HTTMock(device_drivers.csr_request.token, + device_drivers.csr_request.post_bad_mtu): + connection_info = { + u'vpn-interface-name': u'Tunnel%d' % tunnel_id, + u'ipsec-policy-id': u'%d' % ipsec_policy_id, + u'mtu': 9193, + u'local-device': {u'ip-address': u'10.3.0.1/24', + u'tunnel-ip-address': u'10.10.10.10'}, + u'remote-device': {u'tunnel-ip-address': u'10.10.10.20'} + } + self.csr.create_ipsec_connection(connection_info) + self.addCleanup(self._remove_resource_for_test, + self.csr.delete_ipsec_connection, + 'Tunnel%d' % tunnel_id) + self.assertEqual(requests.codes.BAD_REQUEST, self.csr.status) + + def test_status_when_no_tunnels_exist(self): + """Get status, when there are no tunnels.""" + with httmock.HTTMock(device_drivers.csr_request.token, + device_drivers.csr_request.get_none): + tunnels = self.csr.read_tunnel_statuses() + self.assertEqual(requests.codes.OK, self.csr.status) + 
self.assertEqual([], tunnels) + + def test_status_for_one_tunnel(self): + """Get status of one tunnel.""" + # Create the IPsec site-to-site connection first + tunnel_id, ipsec_policy_id = self._prepare_for_site_conn_create() + tunnel_id = 123 # Must hard code to work with mock + with httmock.HTTMock(device_drivers.csr_request.token, + device_drivers.csr_request.post, + device_drivers.csr_request.normal_get): + connection_info = { + u'vpn-interface-name': u'Tunnel123', + u'ipsec-policy-id': u'%d' % ipsec_policy_id, + u'local-device': {u'ip-address': u'10.3.0.1/24', + u'tunnel-ip-address': u'10.10.10.10'}, + u'remote-device': {u'tunnel-ip-address': u'10.10.10.20'} + } + location = self.csr.create_ipsec_connection(connection_info) + self.addCleanup(self._remove_resource_for_test, + self.csr.delete_ipsec_connection, + u'Tunnel123') + self.assertEqual(requests.codes.CREATED, self.csr.status) + self.assertIn('vpn-svc/site-to-site/Tunnel%d' % tunnel_id, + location) + with httmock.HTTMock(device_drivers.csr_request.token, + device_drivers.csr_request.normal_get): + tunnels = self.csr.read_tunnel_statuses() + self.assertEqual(requests.codes.OK, self.csr.status) + self.assertEqual([(u'Tunnel123', u'DOWN-NEGOTIATING'), ], tunnels) + + +class TestCsrRestIkeKeepaliveCreate(base.BaseTestCase): + + """Test IKE keepalive REST requests. + + Note: On the Cisco CSR, the IKE keepalive for v1 is a global configuration + that applies to all VPN tunnels to specify Dead Peer Detection information. + As a result, this REST API is not used in the OpenStack device driver, and + the keepalive will default to zero (disabled). 
+ """ + + def _save_dpd_info(self): + with httmock.HTTMock(device_drivers.csr_request.token, + device_drivers.csr_request.normal_get): + details = self.csr.get_request('vpn-svc/ike/keepalive') + if self.csr.status == requests.codes.OK: + self.dpd = details + self.addCleanup(self._restore_dpd_info) + elif self.csr.status != requests.codes.NOT_FOUND: + self.fail("Unable to save original DPD info") + + def _restore_dpd_info(self): + with httmock.HTTMock(device_drivers.csr_request.token, + device_drivers.csr_request.put): + payload = {'interval': self.dpd['interval'], + 'retry': self.dpd['retry']} + self.csr.put_request('vpn-svc/ike/keepalive', payload=payload) + if self.csr.status != requests.codes.NO_CONTENT: + self.fail("Unable to restore DPD info after test") + + def setUp(self, host='localhost', tunnel_ip='10.10.10.10', timeout=None): + super(TestCsrRestIkeKeepaliveCreate, self).setUp() + self.csr = csr_client.CsrRestClient(host, tunnel_ip, 'stack', 'cisco', + timeout) + self._save_dpd_info() + self.csr.token = None + + def test_configure_ike_keepalive(self): + """Set IKE keep-alive (aka Dead Peer Detection) for the CSR.""" + with httmock.HTTMock(device_drivers.csr_request.token, + device_drivers.csr_request.put, + device_drivers.csr_request.normal_get): + keepalive_info = {'interval': 60, 'retry': 4} + self.csr.configure_ike_keepalive(keepalive_info) + self.assertEqual(requests.codes.NO_CONTENT, self.csr.status) + content = self.csr.get_request('vpn-svc/ike/keepalive') + self.assertEqual(requests.codes.OK, self.csr.status) + expected = {'periodic': False} + expected.update(keepalive_info) + self.assertDictContainsSubset(expected, content) + + def test_disable_ike_keepalive(self): + """Disable IKE keep-alive (aka Dead Peer Detection) for the CSR.""" + with httmock.HTTMock(device_drivers.csr_request.token, + device_drivers.csr_request.delete, + device_drivers.csr_request.put, + device_drivers.csr_request.get_not_configured): + keepalive_info = {'interval': 0, 
'retry': 4} + self.csr.configure_ike_keepalive(keepalive_info) + self.assertEqual(requests.codes.NO_CONTENT, self.csr.status) + + +class TestCsrRestStaticRoute(base.BaseTestCase): + + """Test static route REST requests. + + A static route is added for the peer's private network. Would create + a route for each of the peer CIDRs specified for the VPN connection. + """ + + def setUp(self, host='localhost', tunnel_ip='10.10.10.10', timeout=None): + super(TestCsrRestStaticRoute, self).setUp() + self.csr = csr_client.CsrRestClient(host, tunnel_ip, 'stack', 'cisco', + timeout) + + def test_create_delete_static_route(self): + """Create and then delete a static route for the tunnel.""" + cidr = u'10.1.0.0/24' + interface = u'GigabitEthernet1' + expected_id = '10.1.0.0_24_GigabitEthernet1' + with httmock.HTTMock(device_drivers.csr_request.token, + device_drivers.csr_request.post, + device_drivers.csr_request.normal_get): + route_info = {u'destination-network': cidr, + u'outgoing-interface': interface} + location = self.csr.create_static_route(route_info) + self.assertEqual(requests.codes.CREATED, self.csr.status) + self.assertIn('routing-svc/static-routes/%s' % expected_id, + location) + # Check the hard-coded items that get set as well... 
+ content = self.csr.get_request(location, full_url=True) + self.assertEqual(requests.codes.OK, self.csr.status) + expected_route = {u'kind': u'object#static-route', + u'next-hop-router': None, + u'admin-distance': 1} + expected_route.update(route_info) + self.assertEqual(expected_route, content) + # Now delete and verify that static route is gone + with httmock.HTTMock(device_drivers.csr_request.token, + device_drivers.csr_request.delete, + device_drivers.csr_request.no_such_resource): + route_id = csr_client.make_route_id(cidr, interface) + self.csr.delete_static_route(route_id) + self.assertEqual(requests.codes.NO_CONTENT, self.csr.status) + content = self.csr.get_request(location, full_url=True) + self.assertEqual(requests.codes.NOT_FOUND, self.csr.status) diff --git a/neutron/tests/unit/services/vpn/device_drivers/test_cisco_ipsec.py b/neutron/tests/unit/services/vpn/device_drivers/test_cisco_ipsec.py new file mode 100644 index 000000000..3c29e7551 --- /dev/null +++ b/neutron/tests/unit/services/vpn/device_drivers/test_cisco_ipsec.py @@ -0,0 +1,1709 @@ +# Copyright 2014 Cisco Systems, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Paul Michali, Cisco Systems, Inc. 
+ +import copy +import httplib +import os +import tempfile + +import mock + +from neutron import context +from neutron.openstack.common import uuidutils +from neutron.plugins.common import constants +from neutron.services.vpn.device_drivers import ( + cisco_csr_rest_client as csr_client) +from neutron.services.vpn.device_drivers import cisco_ipsec as ipsec_driver +from neutron.tests import base + +_uuid = uuidutils.generate_uuid +FAKE_HOST = 'fake_host' +FAKE_ROUTER_ID = _uuid() +FAKE_VPN_SERVICE = { + 'id': _uuid(), + 'router_id': FAKE_ROUTER_ID, + 'admin_state_up': True, + 'status': constants.PENDING_CREATE, + 'subnet': {'cidr': '10.0.0.0/24'}, + 'ipsec_site_connections': [ + {'peer_cidrs': ['20.0.0.0/24', + '30.0.0.0/24']}, + {'peer_cidrs': ['40.0.0.0/24', + '50.0.0.0/24']}] +} +FIND_CFG_FOR_CSRS = ('neutron.services.vpn.device_drivers.cisco_ipsec.' + 'find_available_csrs_from_config') + + +class TestCiscoCsrIPSecConnection(base.BaseTestCase): + def setUp(self): + super(TestCiscoCsrIPSecConnection, self).setUp() + self.conn_info = { + u'id': '123', + u'status': constants.PENDING_CREATE, + u'admin_state_up': True, + 'psk': 'secret', + 'peer_address': '192.168.1.2', + 'peer_cidrs': ['10.1.0.0/24', '10.2.0.0/24'], + 'mtu': 1500, + 'ike_policy': {'auth_algorithm': 'sha1', + 'encryption_algorithm': 'aes-128', + 'pfs': 'Group5', + 'ike_version': 'v1', + 'lifetime_units': 'seconds', + 'lifetime_value': 3600}, + 'ipsec_policy': {'transform_protocol': 'ah', + 'encryption_algorithm': 'aes-128', + 'auth_algorithm': 'sha1', + 'pfs': 'group5', + 'lifetime_units': 'seconds', + 'lifetime_value': 3600}, + 'cisco': {'site_conn_id': 'Tunnel0', + 'ike_policy_id': 222, + 'ipsec_policy_id': 333, + # TODO(pcm) FUTURE use vpnservice['external_ip'] + 'router_public_ip': '172.24.4.23'} + } + self.csr = mock.Mock(spec=csr_client.CsrRestClient) + self.csr.status = 201 # All calls to CSR REST API succeed + self.csr.tunnel_ip = '172.24.4.23' + self.ipsec_conn = 
ipsec_driver.CiscoCsrIPSecConnection(self.conn_info, + self.csr) + + def test_create_ipsec_site_connection(self): + """Ensure all steps are done to create an IPSec site connection. + + Verify that each of the driver calls occur (in order), and + the right information is stored for later deletion. + """ + expected = ['create_pre_shared_key', + 'create_ike_policy', + 'create_ipsec_policy', + 'create_ipsec_connection', + 'create_static_route', + 'create_static_route'] + expected_rollback_steps = [ + ipsec_driver.RollbackStep(action='pre_shared_key', + resource_id='123', + title='Pre-Shared Key'), + ipsec_driver.RollbackStep(action='ike_policy', + resource_id=222, + title='IKE Policy'), + ipsec_driver.RollbackStep(action='ipsec_policy', + resource_id=333, + title='IPSec Policy'), + ipsec_driver.RollbackStep(action='ipsec_connection', + resource_id='Tunnel0', + title='IPSec Connection'), + ipsec_driver.RollbackStep(action='static_route', + resource_id='10.1.0.0_24_Tunnel0', + title='Static Route'), + ipsec_driver.RollbackStep(action='static_route', + resource_id='10.2.0.0_24_Tunnel0', + title='Static Route')] + self.ipsec_conn.create_ipsec_site_connection(mock.Mock(), + self.conn_info) + client_calls = [c[0] for c in self.csr.method_calls] + self.assertEqual(expected, client_calls) + self.assertEqual(expected_rollback_steps, self.ipsec_conn.steps) + + def test_create_ipsec_site_connection_with_rollback(self): + """Failure test of IPSec site conn creation that fails and rolls back. + + Simulate a failure in the last create step (making routes for the + peer networks), and ensure that the create steps are called in + order (except for create_static_route), and that the delete + steps are called in reverse order. At the end, there should be no + rollback infromation for the connection. + """ + def fake_route_check_fails(*args, **kwargs): + if args[0] == 'Static Route': + # So that subsequent calls to CSR rest client (for rollback) + # will fake as passing. 
+ self.csr.status = httplib.NO_CONTENT + raise ipsec_driver.CsrResourceCreateFailure(resource=args[0], + which=args[1]) + + with mock.patch.object(ipsec_driver.CiscoCsrIPSecConnection, + '_check_create', + side_effect=fake_route_check_fails): + + expected = ['create_pre_shared_key', + 'create_ike_policy', + 'create_ipsec_policy', + 'create_ipsec_connection', + 'create_static_route', + 'delete_ipsec_connection', + 'delete_ipsec_policy', + 'delete_ike_policy', + 'delete_pre_shared_key'] + self.ipsec_conn.create_ipsec_site_connection(mock.Mock(), + self.conn_info) + client_calls = [c[0] for c in self.csr.method_calls] + self.assertEqual(expected, client_calls) + self.assertEqual([], self.ipsec_conn.steps) + + def test_create_verification_with_error(self): + """Negative test of create check step had failed.""" + self.csr.status = httplib.NOT_FOUND + self.assertRaises(ipsec_driver.CsrResourceCreateFailure, + self.ipsec_conn._check_create, 'name', 'id') + + def test_failure_with_invalid_create_step(self): + """Negative test of invalid create step (programming error).""" + self.ipsec_conn.steps = [] + try: + self.ipsec_conn.do_create_action('bogus', None, '123', 'Bad Step') + except ipsec_driver.CsrResourceCreateFailure: + pass + else: + self.fail('Expected exception with invalid create step') + + def test_failure_with_invalid_delete_step(self): + """Negative test of invalid delete step (programming error).""" + self.ipsec_conn.steps = [ipsec_driver.RollbackStep(action='bogus', + resource_id='123', + title='Bogus Step')] + try: + self.ipsec_conn.do_rollback() + except ipsec_driver.CsrResourceCreateFailure: + pass + else: + self.fail('Expected exception with invalid delete step') + + def test_delete_ipsec_connection(self): + # TODO(pcm) implement + pass + + +class TestCiscoCsrIPsecConnectionCreateTransforms(base.BaseTestCase): + + """Verifies that config info is prepared/transformed correctly.""" + + def setUp(self): + super(TestCiscoCsrIPsecConnectionCreateTransforms, 
self).setUp() + self.conn_info = { + u'id': '123', + u'status': constants.PENDING_CREATE, + u'admin_state_up': True, + 'psk': 'secret', + 'peer_address': '192.168.1.2', + 'peer_cidrs': ['10.1.0.0/24', '10.2.0.0/24'], + 'mtu': 1500, + 'ike_policy': {'auth_algorithm': 'sha1', + 'encryption_algorithm': 'aes-128', + 'pfs': 'Group5', + 'ike_version': 'v1', + 'lifetime_units': 'seconds', + 'lifetime_value': 3600}, + 'ipsec_policy': {'transform_protocol': 'ah', + 'encryption_algorithm': 'aes-128', + 'auth_algorithm': 'sha1', + 'pfs': 'group5', + 'lifetime_units': 'seconds', + 'lifetime_value': 3600}, + 'cisco': {'site_conn_id': 'Tunnel0', + 'ike_policy_id': 222, + 'ipsec_policy_id': 333, + # TODO(pcm) get from vpnservice['external_ip'] + 'router_public_ip': '172.24.4.23'} + } + self.csr = mock.Mock(spec=csr_client.CsrRestClient) + self.csr.tunnel_ip = '172.24.4.23' + self.ipsec_conn = ipsec_driver.CiscoCsrIPSecConnection(self.conn_info, + self.csr) + + def test_invalid_attribute(self): + """Negative test of unknown attribute - programming error.""" + self.assertRaises(ipsec_driver.CsrDriverMismatchError, + self.ipsec_conn.translate_dialect, + 'ike_policy', 'unknown_attr', self.conn_info) + + def test_driver_unknown_mapping(self): + """Negative test of service driver providing unknown value to map.""" + self.conn_info['ike_policy']['pfs'] = "unknown_value" + self.assertRaises(ipsec_driver.CsrUnknownMappingError, + self.ipsec_conn.translate_dialect, + 'ike_policy', 'pfs', self.conn_info['ike_policy']) + + def test_psk_create_info(self): + """Ensure that pre-shared key info is created correctly.""" + expected = {u'keyring-name': '123', + u'pre-shared-key-list': [ + {u'key': 'secret', + u'encrypted': False, + u'peer-address': '192.168.1.2'}]} + psk_id = self.conn_info['id'] + psk_info = self.ipsec_conn.create_psk_info(psk_id, self.conn_info) + self.assertEqual(expected, psk_info) + + def test_create_ike_policy_info(self): + """Ensure that IKE policy info is mapped/created 
correctly.""" + expected = {u'priority-id': 222, + u'encryption': u'aes', + u'hash': u'sha', + u'dhGroup': 5, + u'version': u'v1', + u'lifetime': 3600} + policy_id = self.conn_info['cisco']['ike_policy_id'] + policy_info = self.ipsec_conn.create_ike_policy_info(policy_id, + self.conn_info) + self.assertEqual(expected, policy_info) + + def test_create_ike_policy_info_different_encryption(self): + """Ensure that IKE policy info is mapped/created correctly.""" + self.conn_info['ike_policy']['encryption_algorithm'] = 'aes-192' + expected = {u'priority-id': 222, + u'encryption': u'aes192', + u'hash': u'sha', + u'dhGroup': 5, + u'version': u'v1', + u'lifetime': 3600} + policy_id = self.conn_info['cisco']['ike_policy_id'] + policy_info = self.ipsec_conn.create_ike_policy_info(policy_id, + self.conn_info) + self.assertEqual(expected, policy_info) + + def test_create_ike_policy_info_non_defaults(self): + """Ensure that IKE policy info with different values.""" + self.conn_info['ike_policy'] = { + 'auth_algorithm': 'sha1', + 'encryption_algorithm': 'aes-256', + 'pfs': 'Group14', + 'ike_version': 'v1', + 'lifetime_units': 'seconds', + 'lifetime_value': 60 + } + expected = {u'priority-id': 222, + u'encryption': u'aes256', + u'hash': u'sha', + u'dhGroup': 14, + u'version': u'v1', + u'lifetime': 60} + policy_id = self.conn_info['cisco']['ike_policy_id'] + policy_info = self.ipsec_conn.create_ike_policy_info(policy_id, + self.conn_info) + self.assertEqual(expected, policy_info) + + def test_ipsec_policy_info(self): + """Ensure that IPSec policy info is mapped/created correctly. + + Note: That although the default for anti-replay-window-size on the + CSR is 64, we force it to disabled, for OpenStack use. 
+ """ + expected = {u'policy-id': 333, + u'protection-suite': { + u'esp-encryption': u'esp-aes', + u'esp-authentication': u'esp-sha-hmac', + u'ah': u'ah-sha-hmac' + }, + u'lifetime-sec': 3600, + u'pfs': u'group5', + u'anti-replay-window-size': u'disable'} + ipsec_policy_id = self.conn_info['cisco']['ipsec_policy_id'] + policy_info = self.ipsec_conn.create_ipsec_policy_info(ipsec_policy_id, + self.conn_info) + self.assertEqual(expected, policy_info) + + def test_ipsec_policy_info_different_encryption(self): + """Create IPSec policy with different settings.""" + self.conn_info['ipsec_policy']['transform_protocol'] = 'ah-esp' + self.conn_info['ipsec_policy']['encryption_algorithm'] = 'aes-192' + expected = {u'policy-id': 333, + u'protection-suite': { + u'esp-encryption': u'esp-192-aes', + u'esp-authentication': u'esp-sha-hmac', + u'ah': u'ah-sha-hmac' + }, + u'lifetime-sec': 3600, + u'pfs': u'group5', + u'anti-replay-window-size': u'disable'} + ipsec_policy_id = self.conn_info['cisco']['ipsec_policy_id'] + policy_info = self.ipsec_conn.create_ipsec_policy_info(ipsec_policy_id, + self.conn_info) + self.assertEqual(expected, policy_info) + + def test_ipsec_policy_info_non_defaults(self): + """Create/map IPSec policy info with different values.""" + self.conn_info['ipsec_policy'] = {'transform_protocol': 'esp', + 'encryption_algorithm': '3des', + 'auth_algorithm': 'sha1', + 'pfs': 'group14', + 'lifetime_units': 'seconds', + 'lifetime_value': 120, + 'anti-replay-window-size': 'disable'} + expected = {u'policy-id': 333, + u'protection-suite': { + u'esp-encryption': u'esp-3des', + u'esp-authentication': u'esp-sha-hmac' + }, + u'lifetime-sec': 120, + u'pfs': u'group14', + u'anti-replay-window-size': u'disable'} + ipsec_policy_id = self.conn_info['cisco']['ipsec_policy_id'] + policy_info = self.ipsec_conn.create_ipsec_policy_info(ipsec_policy_id, + self.conn_info) + self.assertEqual(expected, policy_info) + + def test_site_connection_info(self): + """Ensure site-to-site 
connection info is created/mapped correctly.""" + expected = {u'vpn-interface-name': 'Tunnel0', + u'ipsec-policy-id': 333, + u'local-device': { + u'ip-address': u'GigabitEthernet3', + u'tunnel-ip-address': '172.24.4.23' + }, + u'remote-device': { + u'tunnel-ip-address': '192.168.1.2' + }, + u'mtu': 1500} + ipsec_policy_id = self.conn_info['cisco']['ipsec_policy_id'] + site_conn_id = self.conn_info['cisco']['site_conn_id'] + conn_info = self.ipsec_conn.create_site_connection_info( + site_conn_id, ipsec_policy_id, self.conn_info) + self.assertEqual(expected, conn_info) + + def test_static_route_info(self): + """Create static route info for peer CIDRs.""" + expected = [('10.1.0.0_24_Tunnel0', + {u'destination-network': '10.1.0.0/24', + u'outgoing-interface': 'Tunnel0'}), + ('10.2.0.0_24_Tunnel0', + {u'destination-network': '10.2.0.0/24', + u'outgoing-interface': 'Tunnel0'})] +# self.driver.csr.make_route_id.side_effect = ['10.1.0.0_24_Tunnel0', +# '10.2.0.0_24_Tunnel0'] + site_conn_id = self.conn_info['cisco']['site_conn_id'] + routes_info = self.ipsec_conn.create_routes_info(site_conn_id, + self.conn_info) + self.assertEqual(2, len(routes_info)) + self.assertEqual(expected, routes_info) + + +class TestCiscoCsrIPsecDeviceDriverSyncStatuses(base.BaseTestCase): + + """Test status/state of services and connections, after sync.""" + + def setUp(self): + super(TestCiscoCsrIPsecDeviceDriverSyncStatuses, self).setUp() + for klass in ['neutron.common.rpc_compat.create_connection', + 'neutron.context.get_admin_context_without_session', + 'neutron.openstack.common.' 
+ 'loopingcall.FixedIntervalLoopingCall']: + mock.patch(klass).start() + self.context = context.Context('some_user', 'some_tenant') + self.agent = mock.Mock() + conf_patch = mock.patch('oslo.config.cfg.CONF').start() + conf_patch.config_file = ['dummy'] + self.config_load = mock.patch(FIND_CFG_FOR_CSRS).start() + self.config_load.return_value = {'1.1.1.1': {'rest_mgmt': '2.2.2.2', + 'tunnel_ip': '1.1.1.3', + 'username': 'pe', + 'password': 'password', + 'timeout': 120}} + self.driver = ipsec_driver.CiscoCsrIPsecDriver(self.agent, FAKE_HOST) + self.driver.agent_rpc = mock.Mock() + self.conn_create = mock.patch.object( + ipsec_driver.CiscoCsrIPSecConnection, + 'create_ipsec_site_connection').start() + self.conn_delete = mock.patch.object( + ipsec_driver.CiscoCsrIPSecConnection, + 'delete_ipsec_site_connection').start() + self.admin_state = mock.patch.object( + ipsec_driver.CiscoCsrIPSecConnection, + 'set_admin_state').start() + self.csr = mock.Mock() + self.driver.csrs['1.1.1.1'] = self.csr + self.service123_data = {u'id': u'123', + u'status': constants.DOWN, + u'admin_state_up': False, + u'external_ip': u'1.1.1.1'} + self.conn1_data = {u'id': u'1', + u'status': constants.ACTIVE, + u'admin_state_up': True, + u'mtu': 1500, + u'psk': u'secret', + u'peer_address': '192.168.1.2', + u'peer_cidrs': ['10.1.0.0/24', '10.2.0.0/24'], + u'ike_policy': { + u'auth_algorithm': u'sha1', + u'encryption_algorithm': u'aes-128', + u'pfs': u'Group5', + u'ike_version': u'v1', + u'lifetime_units': u'seconds', + u'lifetime_value': 3600}, + u'ipsec_policy': { + u'transform_protocol': u'ah', + u'encryption_algorithm': u'aes-128', + u'auth_algorithm': u'sha1', + u'pfs': u'group5', + u'lifetime_units': u'seconds', + u'lifetime_value': 3600}, + u'cisco': {u'site_conn_id': u'Tunnel0'}} + + # NOTE: For sync, there is mark (trivial), update (tested), + # sweep (tested), and report(tested) phases. 
+ + def test_update_ipsec_connection_create_notify(self): + """Notified of connection create request - create.""" + # Make the (existing) service + self.driver.create_vpn_service(self.service123_data) + conn_data = copy.deepcopy(self.conn1_data) + conn_data[u'status'] = constants.PENDING_CREATE + + connection = self.driver.update_connection(self.context, + u'123', conn_data) + self.assertFalse(connection.is_dirty) + self.assertEqual(u'Tunnel0', connection.tunnel) + self.assertEqual(constants.PENDING_CREATE, connection.last_status) + self.assertEqual(1, self.conn_create.call_count) + + def test_detect_no_change_to_ipsec_connection(self): + """No change to IPSec connection - nop.""" + # Make existing service, and connection that was active + vpn_service = self.driver.create_vpn_service(self.service123_data) + connection = vpn_service.create_connection(self.conn1_data) + + self.assertFalse(connection.check_for_changes(self.conn1_data)) + + def test_detect_state_only_change_to_ipsec_connection(self): + """Only IPSec connection state changed - update.""" + # Make existing service, and connection that was active + vpn_service = self.driver.create_vpn_service(self.service123_data) + connection = vpn_service.create_connection(self.conn1_data) + + conn_data = copy.deepcopy(self.conn1_data) + conn_data[u'admin_state_up'] = False + self.assertFalse(connection.check_for_changes(conn_data)) + + def test_detect_non_state_change_to_ipsec_connection(self): + """Connection change instead of/in addition to state - update.""" + # Make existing service, and connection that was active + vpn_service = self.driver.create_vpn_service(self.service123_data) + connection = vpn_service.create_connection(self.conn1_data) + + conn_data = copy.deepcopy(self.conn1_data) + conn_data[u'ipsec_policy'][u'encryption_algorithm'] = u'aes-256' + self.assertTrue(connection.check_for_changes(conn_data)) + + def test_update_ipsec_connection_changed_admin_down(self): + """Notified of connection state change 
- update. + + For a connection that was previously created, expect to + force connection down on an admin down (only) change. + """ + + # Make existing service, and connection that was active + vpn_service = self.driver.create_vpn_service(self.service123_data) + connection = vpn_service.create_connection(self.conn1_data) + + # Simulate that notification of connection update received + self.driver.mark_existing_connections_as_dirty() + # Modify the connection data for the 'sync' + conn_data = copy.deepcopy(self.conn1_data) + conn_data[u'admin_state_up'] = False + + connection = self.driver.update_connection(self.context, + '123', conn_data) + self.assertFalse(connection.is_dirty) + self.assertEqual(u'Tunnel0', connection.tunnel) + self.assertEqual(constants.ACTIVE, connection.last_status) + self.assertFalse(self.conn_create.called) + self.assertFalse(connection.is_admin_up) + self.assertTrue(connection.forced_down) + self.assertEqual(1, self.admin_state.call_count) + + def test_update_ipsec_connection_changed_config(self): + """Notified of connection changing config - update. + + Goal here is to detect that the connection is deleted and then + created, but not that the specific values have changed, so picking + arbitrary value (MTU). 
+ """ + # Make existing service, and connection that was active + vpn_service = self.driver.create_vpn_service(self.service123_data) + connection = vpn_service.create_connection(self.conn1_data) + + # Simulate that notification of connection update received + self.driver.mark_existing_connections_as_dirty() + # Modify the connection data for the 'sync' + conn_data = copy.deepcopy(self.conn1_data) + conn_data[u'mtu'] = 9200 + + connection = self.driver.update_connection(self.context, + '123', conn_data) + self.assertFalse(connection.is_dirty) + self.assertEqual(u'Tunnel0', connection.tunnel) + self.assertEqual(constants.ACTIVE, connection.last_status) + self.assertEqual(1, self.conn_create.call_count) + self.assertEqual(1, self.conn_delete.call_count) + self.assertTrue(connection.is_admin_up) + self.assertFalse(connection.forced_down) + self.assertFalse(self.admin_state.called) + + def test_update_of_unknown_ipsec_connection(self): + """Notified of update of unknown connection - create. + + Occurs if agent restarts and receives a notification of change + to connection, but has no previous record of the connection. + Result will be to rebuild the connection. 
+ """ + # Will have previously created service, but don't know of connection + self.driver.create_vpn_service(self.service123_data) + + # Simulate that notification of connection update received + self.driver.mark_existing_connections_as_dirty() + conn_data = copy.deepcopy(self.conn1_data) + conn_data[u'status'] = constants.DOWN + + connection = self.driver.update_connection(self.context, + u'123', conn_data) + self.assertFalse(connection.is_dirty) + self.assertEqual(u'Tunnel0', connection.tunnel) + self.assertEqual(constants.DOWN, connection.last_status) + self.assertEqual(1, self.conn_create.call_count) + self.assertTrue(connection.is_admin_up) + self.assertFalse(connection.forced_down) + self.assertFalse(self.admin_state.called) + + def test_update_missing_connection_admin_down(self): + """Connection not present is in admin down state - nop. + + If the agent has restarted, and a sync notification occurs with + a connection that is in admin down state, recreate the connection, + but indicate that the connection is down. 
+ """ + # Make existing service, but no connection + self.driver.create_vpn_service(self.service123_data) + + conn_data = copy.deepcopy(self.conn1_data) + conn_data.update({u'status': constants.DOWN, + u'admin_state_up': False}) + connection = self.driver.update_connection(self.context, + u'123', conn_data) + self.assertIsNotNone(connection) + self.assertFalse(connection.is_dirty) + self.assertEqual(1, self.conn_create.call_count) + self.assertFalse(connection.is_admin_up) + self.assertTrue(connection.forced_down) + self.assertEqual(1, self.admin_state.call_count) + + def test_update_connection_admin_up(self): + """Connection updated to admin up state - record.""" + # Make existing service, and connection that was admin down + conn_data = copy.deepcopy(self.conn1_data) + conn_data.update({u'status': constants.DOWN, u'admin_state_up': False}) + service_data = {u'id': u'123', + u'status': constants.DOWN, + u'external_ip': u'1.1.1.1', + u'admin_state_up': True, + u'ipsec_conns': [conn_data]} + self.driver.update_service(self.context, service_data) + + # Simulate that notification of connection update received + self.driver.mark_existing_connections_as_dirty() + # Now simulate that the notification shows the connection admin up + new_conn_data = copy.deepcopy(conn_data) + new_conn_data[u'admin_state_up'] = True + + connection = self.driver.update_connection(self.context, + u'123', new_conn_data) + self.assertFalse(connection.is_dirty) + self.assertEqual(u'Tunnel0', connection.tunnel) + self.assertEqual(constants.DOWN, connection.last_status) + self.assertTrue(connection.is_admin_up) + self.assertFalse(connection.forced_down) + self.assertEqual(2, self.admin_state.call_count) + + def test_update_for_vpn_service_create(self): + """Creation of new IPSec connection on new VPN service - create. + + Service will be created and marked as 'clean', and update + processing for connection will occur (create). 
+ """ + conn_data = copy.deepcopy(self.conn1_data) + conn_data[u'status'] = constants.PENDING_CREATE + service_data = {u'id': u'123', + u'status': constants.PENDING_CREATE, + u'external_ip': u'1.1.1.1', + u'admin_state_up': True, + u'ipsec_conns': [conn_data]} + vpn_service = self.driver.update_service(self.context, service_data) + self.assertFalse(vpn_service.is_dirty) + self.assertEqual(constants.PENDING_CREATE, vpn_service.last_status) + connection = vpn_service.get_connection(u'1') + self.assertIsNotNone(connection) + self.assertFalse(connection.is_dirty) + self.assertEqual(u'Tunnel0', connection.tunnel) + self.assertEqual(constants.PENDING_CREATE, connection.last_status) + self.assertEqual(1, self.conn_create.call_count) + self.assertTrue(connection.is_admin_up) + self.assertFalse(connection.forced_down) + self.assertFalse(self.admin_state.called) + + def test_update_for_new_connection_on_existing_service(self): + """Creating a new IPSec connection on an existing service.""" + # Create the service before testing, and mark it dirty + prev_vpn_service = self.driver.create_vpn_service(self.service123_data) + self.driver.mark_existing_connections_as_dirty() + conn_data = copy.deepcopy(self.conn1_data) + conn_data[u'status'] = constants.PENDING_CREATE + service_data = {u'id': u'123', + u'status': constants.ACTIVE, + u'external_ip': u'1.1.1.1', + u'admin_state_up': True, + u'ipsec_conns': [conn_data]} + vpn_service = self.driver.update_service(self.context, service_data) + # Should reuse the entry and update the status + self.assertEqual(prev_vpn_service, vpn_service) + self.assertFalse(vpn_service.is_dirty) + self.assertEqual(constants.ACTIVE, vpn_service.last_status) + connection = vpn_service.get_connection(u'1') + self.assertIsNotNone(connection) + self.assertFalse(connection.is_dirty) + self.assertEqual(u'Tunnel0', connection.tunnel) + self.assertEqual(constants.PENDING_CREATE, connection.last_status) + self.assertEqual(1, self.conn_create.call_count) + + def 
test_update_for_vpn_service_with_one_unchanged_connection(self): + """Existing VPN service and IPSec connection without any changes - nop. + + Service and connection will be marked clean. No processing for + either, as there are no changes. + """ + # Create a service and add in a connection that is active + prev_vpn_service = self.driver.create_vpn_service(self.service123_data) + prev_vpn_service.create_connection(self.conn1_data) + + self.driver.mark_existing_connections_as_dirty() + # Create notification with conn unchanged and service already created + service_data = {u'id': u'123', + u'status': constants.ACTIVE, + u'external_ip': u'1.1.1.1', + u'admin_state_up': True, + u'ipsec_conns': [self.conn1_data]} + vpn_service = self.driver.update_service(self.context, service_data) + # Should reuse the entry and update the status + self.assertEqual(prev_vpn_service, vpn_service) + self.assertFalse(vpn_service.is_dirty) + self.assertEqual(constants.ACTIVE, vpn_service.last_status) + connection = vpn_service.get_connection(u'1') + self.assertIsNotNone(connection) + self.assertFalse(connection.is_dirty) + self.assertEqual(u'Tunnel0', connection.tunnel) + self.assertEqual(constants.ACTIVE, connection.last_status) + self.assertFalse(self.conn_create.called) + + def test_update_service_admin_down(self): + """VPN service updated to admin down state - force all down. + + If service is down, then all connections are forced down. 
+ """ + # Create an "existing" service, prior to notification + prev_vpn_service = self.driver.create_vpn_service(self.service123_data) + + self.driver.mark_existing_connections_as_dirty() + service_data = {u'id': u'123', + u'status': constants.DOWN, + u'external_ip': u'1.1.1.1', + u'admin_state_up': False, + u'ipsec_conns': [self.conn1_data]} + vpn_service = self.driver.update_service(self.context, service_data) + self.assertEqual(prev_vpn_service, vpn_service) + self.assertFalse(vpn_service.is_dirty) + self.assertFalse(vpn_service.is_admin_up) + self.assertEqual(constants.DOWN, vpn_service.last_status) + conn = vpn_service.get_connection(u'1') + self.assertIsNotNone(conn) + self.assertFalse(conn.is_dirty) + self.assertTrue(conn.forced_down) + self.assertTrue(conn.is_admin_up) + + def test_update_new_service_admin_down(self): + """Unknown VPN service updated to admin down state - nop. + + Can happen if agent restarts and then gets its first notificaiton + of a service that is in the admin down state. Structures will be + created, but forced down. + """ + service_data = {u'id': u'123', + u'status': constants.DOWN, + u'external_ip': u'1.1.1.1', + u'admin_state_up': False, + u'ipsec_conns': [self.conn1_data]} + vpn_service = self.driver.update_service(self.context, service_data) + self.assertIsNotNone(vpn_service) + self.assertFalse(vpn_service.is_dirty) + self.assertFalse(vpn_service.is_admin_up) + self.assertEqual(constants.DOWN, vpn_service.last_status) + conn = vpn_service.get_connection(u'1') + self.assertIsNotNone(conn) + self.assertFalse(conn.is_dirty) + self.assertTrue(conn.forced_down) + self.assertTrue(conn.is_admin_up) + + def test_update_service_admin_up(self): + """VPN service updated to admin up state - restore. + + If service is up now, then connections that are admin up will come + up and connections that are admin down, will remain down. 
+ """ + # Create an "existing" service, prior to notification + prev_vpn_service = self.driver.create_vpn_service(self.service123_data) + self.driver.mark_existing_connections_as_dirty() + conn_data1 = {u'id': u'1', u'status': constants.DOWN, + u'admin_state_up': False, + u'cisco': {u'site_conn_id': u'Tunnel0'}} + conn_data2 = {u'id': u'2', u'status': constants.ACTIVE, + u'admin_state_up': True, + u'cisco': {u'site_conn_id': u'Tunnel1'}} + service_data = {u'id': u'123', + u'status': constants.DOWN, + u'external_ip': u'1.1.1.1', + u'admin_state_up': True, + u'ipsec_conns': [conn_data1, conn_data2]} + vpn_service = self.driver.update_service(self.context, service_data) + self.assertEqual(prev_vpn_service, vpn_service) + self.assertFalse(vpn_service.is_dirty) + self.assertTrue(vpn_service.is_admin_up) + self.assertEqual(constants.DOWN, vpn_service.last_status) + conn1 = vpn_service.get_connection(u'1') + self.assertIsNotNone(conn1) + self.assertFalse(conn1.is_dirty) + self.assertTrue(conn1.forced_down) + self.assertFalse(conn1.is_admin_up) + conn2 = vpn_service.get_connection(u'2') + self.assertIsNotNone(conn2) + self.assertFalse(conn2.is_dirty) + self.assertFalse(conn2.forced_down) + self.assertTrue(conn2.is_admin_up) + + def test_update_of_unknown_service_create(self): + """Create of VPN service that is currently unknown - record. + + If agent is restarted or user changes VPN service to admin up, the + notification may contain a VPN service with an IPSec connection + that is not in PENDING_CREATE state. 
+ """ + conn_data = {u'id': u'1', u'status': constants.DOWN, + u'admin_state_up': True, + u'cisco': {u'site_conn_id': u'Tunnel0'}} + service_data = {u'id': u'123', + u'status': constants.ACTIVE, + u'external_ip': u'1.1.1.1', + u'admin_state_up': True, + u'ipsec_conns': [conn_data]} + vpn_service = self.driver.update_service(self.context, service_data) + self.assertFalse(vpn_service.is_dirty) + self.assertEqual(constants.ACTIVE, vpn_service.last_status) + connection = vpn_service.get_connection(u'1') + self.assertIsNotNone(connection) + self.assertFalse(connection.is_dirty) + self.assertEqual(u'Tunnel0', connection.tunnel) + self.assertEqual(constants.DOWN, connection.last_status) + self.assertEqual(1, self.conn_create.call_count) + + def test_update_service_create_no_csr(self): + """Failure test of sync of service that is not on CSR - ignore. + + Ignore the VPN service and its IPSec connection(s) notifications for + which there is no corresponding Cisco CSR. + """ + conn_data = {u'id': u'1', u'status': constants.PENDING_CREATE, + u'admin_state_up': True, + u'cisco': {u'site_conn_id': u'Tunnel0'}} + service_data = {u'id': u'123', + u'status': constants.PENDING_CREATE, + u'external_ip': u'2.2.2.2', + u'admin_state_up': True, + u'ipsec_conns': [conn_data]} + vpn_service = self.driver.update_service(self.context, service_data) + self.assertIsNone(vpn_service) + + def _check_connection_for_service(self, count, vpn_service): + """Helper to check the connection information for a service.""" + connection = vpn_service.get_connection(u'%d' % count) + self.assertIsNotNone(connection, "for connection %d" % count) + self.assertFalse(connection.is_dirty, "for connection %d" % count) + self.assertEqual(u'Tunnel%d' % count, connection.tunnel, + "for connection %d" % count) + self.assertEqual(constants.PENDING_CREATE, connection.last_status, + "for connection %d" % count) + return count + 1 + + def notification_for_two_services_with_two_conns(self): + """Helper used by tests to 
create two services, each with two conns.""" + conn1_data = {u'id': u'1', u'status': constants.PENDING_CREATE, + u'admin_state_up': True, + u'cisco': {u'site_conn_id': u'Tunnel1'}} + conn2_data = {u'id': u'2', u'status': constants.PENDING_CREATE, + u'admin_state_up': True, + u'cisco': {u'site_conn_id': u'Tunnel2'}} + service1_data = {u'id': u'123', + u'status': constants.PENDING_CREATE, + u'external_ip': u'1.1.1.1', + u'admin_state_up': True, + u'ipsec_conns': [conn1_data, conn2_data]} + conn3_data = {u'id': u'3', u'status': constants.PENDING_CREATE, + u'admin_state_up': True, + u'cisco': {u'site_conn_id': u'Tunnel3'}} + conn4_data = {u'id': u'4', u'status': constants.PENDING_CREATE, + u'admin_state_up': True, + u'cisco': {u'site_conn_id': u'Tunnel4'}} + service2_data = {u'id': u'456', + u'status': constants.PENDING_CREATE, + u'external_ip': u'1.1.1.1', + u'admin_state_up': True, + u'ipsec_conns': [conn3_data, conn4_data]} + return service1_data, service2_data + + def test_create_two_connections_on_two_services(self): + """High level test of multiple VPN services with connections.""" + # Build notification message + (service1_data, + service2_data) = self.notification_for_two_services_with_two_conns() + # Simulate plugin returning notifcation, when requested + self.driver.agent_rpc.get_vpn_services_on_host.return_value = [ + service1_data, service2_data] + vpn_services = self.driver.update_all_services_and_connections( + self.context) + self.assertEqual(2, len(vpn_services)) + count = 1 + for vpn_service in vpn_services: + self.assertFalse(vpn_service.is_dirty, + "for service %s" % vpn_service) + self.assertEqual(constants.PENDING_CREATE, vpn_service.last_status, + "for service %s" % vpn_service) + count = self._check_connection_for_service(count, vpn_service) + count = self._check_connection_for_service(count, vpn_service) + self.assertEqual(4, self.conn_create.call_count) + + def test_sweep_connection_marked_as_clean(self): + """Sync updated connection - no 
action.""" + # Create a service and connection + vpn_service = self.driver.create_vpn_service(self.service123_data) + connection = vpn_service.create_connection(self.conn1_data) + self.driver.mark_existing_connections_as_dirty() + # Simulate that the update phase visted both of them + vpn_service.is_dirty = False + connection.is_dirty = False + self.driver.remove_unknown_connections(self.context) + vpn_service = self.driver.service_state.get(u'123') + self.assertIsNotNone(vpn_service) + self.assertFalse(vpn_service.is_dirty) + connection = vpn_service.get_connection(u'1') + self.assertIsNotNone(connection) + self.assertFalse(connection.is_dirty) + + def test_sweep_connection_dirty(self): + """Sync did not update connection - delete.""" + # Create a service and connection + vpn_service = self.driver.create_vpn_service(self.service123_data) + vpn_service.create_connection(self.conn1_data) + self.driver.mark_existing_connections_as_dirty() + # Simulate that the update phase only visited the service + vpn_service.is_dirty = False + self.driver.remove_unknown_connections(self.context) + vpn_service = self.driver.service_state.get(u'123') + self.assertIsNotNone(vpn_service) + self.assertFalse(vpn_service.is_dirty) + connection = vpn_service.get_connection(u'1') + self.assertIsNone(connection) + self.assertEqual(1, self.conn_delete.call_count) + + def test_sweep_service_dirty(self): + """Sync did not update service - delete it and all conns.""" + # Create a service and connection + vpn_service = self.driver.create_vpn_service(self.service123_data) + vpn_service.create_connection(self.conn1_data) + self.driver.mark_existing_connections_as_dirty() + # Both the service and the connection are still 'dirty' + self.driver.remove_unknown_connections(self.context) + self.assertIsNone(self.driver.service_state.get(u'123')) + self.assertEqual(1, self.conn_delete.call_count) + + def test_sweep_multiple_services(self): + """One service and conn updated, one service and conn not.""" + 
# Create two services, each with a connection + vpn_service1 = self.driver.create_vpn_service(self.service123_data) + vpn_service1.create_connection(self.conn1_data) + service456_data = {u'id': u'456', + u'status': constants.ACTIVE, + u'admin_state_up': False, + u'external_ip': u'1.1.1.1'} + conn2_data = {u'id': u'2', u'status': constants.ACTIVE, + u'admin_state_up': True, + u'cisco': {u'site_conn_id': u'Tunnel0'}} + prev_vpn_service2 = self.driver.create_vpn_service(service456_data) + prev_connection2 = prev_vpn_service2.create_connection(conn2_data) + self.driver.mark_existing_connections_as_dirty() + # Simulate that the update phase visited the first service and conn + prev_vpn_service2.is_dirty = False + prev_connection2.is_dirty = False + self.driver.remove_unknown_connections(self.context) + self.assertIsNone(self.driver.service_state.get(u'123')) + vpn_service2 = self.driver.service_state.get(u'456') + self.assertEqual(prev_vpn_service2, vpn_service2) + self.assertFalse(vpn_service2.is_dirty) + connection2 = vpn_service2.get_connection(u'2') + self.assertEqual(prev_connection2, connection2) + self.assertFalse(connection2.is_dirty) + self.assertEqual(1, self.conn_delete.call_count) + + def simulate_mark_update_sweep_for_service_with_conn(self, service_state, + connection_state): + """Create internal structures for single service with connection. + + The service and connection will be marked as clean, and since + none are being deleted, the service's connections_removed + attribute will remain false. + """ + # Simulate that we have done mark, update, and sweep. 
+ conn_data = {u'id': u'1', u'status': connection_state, + u'admin_state_up': True, + u'cisco': {u'site_conn_id': u'Tunnel0'}} + service_data = {u'id': u'123', + u'status': service_state, + u'external_ip': u'1.1.1.1', + u'admin_state_up': True, + u'ipsec_conns': [conn_data]} + return self.driver.update_service(self.context, service_data) + + def test_report_fragment_connection_created(self): + """Generate report section for a created connection.""" + # Prepare service and connection in PENDING_CREATE state + vpn_service = self.simulate_mark_update_sweep_for_service_with_conn( + constants.PENDING_CREATE, constants.PENDING_CREATE) + # Simulate that CSR has reported the connection is still up + self.csr.read_tunnel_statuses.return_value = [ + (u'Tunnel0', u'UP-ACTIVE'), ] + + # Get the statuses for connections existing on CSR + tunnels = vpn_service.get_ipsec_connections_status() + self.assertEqual({u'Tunnel0': constants.ACTIVE}, tunnels) + + # Check that there is a status for this connection + connection = vpn_service.get_connection(u'1') + self.assertIsNotNone(connection) + current_status = connection.find_current_status_in(tunnels) + self.assertEqual(constants.ACTIVE, current_status) + + # Create report fragment due to change + self.assertNotEqual(connection.last_status, current_status) + report_frag = connection.update_status_and_build_report(current_status) + self.assertEqual(current_status, connection.last_status) + expected = {'1': {'status': constants.ACTIVE, + 'updated_pending_status': True}} + self.assertEqual(expected, report_frag) + + def test_report_fragment_connection_unchanged_status(self): + """No report section generated for a created connection.""" + # Prepare service and connection in ACTIVE state + vpn_service = self.simulate_mark_update_sweep_for_service_with_conn( + constants.ACTIVE, constants.ACTIVE) + # Simulate that CSR has reported the connection is up + self.csr.read_tunnel_statuses.return_value = [ + (u'Tunnel0', u'UP-IDLE'), ] + + # Get 
the statuses for connections existing on CSR + tunnels = vpn_service.get_ipsec_connections_status() + self.assertEqual({u'Tunnel0': constants.ACTIVE}, tunnels) + + # Check that there is a status for this connection + connection = vpn_service.get_connection(u'1') + self.assertIsNotNone(connection) + current_status = connection.find_current_status_in(tunnels) + self.assertEqual(constants.ACTIVE, current_status) + + # Should be no report, as no change + self.assertEqual(connection.last_status, current_status) + report_frag = connection.update_status_and_build_report(current_status) + self.assertEqual(current_status, connection.last_status) + self.assertEqual({}, report_frag) + + def test_report_fragment_connection_changed_status(self): + """Generate report section for connection with changed state.""" + # Prepare service in ACTIVE state and connection in DOWN state + vpn_service = self.simulate_mark_update_sweep_for_service_with_conn( + constants.ACTIVE, constants.DOWN) + # Simulate that CSR has reported the connection is still up + self.csr.read_tunnel_statuses.return_value = [ + (u'Tunnel0', u'UP-NO-IKE'), ] + + # Get the statuses for connections existing on CSR + tunnels = vpn_service.get_ipsec_connections_status() + self.assertEqual({u'Tunnel0': constants.ACTIVE}, tunnels) + + # Check that there is a status for this connection + connection = vpn_service.get_connection(u'1') + self.assertIsNotNone(connection) + current_status = connection.find_current_status_in(tunnels) + self.assertEqual(constants.ACTIVE, current_status) + + # Create report fragment due to change + self.assertNotEqual(connection.last_status, current_status) + report_frag = connection.update_status_and_build_report(current_status) + self.assertEqual(current_status, connection.last_status) + expected = {'1': {'status': constants.ACTIVE, + 'updated_pending_status': False}} + self.assertEqual(expected, report_frag) + + def test_report_fragment_connection_failed_create(self): + """Failure test of 
report fragment for conn that failed creation. + + Normally, without any status from the CSR, the connection report would + be skipped, but we need to report back failures. + """ + # Prepare service and connection in PENDING_CREATE state + vpn_service = self.simulate_mark_update_sweep_for_service_with_conn( + constants.PENDING_CREATE, constants.PENDING_CREATE) + # Simulate that CSR does NOT report the status (no tunnel) + self.csr.read_tunnel_statuses.return_value = [] + + # Get the statuses for connections existing on CSR + tunnels = vpn_service.get_ipsec_connections_status() + self.assertEqual({}, tunnels) + + # Check that there is a status for this connection + connection = vpn_service.get_connection(u'1') + self.assertIsNotNone(connection) + current_status = connection.find_current_status_in(tunnels) + self.assertEqual(constants.ERROR, current_status) + + # Create report fragment due to change + self.assertNotEqual(connection.last_status, current_status) + report_frag = connection.update_status_and_build_report(current_status) + self.assertEqual(current_status, connection.last_status) + expected = {'1': {'status': constants.ERROR, + 'updated_pending_status': True}} + self.assertEqual(expected, report_frag) + + def test_report_fragment_connection_admin_down(self): + """Report for a connection that is in admin down state.""" + # Prepare service and connection with previous status ACTIVE, but + # with connection admin down + conn_data = {u'id': u'1', u'status': constants.ACTIVE, + u'admin_state_up': False, + u'cisco': {u'site_conn_id': u'Tunnel0'}} + service_data = {u'id': u'123', + u'status': constants.ACTIVE, + u'external_ip': u'1.1.1.1', + u'admin_state_up': True, + u'ipsec_conns': [conn_data]} + vpn_service = self.driver.update_service(self.context, service_data) + # Tunnel would have been deleted, so simulate no status + self.csr.read_tunnel_statuses.return_value = [] + + connection = vpn_service.get_connection(u'1') + self.assertIsNotNone(connection) + 
self.assertTrue(connection.forced_down) + self.assertEqual(constants.ACTIVE, connection.last_status) + + # Create report fragment due to change + report_frag = self.driver.build_report_for_connections_on(vpn_service) + self.assertEqual(constants.DOWN, connection.last_status) + expected = {'1': {'status': constants.DOWN, + 'updated_pending_status': False}} + self.assertEqual(expected, report_frag) + + def test_report_fragment_two_connections(self): + """Generate report fragment for two connections on a service.""" + # Prepare service with two connections, one ACTIVE, one DOWN + conn1_data = {u'id': u'1', u'status': constants.DOWN, + u'admin_state_up': True, + u'cisco': {u'site_conn_id': u'Tunnel1'}} + conn2_data = {u'id': u'2', u'status': constants.ACTIVE, + u'admin_state_up': True, + u'cisco': {u'site_conn_id': u'Tunnel2'}} + service_data = {u'id': u'123', + u'status': constants.ACTIVE, + u'external_ip': u'1.1.1.1', + u'admin_state_up': True, + u'ipsec_conns': [conn1_data, conn2_data]} + vpn_service = self.driver.update_service(self.context, service_data) + # Simulate that CSR has reported the connections with diff status + self.csr.read_tunnel_statuses.return_value = [ + (u'Tunnel1', u'UP-IDLE'), (u'Tunnel2', u'DOWN-NEGOTIATING')] + + # Get the report fragments for the connections + report_frag = self.driver.build_report_for_connections_on(vpn_service) + expected = {u'1': {u'status': constants.ACTIVE, + u'updated_pending_status': False}, + u'2': {u'status': constants.DOWN, + u'updated_pending_status': False}} + self.assertEqual(expected, report_frag) + + def test_report_service_create(self): + """VPN service and IPSec connection created - report.""" + # Simulate creation of the service and connection + vpn_service = self.simulate_mark_update_sweep_for_service_with_conn( + constants.PENDING_CREATE, constants.PENDING_CREATE) + # Simulate that the CSR has created the connection + self.csr.read_tunnel_statuses.return_value = [ + (u'Tunnel0', u'UP-ACTIVE'), ] + + 
report = self.driver.build_report_for_service(vpn_service) + expected_report = { + u'id': u'123', + u'updated_pending_status': True, + u'status': constants.ACTIVE, + u'ipsec_site_connections': { + u'1': {u'status': constants.ACTIVE, + u'updated_pending_status': True} + } + } + self.assertEqual(expected_report, report) + # Check that service and connection statuses are updated + self.assertEqual(constants.ACTIVE, vpn_service.last_status) + self.assertEqual(constants.ACTIVE, + vpn_service.get_connection(u'1').last_status) + + def test_report_service_create_of_first_conn_fails(self): + """VPN service and IPSec conn created, but conn failed - report. + + Since this is the sole IPSec connection on the service, and the + create failed (connection in ERROR state), the VPN service's + status will be set to DOWN. + """ + # Simulate creation of the service and connection + vpn_service = self.simulate_mark_update_sweep_for_service_with_conn( + constants.PENDING_CREATE, constants.PENDING_CREATE) + # Simulate that the CSR has no info due to failed create + self.csr.read_tunnel_statuses.return_value = [] + + report = self.driver.build_report_for_service(vpn_service) + expected_report = { + u'id': u'123', + u'updated_pending_status': True, + u'status': constants.DOWN, + u'ipsec_site_connections': { + u'1': {u'status': constants.ERROR, + u'updated_pending_status': True} + } + } + self.assertEqual(expected_report, report) + # Check that service and connection statuses are updated + self.assertEqual(constants.DOWN, vpn_service.last_status) + self.assertEqual(constants.ERROR, + vpn_service.get_connection(u'1').last_status) + + def test_report_connection_created_on_existing_service(self): + """Creating connection on existing service - report.""" + # Simulate existing service and connection create + vpn_service = self.simulate_mark_update_sweep_for_service_with_conn( + constants.ACTIVE, constants.PENDING_CREATE) + # Simulate that the CSR has created the connection + 
self.csr.read_tunnel_statuses.return_value = [ + (u'Tunnel0', u'UP-IDLE'), ] + + report = self.driver.build_report_for_service(vpn_service) + expected_report = { + u'id': u'123', + u'updated_pending_status': False, + u'status': constants.ACTIVE, + u'ipsec_site_connections': { + u'1': {u'status': constants.ACTIVE, + u'updated_pending_status': True} + } + } + self.assertEqual(expected_report, report) + # Check that service and connection statuses are updated + self.assertEqual(constants.ACTIVE, vpn_service.last_status) + self.assertEqual(constants.ACTIVE, + vpn_service.get_connection(u'1').last_status) + + def test_no_report_no_changes(self): + """VPN service with unchanged IPSec connection - no report. + + Note: No report will be generated if the last connection on the + service is deleted. The service (and connection) objects will + have been removed by the sweep operation and thus not reported. + On the plugin, the service should be changed to DOWN. Likewise, + if the service goes to admin down state. + """ + # Simulate an existing service and connection that are ACTIVE + vpn_service = self.simulate_mark_update_sweep_for_service_with_conn( + constants.ACTIVE, constants.ACTIVE) + # Simulate that the CSR reports the connection still active + self.csr.read_tunnel_statuses.return_value = [ + (u'Tunnel0', u'UP-ACTIVE'), ] + + report = self.driver.build_report_for_service(vpn_service) + self.assertEqual({}, report) + # Check that service and connection statuses are still same + self.assertEqual(constants.ACTIVE, vpn_service.last_status) + self.assertEqual(constants.ACTIVE, + vpn_service.get_connection(u'1').last_status) + + def test_report_sole_connection_goes_down(self): + """Only connection on VPN service goes down - report. + + In addition to reporting the status change and recording the new + state for the IPSec connection, the VPN service status will be + DOWN. 
+ """ + # Simulate an existing service and connection that are ACTIVE + vpn_service = self.simulate_mark_update_sweep_for_service_with_conn( + constants.ACTIVE, constants.ACTIVE) + # Simulate that the CSR reports the connection went down + self.csr.read_tunnel_statuses.return_value = [ + (u'Tunnel0', u'DOWN-NEGOTIATING'), ] + + report = self.driver.build_report_for_service(vpn_service) + expected_report = { + u'id': u'123', + u'updated_pending_status': False, + u'status': constants.DOWN, + u'ipsec_site_connections': { + u'1': {u'status': constants.DOWN, + u'updated_pending_status': False} + } + } + self.assertEqual(expected_report, report) + # Check that service and connection statuses are updated + self.assertEqual(constants.DOWN, vpn_service.last_status) + self.assertEqual(constants.DOWN, + vpn_service.get_connection(u'1').last_status) + + def test_report_sole_connection_comes_up(self): + """Only connection on VPN service comes up - report. + + In addition to reporting the status change and recording the new + state for the IPSec connection, the VPN service status will be + ACTIVE. 
+ """ + # Simulate an existing service and connection that are DOWN + vpn_service = self.simulate_mark_update_sweep_for_service_with_conn( + constants.DOWN, constants.DOWN) + # Simulate that the CSR reports the connection came up + self.csr.read_tunnel_statuses.return_value = [ + (u'Tunnel0', u'UP-NO-IKE'), ] + + report = self.driver.build_report_for_service(vpn_service) + expected_report = { + u'id': u'123', + u'updated_pending_status': False, + u'status': constants.ACTIVE, + u'ipsec_site_connections': { + u'1': {u'status': constants.ACTIVE, + u'updated_pending_status': False} + } + } + self.assertEqual(expected_report, report) + # Check that service and connection statuses are updated + self.assertEqual(constants.ACTIVE, vpn_service.last_status) + self.assertEqual(constants.ACTIVE, + vpn_service.get_connection(u'1').last_status) + + def test_report_service_with_two_connections_gone_down(self): + """One service with two connections that went down - report. + + Shows the case where all the connections are down, so that the + service should report as DOWN, as well. 
+ """ + # Simulate one service with two ACTIVE connections + conn1_data = {u'id': u'1', u'status': constants.ACTIVE, + u'admin_state_up': True, + u'cisco': {u'site_conn_id': u'Tunnel1'}} + conn2_data = {u'id': u'2', u'status': constants.ACTIVE, + u'admin_state_up': True, + u'cisco': {u'site_conn_id': u'Tunnel2'}} + service_data = {u'id': u'123', + u'status': constants.ACTIVE, + u'external_ip': u'1.1.1.1', + u'admin_state_up': True, + u'ipsec_conns': [conn1_data, conn2_data]} + vpn_service = self.driver.update_service(self.context, service_data) + # Simulate that the CSR has reported that the connections are DOWN + self.csr.read_tunnel_statuses.return_value = [ + (u'Tunnel1', u'DOWN-NEGOTIATING'), (u'Tunnel2', u'DOWN')] + + report = self.driver.build_report_for_service(vpn_service) + expected_report = { + u'id': u'123', + u'updated_pending_status': False, + u'status': constants.DOWN, + u'ipsec_site_connections': { + u'1': {u'status': constants.DOWN, + u'updated_pending_status': False}, + u'2': {u'status': constants.DOWN, + u'updated_pending_status': False}} + } + self.assertEqual(expected_report, report) + # Check that service and connection statuses are updated + self.assertEqual(constants.DOWN, vpn_service.last_status) + self.assertEqual(constants.DOWN, + vpn_service.get_connection(u'1').last_status) + self.assertEqual(constants.DOWN, + vpn_service.get_connection(u'2').last_status) + + def test_report_service_with_connection_removed(self): + """One service with two connections where one is removed - report. + + With a connection removed and the other connection unchanged, + normally there would be nothing to report for the connections, but + we need to report any possible change to the service state. In this + case, the service was ACTIVE, but since the only ACTIVE connection + is deleted and the remaining connection is DOWN, the service will + indicate as DOWN. 
+ """ + # Simulate one service with one connection up, one down + conn1_data = {u'id': u'1', u'status': constants.ACTIVE, + u'admin_state_up': True, + u'mtu': 1500, + u'psk': u'secret', + u'peer_address': '192.168.1.2', + u'peer_cidrs': ['10.1.0.0/24', '10.2.0.0/24'], + u'ike_policy': {u'auth_algorithm': u'sha1', + u'encryption_algorithm': u'aes-128', + u'pfs': u'Group5', + u'ike_version': u'v1', + u'lifetime_units': u'seconds', + u'lifetime_value': 3600}, + u'ipsec_policy': {u'transform_protocol': u'ah', + u'encryption_algorithm': u'aes-128', + u'auth_algorithm': u'sha1', + u'pfs': u'group5', + u'lifetime_units': u'seconds', + u'lifetime_value': 3600}, + u'cisco': {u'site_conn_id': u'Tunnel1'}} + conn2_data = {u'id': u'2', u'status': constants.DOWN, + u'admin_state_up': True, + u'mtu': 1500, + u'psk': u'secret', + u'peer_address': '192.168.1.2', + u'peer_cidrs': ['10.1.0.0/24', '10.2.0.0/24'], + u'ike_policy': {u'auth_algorithm': u'sha1', + u'encryption_algorithm': u'aes-128', + u'pfs': u'Group5', + u'ike_version': u'v1', + u'lifetime_units': u'seconds', + u'lifetime_value': 3600}, + u'ipsec_policy': {u'transform_protocol': u'ah', + u'encryption_algorithm': u'aes-128', + u'auth_algorithm': u'sha1', + u'pfs': u'group5', + u'lifetime_units': u'seconds', + u'lifetime_value': 3600}, + u'cisco': {u'site_conn_id': u'Tunnel2'}} + service_data = {u'id': u'123', + u'status': constants.ACTIVE, + u'external_ip': u'1.1.1.1', + u'admin_state_up': True, + u'ipsec_conns': [conn1_data, conn2_data]} + vpn_service = self.driver.update_service(self.context, service_data) + self.assertEqual(constants.ACTIVE, vpn_service.last_status) + self.assertEqual(constants.ACTIVE, + vpn_service.get_connection(u'1').last_status) + self.assertEqual(constants.DOWN, + vpn_service.get_connection(u'2').last_status) + + # Simulate that one is deleted + self.driver.mark_existing_connections_as_dirty() + service_data = {u'id': u'123', + u'status': constants.ACTIVE, + u'external_ip': u'1.1.1.1', + 
u'admin_state_up': True, + u'ipsec_conns': [conn2_data]} + vpn_service = self.driver.update_service(self.context, service_data) + self.driver.remove_unknown_connections(self.context) + self.assertTrue(vpn_service.connections_removed) + self.assertEqual(constants.ACTIVE, vpn_service.last_status) + self.assertIsNone(vpn_service.get_connection(u'1')) + self.assertEqual(constants.DOWN, + vpn_service.get_connection(u'2').last_status) + + # Simulate that only one connection reports and status is unchanged, + # so there will be NO connection info to report. + self.csr.read_tunnel_statuses.return_value = [(u'Tunnel2', u'DOWN')] + report = self.driver.build_report_for_service(vpn_service) + expected_report = { + u'id': u'123', + u'updated_pending_status': False, + u'status': constants.DOWN, + u'ipsec_site_connections': {} + } + self.assertEqual(expected_report, report) + # Check that service and connection statuses are updated + self.assertEqual(constants.DOWN, vpn_service.last_status) + self.assertEqual(constants.DOWN, + vpn_service.get_connection(u'2').last_status) + + def test_report_service_admin_down_with_two_connections(self): + """One service admin down, with two connections - report. + + When the service is admin down, all the connections will report + as DOWN. 
+ """ + # Simulate one service (admin down) with two ACTIVE connections + conn1_data = {u'id': u'1', u'status': constants.ACTIVE, + u'admin_state_up': True, + u'cisco': {u'site_conn_id': u'Tunnel1'}} + conn2_data = {u'id': u'2', u'status': constants.ACTIVE, + u'admin_state_up': True, + u'cisco': {u'site_conn_id': u'Tunnel2'}} + service_data = {u'id': u'123', + u'status': constants.ACTIVE, + u'external_ip': u'1.1.1.1', + u'admin_state_up': False, + u'ipsec_conns': [conn1_data, conn2_data]} + vpn_service = self.driver.update_service(self.context, service_data) + # Since service admin down, connections will have been deleted + self.csr.read_tunnel_statuses.return_value = [] + + report = self.driver.build_report_for_service(vpn_service) + expected_report = { + u'id': u'123', + u'updated_pending_status': False, + u'status': constants.DOWN, + u'ipsec_site_connections': { + u'1': {u'status': constants.DOWN, + u'updated_pending_status': False}, + u'2': {u'status': constants.DOWN, + u'updated_pending_status': False}} + } + self.assertEqual(expected_report, report) + # Check that service and connection statuses are updated + self.assertEqual(constants.DOWN, vpn_service.last_status) + self.assertEqual(constants.DOWN, + vpn_service.get_connection(u'1').last_status) + self.assertEqual(constants.DOWN, + vpn_service.get_connection(u'2').last_status) + + def test_report_multiple_services(self): + """Status changes for several services - report.""" + # Simulate creation of the service and connection + (service1_data, + service2_data) = self.notification_for_two_services_with_two_conns() + vpn_service1 = self.driver.update_service(self.context, service1_data) + vpn_service2 = self.driver.update_service(self.context, service2_data) + # Simulate that the CSR has created the connections + self.csr.read_tunnel_statuses.return_value = [ + (u'Tunnel1', u'UP-ACTIVE'), (u'Tunnel2', u'DOWN'), + (u'Tunnel3', u'DOWN-NEGOTIATING'), (u'Tunnel4', u'UP-IDLE')] + + report = 
self.driver.report_status(self.context) + expected_report = [{u'id': u'123', + u'updated_pending_status': True, + u'status': constants.ACTIVE, + u'ipsec_site_connections': { + u'1': {u'status': constants.ACTIVE, + u'updated_pending_status': True}, + u'2': {u'status': constants.DOWN, + u'updated_pending_status': True}} + }, + {u'id': u'456', + u'updated_pending_status': True, + u'status': constants.ACTIVE, + u'ipsec_site_connections': { + u'3': {u'status': constants.DOWN, + u'updated_pending_status': True}, + u'4': {u'status': constants.ACTIVE, + u'updated_pending_status': True}} + }] + self.assertEqual(expected_report, report) + # Check that service and connection statuses are updated + self.assertEqual(constants.ACTIVE, vpn_service1.last_status) + self.assertEqual(constants.ACTIVE, + vpn_service1.get_connection(u'1').last_status) + self.assertEqual(constants.DOWN, + vpn_service1.get_connection(u'2').last_status) + self.assertEqual(constants.ACTIVE, vpn_service2.last_status) + self.assertEqual(constants.DOWN, + vpn_service2.get_connection(u'3').last_status) + self.assertEqual(constants.ACTIVE, + vpn_service2.get_connection(u'4').last_status) + + # TODO(pcm) FUTURE - UTs for update action, when supported. 
+ + def test_vpnservice_updated(self): + with mock.patch.object(self.driver, 'sync') as sync: + context = mock.Mock() + self.driver.vpnservice_updated(context) + sync.assert_called_once_with(context, []) + + +class TestCiscoCsrIPsecDeviceDriverConfigLoading(base.BaseTestCase): + + def create_tempfile(self, contents): + (fd, path) = tempfile.mkstemp(prefix='test', suffix='.conf') + try: + os.write(fd, contents.encode('utf-8')) + finally: + os.close(fd) + return path + + def test_loading_csr_configuration(self): + """Ensure that Cisco CSR configs can be loaded from config files.""" + cfg_file = self.create_tempfile('[CISCO_CSR_REST:3.2.1.1]\n' + 'rest_mgmt = 10.20.30.1\n' + 'tunnel_ip = 3.2.1.3\n' + 'username = me\n' + 'password = secret\n' + 'timeout = 5.0\n') + expected = {'3.2.1.1': {'rest_mgmt': '10.20.30.1', + 'tunnel_ip': '3.2.1.3', + 'username': 'me', + 'password': 'secret', + 'timeout': 5.0}} + csrs_found = ipsec_driver.find_available_csrs_from_config([cfg_file]) + self.assertEqual(expected, csrs_found) + + def test_loading_config_without_timeout(self): + """Cisco CSR config without timeout will use default timeout.""" + cfg_file = self.create_tempfile('[CISCO_CSR_REST:3.2.1.1]\n' + 'rest_mgmt = 10.20.30.1\n' + 'tunnel_ip = 3.2.1.3\n' + 'username = me\n' + 'password = secret\n') + expected = {'3.2.1.1': {'rest_mgmt': '10.20.30.1', + 'tunnel_ip': '3.2.1.3', + 'username': 'me', + 'password': 'secret', + 'timeout': csr_client.TIMEOUT}} + csrs_found = ipsec_driver.find_available_csrs_from_config([cfg_file]) + self.assertEqual(expected, csrs_found) + + def test_skip_loading_duplicate_csr_configuration(self): + """Failure test that duplicate configurations are ignored.""" + cfg_file = self.create_tempfile('[CISCO_CSR_REST:3.2.1.1]\n' + 'rest_mgmt = 10.20.30.1\n' + 'tunnel_ip = 3.2.1.3\n' + 'username = me\n' + 'password = secret\n' + 'timeout = 5.0\n' + '[CISCO_CSR_REST:3.2.1.1]\n' + 'rest_mgmt = 5.5.5.3\n' + 'tunnel_ip = 3.2.1.6\n' + 'username = me\n' + 'password = 
secret\n') + expected = {'3.2.1.1': {'rest_mgmt': '10.20.30.1', + 'tunnel_ip': '3.2.1.3', + 'username': 'me', + 'password': 'secret', + 'timeout': 5.0}} + csrs_found = ipsec_driver.find_available_csrs_from_config([cfg_file]) + self.assertEqual(expected, csrs_found) + + def test_fail_loading_config_with_invalid_timeout(self): + """Failure test of invalid timeout in config info.""" + cfg_file = self.create_tempfile('[CISCO_CSR_REST:3.2.1.1]\n' + 'rest_mgmt = 10.20.30.1\n' + 'tunnel_ip = 3.2.1.3\n' + 'username = me\n' + 'password = secret\n' + 'timeout = yes\n') + csrs_found = ipsec_driver.find_available_csrs_from_config([cfg_file]) + self.assertEqual({}, csrs_found) + + def test_fail_loading_config_missing_required_info(self): + """Failure test of config missing required info.""" + cfg_file = self.create_tempfile('[CISCO_CSR_REST:1.1.1.0]\n' + 'tunnel_ip = 1.1.1.3\n' + 'username = me\n' + 'password = secret\n' + 'timeout = 5.0\n' + '[CISCO_CSR_REST:2.2.2.0]\n' + 'rest_mgmt = 10.20.30.1\n' + 'username = me\n' + 'password = secret\n' + 'timeout = 5.0\n' + '[CISCO_CSR_REST:3.3.3.0]\n' + 'rest_mgmt = 10.20.30.1\n' + 'tunnel_ip = 3.3.3.3\n' + 'password = secret\n' + 'timeout = 5.0\n' + '[CISCO_CSR_REST:4.4.4.0]\n' + 'rest_mgmt = 10.20.30.1\n' + 'tunnel_ip = 4.4.4.4\n' + 'username = me\n' + 'timeout = 5.0\n') + csrs_found = ipsec_driver.find_available_csrs_from_config([cfg_file]) + self.assertEqual({}, csrs_found) + + def test_fail_loading_config_with_invalid_router_id(self): + """Failure test of config with invalid router ID.""" + cfg_file = self.create_tempfile('[CISCO_CSR_REST:4.3.2.1.9]\n' + 'rest_mgmt = 10.20.30.1\n' + 'tunnel_ip = 4.3.2.3\n' + 'username = me\n' + 'password = secret\n' + 'timeout = 5.0\n') + csrs_found = ipsec_driver.find_available_csrs_from_config([cfg_file]) + self.assertEqual({}, csrs_found) + + def test_fail_loading_config_with_invalid_mgmt_ip(self): + """Failure test of configuration with invalid management IP address.""" + cfg_file = 
self.create_tempfile('[CISCO_CSR_REST:3.2.1.1]\n' + 'rest_mgmt = 1.1.1.1.1\n' + 'tunnel_ip = 3.2.1.3\n' + 'username = me\n' + 'password = secret\n' + 'timeout = 5.0\n') + csrs_found = ipsec_driver.find_available_csrs_from_config([cfg_file]) + self.assertEqual({}, csrs_found) + + def test_fail_loading_config_with_invalid_tunnel_ip(self): + """Failure test of configuration with invalid tunnel IP address.""" + cfg_file = self.create_tempfile('[CISCO_CSR_REST:3.2.1.1]\n' + 'rest_mgmt = 1.1.1.1\n' + 'tunnel_ip = 3.2.1.4.5\n' + 'username = me\n' + 'password = secret\n' + 'timeout = 5.0\n') + csrs_found = ipsec_driver.find_available_csrs_from_config([cfg_file]) + self.assertEqual({}, csrs_found) + + def test_failure_no_configurations_entries(self): + """Failure test config file without any CSR definitions.""" + cfg_file = self.create_tempfile('NO CISCO SECTION AT ALL\n') + csrs_found = ipsec_driver.find_available_csrs_from_config([cfg_file]) + self.assertEqual({}, csrs_found) + + def test_failure_no_csr_configurations_entries(self): + """Failure test config file without any CSR definitions.""" + cfg_file = self.create_tempfile('[SOME_CONFIG:123]\n' + 'username = me\n') + csrs_found = ipsec_driver.find_available_csrs_from_config([cfg_file]) + self.assertEqual({}, csrs_found) + + def test_missing_config_value(self): + """Failure test of config file missing a value for attribute.""" + cfg_file = self.create_tempfile('[CISCO_CSR_REST:3.2.1.1]\n' + 'rest_mgmt = \n' + 'tunnel_ip = 3.2.1.3\n' + 'username = me\n' + 'password = secret\n' + 'timeout = 5.0\n') + csrs_found = ipsec_driver.find_available_csrs_from_config([cfg_file]) + self.assertEqual({}, csrs_found) + + def test_ignores_invalid_attribute_in_config(self): + """Test ignoring of config file with invalid attribute.""" + cfg_file = self.create_tempfile('[CISCO_CSR_REST:3.2.1.1]\n' + 'rest_mgmt = 1.1.1.1\n' + 'bogus = abcdef\n' + 'tunnel_ip = 3.2.1.3\n' + 'username = me\n' + 'password = secret\n' + 'timeout = 15.5\n') + 
expected = {'3.2.1.1': {'rest_mgmt': '1.1.1.1', + 'tunnel_ip': '3.2.1.3', + 'username': 'me', + 'password': 'secret', + 'timeout': 15.5}} + csrs_found = ipsec_driver.find_available_csrs_from_config([cfg_file]) + self.assertEqual(expected, csrs_found) diff --git a/neutron/tests/unit/services/vpn/device_drivers/test_ipsec.py b/neutron/tests/unit/services/vpn/device_drivers/test_ipsec.py new file mode 100644 index 000000000..29867a144 --- /dev/null +++ b/neutron/tests/unit/services/vpn/device_drivers/test_ipsec.py @@ -0,0 +1,258 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013, Nachi Ueno, NTT I3, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+import copy +import mock + +from neutron.openstack.common import uuidutils +from neutron.plugins.common import constants +from neutron.services.vpn.device_drivers import ipsec as ipsec_driver +from neutron.tests import base + +_uuid = uuidutils.generate_uuid +FAKE_HOST = 'fake_host' +FAKE_ROUTER_ID = _uuid() +FAKE_VPN_SERVICE = { + 'id': _uuid(), + 'router_id': FAKE_ROUTER_ID, + 'admin_state_up': True, + 'status': constants.PENDING_CREATE, + 'subnet': {'cidr': '10.0.0.0/24'}, + 'ipsec_site_connections': [ + {'peer_cidrs': ['20.0.0.0/24', + '30.0.0.0/24']}, + {'peer_cidrs': ['40.0.0.0/24', + '50.0.0.0/24']}] +} + + +class TestIPsecDeviceDriver(base.BaseTestCase): + def setUp(self, driver=ipsec_driver.OpenSwanDriver): + super(TestIPsecDeviceDriver, self).setUp() + + for klass in [ + 'os.makedirs', + 'os.path.isdir', + 'neutron.agent.linux.utils.replace_file', + 'neutron.common.rpc_compat.create_connection', + 'neutron.services.vpn.device_drivers.ipsec.' + 'OpenSwanProcess._gen_config_content', + 'shutil.rmtree', + ]: + mock.patch(klass).start() + self.execute = mock.patch( + 'neutron.agent.linux.utils.execute').start() + self.agent = mock.Mock() + self.driver = driver( + self.agent, + FAKE_HOST) + self.driver.agent_rpc = mock.Mock() + + def test_vpnservice_updated(self): + with mock.patch.object(self.driver, 'sync') as sync: + context = mock.Mock() + self.driver.vpnservice_updated(context) + sync.assert_called_once_with(context, []) + + def test_create_router(self): + process_id = _uuid() + process = mock.Mock() + process.vpnservice = FAKE_VPN_SERVICE + self.driver.processes = { + process_id: process} + self.driver.create_router(process_id) + process.enable.assert_called_once_with() + + def test_destroy_router(self): + process_id = _uuid() + process = mock.Mock() + process.vpnservice = FAKE_VPN_SERVICE + self.driver.processes = { + process_id: process} + self.driver.destroy_router(process_id) + process.disable.assert_called_once_with() + self.assertNotIn(process_id, 
self.driver.processes) + + def test_sync_added(self): + self.driver.agent_rpc.get_vpn_services_on_host.return_value = [ + FAKE_VPN_SERVICE] + context = mock.Mock() + process = mock.Mock() + process.vpnservice = FAKE_VPN_SERVICE + process.connection_status = {} + process.status = constants.ACTIVE + process.updated_pending_status = True + self.driver.process_status_cache = {} + self.driver.processes = { + FAKE_ROUTER_ID: process} + self.driver.sync(context, []) + self.agent.assert_has_calls([ + mock.call.add_nat_rule( + FAKE_ROUTER_ID, + 'POSTROUTING', + '-s 10.0.0.0/24 -d 20.0.0.0/24 -m policy ' + '--dir out --pol ipsec -j ACCEPT ', + top=True), + mock.call.add_nat_rule( + FAKE_ROUTER_ID, + 'POSTROUTING', + '-s 10.0.0.0/24 -d 30.0.0.0/24 -m policy ' + '--dir out --pol ipsec -j ACCEPT ', + top=True), + mock.call.add_nat_rule( + FAKE_ROUTER_ID, + 'POSTROUTING', + '-s 10.0.0.0/24 -d 40.0.0.0/24 -m policy ' + '--dir out --pol ipsec -j ACCEPT ', + top=True), + mock.call.add_nat_rule( + FAKE_ROUTER_ID, + 'POSTROUTING', + '-s 10.0.0.0/24 -d 50.0.0.0/24 -m policy ' + '--dir out --pol ipsec -j ACCEPT ', + top=True), + mock.call.iptables_apply(FAKE_ROUTER_ID) + ]) + process.update.assert_called_once_with() + self.driver.agent_rpc.update_status.assert_called_once_with( + context, + [{'status': 'ACTIVE', + 'ipsec_site_connections': {}, + 'updated_pending_status': True, + 'id': FAKE_VPN_SERVICE['id']}]) + + def fake_ensure_process(self, process_id, vpnservice=None): + process = self.driver.processes.get(process_id) + if not process: + process = mock.Mock() + process.vpnservice = FAKE_VPN_SERVICE + process.connection_status = {} + process.status = constants.ACTIVE + process.updated_pending_status = True + self.driver.processes[process_id] = process + elif vpnservice: + process.vpnservice = vpnservice + process.update_vpnservice(vpnservice) + return process + + def test_sync_update_vpnservice(self): + with mock.patch.object(self.driver, + 'ensure_process') as ensure_process: + 
ensure_process.side_effect = self.fake_ensure_process + new_vpn_service = FAKE_VPN_SERVICE + updated_vpn_service = copy.deepcopy(new_vpn_service) + updated_vpn_service['ipsec_site_connections'].append( + {'peer_cidrs': ['60.0.0.0/24', + '70.0.0.0/24']}) + context = mock.Mock() + self.driver.process_status_cache = {} + self.driver.agent_rpc.get_vpn_services_on_host.return_value = [ + new_vpn_service] + self.driver.sync(context, []) + process = self.driver.processes[FAKE_ROUTER_ID] + self.assertEqual(process.vpnservice, new_vpn_service) + self.driver.agent_rpc.get_vpn_services_on_host.return_value = [ + updated_vpn_service] + self.driver.sync(context, []) + process = self.driver.processes[FAKE_ROUTER_ID] + process.update_vpnservice.assert_called_once_with( + updated_vpn_service) + self.assertEqual(process.vpnservice, updated_vpn_service) + + def test_sync_removed(self): + self.driver.agent_rpc.get_vpn_services_on_host.return_value = [] + context = mock.Mock() + process_id = _uuid() + process = mock.Mock() + process.vpnservice = FAKE_VPN_SERVICE + self.driver.processes = { + process_id: process} + self.driver.sync(context, []) + process.disable.assert_called_once_with() + self.assertNotIn(process_id, self.driver.processes) + + def test_sync_removed_router(self): + self.driver.agent_rpc.get_vpn_services_on_host.return_value = [] + context = mock.Mock() + process_id = _uuid() + self.driver.sync(context, [{'id': process_id}]) + self.assertNotIn(process_id, self.driver.processes) + + def test_status_updated_on_connection_admin_down(self): + self.driver.process_status_cache = { + '1': { + 'status': constants.ACTIVE, + 'id': 123, + 'updated_pending_status': False, + 'ipsec_site_connections': { + '10': { + 'status': constants.ACTIVE, + 'updated_pending_status': False, + }, + '20': { + 'status': constants.ACTIVE, + 'updated_pending_status': False, + } + } + } + } + # Simulate that there is no longer status for connection '20' + # e.g. 
connection admin down + new_status = { + 'ipsec_site_connections': { + '10': { + 'status': constants.ACTIVE, + 'updated_pending_status': False + } + } + } + self.driver.update_downed_connections('1', new_status) + existing_conn = new_status['ipsec_site_connections'].get('10') + self.assertIsNotNone(existing_conn) + self.assertEqual(constants.ACTIVE, existing_conn['status']) + missing_conn = new_status['ipsec_site_connections'].get('20') + self.assertIsNotNone(missing_conn) + self.assertEqual(constants.DOWN, missing_conn['status']) + + def test_status_updated_on_service_admin_down(self): + self.driver.process_status_cache = { + '1': { + 'status': constants.ACTIVE, + 'id': 123, + 'updated_pending_status': False, + 'ipsec_site_connections': { + '10': { + 'status': constants.ACTIVE, + 'updated_pending_status': False, + }, + '20': { + 'status': constants.ACTIVE, + 'updated_pending_status': False, + } + } + } + } + # Simulate that there are no connections now + new_status = { + 'ipsec_site_connections': {} + } + self.driver.update_downed_connections('1', new_status) + missing_conn = new_status['ipsec_site_connections'].get('10') + self.assertIsNotNone(missing_conn) + self.assertEqual(constants.DOWN, missing_conn['status']) + missing_conn = new_status['ipsec_site_connections'].get('20') + self.assertIsNotNone(missing_conn) + self.assertEqual(constants.DOWN, missing_conn['status']) diff --git a/neutron/tests/unit/services/vpn/service_drivers/__init__.py b/neutron/tests/unit/services/vpn/service_drivers/__init__.py new file mode 100644 index 000000000..9b27a7520 --- /dev/null +++ b/neutron/tests/unit/services/vpn/service_drivers/__init__.py @@ -0,0 +1,16 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013, Nachi Ueno, NTT I3, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/neutron/tests/unit/services/vpn/service_drivers/test_cisco_ipsec.py b/neutron/tests/unit/services/vpn/service_drivers/test_cisco_ipsec.py new file mode 100644 index 000000000..f78d8b16c --- /dev/null +++ b/neutron/tests/unit/services/vpn/service_drivers/test_cisco_ipsec.py @@ -0,0 +1,365 @@ +# Copyright 2014 Cisco Systems, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import mock + + +from neutron import context as n_ctx +from neutron.db import api as dbapi +from neutron.openstack.common import uuidutils +from neutron.plugins.common import constants +from neutron.services.vpn.service_drivers import cisco_csr_db as csr_db +from neutron.services.vpn.service_drivers import cisco_ipsec as ipsec_driver +from neutron.tests import base + +_uuid = uuidutils.generate_uuid + +FAKE_VPN_CONN_ID = _uuid() + +FAKE_VPN_CONNECTION = { + 'vpnservice_id': _uuid(), + 'id': FAKE_VPN_CONN_ID, + 'ikepolicy_id': _uuid(), + 'ipsecpolicy_id': _uuid(), + 'tenant_id': _uuid() +} +FAKE_VPN_SERVICE = { + 'router_id': _uuid() +} +FAKE_HOST = 'fake_host' + + +class TestCiscoIPsecDriverValidation(base.BaseTestCase): + + def setUp(self): + super(TestCiscoIPsecDriverValidation, self).setUp() + mock.patch('neutron.common.rpc_compat.create_connection').start() + self.service_plugin = mock.Mock() + self.driver = ipsec_driver.CiscoCsrIPsecVPNDriver(self.service_plugin) + self.context = n_ctx.Context('some_user', 'some_tenant') + self.vpn_service = mock.Mock() + + def test_ike_version_unsupported(self): + """Failure test that Cisco CSR REST API does not support IKE v2.""" + policy_info = {'ike_version': 'v2', + 'lifetime': {'units': 'seconds', 'value': 60}} + self.assertRaises(ipsec_driver.CsrValidationFailure, + self.driver.validate_ike_version, policy_info) + + def test_ike_lifetime_not_in_seconds(self): + """Failure test of unsupported lifetime units for IKE policy.""" + policy_info = {'lifetime': {'units': 'kilobytes', 'value': 1000}} + self.assertRaises(ipsec_driver.CsrValidationFailure, + self.driver.validate_lifetime, + "IKE Policy", policy_info) + + def test_ipsec_lifetime_not_in_seconds(self): + """Failure test of unsupported lifetime units for IPSec policy.""" + policy_info = {'lifetime': {'units': 'kilobytes', 'value': 1000}} + self.assertRaises(ipsec_driver.CsrValidationFailure, + self.driver.validate_lifetime, + "IPSec Policy", policy_info) + + def 
test_ike_lifetime_seconds_values_at_limits(self): + """Test valid lifetime values for IKE policy.""" + policy_info = {'lifetime': {'units': 'seconds', 'value': 60}} + self.driver.validate_lifetime('IKE Policy', policy_info) + policy_info = {'lifetime': {'units': 'seconds', 'value': 86400}} + self.driver.validate_lifetime('IKE Policy', policy_info) + + def test_ipsec_lifetime_seconds_values_at_limits(self): + """Test valid lifetime values for IPSec policy.""" + policy_info = {'lifetime': {'units': 'seconds', 'value': 120}} + self.driver.validate_lifetime('IPSec Policy', policy_info) + policy_info = {'lifetime': {'units': 'seconds', 'value': 2592000}} + self.driver.validate_lifetime('IPSec Policy', policy_info) + + def test_ike_lifetime_values_invalid(self): + """Failure test of unsupported lifetime values for IKE policy.""" + which = "IKE Policy" + policy_info = {'lifetime': {'units': 'seconds', 'value': 59}} + self.assertRaises(ipsec_driver.CsrValidationFailure, + self.driver.validate_lifetime, + which, policy_info) + policy_info = {'lifetime': {'units': 'seconds', 'value': 86401}} + self.assertRaises(ipsec_driver.CsrValidationFailure, + self.driver.validate_lifetime, + which, policy_info) + + def test_ipsec_lifetime_values_invalid(self): + """Failure test of unsupported lifetime values for IPSec policy.""" + which = "IPSec Policy" + policy_info = {'lifetime': {'units': 'seconds', 'value': 119}} + self.assertRaises(ipsec_driver.CsrValidationFailure, + self.driver.validate_lifetime, + which, policy_info) + policy_info = {'lifetime': {'units': 'seconds', 'value': 2592001}} + self.assertRaises(ipsec_driver.CsrValidationFailure, + self.driver.validate_lifetime, + which, policy_info) + + def test_ipsec_connection_with_mtu_at_limits(self): + """Test IPSec site-to-site connection with MTU at limits.""" + conn_info = {'mtu': 1500} + self.driver.validate_mtu(conn_info) + conn_info = {'mtu': 9192} + self.driver.validate_mtu(conn_info) + + def 
test_ipsec_connection_with_invalid_mtu(self): + """Failure test of IPSec site connection with unsupported MTUs.""" + conn_info = {'mtu': 1499} + self.assertRaises(ipsec_driver.CsrValidationFailure, + self.driver.validate_mtu, conn_info) + conn_info = {'mtu': 9193} + self.assertRaises(ipsec_driver.CsrValidationFailure, + self.driver.validate_mtu, conn_info) + + def simulate_gw_ip_available(self): + """Helper function indicating that tunnel has a gateway IP.""" + def have_one(): + return 1 + self.vpn_service.router.gw_port.fixed_ips.__len__ = have_one + ip_addr_mock = mock.Mock() + self.vpn_service.router.gw_port.fixed_ips = [ip_addr_mock] + return ip_addr_mock + + def test_have_public_ip_for_router(self): + """Ensure that router for IPSec connection has gateway IP.""" + self.simulate_gw_ip_available() + self.driver.validate_public_ip_present(self.vpn_service) + + def test_router_with_missing_gateway_ip(self): + """Failure test of IPSec connection with missing gateway IP.""" + self.simulate_gw_ip_available() + self.vpn_service.router.gw_port = None + self.assertRaises(ipsec_driver.CsrValidationFailure, + self.driver.validate_public_ip_present, + self.vpn_service) + + def test_peer_id_is_an_ip_address(self): + """Ensure peer ID is an IP address for IPsec connection create.""" + ipsec_conn = {'peer_id': '10.10.10.10'} + self.driver.validate_peer_id(ipsec_conn) + + def test_peer_id_is_not_ip_address(self): + """Failure test of peer_id that is not an IP address.""" + ipsec_conn = {'peer_id': 'some-site.com'} + self.assertRaises(ipsec_driver.CsrValidationFailure, + self.driver.validate_peer_id, ipsec_conn) + + def test_validation_for_create_ipsec_connection(self): + """Ensure all validation passes for IPSec site connection create.""" + self.simulate_gw_ip_available() + # Provide the minimum needed items to validate + ipsec_conn = {'id': '1', + 'ikepolicy_id': '123', + 'ipsecpolicy_id': '2', + 'mtu': 1500, + 'peer_id': '10.10.10.10'} + self.service_plugin.get_ikepolicy = 
mock.Mock( + return_value={'ike_version': 'v1', + 'lifetime': {'units': 'seconds', 'value': 60}}) + self.service_plugin.get_ipsecpolicy = mock.Mock( + return_value={'lifetime': {'units': 'seconds', 'value': 120}}) + self.driver.validate_ipsec_connection(self.context, ipsec_conn, + self.vpn_service) + + +class TestCiscoIPsecDriverMapping(base.BaseTestCase): + + def setUp(self): + super(TestCiscoIPsecDriverMapping, self).setUp() + self.context = mock.patch.object(n_ctx, 'Context').start() + self.session = self.context.session + self.query_mock = self.session.query.return_value.order_by + + def test_identifying_first_mapping_id(self): + """Make sure first available ID is obtained for each ID type.""" + # Simulate mapping table is empty - get first one + self.query_mock.return_value = [] + next_id = csr_db.get_next_available_tunnel_id(self.session) + self.assertEqual(0, next_id) + + next_id = csr_db.get_next_available_ike_policy_id(self.session) + self.assertEqual(1, next_id) + + next_id = csr_db.get_next_available_ipsec_policy_id(self.session) + self.assertEqual(1, next_id) + + def test_last_mapping_id_available(self): + """Make sure can get the last ID for each of the table types.""" + # Simulate query indicates table is full + self.query_mock.return_value = [ + (x, ) for x in xrange(csr_db.MAX_CSR_TUNNELS - 1)] + next_id = csr_db.get_next_available_tunnel_id(self.session) + self.assertEqual(csr_db.MAX_CSR_TUNNELS - 1, next_id) + + self.query_mock.return_value = [ + (x, ) for x in xrange(1, csr_db.MAX_CSR_IKE_POLICIES)] + next_id = csr_db.get_next_available_ike_policy_id(self.session) + self.assertEqual(csr_db.MAX_CSR_IKE_POLICIES, next_id) + + self.query_mock.return_value = [ + (x, ) for x in xrange(1, csr_db.MAX_CSR_IPSEC_POLICIES)] + next_id = csr_db.get_next_available_ipsec_policy_id(self.session) + self.assertEqual(csr_db.MAX_CSR_IPSEC_POLICIES, next_id) + + def test_reusing_first_available_mapping_id(self): + """Ensure that we reuse the first available ID. 
+ + Make sure that the next lowest ID is obtained from the mapping + table when there are "holes" from deletions. Database query sorts + the entries, so will return them in order. Using tunnel ID, as the + logic is the same for each ID type. + """ + self.query_mock.return_value = [(0, ), (1, ), (2, ), (5, ), (6, )] + next_id = csr_db.get_next_available_tunnel_id(self.session) + self.assertEqual(3, next_id) + + def test_no_more_mapping_ids_available(self): + """Failure test of trying to reserve ID, when none available.""" + self.query_mock.return_value = [ + (x, ) for x in xrange(csr_db.MAX_CSR_TUNNELS)] + self.assertRaises(IndexError, csr_db.get_next_available_tunnel_id, + self.session) + + self.query_mock.return_value = [ + (x, ) for x in xrange(1, csr_db.MAX_CSR_IKE_POLICIES + 1)] + self.assertRaises(IndexError, csr_db.get_next_available_ike_policy_id, + self.session) + + self.query_mock.return_value = [ + (x, ) for x in xrange(1, csr_db.MAX_CSR_IPSEC_POLICIES + 1)] + self.assertRaises(IndexError, + csr_db.get_next_available_ipsec_policy_id, + self.session) + + def test_create_tunnel_mappings(self): + """Ensure successfully create new tunnel mappings.""" + # Simulate that first IDs are obtained + self.query_mock.return_value = [] + map_db_mock = mock.patch.object(csr_db, 'IdentifierMap').start() + conn_info = {'ikepolicy_id': '10', + 'ipsecpolicy_id': '50', + 'id': '100', + 'tenant_id': '1000'} + csr_db.create_tunnel_mapping(self.context, conn_info) + map_db_mock.assert_called_once_with(csr_tunnel_id=0, + csr_ike_policy_id=1, + csr_ipsec_policy_id=1, + ipsec_site_conn_id='100', + tenant_id='1000') + # Create another, with next ID of 2 for all IDs (not mocking each + # ID separately, so will not have different IDs). 
+ self.query_mock.return_value = [(0, ), (1, )] + map_db_mock.reset_mock() + conn_info = {'ikepolicy_id': '20', + 'ipsecpolicy_id': '60', + 'id': '101', + 'tenant_id': '1000'} + csr_db.create_tunnel_mapping(self.context, conn_info) + map_db_mock.assert_called_once_with(csr_tunnel_id=2, + csr_ike_policy_id=2, + csr_ipsec_policy_id=2, + ipsec_site_conn_id='101', + tenant_id='1000') + + +class TestCiscoIPsecDriver(base.BaseTestCase): + + """Test that various incoming requests are sent to device driver.""" + + def setUp(self): + super(TestCiscoIPsecDriver, self).setUp() + dbapi.configure_db() + self.addCleanup(dbapi.clear_db) + mock.patch('neutron.common.rpc_compat.create_connection').start() + + l3_agent = mock.Mock() + l3_agent.host = FAKE_HOST + plugin = mock.Mock() + plugin.get_l3_agents_hosting_routers.return_value = [l3_agent] + plugin_p = mock.patch('neutron.manager.NeutronManager.get_plugin') + get_plugin = plugin_p.start() + get_plugin.return_value = plugin + service_plugin_p = mock.patch( + 'neutron.manager.NeutronManager.get_service_plugins') + get_service_plugin = service_plugin_p.start() + get_service_plugin.return_value = {constants.L3_ROUTER_NAT: plugin} + + service_plugin = mock.Mock() + service_plugin.get_l3_agents_hosting_routers.return_value = [l3_agent] + service_plugin._get_vpnservice.return_value = { + 'router_id': _uuid() + } + self.db_update_mock = service_plugin.update_ipsec_site_conn_status + self.driver = ipsec_driver.CiscoCsrIPsecVPNDriver(service_plugin) + self.driver.validate_ipsec_connection = mock.Mock() + mock.patch.object(csr_db, 'create_tunnel_mapping').start() + self.context = n_ctx.Context('some_user', 'some_tenant') + + def _test_update(self, func, args, additional_info=None): + with mock.patch.object(self.driver.agent_rpc, 'cast') as cast: + func(self.context, *args) + cast.assert_called_once_with( + self.context, + {'args': additional_info, + 'namespace': None, + 'method': 'vpnservice_updated'}, + version='1.0', + 
topic='cisco_csr_ipsec_agent.fake_host') + + def test_create_ipsec_site_connection(self): + self._test_update(self.driver.create_ipsec_site_connection, + [FAKE_VPN_CONNECTION], + {'reason': 'ipsec-conn-create'}) + + def test_failure_validation_ipsec_connection(self): + """Failure test of validation during IPSec site connection create. + + Simulate a validation failure, and ensure that database is + updated to indicate connection is in error state. + + TODO(pcm): FUTURE - remove test case, once vendor plugin + validation is done before database commit. + """ + self.driver.validate_ipsec_connection.side_effect = ( + ipsec_driver.CsrValidationFailure(resource='IPSec Connection', + key='mtu', value=1000)) + self.assertRaises(ipsec_driver.CsrValidationFailure, + self.driver.create_ipsec_site_connection, + self.context, FAKE_VPN_CONNECTION) + self.db_update_mock.assert_called_with(self.context, + FAKE_VPN_CONN_ID, + constants.ERROR) + + def test_update_ipsec_site_connection(self): + self._test_update(self.driver.update_ipsec_site_connection, + [FAKE_VPN_CONNECTION, FAKE_VPN_CONNECTION], + {'reason': 'ipsec-conn-update'}) + + def test_delete_ipsec_site_connection(self): + self._test_update(self.driver.delete_ipsec_site_connection, + [FAKE_VPN_CONNECTION], + {'reason': 'ipsec-conn-delete'}) + + def test_update_vpnservice(self): + self._test_update(self.driver.update_vpnservice, + [FAKE_VPN_SERVICE, FAKE_VPN_SERVICE], + {'reason': 'vpn-service-update'}) + + def test_delete_vpnservice(self): + self._test_update(self.driver.delete_vpnservice, + [FAKE_VPN_SERVICE], + {'reason': 'vpn-service-delete'}) diff --git a/neutron/tests/unit/services/vpn/service_drivers/test_ipsec.py b/neutron/tests/unit/services/vpn/service_drivers/test_ipsec.py new file mode 100644 index 000000000..2f7cbfaf9 --- /dev/null +++ b/neutron/tests/unit/services/vpn/service_drivers/test_ipsec.py @@ -0,0 +1,91 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013, Nachi Ueno, NTT I3, Inc. 
+# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import mock + +from neutron import context +from neutron.openstack.common import uuidutils +from neutron.plugins.common import constants +from neutron.services.vpn.service_drivers import ipsec as ipsec_driver +from neutron.tests import base + +_uuid = uuidutils.generate_uuid + +FAKE_VPN_CONNECTION = { + 'vpnservice_id': _uuid() +} +FAKE_VPN_SERVICE = { + 'router_id': _uuid() +} +FAKE_HOST = 'fake_host' + + +class TestIPsecDriver(base.BaseTestCase): + def setUp(self): + super(TestIPsecDriver, self).setUp() + mock.patch('neutron.common.rpc_compat.create_connection').start() + + l3_agent = mock.Mock() + l3_agent.host = FAKE_HOST + plugin = mock.Mock() + plugin.get_l3_agents_hosting_routers.return_value = [l3_agent] + plugin_p = mock.patch('neutron.manager.NeutronManager.get_plugin') + get_plugin = plugin_p.start() + get_plugin.return_value = plugin + service_plugin_p = mock.patch( + 'neutron.manager.NeutronManager.get_service_plugins') + get_service_plugin = service_plugin_p.start() + get_service_plugin.return_value = {constants.L3_ROUTER_NAT: plugin} + + service_plugin = mock.Mock() + service_plugin.get_l3_agents_hosting_routers.return_value = [l3_agent] + service_plugin._get_vpnservice.return_value = { + 'router_id': _uuid() + } + self.driver = ipsec_driver.IPsecVPNDriver(service_plugin) + + def _test_update(self, func, args): + ctxt = context.Context('', 'somebody') + with 
mock.patch.object(self.driver.agent_rpc, 'cast') as cast: + func(ctxt, *args) + cast.assert_called_once_with( + ctxt, + {'args': {}, + 'namespace': None, + 'method': 'vpnservice_updated'}, + version='1.0', + topic='ipsec_agent.fake_host') + + def test_create_ipsec_site_connection(self): + self._test_update(self.driver.create_ipsec_site_connection, + [FAKE_VPN_CONNECTION]) + + def test_update_ipsec_site_connection(self): + self._test_update(self.driver.update_ipsec_site_connection, + [FAKE_VPN_CONNECTION, FAKE_VPN_CONNECTION]) + + def test_delete_ipsec_site_connection(self): + self._test_update(self.driver.delete_ipsec_site_connection, + [FAKE_VPN_CONNECTION]) + + def test_update_vpnservice(self): + self._test_update(self.driver.update_vpnservice, + [FAKE_VPN_SERVICE, FAKE_VPN_SERVICE]) + + def test_delete_vpnservice(self): + self._test_update(self.driver.delete_vpnservice, + [FAKE_VPN_SERVICE]) diff --git a/neutron/tests/unit/services/vpn/test_vpn_agent.py b/neutron/tests/unit/services/vpn/test_vpn_agent.py new file mode 100644 index 000000000..7b1cab523 --- /dev/null +++ b/neutron/tests/unit/services/vpn/test_vpn_agent.py @@ -0,0 +1,196 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013, Nachi Ueno, NTT I3, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import mock +from oslo.config import cfg + +from neutron.agent.common import config as agent_config +from neutron.agent import l3_agent +from neutron.agent.linux import interface +from neutron.common import config as base_config +from neutron.openstack.common import uuidutils +from neutron.services.vpn import agent +from neutron.services.vpn import device_drivers +from neutron.tests import base + +_uuid = uuidutils.generate_uuid +NOOP_DEVICE_CLASS = 'NoopDeviceDriver' +NOOP_DEVICE = ('neutron.tests.unit.services.' + 'vpn.test_vpn_agent.%s' % NOOP_DEVICE_CLASS) + + +class NoopDeviceDriver(device_drivers.DeviceDriver): + def sync(self, context, processes): + pass + + def create_router(self, process_id): + pass + + def destroy_router(self, process_id): + pass + + +class TestVPNAgent(base.BaseTestCase): + def setUp(self): + super(TestVPNAgent, self).setUp() + self.conf = cfg.CONF + self.conf.register_opts(base_config.core_opts) + self.conf.register_opts(l3_agent.L3NATAgent.OPTS) + self.conf.register_opts(interface.OPTS) + agent_config.register_interface_driver_opts_helper(self.conf) + agent_config.register_use_namespaces_opts_helper(self.conf) + agent_config.register_agent_state_opts_helper(self.conf) + agent_config.register_root_helper(self.conf) + + self.conf.set_override('interface_driver', + 'neutron.agent.linux.interface.NullDriver') + self.conf.set_override( + 'vpn_device_driver', + [NOOP_DEVICE], + 'vpnagent') + + for clazz in [ + 'neutron.agent.linux.ip_lib.device_exists', + 'neutron.agent.linux.ip_lib.IPWrapper', + 'neutron.agent.linux.interface.NullDriver', + 'neutron.agent.linux.utils.execute' + ]: + mock.patch(clazz).start() + + l3pluginApi_cls = mock.patch( + 'neutron.agent.l3_agent.L3PluginApi').start() + self.plugin_api = mock.Mock() + l3pluginApi_cls.return_value = self.plugin_api + + looping_call_p = mock.patch( + 'neutron.openstack.common.loopingcall.FixedIntervalLoopingCall') + looping_call_p.start() + + self.fake_host = 'fake_host' + self.agent = 
agent.VPNAgent(self.fake_host) + + def test_setup_drivers(self): + self.assertEqual(1, len(self.agent.devices)) + device = self.agent.devices[0] + self.assertEqual( + NOOP_DEVICE_CLASS, + device.__class__.__name__ + ) + + def test_get_namespace(self): + router_id = _uuid() + ri = l3_agent.RouterInfo(router_id, self.conf.root_helper, + self.conf.use_namespaces, None) + self.agent.router_info = {router_id: ri} + namespace = self.agent.get_namespace(router_id) + self.assertTrue(namespace.endswith(router_id)) + self.assertFalse(self.agent.get_namespace('fake_id')) + + def test_add_nat_rule(self): + router_id = _uuid() + ri = l3_agent.RouterInfo(router_id, self.conf.root_helper, + self.conf.use_namespaces, None) + iptables = mock.Mock() + ri.iptables_manager.ipv4['nat'] = iptables + self.agent.router_info = {router_id: ri} + self.agent.add_nat_rule(router_id, 'fake_chain', 'fake_rule', True) + iptables.add_rule.assert_called_once_with( + 'fake_chain', 'fake_rule', top=True) + + def test_add_nat_rule_with_no_router(self): + self.agent.router_info = {} + #Should do nothing + self.agent.add_nat_rule( + 'fake_router_id', + 'fake_chain', + 'fake_rule', + True) + + def test_remove_rule(self): + router_id = _uuid() + ri = l3_agent.RouterInfo(router_id, self.conf.root_helper, + self.conf.use_namespaces, None) + iptables = mock.Mock() + ri.iptables_manager.ipv4['nat'] = iptables + self.agent.router_info = {router_id: ri} + self.agent.remove_nat_rule(router_id, 'fake_chain', 'fake_rule', True) + iptables.remove_rule.assert_called_once_with( + 'fake_chain', 'fake_rule', top=True) + + def test_remove_rule_with_no_router(self): + self.agent.router_info = {} + #Should do nothing + self.agent.remove_nat_rule( + 'fake_router_id', + 'fake_chain', + 'fake_rule') + + def test_iptables_apply(self): + router_id = _uuid() + ri = l3_agent.RouterInfo(router_id, self.conf.root_helper, + self.conf.use_namespaces, None) + iptables = mock.Mock() + ri.iptables_manager = iptables + 
self.agent.router_info = {router_id: ri} + self.agent.iptables_apply(router_id) + iptables.apply.assert_called_once_with() + + def test_iptables_apply_with_no_router(self): + #Should do nothing + self.agent.router_info = {} + self.agent.iptables_apply('fake_router_id') + + def test_router_added(self): + mock.patch( + 'neutron.agent.linux.iptables_manager.IptablesManager').start() + router_id = _uuid() + router = {'id': router_id} + device = mock.Mock() + self.agent.devices = [device] + self.agent._router_added(router_id, router) + device.create_router.assert_called_once_with(router_id) + + def test_router_removed(self): + self.plugin_api.get_external_network_id.return_value = None + mock.patch( + 'neutron.agent.linux.iptables_manager.IptablesManager').start() + router_id = _uuid() + ri = l3_agent.RouterInfo(router_id, self.conf.root_helper, + self.conf.use_namespaces, None) + ri.router = { + 'id': _uuid(), + 'admin_state_up': True, + 'routes': [], + 'external_gateway_info': {}} + device = mock.Mock() + self.agent.router_info = {router_id: ri} + self.agent.devices = [device] + self.agent._router_removed(router_id) + device.destroy_router.assert_called_once_with(router_id) + + def test_process_routers(self): + self.plugin_api.get_external_network_id.return_value = None + routers = [ + {'id': _uuid(), + 'admin_state_up': True, + 'routes': [], + 'external_gateway_info': {}}] + + device = mock.Mock() + self.agent.devices = [device] + self.agent._process_routers(routers, False) + device.sync.assert_called_once_with(mock.ANY, routers) diff --git a/neutron/tests/unit/services/vpn/test_vpnaas_driver_plugin.py b/neutron/tests/unit/services/vpn/test_vpnaas_driver_plugin.py new file mode 100644 index 000000000..45f932ec1 --- /dev/null +++ b/neutron/tests/unit/services/vpn/test_vpnaas_driver_plugin.py @@ -0,0 +1,160 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013, Nachi Ueno, NTT I3, Inc. +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +import contextlib + +import mock + +from neutron.common import constants +from neutron import context +from neutron import manager +from neutron.plugins.common import constants as p_constants +from neutron.services.vpn.service_drivers import ipsec as ipsec_driver +from neutron.tests.unit.db.vpn import test_db_vpnaas +from neutron.tests.unit.openvswitch import test_agent_scheduler +from neutron.tests.unit import test_agent_ext_plugin + +FAKE_HOST = test_agent_ext_plugin.L3_HOSTA +VPN_DRIVER_CLASS = 'neutron.services.vpn.plugin.VPNDriverPlugin' + + +class TestVPNDriverPlugin(test_db_vpnaas.TestVpnaas, + test_agent_scheduler.AgentSchedulerTestMixIn, + test_agent_ext_plugin.AgentDBTestMixIn): + def setUp(self): + self.adminContext = context.get_admin_context() + driver_cls_p = mock.patch( + 'neutron.services.vpn.' 
+ 'service_drivers.ipsec.IPsecVPNDriver') + driver_cls = driver_cls_p.start() + self.driver = mock.Mock() + self.driver.service_type = ipsec_driver.IPSEC + driver_cls.return_value = self.driver + super(TestVPNDriverPlugin, self).setUp( + vpnaas_plugin=VPN_DRIVER_CLASS) + + def test_create_ipsec_site_connection(self, **extras): + super(TestVPNDriverPlugin, self).test_create_ipsec_site_connection() + self.driver.create_ipsec_site_connection.assert_called_once_with( + mock.ANY, mock.ANY) + self.driver.delete_ipsec_site_connection.assert_called_once_with( + mock.ANY, mock.ANY) + + def test_delete_vpnservice(self, **extras): + super(TestVPNDriverPlugin, self).test_delete_vpnservice() + self.driver.delete_vpnservice.assert_called_once_with( + mock.ANY, mock.ANY) + + def test_update_vpnservice(self, **extras): + super(TestVPNDriverPlugin, self).test_update_vpnservice() + self.driver.update_vpnservice.assert_called_once_with( + mock.ANY, mock.ANY, mock.ANY) + + @contextlib.contextmanager + def vpnservice_set(self): + """Test case to create a ipsec_site_connection.""" + vpnservice_name = "vpn1" + ipsec_site_connection_name = "ipsec_site_connection" + ikename = "ikepolicy1" + ipsecname = "ipsecpolicy1" + description = "my-vpn-connection" + keys = {'name': vpnservice_name, + 'description': "my-vpn-connection", + 'peer_address': '192.168.1.10', + 'peer_id': '192.168.1.10', + 'peer_cidrs': ['192.168.2.0/24', '192.168.3.0/24'], + 'initiator': 'bi-directional', + 'mtu': 1500, + 'dpd_action': 'hold', + 'dpd_interval': 40, + 'dpd_timeout': 120, + 'tenant_id': self._tenant_id, + 'psk': 'abcd', + 'status': 'PENDING_CREATE', + 'admin_state_up': True} + with self.ikepolicy(name=ikename) as ikepolicy: + with self.ipsecpolicy(name=ipsecname) as ipsecpolicy: + with self.subnet() as subnet: + with self.router() as router: + plugin = manager.NeutronManager.get_plugin() + agent = {'host': FAKE_HOST, + 'agent_type': constants.AGENT_TYPE_L3, + 'binary': 'fake-binary', + 'topic': 'fake-topic'} 
+ plugin.create_or_update_agent(self.adminContext, agent) + plugin.schedule_router( + self.adminContext, router['router']['id']) + with self.vpnservice(name=vpnservice_name, + subnet=subnet, + router=router) as vpnservice1: + keys['ikepolicy_id'] = ikepolicy['ikepolicy']['id'] + keys['ipsecpolicy_id'] = ( + ipsecpolicy['ipsecpolicy']['id'] + ) + keys['vpnservice_id'] = ( + vpnservice1['vpnservice']['id'] + ) + with self.ipsec_site_connection( + self.fmt, + ipsec_site_connection_name, + keys['peer_address'], + keys['peer_id'], + keys['peer_cidrs'], + keys['mtu'], + keys['psk'], + keys['initiator'], + keys['dpd_action'], + keys['dpd_interval'], + keys['dpd_timeout'], + vpnservice1, + ikepolicy, + ipsecpolicy, + keys['admin_state_up'], + description=description, + ): + yield vpnservice1['vpnservice'] + + def test_get_agent_hosting_vpn_services(self): + with self.vpnservice_set(): + service_plugin = manager.NeutronManager.get_service_plugins()[ + p_constants.VPN] + vpnservices = service_plugin._get_agent_hosting_vpn_services( + self.adminContext, FAKE_HOST) + vpnservices = vpnservices.all() + self.assertEqual(1, len(vpnservices)) + vpnservice_db = vpnservices[0] + self.assertEqual(1, len(vpnservice_db.ipsec_site_connections)) + ipsec_site_connection = vpnservice_db.ipsec_site_connections[0] + self.assertIsNotNone( + ipsec_site_connection['ikepolicy']) + self.assertIsNotNone( + ipsec_site_connection['ipsecpolicy']) + + def test_update_status(self): + with self.vpnservice_set() as vpnservice: + self._register_agent_states() + service_plugin = manager.NeutronManager.get_service_plugins()[ + p_constants.VPN] + service_plugin.update_status_by_agent( + self.adminContext, + [{'status': 'ACTIVE', + 'ipsec_site_connections': {}, + 'updated_pending_status': True, + 'id': vpnservice['id']}]) + vpnservices = service_plugin._get_agent_hosting_vpn_services( + self.adminContext, FAKE_HOST) + vpnservice_db = vpnservices[0] + self.assertEqual(p_constants.ACTIVE, 
vpnservice_db['status']) diff --git a/neutron/tests/unit/services/vpn/test_vpnaas_extension.py b/neutron/tests/unit/services/vpn/test_vpnaas_extension.py new file mode 100644 index 000000000..beff3d7a5 --- /dev/null +++ b/neutron/tests/unit/services/vpn/test_vpnaas_extension.py @@ -0,0 +1,530 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# (c) Copyright 2013 Hewlett-Packard Development Company, L.P. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Swaminathan Vasudevan, Hewlett-Packard. 
+ +import copy + +import mock +from webob import exc + +from neutron.extensions import vpnaas +from neutron.openstack.common import uuidutils +from neutron.plugins.common import constants +from neutron.tests.unit import test_api_v2 +from neutron.tests.unit import test_api_v2_extension + + +_uuid = uuidutils.generate_uuid +_get_path = test_api_v2._get_path + + +class VpnaasExtensionTestCase(test_api_v2_extension.ExtensionTestCase): + fmt = 'json' + + def setUp(self): + super(VpnaasExtensionTestCase, self).setUp() + plural_mappings = {'ipsecpolicy': 'ipsecpolicies', + 'ikepolicy': 'ikepolicies', + 'ipsec_site_connection': 'ipsec-site-connections'} + self._setUpExtension( + 'neutron.extensions.vpnaas.VPNPluginBase', constants.VPN, + vpnaas.RESOURCE_ATTRIBUTE_MAP, vpnaas.Vpnaas, + 'vpn', plural_mappings=plural_mappings, + use_quota=True) + + def test_ikepolicy_create(self): + """Test case to create an ikepolicy.""" + ikepolicy_id = _uuid() + data = {'ikepolicy': {'name': 'ikepolicy1', + 'description': 'myikepolicy1', + 'auth_algorithm': 'sha1', + 'encryption_algorithm': 'aes-128', + 'phase1_negotiation_mode': 'main', + 'lifetime': { + 'units': 'seconds', + 'value': 3600}, + 'ike_version': 'v1', + 'pfs': 'group5', + 'tenant_id': _uuid()}} + + return_value = copy.copy(data['ikepolicy']) + return_value.update({'id': ikepolicy_id}) + + instance = self.plugin.return_value + instance.create_ikepolicy.return_value = return_value + res = self.api.post(_get_path('vpn/ikepolicies', fmt=self.fmt), + self.serialize(data), + content_type='application/%s' % self.fmt) + instance.create_ikepolicy.assert_called_with(mock.ANY, + ikepolicy=data) + self.assertEqual(res.status_int, exc.HTTPCreated.code) + res = self.deserialize(res) + self.assertIn('ikepolicy', res) + self.assertEqual(res['ikepolicy'], return_value) + + def test_ikepolicy_list(self): + """Test case to list all ikepolicies.""" + ikepolicy_id = _uuid() + return_value = [{'name': 'ikepolicy1', + 'auth_algorithm': 'sha1', + 
'encryption_algorithm': 'aes-128', + 'pfs': 'group5', + 'ike_version': 'v1', + 'id': ikepolicy_id}] + + instance = self.plugin.return_value + instance.get_ikepolicies.return_value = return_value + + res = self.api.get(_get_path('vpn/ikepolicies', fmt=self.fmt)) + + instance.get_ikepolicies.assert_called_with(mock.ANY, + fields=mock.ANY, + filters=mock.ANY) + self.assertEqual(res.status_int, exc.HTTPOk.code) + + def test_ikepolicy_update(self): + """Test case to update an ikepolicy.""" + ikepolicy_id = _uuid() + update_data = {'ikepolicy': {'name': 'ikepolicy1', + 'encryption_algorithm': 'aes-256'}} + return_value = {'name': 'ikepolicy1', + 'auth_algorithm': 'sha1', + 'encryption_algorithm': 'aes-256', + 'phase1_negotiation_mode': 'main', + 'lifetime': { + 'units': 'seconds', + 'value': 3600}, + 'ike_version': 'v1', + 'pfs': 'group5', + 'tenant_id': _uuid(), + 'id': ikepolicy_id} + + instance = self.plugin.return_value + instance.update_ikepolicy.return_value = return_value + + res = self.api.put(_get_path('vpn/ikepolicies', id=ikepolicy_id, + fmt=self.fmt), + self.serialize(update_data)) + + instance.update_ikepolicy.assert_called_with(mock.ANY, ikepolicy_id, + ikepolicy=update_data) + self.assertEqual(res.status_int, exc.HTTPOk.code) + res = self.deserialize(res) + self.assertIn('ikepolicy', res) + self.assertEqual(res['ikepolicy'], return_value) + + def test_ikepolicy_get(self): + """Test case to get or show an ikepolicy.""" + ikepolicy_id = _uuid() + return_value = {'name': 'ikepolicy1', + 'auth_algorithm': 'sha1', + 'encryption_algorithm': 'aes-128', + 'phase1_negotiation_mode': 'main', + 'lifetime': { + 'units': 'seconds', + 'value': 3600}, + 'ike_version': 'v1', + 'pfs': 'group5', + 'tenant_id': _uuid(), + 'id': ikepolicy_id} + + instance = self.plugin.return_value + instance.get_ikepolicy.return_value = return_value + + res = self.api.get(_get_path('vpn/ikepolicies', id=ikepolicy_id, + fmt=self.fmt)) + + instance.get_ikepolicy.assert_called_with(mock.ANY, + 
ikepolicy_id, + fields=mock.ANY) + self.assertEqual(res.status_int, exc.HTTPOk.code) + res = self.deserialize(res) + self.assertIn('ikepolicy', res) + self.assertEqual(res['ikepolicy'], return_value) + + def test_ikepolicy_delete(self): + """Test case to delete an ikepolicy.""" + self._test_entity_delete('ikepolicy') + + def test_ipsecpolicy_create(self): + """Test case to create an ipsecpolicy.""" + ipsecpolicy_id = _uuid() + data = {'ipsecpolicy': {'name': 'ipsecpolicy1', + 'description': 'myipsecpolicy1', + 'auth_algorithm': 'sha1', + 'encryption_algorithm': 'aes-128', + 'encapsulation_mode': 'tunnel', + 'lifetime': { + 'units': 'seconds', + 'value': 3600}, + 'transform_protocol': 'esp', + 'pfs': 'group5', + 'tenant_id': _uuid()}} + return_value = copy.copy(data['ipsecpolicy']) + return_value.update({'id': ipsecpolicy_id}) + + instance = self.plugin.return_value + instance.create_ipsecpolicy.return_value = return_value + res = self.api.post(_get_path('vpn/ipsecpolicies', fmt=self.fmt), + self.serialize(data), + content_type='application/%s' % self.fmt) + instance.create_ipsecpolicy.assert_called_with(mock.ANY, + ipsecpolicy=data) + self.assertEqual(res.status_int, exc.HTTPCreated.code) + res = self.deserialize(res) + self.assertIn('ipsecpolicy', res) + self.assertEqual(res['ipsecpolicy'], return_value) + + def test_ipsecpolicy_list(self): + """Test case to list an ipsecpolicy.""" + ipsecpolicy_id = _uuid() + return_value = [{'name': 'ipsecpolicy1', + 'auth_algorithm': 'sha1', + 'encryption_algorithm': 'aes-128', + 'pfs': 'group5', + 'id': ipsecpolicy_id}] + + instance = self.plugin.return_value + instance.get_ipsecpolicies.return_value = return_value + + res = self.api.get(_get_path('vpn/ipsecpolicies', fmt=self.fmt)) + + instance.get_ipsecpolicies.assert_called_with(mock.ANY, + fields=mock.ANY, + filters=mock.ANY) + self.assertEqual(res.status_int, exc.HTTPOk.code) + + def test_ipsecpolicy_update(self): + """Test case to update an ipsecpolicy.""" + 
ipsecpolicy_id = _uuid() + update_data = {'ipsecpolicy': {'name': 'ipsecpolicy1', + 'encryption_algorithm': 'aes-256'}} + return_value = {'name': 'ipsecpolicy1', + 'auth_algorithm': 'sha1', + 'encryption_algorithm': 'aes-128', + 'encapsulation_mode': 'tunnel', + 'lifetime': { + 'units': 'seconds', + 'value': 3600}, + 'transform_protocol': 'esp', + 'pfs': 'group5', + 'tenant_id': _uuid(), + 'id': ipsecpolicy_id} + + instance = self.plugin.return_value + instance.update_ipsecpolicy.return_value = return_value + + res = self.api.put(_get_path('vpn/ipsecpolicies', + id=ipsecpolicy_id, + fmt=self.fmt), + self.serialize(update_data)) + + instance.update_ipsecpolicy.assert_called_with(mock.ANY, + ipsecpolicy_id, + ipsecpolicy=update_data) + self.assertEqual(res.status_int, exc.HTTPOk.code) + res = self.deserialize(res) + self.assertIn('ipsecpolicy', res) + self.assertEqual(res['ipsecpolicy'], return_value) + + def test_ipsecpolicy_get(self): + """Test case to get or show an ipsecpolicy.""" + ipsecpolicy_id = _uuid() + return_value = {'name': 'ipsecpolicy1', + 'auth_algorithm': 'sha1', + 'encryption_algorithm': 'aes-128', + 'encapsulation_mode': 'tunnel', + 'lifetime': { + 'units': 'seconds', + 'value': 3600}, + 'transform_protocol': 'esp', + 'pfs': 'group5', + 'tenant_id': _uuid(), + 'id': ipsecpolicy_id} + + instance = self.plugin.return_value + instance.get_ipsecpolicy.return_value = return_value + + res = self.api.get(_get_path('vpn/ipsecpolicies', + id=ipsecpolicy_id, + fmt=self.fmt)) + + instance.get_ipsecpolicy.assert_called_with(mock.ANY, + ipsecpolicy_id, + fields=mock.ANY) + self.assertEqual(res.status_int, exc.HTTPOk.code) + res = self.deserialize(res) + self.assertIn('ipsecpolicy', res) + self.assertEqual(res['ipsecpolicy'], return_value) + + def test_ipsecpolicy_delete(self): + """Test case to delete an ipsecpolicy.""" + self._test_entity_delete('ipsecpolicy') + + def test_vpnservice_create(self): + """Test case to create a vpnservice.""" + vpnservice_id = 
_uuid() + data = {'vpnservice': {'name': 'vpnservice1', + 'description': 'descr_vpn1', + 'subnet_id': _uuid(), + 'router_id': _uuid(), + 'admin_state_up': True, + 'tenant_id': _uuid()}} + return_value = copy.copy(data['vpnservice']) + return_value.update({'status': "ACTIVE", 'id': vpnservice_id}) + + instance = self.plugin.return_value + instance.create_vpnservice.return_value = return_value + res = self.api.post(_get_path('vpn/vpnservices', fmt=self.fmt), + self.serialize(data), + content_type='application/%s' % self.fmt) + instance.create_vpnservice.assert_called_with(mock.ANY, + vpnservice=data) + self.assertEqual(res.status_int, exc.HTTPCreated.code) + res = self.deserialize(res) + self.assertIn('vpnservice', res) + self.assertEqual(res['vpnservice'], return_value) + + def test_vpnservice_list(self): + """Test case to list all vpnservices.""" + vpnservice_id = _uuid() + return_value = [{'name': 'vpnservice1', + 'tenant_id': _uuid(), + 'status': 'ACTIVE', + 'id': vpnservice_id}] + + instance = self.plugin.return_value + instance.get_vpnservice.return_value = return_value + + res = self.api.get(_get_path('vpn/vpnservices', fmt=self.fmt)) + + instance.get_vpnservices.assert_called_with(mock.ANY, + fields=mock.ANY, + filters=mock.ANY) + self.assertEqual(res.status_int, exc.HTTPOk.code) + + def test_vpnservice_update(self): + """Test case to update a vpnservice.""" + vpnservice_id = _uuid() + update_data = {'vpnservice': {'admin_state_up': False}} + return_value = {'name': 'vpnservice1', + 'admin_state_up': False, + 'subnet_id': _uuid(), + 'router_id': _uuid(), + 'tenant_id': _uuid(), + 'status': "ACTIVE", + 'id': vpnservice_id} + + instance = self.plugin.return_value + instance.update_vpnservice.return_value = return_value + + res = self.api.put(_get_path('vpn/vpnservices', + id=vpnservice_id, + fmt=self.fmt), + self.serialize(update_data)) + + instance.update_vpnservice.assert_called_with(mock.ANY, + vpnservice_id, + vpnservice=update_data) + 
self.assertEqual(res.status_int, exc.HTTPOk.code) + res = self.deserialize(res) + self.assertIn('vpnservice', res) + self.assertEqual(res['vpnservice'], return_value) + + def test_vpnservice_get(self): + """Test case to get or show a vpnservice.""" + vpnservice_id = _uuid() + return_value = {'name': 'vpnservice1', + 'admin_state_up': True, + 'subnet_id': _uuid(), + 'router_id': _uuid(), + 'tenant_id': _uuid(), + 'status': "ACTIVE", + 'id': vpnservice_id} + + instance = self.plugin.return_value + instance.get_vpnservice.return_value = return_value + + res = self.api.get(_get_path('vpn/vpnservices', + id=vpnservice_id, + fmt=self.fmt)) + + instance.get_vpnservice.assert_called_with(mock.ANY, + vpnservice_id, + fields=mock.ANY) + self.assertEqual(res.status_int, exc.HTTPOk.code) + res = self.deserialize(res) + self.assertIn('vpnservice', res) + self.assertEqual(res['vpnservice'], return_value) + + def test_vpnservice_delete(self): + """Test case to delete a vpnservice.""" + self._test_entity_delete('vpnservice') + + def test_ipsec_site_connection_create(self): + """Test case to create a ipsec_site_connection.""" + ipsecsite_con_id = _uuid() + ikepolicy_id = _uuid() + ipsecpolicy_id = _uuid() + data = { + 'ipsec_site_connection': {'name': 'connection1', + 'description': 'Remote-connection1', + 'peer_address': '192.168.1.10', + 'peer_id': '192.168.1.10', + 'peer_cidrs': ['192.168.2.0/24', + '192.168.3.0/24'], + 'mtu': 1500, + 'psk': 'abcd', + 'initiator': 'bi-directional', + 'dpd': { + 'action': 'hold', + 'interval': 30, + 'timeout': 120}, + 'ikepolicy_id': ikepolicy_id, + 'ipsecpolicy_id': ipsecpolicy_id, + 'vpnservice_id': _uuid(), + 'admin_state_up': True, + 'tenant_id': _uuid()} + } + return_value = copy.copy(data['ipsec_site_connection']) + return_value.update({'status': "ACTIVE", 'id': ipsecsite_con_id}) + + instance = self.plugin.return_value + instance.create_ipsec_site_connection.return_value = return_value + res = 
self.api.post(_get_path('vpn/ipsec-site-connections', + fmt=self.fmt), + self.serialize(data), + content_type='application/%s' % self.fmt) + instance.create_ipsec_site_connection.assert_called_with( + mock.ANY, ipsec_site_connection=data + ) + self.assertEqual(res.status_int, exc.HTTPCreated.code) + res = self.deserialize(res) + self.assertIn('ipsec_site_connection', res) + self.assertEqual(res['ipsec_site_connection'], return_value) + + def test_ipsec_site_connection_list(self): + """Test case to list all ipsec_site_connections.""" + ipsecsite_con_id = _uuid() + return_value = [{'name': 'connection1', + 'peer_address': '192.168.1.10', + 'peer_cidrs': ['192.168.2.0/24', '192.168.3.0/24'], + 'route_mode': 'static', + 'auth_mode': 'psk', + 'tenant_id': _uuid(), + 'status': 'ACTIVE', + 'id': ipsecsite_con_id}] + + instance = self.plugin.return_value + instance.get_ipsec_site_connections.return_value = return_value + + res = self.api.get( + _get_path('vpn/ipsec-site-connections', fmt=self.fmt)) + + instance.get_ipsec_site_connections.assert_called_with( + mock.ANY, fields=mock.ANY, filters=mock.ANY + ) + self.assertEqual(res.status_int, exc.HTTPOk.code) + + def test_ipsec_site_connection_update(self): + """Test case to update a ipsec_site_connection.""" + ipsecsite_con_id = _uuid() + update_data = {'ipsec_site_connection': {'admin_state_up': False}} + return_value = {'name': 'connection1', + 'description': 'Remote-connection1', + 'peer_address': '192.168.1.10', + 'peer_id': '192.168.1.10', + 'peer_cidrs': ['192.168.2.0/24', '192.168.3.0/24'], + 'mtu': 1500, + 'psk': 'abcd', + 'initiator': 'bi-directional', + 'dpd': { + 'action': 'hold', + 'interval': 30, + 'timeout': 120}, + 'ikepolicy_id': _uuid(), + 'ipsecpolicy_id': _uuid(), + 'vpnservice_id': _uuid(), + 'admin_state_up': False, + 'tenant_id': _uuid(), + 'status': 'ACTIVE', + 'id': ipsecsite_con_id} + + instance = self.plugin.return_value + instance.update_ipsec_site_connection.return_value = return_value + + res = 
self.api.put(_get_path('vpn/ipsec-site-connections', + id=ipsecsite_con_id, + fmt=self.fmt), + self.serialize(update_data)) + + instance.update_ipsec_site_connection.assert_called_with( + mock.ANY, ipsecsite_con_id, ipsec_site_connection=update_data + ) + + self.assertEqual(res.status_int, exc.HTTPOk.code) + res = self.deserialize(res) + self.assertIn('ipsec_site_connection', res) + self.assertEqual(res['ipsec_site_connection'], return_value) + + def test_ipsec_site_connection_get(self): + """Test case to get or show a ipsec_site_connection.""" + ipsecsite_con_id = _uuid() + return_value = {'name': 'connection1', + 'description': 'Remote-connection1', + 'peer_address': '192.168.1.10', + 'peer_id': '192.168.1.10', + 'peer_cidrs': ['192.168.2.0/24', + '192.168.3.0/24'], + 'mtu': 1500, + 'psk': 'abcd', + 'initiator': 'bi-directional', + 'dpd': { + 'action': 'hold', + 'interval': 30, + 'timeout': 120}, + 'ikepolicy_id': _uuid(), + 'ipsecpolicy_id': _uuid(), + 'vpnservice_id': _uuid(), + 'admin_state_up': True, + 'tenant_id': _uuid(), + 'status': 'ACTIVE', + 'id': ipsecsite_con_id} + + instance = self.plugin.return_value + instance.get_ipsec_site_connection.return_value = return_value + + res = self.api.get(_get_path('vpn/ipsec-site-connections', + id=ipsecsite_con_id, + fmt=self.fmt)) + + instance.get_ipsec_site_connection.assert_called_with( + mock.ANY, ipsecsite_con_id, fields=mock.ANY + ) + self.assertEqual(res.status_int, exc.HTTPOk.code) + res = self.deserialize(res) + self.assertIn('ipsec_site_connection', res) + self.assertEqual(res['ipsec_site_connection'], return_value) + + def test_ipsec_site_connection_delete(self): + """Test case to delete a ipsec_site_connection.""" + self._test_entity_delete('ipsec_site_connection') + + +class VpnaasExtensionTestCaseXML(VpnaasExtensionTestCase): + fmt = 'xml' diff --git a/neutron/tests/unit/test_agent_config.py b/neutron/tests/unit/test_agent_config.py new file mode 100644 index 000000000..e2408e0ce --- /dev/null +++ 
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from neutron.agent.common import config
from neutron.tests import base


def test_setup_conf():
    """setup_conf() must default state_path under /var/lib/neutron."""
    conf = config.setup_conf()
    assert conf.state_path.endswith('/var/lib/neutron')


class TestRootHelper(base.BaseTestCase):
    """Resolution order of the agent root_helper option.

    Covers: an override in the [AGENT] group, an override in the default
    group, and the built-in default of 'sudo'.
    """

    def test_agent_root_helper(self):
        # Override placed in the 'AGENT' option group is honored.
        conf = config.setup_conf()
        config.register_root_helper(conf)
        conf.set_override('root_helper', 'my_root_helper', 'AGENT')
        self.assertEqual(config.get_root_helper(conf), 'my_root_helper')

    def test_root_helper(self):
        # Override placed in the default group is honored too.
        conf = config.setup_conf()
        config.register_root_helper(conf)
        conf.set_override('root_helper', 'my_root_helper')
        self.assertEqual(config.get_root_helper(conf), 'my_root_helper')

    def test_root_default(self):
        # With no override, the registered default is 'sudo'.
        conf = config.setup_conf()
        config.register_root_helper(conf)
        self.assertEqual(config.get_root_helper(conf), 'sudo')
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright (c) 2013 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import copy
import time

from oslo.config import cfg
from webob import exc

from neutron.api.v2 import attributes
from neutron.common import constants
from neutron.common import topics
from neutron import context
from neutron.db import agents_db
from neutron.db import db_base_plugin_v2
from neutron.extensions import agent
from neutron.openstack.common import log as logging
from neutron.openstack.common import timeutils
from neutron.openstack.common import uuidutils
from neutron.tests.unit import test_api_v2
from neutron.tests.unit import test_db_plugin


LOG = logging.getLogger(__name__)

_uuid = uuidutils.generate_uuid
_get_path = test_api_v2._get_path
# Well-known host names used when registering fake agents below.  Note that
# L3_HOSTA / DHCP_HOSTA / LBAAS_HOSTA intentionally share the value 'hosta'.
L3_HOSTA = 'hosta'
DHCP_HOSTA = 'hosta'
L3_HOSTB = 'hostb'
DHCP_HOSTC = 'hostc'
DHCP_HOST1 = 'host1'
LBAAS_HOSTA = 'hosta'
LBAAS_HOSTB = 'hostb'


class AgentTestExtensionManager(object):
    """Minimal extension manager exposing only the agent extension."""

    def get_resources(self):
        # Add the resources to the global attribute map
        # This is done here as the setup process won't
        # initialize the main API router which extends
        # the global attribute map
        attributes.RESOURCE_ATTRIBUTE_MAP.update(
            agent.RESOURCE_ATTRIBUTE_MAP)
        return agent.Agent.get_resources()

    def get_actions(self):
        return []

    def get_request_extensions(self):
        return []


# This plugin class is just for testing
class TestAgentPlugin(db_base_plugin_v2.NeutronDbPluginV2,
                      agents_db.AgentDbMixin):
    supported_extension_aliases = ["agent"]


class AgentDBTestMixIn(object):
    """Helpers that register fake agents through the report_state RPC."""

    def _list_agents(self, expected_res_status=None,
                     neutron_context=None,
                     query_string=None):
        # Thin wrapper over _list() with an optional status assertion.
        agent_res = self._list('agents',
                               neutron_context=neutron_context,
                               query_params=query_string)
        if expected_res_status:
            self.assertEqual(agent_res.status_int, expected_res_status)
        return agent_res

    def _register_agent_states(self, lbaas_agents=False):
        """Register two L3 agents and two DHCP agents."""
        l3_hosta = {
            'binary': 'neutron-l3-agent',
            'host': L3_HOSTA,
            'topic': topics.L3_AGENT,
            'configurations': {'use_namespaces': True,
                               'router_id': None,
                               'handle_internal_only_routers':
                               True,
                               'gateway_external_network_id':
                               None,
                               'interface_driver': 'interface_driver',
                               },
            'agent_type': constants.AGENT_TYPE_L3}
        l3_hostb = copy.deepcopy(l3_hosta)
        l3_hostb['host'] = L3_HOSTB
        dhcp_hosta = {
            'binary': 'neutron-dhcp-agent',
            'host': DHCP_HOSTA,
            'topic': 'DHCP_AGENT',
            'configurations': {'dhcp_driver': 'dhcp_driver',
                               'use_namespaces': True,
                               },
            'agent_type': constants.AGENT_TYPE_DHCP}
        dhcp_hostc = copy.deepcopy(dhcp_hosta)
        dhcp_hostc['host'] = DHCP_HOSTC
        lbaas_hosta = {
            'binary': 'neutron-loadbalancer-agent',
            'host': LBAAS_HOSTA,
            'topic': 'LOADBALANCER_AGENT',
            'configurations': {'device_drivers': ['haproxy_ns']},
            'agent_type': constants.AGENT_TYPE_LOADBALANCER}
        lbaas_hostb = copy.deepcopy(lbaas_hosta)
        lbaas_hostb['host'] = LBAAS_HOSTB
        # Registration goes through the same RPC callback the real agents
        # use, so the DB rows carry proper heartbeat timestamps.
        callback = agents_db.AgentExtRpcCallback()
        callback.report_state(self.adminContext,
                              agent_state={'agent_state': l3_hosta},
                              time=timeutils.strtime())
        callback.report_state(self.adminContext,
                              agent_state={'agent_state': l3_hostb},
                              time=timeutils.strtime())
        callback.report_state(self.adminContext,
                              agent_state={'agent_state': dhcp_hosta},
                              time=timeutils.strtime())
        callback.report_state(self.adminContext,
                              agent_state={'agent_state': dhcp_hostc},
                              time=timeutils.strtime())

        res = [l3_hosta, l3_hostb, dhcp_hosta, dhcp_hostc]
        if lbaas_agents:
            callback.report_state(self.adminContext,
                                  agent_state={'agent_state': lbaas_hosta},
                                  time=timeutils.strtime())
            callback.report_state(self.adminContext,
                                  agent_state={'agent_state': lbaas_hostb},
                                  time=timeutils.strtime())
            res += [lbaas_hosta, lbaas_hostb]

        return res

    def _register_one_dhcp_agent(self):
        """Register one DHCP agent."""
        dhcp_host = {
            'binary': 'neutron-dhcp-agent',
            'host': DHCP_HOST1,
            'topic': 'DHCP_AGENT',
            'configurations': {'dhcp_driver': 'dhcp_driver',
                               'use_namespaces': True,
                               },
            'agent_type': constants.AGENT_TYPE_DHCP}
        callback = agents_db.AgentExtRpcCallback()
        callback.report_state(self.adminContext,
                              agent_state={'agent_state': dhcp_host},
                              time=timeutils.strtime())
        return [dhcp_host]

    def _register_one_l3_agent(self, host=L3_HOSTA, internal_only=True,
                               ext_net_id='', ext_bridge=''):
        # Register a single L3 agent with configurable gateway settings.
        l3 = {
            'binary': 'neutron-l3-agent',
            'host': host,
            'topic': topics.L3_AGENT,
            'configurations': {'use_namespaces': True,
                               'router_id': None,
                               'handle_internal_only_routers': internal_only,
                               'external_network_bridge': ext_bridge,
                               'gateway_external_network_id': ext_net_id,
                               'interface_driver': 'interface_driver',
                               },
            'agent_type': constants.AGENT_TYPE_L3}
        callback = agents_db.AgentExtRpcCallback()
        callback.report_state(self.adminContext,
                              agent_state={'agent_state': l3},
                              time=timeutils.strtime())


class AgentDBTestCase(AgentDBTestMixIn,
                      test_db_plugin.NeutronDbPluginV2TestCase):
    fmt = 'json'

    def setUp(self):
        self.adminContext = context.get_admin_context()
        plugin = 'neutron.tests.unit.test_agent_ext_plugin.TestAgentPlugin'
        # for these tests we need to enable overlapping ips
        cfg.CONF.set_default('allow_overlapping_ips', True)
        # Save the original RESOURCE_ATTRIBUTE_MAP
        self.saved_attr_map = {}
        for resource, attrs in attributes.RESOURCE_ATTRIBUTE_MAP.iteritems():
            self.saved_attr_map[resource] = attrs.copy()
        ext_mgr = AgentTestExtensionManager()
        self.addCleanup(self.restore_resource_attribute_map)
        super(AgentDBTestCase, self).setUp(plugin=plugin, ext_mgr=ext_mgr)

    def restore_resource_attribute_map(self):
        # Restore the original RESOURCE_ATTRIBUTE_MAP
        attributes.RESOURCE_ATTRIBUTE_MAP = self.saved_attr_map

    def test_create_agent(self):
        # Agents are created via report_state only; POST must be rejected.
        data = {'agent': {}}
        _req = self.new_create_request('agents', data, self.fmt)
        _req.environ['neutron.context'] = context.Context(
            '', 'tenant_id')
        res = _req.get_response(self.ext_api)
        self.assertEqual(res.status_int, exc.HTTPBadRequest.code)

    def test_list_agent(self):
        agents = self._register_agent_states()
        res = self._list('agents')
        # Spot-check that DHCP agent configurations round-trip via the API.
        for agent in res['agents']:
            if (agent['host'] == DHCP_HOSTA and
                agent['agent_type'] == constants.AGENT_TYPE_DHCP):
                self.assertEqual(
                    'dhcp_driver',
                    agent['configurations']['dhcp_driver'])
                break
        self.assertEqual(len(agents), len(res['agents']))

    def test_show_agent(self):
        self._register_agent_states()
        agents = self._list_agents(
            query_string='binary=neutron-l3-agent')
        self.assertEqual(2, len(agents['agents']))
        agent = self._show('agents', agents['agents'][0]['id'])
        self.assertEqual('neutron-l3-agent', agent['agent']['binary'])

    def test_update_agent(self):
        self._register_agent_states()
        agents = self._list_agents(
            query_string='binary=neutron-l3-agent&host=' + L3_HOSTB)
        self.assertEqual(1, len(agents['agents']))
        com_id = agents['agents'][0]['id']
        agent = self._show('agents', com_id)
        new_agent = {}
        new_agent['agent'] = {}
        new_agent['agent']['admin_state_up'] = False
        new_agent['agent']['description'] = 'description'
        self._update('agents', com_id, new_agent)
        agent = self._show('agents', com_id)
        self.assertFalse(agent['agent']['admin_state_up'])
        self.assertEqual('description', agent['agent']['description'])

    def test_dead_agent(self):
        # With a 1s down time, sleeping 1.5s must mark the agent not alive.
        cfg.CONF.set_override('agent_down_time', 1)
        self._register_agent_states()
        time.sleep(1.5)
        agents = self._list_agents(
            query_string='binary=neutron-l3-agent&host=' + L3_HOSTB)
        self.assertFalse(agents['agents'][0]['alive'])


class AgentDBTestCaseXML(AgentDBTestCase):
    # Re-run the whole suite through the XML serializer.
    fmt = 'xml'
# Copyright 2012, VMware, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import fixtures
import mock
import testtools

from neutron.agent.linux import utils
from neutron.tests import base


class FakeCreateProcess(object):
    """Stand-in for the (process, cmd) object utils.create_process builds."""
    class FakeStdin(object):
        def close(self):
            pass

    def __init__(self, returncode):
        self.returncode = returncode
        self.stdin = self.FakeStdin()

    def communicate(self, process_input=None):
        # Always returns empty stdout/stderr regardless of input.
        return '', ''


class AgentUtilsExecuteTest(base.BaseTestCase):
    """utils.execute() behavior with subprocess communication mocked out."""

    def setUp(self):
        super(AgentUtilsExecuteTest, self).setUp()
        self.root_helper = "echo"
        self.test_file = self.useFixture(
            fixtures.TempDir()).join("test_execute.tmp")
        open(self.test_file, 'w').close()
        # Only Popen.communicate is patched; Popen itself still runs.
        self.mock_popen_p = mock.patch("subprocess.Popen.communicate")
        self.mock_popen = self.mock_popen_p.start()

    def test_without_helper(self):
        expected = "%s\n" % self.test_file
        self.mock_popen.return_value = [expected, ""]
        result = utils.execute(["ls", self.test_file])
        self.assertEqual(result, expected)

    def test_with_helper(self):
        expected = "ls %s\n" % self.test_file
        self.mock_popen.return_value = [expected, ""]
        result = utils.execute(["ls", self.test_file],
                               self.root_helper)
        self.assertEqual(result, expected)

    def test_stderr_true(self):
        # return_stderr=True yields a (stdout, stderr) tuple.
        expected = "%s\n" % self.test_file
        self.mock_popen.return_value = [expected, ""]
        out = utils.execute(["ls", self.test_file], return_stderr=True)
        self.assertIsInstance(out, tuple)
        self.assertEqual(out, (expected, ""))

    def test_check_exit_code(self):
        # check_exit_code=False suppresses the failure of a bad path.
        self.mock_popen.return_value = ["", ""]
        stdout = utils.execute(["ls", self.test_file[:-1]],
                               check_exit_code=False)
        self.assertEqual(stdout, "")

    def test_execute_raises(self):
        self.mock_popen.side_effect = RuntimeError
        self.assertRaises(RuntimeError, utils.execute,
                          ["ls", self.test_file[:-1]])

    def test_process_input(self):
        expected = "%s\n" % self.test_file[:-1]
        self.mock_popen.return_value = [expected, ""]
        result = utils.execute(["cat"], process_input="%s\n" %
                               self.test_file[:-1])
        self.assertEqual(result, expected)

    def test_with_addl_env(self):
        expected = "%s\n" % self.test_file
        self.mock_popen.return_value = [expected, ""]
        result = utils.execute(["ls", self.test_file],
                               addl_env={'foo': 'bar'})
        self.assertEqual(result, expected)

    def test_return_code_log_error_raise_runtime(self):
        # Non-zero return code: logged at error level and raised.
        with mock.patch.object(utils, 'create_process') as create_process:
            create_process.return_value = FakeCreateProcess(1), 'ls'
            with mock.patch.object(utils, 'LOG') as log:
                self.assertRaises(RuntimeError, utils.execute,
                                  ['ls'])
                self.assertTrue(log.error.called)

    def test_return_code_log_error_no_raise_runtime(self):
        # Non-zero return code with check_exit_code=False: logged, no raise.
        with mock.patch.object(utils, 'create_process') as create_process:
            create_process.return_value = FakeCreateProcess(1), 'ls'
            with mock.patch.object(utils, 'LOG') as log:
                utils.execute(['ls'], check_exit_code=False)
                self.assertTrue(log.error.called)

    def test_return_code_log_debug(self):
        # Zero return code is logged only at debug level.
        with mock.patch.object(utils, 'create_process') as create_process:
            create_process.return_value = FakeCreateProcess(0), 'ls'
            with mock.patch.object(utils, 'LOG') as log:
                utils.execute(['ls'])
                self.assertTrue(log.debug.called)


class AgentUtilsGetInterfaceMAC(base.BaseTestCase):
    def test_get_interface_mac(self):
        # The MAC sits at bytes 18..23 of the ioctl(SIOCGIFHWADDR) buffer.
        expect_val = '01:02:03:04:05:06'
        with mock.patch('fcntl.ioctl') as ioctl:
            ioctl.return_value = ''.join(['\x00' * 18,
                                          '\x01\x02\x03\x04\x05\x06',
                                          '\x00' * 232])
            actual_val = utils.get_interface_mac('eth0')
        self.assertEqual(actual_val, expect_val)


class AgentUtilsReplaceFile(base.BaseTestCase):
    def test_replace_file(self):
        # make file to replace
        with mock.patch('tempfile.NamedTemporaryFile') as ntf:
            ntf.return_value.name = '/baz'
            with mock.patch('os.chmod') as chmod:
                with mock.patch('os.rename') as rename:
                    utils.replace_file('/foo', 'bar')

                    expected = [mock.call('w+', dir='/', delete=False),
                                mock.call().write('bar'),
                                mock.call().close()]

                    ntf.assert_has_calls(expected)
                    chmod.assert_called_once_with('/baz', 0o644)
                    rename.assert_called_once_with('/baz', '/foo')


class TestFindChildPids(base.BaseTestCase):
    """utils.find_child_pids: parsing and error handling around ps output."""

    def test_returns_empty_list_for_exit_code_1(self):
        # 'Exit code: 1' from ps (no children) is swallowed, not raised.
        with mock.patch.object(utils, 'execute',
                               side_effect=RuntimeError('Exit code: 1')):
            self.assertEqual(utils.find_child_pids(-1), [])

    def test_returns_empty_list_for_no_output(self):
        with mock.patch.object(utils, 'execute', return_value=''):
            self.assertEqual(utils.find_child_pids(-1), [])

    def test_returns_list_of_child_process_ids_for_good_ouput(self):
        # (sic: "ouput" typo retained to preserve the published test id)
        with mock.patch.object(utils, 'execute', return_value=' 123 \n 185\n'):
            self.assertEqual(utils.find_child_pids(-1), ['123', '185'])

    def test_raises_unknown_exception(self):
        with testtools.ExpectedException(RuntimeError):
            with mock.patch.object(utils, 'execute',
                                   side_effect=RuntimeError()):
                utils.find_child_pids(-1)
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright (c) 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import mock

from neutron.agent.linux import interface
from neutron.agent import netns_cleanup_util as util
from neutron.tests import base


class TestNetnsCleanup(base.BaseTestCase):
    """Unit tests for the netns cleanup utility (all externals mocked)."""

    def test_setup_conf(self):
        expected_opts = interface.OPTS
        conf = util.setup_conf()
        self.assertTrue(all([opt.name in conf for opt in expected_opts]))

    def test_kill_dhcp(self, dhcp_active=True):
        conf = mock.Mock()
        # Fix: the original had a stray trailing comma which assigned the
        # tuple ('sudo',) instead of the string 'sudo'.
        conf.AGENT.root_helper = 'sudo'
        conf.dhcp_driver = 'driver'

        method_to_patch = 'neutron.openstack.common.importutils.import_object'

        with mock.patch(method_to_patch) as import_object:
            driver = mock.Mock()
            driver.active = dhcp_active
            import_object.return_value = driver

            util.kill_dhcp(conf, 'ns')

            expected_params = {'conf': conf, 'network': mock.ANY,
                               'root_helper': conf.AGENT.root_helper,
                               'plugin': mock.ANY}
            import_object.assert_called_once_with('driver', **expected_params)

            # Only an active driver gets disable()d.
            if dhcp_active:
                driver.assert_has_calls([mock.call.disable()])
            else:
                self.assertFalse(driver.called)

    def test_kill_dhcp_no_active(self):
        self.test_kill_dhcp(False)

    def test_eligible_for_deletion_ns_not_uuid(self):
        # Namespaces without a qrouter-/qdhcp-<uuid> name are never touched.
        ns = 'not_a_uuid'
        self.assertFalse(util.eligible_for_deletion(mock.Mock(), ns))

    def _test_eligible_for_deletion_helper(self, prefix, force, is_empty,
                                           expected):
        ns = prefix + '6e322ac7-ab50-4f53-9cdc-d1d3c1164b6d'
        conf = mock.Mock()

        with mock.patch('neutron.agent.linux.ip_lib.IPWrapper') as ip_wrap:
            ip_wrap.return_value.namespace_is_empty.return_value = is_empty
            self.assertEqual(util.eligible_for_deletion(conf, ns, force),
                             expected)

            expected_calls = [mock.call(conf.AGENT.root_helper, ns)]
            # The emptiness check is skipped when deletion is forced.
            if not force:
                expected_calls.append(mock.call().namespace_is_empty())
            ip_wrap.assert_has_calls(expected_calls)

    def test_eligible_for_deletion_empty(self):
        self._test_eligible_for_deletion_helper('qrouter-', False, True, True)

    def test_eligible_for_deletion_not_empty(self):
        self._test_eligible_for_deletion_helper('qdhcp-', False, False, False)

    def test_eligible_for_deletion_not_empty_forced(self):
        self._test_eligible_for_deletion_helper('qdhcp-', True, False, True)

    def test_unplug_device_regular_device(self):
        conf = mock.Mock()
        device = mock.Mock()

        util.unplug_device(conf, device)
        device.assert_has_calls([mock.call.link.delete()])

    def test_unplug_device_ovs_port(self):
        # If link.delete() fails, the device is treated as an OVS port and
        # removed from its bridge instead.
        conf = mock.Mock()
        conf.ovs_integration_bridge = 'br-int'

        device = mock.Mock()
        device.name = 'tap1'
        device.link.delete.side_effect = RuntimeError

        with mock.patch('neutron.agent.linux.ovs_lib.OVSBridge') as ovs_br_cls:
            br_patch = mock.patch(
                'neutron.agent.linux.ovs_lib.get_bridge_for_iface')
            with br_patch as mock_get_bridge_for_iface:
                mock_get_bridge_for_iface.return_value = 'br-int'
                ovs_bridge = mock.Mock()
                ovs_br_cls.return_value = ovs_bridge

                util.unplug_device(conf, device)

                mock_get_bridge_for_iface.assert_called_once_with(
                    conf.AGENT.root_helper, 'tap1')
                ovs_br_cls.assert_called_once_with('br-int',
                                                   conf.AGENT.root_helper)
                ovs_bridge.assert_has_calls(
                    [mock.call.delete_port(device.name)])

    def test_unplug_device_cannot_determine_bridge_port(self):
        # Unknown bridge: the failure is logged at debug and nothing is
        # deleted.
        conf = mock.Mock()
        conf.ovs_integration_bridge = 'br-int'

        device = mock.Mock()
        device.name = 'tap1'
        device.link.delete.side_effect = RuntimeError

        with mock.patch('neutron.agent.linux.ovs_lib.OVSBridge') as ovs_br_cls:
            br_patch = mock.patch(
                'neutron.agent.linux.ovs_lib.get_bridge_for_iface')
            with br_patch as mock_get_bridge_for_iface:
                with mock.patch.object(util.LOG, 'debug') as debug:
                    mock_get_bridge_for_iface.return_value = None
                    ovs_bridge = mock.Mock()
                    ovs_br_cls.return_value = ovs_bridge

                    util.unplug_device(conf, device)

                    mock_get_bridge_for_iface.assert_called_once_with(
                        conf.AGENT.root_helper, 'tap1')
                    self.assertEqual(ovs_br_cls.mock_calls, [])
                    self.assertTrue(debug.called)

    def _test_destroy_namespace_helper(self, force, num_devices):
        ns = 'qrouter-6e322ac7-ab50-4f53-9cdc-d1d3c1164b6d'
        conf = mock.Mock()

        # 'lo' is always present and must never be unplugged.
        lo_device = mock.Mock()
        lo_device.name = 'lo'

        devices = [lo_device]

        while num_devices:
            dev = mock.Mock()
            dev.name = 'tap%d' % num_devices
            devices.append(dev)
            num_devices -= 1

        with mock.patch('neutron.agent.linux.ip_lib.IPWrapper') as ip_wrap:
            ip_wrap.return_value.get_devices.return_value = devices
            ip_wrap.return_value.netns.exists.return_value = True

            with mock.patch.object(util, 'unplug_device') as unplug:

                with mock.patch.object(util, 'kill_dhcp') as kill_dhcp:
                    util.destroy_namespace(conf, ns, force)
                    expected = [mock.call(conf.AGENT.root_helper, ns)]

                    # Forced destruction additionally kills DHCP and unplugs
                    # every non-loopback device before collecting the ns.
                    if force:
                        expected.extend([
                            mock.call().netns.exists(ns),
                            mock.call().get_devices(exclude_loopback=True)])
                        self.assertTrue(kill_dhcp.called)
                        unplug.assert_has_calls(
                            [mock.call(conf, d) for d in
                             devices[1:]])

                    expected.append(mock.call().garbage_collect_namespace())
                    ip_wrap.assert_has_calls(expected)

    def test_destroy_namespace_empty(self):
        self._test_destroy_namespace_helper(False, 0)

    def test_destroy_namespace_not_empty(self):
        self._test_destroy_namespace_helper(False, 1)

    def test_destroy_namespace_not_empty_forced(self):
        self._test_destroy_namespace_helper(True, 2)

    def test_destroy_namespace_exception(self):
        # Any failure while destroying must be swallowed, not propagated.
        ns = 'qrouter-6e322ac7-ab50-4f53-9cdc-d1d3c1164b6d'
        conf = mock.Mock()
        conf.AGENT.root_helper = 'sudo'
        with mock.patch('neutron.agent.linux.ip_lib.IPWrapper') as ip_wrap:
            ip_wrap.side_effect = Exception()
            util.destroy_namespace(conf, ns)

    def test_main(self):
        namespaces = ['ns1', 'ns2']
        with mock.patch('neutron.agent.linux.ip_lib.IPWrapper') as ip_wrap:
            ip_wrap.get_namespaces.return_value = namespaces

            with mock.patch('eventlet.sleep') as eventlet_sleep:
                conf = mock.Mock()
                conf.force = False
                methods_to_mock = dict(
                    eligible_for_deletion=mock.DEFAULT,
                    destroy_namespace=mock.DEFAULT,
                    setup_conf=mock.DEFAULT)

                with mock.patch.multiple(util, **methods_to_mock) as mocks:
                    mocks['eligible_for_deletion'].return_value = True
                    mocks['setup_conf'].return_value = conf
                    with mock.patch('neutron.common.config.setup_logging'):
                        util.main()

                    mocks['eligible_for_deletion'].assert_has_calls(
                        [mock.call(conf, 'ns1', False),
                         mock.call(conf, 'ns2', False)])

                    mocks['destroy_namespace'].assert_has_calls(
                        [mock.call(conf, 'ns1', False),
                         mock.call(conf, 'ns2', False)])

                    ip_wrap.assert_has_calls(
                        [mock.call.get_namespaces(conf.AGENT.root_helper)])

                # main() pauses once before destroying candidate namespaces.
                eventlet_sleep.assert_called_once_with(2)

    def test_main_no_candidates(self):
        namespaces = ['ns1', 'ns2']
        with mock.patch('neutron.agent.linux.ip_lib.IPWrapper') as ip_wrap:
            ip_wrap.get_namespaces.return_value = namespaces

            with mock.patch('eventlet.sleep') as eventlet_sleep:
                conf = mock.Mock()
                conf.force = False
                methods_to_mock = dict(
                    eligible_for_deletion=mock.DEFAULT,
                    destroy_namespace=mock.DEFAULT,
                    setup_conf=mock.DEFAULT)

                with mock.patch.multiple(util, **methods_to_mock) as mocks:
                    mocks['eligible_for_deletion'].return_value = False
                    mocks['setup_conf'].return_value = conf
                    with mock.patch('neutron.common.config.setup_logging'):
                        util.main()

                    ip_wrap.assert_has_calls(
                        [mock.call.get_namespaces(conf.AGENT.root_helper)])

                    mocks['eligible_for_deletion'].assert_has_calls(
                        [mock.call(conf, 'ns1', False),
                         mock.call(conf, 'ns2', False)])

                    self.assertFalse(mocks['destroy_namespace'].called)

                # No candidates: no sleep, no destruction.
                self.assertFalse(eventlet_sleep.called)
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright (c) 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import contextlib
import itertools
import mock

from neutron.agent.linux import ip_lib
from neutron.agent.linux import ovs_lib
from neutron.agent import ovs_cleanup_util as util
from neutron.openstack.common import uuidutils
from neutron.tests import base


class TestOVSCleanup(base.BaseTestCase):
    """Unit tests for the OVS port cleanup utility (all externals mocked)."""

    def test_setup_conf(self):
        # Verify the utility's default configuration values.
        conf = util.setup_conf()
        self.assertEqual(conf.external_network_bridge, 'br-ex')
        self.assertEqual(conf.ovs_integration_bridge, 'br-int')
        self.assertFalse(conf.ovs_all_ports)
        self.assertEqual(conf.AGENT.root_helper, 'sudo')

    def test_main(self):
        # End-to-end flow with every collaborator stubbed: collect ports on
        # all bridges, delete the neutron-owned ones.
        bridges = ['br-int', 'br-ex']
        ports = ['p1', 'p2', 'p3']
        conf = mock.Mock()
        conf.AGENT.root_helper = 'dummy_sudo'
        conf.ovs_all_ports = False
        conf.ovs_integration_bridge = 'br-int'
        conf.external_network_bridge = 'br-ex'
        with contextlib.nested(
            mock.patch('neutron.common.config.setup_logging'),
            mock.patch('neutron.agent.ovs_cleanup_util.setup_conf',
                       return_value=conf),
            mock.patch('neutron.agent.linux.ovs_lib.get_bridges',
                       return_value=bridges),
            mock.patch('neutron.agent.linux.ovs_lib.OVSBridge'),
            mock.patch.object(util, 'collect_neutron_ports',
                              return_value=ports),
            mock.patch.object(util, 'delete_neutron_ports')
        ) as (_log, _conf, _get, ovs, collect, delete):
            with mock.patch('neutron.common.config.setup_logging'):
                util.main()
                ovs.assert_has_calls([mock.call().delete_ports(
                    all_ports=False)])
            collect.assert_called_once_with(set(bridges), 'dummy_sudo')
            delete.assert_called_once_with(ports, 'dummy_sudo')

    def test_collect_neutron_ports(self):
        # Port names are gathered across bridges, preserving order.
        port1 = ovs_lib.VifPort('tap1234', 1, uuidutils.generate_uuid(),
                                '11:22:33:44:55:66', 'br')
        port2 = ovs_lib.VifPort('tap5678', 2, uuidutils.generate_uuid(),
                                '77:88:99:aa:bb:cc', 'br')
        port3 = ovs_lib.VifPort('tap90ab', 3, uuidutils.generate_uuid(),
                                '99:00:aa:bb:cc:dd', 'br')
        ports = [[port1, port2], [port3]]
        portnames = [p.port_name for p in itertools.chain(*ports)]
        with mock.patch('neutron.agent.linux.ovs_lib.OVSBridge') as ovs:
            ovs.return_value.get_vif_ports.side_effect = ports
            bridges = ['br-int', 'br-ex']
            ret = util.collect_neutron_ports(bridges, 'dummy_sudo')
            self.assertEqual(ret, portnames)

    def test_delete_neutron_ports(self):
        # Only devices that still exist are deleted (tap5678 is skipped).
        ports = ['tap1234', 'tap5678', 'tap09ab']
        port_found = [True, False, True]
        with contextlib.nested(
            mock.patch.object(ip_lib, 'device_exists',
                              side_effect=port_found),
            mock.patch.object(ip_lib, 'IPDevice')
        ) as (device_exists, ip_dev):
            util.delete_neutron_ports(ports, 'dummy_sudo')
            device_exists.assert_has_calls([mock.call(p) for p in ports])
            ip_dev.assert_has_calls(
                [mock.call('tap1234', 'dummy_sudo'),
                 mock.call().link.delete(),
                 mock.call('tap09ab', 'dummy_sudo'),
                 mock.call().link.delete()])
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright (c) 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import mock + +from neutron.agent import rpc +from neutron.openstack.common import context +from neutron.tests import base + + +class AgentRPCPluginApi(base.BaseTestCase): + def _test_rpc_call(self, method): + agent = rpc.PluginApi('fake_topic') + ctxt = context.RequestContext('fake_user', 'fake_project') + expect_val = 'foo' + with mock.patch('neutron.common.rpc_compat.RpcProxy.call') as rpc_call: + rpc_call.return_value = expect_val + func_obj = getattr(agent, method) + if method == 'tunnel_sync': + actual_val = func_obj(ctxt, 'fake_tunnel_ip') + else: + actual_val = func_obj(ctxt, 'fake_device', 'fake_agent_id') + self.assertEqual(actual_val, expect_val) + + def test_get_device_details(self): + self._test_rpc_call('get_device_details') + + def test_update_device_down(self): + self._test_rpc_call('update_device_down') + + def test_tunnel_sync(self): + self._test_rpc_call('tunnel_sync') + + +class AgentPluginReportState(base.BaseTestCase): + def test_plugin_report_state_use_call(self): + topic = 'test' + reportStateAPI = rpc.PluginReportStateAPI(topic) + expected_agent_state = {'agent': 'test'} + with mock.patch.object(reportStateAPI, 'call') as call: + ctxt = context.RequestContext('fake_user', 'fake_project') + reportStateAPI.report_state(ctxt, expected_agent_state, + use_call=True) + self.assertEqual(call.call_args[0][0], ctxt) + self.assertEqual(call.call_args[0][1]['method'], + 'report_state') + self.assertEqual(call.call_args[0][1]['args']['agent_state'], + {'agent_state': expected_agent_state}) + 
self.assertIsInstance(call.call_args[0][1]['args']['time'], + str) + self.assertEqual(call.call_args[1]['topic'], topic) + + def test_plugin_report_state_cast(self): + topic = 'test' + reportStateAPI = rpc.PluginReportStateAPI(topic) + expected_agent_state = {'agent': 'test'} + with mock.patch.object(reportStateAPI, 'cast') as cast: + ctxt = context.RequestContext('fake_user', 'fake_project') + reportStateAPI.report_state(ctxt, expected_agent_state) + self.assertEqual(cast.call_args[0][0], ctxt) + self.assertEqual(cast.call_args[0][1]['method'], + 'report_state') + self.assertEqual(cast.call_args[0][1]['args']['agent_state'], + {'agent_state': expected_agent_state}) + self.assertIsInstance(cast.call_args[0][1]['args']['time'], + str) + self.assertEqual(cast.call_args[1]['topic'], topic) + + +class AgentRPCMethods(base.BaseTestCase): + def test_create_consumers(self): + endpoints = [mock.Mock()] + expected = [ + mock.call(new=True), + mock.call().create_consumer('foo-topic-op', endpoints, + fanout=True), + mock.call().consume_in_threads() + ] + + call_to_patch = 'neutron.common.rpc_compat.create_connection' + with mock.patch(call_to_patch) as create_connection: + rpc.create_consumers(endpoints, 'foo', [('topic', 'op')]) + create_connection.assert_has_calls(expected) + + def test_create_consumers_with_node_name(self): + endpoints = [mock.Mock()] + expected = [ + mock.call(new=True), + mock.call().create_consumer('foo-topic-op', endpoints, + fanout=True), + mock.call().create_consumer('foo-topic-op.node1', endpoints, + fanout=False), + mock.call().consume_in_threads() + ] + + call_to_patch = 'neutron.common.rpc_compat.create_connection' + with mock.patch(call_to_patch) as create_connection: + rpc.create_consumers(endpoints, 'foo', [('topic', 'op', 'node1')]) + create_connection.assert_has_calls(expected) diff --git a/neutron/tests/unit/test_api_api_common.py b/neutron/tests/unit/test_api_api_common.py new file mode 100644 index 000000000..622ddc8f7 --- /dev/null +++ 
b/neutron/tests/unit/test_api_api_common.py @@ -0,0 +1,99 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2013 Intel Corporation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Zhongyue Luo, Intel Corporation. +# + +from testtools import matchers +from webob import exc + +from neutron.api import api_common as common +from neutron.tests import base + + +class FakeController(common.NeutronController): + _resource_name = 'fake' + + +class APICommonTestCase(base.BaseTestCase): + def setUp(self): + super(APICommonTestCase, self).setUp() + self.controller = FakeController(None) + + def test_prepare_request_body(self): + body = { + 'fake': { + 'name': 'terminator', + 'model': 'T-800', + } + } + params = [ + {'param-name': 'name', + 'required': True}, + {'param-name': 'model', + 'required': True}, + {'param-name': 'quote', + 'required': False, + 'default-value': "i'll be back"}, + ] + expect = { + 'fake': { + 'name': 'terminator', + 'model': 'T-800', + 'quote': "i'll be back", + } + } + actual = self.controller._prepare_request_body(body, params) + self.assertThat(expect, matchers.Equals(actual)) + + def test_prepare_request_body_none(self): + body = None + params = [ + {'param-name': 'quote', + 'required': False, + 'default-value': "I'll be back"}, + ] + expect = { + 'fake': { + 'quote': "I'll be back", + } + } + actual = self.controller._prepare_request_body(body, params) + self.assertThat(expect, 
matchers.Equals(actual)) + + def test_prepare_request_body_keyerror(self): + body = {'t2': {}} + params = [] + self.assertRaises(exc.HTTPBadRequest, + self.controller._prepare_request_body, + body, + params) + + def test_prepare_request_param_value_none(self): + body = { + 'fake': { + 'name': None, + } + } + params = [ + {'param-name': 'name', + 'required': True}, + ] + self.assertRaises(exc.HTTPBadRequest, + self.controller._prepare_request_body, + body, + params) diff --git a/neutron/tests/unit/test_api_v2.py b/neutron/tests/unit/test_api_v2.py new file mode 100644 index 000000000..38d54f7ca --- /dev/null +++ b/neutron/tests/unit/test_api_v2.py @@ -0,0 +1,1541 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2012 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import os + +import mock +from oslo.config import cfg +import six.moves.urllib.parse as urlparse +import webob +from webob import exc +import webtest + +from neutron.api import api_common +from neutron.api import extensions +from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api +from neutron.api.v2 import attributes +from neutron.api.v2 import base as v2_base +from neutron.api.v2 import router +from neutron.common import exceptions as n_exc +from neutron import context +from neutron import manager +from neutron.openstack.common import policy as common_policy +from neutron.openstack.common import uuidutils +from neutron import policy +from neutron import quota +from neutron.tests import base +from neutron.tests import fake_notifier +from neutron.tests.unit import testlib_api + + +ROOTDIR = os.path.dirname(os.path.dirname(__file__)) +EXTDIR = os.path.join(ROOTDIR, 'unit/extensions') + +_uuid = uuidutils.generate_uuid + + +def _get_path(resource, id=None, action=None, fmt=None): + path = '/%s' % resource + + if id is not None: + path = path + '/%s' % id + + if action is not None: + path = path + '/%s' % action + + if fmt is not None: + path = path + '.%s' % fmt + + return path + + +class ResourceIndexTestCase(base.BaseTestCase): + def test_index_json(self): + index = webtest.TestApp(router.Index({'foo': 'bar'})) + res = index.get('') + + self.assertIn('resources', res.json) + self.assertEqual(len(res.json['resources']), 1) + + resource = res.json['resources'][0] + self.assertIn('collection', resource) + self.assertEqual(resource['collection'], 'bar') + + self.assertIn('name', resource) + self.assertEqual(resource['name'], 'foo') + + self.assertIn('links', resource) + self.assertEqual(len(resource['links']), 1) + + link = resource['links'][0] + self.assertIn('href', link) + self.assertEqual(link['href'], 'http://localhost/bar') + self.assertIn('rel', link) + self.assertEqual(link['rel'], 'self') + + +class APIv2TestBase(base.BaseTestCase): + def setUp(self): 
+ super(APIv2TestBase, self).setUp() + + plugin = 'neutron.neutron_plugin_base_v2.NeutronPluginBaseV2' + # Ensure existing ExtensionManager is not used + extensions.PluginAwareExtensionManager._instance = None + # Create the default configurations + self.config_parse() + # Update the plugin + self.setup_coreplugin(plugin) + cfg.CONF.set_override('allow_pagination', True) + cfg.CONF.set_override('allow_sorting', True) + self._plugin_patcher = mock.patch(plugin, autospec=True) + self.plugin = self._plugin_patcher.start() + instance = self.plugin.return_value + instance._NeutronPluginBaseV2__native_pagination_support = True + instance._NeutronPluginBaseV2__native_sorting_support = True + + api = router.APIRouter() + self.api = webtest.TestApp(api) + + quota.QUOTAS._driver = None + cfg.CONF.set_override('quota_driver', 'neutron.quota.ConfDriver', + group='QUOTAS') + + +class _ArgMatcher(object): + """An adapter to assist mock assertions, used to custom compare.""" + + def __init__(self, cmp, obj): + self.cmp = cmp + self.obj = obj + + def __eq__(self, other): + return self.cmp(self.obj, other) + + +def _list_cmp(l1, l2): + return set(l1) == set(l2) + + +class APIv2TestCase(APIv2TestBase): + def _do_field_list(self, resource, base_fields): + attr_info = attributes.RESOURCE_ATTRIBUTE_MAP[resource] + policy_attrs = [name for (name, info) in attr_info.items() + if info.get('required_by_policy') or + info.get('primary_key')] + fields = base_fields + fields.extend(policy_attrs) + return fields + + def _get_collection_kwargs(self, skipargs=[], **kwargs): + args_list = ['filters', 'fields', 'sorts', 'limit', 'marker', + 'page_reverse'] + args_dict = dict((arg, mock.ANY) + for arg in set(args_list) - set(skipargs)) + args_dict.update(kwargs) + return args_dict + + def test_fields(self): + instance = self.plugin.return_value + instance.get_networks.return_value = [] + + self.api.get(_get_path('networks'), {'fields': 'foo'}) + fields = self._do_field_list('networks', ['foo']) + 
kwargs = self._get_collection_kwargs(fields=fields) + instance.get_networks.assert_called_once_with(mock.ANY, **kwargs) + + def test_fields_multiple(self): + instance = self.plugin.return_value + instance.get_networks.return_value = [] + + fields = self._do_field_list('networks', ['foo', 'bar']) + self.api.get(_get_path('networks'), {'fields': ['foo', 'bar']}) + kwargs = self._get_collection_kwargs(fields=fields) + instance.get_networks.assert_called_once_with(mock.ANY, **kwargs) + + def test_fields_multiple_with_empty(self): + instance = self.plugin.return_value + instance.get_networks.return_value = [] + + fields = self._do_field_list('networks', ['foo']) + self.api.get(_get_path('networks'), {'fields': ['foo', '']}) + kwargs = self._get_collection_kwargs(fields=fields) + instance.get_networks.assert_called_once_with(mock.ANY, **kwargs) + + def test_fields_empty(self): + instance = self.plugin.return_value + instance.get_networks.return_value = [] + + self.api.get(_get_path('networks'), {'fields': ''}) + kwargs = self._get_collection_kwargs(fields=[]) + instance.get_networks.assert_called_once_with(mock.ANY, **kwargs) + + def test_fields_multiple_empty(self): + instance = self.plugin.return_value + instance.get_networks.return_value = [] + + self.api.get(_get_path('networks'), {'fields': ['', '']}) + kwargs = self._get_collection_kwargs(fields=[]) + instance.get_networks.assert_called_once_with(mock.ANY, **kwargs) + + def test_filters(self): + instance = self.plugin.return_value + instance.get_networks.return_value = [] + + self.api.get(_get_path('networks'), {'name': 'bar'}) + filters = {'name': ['bar']} + kwargs = self._get_collection_kwargs(filters=filters) + instance.get_networks.assert_called_once_with(mock.ANY, **kwargs) + + def test_filters_empty(self): + instance = self.plugin.return_value + instance.get_networks.return_value = [] + + self.api.get(_get_path('networks'), {'name': ''}) + filters = {} + kwargs = self._get_collection_kwargs(filters=filters) + 
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs) + + def test_filters_multiple_empty(self): + instance = self.plugin.return_value + instance.get_networks.return_value = [] + + self.api.get(_get_path('networks'), {'name': ['', '']}) + filters = {} + kwargs = self._get_collection_kwargs(filters=filters) + instance.get_networks.assert_called_once_with(mock.ANY, **kwargs) + + def test_filters_multiple_with_empty(self): + instance = self.plugin.return_value + instance.get_networks.return_value = [] + + self.api.get(_get_path('networks'), {'name': ['bar', '']}) + filters = {'name': ['bar']} + kwargs = self._get_collection_kwargs(filters=filters) + instance.get_networks.assert_called_once_with(mock.ANY, **kwargs) + + def test_filters_multiple_values(self): + instance = self.plugin.return_value + instance.get_networks.return_value = [] + + self.api.get(_get_path('networks'), {'name': ['bar', 'bar2']}) + filters = {'name': ['bar', 'bar2']} + kwargs = self._get_collection_kwargs(filters=filters) + instance.get_networks.assert_called_once_with(mock.ANY, **kwargs) + + def test_filters_multiple(self): + instance = self.plugin.return_value + instance.get_networks.return_value = [] + + self.api.get(_get_path('networks'), {'name': 'bar', + 'tenant_id': 'bar2'}) + filters = {'name': ['bar'], 'tenant_id': ['bar2']} + kwargs = self._get_collection_kwargs(filters=filters) + instance.get_networks.assert_called_once_with(mock.ANY, **kwargs) + + def test_filters_with_fields(self): + instance = self.plugin.return_value + instance.get_networks.return_value = [] + + self.api.get(_get_path('networks'), {'name': 'bar', 'fields': 'foo'}) + filters = {'name': ['bar']} + fields = self._do_field_list('networks', ['foo']) + kwargs = self._get_collection_kwargs(filters=filters, fields=fields) + instance.get_networks.assert_called_once_with(mock.ANY, **kwargs) + + def test_filters_with_convert_to(self): + instance = self.plugin.return_value + instance.get_ports.return_value = [] + + 
self.api.get(_get_path('ports'), {'admin_state_up': 'true'}) + filters = {'admin_state_up': [True]} + kwargs = self._get_collection_kwargs(filters=filters) + instance.get_ports.assert_called_once_with(mock.ANY, **kwargs) + + def test_filters_with_convert_list_to(self): + instance = self.plugin.return_value + instance.get_ports.return_value = [] + + self.api.get(_get_path('ports'), + {'fixed_ips': ['ip_address=foo', 'subnet_id=bar']}) + filters = {'fixed_ips': {'ip_address': ['foo'], 'subnet_id': ['bar']}} + kwargs = self._get_collection_kwargs(filters=filters) + instance.get_ports.assert_called_once_with(mock.ANY, **kwargs) + + def test_limit(self): + instance = self.plugin.return_value + instance.get_networks.return_value = [] + + self.api.get(_get_path('networks'), + {'limit': '10'}) + kwargs = self._get_collection_kwargs(limit=10) + instance.get_networks.assert_called_once_with(mock.ANY, **kwargs) + + def test_limit_with_great_than_max_limit(self): + cfg.CONF.set_default('pagination_max_limit', '1000') + instance = self.plugin.return_value + instance.get_networks.return_value = [] + + self.api.get(_get_path('networks'), + {'limit': '1001'}) + kwargs = self._get_collection_kwargs(limit=1000) + instance.get_networks.assert_called_once_with(mock.ANY, **kwargs) + + def test_limit_with_zero(self): + cfg.CONF.set_default('pagination_max_limit', '1000') + instance = self.plugin.return_value + instance.get_networks.return_value = [] + + self.api.get(_get_path('networks'), {'limit': '0'}) + kwargs = self._get_collection_kwargs(limit=1000) + instance.get_networks.assert_called_once_with(mock.ANY, **kwargs) + + def test_limit_with_unspecific(self): + cfg.CONF.set_default('pagination_max_limit', '1000') + instance = self.plugin.return_value + instance.get_networks.return_value = [] + + self.api.get(_get_path('networks')) + kwargs = self._get_collection_kwargs(limit=1000) + instance.get_networks.assert_called_once_with(mock.ANY, **kwargs) + + def 
test_limit_with_negative_value(self): + cfg.CONF.set_default('pagination_max_limit', '1000') + instance = self.plugin.return_value + instance.get_networks.return_value = [] + + res = self.api.get(_get_path('networks'), {'limit': -1}, + expect_errors=True) + self.assertEqual(res.status_int, exc.HTTPBadRequest.code) + + def test_limit_with_non_integer(self): + instance = self.plugin.return_value + instance.get_networks.return_value = [] + + res = self.api.get(_get_path('networks'), + {'limit': 'abc'}, expect_errors=True) + self.assertEqual(res.status_int, exc.HTTPBadRequest.code) + + def test_limit_with_infinite_pagination_max_limit(self): + instance = self.plugin.return_value + instance.get_networks.return_value = [] + cfg.CONF.set_override('pagination_max_limit', 'Infinite') + self.api.get(_get_path('networks')) + kwargs = self._get_collection_kwargs(limit=None) + instance.get_networks.assert_called_once_with(mock.ANY, **kwargs) + + def test_limit_with_negative_pagination_max_limit(self): + instance = self.plugin.return_value + instance.get_networks.return_value = [] + cfg.CONF.set_default('pagination_max_limit', '-1') + self.api.get(_get_path('networks')) + kwargs = self._get_collection_kwargs(limit=None) + instance.get_networks.assert_called_once_with(mock.ANY, **kwargs) + + def test_limit_with_non_integer_pagination_max_limit(self): + instance = self.plugin.return_value + instance.get_networks.return_value = [] + cfg.CONF.set_default('pagination_max_limit', 'abc') + self.api.get(_get_path('networks')) + kwargs = self._get_collection_kwargs(limit=None) + instance.get_networks.assert_called_once_with(mock.ANY, **kwargs) + + def test_marker(self): + cfg.CONF.set_override('pagination_max_limit', '1000') + instance = self.plugin.return_value + instance.get_networks.return_value = [] + marker = _uuid() + self.api.get(_get_path('networks'), + {'marker': marker}) + kwargs = self._get_collection_kwargs(limit=1000, marker=marker) + 
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs) + + def test_page_reverse(self): + calls = [] + instance = self.plugin.return_value + instance.get_networks.return_value = [] + self.api.get(_get_path('networks'), + {'page_reverse': 'True'}) + kwargs = self._get_collection_kwargs(page_reverse=True) + calls.append(mock.call.get_networks(mock.ANY, **kwargs)) + instance.get_networks.assert_called_once_with(mock.ANY, **kwargs) + + instance = self.plugin.return_value + instance.get_networks.return_value = [] + self.api.get(_get_path('networks'), + {'page_reverse': 'False'}) + kwargs = self._get_collection_kwargs(page_reverse=False) + calls.append(mock.call.get_networks(mock.ANY, **kwargs)) + + def test_page_reverse_with_non_bool(self): + instance = self.plugin.return_value + instance.get_networks.return_value = [] + + self.api.get(_get_path('networks'), + {'page_reverse': 'abc'}) + kwargs = self._get_collection_kwargs(page_reverse=False) + instance.get_networks.assert_called_once_with(mock.ANY, **kwargs) + + def test_page_reverse_with_unspecific(self): + instance = self.plugin.return_value + instance.get_networks.return_value = [] + + self.api.get(_get_path('networks')) + kwargs = self._get_collection_kwargs(page_reverse=False) + instance.get_networks.assert_called_once_with(mock.ANY, **kwargs) + + def test_sort(self): + instance = self.plugin.return_value + instance.get_networks.return_value = [] + + self.api.get(_get_path('networks'), + {'sort_key': ['name', 'admin_state_up'], + 'sort_dir': ['desc', 'asc']}) + kwargs = self._get_collection_kwargs(sorts=[('name', False), + ('admin_state_up', True), + ('id', True)]) + instance.get_networks.assert_called_once_with(mock.ANY, **kwargs) + + def test_sort_with_primary_key(self): + instance = self.plugin.return_value + instance.get_networks.return_value = [] + + self.api.get(_get_path('networks'), + {'sort_key': ['name', 'admin_state_up', 'id'], + 'sort_dir': ['desc', 'asc', 'desc']}) + kwargs = 
self._get_collection_kwargs(sorts=[('name', False), + ('admin_state_up', True), + ('id', False)]) + instance.get_networks.assert_called_once_with(mock.ANY, **kwargs) + + def test_sort_without_direction(self): + instance = self.plugin.return_value + instance.get_networks.return_value = [] + + res = self.api.get(_get_path('networks'), {'sort_key': ['name']}, + expect_errors=True) + self.assertEqual(res.status_int, exc.HTTPBadRequest.code) + + def test_sort_with_invalid_attribute(self): + instance = self.plugin.return_value + instance.get_networks.return_value = [] + + res = self.api.get(_get_path('networks'), + {'sort_key': 'abc', + 'sort_dir': 'asc'}, + expect_errors=True) + self.assertEqual(res.status_int, exc.HTTPBadRequest.code) + + def test_sort_with_invalid_dirs(self): + instance = self.plugin.return_value + instance.get_networks.return_value = [] + + res = self.api.get(_get_path('networks'), + {'sort_key': 'name', + 'sort_dir': 'abc'}, + expect_errors=True) + self.assertEqual(res.status_int, exc.HTTPBadRequest.code) + + def test_emulated_sort(self): + instance = self.plugin.return_value + instance._NeutronPluginBaseV2__native_pagination_support = False + instance._NeutronPluginBaseV2__native_sorting_support = False + instance.get_networks.return_value = [] + api = webtest.TestApp(router.APIRouter()) + api.get(_get_path('networks'), {'sort_key': ['name', 'status'], + 'sort_dir': ['desc', 'asc']}) + kwargs = self._get_collection_kwargs( + skipargs=['sorts', 'limit', 'marker', 'page_reverse']) + instance.get_networks.assert_called_once_with(mock.ANY, **kwargs) + + def test_emulated_sort_without_sort_field(self): + instance = self.plugin.return_value + instance._NeutronPluginBaseV2__native_pagination_support = False + instance._NeutronPluginBaseV2__native_sorting_support = False + instance.get_networks.return_value = [] + api = webtest.TestApp(router.APIRouter()) + api.get(_get_path('networks'), {'sort_key': ['name', 'status'], + 'sort_dir': ['desc', 'asc'], + 
'fields': ['subnets']}) + kwargs = self._get_collection_kwargs( + skipargs=['sorts', 'limit', 'marker', 'page_reverse'], + fields=_ArgMatcher(_list_cmp, ['name', + 'status', + 'id', + 'subnets', + 'shared', + 'tenant_id'])) + instance.get_networks.assert_called_once_with(mock.ANY, **kwargs) + + def test_emulated_pagination(self): + instance = self.plugin.return_value + instance._NeutronPluginBaseV2__native_pagination_support = False + instance.get_networks.return_value = [] + api = webtest.TestApp(router.APIRouter()) + api.get(_get_path('networks'), {'limit': 10, + 'marker': 'foo', + 'page_reverse': False}) + kwargs = self._get_collection_kwargs(skipargs=['limit', + 'marker', + 'page_reverse']) + instance.get_networks.assert_called_once_with(mock.ANY, **kwargs) + + def test_native_pagination_without_native_sorting(self): + instance = self.plugin.return_value + instance._NeutronPluginBaseV2__native_sorting_support = False + self.assertRaises(n_exc.Invalid, router.APIRouter) + + def test_native_pagination_without_allow_sorting(self): + cfg.CONF.set_override('allow_sorting', False) + instance = self.plugin.return_value + instance.get_networks.return_value = [] + api = webtest.TestApp(router.APIRouter()) + api.get(_get_path('networks'), + {'sort_key': ['name', 'admin_state_up'], + 'sort_dir': ['desc', 'asc']}) + kwargs = self._get_collection_kwargs(sorts=[('name', False), + ('admin_state_up', True), + ('id', True)]) + instance.get_networks.assert_called_once_with(mock.ANY, **kwargs) + + +# Note: since all resources use the same controller and validation +# logic, we actually get really good coverage from testing just networks. 
+class JSONV2TestCase(APIv2TestBase, testlib_api.WebTestCase): + def setUp(self): + super(JSONV2TestCase, self).setUp() + + def _test_list(self, req_tenant_id, real_tenant_id): + env = {} + if req_tenant_id: + env = {'neutron.context': context.Context('', req_tenant_id)} + input_dict = {'id': uuidutils.generate_uuid(), + 'name': 'net1', + 'admin_state_up': True, + 'status': "ACTIVE", + 'tenant_id': real_tenant_id, + 'shared': False, + 'subnets': []} + return_value = [input_dict] + instance = self.plugin.return_value + instance.get_networks.return_value = return_value + + res = self.api.get(_get_path('networks', + fmt=self.fmt), extra_environ=env) + res = self.deserialize(res) + self.assertIn('networks', res) + if not req_tenant_id or req_tenant_id == real_tenant_id: + # expect full list returned + self.assertEqual(len(res['networks']), 1) + output_dict = res['networks'][0] + input_dict['shared'] = False + self.assertEqual(len(input_dict), len(output_dict)) + for k, v in input_dict.iteritems(): + self.assertEqual(v, output_dict[k]) + else: + # expect no results + self.assertEqual(len(res['networks']), 0) + + def test_list_noauth(self): + self._test_list(None, _uuid()) + + def test_list_keystone(self): + tenant_id = _uuid() + self._test_list(tenant_id, tenant_id) + + def test_list_keystone_bad(self): + tenant_id = _uuid() + self._test_list(tenant_id + "bad", tenant_id) + + def test_list_pagination(self): + id1 = str(_uuid()) + id2 = str(_uuid()) + input_dict1 = {'id': id1, + 'name': 'net1', + 'admin_state_up': True, + 'status': "ACTIVE", + 'tenant_id': '', + 'shared': False, + 'subnets': []} + input_dict2 = {'id': id2, + 'name': 'net2', + 'admin_state_up': True, + 'status': "ACTIVE", + 'tenant_id': '', + 'shared': False, + 'subnets': []} + return_value = [input_dict1, input_dict2] + instance = self.plugin.return_value + instance.get_networks.return_value = return_value + params = {'limit': ['2'], + 'marker': [str(_uuid())], + 'sort_key': ['name'], + 'sort_dir': 
['asc']} + res = self.api.get(_get_path('networks'), + params=params).json + + self.assertEqual(len(res['networks']), 2) + self.assertEqual(sorted([id1, id2]), + sorted([res['networks'][0]['id'], + res['networks'][1]['id']])) + + self.assertIn('networks_links', res) + next_links = [] + previous_links = [] + for r in res['networks_links']: + if r['rel'] == 'next': + next_links.append(r) + if r['rel'] == 'previous': + previous_links.append(r) + self.assertEqual(len(next_links), 1) + self.assertEqual(len(previous_links), 1) + + url = urlparse.urlparse(next_links[0]['href']) + self.assertEqual(url.path, _get_path('networks')) + params['marker'] = [id2] + self.assertEqual(urlparse.parse_qs(url.query), params) + + url = urlparse.urlparse(previous_links[0]['href']) + self.assertEqual(url.path, _get_path('networks')) + params['marker'] = [id1] + params['page_reverse'] = ['True'] + self.assertEqual(urlparse.parse_qs(url.query), params) + + def test_list_pagination_with_last_page(self): + id = str(_uuid()) + input_dict = {'id': id, + 'name': 'net1', + 'admin_state_up': True, + 'status': "ACTIVE", + 'tenant_id': '', + 'shared': False, + 'subnets': []} + return_value = [input_dict] + instance = self.plugin.return_value + instance.get_networks.return_value = return_value + params = {'limit': ['2'], + 'marker': str(_uuid())} + res = self.api.get(_get_path('networks'), + params=params).json + + self.assertEqual(len(res['networks']), 1) + self.assertEqual(id, res['networks'][0]['id']) + + self.assertIn('networks_links', res) + previous_links = [] + for r in res['networks_links']: + self.assertNotEqual(r['rel'], 'next') + if r['rel'] == 'previous': + previous_links.append(r) + self.assertEqual(len(previous_links), 1) + + url = urlparse.urlparse(previous_links[0]['href']) + self.assertEqual(url.path, _get_path('networks')) + expect_params = params.copy() + expect_params['marker'] = [id] + expect_params['page_reverse'] = ['True'] + self.assertEqual(urlparse.parse_qs(url.query), 
expect_params) + + def test_list_pagination_with_empty_page(self): + return_value = [] + instance = self.plugin.return_value + instance.get_networks.return_value = return_value + params = {'limit': ['2'], + 'marker': str(_uuid())} + res = self.api.get(_get_path('networks'), + params=params).json + + self.assertEqual(res['networks'], []) + + previous_links = [] + if 'networks_links' in res: + for r in res['networks_links']: + self.assertNotEqual(r['rel'], 'next') + if r['rel'] == 'previous': + previous_links.append(r) + self.assertEqual(len(previous_links), 1) + + url = urlparse.urlparse(previous_links[0]['href']) + self.assertEqual(url.path, _get_path('networks')) + expect_params = params.copy() + del expect_params['marker'] + expect_params['page_reverse'] = ['True'] + self.assertEqual(urlparse.parse_qs(url.query), expect_params) + + def test_list_pagination_reverse_with_last_page(self): + id = str(_uuid()) + input_dict = {'id': id, + 'name': 'net1', + 'admin_state_up': True, + 'status': "ACTIVE", + 'tenant_id': '', + 'shared': False, + 'subnets': []} + return_value = [input_dict] + instance = self.plugin.return_value + instance.get_networks.return_value = return_value + params = {'limit': ['2'], + 'marker': [str(_uuid())], + 'page_reverse': ['True']} + res = self.api.get(_get_path('networks'), + params=params).json + + self.assertEqual(len(res['networks']), 1) + self.assertEqual(id, res['networks'][0]['id']) + + self.assertIn('networks_links', res) + next_links = [] + for r in res['networks_links']: + self.assertNotEqual(r['rel'], 'previous') + if r['rel'] == 'next': + next_links.append(r) + self.assertEqual(len(next_links), 1) + + url = urlparse.urlparse(next_links[0]['href']) + self.assertEqual(url.path, _get_path('networks')) + expected_params = params.copy() + del expected_params['page_reverse'] + expected_params['marker'] = [id] + self.assertEqual(urlparse.parse_qs(url.query), + expected_params) + + def test_list_pagination_reverse_with_empty_page(self): + 
return_value = [] + instance = self.plugin.return_value + instance.get_networks.return_value = return_value + params = {'limit': ['2'], + 'marker': [str(_uuid())], + 'page_reverse': ['True']} + res = self.api.get(_get_path('networks'), + params=params).json + self.assertEqual(res['networks'], []) + + next_links = [] + if 'networks_links' in res: + for r in res['networks_links']: + self.assertNotEqual(r['rel'], 'previous') + if r['rel'] == 'next': + next_links.append(r) + self.assertEqual(len(next_links), 1) + + url = urlparse.urlparse(next_links[0]['href']) + self.assertEqual(url.path, _get_path('networks')) + expect_params = params.copy() + del expect_params['marker'] + del expect_params['page_reverse'] + self.assertEqual(urlparse.parse_qs(url.query), expect_params) + + def test_create(self): + net_id = _uuid() + data = {'network': {'name': 'net1', 'admin_state_up': True, + 'tenant_id': _uuid()}} + return_value = {'subnets': [], 'status': "ACTIVE", + 'id': net_id} + return_value.update(data['network'].copy()) + + instance = self.plugin.return_value + instance.create_network.return_value = return_value + instance.get_networks_count.return_value = 0 + + res = self.api.post(_get_path('networks', fmt=self.fmt), + self.serialize(data), + content_type='application/' + self.fmt) + self.assertEqual(res.status_int, exc.HTTPCreated.code) + res = self.deserialize(res) + self.assertIn('network', res) + net = res['network'] + self.assertEqual(net['id'], net_id) + self.assertEqual(net['status'], "ACTIVE") + + def test_create_use_defaults(self): + net_id = _uuid() + initial_input = {'network': {'name': 'net1', 'tenant_id': _uuid()}} + full_input = {'network': {'admin_state_up': True, + 'shared': False}} + full_input['network'].update(initial_input['network']) + + return_value = {'id': net_id, 'status': "ACTIVE"} + return_value.update(full_input['network']) + + instance = self.plugin.return_value + instance.create_network.return_value = return_value + 
instance.get_networks_count.return_value = 0 + + res = self.api.post(_get_path('networks', fmt=self.fmt), + self.serialize(initial_input), + content_type='application/' + self.fmt) + instance.create_network.assert_called_with(mock.ANY, + network=full_input) + self.assertEqual(res.status_int, exc.HTTPCreated.code) + res = self.deserialize(res) + self.assertIn('network', res) + net = res['network'] + self.assertEqual(net['id'], net_id) + self.assertEqual(net['admin_state_up'], True) + self.assertEqual(net['status'], "ACTIVE") + + def test_create_no_keystone_env(self): + data = {'name': 'net1'} + res = self.api.post(_get_path('networks', fmt=self.fmt), + self.serialize(data), + content_type='application/' + self.fmt, + expect_errors=True) + self.assertEqual(res.status_int, exc.HTTPBadRequest.code) + + def test_create_with_keystone_env(self): + tenant_id = _uuid() + net_id = _uuid() + env = {'neutron.context': context.Context('', tenant_id)} + # tenant_id should be fetched from env + initial_input = {'network': {'name': 'net1'}} + full_input = {'network': {'admin_state_up': True, + 'shared': False, 'tenant_id': tenant_id}} + full_input['network'].update(initial_input['network']) + + return_value = {'id': net_id, 'status': "ACTIVE"} + return_value.update(full_input['network']) + + instance = self.plugin.return_value + instance.create_network.return_value = return_value + instance.get_networks_count.return_value = 0 + + res = self.api.post(_get_path('networks', fmt=self.fmt), + self.serialize(initial_input), + content_type='application/' + self.fmt, + extra_environ=env) + + instance.create_network.assert_called_with(mock.ANY, + network=full_input) + self.assertEqual(res.status_int, exc.HTTPCreated.code) + + def test_create_bad_keystone_tenant(self): + tenant_id = _uuid() + data = {'network': {'name': 'net1', 'tenant_id': tenant_id}} + env = {'neutron.context': context.Context('', tenant_id + "bad")} + res = self.api.post(_get_path('networks', fmt=self.fmt), + 
self.serialize(data), + content_type='application/' + self.fmt, + expect_errors=True, + extra_environ=env) + self.assertEqual(res.status_int, exc.HTTPBadRequest.code) + + def test_create_no_body(self): + data = {'whoa': None} + res = self.api.post(_get_path('networks', fmt=self.fmt), + self.serialize(data), + content_type='application/' + self.fmt, + expect_errors=True) + self.assertEqual(res.status_int, exc.HTTPBadRequest.code) + + def test_create_no_resource(self): + data = {} + res = self.api.post(_get_path('networks', fmt=self.fmt), + self.serialize(data), + content_type='application/' + self.fmt, + expect_errors=True) + self.assertEqual(res.status_int, exc.HTTPBadRequest.code) + + def test_create_missing_attr(self): + data = {'port': {'what': 'who', 'tenant_id': _uuid()}} + res = self.api.post(_get_path('ports', fmt=self.fmt), + self.serialize(data), + content_type='application/' + self.fmt, + expect_errors=True) + self.assertEqual(res.status_int, 400) + + def test_create_readonly_attr(self): + data = {'network': {'name': 'net1', 'tenant_id': _uuid(), + 'status': "ACTIVE"}} + res = self.api.post(_get_path('networks', fmt=self.fmt), + self.serialize(data), + content_type='application/' + self.fmt, + expect_errors=True) + self.assertEqual(res.status_int, 400) + + def test_create_bulk(self): + data = {'networks': [{'name': 'net1', + 'admin_state_up': True, + 'tenant_id': _uuid()}, + {'name': 'net2', + 'admin_state_up': True, + 'tenant_id': _uuid()}]} + + def side_effect(context, network): + net = network.copy() + net['network'].update({'subnets': []}) + return net['network'] + + instance = self.plugin.return_value + instance.create_network.side_effect = side_effect + instance.get_networks_count.return_value = 0 + res = self.api.post(_get_path('networks', fmt=self.fmt), + self.serialize(data), + content_type='application/' + self.fmt) + self.assertEqual(res.status_int, exc.HTTPCreated.code) + + def test_create_bulk_no_networks(self): + data = {'networks': []} + 
res = self.api.post(_get_path('networks', fmt=self.fmt), + self.serialize(data), + content_type='application/' + self.fmt, + expect_errors=True) + self.assertEqual(res.status_int, exc.HTTPBadRequest.code) + + def test_create_bulk_missing_attr(self): + data = {'ports': [{'what': 'who', 'tenant_id': _uuid()}]} + res = self.api.post(_get_path('ports', fmt=self.fmt), + self.serialize(data), + content_type='application/' + self.fmt, + expect_errors=True) + self.assertEqual(res.status_int, 400) + + def test_create_bulk_partial_body(self): + data = {'ports': [{'device_id': 'device_1', + 'tenant_id': _uuid()}, + {'tenant_id': _uuid()}]} + res = self.api.post(_get_path('ports', fmt=self.fmt), + self.serialize(data), + content_type='application/' + self.fmt, + expect_errors=True) + self.assertEqual(res.status_int, 400) + + def test_create_attr_not_specified(self): + net_id = _uuid() + tenant_id = _uuid() + device_id = _uuid() + initial_input = {'port': {'name': '', 'network_id': net_id, + 'tenant_id': tenant_id, + 'device_id': device_id, + 'admin_state_up': True}} + full_input = {'port': {'admin_state_up': True, + 'mac_address': attributes.ATTR_NOT_SPECIFIED, + 'fixed_ips': attributes.ATTR_NOT_SPECIFIED, + 'device_owner': ''}} + full_input['port'].update(initial_input['port']) + return_value = {'id': _uuid(), 'status': 'ACTIVE', + 'admin_state_up': True, + 'mac_address': 'ca:fe:de:ad:be:ef', + 'device_id': device_id, + 'device_owner': ''} + return_value.update(initial_input['port']) + + instance = self.plugin.return_value + instance.get_network.return_value = {'tenant_id': unicode(tenant_id)} + instance.get_ports_count.return_value = 1 + instance.create_port.return_value = return_value + res = self.api.post(_get_path('ports', fmt=self.fmt), + self.serialize(initial_input), + content_type='application/' + self.fmt) + instance.create_port.assert_called_with(mock.ANY, port=full_input) + self.assertEqual(res.status_int, exc.HTTPCreated.code) + res = self.deserialize(res) + 
self.assertIn('port', res) + port = res['port'] + self.assertEqual(port['network_id'], net_id) + self.assertEqual(port['mac_address'], 'ca:fe:de:ad:be:ef') + + def test_create_return_extra_attr(self): + net_id = _uuid() + data = {'network': {'name': 'net1', 'admin_state_up': True, + 'tenant_id': _uuid()}} + return_value = {'subnets': [], 'status': "ACTIVE", + 'id': net_id, 'v2attrs:something': "123"} + return_value.update(data['network'].copy()) + + instance = self.plugin.return_value + instance.create_network.return_value = return_value + instance.get_networks_count.return_value = 0 + + res = self.api.post(_get_path('networks', fmt=self.fmt), + self.serialize(data), + content_type='application/' + self.fmt) + self.assertEqual(res.status_int, exc.HTTPCreated.code) + res = self.deserialize(res) + self.assertIn('network', res) + net = res['network'] + self.assertEqual(net['id'], net_id) + self.assertEqual(net['status'], "ACTIVE") + self.assertNotIn('v2attrs:something', net) + + def test_fields(self): + return_value = {'name': 'net1', 'admin_state_up': True, + 'subnets': []} + + instance = self.plugin.return_value + instance.get_network.return_value = return_value + + self.api.get(_get_path('networks', + id=uuidutils.generate_uuid(), + fmt=self.fmt)) + + def _test_delete(self, req_tenant_id, real_tenant_id, expected_code, + expect_errors=False): + env = {} + if req_tenant_id: + env = {'neutron.context': context.Context('', req_tenant_id)} + instance = self.plugin.return_value + instance.get_network.return_value = {'tenant_id': real_tenant_id, + 'shared': False} + instance.delete_network.return_value = None + + res = self.api.delete(_get_path('networks', + id=uuidutils.generate_uuid(), + fmt=self.fmt), + extra_environ=env, + expect_errors=expect_errors) + self.assertEqual(res.status_int, expected_code) + + def test_delete_noauth(self): + self._test_delete(None, _uuid(), exc.HTTPNoContent.code) + + def test_delete_keystone(self): + tenant_id = _uuid() + 
self._test_delete(tenant_id, tenant_id, exc.HTTPNoContent.code) + + def test_delete_keystone_bad_tenant(self): + tenant_id = _uuid() + self._test_delete(tenant_id + "bad", tenant_id, + exc.HTTPNotFound.code, expect_errors=True) + + def _test_get(self, req_tenant_id, real_tenant_id, expected_code, + expect_errors=False): + env = {} + shared = False + if req_tenant_id: + env = {'neutron.context': context.Context('', req_tenant_id)} + if req_tenant_id.endswith('another'): + shared = True + env['neutron.context'].roles = ['tenant_admin'] + + data = {'tenant_id': real_tenant_id, 'shared': shared} + instance = self.plugin.return_value + instance.get_network.return_value = data + + res = self.api.get(_get_path('networks', + id=uuidutils.generate_uuid(), + fmt=self.fmt), + extra_environ=env, + expect_errors=expect_errors) + self.assertEqual(res.status_int, expected_code) + return res + + def test_get_noauth(self): + self._test_get(None, _uuid(), 200) + + def test_get_keystone(self): + tenant_id = _uuid() + self._test_get(tenant_id, tenant_id, 200) + + def test_get_keystone_bad_tenant(self): + tenant_id = _uuid() + self._test_get(tenant_id + "bad", tenant_id, + exc.HTTPNotFound.code, expect_errors=True) + + def test_get_keystone_shared_network(self): + tenant_id = _uuid() + self._test_get(tenant_id + "another", tenant_id, 200) + + def test_get_keystone_strip_admin_only_attribute(self): + tenant_id = _uuid() + # Inject rule in policy engine + policy.init() + common_policy._rules['get_network:name'] = common_policy.parse_rule( + "rule:admin_only") + res = self._test_get(tenant_id, tenant_id, 200) + res = self.deserialize(res) + try: + self.assertNotIn('name', res['network']) + finally: + del common_policy._rules['get_network:name'] + + def _test_update(self, req_tenant_id, real_tenant_id, expected_code, + expect_errors=False): + env = {} + if req_tenant_id: + env = {'neutron.context': context.Context('', req_tenant_id)} + # leave out 'name' field intentionally + data = 
{'network': {'admin_state_up': True}} + return_value = {'subnets': []} + return_value.update(data['network'].copy()) + + instance = self.plugin.return_value + instance.get_network.return_value = {'tenant_id': real_tenant_id, + 'shared': False} + instance.update_network.return_value = return_value + + res = self.api.put(_get_path('networks', + id=uuidutils.generate_uuid(), + fmt=self.fmt), + self.serialize(data), + extra_environ=env, + expect_errors=expect_errors) + # Ensure id attribute is included in fields returned by GET call + # in update procedure. + self.assertEqual(1, instance.get_network.call_count) + self.assertIn('id', instance.get_network.call_args[1]['fields']) + self.assertEqual(res.status_int, expected_code) + + def test_update_noauth(self): + self._test_update(None, _uuid(), 200) + + def test_update_keystone(self): + tenant_id = _uuid() + self._test_update(tenant_id, tenant_id, 200) + + def test_update_keystone_bad_tenant(self): + tenant_id = _uuid() + self._test_update(tenant_id + "bad", tenant_id, + exc.HTTPNotFound.code, expect_errors=True) + + def test_update_readonly_field(self): + data = {'network': {'status': "NANANA"}} + res = self.api.put(_get_path('networks', id=_uuid()), + self.serialize(data), + content_type='application/' + self.fmt, + expect_errors=True) + self.assertEqual(res.status_int, 400) + + def test_invalid_attribute_field(self): + data = {'network': {'invalid_key1': "foo1", 'invalid_key2': "foo2"}} + res = self.api.put(_get_path('networks', id=_uuid()), + self.serialize(data), + content_type='application/' + self.fmt, + expect_errors=True) + self.assertEqual(res.status_int, 400) + + +class SubresourceTest(base.BaseTestCase): + def setUp(self): + super(SubresourceTest, self).setUp() + + plugin = 'neutron.tests.unit.test_api_v2.TestSubresourcePlugin' + extensions.PluginAwareExtensionManager._instance = None + + # Save the global RESOURCE_ATTRIBUTE_MAP + self.saved_attr_map = {} + for resource, attrs in 
attributes.RESOURCE_ATTRIBUTE_MAP.iteritems(): + self.saved_attr_map[resource] = attrs.copy() + + self.config_parse() + self.setup_coreplugin(plugin) + + self._plugin_patcher = mock.patch(plugin, autospec=True) + self.plugin = self._plugin_patcher.start() + + router.SUB_RESOURCES['dummy'] = { + 'collection_name': 'dummies', + 'parent': {'collection_name': 'networks', + 'member_name': 'network'} + } + attributes.RESOURCE_ATTRIBUTE_MAP['dummies'] = { + 'foo': {'allow_post': True, 'allow_put': True, + 'validate': {'type:string': None}, + 'default': '', 'is_visible': True}, + 'tenant_id': {'allow_post': True, 'allow_put': False, + 'validate': {'type:string': None}, + 'required_by_policy': True, + 'is_visible': True} + } + api = router.APIRouter() + self.api = webtest.TestApp(api) + + def tearDown(self): + router.SUB_RESOURCES = {} + # Restore the global RESOURCE_ATTRIBUTE_MAP + attributes.RESOURCE_ATTRIBUTE_MAP = self.saved_attr_map + super(SubresourceTest, self).tearDown() + + def test_index_sub_resource(self): + instance = self.plugin.return_value + + self.api.get('/networks/id1/dummies') + instance.get_network_dummies.assert_called_once_with(mock.ANY, + filters=mock.ANY, + fields=mock.ANY, + network_id='id1') + + def test_show_sub_resource(self): + instance = self.plugin.return_value + + dummy_id = _uuid() + self.api.get('/networks/id1' + _get_path('dummies', id=dummy_id)) + instance.get_network_dummy.assert_called_once_with(mock.ANY, + dummy_id, + network_id='id1', + fields=mock.ANY) + + def test_create_sub_resource(self): + instance = self.plugin.return_value + + body = {'dummy': {'foo': 'bar', 'tenant_id': _uuid()}} + self.api.post_json('/networks/id1/dummies', body) + instance.create_network_dummy.assert_called_once_with(mock.ANY, + network_id='id1', + dummy=body) + + def test_update_sub_resource(self): + instance = self.plugin.return_value + + dummy_id = _uuid() + body = {'dummy': {'foo': 'bar'}} + self.api.put_json('/networks/id1' + _get_path('dummies', 
id=dummy_id), + body) + instance.update_network_dummy.assert_called_once_with(mock.ANY, + dummy_id, + network_id='id1', + dummy=body) + + def test_delete_sub_resource(self): + instance = self.plugin.return_value + + dummy_id = _uuid() + self.api.delete('/networks/id1' + _get_path('dummies', id=dummy_id)) + instance.delete_network_dummy.assert_called_once_with(mock.ANY, + dummy_id, + network_id='id1') + + +# Note: since all resources use the same controller and validation +# logic, we actually get really good coverage from testing just networks. +class XMLV2TestCase(JSONV2TestCase): + fmt = 'xml' + + +class V2Views(base.BaseTestCase): + def _view(self, keys, collection, resource): + data = dict((key, 'value') for key in keys) + data['fake'] = 'value' + attr_info = attributes.RESOURCE_ATTRIBUTE_MAP[collection] + controller = v2_base.Controller(None, collection, resource, attr_info) + res = controller._view(context.get_admin_context(), data) + self.assertNotIn('fake', res) + for key in keys: + self.assertIn(key, res) + + def test_network(self): + keys = ('id', 'name', 'subnets', 'admin_state_up', 'status', + 'tenant_id') + self._view(keys, 'networks', 'network') + + def test_port(self): + keys = ('id', 'network_id', 'mac_address', 'fixed_ips', + 'device_id', 'admin_state_up', 'tenant_id', 'status') + self._view(keys, 'ports', 'port') + + def test_subnet(self): + keys = ('id', 'network_id', 'tenant_id', 'gateway_ip', + 'ip_version', 'cidr', 'enable_dhcp') + self._view(keys, 'subnets', 'subnet') + + +class NotificationTest(APIv2TestBase): + + def setUp(self): + super(NotificationTest, self).setUp() + fake_notifier.reset() + + def _resource_op_notifier(self, opname, resource, expected_errors=False): + initial_input = {resource: {'name': 'myname'}} + instance = self.plugin.return_value + instance.get_networks.return_value = initial_input + instance.get_networks_count.return_value = 0 + expected_code = exc.HTTPCreated.code + if opname == 'create': + 
initial_input[resource]['tenant_id'] = _uuid() + res = self.api.post_json( + _get_path('networks'), + initial_input, expect_errors=expected_errors) + if opname == 'update': + res = self.api.put_json( + _get_path('networks', id=_uuid()), + initial_input, expect_errors=expected_errors) + expected_code = exc.HTTPOk.code + if opname == 'delete': + initial_input[resource]['tenant_id'] = _uuid() + res = self.api.delete( + _get_path('networks', id=_uuid()), + expect_errors=expected_errors) + expected_code = exc.HTTPNoContent.code + + expected_events = ('.'.join([resource, opname, "start"]), + '.'.join([resource, opname, "end"])) + self.assertEqual(len(fake_notifier.NOTIFICATIONS), + len(expected_events)) + for msg, event in zip(fake_notifier.NOTIFICATIONS, expected_events): + self.assertEqual('INFO', msg['priority']) + self.assertEqual(event, msg['event_type']) + + self.assertEqual(res.status_int, expected_code) + + def test_network_create_notifer(self): + self._resource_op_notifier('create', 'network') + + def test_network_delete_notifer(self): + self._resource_op_notifier('delete', 'network') + + def test_network_update_notifer(self): + self._resource_op_notifier('update', 'network') + + +class DHCPNotificationTest(APIv2TestBase): + def _test_dhcp_notifier(self, opname, resource, initial_input=None): + instance = self.plugin.return_value + instance.get_networks.return_value = initial_input + instance.get_networks_count.return_value = 0 + expected_code = exc.HTTPCreated.code + with mock.patch.object(dhcp_rpc_agent_api.DhcpAgentNotifyAPI, + 'notify') as dhcp_notifier: + if opname == 'create': + res = self.api.post_json( + _get_path('networks'), + initial_input) + if opname == 'update': + res = self.api.put_json( + _get_path('networks', id=_uuid()), + initial_input) + expected_code = exc.HTTPOk.code + if opname == 'delete': + res = self.api.delete(_get_path('networks', id=_uuid())) + expected_code = exc.HTTPNoContent.code + expected_item = mock.call(mock.ANY, mock.ANY, + 
resource + "." + opname + ".end") + if initial_input and resource not in initial_input: + resource += 's' + num = len(initial_input[resource]) if initial_input and isinstance( + initial_input[resource], list) else 1 + expected = [expected_item for x in xrange(num)] + self.assertEqual(expected, dhcp_notifier.call_args_list) + self.assertEqual(num, dhcp_notifier.call_count) + self.assertEqual(expected_code, res.status_int) + + def test_network_create_dhcp_notifer(self): + input = {'network': {'name': 'net', + 'tenant_id': _uuid()}} + self._test_dhcp_notifier('create', 'network', input) + + def test_network_delete_dhcp_notifer(self): + self._test_dhcp_notifier('delete', 'network') + + def test_network_update_dhcp_notifer(self): + input = {'network': {'name': 'net'}} + self._test_dhcp_notifier('update', 'network', input) + + def test_networks_create_bulk_dhcp_notifer(self): + input = {'networks': [{'name': 'net1', + 'tenant_id': _uuid()}, + {'name': 'net2', + 'tenant_id': _uuid()}]} + self._test_dhcp_notifier('create', 'network', input) + + +class QuotaTest(APIv2TestBase): + def test_create_network_quota(self): + cfg.CONF.set_override('quota_network', 1, group='QUOTAS') + initial_input = {'network': {'name': 'net1', 'tenant_id': _uuid()}} + full_input = {'network': {'admin_state_up': True, 'subnets': []}} + full_input['network'].update(initial_input['network']) + + instance = self.plugin.return_value + instance.get_networks_count.return_value = 1 + res = self.api.post_json( + _get_path('networks'), initial_input, expect_errors=True) + instance.get_networks_count.assert_called_with(mock.ANY, + filters=mock.ANY) + self.assertIn("Quota exceeded for resources", + res.json['NeutronError']['message']) + + def test_create_network_quota_no_counts(self): + cfg.CONF.set_override('quota_network', 1, group='QUOTAS') + initial_input = {'network': {'name': 'net1', 'tenant_id': _uuid()}} + full_input = {'network': {'admin_state_up': True, 'subnets': []}} + 
full_input['network'].update(initial_input['network']) + + instance = self.plugin.return_value + instance.get_networks_count.side_effect = ( + NotImplementedError()) + instance.get_networks.return_value = ["foo"] + res = self.api.post_json( + _get_path('networks'), initial_input, expect_errors=True) + instance.get_networks_count.assert_called_with(mock.ANY, + filters=mock.ANY) + self.assertIn("Quota exceeded for resources", + res.json['NeutronError']['message']) + + def test_create_network_quota_without_limit(self): + cfg.CONF.set_override('quota_network', -1, group='QUOTAS') + initial_input = {'network': {'name': 'net1', 'tenant_id': _uuid()}} + instance = self.plugin.return_value + instance.get_networks_count.return_value = 3 + res = self.api.post_json( + _get_path('networks'), initial_input) + self.assertEqual(res.status_int, exc.HTTPCreated.code) + + +class ExtensionTestCase(base.BaseTestCase): + def setUp(self): + super(ExtensionTestCase, self).setUp() + plugin = 'neutron.neutron_plugin_base_v2.NeutronPluginBaseV2' + + # Ensure existing ExtensionManager is not used + extensions.PluginAwareExtensionManager._instance = None + + # Save the global RESOURCE_ATTRIBUTE_MAP + self.saved_attr_map = {} + for resource, attrs in attributes.RESOURCE_ATTRIBUTE_MAP.iteritems(): + self.saved_attr_map[resource] = attrs.copy() + + # Create the default configurations + self.config_parse() + + # Update the plugin and extensions path + self.setup_coreplugin(plugin) + cfg.CONF.set_override('api_extensions_path', EXTDIR) + + self._plugin_patcher = mock.patch(plugin, autospec=True) + self.plugin = self._plugin_patcher.start() + + # Instantiate mock plugin and enable the V2attributes extension + manager.NeutronManager.get_plugin().supported_extension_aliases = ( + ["v2attrs"]) + + api = router.APIRouter() + self.api = webtest.TestApp(api) + + quota.QUOTAS._driver = None + cfg.CONF.set_override('quota_driver', 'neutron.quota.ConfDriver', + group='QUOTAS') + + def tearDown(self): + 
super(ExtensionTestCase, self).tearDown() + self.api = None + self.plugin = None + # Restore the global RESOURCE_ATTRIBUTE_MAP + attributes.RESOURCE_ATTRIBUTE_MAP = self.saved_attr_map + + def test_extended_create(self): + net_id = _uuid() + initial_input = {'network': {'name': 'net1', 'tenant_id': _uuid(), + 'v2attrs:something_else': "abc"}} + data = {'network': {'admin_state_up': True, 'shared': False}} + data['network'].update(initial_input['network']) + + return_value = {'subnets': [], 'status': "ACTIVE", + 'id': net_id, + 'v2attrs:something': "123"} + return_value.update(data['network'].copy()) + + instance = self.plugin.return_value + instance.create_network.return_value = return_value + instance.get_networks_count.return_value = 0 + + res = self.api.post_json(_get_path('networks'), initial_input) + + instance.create_network.assert_called_with(mock.ANY, + network=data) + self.assertEqual(res.status_int, exc.HTTPCreated.code) + self.assertIn('network', res.json) + net = res.json['network'] + self.assertEqual(net['id'], net_id) + self.assertEqual(net['status'], "ACTIVE") + self.assertEqual(net['v2attrs:something'], "123") + self.assertNotIn('v2attrs:something_else', net) + + +class TestSubresourcePlugin(): + def get_network_dummies(self, context, network_id, + filters=None, fields=None): + return [] + + def get_network_dummy(self, context, id, network_id, + fields=None): + return {} + + def create_network_dummy(self, context, network_id, dummy): + return {} + + def update_network_dummy(self, context, id, network_id, dummy): + return {} + + def delete_network_dummy(self, context, id, network_id): + return + + +class ListArgsTestCase(base.BaseTestCase): + def test_list_args(self): + path = '/?fields=4&foo=3&fields=2&bar=1' + request = webob.Request.blank(path) + expect_val = ['2', '4'] + actual_val = api_common.list_args(request, 'fields') + self.assertEqual(sorted(actual_val), expect_val) + + def test_list_args_with_empty(self): + path = 
'/?foo=4&bar=3&baz=2&qux=1' + request = webob.Request.blank(path) + self.assertEqual([], api_common.list_args(request, 'fields')) + + +class FiltersTestCase(base.BaseTestCase): + def test_all_skip_args(self): + path = '/?fields=4&fields=3&fields=2&fields=1' + request = webob.Request.blank(path) + self.assertEqual({}, api_common.get_filters(request, None, + ["fields"])) + + def test_blank_values(self): + path = '/?foo=&bar=&baz=&qux=' + request = webob.Request.blank(path) + self.assertEqual({}, api_common.get_filters(request, {})) + + def test_no_attr_info(self): + path = '/?foo=4&bar=3&baz=2&qux=1' + request = webob.Request.blank(path) + expect_val = {'foo': ['4'], 'bar': ['3'], 'baz': ['2'], 'qux': ['1']} + actual_val = api_common.get_filters(request, {}) + self.assertEqual(actual_val, expect_val) + + def test_attr_info_without_conversion(self): + path = '/?foo=4&bar=3&baz=2&qux=1' + request = webob.Request.blank(path) + attr_info = {'foo': {'key': 'val'}} + expect_val = {'foo': ['4'], 'bar': ['3'], 'baz': ['2'], 'qux': ['1']} + actual_val = api_common.get_filters(request, attr_info) + self.assertEqual(actual_val, expect_val) + + def test_attr_info_with_convert_list_to(self): + path = '/?foo=key=4&bar=3&foo=key=2&qux=1' + request = webob.Request.blank(path) + attr_info = { + 'foo': { + 'convert_list_to': attributes.convert_kvp_list_to_dict, + } + } + expect_val = {'foo': {'key': ['2', '4']}, 'bar': ['3'], 'qux': ['1']} + actual_val = api_common.get_filters(request, attr_info) + self.assertEqual(actual_val, expect_val) + + def test_attr_info_with_convert_to(self): + path = '/?foo=4&bar=3&baz=2&qux=1' + request = webob.Request.blank(path) + attr_info = {'foo': {'convert_to': attributes.convert_to_int}} + expect_val = {'foo': [4], 'bar': ['3'], 'baz': ['2'], 'qux': ['1']} + actual_val = api_common.get_filters(request, attr_info) + self.assertEqual(actual_val, expect_val) + + +class CreateResourceTestCase(base.BaseTestCase): + def test_resource_creation(self): + 
resource = v2_base.create_resource('fakes', 'fake', None, {}) + self.assertIsInstance(resource, webob.dec.wsgify) diff --git a/neutron/tests/unit/test_api_v2_extension.py b/neutron/tests/unit/test_api_v2_extension.py new file mode 100644 index 000000000..856f8290c --- /dev/null +++ b/neutron/tests/unit/test_api_v2_extension.py @@ -0,0 +1,125 @@ +# Copyright 2014 Intel Corporation. +# Copyright 2014 Isaku Yamahata +# +# All Rights Reserved. +# +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Isaku Yamahata, Intel Corporation. 
+ +import uuid + +import mock +from oslo.config import cfg +from webob import exc +import webtest + +from neutron.api import extensions +from neutron.api.v2 import attributes +from neutron import quota +from neutron.tests.unit import test_api_v2 +from neutron.tests.unit import test_extensions +from neutron.tests.unit import testlib_api + + +class ExtensionTestCase(testlib_api.WebTestCase): + def _resotre_attr_map(self): + attributes.RESOURCE_ATTRIBUTE_MAP = self._saved_attr_map + + def _setUpExtension(self, plugin, service_type, + resource_attribute_map, extension_class, + resource_prefix, plural_mappings=None, + translate_resource_name=False, + allow_pagination=False, allow_sorting=False, + supported_extension_aliases=None, + use_quota=False, + ): + + self._resource_prefix = resource_prefix + self._plural_mappings = plural_mappings or {} + self._translate_resource_name = translate_resource_name + + # Ensure existing ExtensionManager is not used + extensions.PluginAwareExtensionManager._instance = None + + # Save the global RESOURCE_ATTRIBUTE_MAP + self._saved_attr_map = attributes.RESOURCE_ATTRIBUTE_MAP.copy() + # Restore the global RESOURCE_ATTRIBUTE_MAP + self.addCleanup(self._resotre_attr_map) + + # Create the default configurations + self.config_parse() + + #just stubbing core plugin with plugin + self.setup_coreplugin(plugin) + cfg.CONF.set_override('core_plugin', plugin) + if service_type: + cfg.CONF.set_override('service_plugins', [plugin]) + + self._plugin_patcher = mock.patch(plugin, autospec=True) + self.plugin = self._plugin_patcher.start() + instance = self.plugin.return_value + if service_type: + instance.get_plugin_type.return_value = service_type + if supported_extension_aliases is not None: + instance.supported_extension_aliases = supported_extension_aliases + if allow_pagination: + cfg.CONF.set_override('allow_pagination', True) + # instance.__native_pagination_support = True + native_pagination_attr_name = ("_%s__native_pagination_support" + % 
instance.__class__.__name__) + setattr(instance, native_pagination_attr_name, True) + if allow_sorting: + cfg.CONF.set_override('allow_sorting', True) + # instance.__native_sorting_support = True + native_sorting_attr_name = ("_%s__native_sorting_support" + % instance.__class__.__name__) + setattr(instance, native_sorting_attr_name, True) + if use_quota: + quota.QUOTAS._driver = None + cfg.CONF.set_override('quota_driver', 'neutron.quota.ConfDriver', + group='QUOTAS') + + class ExtensionTestExtensionManager(object): + def get_resources(self): + # Add the resources to the global attribute map + # This is done here as the setup process won't + # initialize the main API router which extends + # the global attribute map + attributes.RESOURCE_ATTRIBUTE_MAP.update( + resource_attribute_map) + return extension_class.get_resources() + + def get_actions(self): + return [] + + def get_request_extensions(self): + return [] + + ext_mgr = ExtensionTestExtensionManager() + self.ext_mdw = test_extensions.setup_extensions_middleware(ext_mgr) + self.api = webtest.TestApp(self.ext_mdw) + + def _test_entity_delete(self, entity): + """Does the entity deletion based on naming convention.""" + entity_id = str(uuid.uuid4()) + path = self._resource_prefix + '/' if self._resource_prefix else '' + path += self._plural_mappings.get(entity, entity + 's') + if self._translate_resource_name: + path = path.replace('_', '-') + res = self.api.delete( + test_api_v2._get_path(path, id=entity_id, fmt=self.fmt)) + delete_entity = getattr(self.plugin.return_value, "delete_" + entity) + delete_entity.assert_called_with(mock.ANY, entity_id) + self.assertEqual(res.status_int, exc.HTTPNoContent.code) diff --git a/neutron/tests/unit/test_api_v2_resource.py b/neutron/tests/unit/test_api_v2_resource.py new file mode 100644 index 000000000..550c16592 --- /dev/null +++ b/neutron/tests/unit/test_api_v2_resource.py @@ -0,0 +1,372 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2012 Intel 
# Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Zhongyue Luo, Intel Corporation.
#

import mock
from webob import exc
import webtest

from neutron.api.v2 import resource as wsgi_resource
from neutron.common import exceptions as n_exc
from neutron import context
from neutron.openstack.common import gettextutils
from neutron.tests import base
from neutron import wsgi


class RequestTestCase(base.BaseTestCase):
    """Tests content-type negotiation and context handling of Request."""

    def setUp(self):
        super(RequestTestCase, self).setUp()
        self.req = wsgi_resource.Request({'foo': 'bar'})

    def test_content_type_missing(self):
        request = wsgi.Request.blank('/tests/123', method='POST')
        request.body = ""
        self.assertIsNone(request.get_content_type())

    def test_content_type_with_charset(self):
        request = wsgi.Request.blank('/tests/123')
        request.headers["Content-Type"] = "application/json; charset=UTF-8"
        result = request.get_content_type()
        self.assertEqual(result, "application/json")

    def test_content_type_from_accept(self):
        for content_type in ('application/xml',
                             'application/json'):
            request = wsgi.Request.blank('/tests/123')
            request.headers["Accept"] = content_type
            result = request.best_match_content_type()
            self.assertEqual(result, content_type)

    def test_content_type_from_accept_best(self):
        request = wsgi.Request.blank('/tests/123')
        request.headers["Accept"] = "application/xml, application/json"
        result = request.best_match_content_type()
        self.assertEqual(result, "application/json")

        # Quality factors should override declaration order.
        request = wsgi.Request.blank('/tests/123')
        request.headers["Accept"] = ("application/json; q=0.3, "
                                     "application/xml; q=0.9")
        result = request.best_match_content_type()
        self.assertEqual(result, "application/xml")

    def test_content_type_from_query_extension(self):
        request = wsgi.Request.blank('/tests/123.xml')
        result = request.best_match_content_type()
        self.assertEqual(result, "application/xml")

        request = wsgi.Request.blank('/tests/123.json')
        result = request.best_match_content_type()
        self.assertEqual(result, "application/json")

        # Unknown extensions fall back to the JSON default.
        request = wsgi.Request.blank('/tests/123.invalid')
        result = request.best_match_content_type()
        self.assertEqual(result, "application/json")

    def test_content_type_accept_and_query_extension(self):
        # The path extension wins over the Accept header.
        request = wsgi.Request.blank('/tests/123.xml')
        request.headers["Accept"] = "application/json"
        result = request.best_match_content_type()
        self.assertEqual(result, "application/xml")

    def test_content_type_accept_default(self):
        request = wsgi.Request.blank('/tests/123.unsupported')
        request.headers["Accept"] = "application/unsupported1"
        result = request.best_match_content_type()
        self.assertEqual(result, "application/json")

    def test_context_with_neutron_context(self):
        ctxt = context.Context('fake_user', 'fake_tenant')
        self.req.environ['neutron.context'] = ctxt
        self.assertEqual(self.req.context, ctxt)

    def test_context_without_neutron_context(self):
        self.assertTrue(self.req.context.is_admin)

    def test_best_match_language(self):
        # Test that we are actually invoking language negotiation by webob
        request = wsgi.Request.blank('/')
        gettextutils.get_available_languages = mock.MagicMock()
        gettextutils.get_available_languages.return_value = ['known-language',
                                                             'es', 'zh']
        request.headers['Accept-Language'] = 'known-language'
        language = request.best_match_language()
        self.assertEqual(language, 'known-language')

        # If the Accept-Language is an unknown language, missing or empty,
        # the best match locale should be None
        request.headers['Accept-Language'] = 'unknown-language'
        language = request.best_match_language()
        self.assertIsNone(language)
        request.headers['Accept-Language'] = ''
        language = request.best_match_language()
        self.assertIsNone(language)
        request.headers.pop('Accept-Language')
        language = request.best_match_language()
        self.assertIsNone(language)


class ResourceTestCase(base.BaseTestCase):
    """Tests exception-to-HTTP-fault mapping and dispatch in Resource."""

    def test_unmapped_neutron_error_with_json(self):
        msg = u'\u7f51\u7edc'

        class TestException(n_exc.NeutronException):
            message = msg
        expected_res = {'body': {
            'NeutronError': {
                'type': 'TestException',
                'message': msg,
                'detail': ''}}}
        controller = mock.MagicMock()
        controller.test.side_effect = TestException()

        resource = webtest.TestApp(wsgi_resource.Resource(controller))

        environ = {'wsgiorg.routing_args': (None, {'action': 'test',
                                                   'format': 'json'})}
        res = resource.get('', extra_environ=environ, expect_errors=True)
        self.assertEqual(res.status_int, exc.HTTPInternalServerError.code)
        self.assertEqual(wsgi.JSONDeserializer().deserialize(res.body),
                         expected_res)

    def test_unmapped_neutron_error_with_xml(self):
        msg = u'\u7f51\u7edc'

        class TestException(n_exc.NeutronException):
            message = msg
        expected_res = {'body': {
            'NeutronError': {
                'type': 'TestException',
                'message': msg,
                'detail': ''}}}
        controller = mock.MagicMock()
        controller.test.side_effect = TestException()

        resource = webtest.TestApp(wsgi_resource.Resource(controller))

        environ = {'wsgiorg.routing_args': (None, {'action': 'test',
                                                   'format': 'xml'})}
        res = resource.get('', extra_environ=environ, expect_errors=True)
        self.assertEqual(res.status_int, exc.HTTPInternalServerError.code)
        self.assertEqual(wsgi.XMLDeserializer().deserialize(res.body),
                         expected_res)

    @mock.patch('neutron.openstack.common.gettextutils.translate')
    def test_unmapped_neutron_error_localized(self, mock_translation):
        gettextutils.install('blaa', lazy=True)
        msg_translation = 'Translated error'
        mock_translation.return_value = msg_translation
        msg = _('Unmapped error')

        class TestException(n_exc.NeutronException):
            message = msg

        controller = mock.MagicMock()
        controller.test.side_effect = TestException()
        resource = webtest.TestApp(wsgi_resource.Resource(controller))

        environ = {'wsgiorg.routing_args': (None, {'action': 'test',
                                                   'format': 'json'})}

        res = resource.get('', extra_environ=environ, expect_errors=True)
        self.assertEqual(res.status_int, exc.HTTPInternalServerError.code)
        self.assertIn(msg_translation,
                      str(wsgi.JSONDeserializer().deserialize(res.body)))

    def test_mapped_neutron_error_with_json(self):
        msg = u'\u7f51\u7edc'

        class TestException(n_exc.NeutronException):
            message = msg
        expected_res = {'body': {
            'NeutronError': {
                'type': 'TestException',
                'message': msg,
                'detail': ''}}}
        controller = mock.MagicMock()
        controller.test.side_effect = TestException()

        # Exceptions listed in the fault map use the mapped HTTP status.
        faults = {TestException: exc.HTTPGatewayTimeout}
        resource = webtest.TestApp(wsgi_resource.Resource(controller,
                                                          faults=faults))

        environ = {'wsgiorg.routing_args': (None, {'action': 'test',
                                                   'format': 'json'})}
        res = resource.get('', extra_environ=environ, expect_errors=True)
        self.assertEqual(res.status_int, exc.HTTPGatewayTimeout.code)
        self.assertEqual(wsgi.JSONDeserializer().deserialize(res.body),
                         expected_res)

    def test_mapped_neutron_error_with_xml(self):
        msg = u'\u7f51\u7edc'

        class TestException(n_exc.NeutronException):
            message = msg
        expected_res = {'body': {
            'NeutronError': {
                'type': 'TestException',
                'message': msg,
                'detail': ''}}}
        controller = mock.MagicMock()
        controller.test.side_effect = TestException()

        faults = {TestException: exc.HTTPGatewayTimeout}
        resource = webtest.TestApp(wsgi_resource.Resource(controller,
                                                          faults=faults))

        environ = {'wsgiorg.routing_args': (None, {'action': 'test',
                                                   'format': 'xml'})}
        res = resource.get('', extra_environ=environ, expect_errors=True)
        self.assertEqual(res.status_int, exc.HTTPGatewayTimeout.code)
        self.assertEqual(wsgi.XMLDeserializer().deserialize(res.body),
                         expected_res)

    @mock.patch('neutron.openstack.common.gettextutils.translate')
    def test_mapped_neutron_error_localized(self, mock_translation):
        gettextutils.install('blaa', lazy=True)
        msg_translation = 'Translated error'
        mock_translation.return_value = msg_translation
        msg = _('Unmapped error')

        class TestException(n_exc.NeutronException):
            message = msg

        controller = mock.MagicMock()
        controller.test.side_effect = TestException()
        faults = {TestException: exc.HTTPGatewayTimeout}
        resource = webtest.TestApp(wsgi_resource.Resource(controller,
                                                          faults=faults))

        environ = {'wsgiorg.routing_args': (None, {'action': 'test',
                                                   'format': 'json'})}

        res = resource.get('', extra_environ=environ, expect_errors=True)
        self.assertEqual(res.status_int, exc.HTTPGatewayTimeout.code)
        self.assertIn(msg_translation,
                      str(wsgi.JSONDeserializer().deserialize(res.body)))

    def test_http_error(self):
        controller = mock.MagicMock()
        controller.test.side_effect = exc.HTTPGatewayTimeout()

        resource = webtest.TestApp(wsgi_resource.Resource(controller))

        environ = {'wsgiorg.routing_args': (None, {'action': 'test'})}
        res = resource.get('', extra_environ=environ, expect_errors=True)
        self.assertEqual(res.status_int, exc.HTTPGatewayTimeout.code)

    def test_unhandled_error_with_json(self):
        expected_res = {'body': {'NeutronError':
                                 _('Request Failed: internal server error '
                                   'while processing your request.')}}
        controller = mock.MagicMock()
        controller.test.side_effect = Exception()

        resource = webtest.TestApp(wsgi_resource.Resource(controller))

        environ = {'wsgiorg.routing_args': (None, {'action': 'test',
                                                   'format': 'json'})}
        res = resource.get('', extra_environ=environ, expect_errors=True)
        self.assertEqual(res.status_int, exc.HTTPInternalServerError.code)
        self.assertEqual(wsgi.JSONDeserializer().deserialize(res.body),
                         expected_res)

    def test_unhandled_error_with_xml(self):
        expected_res = {'body': {'NeutronError':
                                 _('Request Failed: internal server error '
                                   'while processing your request.')}}
        controller = mock.MagicMock()
        controller.test.side_effect = Exception()

        resource = webtest.TestApp(wsgi_resource.Resource(controller))

        environ = {'wsgiorg.routing_args': (None, {'action': 'test',
                                                   'format': 'xml'})}
        res = resource.get('', extra_environ=environ, expect_errors=True)
        self.assertEqual(res.status_int, exc.HTTPInternalServerError.code)
        self.assertEqual(wsgi.XMLDeserializer().deserialize(res.body),
                         expected_res)

    def test_status_200(self):
        controller = mock.MagicMock()
        controller.test = lambda request: {'foo': 'bar'}

        resource = webtest.TestApp(wsgi_resource.Resource(controller))

        environ = {'wsgiorg.routing_args': (None, {'action': 'test'})}
        res = resource.get('', extra_environ=environ)
        self.assertEqual(res.status_int, 200)

    def test_status_204(self):
        controller = mock.MagicMock()
        controller.test = lambda request: {'foo': 'bar'}

        resource = webtest.TestApp(wsgi_resource.Resource(controller))

        environ = {'wsgiorg.routing_args': (None, {'action': 'delete'})}
        res = resource.delete('', extra_environ=environ)
        self.assertEqual(res.status_int, 204)

    def _test_error_log_level(self, map_webob_exc, expect_log_info=False,
                              use_fault_map=True):
        # Mapped (4xx) faults should log at info; everything else at
        # exception level.
        class TestException(n_exc.NeutronException):
            message = 'Test Exception'

        controller = mock.MagicMock()
        controller.test.side_effect = TestException()
        faults = {TestException: map_webob_exc} if use_fault_map else {}
        resource = webtest.TestApp(wsgi_resource.Resource(controller, faults))
        environ = {'wsgiorg.routing_args': (None, {'action': 'test'})}
        with mock.patch.object(wsgi_resource, 'LOG') as log:
            res = resource.get('', extra_environ=environ, expect_errors=True)
            self.assertEqual(res.status_int, map_webob_exc.code)
            self.assertEqual(expect_log_info, log.info.called)
            self.assertNotEqual(expect_log_info, log.exception.called)

    def test_4xx_error_logged_info_level(self):
        self._test_error_log_level(exc.HTTPNotFound, expect_log_info=True)

    def test_non_4xx_error_logged_exception_level(self):
        self._test_error_log_level(exc.HTTPServiceUnavailable,
                                   expect_log_info=False)

    def test_unmapped_error_logged_exception_level(self):
        self._test_error_log_level(exc.HTTPInternalServerError,
                                   expect_log_info=False, use_fault_map=False)

    def test_no_route_args(self):
        controller = mock.MagicMock()

        resource = webtest.TestApp(wsgi_resource.Resource(controller))

        environ = {}
        res = resource.get('', extra_environ=environ, expect_errors=True)
        self.assertEqual(res.status_int, exc.HTTPInternalServerError.code)

    def test_post_with_body(self):
        controller = mock.MagicMock()
        controller.test = lambda request, body: {'foo': 'bar'}

        resource = webtest.TestApp(wsgi_resource.Resource(controller))

        environ = {'wsgiorg.routing_args': (None, {'action': 'test'})}
        res = resource.post('', params='{"key": "val"}',
                            extra_environ=environ)
        self.assertEqual(res.status_int, 200)


# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import testtools + +from neutron.api.v2 import attributes +from neutron.common import exceptions as n_exc +from neutron.tests import base + + +class TestAttributes(base.BaseTestCase): + + def _construct_dict_and_constraints(self): + """Constructs a test dictionary and a definition of constraints. + :return: A (dictionary, constraint) tuple + """ + constraints = {'key1': {'type:values': ['val1', 'val2'], + 'required': True}, + 'key2': {'type:string': None, + 'required': False}, + 'key3': {'type:dict': {'k4': {'type:string': None, + 'required': True}}, + 'required': True}} + + dictionary = {'key1': 'val1', + 'key2': 'a string value', + 'key3': {'k4': 'a string value'}} + + return dictionary, constraints + + def test_is_attr_set(self): + data = attributes.ATTR_NOT_SPECIFIED + self.assertIs(attributes.is_attr_set(data), False) + + data = None + self.assertIs(attributes.is_attr_set(data), False) + + data = "I'm set" + self.assertIs(attributes.is_attr_set(data), True) + + def test_validate_values(self): + msg = attributes._validate_values(4, [4, 6]) + self.assertIsNone(msg) + + msg = attributes._validate_values(4, (4, 6)) + self.assertIsNone(msg) + + msg = attributes._validate_values(7, [4, 6]) + self.assertEqual(msg, "'7' is not in [4, 6]") + + msg = attributes._validate_values(7, (4, 6)) + self.assertEqual(msg, "'7' is not in (4, 6)") + + def test_validate_not_empty_string(self): + msg = attributes._validate_not_empty_string(' ', None) + self.assertEqual(msg, u"' ' Blank strings are not permitted") + + def test_validate_not_empty_string_or_none(self): + msg 
= attributes._validate_not_empty_string_or_none(' ', None) + self.assertEqual(msg, u"' ' Blank strings are not permitted") + + msg = attributes._validate_not_empty_string_or_none(None, None) + self.assertIsNone(msg) + + def test_validate_string_or_none(self): + msg = attributes._validate_not_empty_string_or_none('test', None) + self.assertIsNone(msg) + + msg = attributes._validate_not_empty_string_or_none(None, None) + self.assertIsNone(msg) + + def test_validate_string(self): + msg = attributes._validate_string(None, None) + self.assertEqual(msg, "'None' is not a valid string") + + # 0 == len(data) == max_len + msg = attributes._validate_string("", 0) + self.assertIsNone(msg) + + # 0 == len(data) < max_len + msg = attributes._validate_string("", 9) + self.assertIsNone(msg) + + # 0 < len(data) < max_len + msg = attributes._validate_string("123456789", 10) + self.assertIsNone(msg) + + # 0 < len(data) == max_len + msg = attributes._validate_string("123456789", 9) + self.assertIsNone(msg) + + # 0 < max_len < len(data) + msg = attributes._validate_string("1234567890", 9) + self.assertEqual(msg, "'1234567890' exceeds maximum length of 9") + + msg = attributes._validate_string("123456789", None) + self.assertIsNone(msg) + + def test_validate_no_whitespace(self): + data = 'no_white_space' + result = attributes._validate_no_whitespace(data) + self.assertEqual(result, data) + + self.assertRaises(n_exc.InvalidInput, + attributes._validate_no_whitespace, + 'i have whitespace') + + self.assertRaises(n_exc.InvalidInput, + attributes._validate_no_whitespace, + 'i\thave\twhitespace') + + def test_validate_range(self): + msg = attributes._validate_range(1, [1, 9]) + self.assertIsNone(msg) + + msg = attributes._validate_range(5, [1, 9]) + self.assertIsNone(msg) + + msg = attributes._validate_range(9, [1, 9]) + self.assertIsNone(msg) + + msg = attributes._validate_range(1, (1, 9)) + self.assertIsNone(msg) + + msg = attributes._validate_range(5, (1, 9)) + self.assertIsNone(msg) + + 
msg = attributes._validate_range(9, (1, 9)) + self.assertIsNone(msg) + + msg = attributes._validate_range(0, [1, 9]) + self.assertEqual(msg, "'0' is too small - must be at least '1'") + + msg = attributes._validate_range(10, (1, 9)) + self.assertEqual(msg, + "'10' is too large - must be no larger than '9'") + + msg = attributes._validate_range("bogus", (1, 9)) + self.assertEqual(msg, "'bogus' is not an integer") + + msg = attributes._validate_range(10, (attributes.UNLIMITED, + attributes.UNLIMITED)) + self.assertIsNone(msg) + + msg = attributes._validate_range(10, (1, attributes.UNLIMITED)) + self.assertIsNone(msg) + + msg = attributes._validate_range(1, (attributes.UNLIMITED, 9)) + self.assertIsNone(msg) + + msg = attributes._validate_range(-1, (0, attributes.UNLIMITED)) + self.assertEqual(msg, "'-1' is too small - must be at least '0'") + + msg = attributes._validate_range(10, (attributes.UNLIMITED, 9)) + self.assertEqual(msg, + "'10' is too large - must be no larger than '9'") + + def _test_validate_mac_address(self, validator, allow_none=False): + mac_addr = "ff:16:3e:4f:00:00" + msg = validator(mac_addr) + self.assertIsNone(msg) + + mac_addr = "ffa:16:3e:4f:00:00" + msg = validator(mac_addr) + err_msg = "'%s' is not a valid MAC address" + self.assertEqual(msg, err_msg % mac_addr) + + mac_addr = "123" + msg = validator(mac_addr) + self.assertEqual(msg, err_msg % mac_addr) + + mac_addr = None + msg = validator(mac_addr) + if allow_none: + self.assertIsNone(msg) + else: + self.assertEqual(msg, err_msg % mac_addr) + + def test_validate_mac_address(self): + self._test_validate_mac_address(attributes._validate_mac_address) + + def test_validate_mac_address_or_none(self): + self._test_validate_mac_address( + attributes._validate_mac_address_or_none, allow_none=True) + + def test_validate_ip_address(self): + ip_addr = '1.1.1.1' + msg = attributes._validate_ip_address(ip_addr) + self.assertIsNone(msg) + + ip_addr = '1111.1.1.1' + msg = 
attributes._validate_ip_address(ip_addr) + self.assertEqual(msg, "'%s' is not a valid IP address" % ip_addr) + + ip_addr = '1.1.1.1 has whitespace' + msg = attributes._validate_ip_address(ip_addr) + self.assertEqual(msg, "'%s' is not a valid IP address" % ip_addr) + + ip_addr = '111.1.1.1\twhitespace' + msg = attributes._validate_ip_address(ip_addr) + self.assertEqual(msg, "'%s' is not a valid IP address" % ip_addr) + + ip_addr = '111.1.1.1\nwhitespace' + msg = attributes._validate_ip_address(ip_addr) + self.assertEqual(msg, "'%s' is not a valid IP address" % ip_addr) + + def test_validate_ip_pools(self): + pools = [[{'end': '10.0.0.254'}], + [{'start': '10.0.0.254'}], + [{'start': '1000.0.0.254', + 'end': '1.1.1.1'}], + [{'start': '10.0.0.2', 'end': '10.0.0.254', + 'forza': 'juve'}], + [{'start': '10.0.0.2', 'end': '10.0.0.254'}, + {'end': '10.0.0.254'}], + [None], + None] + for pool in pools: + msg = attributes._validate_ip_pools(pool) + self.assertIsNotNone(msg) + + pools = [[{'end': '10.0.0.254', 'start': '10.0.0.2'}, + {'start': '11.0.0.2', 'end': '11.1.1.1'}], + [{'start': '11.0.0.2', 'end': '11.0.0.100'}]] + for pool in pools: + msg = attributes._validate_ip_pools(pool) + self.assertIsNone(msg) + + def test_validate_fixed_ips(self): + fixed_ips = [ + {'data': [{'subnet_id': '00000000-ffff-ffff-ffff-000000000000', + 'ip_address': '1111.1.1.1'}], + 'error_msg': "'1111.1.1.1' is not a valid IP address"}, + {'data': [{'subnet_id': 'invalid', + 'ip_address': '1.1.1.1'}], + 'error_msg': "'invalid' is not a valid UUID"}, + {'data': None, + 'error_msg': "Invalid data format for fixed IP: 'None'"}, + {'data': "1.1.1.1", + 'error_msg': "Invalid data format for fixed IP: '1.1.1.1'"}, + {'data': ['00000000-ffff-ffff-ffff-000000000000', '1.1.1.1'], + 'error_msg': "Invalid data format for fixed IP: " + "'00000000-ffff-ffff-ffff-000000000000'"}, + {'data': [['00000000-ffff-ffff-ffff-000000000000', '1.1.1.1']], + 'error_msg': "Invalid data format for fixed IP: " + 
"'['00000000-ffff-ffff-ffff-000000000000', " + "'1.1.1.1']'"}, + {'data': [{'subnet_id': '00000000-0fff-ffff-ffff-000000000000', + 'ip_address': '1.1.1.1'}, + {'subnet_id': '00000000-ffff-ffff-ffff-000000000000', + 'ip_address': '1.1.1.1'}], + 'error_msg': "Duplicate IP address '1.1.1.1'"}] + for fixed in fixed_ips: + msg = attributes._validate_fixed_ips(fixed['data']) + self.assertEqual(msg, fixed['error_msg']) + + fixed_ips = [[{'subnet_id': '00000000-ffff-ffff-ffff-000000000000', + 'ip_address': '1.1.1.1'}], + [{'subnet_id': '00000000-0fff-ffff-ffff-000000000000', + 'ip_address': '1.1.1.1'}, + {'subnet_id': '00000000-ffff-ffff-ffff-000000000000', + 'ip_address': '1.1.1.2'}]] + for fixed in fixed_ips: + msg = attributes._validate_fixed_ips(fixed) + self.assertIsNone(msg) + + def test_validate_nameservers(self): + ns_pools = [['1.1.1.2', '1.1.1.2'], + ['www.hostname.com', 'www.hostname.com'], + ['77.hostname.com'], + ['1000.0.0.1'], + None] + + for ns in ns_pools: + msg = attributes._validate_nameservers(ns, None) + self.assertIsNotNone(msg) + + ns_pools = [['100.0.0.2'], + ['www.hostname.com'], + ['www.great.marathons.to.travel'], + ['valid'], + ['www.internal.hostname.com']] + + for ns in ns_pools: + msg = attributes._validate_nameservers(ns, None) + self.assertIsNone(msg) + + def test_validate_hostroutes(self): + hostroute_pools = [[{'destination': '100.0.0.0/24'}], + [{'nexthop': '10.0.2.20'}], + [{'nexthop': '10.0.2.20', + 'forza': 'juve', + 'destination': '100.0.0.0/8'}], + [{'nexthop': '1110.0.2.20', + 'destination': '100.0.0.0/8'}], + [{'nexthop': '10.0.2.20', + 'destination': '100.0.0.0'}], + [{'nexthop': '10.0.2.20', + 'destination': '100.0.0.0/8'}, + {'nexthop': '10.0.2.20', + 'destination': '100.0.0.0/8'}], + [None], + None] + for host_routes in hostroute_pools: + msg = attributes._validate_hostroutes(host_routes, None) + self.assertIsNotNone(msg) + + hostroute_pools = [[{'destination': '100.0.0.0/24', + 'nexthop': '10.0.2.20'}], + [{'nexthop': 
'10.0.2.20', + 'destination': '100.0.0.0/8'}, + {'nexthop': '10.0.2.20', + 'destination': '101.0.0.0/8'}]] + for host_routes in hostroute_pools: + msg = attributes._validate_hostroutes(host_routes, None) + self.assertIsNone(msg) + + def test_validate_ip_address_or_none(self): + ip_addr = None + msg = attributes._validate_ip_address_or_none(ip_addr) + self.assertIsNone(msg) + + ip_addr = '1.1.1.1' + msg = attributes._validate_ip_address_or_none(ip_addr) + self.assertIsNone(msg) + + ip_addr = '1111.1.1.1' + msg = attributes._validate_ip_address_or_none(ip_addr) + self.assertEqual(msg, "'%s' is not a valid IP address" % ip_addr) + + def test_hostname_pattern(self): + data = '@openstack' + msg = attributes._validate_regex(data, attributes.HOSTNAME_PATTERN) + self.assertIsNotNone(msg) + + data = 'www.openstack.org' + msg = attributes._validate_regex(data, attributes.HOSTNAME_PATTERN) + self.assertIsNone(msg) + + def test_uuid_pattern(self): + data = 'garbage' + msg = attributes._validate_regex(data, attributes.UUID_PATTERN) + self.assertIsNotNone(msg) + + data = '00000000-ffff-ffff-ffff-000000000000' + msg = attributes._validate_regex(data, attributes.UUID_PATTERN) + self.assertIsNone(msg) + + def test_mac_pattern(self): + # Valid - 3 octets + base_mac = "fa:16:3e:00:00:00" + msg = attributes._validate_regex(base_mac, + attributes.MAC_PATTERN) + self.assertIsNone(msg) + + # Valid - 4 octets + base_mac = "fa:16:3e:4f:00:00" + msg = attributes._validate_regex(base_mac, + attributes.MAC_PATTERN) + self.assertIsNone(msg) + + # Invalid - not unicast + base_mac = "01:16:3e:4f:00:00" + msg = attributes._validate_regex(base_mac, + attributes.MAC_PATTERN) + self.assertIsNotNone(msg) + + # Invalid - invalid format + base_mac = "a:16:3e:4f:00:00" + msg = attributes._validate_regex(base_mac, + attributes.MAC_PATTERN) + self.assertIsNotNone(msg) + + # Invalid - invalid format + base_mac = "ffa:16:3e:4f:00:00" + msg = attributes._validate_regex(base_mac, + attributes.MAC_PATTERN) + 
self.assertIsNotNone(msg) + + # Invalid - invalid format + base_mac = "01163e4f0000" + msg = attributes._validate_regex(base_mac, + attributes.MAC_PATTERN) + self.assertIsNotNone(msg) + + # Invalid - invalid format + base_mac = "01-16-3e-4f-00-00" + msg = attributes._validate_regex(base_mac, + attributes.MAC_PATTERN) + self.assertIsNotNone(msg) + + # Invalid - invalid format + base_mac = "00:16:3:f:00:00" + msg = attributes._validate_regex(base_mac, + attributes.MAC_PATTERN) + self.assertIsNotNone(msg) + + # Invalid - invalid format + base_mac = "12:3:4:5:67:89ab" + msg = attributes._validate_regex(base_mac, + attributes.MAC_PATTERN) + self.assertIsNotNone(msg) + + def _test_validate_subnet(self, validator, allow_none=False): + # Valid - IPv4 + cidr = "10.0.2.0/24" + msg = validator(cidr, None) + self.assertIsNone(msg) + + # Valid - IPv6 without final octets + cidr = "fe80::/24" + msg = validator(cidr, None) + self.assertIsNone(msg) + + # Valid - IPv6 with final octets + cidr = "fe80::/24" + msg = validator(cidr, None) + self.assertIsNone(msg) + + # Valid - uncompressed ipv6 address + cidr = "fe80:0:0:0:0:0:0:0/128" + msg = validator(cidr, None) + self.assertIsNone(msg) + + # Valid - ipv6 address with multiple consecutive zero + cidr = "2001:0db8:0:0:1::1/128" + msg = validator(cidr, None) + self.assertIsNone(msg) + + # Valid - ipv6 address with multiple consecutive zero + cidr = "2001:0db8::1:0:0:1/128" + msg = validator(cidr, None) + self.assertIsNone(msg) + + # Valid - ipv6 address with multiple consecutive zero + cidr = "2001::0:1:0:0:1100/120" + msg = validator(cidr, None) + self.assertIsNone(msg) + + # Valid - abbreviated ipv4 address + cidr = "10/24" + msg = validator(cidr, None) + self.assertIsNone(msg) + + # Invalid - IPv4 missing mask + cidr = "10.0.2.0" + msg = validator(cidr, None) + error = _("'%(data)s' isn't a recognized IP subnet cidr," + " '%(cidr)s' is recommended") % {"data": cidr, + "cidr": "10.0.2.0/32"} + self.assertEqual(msg, error) + + # 
Valid - IPv4 with non-zero masked bits is ok + for i in range(1, 255): + cidr = "192.168.1.%s/24" % i + msg = validator(cidr, None) + self.assertIsNone(msg) + + # Invalid - IPv6 without final octets, missing mask + cidr = "fe80::" + msg = validator(cidr, None) + error = _("'%(data)s' isn't a recognized IP subnet cidr," + " '%(cidr)s' is recommended") % {"data": cidr, + "cidr": "fe80::/128"} + self.assertEqual(msg, error) + + # Invalid - IPv6 with final octets, missing mask + cidr = "fe80::0" + msg = validator(cidr, None) + error = _("'%(data)s' isn't a recognized IP subnet cidr," + " '%(cidr)s' is recommended") % {"data": cidr, + "cidr": "fe80::/128"} + self.assertEqual(msg, error) + + # Invalid - Address format error + cidr = 'invalid' + msg = validator(cidr, None) + error = "'%s' is not a valid IP subnet" % cidr + self.assertEqual(msg, error) + + cidr = None + msg = validator(cidr, None) + if allow_none: + self.assertIsNone(msg) + else: + error = "'%s' is not a valid IP subnet" % cidr + self.assertEqual(msg, error) + + def test_validate_subnet(self): + self._test_validate_subnet(attributes._validate_subnet) + + def test_validate_subnet_or_none(self): + self._test_validate_subnet(attributes._validate_subnet_or_none, + allow_none=True) + + def _test_validate_regex(self, validator, allow_none=False): + pattern = '[hc]at' + + data = None + msg = validator(data, pattern) + if allow_none: + self.assertIsNone(msg) + else: + self.assertEqual(msg, "'None' is not a valid input") + + data = 'bat' + msg = validator(data, pattern) + self.assertEqual(msg, "'%s' is not a valid input" % data) + + data = 'hat' + msg = validator(data, pattern) + self.assertIsNone(msg) + + data = 'cat' + msg = validator(data, pattern) + self.assertIsNone(msg) + + def test_validate_regex(self): + self._test_validate_regex(attributes._validate_regex) + + def test_validate_regex_or_none(self): + self._test_validate_regex(attributes._validate_regex_or_none, + allow_none=True) + + def 
test_validate_uuid(self): + msg = attributes._validate_uuid('garbage') + self.assertEqual(msg, "'garbage' is not a valid UUID") + + msg = attributes._validate_uuid('00000000-ffff-ffff-ffff-000000000000') + self.assertIsNone(msg) + + def test_validate_uuid_list(self): + # check not a list + uuids = [None, + 123, + 'e5069610-744b-42a7-8bd8-ceac1a229cd4', + '12345678123456781234567812345678', + {'uuid': 'e5069610-744b-42a7-8bd8-ceac1a229cd4'}] + for uuid in uuids: + msg = attributes._validate_uuid_list(uuid) + error = "'%s' is not a list" % uuid + self.assertEqual(msg, error) + + # check invalid uuid in a list + invalid_uuid_lists = [[None], + [123], + [123, 'e5069610-744b-42a7-8bd8-ceac1a229cd4'], + ['123', '12345678123456781234567812345678'], + ['t5069610-744b-42a7-8bd8-ceac1a229cd4'], + ['e5069610-744b-42a7-8bd8-ceac1a229cd44'], + ['e50696100-744b-42a7-8bd8-ceac1a229cd4'], + ['e5069610-744bb-42a7-8bd8-ceac1a229cd4']] + for uuid_list in invalid_uuid_lists: + msg = attributes._validate_uuid_list(uuid_list) + error = "'%s' is not a valid UUID" % uuid_list[0] + self.assertEqual(msg, error) + + # check duplicate items in a list + duplicate_uuids = ['e5069610-744b-42a7-8bd8-ceac1a229cd4', + 'f3eeab00-8367-4524-b662-55e64d4cacb5', + 'e5069610-744b-42a7-8bd8-ceac1a229cd4'] + msg = attributes._validate_uuid_list(duplicate_uuids) + error = ("Duplicate items in the list: " + "'%s'" % ', '.join(duplicate_uuids)) + self.assertEqual(msg, error) + + # check valid uuid lists + valid_uuid_lists = [['e5069610-744b-42a7-8bd8-ceac1a229cd4'], + ['f3eeab00-8367-4524-b662-55e64d4cacb5'], + ['e5069610-744b-42a7-8bd8-ceac1a229cd4', + 'f3eeab00-8367-4524-b662-55e64d4cacb5']] + for uuid_list in valid_uuid_lists: + msg = attributes._validate_uuid_list(uuid_list) + self.assertIsNone(msg) + + def test_validate_dict_type(self): + for value in (None, True, '1', []): + self.assertEqual(attributes._validate_dict(value), + "'%s' is not a dictionary" % value) + + def 
test_validate_dict_without_constraints(self): + msg = attributes._validate_dict({}) + self.assertIsNone(msg) + + # Validate a dictionary without constraints. + msg = attributes._validate_dict({'key': 'value'}) + self.assertIsNone(msg) + + def test_validate_a_valid_dict_with_constraints(self): + dictionary, constraints = self._construct_dict_and_constraints() + + msg = attributes._validate_dict(dictionary, constraints) + self.assertIsNone(msg, 'Validation of a valid dictionary failed.') + + def test_validate_dict_with_invalid_validator(self): + dictionary, constraints = self._construct_dict_and_constraints() + + constraints['key1'] = {'type:unsupported': None, 'required': True} + msg = attributes._validate_dict(dictionary, constraints) + self.assertEqual(msg, "Validator 'type:unsupported' does not exist.") + + def test_validate_dict_not_required_keys(self): + dictionary, constraints = self._construct_dict_and_constraints() + + del dictionary['key2'] + msg = attributes._validate_dict(dictionary, constraints) + self.assertIsNone(msg, 'Field that was not required by the specs was' + 'required by the validator.') + + def test_validate_dict_required_keys(self): + dictionary, constraints = self._construct_dict_and_constraints() + + del dictionary['key1'] + msg = attributes._validate_dict(dictionary, constraints) + self.assertIn('Expected keys:', msg) + + def test_validate_dict_wrong_values(self): + dictionary, constraints = self._construct_dict_and_constraints() + + dictionary['key1'] = 'UNSUPPORTED' + msg = attributes._validate_dict(dictionary, constraints) + self.assertIsNotNone(msg) + + def test_validate_dict_convert_boolean(self): + dictionary, constraints = self._construct_dict_and_constraints() + + constraints['key_bool'] = { + 'type:boolean': None, + 'required': False, + 'convert_to': attributes.convert_to_boolean} + dictionary['key_bool'] = 'true' + msg = attributes._validate_dict(dictionary, constraints) + self.assertIsNone(msg) + # Explicitly comparing with 
literal 'True' as assertTrue + # succeeds also for 'true' + self.assertIs(True, dictionary['key_bool']) + + def test_subdictionary(self): + dictionary, constraints = self._construct_dict_and_constraints() + + del dictionary['key3']['k4'] + dictionary['key3']['k5'] = 'a string value' + msg = attributes._validate_dict(dictionary, constraints) + self.assertIn('Expected keys:', msg) + + def test_validate_dict_or_none(self): + dictionary, constraints = self._construct_dict_and_constraints() + + # Check whether None is a valid value. + msg = attributes._validate_dict_or_none(None, constraints) + self.assertIsNone(msg, 'Validation of a None dictionary failed.') + + # Check validation of a regular dictionary. + msg = attributes._validate_dict_or_none(dictionary, constraints) + self.assertIsNone(msg, 'Validation of a valid dictionary failed.') + + def test_validate_dict_or_empty(self): + dictionary, constraints = self._construct_dict_and_constraints() + + # Check whether an empty dictionary is valid. + msg = attributes._validate_dict_or_empty({}, constraints) + self.assertIsNone(msg, 'Validation of a None dictionary failed.') + + # Check validation of a regular dictionary. 
+ msg = attributes._validate_dict_or_none(dictionary, constraints) + self.assertIsNone(msg, 'Validation of a valid dictionary failed.') + + def test_validate_non_negative(self): + for value in (-1, '-2'): + self.assertEqual(attributes._validate_non_negative(value), + "'%s' should be non-negative" % value) + + for value in (0, 1, '2', True, False): + msg = attributes._validate_non_negative(value) + self.assertIsNone(msg) + + +class TestConvertToBoolean(base.BaseTestCase): + + def test_convert_to_boolean_bool(self): + self.assertIs(attributes.convert_to_boolean(True), True) + self.assertIs(attributes.convert_to_boolean(False), False) + + def test_convert_to_boolean_int(self): + self.assertIs(attributes.convert_to_boolean(0), False) + self.assertIs(attributes.convert_to_boolean(1), True) + self.assertRaises(n_exc.InvalidInput, + attributes.convert_to_boolean, + 7) + + def test_convert_to_boolean_str(self): + self.assertIs(attributes.convert_to_boolean('True'), True) + self.assertIs(attributes.convert_to_boolean('true'), True) + self.assertIs(attributes.convert_to_boolean('False'), False) + self.assertIs(attributes.convert_to_boolean('false'), False) + self.assertIs(attributes.convert_to_boolean('0'), False) + self.assertIs(attributes.convert_to_boolean('1'), True) + self.assertRaises(n_exc.InvalidInput, + attributes.convert_to_boolean, + '7') + + +class TestConvertToInt(base.BaseTestCase): + + def test_convert_to_int_int(self): + self.assertEqual(attributes.convert_to_int(-1), -1) + self.assertEqual(attributes.convert_to_int(0), 0) + self.assertEqual(attributes.convert_to_int(1), 1) + + def test_convert_to_int_str(self): + self.assertEqual(attributes.convert_to_int('4'), 4) + self.assertEqual(attributes.convert_to_int('6'), 6) + self.assertRaises(n_exc.InvalidInput, + attributes.convert_to_int, + 'garbage') + + def test_convert_to_int_none(self): + self.assertRaises(n_exc.InvalidInput, + 
attributes.convert_to_int, + None) + + def test_convert_none_to_empty_list_none(self): + self.assertEqual( + [], attributes.convert_none_to_empty_list(None)) + + def test_convert_none_to_empty_dict(self): + self.assertEqual( + {}, attributes.convert_none_to_empty_dict(None)) + + def test_convert_none_to_empty_list_value(self): + values = ['1', 3, [], [1], {}, {'a': 3}] + for value in values: + self.assertEqual( + value, attributes.convert_none_to_empty_list(value)) + + +class TestConvertKvp(base.BaseTestCase): + + def test_convert_kvp_list_to_dict_succeeds_for_missing_values(self): + result = attributes.convert_kvp_list_to_dict(['True']) + self.assertEqual({}, result) + + def test_convert_kvp_list_to_dict_succeeds_for_multiple_values(self): + result = attributes.convert_kvp_list_to_dict( + ['a=b', 'a=c', 'a=c', 'b=a']) + self.assertEqual({'a': ['c', 'b'], 'b': ['a']}, result) + + def test_convert_kvp_list_to_dict_succeeds_for_values(self): + result = attributes.convert_kvp_list_to_dict(['a=b', 'c=d']) + self.assertEqual({'a': ['b'], 'c': ['d']}, result) + + def test_convert_kvp_str_to_list_fails_for_missing_key(self): + with testtools.ExpectedException(n_exc.InvalidInput): + attributes.convert_kvp_str_to_list('=a') + + def test_convert_kvp_str_to_list_fails_for_missing_equals(self): + with testtools.ExpectedException(n_exc.InvalidInput): + attributes.convert_kvp_str_to_list('a') + + def test_convert_kvp_str_to_list_succeeds_for_one_equals(self): + result = attributes.convert_kvp_str_to_list('a=') + self.assertEqual(['a', ''], result) + + def test_convert_kvp_str_to_list_succeeds_for_two_equals(self): + result = attributes.convert_kvp_str_to_list('a=a=a') + self.assertEqual(['a', 'a=a'], result) + + +class TestConvertToList(base.BaseTestCase): + + def test_convert_to_empty_list(self): + for item in (None, [], (), {}): + self.assertEqual(attributes.convert_to_list(item), []) + + def test_convert_to_list_string(self): + for item in ('', 'foo'): + 
self.assertEqual(attributes.convert_to_list(item), [item]) + + def test_convert_to_list_iterable(self): + for item in ([None], [1, 2, 3], (1, 2, 3), set([1, 2, 3]), ['foo']): + self.assertEqual(attributes.convert_to_list(item), list(item)) + + def test_convert_to_list_non_iterable(self): + for item in (True, False, 1, 1.2, object()): + self.assertEqual(attributes.convert_to_list(item), [item]) diff --git a/neutron/tests/unit/test_auth.py b/neutron/tests/unit/test_auth.py new file mode 100644 index 000000000..aa5c06743 --- /dev/null +++ b/neutron/tests/unit/test_auth.py @@ -0,0 +1,99 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import webob + +from neutron import auth +from neutron.openstack.common.middleware import request_id +from neutron.tests import base + + +class NeutronKeystoneContextTestCase(base.BaseTestCase): + def setUp(self): + super(NeutronKeystoneContextTestCase, self).setUp() + + @webob.dec.wsgify + def fake_app(req): + self.context = req.environ['neutron.context'] + return webob.Response() + + self.context = None + self.middleware = auth.NeutronKeystoneContext(fake_app) + self.request = webob.Request.blank('/') + self.request.headers['X_AUTH_TOKEN'] = 'testauthtoken' + + def test_no_user_id(self): + self.request.headers['X_PROJECT_ID'] = 'testtenantid' + response = self.request.get_response(self.middleware) + self.assertEqual(response.status, '401 Unauthorized') + + def test_with_user_id(self): + self.request.headers['X_PROJECT_ID'] = 'testtenantid' + self.request.headers['X_USER_ID'] = 'testuserid' + response = self.request.get_response(self.middleware) + self.assertEqual(response.status, '200 OK') + self.assertEqual(self.context.user_id, 'testuserid') + self.assertEqual(self.context.user, 'testuserid') + + def test_with_tenant_id(self): + self.request.headers['X_PROJECT_ID'] = 'testtenantid' + self.request.headers['X_USER_ID'] = 'test_user_id' + response = self.request.get_response(self.middleware) + self.assertEqual(response.status, '200 OK') + self.assertEqual(self.context.tenant_id, 'testtenantid') + self.assertEqual(self.context.tenant, 'testtenantid') + + def test_roles_no_admin(self): + self.request.headers['X_PROJECT_ID'] = 'testtenantid' + self.request.headers['X_USER_ID'] = 'testuserid' + self.request.headers['X_ROLES'] = 'role1, role2 , role3,role4,role5' + response = self.request.get_response(self.middleware) + self.assertEqual(response.status, '200 OK') + self.assertEqual(self.context.roles, ['role1', 'role2', 'role3', + 'role4', 'role5']) + self.assertEqual(self.context.is_admin, False) + + def test_roles_with_admin(self): + 
self.request.headers['X_PROJECT_ID'] = 'testtenantid' + self.request.headers['X_USER_ID'] = 'testuserid' + self.request.headers['X_ROLES'] = ('role1, role2 , role3,role4,role5,' + 'AdMiN') + response = self.request.get_response(self.middleware) + self.assertEqual(response.status, '200 OK') + self.assertEqual(self.context.roles, ['role1', 'role2', 'role3', + 'role4', 'role5', 'AdMiN']) + self.assertEqual(self.context.is_admin, True) + + def test_with_user_tenant_name(self): + self.request.headers['X_PROJECT_ID'] = 'testtenantid' + self.request.headers['X_USER_ID'] = 'testuserid' + self.request.headers['X_PROJECT_NAME'] = 'testtenantname' + self.request.headers['X_USER_NAME'] = 'testusername' + response = self.request.get_response(self.middleware) + self.assertEqual(response.status, '200 OK') + self.assertEqual(self.context.user_id, 'testuserid') + self.assertEqual(self.context.user_name, 'testusername') + self.assertEqual(self.context.tenant_id, 'testtenantid') + self.assertEqual(self.context.tenant_name, 'testtenantname') + + def test_request_id_extracted_from_env(self): + req_id = 'dummy-request-id' + self.request.headers['X_PROJECT_ID'] = 'testtenantid' + self.request.headers['X_USER_ID'] = 'testuserid' + self.request.environ[request_id.ENV_REQUEST_ID] = req_id + self.request.get_response(self.middleware) + self.assertEqual(req_id, self.context.request_id) diff --git a/neutron/tests/unit/test_common_log.py b/neutron/tests/unit/test_common_log.py new file mode 100644 index 000000000..318613a1b --- /dev/null +++ b/neutron/tests/unit/test_common_log.py @@ -0,0 +1,70 @@ +# Copyright (c) 2013 OpenStack Foundation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import mock + +from neutron.common import log as call_log +from neutron.tests import base + + +MODULE_NAME = 'neutron.tests.unit.test_common_log' + + +class TargetKlass(object): + + @call_log.log + def test_method(self, arg1, arg2, *args, **kwargs): + pass + + +class TestCallLog(base.BaseTestCase): + def setUp(self): + super(TestCallLog, self).setUp() + self.klass = TargetKlass() + self.expected_format = ('%(class_name)s method %(method_name)s ' + 'called with arguments %(args)s %(kwargs)s') + self.expected_data = {'class_name': MODULE_NAME + '.TargetKlass', + 'method_name': 'test_method', + 'args': (), + 'kwargs': {}} + + def test_call_log_all_args(self): + self.expected_data['args'] = (10, 20) + with mock.patch.object(call_log.LOG, 'debug') as log_debug: + self.klass.test_method(10, 20) + log_debug.assert_called_once_with(self.expected_format, + self.expected_data) + + def test_call_log_all_kwargs(self): + self.expected_data['kwargs'] = {'arg1': 10, 'arg2': 20} + with mock.patch.object(call_log.LOG, 'debug') as log_debug: + self.klass.test_method(arg1=10, arg2=20) + log_debug.assert_called_once_with(self.expected_format, + self.expected_data) + + def test_call_log_known_args_unknown_args_kwargs(self): + self.expected_data['args'] = (10, 20, 30) + self.expected_data['kwargs'] = {'arg4': 40} + with mock.patch.object(call_log.LOG, 'debug') as log_debug: + self.klass.test_method(10, 20, 30, arg4=40) + log_debug.assert_called_once_with(self.expected_format, + self.expected_data) + + def test_call_log_known_args_kwargs_unknown_kwargs(self): + 
self.expected_data['args'] = (10,) + self.expected_data['kwargs'] = {'arg2': 20, 'arg3': 30, 'arg4': 40} + with mock.patch.object(call_log.LOG, 'debug') as log_debug: + self.klass.test_method(10, arg2=20, arg3=30, arg4=40) + log_debug.assert_called_once_with(self.expected_format, + self.expected_data) diff --git a/neutron/tests/unit/test_common_utils.py b/neutron/tests/unit/test_common_utils.py new file mode 100644 index 000000000..2bcd6b45e --- /dev/null +++ b/neutron/tests/unit/test_common_utils.py @@ -0,0 +1,383 @@ +# Copyright (c) 2012 OpenStack Foundation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import mock +import testtools + +from neutron.common import exceptions as n_exc +from neutron.common import utils +from neutron.plugins.common import utils as plugin_utils +from neutron.tests import base + + +class TestParseMappings(base.BaseTestCase): + def parse(self, mapping_list, unique_values=True): + return utils.parse_mappings(mapping_list, unique_values) + + def test_parse_mappings_fails_for_missing_separator(self): + with testtools.ExpectedException(ValueError): + self.parse(['key']) + + def test_parse_mappings_fails_for_missing_key(self): + with testtools.ExpectedException(ValueError): + self.parse([':val']) + + def test_parse_mappings_fails_for_missing_value(self): + with testtools.ExpectedException(ValueError): + self.parse(['key:']) + + def test_parse_mappings_fails_for_extra_separator(self): + with testtools.ExpectedException(ValueError): + self.parse(['key:val:junk']) + + def test_parse_mappings_fails_for_duplicate_key(self): + with testtools.ExpectedException(ValueError): + self.parse(['key:val1', 'key:val2']) + + def test_parse_mappings_fails_for_duplicate_value(self): + with testtools.ExpectedException(ValueError): + self.parse(['key1:val', 'key2:val']) + + def test_parse_mappings_succeeds_for_one_mapping(self): + self.assertEqual(self.parse(['key:val']), {'key': 'val'}) + + def test_parse_mappings_succeeds_for_n_mappings(self): + self.assertEqual(self.parse(['key1:val1', 'key2:val2']), + {'key1': 'val1', 'key2': 'val2'}) + + def test_parse_mappings_succeeds_for_duplicate_value(self): + self.assertEqual(self.parse(['key1:val', 'key2:val'], False), + {'key1': 'val', 'key2': 'val'}) + + def test_parse_mappings_succeeds_for_no_mappings(self): + self.assertEqual(self.parse(['']), {}) + + +class UtilTestParseVlanRanges(base.BaseTestCase): + _err_prefix = "Invalid network VLAN range: '" + _err_too_few = "' - 'need more than 2 values to unpack'" + _err_too_many = "' - 'too many values to unpack'" + _err_not_int = "' - 'invalid literal for int() with 
base 10: '%s''" + _err_bad_vlan = "' - '%s is not a valid VLAN tag'" + _err_range = "' - 'End of VLAN range is less than start of VLAN range'" + + def _range_too_few_err(self, nv_range): + return self._err_prefix + nv_range + self._err_too_few + + def _range_too_many_err(self, nv_range): + return self._err_prefix + nv_range + self._err_too_many + + def _vlan_not_int_err(self, nv_range, vlan): + return self._err_prefix + nv_range + (self._err_not_int % vlan) + + def _nrange_invalid_vlan(self, nv_range, n): + vlan = nv_range.split(':')[n] + v_range = ':'.join(nv_range.split(':')[1:]) + return self._err_prefix + v_range + (self._err_bad_vlan % vlan) + + def _vrange_invalid_vlan(self, v_range_tuple, n): + vlan = v_range_tuple[n - 1] + v_range_str = '%d:%d' % v_range_tuple + return self._err_prefix + v_range_str + (self._err_bad_vlan % vlan) + + def _vrange_invalid(self, v_range_tuple): + v_range_str = '%d:%d' % v_range_tuple + return self._err_prefix + v_range_str + self._err_range + + +class TestVlanRangeVerifyValid(UtilTestParseVlanRanges): + def verify_range(self, vlan_range): + return plugin_utils.verify_vlan_range(vlan_range) + + def test_range_valid_ranges(self): + self.assertIsNone(self.verify_range((1, 2))) + self.assertIsNone(self.verify_range((1, 1999))) + self.assertIsNone(self.verify_range((100, 100))) + self.assertIsNone(self.verify_range((100, 200))) + self.assertIsNone(self.verify_range((4001, 4094))) + self.assertIsNone(self.verify_range((1, 4094))) + + def check_one_vlan_invalid(self, bad_range, which): + expected_msg = self._vrange_invalid_vlan(bad_range, which) + err = self.assertRaises(n_exc.NetworkVlanRangeError, + self.verify_range, bad_range) + self.assertEqual(str(err), expected_msg) + + def test_range_first_vlan_invalid_negative(self): + self.check_one_vlan_invalid((-1, 199), 1) + + def test_range_first_vlan_invalid_zero(self): + self.check_one_vlan_invalid((0, 199), 1) + + def test_range_first_vlan_invalid_limit_plus_one(self): + 
self.check_one_vlan_invalid((4095, 199), 1) + + def test_range_first_vlan_invalid_too_big(self): + self.check_one_vlan_invalid((9999, 199), 1) + + def test_range_second_vlan_invalid_negative(self): + self.check_one_vlan_invalid((299, -1), 2) + + def test_range_second_vlan_invalid_zero(self): + self.check_one_vlan_invalid((299, 0), 2) + + def test_range_second_vlan_invalid_limit_plus_one(self): + self.check_one_vlan_invalid((299, 4095), 2) + + def test_range_second_vlan_invalid_too_big(self): + self.check_one_vlan_invalid((299, 9999), 2) + + def test_range_both_vlans_invalid_01(self): + self.check_one_vlan_invalid((-1, 0), 1) + + def test_range_both_vlans_invalid_02(self): + self.check_one_vlan_invalid((0, 4095), 1) + + def test_range_both_vlans_invalid_03(self): + self.check_one_vlan_invalid((4095, 9999), 1) + + def test_range_both_vlans_invalid_04(self): + self.check_one_vlan_invalid((9999, -1), 1) + + def test_range_reversed(self): + bad_range = (95, 10) + expected_msg = self._vrange_invalid(bad_range) + err = self.assertRaises(n_exc.NetworkVlanRangeError, + self.verify_range, bad_range) + self.assertEqual(str(err), expected_msg) + + +class TestParseOneVlanRange(UtilTestParseVlanRanges): + def parse_one(self, cfg_entry): + return plugin_utils.parse_network_vlan_range(cfg_entry) + + def test_parse_one_net_no_vlan_range(self): + config_str = "net1" + expected_networks = ("net1", None) + self.assertEqual(self.parse_one(config_str), expected_networks) + + def test_parse_one_net_and_vlan_range(self): + config_str = "net1:100:199" + expected_networks = ("net1", (100, 199)) + self.assertEqual(self.parse_one(config_str), expected_networks) + + def test_parse_one_net_incomplete_range(self): + config_str = "net1:100" + expected_msg = self._range_too_few_err(config_str) + err = self.assertRaises(n_exc.NetworkVlanRangeError, + self.parse_one, config_str) + self.assertEqual(str(err), expected_msg) + + def test_parse_one_net_range_too_many(self): + config_str = 
"net1:100:150:200" + expected_msg = self._range_too_many_err(config_str) + err = self.assertRaises(n_exc.NetworkVlanRangeError, + self.parse_one, config_str) + self.assertEqual(str(err), expected_msg) + + def test_parse_one_net_vlan1_not_int(self): + config_str = "net1:foo:199" + expected_msg = self._vlan_not_int_err(config_str, 'foo') + err = self.assertRaises(n_exc.NetworkVlanRangeError, + self.parse_one, config_str) + self.assertEqual(str(err), expected_msg) + + def test_parse_one_net_vlan2_not_int(self): + config_str = "net1:100:bar" + expected_msg = self._vlan_not_int_err(config_str, 'bar') + err = self.assertRaises(n_exc.NetworkVlanRangeError, + self.parse_one, config_str) + self.assertEqual(str(err), expected_msg) + + def test_parse_one_net_and_max_range(self): + config_str = "net1:1:4094" + expected_networks = ("net1", (1, 4094)) + self.assertEqual(self.parse_one(config_str), expected_networks) + + def test_parse_one_net_range_bad_vlan1(self): + config_str = "net1:9000:150" + expected_msg = self._nrange_invalid_vlan(config_str, 1) + err = self.assertRaises(n_exc.NetworkVlanRangeError, + self.parse_one, config_str) + self.assertEqual(str(err), expected_msg) + + def test_parse_one_net_range_bad_vlan2(self): + config_str = "net1:4000:4999" + expected_msg = self._nrange_invalid_vlan(config_str, 2) + err = self.assertRaises(n_exc.NetworkVlanRangeError, + self.parse_one, config_str) + self.assertEqual(str(err), expected_msg) + + +class TestParseVlanRangeList(UtilTestParseVlanRanges): + def parse_list(self, cfg_entries): + return plugin_utils.parse_network_vlan_ranges(cfg_entries) + + def test_parse_list_one_net_no_vlan_range(self): + config_list = ["net1"] + expected_networks = {"net1": []} + self.assertEqual(self.parse_list(config_list), expected_networks) + + def test_parse_list_one_net_vlan_range(self): + config_list = ["net1:100:199"] + expected_networks = {"net1": [(100, 199)]} + self.assertEqual(self.parse_list(config_list), expected_networks) + + def 
test_parse_two_nets_no_vlan_range(self): + config_list = ["net1", + "net2"] + expected_networks = {"net1": [], + "net2": []} + self.assertEqual(self.parse_list(config_list), expected_networks) + + def test_parse_two_nets_range_and_no_range(self): + config_list = ["net1:100:199", + "net2"] + expected_networks = {"net1": [(100, 199)], + "net2": []} + self.assertEqual(self.parse_list(config_list), expected_networks) + + def test_parse_two_nets_no_range_and_range(self): + config_list = ["net1", + "net2:200:299"] + expected_networks = {"net1": [], + "net2": [(200, 299)]} + self.assertEqual(self.parse_list(config_list), expected_networks) + + def test_parse_two_nets_bad_vlan_range1(self): + config_list = ["net1:100", + "net2:200:299"] + expected_msg = self._range_too_few_err(config_list[0]) + err = self.assertRaises(n_exc.NetworkVlanRangeError, + self.parse_list, config_list) + self.assertEqual(str(err), expected_msg) + + def test_parse_two_nets_vlan_not_int2(self): + config_list = ["net1:100:199", + "net2:200:0x200"] + expected_msg = self._vlan_not_int_err(config_list[1], '0x200') + err = self.assertRaises(n_exc.NetworkVlanRangeError, + self.parse_list, config_list) + self.assertEqual(str(err), expected_msg) + + def test_parse_two_nets_and_append_1_2(self): + config_list = ["net1:100:199", + "net1:1000:1099", + "net2:200:299"] + expected_networks = {"net1": [(100, 199), + (1000, 1099)], + "net2": [(200, 299)]} + self.assertEqual(self.parse_list(config_list), expected_networks) + + def test_parse_two_nets_and_append_1_3(self): + config_list = ["net1:100:199", + "net2:200:299", + "net1:1000:1099"] + expected_networks = {"net1": [(100, 199), + (1000, 1099)], + "net2": [(200, 299)]} + self.assertEqual(self.parse_list(config_list), expected_networks) + + +class TestDictUtils(base.BaseTestCase): + def test_dict2str(self): + dic = {"key1": "value1", "key2": "value2", "key3": "value3"} + expected = "key1=value1,key2=value2,key3=value3" + self.assertEqual(utils.dict2str(dic), 
expected) + + def test_str2dict(self): + string = "key1=value1,key2=value2,key3=value3" + expected = {"key1": "value1", "key2": "value2", "key3": "value3"} + self.assertEqual(utils.str2dict(string), expected) + + def test_dict_str_conversion(self): + dic = {"key1": "value1", "key2": "value2"} + self.assertEqual(utils.str2dict(utils.dict2str(dic)), dic) + + def test_diff_list_of_dict(self): + old_list = [{"key1": "value1"}, + {"key2": "value2"}, + {"key3": "value3"}] + new_list = [{"key1": "value1"}, + {"key2": "value2"}, + {"key4": "value4"}] + added, removed = utils.diff_list_of_dict(old_list, new_list) + self.assertEqual(added, [dict(key4="value4")]) + self.assertEqual(removed, [dict(key3="value3")]) + + +class _CachingDecorator(object): + def __init__(self): + self.func_retval = 'bar' + self._cache = mock.Mock() + + @utils.cache_method_results + def func(self, *args, **kwargs): + return self.func_retval + + +class TestCachingDecorator(base.BaseTestCase): + def setUp(self): + super(TestCachingDecorator, self).setUp() + self.decor = _CachingDecorator() + self.func_name = '%(module)s._CachingDecorator.func' % { + 'module': self.__module__ + } + self.not_cached = self.decor.func.func.im_self._not_cached + + def test_cache_miss(self): + expected_key = (self.func_name, 1, 2, ('foo', 'bar')) + args = (1, 2) + kwargs = {'foo': 'bar'} + self.decor._cache.get.return_value = self.not_cached + retval = self.decor.func(*args, **kwargs) + self.decor._cache.set.assert_called_once_with( + expected_key, self.decor.func_retval, None) + self.assertEqual(self.decor.func_retval, retval) + + def test_cache_hit(self): + expected_key = (self.func_name, 1, 2, ('foo', 'bar')) + args = (1, 2) + kwargs = {'foo': 'bar'} + retval = self.decor.func(*args, **kwargs) + self.assertFalse(self.decor._cache.set.called) + self.assertEqual(self.decor._cache.get.return_value, retval) + self.decor._cache.get.assert_called_once_with(expected_key, + self.not_cached) + + def test_get_unhashable(self): + 
expected_key = (self.func_name, [1], 2) + self.decor._cache.get.side_effect = TypeError + retval = self.decor.func([1], 2) + self.assertFalse(self.decor._cache.set.called) + self.assertEqual(self.decor.func_retval, retval) + self.decor._cache.get.assert_called_once_with(expected_key, + self.not_cached) + + def test_missing_cache(self): + delattr(self.decor, '_cache') + self.assertRaises(NotImplementedError, self.decor.func, (1, 2)) + + def test_no_cache(self): + self.decor._cache = False + retval = self.decor.func((1, 2)) + self.assertEqual(self.decor.func_retval, retval) + + +class TestDict2Tuples(base.BaseTestCase): + def test_dict(self): + input_dict = {'foo': 'bar', 42: 'baz', 'aaa': 'zzz'} + expected = ((42, 'baz'), ('aaa', 'zzz'), ('foo', 'bar')) + output_tuple = utils.dict2tuple(input_dict) + self.assertEqual(expected, output_tuple) diff --git a/neutron/tests/unit/test_config.py b/neutron/tests/unit/test_config.py new file mode 100644 index 000000000..4a93ed6fe --- /dev/null +++ b/neutron/tests/unit/test_config.py @@ -0,0 +1,55 @@ +# Copyright (c) 2012 OpenStack Foundation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import os + +import mock +from oslo.config import cfg + +from neutron.common import config # noqa +from neutron.tests import base + + +class ConfigurationTest(base.BaseTestCase): + + def test_defaults(self): + self.assertEqual('0.0.0.0', cfg.CONF.bind_host) + self.assertEqual(9696, cfg.CONF.bind_port) + self.assertEqual('api-paste.ini', cfg.CONF.api_paste_config) + self.assertEqual('', cfg.CONF.api_extensions_path) + self.assertEqual('policy.json', cfg.CONF.policy_file) + self.assertEqual('keystone', cfg.CONF.auth_strategy) + self.assertIsNone(cfg.CONF.core_plugin) + self.assertEqual(0, len(cfg.CONF.service_plugins)) + self.assertEqual('fa:16:3e:00:00:00', cfg.CONF.base_mac) + self.assertEqual(16, cfg.CONF.mac_generation_retries) + self.assertTrue(cfg.CONF.allow_bulk) + self.assertEqual(5, cfg.CONF.max_dns_nameservers) + self.assertEqual(20, cfg.CONF.max_subnet_host_routes) + relative_dir = os.path.join(os.path.dirname(__file__), + '..', '..', '..') + absolute_dir = os.path.abspath(relative_dir) + self.assertEqual(absolute_dir, cfg.CONF.state_path) + self.assertEqual(86400, cfg.CONF.dhcp_lease_duration) + self.assertFalse(cfg.CONF.allow_overlapping_ips) + self.assertEqual('neutron', cfg.CONF.control_exchange) + + def test_load_paste_app_not_found(self): + self.config(api_paste_config='no_such_file.conf') + with mock.patch.object(cfg.CONF, 'find_file', return_value=None) as ff: + e = self.assertRaises(cfg.ConfigFilesNotFoundError, + config.load_paste_app, 'app') + ff.assert_called_once_with('no_such_file.conf') + self.assertEqual(['no_such_file.conf'], e.config_files) diff --git a/neutron/tests/unit/test_db_migration.py b/neutron/tests/unit/test_db_migration.py new file mode 100644 index 000000000..20f979648 --- /dev/null +++ b/neutron/tests/unit/test_db_migration.py @@ -0,0 +1,184 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2012 New Dream Network, LLC (DreamHost) +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +# @author Mark McClain (DreamHost) + +import sys + +import mock + +from neutron.db import migration +from neutron.db.migration import cli +from neutron.tests import base + + +class TestDbMigration(base.BaseTestCase): + def test_should_run_plugin_in_list(self): + self.assertTrue(migration.should_run(['foo'], ['foo', 'bar'])) + self.assertFalse(migration.should_run(['foo'], ['bar'])) + + def test_should_run_plugin_wildcard(self): + self.assertTrue(migration.should_run(['foo'], ['*'])) + + +class TestCli(base.BaseTestCase): + def setUp(self): + super(TestCli, self).setUp() + self.do_alembic_cmd_p = mock.patch.object(cli, 'do_alembic_command') + self.do_alembic_cmd = self.do_alembic_cmd_p.start() + self.mock_alembic_err = mock.patch('alembic.util.err').start() + self.mock_alembic_err.side_effect = SystemExit + + def _main_test_helper(self, argv, func_name, exp_args=(), exp_kwargs={}): + with mock.patch.object(sys, 'argv', argv): + cli.main() + self.do_alembic_cmd.assert_has_calls( + [mock.call(mock.ANY, func_name, *exp_args, **exp_kwargs)] + ) + + def test_stamp(self): + self._main_test_helper( + ['prog', 'stamp', 'foo'], + 'stamp', + ('foo',), + {'sql': False} + ) + + self._main_test_helper( + ['prog', 'stamp', 'foo', '--sql'], + 'stamp', + ('foo',), + {'sql': True} + ) + + def test_current(self): + self._main_test_helper(['prog', 'current'], 'current') + + def test_history(self): + self._main_test_helper(['prog', 
'history'], 'history') + + def test_check_migration(self): + with mock.patch.object(cli, 'validate_head_file') as validate: + self._main_test_helper(['prog', 'check_migration'], 'branches') + validate.assert_called_once_with(mock.ANY) + + def test_database_sync_revision(self): + with mock.patch.object(cli, 'update_head_file') as update: + self._main_test_helper( + ['prog', 'revision', '--autogenerate', '-m', 'message'], + 'revision', + (), + {'message': 'message', 'sql': False, 'autogenerate': True} + ) + update.assert_called_once_with(mock.ANY) + + update.reset_mock() + self._main_test_helper( + ['prog', 'revision', '--sql', '-m', 'message'], + 'revision', + (), + {'message': 'message', 'sql': True, 'autogenerate': False} + ) + update.assert_called_once_with(mock.ANY) + + def test_upgrade(self): + self._main_test_helper( + ['prog', 'upgrade', '--sql', 'head'], + 'upgrade', + ('head',), + {'sql': True} + ) + + self._main_test_helper( + ['prog', 'upgrade', '--delta', '3'], + 'upgrade', + ('+3',), + {'sql': False} + ) + + def test_downgrade(self): + self._main_test_helper( + ['prog', 'downgrade', '--sql', 'folsom'], + 'downgrade', + ('folsom',), + {'sql': True} + ) + + self._main_test_helper( + ['prog', 'downgrade', '--delta', '2'], + 'downgrade', + ('-2',), + {'sql': False} + ) + + def _test_validate_head_file_helper(self, heads, file_content=None): + with mock.patch('alembic.script.ScriptDirectory.from_config') as fc: + fc.return_value.get_heads.return_value = heads + fc.return_value.get_current_head.return_value = heads[0] + with mock.patch('__builtin__.open') as mock_open: + mock_open.return_value.__enter__ = lambda s: s + mock_open.return_value.__exit__ = mock.Mock() + mock_open.return_value.read.return_value = file_content + + with mock.patch('os.path.isfile') as is_file: + is_file.return_value = file_content is not None + + if file_content in heads: + cli.validate_head_file(mock.sentinel.config) + else: + self.assertRaises( + SystemExit, + 
cli.validate_head_file, + mock.sentinel.config + ) + self.mock_alembic_err.assert_called_once_with(mock.ANY) + fc.assert_called_once_with(mock.sentinel.config) + + def test_validate_head_file_multiple_heads(self): + self._test_validate_head_file_helper(['a', 'b']) + + def test_validate_head_file_missing_file(self): + self._test_validate_head_file_helper(['a']) + + def test_validate_head_file_wrong_contents(self): + self._test_validate_head_file_helper(['a'], 'b') + + def test_validate_head_success(self): + self._test_validate_head_file_helper(['a'], 'a') + + def test_update_head_file_multiple_heads(self): + with mock.patch('alembic.script.ScriptDirectory.from_config') as fc: + fc.return_value.get_heads.return_value = ['a', 'b'] + self.assertRaises( + SystemExit, + cli.update_head_file, + mock.sentinel.config + ) + self.mock_alembic_err.assert_called_once_with(mock.ANY) + fc.assert_called_once_with(mock.sentinel.config) + + def test_update_head_file_success(self): + with mock.patch('alembic.script.ScriptDirectory.from_config') as fc: + fc.return_value.get_heads.return_value = ['a'] + fc.return_value.get_current_head.return_value = 'a' + with mock.patch('__builtin__.open') as mock_open: + mock_open.return_value.__enter__ = lambda s: s + mock_open.return_value.__exit__ = mock.Mock() + + cli.update_head_file(mock.sentinel.config) + mock_open.return_value.write.assert_called_once_with('a') + fc.assert_called_once_with(mock.sentinel.config) diff --git a/neutron/tests/unit/test_db_plugin.py b/neutron/tests/unit/test_db_plugin.py new file mode 100644 index 000000000..cbc901ec8 --- /dev/null +++ b/neutron/tests/unit/test_db_plugin.py @@ -0,0 +1,3982 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2012 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import contextlib +import copy + +import mock +from oslo.config import cfg +from testtools import matchers +import webob.exc + +import neutron +from neutron.api import api_common +from neutron.api import extensions +from neutron.api.v2 import attributes +from neutron.api.v2 import router +from neutron.common import constants +from neutron.common import exceptions as n_exc +from neutron.common import ipv6_utils +from neutron.common import test_lib +from neutron.common import utils +from neutron import context +from neutron.db import api as db +from neutron.db import db_base_plugin_v2 +from neutron.db import models_v2 +from neutron import manager +from neutron.openstack.common import importutils +from neutron.tests import base +from neutron.tests.unit import test_extensions +from neutron.tests.unit import testlib_api + +DB_PLUGIN_KLASS = 'neutron.db.db_base_plugin_v2.NeutronDbPluginV2' + + +def optional_ctx(obj, fallback): + if not obj: + return fallback() + + @contextlib.contextmanager + def context_wrapper(): + yield obj + return context_wrapper() + + +def _fake_get_pagination_helper(self, request): + return api_common.PaginationEmulatedHelper(request, self._primary_key) + + +def _fake_get_sorting_helper(self, request): + return api_common.SortingEmulatedHelper(request, self._attr_info) + + +class NeutronDbPluginV2TestCase(testlib_api.WebTestCase): + fmt = 'json' + resource_prefix_map = {} + + def setUp(self, plugin=None, service_plugins=None, + ext_mgr=None): + + super(NeutronDbPluginV2TestCase, self).setUp() + 
cfg.CONF.set_override('notify_nova_on_port_status_changes', False) + # Make sure at each test according extensions for the plugin is loaded + extensions.PluginAwareExtensionManager._instance = None + # Save the attributes map in case the plugin will alter it + # loading extensions + # Note(salvatore-orlando): shallow copy is not good enough in + # this case, but copy.deepcopy does not seem to work, since it + # causes test failures + self._attribute_map_bk = {} + for item in attributes.RESOURCE_ATTRIBUTE_MAP: + self._attribute_map_bk[item] = (attributes. + RESOURCE_ATTRIBUTE_MAP[item]. + copy()) + self._tenant_id = 'test-tenant' + + if not plugin: + plugin = DB_PLUGIN_KLASS + + # Create the default configurations + args = ['--config-file', base.etcdir('neutron.conf.test')] + # If test_config specifies some config-file, use it, as well + for config_file in test_lib.test_config.get('config_files', []): + args.extend(['--config-file', config_file]) + self.config_parse(args=args) + # Update the plugin + self.setup_coreplugin(plugin) + cfg.CONF.set_override( + 'service_plugins', + [test_lib.test_config.get(key, default) + for key, default in (service_plugins or {}).iteritems()] + ) + + cfg.CONF.set_override('base_mac', "12:34:56:78:90:ab") + cfg.CONF.set_override('max_dns_nameservers', 2) + cfg.CONF.set_override('max_subnet_host_routes', 2) + cfg.CONF.set_override('allow_pagination', True) + cfg.CONF.set_override('allow_sorting', True) + self.api = router.APIRouter() + # Set the defualt status + self.net_create_status = 'ACTIVE' + self.port_create_status = 'ACTIVE' + + def _is_native_bulk_supported(): + plugin_obj = manager.NeutronManager.get_plugin() + native_bulk_attr_name = ("_%s__native_bulk_support" + % plugin_obj.__class__.__name__) + return getattr(plugin_obj, native_bulk_attr_name, False) + + self._skip_native_bulk = not _is_native_bulk_supported() + + def _is_native_pagination_support(): + native_pagination_attr_name = ( + "_%s__native_pagination_support" % + 
manager.NeutronManager.get_plugin().__class__.__name__) + return (cfg.CONF.allow_pagination and + getattr(manager.NeutronManager.get_plugin(), + native_pagination_attr_name, False)) + + self._skip_native_pagination = not _is_native_pagination_support() + + def _is_native_sorting_support(): + native_sorting_attr_name = ( + "_%s__native_sorting_support" % + manager.NeutronManager.get_plugin().__class__.__name__) + return (cfg.CONF.allow_sorting and + getattr(manager.NeutronManager.get_plugin(), + native_sorting_attr_name, False)) + + self._skip_native_sorting = not _is_native_sorting_support() + if ext_mgr: + self.ext_api = test_extensions.setup_extensions_middleware(ext_mgr) + + def tearDown(self): + self.api = None + self._deserializers = None + self._skip_native_bulk = None + self._skip_native_pagination = None + self._skip_native_sortin = None + self.ext_api = None + # NOTE(jkoelker) for a 'pluggable' framework, Neutron sure + # doesn't like when the plugin changes ;) + db.clear_db() + # Restore the original attribute map + attributes.RESOURCE_ATTRIBUTE_MAP = self._attribute_map_bk + super(NeutronDbPluginV2TestCase, self).tearDown() + + def _req(self, method, resource, data=None, fmt=None, id=None, params=None, + action=None, subresource=None, sub_id=None, context=None): + fmt = fmt or self.fmt + + path = '/%s.%s' % ( + '/'.join(p for p in + (resource, id, subresource, sub_id, action) if p), + fmt + ) + + prefix = self.resource_prefix_map.get(resource) + if prefix: + path = prefix + path + + content_type = 'application/%s' % fmt + body = None + if data is not None: # empty dict is valid + body = self.serialize(data) + return testlib_api.create_request(path, body, content_type, method, + query_string=params, context=context) + + def new_create_request(self, resource, data, fmt=None, id=None, + subresource=None): + return self._req('POST', resource, data, fmt, id=id, + subresource=subresource) + + def new_list_request(self, resource, fmt=None, params=None, + 
subresource=None): + return self._req( + 'GET', resource, None, fmt, params=params, subresource=subresource + ) + + def new_show_request(self, resource, id, fmt=None, + subresource=None, fields=None): + if fields: + params = "&".join(["fields=%s" % x for x in fields]) + else: + params = None + return self._req('GET', resource, None, fmt, id=id, + params=params, subresource=subresource) + + def new_delete_request(self, resource, id, fmt=None, subresource=None, + sub_id=None): + return self._req( + 'DELETE', + resource, + None, + fmt, + id=id, + subresource=subresource, + sub_id=sub_id + ) + + def new_update_request(self, resource, data, id, fmt=None, + subresource=None, context=None): + return self._req( + 'PUT', resource, data, fmt, id=id, subresource=subresource, + context=context + ) + + def new_action_request(self, resource, data, id, action, fmt=None, + subresource=None): + return self._req( + 'PUT', + resource, + data, + fmt, + id=id, + action=action, + subresource=subresource + ) + + def deserialize(self, content_type, response): + ctype = 'application/%s' % content_type + data = self._deserializers[ctype].deserialize(response.body)['body'] + return data + + def _create_bulk_from_list(self, fmt, resource, objects, **kwargs): + """Creates a bulk request from a list of objects.""" + collection = "%ss" % resource + req_data = {collection: objects} + req = self.new_create_request(collection, req_data, fmt) + if ('set_context' in kwargs and + kwargs['set_context'] is True and + 'tenant_id' in kwargs): + # create a specific auth context for this request + req.environ['neutron.context'] = context.Context( + '', kwargs['tenant_id']) + elif 'context' in kwargs: + req.environ['neutron.context'] = kwargs['context'] + return req.get_response(self.api) + + def _create_bulk(self, fmt, number, resource, data, name='test', **kwargs): + """Creates a bulk request for any kind of resource.""" + objects = [] + collection = "%ss" % resource + for i in range(number): + obj = 
copy.deepcopy(data) + obj[resource]['name'] = "%s_%s" % (name, i) + if 'override' in kwargs and i in kwargs['override']: + obj[resource].update(kwargs['override'][i]) + objects.append(obj) + req_data = {collection: objects} + req = self.new_create_request(collection, req_data, fmt) + if ('set_context' in kwargs and + kwargs['set_context'] is True and + 'tenant_id' in kwargs): + # create a specific auth context for this request + req.environ['neutron.context'] = context.Context( + '', kwargs['tenant_id']) + elif 'context' in kwargs: + req.environ['neutron.context'] = kwargs['context'] + return req.get_response(self.api) + + def _create_network(self, fmt, name, admin_state_up, + arg_list=None, **kwargs): + data = {'network': {'name': name, + 'admin_state_up': admin_state_up, + 'tenant_id': self._tenant_id}} + for arg in (('admin_state_up', 'tenant_id', 'shared') + + (arg_list or ())): + # Arg must be present + if arg in kwargs: + data['network'][arg] = kwargs[arg] + network_req = self.new_create_request('networks', data, fmt) + if (kwargs.get('set_context') and 'tenant_id' in kwargs): + # create a specific auth context for this request + network_req.environ['neutron.context'] = context.Context( + '', kwargs['tenant_id']) + + return network_req.get_response(self.api) + + def _create_network_bulk(self, fmt, number, name, + admin_state_up, **kwargs): + base_data = {'network': {'admin_state_up': admin_state_up, + 'tenant_id': self._tenant_id}} + return self._create_bulk(fmt, number, 'network', base_data, **kwargs) + + def _create_subnet(self, fmt, net_id, cidr, + expected_res_status=None, **kwargs): + data = {'subnet': {'network_id': net_id, + 'cidr': cidr, + 'ip_version': 4, + 'tenant_id': self._tenant_id}} + for arg in ('ip_version', 'tenant_id', + 'enable_dhcp', 'allocation_pools', + 'dns_nameservers', 'host_routes', + 'shared', 'ipv6_ra_mode', 'ipv6_address_mode'): + # Arg must be present and not null (but can be false) + if arg in kwargs and kwargs[arg] is not None: 
+ data['subnet'][arg] = kwargs[arg] + + if ('gateway_ip' in kwargs and + kwargs['gateway_ip'] is not attributes.ATTR_NOT_SPECIFIED): + data['subnet']['gateway_ip'] = kwargs['gateway_ip'] + + subnet_req = self.new_create_request('subnets', data, fmt) + if (kwargs.get('set_context') and 'tenant_id' in kwargs): + # create a specific auth context for this request + subnet_req.environ['neutron.context'] = context.Context( + '', kwargs['tenant_id']) + + subnet_res = subnet_req.get_response(self.api) + if expected_res_status: + self.assertEqual(subnet_res.status_int, expected_res_status) + return subnet_res + + def _create_subnet_bulk(self, fmt, number, net_id, name, + ip_version=4, **kwargs): + base_data = {'subnet': {'network_id': net_id, + 'ip_version': ip_version, + 'tenant_id': self._tenant_id}} + # auto-generate cidrs as they should not overlap + overrides = dict((k, v) + for (k, v) in zip(range(number), + [{'cidr': "10.0.%s.0/24" % num} + for num in range(number)])) + kwargs.update({'override': overrides}) + return self._create_bulk(fmt, number, 'subnet', base_data, **kwargs) + + def _create_port(self, fmt, net_id, expected_res_status=None, + arg_list=None, **kwargs): + data = {'port': {'network_id': net_id, + 'tenant_id': self._tenant_id}} + + for arg in (('admin_state_up', 'device_id', + 'mac_address', 'name', 'fixed_ips', + 'tenant_id', 'device_owner', 'security_groups') + + (arg_list or ())): + # Arg must be present + if arg in kwargs: + data['port'][arg] = kwargs[arg] + # create a dhcp port device id if one hasn't been supplied + if ('device_owner' in kwargs and + kwargs['device_owner'] == constants.DEVICE_OWNER_DHCP and + 'host' in kwargs and + not 'device_id' in kwargs): + device_id = utils.get_dhcp_agent_device_id(net_id, kwargs['host']) + data['port']['device_id'] = device_id + port_req = self.new_create_request('ports', data, fmt) + if (kwargs.get('set_context') and 'tenant_id' in kwargs): + # create a specific auth context for this request + 
port_req.environ['neutron.context'] = context.Context( + '', kwargs['tenant_id']) + + port_res = port_req.get_response(self.api) + if expected_res_status: + self.assertEqual(port_res.status_int, expected_res_status) + return port_res + + def _list_ports(self, fmt, expected_res_status=None, + net_id=None, **kwargs): + query_params = None + if net_id: + query_params = "network_id=%s" % net_id + port_req = self.new_list_request('ports', fmt, query_params) + if ('set_context' in kwargs and + kwargs['set_context'] is True and + 'tenant_id' in kwargs): + # create a specific auth context for this request + port_req.environ['neutron.context'] = context.Context( + '', kwargs['tenant_id']) + + port_res = port_req.get_response(self.api) + if expected_res_status: + self.assertEqual(port_res.status_int, expected_res_status) + return port_res + + def _create_port_bulk(self, fmt, number, net_id, name, + admin_state_up, **kwargs): + base_data = {'port': {'network_id': net_id, + 'admin_state_up': admin_state_up, + 'tenant_id': self._tenant_id}} + return self._create_bulk(fmt, number, 'port', base_data, **kwargs) + + def _make_network(self, fmt, name, admin_state_up, **kwargs): + res = self._create_network(fmt, name, admin_state_up, **kwargs) + # TODO(salvatore-orlando): do exception handling in this test module + # in a uniform way (we do it differently for ports, subnets, and nets + # Things can go wrong - raise HTTP exc with res code only + # so it can be caught by unit tests + if res.status_int >= webob.exc.HTTPClientError.code: + raise webob.exc.HTTPClientError(code=res.status_int) + return self.deserialize(fmt, res) + + def _make_subnet(self, fmt, network, gateway, cidr, + allocation_pools=None, ip_version=4, enable_dhcp=True, + dns_nameservers=None, host_routes=None, shared=None, + ipv6_ra_mode=None, ipv6_address_mode=None): + res = self._create_subnet(fmt, + net_id=network['network']['id'], + cidr=cidr, + gateway_ip=gateway, + tenant_id=network['network']['tenant_id'], + 
allocation_pools=allocation_pools, + ip_version=ip_version, + enable_dhcp=enable_dhcp, + dns_nameservers=dns_nameservers, + host_routes=host_routes, + shared=shared, + ipv6_ra_mode=ipv6_ra_mode, + ipv6_address_mode=ipv6_address_mode) + # Things can go wrong - raise HTTP exc with res code only + # so it can be caught by unit tests + if res.status_int >= webob.exc.HTTPClientError.code: + raise webob.exc.HTTPClientError(code=res.status_int) + return self.deserialize(fmt, res) + + def _make_port(self, fmt, net_id, expected_res_status=None, **kwargs): + res = self._create_port(fmt, net_id, expected_res_status, **kwargs) + # Things can go wrong - raise HTTP exc with res code only + # so it can be caught by unit tests + if res.status_int >= webob.exc.HTTPClientError.code: + raise webob.exc.HTTPClientError(code=res.status_int) + return self.deserialize(fmt, res) + + def _api_for_resource(self, resource): + if resource in ['networks', 'subnets', 'ports']: + return self.api + else: + return self.ext_api + + def _delete(self, collection, id, + expected_code=webob.exc.HTTPNoContent.code, + neutron_context=None): + req = self.new_delete_request(collection, id) + if neutron_context: + # create a specific auth context for this request + req.environ['neutron.context'] = neutron_context + res = req.get_response(self._api_for_resource(collection)) + self.assertEqual(res.status_int, expected_code) + + def _show(self, resource, id, + expected_code=webob.exc.HTTPOk.code, + neutron_context=None): + req = self.new_show_request(resource, id) + if neutron_context: + # create a specific auth context for this request + req.environ['neutron.context'] = neutron_context + res = req.get_response(self._api_for_resource(resource)) + self.assertEqual(res.status_int, expected_code) + return self.deserialize(self.fmt, res) + + def _update(self, resource, id, new_data, + expected_code=webob.exc.HTTPOk.code, + neutron_context=None): + req = self.new_update_request(resource, new_data, id) + if 
neutron_context: + # create a specific auth context for this request + req.environ['neutron.context'] = neutron_context + res = req.get_response(self._api_for_resource(resource)) + self.assertEqual(res.status_int, expected_code) + return self.deserialize(self.fmt, res) + + def _list(self, resource, fmt=None, neutron_context=None, + query_params=None): + fmt = fmt or self.fmt + req = self.new_list_request(resource, fmt, query_params) + if neutron_context: + req.environ['neutron.context'] = neutron_context + res = req.get_response(self._api_for_resource(resource)) + self.assertEqual(res.status_int, webob.exc.HTTPOk.code) + return self.deserialize(fmt, res) + + def _fail_second_call(self, patched_plugin, orig, *args, **kwargs): + """Invoked by test cases for injecting failures in plugin.""" + def second_call(*args, **kwargs): + raise n_exc.NeutronException() + patched_plugin.side_effect = second_call + return orig(*args, **kwargs) + + def _validate_behavior_on_bulk_failure( + self, res, collection, + errcode=webob.exc.HTTPClientError.code): + self.assertEqual(res.status_int, errcode) + req = self.new_list_request(collection) + res = req.get_response(self.api) + self.assertEqual(res.status_int, webob.exc.HTTPOk.code) + items = self.deserialize(self.fmt, res) + self.assertEqual(len(items[collection]), 0) + + def _validate_behavior_on_bulk_success(self, res, collection, + names=['test_0', 'test_1']): + self.assertEqual(res.status_int, webob.exc.HTTPCreated.code) + items = self.deserialize(self.fmt, res)[collection] + self.assertEqual(len(items), 2) + self.assertEqual(items[0]['name'], 'test_0') + self.assertEqual(items[1]['name'], 'test_1') + + def _test_list_resources(self, resource, items, neutron_context=None, + query_params=None): + res = self._list('%ss' % resource, + neutron_context=neutron_context, + query_params=query_params) + resource = resource.replace('-', '_') + self.assertEqual(sorted([i['id'] for i in res['%ss' % resource]]), + sorted([i[resource]['id'] 
for i in items])) + + @contextlib.contextmanager + def network(self, name='net1', + admin_state_up=True, + fmt=None, + do_delete=True, + **kwargs): + network = self._make_network(fmt or self.fmt, name, + admin_state_up, **kwargs) + yield network + if do_delete: + # The do_delete parameter allows you to control whether the + # created network is immediately deleted again. Therefore, this + # function is also usable in tests, which require the creation + # of many networks. + self._delete('networks', network['network']['id']) + + @contextlib.contextmanager + def subnet(self, network=None, + gateway_ip=attributes.ATTR_NOT_SPECIFIED, + cidr='10.0.0.0/24', + fmt=None, + ip_version=4, + allocation_pools=None, + enable_dhcp=True, + dns_nameservers=None, + host_routes=None, + shared=None, + do_delete=True, + ipv6_ra_mode=None, + ipv6_address_mode=None): + with optional_ctx(network, self.network) as network_to_use: + subnet = self._make_subnet(fmt or self.fmt, + network_to_use, + gateway_ip, + cidr, + allocation_pools, + ip_version, + enable_dhcp, + dns_nameservers, + host_routes, + shared=shared, + ipv6_ra_mode=ipv6_ra_mode, + ipv6_address_mode=ipv6_address_mode) + yield subnet + if do_delete: + self._delete('subnets', subnet['subnet']['id']) + + @contextlib.contextmanager + def port(self, subnet=None, fmt=None, no_delete=False, + **kwargs): + with optional_ctx(subnet, self.subnet) as subnet_to_use: + net_id = subnet_to_use['subnet']['network_id'] + port = self._make_port(fmt or self.fmt, net_id, **kwargs) + yield port + if not no_delete: + self._delete('ports', port['port']['id']) + + def _test_list_with_sort(self, resource, + items, sorts, resources=None, query_params=''): + query_str = query_params + for key, direction in sorts: + query_str = query_str + "&sort_key=%s&sort_dir=%s" % (key, + direction) + if not resources: + resources = '%ss' % resource + req = self.new_list_request(resources, + params=query_str) + api = self._api_for_resource(resources) + res = 
self.deserialize(self.fmt, req.get_response(api)) + resource = resource.replace('-', '_') + resources = resources.replace('-', '_') + expected_res = [item[resource]['id'] for item in items] + self.assertEqual(sorted([n['id'] for n in res[resources]]), + sorted(expected_res)) + + def _test_list_with_pagination(self, resource, items, sort, + limit, expected_page_num, + resources=None, + query_params='', + verify_key='id'): + if not resources: + resources = '%ss' % resource + query_str = query_params + '&' if query_params else '' + query_str = query_str + ("limit=%s&sort_key=%s&" + "sort_dir=%s") % (limit, sort[0], sort[1]) + req = self.new_list_request(resources, params=query_str) + items_res = [] + page_num = 0 + api = self._api_for_resource(resources) + resource = resource.replace('-', '_') + resources = resources.replace('-', '_') + while req: + page_num = page_num + 1 + res = self.deserialize(self.fmt, req.get_response(api)) + self.assertThat(len(res[resources]), + matchers.LessThan(limit + 1)) + items_res = items_res + res[resources] + req = None + if '%s_links' % resources in res: + for link in res['%s_links' % resources]: + if link['rel'] == 'next': + content_type = 'application/%s' % self.fmt + req = testlib_api.create_request(link['href'], + '', content_type) + self.assertEqual(len(res[resources]), + limit) + self.assertEqual(page_num, expected_page_num) + self.assertEqual(sorted([n[verify_key] for n in items_res]), + sorted([item[resource][verify_key] + for item in items])) + + def _test_list_with_pagination_reverse(self, resource, items, sort, + limit, expected_page_num, + resources=None, + query_params=''): + if not resources: + resources = '%ss' % resource + resource = resource.replace('-', '_') + api = self._api_for_resource(resources) + marker = items[-1][resource]['id'] + query_str = query_params + '&' if query_params else '' + query_str = query_str + ("limit=%s&page_reverse=True&" + "sort_key=%s&sort_dir=%s&" + "marker=%s") % (limit, sort[0], 
sort[1], + marker) + req = self.new_list_request(resources, params=query_str) + item_res = [items[-1][resource]] + page_num = 0 + resources = resources.replace('-', '_') + while req: + page_num = page_num + 1 + res = self.deserialize(self.fmt, req.get_response(api)) + self.assertThat(len(res[resources]), + matchers.LessThan(limit + 1)) + res[resources].reverse() + item_res = item_res + res[resources] + req = None + if '%s_links' % resources in res: + for link in res['%s_links' % resources]: + if link['rel'] == 'previous': + content_type = 'application/%s' % self.fmt + req = testlib_api.create_request(link['href'], + '', content_type) + self.assertEqual(len(res[resources]), + limit) + self.assertEqual(page_num, expected_page_num) + expected_res = [item[resource]['id'] for item in items] + expected_res.reverse() + self.assertEqual(sorted([n['id'] for n in item_res]), + sorted(expected_res)) + + +class TestBasicGet(NeutronDbPluginV2TestCase): + + def test_single_get_admin(self): + plugin = neutron.db.db_base_plugin_v2.NeutronDbPluginV2() + with self.network() as network: + net_id = network['network']['id'] + ctx = context.get_admin_context() + n = plugin._get_network(ctx, net_id) + self.assertEqual(net_id, n.id) + + def test_single_get_tenant(self): + plugin = neutron.db.db_base_plugin_v2.NeutronDbPluginV2() + with self.network() as network: + net_id = network['network']['id'] + ctx = context.get_admin_context() + n = plugin._get_network(ctx, net_id) + self.assertEqual(net_id, n.id) + + +class TestV2HTTPResponse(NeutronDbPluginV2TestCase): + def test_create_returns_201(self): + res = self._create_network(self.fmt, 'net2', True) + self.assertEqual(res.status_int, webob.exc.HTTPCreated.code) + + def test_list_returns_200(self): + req = self.new_list_request('networks') + res = req.get_response(self.api) + self.assertEqual(res.status_int, webob.exc.HTTPOk.code) + + def _check_list_with_fields(self, res, field_name): + self.assertEqual(res.status_int, 
webob.exc.HTTPOk.code) + body = self.deserialize(self.fmt, res) + # further checks: 1 networks + self.assertEqual(len(body['networks']), 1) + # 1 field in the network record + self.assertEqual(len(body['networks'][0]), 1) + # field is 'name' + self.assertIn(field_name, body['networks'][0]) + + def test_list_with_fields(self): + self._create_network(self.fmt, 'some_net', True) + req = self.new_list_request('networks', params="fields=name") + res = req.get_response(self.api) + self._check_list_with_fields(res, 'name') + + def test_list_with_fields_noadmin(self): + tenant_id = 'some_tenant' + self._create_network(self.fmt, + 'some_net', + True, + tenant_id=tenant_id, + set_context=True) + req = self.new_list_request('networks', params="fields=name") + req.environ['neutron.context'] = context.Context('', tenant_id) + res = req.get_response(self.api) + self._check_list_with_fields(res, 'name') + + def test_list_with_fields_noadmin_and_policy_field(self): + """If a field used by policy is selected, do not duplicate it. + + Verifies that if the field parameter explicitly specifies a field + which is used by the policy engine, then it is not duplicated + in the response. 
+ + """ + tenant_id = 'some_tenant' + self._create_network(self.fmt, + 'some_net', + True, + tenant_id=tenant_id, + set_context=True) + req = self.new_list_request('networks', params="fields=tenant_id") + req.environ['neutron.context'] = context.Context('', tenant_id) + res = req.get_response(self.api) + self._check_list_with_fields(res, 'tenant_id') + + def test_show_returns_200(self): + with self.network() as net: + req = self.new_show_request('networks', net['network']['id']) + res = req.get_response(self.api) + self.assertEqual(res.status_int, webob.exc.HTTPOk.code) + + def test_delete_returns_204(self): + res = self._create_network(self.fmt, 'net1', True) + net = self.deserialize(self.fmt, res) + req = self.new_delete_request('networks', net['network']['id']) + res = req.get_response(self.api) + self.assertEqual(res.status_int, webob.exc.HTTPNoContent.code) + + def test_update_returns_200(self): + with self.network() as net: + req = self.new_update_request('networks', + {'network': {'name': 'steve'}}, + net['network']['id']) + res = req.get_response(self.api) + self.assertEqual(res.status_int, webob.exc.HTTPOk.code) + + def test_update_invalid_json_400(self): + with self.network() as net: + req = self.new_update_request('networks', + '{{"name": "aaa"}}', + net['network']['id']) + res = req.get_response(self.api) + self.assertEqual(res.status_int, webob.exc.HTTPClientError.code) + + def test_bad_route_404(self): + req = self.new_list_request('doohickeys') + res = req.get_response(self.api) + self.assertEqual(res.status_int, webob.exc.HTTPNotFound.code) + + +class TestPortsV2(NeutronDbPluginV2TestCase): + def test_create_port_json(self): + keys = [('admin_state_up', True), ('status', self.port_create_status)] + with self.port(name='myname') as port: + for k, v in keys: + self.assertEqual(port['port'][k], v) + self.assertIn('mac_address', port['port']) + ips = port['port']['fixed_ips'] + self.assertEqual(len(ips), 1) + self.assertEqual(ips[0]['ip_address'], 
'10.0.0.2') + self.assertEqual('myname', port['port']['name']) + + def test_create_port_as_admin(self): + with self.network(do_delete=False) as network: + self._create_port(self.fmt, + network['network']['id'], + webob.exc.HTTPCreated.code, + tenant_id='bad_tenant_id', + device_id='fake_device', + device_owner='fake_owner', + fixed_ips=[], + set_context=False) + + def test_create_port_bad_tenant(self): + with self.network() as network: + self._create_port(self.fmt, + network['network']['id'], + webob.exc.HTTPNotFound.code, + tenant_id='bad_tenant_id', + device_id='fake_device', + device_owner='fake_owner', + fixed_ips=[], + set_context=True) + + def test_create_port_public_network(self): + keys = [('admin_state_up', True), ('status', self.port_create_status)] + with self.network(shared=True) as network: + port_res = self._create_port(self.fmt, + network['network']['id'], + webob.exc.HTTPCreated.code, + tenant_id='another_tenant', + set_context=True) + port = self.deserialize(self.fmt, port_res) + for k, v in keys: + self.assertEqual(port['port'][k], v) + self.assertIn('mac_address', port['port']) + self._delete('ports', port['port']['id']) + + def test_create_port_public_network_with_ip(self): + with self.network(shared=True) as network: + with self.subnet(network=network, cidr='10.0.0.0/24') as subnet: + keys = [('admin_state_up', True), + ('status', self.port_create_status), + ('fixed_ips', [{'subnet_id': subnet['subnet']['id'], + 'ip_address': '10.0.0.2'}])] + port_res = self._create_port(self.fmt, + network['network']['id'], + webob.exc.HTTPCreated.code, + tenant_id='another_tenant', + set_context=True) + port = self.deserialize(self.fmt, port_res) + for k, v in keys: + self.assertEqual(port['port'][k], v) + self.assertIn('mac_address', port['port']) + self._delete('ports', port['port']['id']) + + def test_create_ports_bulk_native(self): + if self._skip_native_bulk: + self.skipTest("Plugin does not support native bulk port create") + with self.network() as net: 
+ res = self._create_port_bulk(self.fmt, 2, net['network']['id'], + 'test', True) + self._validate_behavior_on_bulk_success(res, 'ports') + for p in self.deserialize(self.fmt, res)['ports']: + self._delete('ports', p['id']) + + def test_create_ports_bulk_emulated(self): + real_has_attr = hasattr + + #ensures the API choose the emulation code path + def fakehasattr(item, attr): + if attr.endswith('__native_bulk_support'): + return False + return real_has_attr(item, attr) + + with mock.patch('__builtin__.hasattr', + new=fakehasattr): + with self.network() as net: + res = self._create_port_bulk(self.fmt, 2, net['network']['id'], + 'test', True) + self._validate_behavior_on_bulk_success(res, 'ports') + for p in self.deserialize(self.fmt, res)['ports']: + self._delete('ports', p['id']) + + def test_create_ports_bulk_wrong_input(self): + with self.network() as net: + overrides = {1: {'admin_state_up': 'doh'}} + res = self._create_port_bulk(self.fmt, 2, net['network']['id'], + 'test', True, + override=overrides) + self.assertEqual(res.status_int, webob.exc.HTTPClientError.code) + req = self.new_list_request('ports') + res = req.get_response(self.api) + self.assertEqual(res.status_int, webob.exc.HTTPOk.code) + ports = self.deserialize(self.fmt, res) + self.assertEqual(len(ports['ports']), 0) + + def test_create_ports_bulk_emulated_plugin_failure(self): + real_has_attr = hasattr + + #ensures the API choose the emulation code path + def fakehasattr(item, attr): + if attr.endswith('__native_bulk_support'): + return False + return real_has_attr(item, attr) + + with mock.patch('__builtin__.hasattr', + new=fakehasattr): + orig = manager.NeutronManager.get_plugin().create_port + with mock.patch.object(manager.NeutronManager.get_plugin(), + 'create_port') as patched_plugin: + + def side_effect(*args, **kwargs): + return self._fail_second_call(patched_plugin, orig, + *args, **kwargs) + + patched_plugin.side_effect = side_effect + with self.network() as net: + res = 
self._create_port_bulk(self.fmt, 2, + net['network']['id'], + 'test', + True) + # We expect a 500 as we injected a fault in the plugin + self._validate_behavior_on_bulk_failure( + res, 'ports', webob.exc.HTTPServerError.code + ) + + def test_create_ports_bulk_native_plugin_failure(self): + if self._skip_native_bulk: + self.skipTest("Plugin does not support native bulk port create") + ctx = context.get_admin_context() + with self.network() as net: + plugin = manager.NeutronManager.get_plugin() + orig = plugin.create_port + with mock.patch.object(plugin, 'create_port') as patched_plugin: + + def side_effect(*args, **kwargs): + return self._fail_second_call(patched_plugin, orig, + *args, **kwargs) + + patched_plugin.side_effect = side_effect + res = self._create_port_bulk(self.fmt, 2, net['network']['id'], + 'test', True, context=ctx) + # We expect a 500 as we injected a fault in the plugin + self._validate_behavior_on_bulk_failure( + res, 'ports', webob.exc.HTTPServerError.code) + + def test_list_ports(self): + # for this test we need to enable overlapping ips + cfg.CONF.set_default('allow_overlapping_ips', True) + with contextlib.nested(self.port(), + self.port(), + self.port()) as ports: + self._test_list_resources('port', ports) + + def test_list_ports_filtered_by_fixed_ip(self): + # for this test we need to enable overlapping ips + cfg.CONF.set_default('allow_overlapping_ips', True) + with contextlib.nested(self.port(), self.port()) as (port1, port2): + fixed_ips = port1['port']['fixed_ips'][0] + query_params = """ +fixed_ips=ip_address%%3D%s&fixed_ips=ip_address%%3D%s&fixed_ips=subnet_id%%3D%s +""".strip() % (fixed_ips['ip_address'], + '192.168.126.5', + fixed_ips['subnet_id']) + self._test_list_resources('port', [port1], + query_params=query_params) + + def test_list_ports_public_network(self): + with self.network(shared=True) as network: + with self.subnet(network) as subnet: + with contextlib.nested(self.port(subnet, tenant_id='tenant_1'), + self.port(subnet, 
tenant_id='tenant_2') + ) as (port1, port2): + # Admin request - must return both ports + self._test_list_resources('port', [port1, port2]) + # Tenant_1 request - must return single port + q_context = context.Context('', 'tenant_1') + self._test_list_resources('port', [port1], + neutron_context=q_context) + # Tenant_2 request - must return single port + q_context = context.Context('', 'tenant_2') + self._test_list_resources('port', [port2], + neutron_context=q_context) + + def test_list_ports_with_sort_native(self): + if self._skip_native_sorting: + self.skipTest("Skip test for not implemented sorting feature") + cfg.CONF.set_default('allow_overlapping_ips', True) + with contextlib.nested(self.port(admin_state_up='True', + mac_address='00:00:00:00:00:01'), + self.port(admin_state_up='False', + mac_address='00:00:00:00:00:02'), + self.port(admin_state_up='False', + mac_address='00:00:00:00:00:03') + ) as (port1, port2, port3): + self._test_list_with_sort('port', (port3, port2, port1), + [('admin_state_up', 'asc'), + ('mac_address', 'desc')]) + + def test_list_ports_with_sort_emulated(self): + helper_patcher = mock.patch( + 'neutron.api.v2.base.Controller._get_sorting_helper', + new=_fake_get_sorting_helper) + helper_patcher.start() + cfg.CONF.set_default('allow_overlapping_ips', True) + with contextlib.nested(self.port(admin_state_up='True', + mac_address='00:00:00:00:00:01'), + self.port(admin_state_up='False', + mac_address='00:00:00:00:00:02'), + self.port(admin_state_up='False', + mac_address='00:00:00:00:00:03') + ) as (port1, port2, port3): + self._test_list_with_sort('port', (port3, port2, port1), + [('admin_state_up', 'asc'), + ('mac_address', 'desc')]) + + def test_list_ports_with_pagination_native(self): + if self._skip_native_pagination: + self.skipTest("Skip test for not implemented pagination feature") + cfg.CONF.set_default('allow_overlapping_ips', True) + with contextlib.nested(self.port(mac_address='00:00:00:00:00:01'), + 
self.port(mac_address='00:00:00:00:00:02'), + self.port(mac_address='00:00:00:00:00:03') + ) as (port1, port2, port3): + self._test_list_with_pagination('port', + (port1, port2, port3), + ('mac_address', 'asc'), 2, 2) + + def test_list_ports_with_pagination_emulated(self): + helper_patcher = mock.patch( + 'neutron.api.v2.base.Controller._get_pagination_helper', + new=_fake_get_pagination_helper) + helper_patcher.start() + cfg.CONF.set_default('allow_overlapping_ips', True) + with contextlib.nested(self.port(mac_address='00:00:00:00:00:01'), + self.port(mac_address='00:00:00:00:00:02'), + self.port(mac_address='00:00:00:00:00:03') + ) as (port1, port2, port3): + self._test_list_with_pagination('port', + (port1, port2, port3), + ('mac_address', 'asc'), 2, 2) + + def test_list_ports_with_pagination_reverse_native(self): + if self._skip_native_pagination: + self.skipTest("Skip test for not implemented pagination feature") + cfg.CONF.set_default('allow_overlapping_ips', True) + with contextlib.nested(self.port(mac_address='00:00:00:00:00:01'), + self.port(mac_address='00:00:00:00:00:02'), + self.port(mac_address='00:00:00:00:00:03') + ) as (port1, port2, port3): + self._test_list_with_pagination_reverse('port', + (port1, port2, port3), + ('mac_address', 'asc'), + 2, 2) + + def test_list_ports_with_pagination_reverse_emulated(self): + helper_patcher = mock.patch( + 'neutron.api.v2.base.Controller._get_pagination_helper', + new=_fake_get_pagination_helper) + helper_patcher.start() + cfg.CONF.set_default('allow_overlapping_ips', True) + with contextlib.nested(self.port(mac_address='00:00:00:00:00:01'), + self.port(mac_address='00:00:00:00:00:02'), + self.port(mac_address='00:00:00:00:00:03') + ) as (port1, port2, port3): + self._test_list_with_pagination_reverse('port', + (port1, port2, port3), + ('mac_address', 'asc'), + 2, 2) + + def test_show_port(self): + with self.port() as port: + req = self.new_show_request('ports', port['port']['id'], self.fmt) + sport = 
self.deserialize(self.fmt, req.get_response(self.api)) + self.assertEqual(port['port']['id'], sport['port']['id']) + + def test_delete_port(self): + with self.port(no_delete=True) as port: + self._delete('ports', port['port']['id']) + self._show('ports', port['port']['id'], + expected_code=webob.exc.HTTPNotFound.code) + + def test_delete_port_public_network(self): + with self.network(shared=True) as network: + port_res = self._create_port(self.fmt, + network['network']['id'], + webob.exc.HTTPCreated.code, + tenant_id='another_tenant', + set_context=True) + + port = self.deserialize(self.fmt, port_res) + self._delete('ports', port['port']['id']) + self._show('ports', port['port']['id'], + expected_code=webob.exc.HTTPNotFound.code) + + def test_update_port(self): + with self.port() as port: + data = {'port': {'admin_state_up': False}} + req = self.new_update_request('ports', data, port['port']['id']) + res = self.deserialize(self.fmt, req.get_response(self.api)) + self.assertEqual(res['port']['admin_state_up'], + data['port']['admin_state_up']) + + def test_update_port_not_admin(self): + res = self._create_network(self.fmt, 'net1', True, + tenant_id='not_admin', + set_context=True) + net1 = self.deserialize(self.fmt, res) + res = self._create_port(self.fmt, net1['network']['id'], + tenant_id='not_admin', set_context=True) + port = self.deserialize(self.fmt, res) + data = {'port': {'admin_state_up': False}} + neutron_context = context.Context('', 'not_admin') + port = self._update('ports', port['port']['id'], data, + neutron_context=neutron_context) + self.assertEqual(port['port']['admin_state_up'], False) + + def test_update_device_id_null(self): + with self.port() as port: + data = {'port': {'device_id': None}} + req = self.new_update_request('ports', data, port['port']['id']) + res = req.get_response(self.api) + self.assertEqual(res.status_int, webob.exc.HTTPClientError.code) + + def test_delete_network_if_port_exists(self): + with self.port() as port: + req = 
self.new_delete_request('networks', + port['port']['network_id']) + res = req.get_response(self.api) + self.assertEqual(res.status_int, webob.exc.HTTPConflict.code) + + def test_delete_network_port_exists_owned_by_network(self): + res = self._create_network(fmt=self.fmt, name='net', + admin_state_up=True) + network = self.deserialize(self.fmt, res) + network_id = network['network']['id'] + self._create_port(self.fmt, network_id, + device_owner=constants.DEVICE_OWNER_DHCP) + req = self.new_delete_request('networks', network_id) + res = req.get_response(self.api) + self.assertEqual(res.status_int, webob.exc.HTTPNoContent.code) + + def test_update_port_delete_ip(self): + with self.subnet() as subnet: + with self.port(subnet=subnet) as port: + data = {'port': {'admin_state_up': False, + 'fixed_ips': []}} + req = self.new_update_request('ports', + data, port['port']['id']) + res = self.deserialize(self.fmt, req.get_response(self.api)) + self.assertEqual(res['port']['admin_state_up'], + data['port']['admin_state_up']) + self.assertEqual(res['port']['fixed_ips'], + data['port']['fixed_ips']) + + def test_no_more_port_exception(self): + with self.subnet(cidr='10.0.0.0/32') as subnet: + id = subnet['subnet']['network_id'] + res = self._create_port(self.fmt, id) + data = self.deserialize(self.fmt, res) + msg = str(n_exc.IpAddressGenerationFailure(net_id=id)) + self.assertEqual(data['NeutronError']['message'], msg) + self.assertEqual(res.status_int, webob.exc.HTTPConflict.code) + + def test_update_port_update_ip(self): + """Test update of port IP. + + Check that a configured IP 10.0.0.2 is replaced by 10.0.0.10. 
+ """ + with self.subnet() as subnet: + with self.port(subnet=subnet) as port: + ips = port['port']['fixed_ips'] + self.assertEqual(len(ips), 1) + self.assertEqual(ips[0]['ip_address'], '10.0.0.2') + self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id']) + data = {'port': {'fixed_ips': [{'subnet_id': + subnet['subnet']['id'], + 'ip_address': "10.0.0.10"}]}} + req = self.new_update_request('ports', data, + port['port']['id']) + res = self.deserialize(self.fmt, req.get_response(self.api)) + ips = res['port']['fixed_ips'] + self.assertEqual(len(ips), 1) + self.assertEqual(ips[0]['ip_address'], '10.0.0.10') + self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id']) + + def test_update_port_update_ip_address_only(self): + with self.subnet() as subnet: + with self.port(subnet=subnet) as port: + ips = port['port']['fixed_ips'] + self.assertEqual(len(ips), 1) + self.assertEqual(ips[0]['ip_address'], '10.0.0.2') + self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id']) + data = {'port': {'fixed_ips': [{'subnet_id': + subnet['subnet']['id'], + 'ip_address': "10.0.0.10"}, + {'ip_address': "10.0.0.2"}]}} + req = self.new_update_request('ports', data, + port['port']['id']) + res = self.deserialize(self.fmt, req.get_response(self.api)) + ips = res['port']['fixed_ips'] + self.assertEqual(len(ips), 2) + self.assertEqual(ips[0]['ip_address'], '10.0.0.2') + self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id']) + self.assertEqual(ips[1]['ip_address'], '10.0.0.10') + self.assertEqual(ips[1]['subnet_id'], subnet['subnet']['id']) + + def test_update_port_update_ips(self): + """Update IP and associate new IP on port. + + Check a port update with the specified subnet_id's. A IP address + will be allocated for each subnet_id. 
+ """ + with self.subnet() as subnet: + with self.port(subnet=subnet) as port: + data = {'port': {'admin_state_up': False, + 'fixed_ips': [{'subnet_id': + subnet['subnet']['id'], + 'ip_address': '10.0.0.3'}]}} + req = self.new_update_request('ports', data, + port['port']['id']) + res = self.deserialize(self.fmt, req.get_response(self.api)) + self.assertEqual(res['port']['admin_state_up'], + data['port']['admin_state_up']) + ips = res['port']['fixed_ips'] + self.assertEqual(len(ips), 1) + self.assertEqual(ips[0]['ip_address'], '10.0.0.3') + self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id']) + + def test_update_port_add_additional_ip(self): + """Test update of port with additional IP.""" + with self.subnet() as subnet: + with self.port(subnet=subnet) as port: + data = {'port': {'admin_state_up': False, + 'fixed_ips': [{'subnet_id': + subnet['subnet']['id']}, + {'subnet_id': + subnet['subnet']['id']}]}} + req = self.new_update_request('ports', data, + port['port']['id']) + res = self.deserialize(self.fmt, req.get_response(self.api)) + self.assertEqual(res['port']['admin_state_up'], + data['port']['admin_state_up']) + ips = res['port']['fixed_ips'] + self.assertEqual(len(ips), 2) + self.assertEqual(ips[0]['ip_address'], '10.0.0.3') + self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id']) + self.assertEqual(ips[1]['ip_address'], '10.0.0.4') + self.assertEqual(ips[1]['subnet_id'], subnet['subnet']['id']) + + def test_requested_duplicate_mac(self): + with self.port() as port: + mac = port['port']['mac_address'] + # check that MAC address matches base MAC + base_mac = cfg.CONF.base_mac[0:2] + self.assertTrue(mac.startswith(base_mac)) + kwargs = {"mac_address": mac} + net_id = port['port']['network_id'] + res = self._create_port(self.fmt, net_id=net_id, **kwargs) + self.assertEqual(res.status_int, webob.exc.HTTPConflict.code) + + def test_mac_generation(self): + cfg.CONF.set_override('base_mac', "12:34:56:00:00:00") + with self.port() as port: + mac = 
port['port']['mac_address'] + self.assertTrue(mac.startswith("12:34:56")) + + def test_mac_generation_4octet(self): + cfg.CONF.set_override('base_mac', "12:34:56:78:00:00") + with self.port() as port: + mac = port['port']['mac_address'] + self.assertTrue(mac.startswith("12:34:56:78")) + + def test_bad_mac_format(self): + cfg.CONF.set_override('base_mac', "bad_mac") + try: + self.plugin._check_base_mac_format() + except Exception: + return + self.fail("No exception for illegal base_mac format") + + def test_mac_exhaustion(self): + # rather than actually consuming all MAC (would take a LONG time) + # we just raise the exception that would result. + @staticmethod + def fake_gen_mac(context, net_id): + raise n_exc.MacAddressGenerationFailure(net_id=net_id) + + with mock.patch.object(neutron.db.db_base_plugin_v2.NeutronDbPluginV2, + '_generate_mac', new=fake_gen_mac): + res = self._create_network(fmt=self.fmt, name='net1', + admin_state_up=True) + network = self.deserialize(self.fmt, res) + net_id = network['network']['id'] + res = self._create_port(self.fmt, net_id=net_id) + self.assertEqual(res.status_int, + webob.exc.HTTPServiceUnavailable.code) + + def test_requested_duplicate_ip(self): + with self.subnet() as subnet: + with self.port(subnet=subnet) as port: + ips = port['port']['fixed_ips'] + self.assertEqual(len(ips), 1) + self.assertEqual(ips[0]['ip_address'], '10.0.0.2') + self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id']) + # Check configuring of duplicate IP + kwargs = {"fixed_ips": [{'subnet_id': subnet['subnet']['id'], + 'ip_address': ips[0]['ip_address']}]} + net_id = port['port']['network_id'] + res = self._create_port(self.fmt, net_id=net_id, **kwargs) + self.assertEqual(res.status_int, webob.exc.HTTPConflict.code) + + def test_requested_subnet_delete(self): + with self.subnet() as subnet: + with self.port(subnet=subnet) as port: + ips = port['port']['fixed_ips'] + self.assertEqual(len(ips), 1) + self.assertEqual(ips[0]['ip_address'], 
'10.0.0.2') + self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id']) + req = self.new_delete_request('subnet', + subnet['subnet']['id']) + res = req.get_response(self.api) + self.assertEqual(res.status_int, webob.exc.HTTPNotFound.code) + + def test_requested_subnet_id(self): + with self.subnet() as subnet: + with self.port(subnet=subnet) as port: + ips = port['port']['fixed_ips'] + self.assertEqual(len(ips), 1) + self.assertEqual(ips[0]['ip_address'], '10.0.0.2') + self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id']) + # Request a IP from specific subnet + kwargs = {"fixed_ips": [{'subnet_id': subnet['subnet']['id']}]} + net_id = port['port']['network_id'] + res = self._create_port(self.fmt, net_id=net_id, **kwargs) + port2 = self.deserialize(self.fmt, res) + ips = port2['port']['fixed_ips'] + self.assertEqual(len(ips), 1) + self.assertEqual(ips[0]['ip_address'], '10.0.0.3') + self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id']) + self._delete('ports', port2['port']['id']) + + def test_requested_subnet_id_not_on_network(self): + with self.subnet() as subnet: + with self.port(subnet=subnet) as port: + # Create new network + res = self._create_network(fmt=self.fmt, name='net2', + admin_state_up=True) + network2 = self.deserialize(self.fmt, res) + subnet2 = self._make_subnet(self.fmt, network2, "1.1.1.1", + "1.1.1.0/24", ip_version=4) + net_id = port['port']['network_id'] + # Request a IP from specific subnet + kwargs = {"fixed_ips": [{'subnet_id': + subnet2['subnet']['id']}]} + net_id = port['port']['network_id'] + res = self._create_port(self.fmt, net_id=net_id, **kwargs) + self.assertEqual(res.status_int, + webob.exc.HTTPClientError.code) + + def test_overlapping_subnets(self): + with self.subnet() as subnet: + tenant_id = subnet['subnet']['tenant_id'] + net_id = subnet['subnet']['network_id'] + res = self._create_subnet(self.fmt, + tenant_id=tenant_id, + net_id=net_id, + cidr='10.0.0.225/28', + ip_version=4, + 
gateway_ip=attributes.ATTR_NOT_SPECIFIED) + self.assertEqual(res.status_int, webob.exc.HTTPClientError.code) + + def test_requested_subnet_id_v4_and_v6(self): + with self.subnet() as subnet: + # Get a IPv4 and IPv6 address + tenant_id = subnet['subnet']['tenant_id'] + net_id = subnet['subnet']['network_id'] + res = self._create_subnet( + self.fmt, + tenant_id=tenant_id, + net_id=net_id, + cidr='2607:f0d0:1002:51::/124', + ip_version=6, + gateway_ip=attributes.ATTR_NOT_SPECIFIED) + subnet2 = self.deserialize(self.fmt, res) + kwargs = {"fixed_ips": + [{'subnet_id': subnet['subnet']['id']}, + {'subnet_id': subnet2['subnet']['id']}]} + res = self._create_port(self.fmt, net_id=net_id, **kwargs) + port3 = self.deserialize(self.fmt, res) + ips = port3['port']['fixed_ips'] + self.assertEqual(len(ips), 2) + self.assertEqual(ips[0]['ip_address'], '10.0.0.2') + self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id']) + self.assertEqual(ips[1]['ip_address'], '2607:f0d0:1002:51::2') + self.assertEqual(ips[1]['subnet_id'], subnet2['subnet']['id']) + res = self._create_port(self.fmt, net_id=net_id) + port4 = self.deserialize(self.fmt, res) + # Check that a v4 and a v6 address are allocated + ips = port4['port']['fixed_ips'] + self.assertEqual(len(ips), 2) + self.assertEqual(ips[0]['ip_address'], '10.0.0.3') + self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id']) + self.assertEqual(ips[1]['ip_address'], '2607:f0d0:1002:51::3') + self.assertEqual(ips[1]['subnet_id'], subnet2['subnet']['id']) + self._delete('ports', port3['port']['id']) + self._delete('ports', port4['port']['id']) + + def test_ip_allocation_for_ipv6_subnet_slaac_adddress_mode(self): + res = self._create_network(fmt=self.fmt, name='net', + admin_state_up=True) + network = self.deserialize(self.fmt, res) + v6_subnet = self._make_subnet(self.fmt, network, + gateway='fe80::1', + cidr='fe80::/80', + ip_version=6, + ipv6_ra_mode=None, + ipv6_address_mode=constants.IPV6_SLAAC) + port = 
self._make_port(self.fmt, network['network']['id']) + self.assertEqual(len(port['port']['fixed_ips']), 1) + port_mac = port['port']['mac_address'] + subnet_cidr = v6_subnet['subnet']['cidr'] + eui_addr = str(ipv6_utils.get_ipv6_addr_by_EUI64(subnet_cidr, + port_mac)) + self.assertEqual(port['port']['fixed_ips'][0]['ip_address'], eui_addr) + + def test_range_allocation(self): + with self.subnet(gateway_ip='10.0.0.3', + cidr='10.0.0.0/29') as subnet: + kwargs = {"fixed_ips": + [{'subnet_id': subnet['subnet']['id']}, + {'subnet_id': subnet['subnet']['id']}, + {'subnet_id': subnet['subnet']['id']}, + {'subnet_id': subnet['subnet']['id']}, + {'subnet_id': subnet['subnet']['id']}]} + net_id = subnet['subnet']['network_id'] + res = self._create_port(self.fmt, net_id=net_id, **kwargs) + port = self.deserialize(self.fmt, res) + ips = port['port']['fixed_ips'] + self.assertEqual(len(ips), 5) + alloc = ['10.0.0.1', '10.0.0.2', '10.0.0.4', '10.0.0.5', + '10.0.0.6'] + for ip in ips: + self.assertIn(ip['ip_address'], alloc) + self.assertEqual(ip['subnet_id'], + subnet['subnet']['id']) + alloc.remove(ip['ip_address']) + self.assertEqual(len(alloc), 0) + self._delete('ports', port['port']['id']) + + with self.subnet(gateway_ip='11.0.0.6', + cidr='11.0.0.0/29') as subnet: + kwargs = {"fixed_ips": + [{'subnet_id': subnet['subnet']['id']}, + {'subnet_id': subnet['subnet']['id']}, + {'subnet_id': subnet['subnet']['id']}, + {'subnet_id': subnet['subnet']['id']}, + {'subnet_id': subnet['subnet']['id']}]} + net_id = subnet['subnet']['network_id'] + res = self._create_port(self.fmt, net_id=net_id, **kwargs) + port = self.deserialize(self.fmt, res) + ips = port['port']['fixed_ips'] + self.assertEqual(len(ips), 5) + alloc = ['11.0.0.1', '11.0.0.2', '11.0.0.3', '11.0.0.4', + '11.0.0.5'] + for ip in ips: + self.assertIn(ip['ip_address'], alloc) + self.assertEqual(ip['subnet_id'], + subnet['subnet']['id']) + alloc.remove(ip['ip_address']) + self.assertEqual(len(alloc), 0) + 
self._delete('ports', port['port']['id']) + + def test_requested_invalid_fixed_ips(self): + with self.subnet() as subnet: + with self.port(subnet=subnet) as port: + ips = port['port']['fixed_ips'] + self.assertEqual(len(ips), 1) + self.assertEqual(ips[0]['ip_address'], '10.0.0.2') + self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id']) + # Test invalid subnet_id + kwargs = {"fixed_ips": + [{'subnet_id': subnet['subnet']['id']}, + {'subnet_id': + '00000000-ffff-ffff-ffff-000000000000'}]} + net_id = port['port']['network_id'] + res = self._create_port(self.fmt, net_id=net_id, **kwargs) + port2 = self.deserialize(self.fmt, res) + self.assertEqual(res.status_int, webob.exc.HTTPNotFound.code) + + # Test invalid IP address on specified subnet_id + kwargs = {"fixed_ips": + [{'subnet_id': subnet['subnet']['id'], + 'ip_address': '1.1.1.1'}]} + net_id = port['port']['network_id'] + res = self._create_port(self.fmt, net_id=net_id, **kwargs) + port2 = self.deserialize(self.fmt, res) + self.assertEqual(res.status_int, + webob.exc.HTTPClientError.code) + + # Test invalid addresses - IP's not on subnet or network + # address or broadcast address + bad_ips = ['1.1.1.1', '10.0.0.0', '10.0.0.255'] + net_id = port['port']['network_id'] + for ip in bad_ips: + kwargs = {"fixed_ips": [{'ip_address': ip}]} + res = self._create_port(self.fmt, net_id=net_id, **kwargs) + port2 = self.deserialize(self.fmt, res) + self.assertEqual(res.status_int, + webob.exc.HTTPClientError.code) + + # Enable allocation of gateway address + kwargs = {"fixed_ips": + [{'subnet_id': subnet['subnet']['id'], + 'ip_address': '10.0.0.1'}]} + net_id = port['port']['network_id'] + res = self._create_port(self.fmt, net_id=net_id, **kwargs) + port2 = self.deserialize(self.fmt, res) + ips = port2['port']['fixed_ips'] + self.assertEqual(len(ips), 1) + self.assertEqual(ips[0]['ip_address'], '10.0.0.1') + self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id']) + self._delete('ports', port2['port']['id']) + + 
def test_invalid_ip(self): + with self.subnet() as subnet: + # Allocate specific IP + kwargs = {"fixed_ips": [{'subnet_id': subnet['subnet']['id'], + 'ip_address': '1011.0.0.5'}]} + net_id = subnet['subnet']['network_id'] + res = self._create_port(self.fmt, net_id=net_id, **kwargs) + self.assertEqual(res.status_int, webob.exc.HTTPClientError.code) + + def test_requested_split(self): + with self.subnet() as subnet: + with self.port(subnet=subnet) as port: + ports_to_delete = [] + ips = port['port']['fixed_ips'] + self.assertEqual(len(ips), 1) + self.assertEqual(ips[0]['ip_address'], '10.0.0.2') + self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id']) + # Allocate specific IP + kwargs = {"fixed_ips": [{'subnet_id': subnet['subnet']['id'], + 'ip_address': '10.0.0.5'}]} + net_id = port['port']['network_id'] + res = self._create_port(self.fmt, net_id=net_id, **kwargs) + port2 = self.deserialize(self.fmt, res) + ports_to_delete.append(port2) + ips = port2['port']['fixed_ips'] + self.assertEqual(len(ips), 1) + self.assertEqual(ips[0]['ip_address'], '10.0.0.5') + self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id']) + # Allocate specific IP's + allocated = ['10.0.0.3', '10.0.0.4', '10.0.0.6'] + + for a in allocated: + res = self._create_port(self.fmt, net_id=net_id) + port2 = self.deserialize(self.fmt, res) + ports_to_delete.append(port2) + ips = port2['port']['fixed_ips'] + self.assertEqual(len(ips), 1) + self.assertEqual(ips[0]['ip_address'], a) + self.assertEqual(ips[0]['subnet_id'], + subnet['subnet']['id']) + + for p in ports_to_delete: + self._delete('ports', p['port']['id']) + + def test_duplicate_ips(self): + with self.subnet() as subnet: + # Allocate specific IP + kwargs = {"fixed_ips": [{'subnet_id': subnet['subnet']['id'], + 'ip_address': '10.0.0.5'}, + {'subnet_id': subnet['subnet']['id'], + 'ip_address': '10.0.0.5'}]} + net_id = subnet['subnet']['network_id'] + res = self._create_port(self.fmt, net_id=net_id, **kwargs) + 
self.assertEqual(res.status_int, webob.exc.HTTPClientError.code) + + def test_fixed_ip_invalid_subnet_id(self): + with self.subnet() as subnet: + # Allocate specific IP + kwargs = {"fixed_ips": [{'subnet_id': 'i am invalid', + 'ip_address': '10.0.0.5'}]} + net_id = subnet['subnet']['network_id'] + res = self._create_port(self.fmt, net_id=net_id, **kwargs) + self.assertEqual(res.status_int, webob.exc.HTTPClientError.code) + + def test_fixed_ip_invalid_ip(self): + with self.subnet() as subnet: + # Allocate specific IP + kwargs = {"fixed_ips": [{'subnet_id': subnet['subnet']['id'], + 'ip_address': '10.0.0.55555'}]} + net_id = subnet['subnet']['network_id'] + res = self._create_port(self.fmt, net_id=net_id, **kwargs) + self.assertEqual(res.status_int, webob.exc.HTTPClientError.code) + + def test_requested_ips_only(self): + with self.subnet() as subnet: + with self.port(subnet=subnet) as port: + ips = port['port']['fixed_ips'] + self.assertEqual(len(ips), 1) + self.assertEqual(ips[0]['ip_address'], '10.0.0.2') + self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id']) + ips_only = ['10.0.0.18', '10.0.0.20', '10.0.0.22', '10.0.0.21', + '10.0.0.3', '10.0.0.17', '10.0.0.19'] + ports_to_delete = [] + for i in ips_only: + kwargs = {"fixed_ips": [{'ip_address': i}]} + net_id = port['port']['network_id'] + res = self._create_port(self.fmt, net_id=net_id, **kwargs) + port = self.deserialize(self.fmt, res) + ports_to_delete.append(port) + ips = port['port']['fixed_ips'] + self.assertEqual(len(ips), 1) + self.assertEqual(ips[0]['ip_address'], i) + self.assertEqual(ips[0]['subnet_id'], + subnet['subnet']['id']) + for p in ports_to_delete: + self._delete('ports', p['port']['id']) + + def test_invalid_admin_state(self): + with self.network() as network: + data = {'port': {'network_id': network['network']['id'], + 'tenant_id': network['network']['tenant_id'], + 'admin_state_up': 7, + 'fixed_ips': []}} + port_req = self.new_create_request('ports', data) + res = 
port_req.get_response(self.api) + self.assertEqual(res.status_int, webob.exc.HTTPClientError.code) + + def test_invalid_mac_address(self): + with self.network() as network: + data = {'port': {'network_id': network['network']['id'], + 'tenant_id': network['network']['tenant_id'], + 'admin_state_up': 1, + 'mac_address': 'mac', + 'fixed_ips': []}} + port_req = self.new_create_request('ports', data) + res = port_req.get_response(self.api) + self.assertEqual(res.status_int, webob.exc.HTTPClientError.code) + + def test_max_fixed_ips_exceeded(self): + with self.subnet(gateway_ip='10.0.0.3', + cidr='10.0.0.0/24') as subnet: + kwargs = {"fixed_ips": + [{'subnet_id': subnet['subnet']['id']}, + {'subnet_id': subnet['subnet']['id']}, + {'subnet_id': subnet['subnet']['id']}, + {'subnet_id': subnet['subnet']['id']}, + {'subnet_id': subnet['subnet']['id']}, + {'subnet_id': subnet['subnet']['id']}]} + net_id = subnet['subnet']['network_id'] + res = self._create_port(self.fmt, net_id=net_id, **kwargs) + self.assertEqual(res.status_int, + webob.exc.HTTPClientError.code) + + def test_update_max_fixed_ips_exceeded(self): + with self.subnet(gateway_ip='10.0.0.3', + cidr='10.0.0.0/24') as subnet: + with self.port(subnet) as port: + data = {'port': {'fixed_ips': + [{'subnet_id': subnet['subnet']['id'], + 'ip_address': '10.0.0.2'}, + {'subnet_id': subnet['subnet']['id'], + 'ip_address': '10.0.0.4'}, + {'subnet_id': subnet['subnet']['id']}, + {'subnet_id': subnet['subnet']['id']}, + {'subnet_id': subnet['subnet']['id']}, + {'subnet_id': subnet['subnet']['id']}]}} + req = self.new_update_request('ports', data, + port['port']['id']) + res = req.get_response(self.api) + self.assertEqual(res.status_int, + webob.exc.HTTPClientError.code) + + def test_delete_ports_by_device_id(self): + plugin = manager.NeutronManager.get_plugin() + ctx = context.get_admin_context() + with self.subnet() as subnet: + with contextlib.nested( + self.port(subnet=subnet, device_id='owner1', no_delete=True), + 
self.port(subnet=subnet, device_id='owner1', no_delete=True), + self.port(subnet=subnet, device_id='owner2'), + ) as (p1, p2, p3): + network_id = subnet['subnet']['network_id'] + plugin.delete_ports_by_device_id(ctx, 'owner1', + network_id) + self._show('ports', p1['port']['id'], + expected_code=webob.exc.HTTPNotFound.code) + self._show('ports', p2['port']['id'], + expected_code=webob.exc.HTTPNotFound.code) + self._show('ports', p3['port']['id'], + expected_code=webob.exc.HTTPOk.code) + + def _test_delete_ports_by_device_id_second_call_failure(self, plugin): + ctx = context.get_admin_context() + with self.subnet() as subnet: + with contextlib.nested( + self.port(subnet=subnet, device_id='owner1', no_delete=True), + self.port(subnet=subnet, device_id='owner1'), + self.port(subnet=subnet, device_id='owner2'), + ) as (p1, p2, p3): + orig = plugin.delete_port + with mock.patch.object(plugin, 'delete_port') as del_port: + + def side_effect(*args, **kwargs): + return self._fail_second_call(del_port, orig, + *args, **kwargs) + + del_port.side_effect = side_effect + network_id = subnet['subnet']['network_id'] + self.assertRaises(n_exc.NeutronException, + plugin.delete_ports_by_device_id, + ctx, 'owner1', network_id) + self._show('ports', p1['port']['id'], + expected_code=webob.exc.HTTPNotFound.code) + self._show('ports', p2['port']['id'], + expected_code=webob.exc.HTTPOk.code) + self._show('ports', p3['port']['id'], + expected_code=webob.exc.HTTPOk.code) + + def test_delete_ports_by_device_id_second_call_failure(self): + plugin = manager.NeutronManager.get_plugin() + self._test_delete_ports_by_device_id_second_call_failure(plugin) + + def _test_delete_ports_ignores_port_not_found(self, plugin): + ctx = context.get_admin_context() + with self.subnet() as subnet: + with contextlib.nested( + self.port(subnet=subnet, device_id='owner1'), + mock.patch.object(plugin, 'delete_port') + ) as (p, del_port): + del_port.side_effect = n_exc.PortNotFound( + port_id=p['port']['id'] + ) + 
network_id = subnet['subnet']['network_id'] + try: + plugin.delete_ports_by_device_id(ctx, 'owner1', + network_id) + except n_exc.PortNotFound: + self.fail("delete_ports_by_device_id unexpectedly raised " + "a PortNotFound exception. It should ignore " + "this exception because it is often called at " + "the same time other concurrent operations are " + "deleting some of the same ports.") + + def test_delete_ports_ignores_port_not_found(self): + plugin = manager.NeutronManager.get_plugin() + self._test_delete_ports_ignores_port_not_found(plugin) + + +class TestNetworksV2(NeutronDbPluginV2TestCase): + # NOTE(cerberus): successful network update and delete are + # effectively tested above + def test_create_network(self): + name = 'net1' + keys = [('subnets', []), ('name', name), ('admin_state_up', True), + ('status', self.net_create_status), ('shared', False)] + with self.network(name=name) as net: + for k, v in keys: + self.assertEqual(net['network'][k], v) + + def test_create_public_network(self): + name = 'public_net' + keys = [('subnets', []), ('name', name), ('admin_state_up', True), + ('status', self.net_create_status), ('shared', True)] + with self.network(name=name, shared=True) as net: + for k, v in keys: + self.assertEqual(net['network'][k], v) + + def test_create_public_network_no_admin_tenant(self): + name = 'public_net' + with testlib_api.ExpectedException( + webob.exc.HTTPClientError) as ctx_manager: + with self.network(name=name, + shared=True, + tenant_id="another_tenant", + set_context=True): + pass + self.assertEqual(ctx_manager.exception.code, + webob.exc.HTTPForbidden.code) + + def test_update_network(self): + with self.network() as network: + data = {'network': {'name': 'a_brand_new_name'}} + req = self.new_update_request('networks', + data, + network['network']['id']) + res = self.deserialize(self.fmt, req.get_response(self.api)) + self.assertEqual(res['network']['name'], + data['network']['name']) + + def 
test_update_shared_network_noadmin_returns_403(self): + with self.network(shared=True) as network: + data = {'network': {'name': 'a_brand_new_name'}} + req = self.new_update_request('networks', + data, + network['network']['id']) + req.environ['neutron.context'] = context.Context('', 'somebody') + res = req.get_response(self.api) + # The API layer always returns 404 on updates in place of 403 + self.assertEqual(res.status_int, webob.exc.HTTPNotFound.code) + + def test_update_network_set_shared(self): + with self.network(shared=False) as network: + data = {'network': {'shared': True}} + req = self.new_update_request('networks', + data, + network['network']['id']) + res = self.deserialize(self.fmt, req.get_response(self.api)) + self.assertTrue(res['network']['shared']) + + def test_update_network_set_shared_owner_returns_404(self): + with self.network(shared=False) as network: + net_owner = network['network']['tenant_id'] + data = {'network': {'shared': True}} + req = self.new_update_request('networks', + data, + network['network']['id']) + req.environ['neutron.context'] = context.Context('u', net_owner) + res = req.get_response(self.api) + self.assertEqual(res.status_int, webob.exc.HTTPNotFound.code) + + def test_update_network_with_subnet_set_shared(self): + with self.network(shared=False) as network: + with self.subnet(network=network) as subnet: + data = {'network': {'shared': True}} + req = self.new_update_request('networks', + data, + network['network']['id']) + res = self.deserialize(self.fmt, req.get_response(self.api)) + self.assertTrue(res['network']['shared']) + # must query db to see whether subnet's shared attribute + # has been updated or not + ctx = context.Context('', '', is_admin=True) + subnet_db = manager.NeutronManager.get_plugin()._get_subnet( + ctx, subnet['subnet']['id']) + self.assertEqual(subnet_db['shared'], True) + + def test_update_network_set_not_shared_single_tenant(self): + with self.network(shared=True) as network: + res1 = 
self._create_port(self.fmt, + network['network']['id'], + webob.exc.HTTPCreated.code, + tenant_id=network['network']['tenant_id'], + set_context=True) + data = {'network': {'shared': False}} + req = self.new_update_request('networks', + data, + network['network']['id']) + res = self.deserialize(self.fmt, req.get_response(self.api)) + self.assertFalse(res['network']['shared']) + port1 = self.deserialize(self.fmt, res1) + self._delete('ports', port1['port']['id']) + + def test_update_network_set_not_shared_other_tenant_returns_409(self): + with self.network(shared=True) as network: + res1 = self._create_port(self.fmt, + network['network']['id'], + webob.exc.HTTPCreated.code, + tenant_id='somebody_else', + set_context=True) + data = {'network': {'shared': False}} + req = self.new_update_request('networks', + data, + network['network']['id']) + self.assertEqual(req.get_response(self.api).status_int, + webob.exc.HTTPConflict.code) + port1 = self.deserialize(self.fmt, res1) + self._delete('ports', port1['port']['id']) + + def test_update_network_set_not_shared_multi_tenants_returns_409(self): + with self.network(shared=True) as network: + res1 = self._create_port(self.fmt, + network['network']['id'], + webob.exc.HTTPCreated.code, + tenant_id='somebody_else', + set_context=True) + res2 = self._create_port(self.fmt, + network['network']['id'], + webob.exc.HTTPCreated.code, + tenant_id=network['network']['tenant_id'], + set_context=True) + data = {'network': {'shared': False}} + req = self.new_update_request('networks', + data, + network['network']['id']) + self.assertEqual(req.get_response(self.api).status_int, + webob.exc.HTTPConflict.code) + port1 = self.deserialize(self.fmt, res1) + port2 = self.deserialize(self.fmt, res2) + self._delete('ports', port1['port']['id']) + self._delete('ports', port2['port']['id']) + + def test_update_network_set_not_shared_multi_tenants2_returns_409(self): + with self.network(shared=True) as network: + res1 = self._create_port(self.fmt, + 
network['network']['id'], + webob.exc.HTTPCreated.code, + tenant_id='somebody_else', + set_context=True) + self._create_subnet(self.fmt, + network['network']['id'], + '10.0.0.0/24', + webob.exc.HTTPCreated.code, + tenant_id=network['network']['tenant_id'], + set_context=True) + data = {'network': {'shared': False}} + req = self.new_update_request('networks', + data, + network['network']['id']) + self.assertEqual(req.get_response(self.api).status_int, + webob.exc.HTTPConflict.code) + + port1 = self.deserialize(self.fmt, res1) + self._delete('ports', port1['port']['id']) + + def test_create_networks_bulk_native(self): + if self._skip_native_bulk: + self.skipTest("Plugin does not support native bulk network create") + res = self._create_network_bulk(self.fmt, 2, 'test', True) + self._validate_behavior_on_bulk_success(res, 'networks') + + def test_create_networks_bulk_native_quotas(self): + if self._skip_native_bulk: + self.skipTest("Plugin does not support native bulk network create") + quota = 4 + cfg.CONF.set_override('quota_network', quota, group='QUOTAS') + res = self._create_network_bulk(self.fmt, quota + 1, 'test', True) + self._validate_behavior_on_bulk_failure( + res, 'networks', + errcode=webob.exc.HTTPConflict.code) + + def test_create_networks_bulk_tenants_and_quotas(self): + if self._skip_native_bulk: + self.skipTest("Plugin does not support native bulk network create") + quota = 2 + cfg.CONF.set_override('quota_network', quota, group='QUOTAS') + networks = [{'network': {'name': 'n1', + 'tenant_id': self._tenant_id}}, + {'network': {'name': 'n2', + 'tenant_id': self._tenant_id}}, + {'network': {'name': 'n1', + 'tenant_id': 't1'}}, + {'network': {'name': 'n2', + 'tenant_id': 't1'}}] + + res = self._create_bulk_from_list(self.fmt, 'network', networks) + self.assertEqual(res.status_int, webob.exc.HTTPCreated.code) + + def test_create_networks_bulk_tenants_and_quotas_fail(self): + if self._skip_native_bulk: + self.skipTest("Plugin does not support native bulk 
network create") + quota = 2 + cfg.CONF.set_override('quota_network', quota, group='QUOTAS') + networks = [{'network': {'name': 'n1', + 'tenant_id': self._tenant_id}}, + {'network': {'name': 'n2', + 'tenant_id': self._tenant_id}}, + {'network': {'name': 'n1', + 'tenant_id': 't1'}}, + {'network': {'name': 'n3', + 'tenant_id': self._tenant_id}}, + {'network': {'name': 'n2', + 'tenant_id': 't1'}}] + + res = self._create_bulk_from_list(self.fmt, 'network', networks) + self.assertEqual(res.status_int, webob.exc.HTTPConflict.code) + + def test_create_networks_bulk_emulated(self): + real_has_attr = hasattr + + #ensures the API choose the emulation code path + def fakehasattr(item, attr): + if attr.endswith('__native_bulk_support'): + return False + return real_has_attr(item, attr) + + with mock.patch('__builtin__.hasattr', + new=fakehasattr): + res = self._create_network_bulk(self.fmt, 2, 'test', True) + self._validate_behavior_on_bulk_success(res, 'networks') + + def test_create_networks_bulk_wrong_input(self): + res = self._create_network_bulk(self.fmt, 2, 'test', True, + override={1: + {'admin_state_up': 'doh'}}) + self.assertEqual(res.status_int, webob.exc.HTTPClientError.code) + req = self.new_list_request('networks') + res = req.get_response(self.api) + self.assertEqual(res.status_int, webob.exc.HTTPOk.code) + nets = self.deserialize(self.fmt, res) + self.assertEqual(len(nets['networks']), 0) + + def test_create_networks_bulk_emulated_plugin_failure(self): + real_has_attr = hasattr + + def fakehasattr(item, attr): + if attr.endswith('__native_bulk_support'): + return False + return real_has_attr(item, attr) + + orig = manager.NeutronManager.get_plugin().create_network + #ensures the API choose the emulation code path + with mock.patch('__builtin__.hasattr', + new=fakehasattr): + with mock.patch.object(manager.NeutronManager.get_plugin(), + 'create_network') as patched_plugin: + + def side_effect(*args, **kwargs): + return self._fail_second_call(patched_plugin, orig, 
+ *args, **kwargs) + + patched_plugin.side_effect = side_effect + res = self._create_network_bulk(self.fmt, 2, 'test', True) + # We expect a 500 as we injected a fault in the plugin + self._validate_behavior_on_bulk_failure( + res, 'networks', webob.exc.HTTPServerError.code + ) + + def test_create_networks_bulk_native_plugin_failure(self): + if self._skip_native_bulk: + self.skipTest("Plugin does not support native bulk network create") + orig = manager.NeutronManager.get_plugin().create_network + with mock.patch.object(manager.NeutronManager.get_plugin(), + 'create_network') as patched_plugin: + + def side_effect(*args, **kwargs): + return self._fail_second_call(patched_plugin, orig, + *args, **kwargs) + + patched_plugin.side_effect = side_effect + res = self._create_network_bulk(self.fmt, 2, 'test', True) + # We expect a 500 as we injected a fault in the plugin + self._validate_behavior_on_bulk_failure( + res, 'networks', webob.exc.HTTPServerError.code + ) + + def test_list_networks(self): + with contextlib.nested(self.network(), + self.network(), + self.network()) as networks: + self._test_list_resources('network', networks) + + def test_list_networks_with_sort_native(self): + if self._skip_native_sorting: + self.skipTest("Skip test for not implemented sorting feature") + with contextlib.nested(self.network(admin_status_up=True, + name='net1'), + self.network(admin_status_up=False, + name='net2'), + self.network(admin_status_up=False, + name='net3') + ) as (net1, net2, net3): + self._test_list_with_sort('network', (net3, net2, net1), + [('admin_state_up', 'asc'), + ('name', 'desc')]) + + def test_list_networks_with_sort_extended_attr_native_returns_400(self): + if self._skip_native_sorting: + self.skipTest("Skip test for not implemented sorting feature") + with contextlib.nested(self.network(admin_status_up=True, + name='net1'), + self.network(admin_status_up=False, + name='net2'), + self.network(admin_status_up=False, + name='net3') + ): + req = 
self.new_list_request( + 'networks', + params='sort_key=provider:segmentation_id&sort_dir=asc') + res = req.get_response(self.api) + self.assertEqual(webob.exc.HTTPClientError.code, res.status_int) + + def test_list_networks_with_sort_remote_key_native_returns_400(self): + if self._skip_native_sorting: + self.skipTest("Skip test for not implemented sorting feature") + with contextlib.nested(self.network(admin_status_up=True, + name='net1'), + self.network(admin_status_up=False, + name='net2'), + self.network(admin_status_up=False, + name='net3') + ): + req = self.new_list_request( + 'networks', params='sort_key=subnets&sort_dir=asc') + res = req.get_response(self.api) + self.assertEqual(webob.exc.HTTPClientError.code, res.status_int) + + def test_list_networks_with_sort_emulated(self): + helper_patcher = mock.patch( + 'neutron.api.v2.base.Controller._get_sorting_helper', + new=_fake_get_sorting_helper) + helper_patcher.start() + with contextlib.nested(self.network(admin_status_up=True, + name='net1'), + self.network(admin_status_up=False, + name='net2'), + self.network(admin_status_up=False, + name='net3') + ) as (net1, net2, net3): + self._test_list_with_sort('network', (net3, net2, net1), + [('admin_state_up', 'asc'), + ('name', 'desc')]) + + def test_list_networks_with_pagination_native(self): + if self._skip_native_pagination: + self.skipTest("Skip test for not implemented pagination feature") + with contextlib.nested(self.network(name='net1'), + self.network(name='net2'), + self.network(name='net3') + ) as (net1, net2, net3): + self._test_list_with_pagination('network', + (net1, net2, net3), + ('name', 'asc'), 2, 2) + + def test_list_networks_with_pagination_emulated(self): + helper_patcher = mock.patch( + 'neutron.api.v2.base.Controller._get_pagination_helper', + new=_fake_get_pagination_helper) + helper_patcher.start() + with contextlib.nested(self.network(name='net1'), + self.network(name='net2'), + self.network(name='net3') + ) as (net1, net2, net3): + 
self._test_list_with_pagination('network', + (net1, net2, net3), + ('name', 'asc'), 2, 2) + + def test_list_networks_without_pk_in_fields_pagination_emulated(self): + helper_patcher = mock.patch( + 'neutron.api.v2.base.Controller._get_pagination_helper', + new=_fake_get_pagination_helper) + helper_patcher.start() + with contextlib.nested(self.network(name='net1', + shared=True), + self.network(name='net2', + shared=False), + self.network(name='net3', + shared=True) + ) as (net1, net2, net3): + self._test_list_with_pagination('network', + (net1, net2, net3), + ('name', 'asc'), 2, 2, + query_params="fields=name", + verify_key='name') + + def test_list_networks_without_pk_in_fields_pagination_native(self): + if self._skip_native_pagination: + self.skipTest("Skip test for not implemented pagination feature") + with contextlib.nested(self.network(name='net1'), + self.network(name='net2'), + self.network(name='net3') + ) as (net1, net2, net3): + self._test_list_with_pagination('network', + (net1, net2, net3), + ('name', 'asc'), 2, 2, + query_params="fields=shared", + verify_key='shared') + + def test_list_networks_with_pagination_reverse_native(self): + if self._skip_native_pagination: + self.skipTest("Skip test for not implemented pagination feature") + with contextlib.nested(self.network(name='net1'), + self.network(name='net2'), + self.network(name='net3') + ) as (net1, net2, net3): + self._test_list_with_pagination_reverse('network', + (net1, net2, net3), + ('name', 'asc'), 2, 2) + + def test_list_networks_with_pagination_reverse_emulated(self): + helper_patcher = mock.patch( + 'neutron.api.v2.base.Controller._get_pagination_helper', + new=_fake_get_pagination_helper) + helper_patcher.start() + with contextlib.nested(self.network(name='net1'), + self.network(name='net2'), + self.network(name='net3') + ) as (net1, net2, net3): + self._test_list_with_pagination_reverse('network', + (net1, net2, net3), + ('name', 'asc'), 2, 2) + + def 
test_list_networks_with_parameters(self): + with contextlib.nested(self.network(name='net1', + admin_state_up=False), + self.network(name='net2')) as (net1, net2): + query_params = 'admin_state_up=False' + self._test_list_resources('network', [net1], + query_params=query_params) + query_params = 'admin_state_up=True' + self._test_list_resources('network', [net2], + query_params=query_params) + + def test_list_networks_with_fields(self): + with self.network(name='net1') as net1: + req = self.new_list_request('networks', + params='fields=name') + res = self.deserialize(self.fmt, req.get_response(self.api)) + self.assertEqual(1, len(res['networks'])) + self.assertEqual(res['networks'][0]['name'], + net1['network']['name']) + self.assertIsNone(res['networks'][0].get('id')) + + def test_list_networks_with_parameters_invalid_values(self): + with contextlib.nested(self.network(name='net1', + admin_state_up=False), + self.network(name='net2')) as (net1, net2): + req = self.new_list_request('networks', + params='admin_state_up=fake') + res = req.get_response(self.api) + self.assertEqual(webob.exc.HTTPClientError.code, res.status_int) + + def test_list_shared_networks_with_non_admin_user(self): + with contextlib.nested(self.network(shared=False, + name='net1', + tenant_id='tenant1'), + self.network(shared=True, + name='net2', + tenant_id='another_tenant'), + self.network(shared=False, + name='net3', + tenant_id='another_tenant') + ) as (net1, net2, net3): + ctx = context.Context(user_id='non_admin', + tenant_id='tenant1', + is_admin=False) + self._test_list_resources('network', (net1, net2), ctx) + + def test_show_network(self): + with self.network(name='net1') as net: + req = self.new_show_request('networks', net['network']['id']) + res = self.deserialize(self.fmt, req.get_response(self.api)) + self.assertEqual(res['network']['name'], + net['network']['name']) + + def test_show_network_with_subnet(self): + with self.network(name='net1') as net: + with self.subnet(net) as 
subnet: + req = self.new_show_request('networks', net['network']['id']) + res = self.deserialize(self.fmt, req.get_response(self.api)) + self.assertEqual(res['network']['subnets'][0], + subnet['subnet']['id']) + + def test_invalid_admin_status(self): + value = [[7, False, webob.exc.HTTPClientError.code], + [True, True, webob.exc.HTTPCreated.code], + ["True", True, webob.exc.HTTPCreated.code], + ["true", True, webob.exc.HTTPCreated.code], + [1, True, webob.exc.HTTPCreated.code], + ["False", False, webob.exc.HTTPCreated.code], + [False, False, webob.exc.HTTPCreated.code], + ["false", False, webob.exc.HTTPCreated.code], + ["7", False, webob.exc.HTTPClientError.code]] + for v in value: + data = {'network': {'name': 'net', + 'admin_state_up': v[0], + 'tenant_id': self._tenant_id}} + network_req = self.new_create_request('networks', data) + req = network_req.get_response(self.api) + self.assertEqual(req.status_int, v[2]) + if v[2] == webob.exc.HTTPCreated.code: + res = self.deserialize(self.fmt, req) + self.assertEqual(res['network']['admin_state_up'], v[1]) + + +class TestSubnetsV2(NeutronDbPluginV2TestCase): + + def _test_create_subnet(self, network=None, expected=None, **kwargs): + keys = kwargs.copy() + keys.setdefault('cidr', '10.0.0.0/24') + keys.setdefault('ip_version', 4) + keys.setdefault('enable_dhcp', True) + with self.subnet(network=network, **keys) as subnet: + # verify the response has each key with the correct value + for k in keys: + self.assertIn(k, subnet['subnet']) + if isinstance(keys[k], list): + self.assertEqual(sorted(subnet['subnet'][k]), + sorted(keys[k])) + else: + self.assertEqual(subnet['subnet'][k], keys[k]) + # verify the configured validations are correct + if expected: + for k in expected: + self.assertIn(k, subnet['subnet']) + if isinstance(expected[k], list): + self.assertEqual(sorted(subnet['subnet'][k]), + sorted(expected[k])) + else: + self.assertEqual(subnet['subnet'][k], expected[k]) + return subnet + + def test_create_subnet(self): 
+ gateway_ip = '10.0.0.1' + cidr = '10.0.0.0/24' + subnet = self._test_create_subnet(gateway_ip=gateway_ip, + cidr=cidr) + self.assertEqual(4, subnet['subnet']['ip_version']) + self.assertIn('name', subnet['subnet']) + + def test_create_two_subnets(self): + gateway_ips = ['10.0.0.1', '10.0.1.1'] + cidrs = ['10.0.0.0/24', '10.0.1.0/24'] + with self.network() as network: + with self.subnet(network=network, + gateway_ip=gateway_ips[0], + cidr=cidrs[0]): + with self.subnet(network=network, + gateway_ip=gateway_ips[1], + cidr=cidrs[1]): + net_req = self.new_show_request('networks', + network['network']['id']) + raw_res = net_req.get_response(self.api) + net_res = self.deserialize(self.fmt, raw_res) + for subnet_id in net_res['network']['subnets']: + sub_req = self.new_show_request('subnets', subnet_id) + raw_res = sub_req.get_response(self.api) + sub_res = self.deserialize(self.fmt, raw_res) + self.assertIn(sub_res['subnet']['cidr'], cidrs) + self.assertIn(sub_res['subnet']['gateway_ip'], + gateway_ips) + + def test_create_two_subnets_same_cidr_returns_400(self): + gateway_ip_1 = '10.0.0.1' + cidr_1 = '10.0.0.0/24' + gateway_ip_2 = '10.0.0.10' + cidr_2 = '10.0.0.0/24' + with self.network() as network: + with self.subnet(network=network, + gateway_ip=gateway_ip_1, + cidr=cidr_1): + with testlib_api.ExpectedException( + webob.exc.HTTPClientError) as ctx_manager: + with self.subnet(network=network, + gateway_ip=gateway_ip_2, + cidr=cidr_2): + pass + self.assertEqual(ctx_manager.exception.code, + webob.exc.HTTPClientError.code) + + def test_create_subnet_bad_V4_cidr(self): + with self.network() as network: + data = {'subnet': {'network_id': network['network']['id'], + 'cidr': '10.0.2.0', + 'ip_version': '4', + 'tenant_id': network['network']['tenant_id'], + 'gateway_ip': '10.0.2.1'}} + subnet_req = self.new_create_request('subnets', data) + res = subnet_req.get_response(self.api) + self.assertEqual(res.status_int, webob.exc.HTTPClientError.code) + + def 
test_create_subnet_bad_V6_cidr(self): + with self.network() as network: + data = {'subnet': {'network_id': network['network']['id'], + 'cidr': 'fe80::', + 'ip_version': '6', + 'tenant_id': network['network']['tenant_id'], + 'gateway_ip': 'fe80::1'}} + subnet_req = self.new_create_request('subnets', data) + res = subnet_req.get_response(self.api) + self.assertEqual(res.status_int, webob.exc.HTTPClientError.code) + + def test_create_2_subnets_overlapping_cidr_allowed_returns_200(self): + cidr_1 = '10.0.0.0/23' + cidr_2 = '10.0.0.0/24' + cfg.CONF.set_override('allow_overlapping_ips', True) + + with contextlib.nested(self.subnet(cidr=cidr_1), + self.subnet(cidr=cidr_2)): + pass + + def test_create_2_subnets_overlapping_cidr_not_allowed_returns_400(self): + cidr_1 = '10.0.0.0/23' + cidr_2 = '10.0.0.0/24' + cfg.CONF.set_override('allow_overlapping_ips', False) + with testlib_api.ExpectedException( + webob.exc.HTTPClientError) as ctx_manager: + with contextlib.nested(self.subnet(cidr=cidr_1), + self.subnet(cidr=cidr_2)): + pass + self.assertEqual(ctx_manager.exception.code, + webob.exc.HTTPClientError.code) + + def test_create_subnets_bulk_native(self): + if self._skip_native_bulk: + self.skipTest("Plugin does not support native bulk subnet create") + with self.network() as net: + res = self._create_subnet_bulk(self.fmt, 2, net['network']['id'], + 'test') + self._validate_behavior_on_bulk_success(res, 'subnets') + + def test_create_subnets_bulk_emulated(self): + real_has_attr = hasattr + + #ensures the API choose the emulation code path + def fakehasattr(item, attr): + if attr.endswith('__native_bulk_support'): + return False + return real_has_attr(item, attr) + + with mock.patch('__builtin__.hasattr', + new=fakehasattr): + with self.network() as net: + res = self._create_subnet_bulk(self.fmt, 2, + net['network']['id'], + 'test') + self._validate_behavior_on_bulk_success(res, 'subnets') + + def test_create_subnets_bulk_emulated_plugin_failure(self): + real_has_attr = 
hasattr + + #ensures the API choose the emulation code path + def fakehasattr(item, attr): + if attr.endswith('__native_bulk_support'): + return False + return real_has_attr(item, attr) + + with mock.patch('__builtin__.hasattr', + new=fakehasattr): + orig = manager.NeutronManager.get_plugin().create_subnet + with mock.patch.object(manager.NeutronManager.get_plugin(), + 'create_subnet') as patched_plugin: + + def side_effect(*args, **kwargs): + self._fail_second_call(patched_plugin, orig, + *args, **kwargs) + + patched_plugin.side_effect = side_effect + with self.network() as net: + res = self._create_subnet_bulk(self.fmt, 2, + net['network']['id'], + 'test') + # We expect a 500 as we injected a fault in the plugin + self._validate_behavior_on_bulk_failure( + res, 'subnets', webob.exc.HTTPServerError.code + ) + + def test_create_subnets_bulk_native_plugin_failure(self): + if self._skip_native_bulk: + self.skipTest("Plugin does not support native bulk subnet create") + plugin = manager.NeutronManager.get_plugin() + orig = plugin.create_subnet + with mock.patch.object(plugin, 'create_subnet') as patched_plugin: + def side_effect(*args, **kwargs): + return self._fail_second_call(patched_plugin, orig, + *args, **kwargs) + + patched_plugin.side_effect = side_effect + with self.network() as net: + res = self._create_subnet_bulk(self.fmt, 2, + net['network']['id'], + 'test') + + # We expect a 500 as we injected a fault in the plugin + self._validate_behavior_on_bulk_failure( + res, 'subnets', webob.exc.HTTPServerError.code + ) + + def test_delete_subnet(self): + gateway_ip = '10.0.0.1' + cidr = '10.0.0.0/24' + # Create new network + res = self._create_network(fmt=self.fmt, name='net', + admin_state_up=True) + network = self.deserialize(self.fmt, res) + subnet = self._make_subnet(self.fmt, network, gateway_ip, + cidr, ip_version=4) + req = self.new_delete_request('subnets', subnet['subnet']['id']) + res = req.get_response(self.api) + self.assertEqual(res.status_int, 
webob.exc.HTTPNoContent.code) + + def test_delete_subnet_port_exists_owned_by_network(self): + gateway_ip = '10.0.0.1' + cidr = '10.0.0.0/24' + # Create new network + res = self._create_network(fmt=self.fmt, name='net', + admin_state_up=True) + network = self.deserialize(self.fmt, res) + subnet = self._make_subnet(self.fmt, network, gateway_ip, + cidr, ip_version=4) + self._create_port(self.fmt, + network['network']['id'], + device_owner=constants.DEVICE_OWNER_DHCP) + req = self.new_delete_request('subnets', subnet['subnet']['id']) + res = req.get_response(self.api) + self.assertEqual(res.status_int, webob.exc.HTTPNoContent.code) + + def test_delete_subnet_dhcp_port_associated_with_other_subnets(self): + res = self._create_network(fmt=self.fmt, name='net', + admin_state_up=True) + network = self.deserialize(self.fmt, res) + subnet1 = self._make_subnet(self.fmt, network, '10.0.0.1', + '10.0.0.0/24', ip_version=4) + subnet2 = self._make_subnet(self.fmt, network, '10.0.1.1', + '10.0.1.0/24', ip_version=4) + res = self._create_port(self.fmt, + network['network']['id'], + device_owner=constants.DEVICE_OWNER_DHCP, + fixed_ips=[ + {'subnet_id': subnet1['subnet']['id']}, + {'subnet_id': subnet2['subnet']['id']} + ]) + port = self.deserialize(self.fmt, res) + expected_subnets = [subnet1['subnet']['id'], subnet2['subnet']['id']] + self.assertEqual(expected_subnets, + [s['subnet_id'] for s in port['port']['fixed_ips']]) + req = self.new_delete_request('subnets', subnet1['subnet']['id']) + res = req.get_response(self.api) + self.assertEqual(res.status_int, 204) + port = self._show('ports', port['port']['id']) + + expected_subnets = [subnet2['subnet']['id']] + self.assertEqual(expected_subnets, + [s['subnet_id'] for s in port['port']['fixed_ips']]) + req = self.new_delete_request('subnets', subnet2['subnet']['id']) + res = req.get_response(self.api) + self.assertEqual(res.status_int, 204) + port = self._show('ports', port['port']['id']) + 
self.assertFalse(port['port']['fixed_ips']) + + def test_delete_subnet_port_exists_owned_by_other(self): + with self.subnet() as subnet: + with self.port(subnet=subnet): + id = subnet['subnet']['id'] + req = self.new_delete_request('subnets', id) + res = req.get_response(self.api) + data = self.deserialize(self.fmt, res) + self.assertEqual(res.status_int, webob.exc.HTTPConflict.code) + msg = str(n_exc.SubnetInUse(subnet_id=id)) + self.assertEqual(data['NeutronError']['message'], msg) + + def test_delete_subnet_with_other_subnet_on_network_still_in_use(self): + with self.network() as network: + with contextlib.nested( + self.subnet(network=network), + self.subnet(network=network, cidr='10.0.1.0/24', + do_delete=False)) as (subnet1, subnet2): + subnet1_id = subnet1['subnet']['id'] + subnet2_id = subnet2['subnet']['id'] + with self.port( + subnet=subnet1, + fixed_ips=[{'subnet_id': subnet1_id}]): + req = self.new_delete_request('subnets', subnet2_id) + res = req.get_response(self.api) + self.assertEqual(res.status_int, + webob.exc.HTTPNoContent.code) + + def test_delete_network(self): + gateway_ip = '10.0.0.1' + cidr = '10.0.0.0/24' + # Create new network + res = self._create_network(fmt=self.fmt, name='net', + admin_state_up=True) + network = self.deserialize(self.fmt, res) + self._make_subnet(self.fmt, network, gateway_ip, cidr, ip_version=4) + req = self.new_delete_request('networks', network['network']['id']) + res = req.get_response(self.api) + self.assertEqual(res.status_int, webob.exc.HTTPNoContent.code) + + def test_create_subnet_bad_tenant(self): + with self.network() as network: + self._create_subnet(self.fmt, + network['network']['id'], + '10.0.2.0/24', + webob.exc.HTTPNotFound.code, + ip_version=4, + tenant_id='bad_tenant_id', + gateway_ip='10.0.2.1', + device_owner='fake_owner', + set_context=True) + + def test_create_subnet_as_admin(self): + with self.network(do_delete=False) as network: + self._create_subnet(self.fmt, + network['network']['id'], + 
'10.0.2.0/24', + webob.exc.HTTPCreated.code, + ip_version=4, + tenant_id='bad_tenant_id', + gateway_ip='10.0.2.1', + device_owner='fake_owner', + set_context=False) + + def test_create_subnet_nonzero_cidr(self): + with contextlib.nested( + self.subnet(cidr='10.129.122.5/8'), + self.subnet(cidr='11.129.122.5/15'), + self.subnet(cidr='12.129.122.5/16'), + self.subnet(cidr='13.129.122.5/18'), + self.subnet(cidr='14.129.122.5/22'), + self.subnet(cidr='15.129.122.5/24'), + self.subnet(cidr='16.129.122.5/28'), + self.subnet(cidr='17.129.122.5/32') + ) as subs: + # the API should accept and correct these for users + self.assertEqual(subs[0]['subnet']['cidr'], '10.0.0.0/8') + self.assertEqual(subs[1]['subnet']['cidr'], '11.128.0.0/15') + self.assertEqual(subs[2]['subnet']['cidr'], '12.129.0.0/16') + self.assertEqual(subs[3]['subnet']['cidr'], '13.129.64.0/18') + self.assertEqual(subs[4]['subnet']['cidr'], '14.129.120.0/22') + self.assertEqual(subs[5]['subnet']['cidr'], '15.129.122.0/24') + self.assertEqual(subs[6]['subnet']['cidr'], '16.129.122.0/28') + self.assertEqual(subs[7]['subnet']['cidr'], '17.129.122.5/32') + + def test_create_subnet_bad_ip_version(self): + with self.network() as network: + # Check bad IP version + data = {'subnet': {'network_id': network['network']['id'], + 'cidr': '10.0.2.0/24', + 'ip_version': 'abc', + 'tenant_id': network['network']['tenant_id'], + 'gateway_ip': '10.0.2.1'}} + subnet_req = self.new_create_request('subnets', data) + res = subnet_req.get_response(self.api) + self.assertEqual(res.status_int, webob.exc.HTTPClientError.code) + + def test_create_subnet_bad_ip_version_null(self): + with self.network() as network: + # Check bad IP version + data = {'subnet': {'network_id': network['network']['id'], + 'cidr': '10.0.2.0/24', + 'ip_version': None, + 'tenant_id': network['network']['tenant_id'], + 'gateway_ip': '10.0.2.1'}} + subnet_req = self.new_create_request('subnets', data) + res = subnet_req.get_response(self.api) + 
self.assertEqual(res.status_int, webob.exc.HTTPClientError.code) + + def test_create_subnet_bad_uuid(self): + with self.network() as network: + # Check invalid UUID + data = {'subnet': {'network_id': None, + 'cidr': '10.0.2.0/24', + 'ip_version': 4, + 'tenant_id': network['network']['tenant_id'], + 'gateway_ip': '10.0.2.1'}} + subnet_req = self.new_create_request('subnets', data) + res = subnet_req.get_response(self.api) + self.assertEqual(res.status_int, webob.exc.HTTPClientError.code) + + def test_create_subnet_bad_boolean(self): + with self.network() as network: + # Check invalid boolean + data = {'subnet': {'network_id': network['network']['id'], + 'cidr': '10.0.2.0/24', + 'ip_version': '4', + 'enable_dhcp': None, + 'tenant_id': network['network']['tenant_id'], + 'gateway_ip': '10.0.2.1'}} + subnet_req = self.new_create_request('subnets', data) + res = subnet_req.get_response(self.api) + self.assertEqual(res.status_int, webob.exc.HTTPClientError.code) + + def test_create_subnet_bad_pools(self): + with self.network() as network: + # Check allocation pools + allocation_pools = [[{'end': '10.0.0.254'}], + [{'start': '10.0.0.254'}], + [{'start': '1000.0.0.254'}], + [{'start': '10.0.0.2', 'end': '10.0.0.254'}, + {'end': '10.0.0.254'}], + None, + [{'start': '10.0.0.2', 'end': '10.0.0.3'}, + {'start': '10.0.0.2', 'end': '10.0.0.3'}]] + tenant_id = network['network']['tenant_id'] + for pool in allocation_pools: + data = {'subnet': {'network_id': network['network']['id'], + 'cidr': '10.0.2.0/24', + 'ip_version': '4', + 'tenant_id': tenant_id, + 'gateway_ip': '10.0.2.1', + 'allocation_pools': pool}} + subnet_req = self.new_create_request('subnets', data) + res = subnet_req.get_response(self.api) + self.assertEqual(res.status_int, + webob.exc.HTTPClientError.code) + + def test_create_subnet_bad_nameserver(self): + with self.network() as network: + # Check nameservers + nameserver_pools = [['1100.0.0.2'], + ['1.1.1.2', '1.1000.1.3'], + ['1.1.1.2', '1.1.1.2']] + tenant_id = 
network['network']['tenant_id'] + for nameservers in nameserver_pools: + data = {'subnet': {'network_id': network['network']['id'], + 'cidr': '10.0.2.0/24', + 'ip_version': '4', + 'tenant_id': tenant_id, + 'gateway_ip': '10.0.2.1', + 'dns_nameservers': nameservers}} + subnet_req = self.new_create_request('subnets', data) + res = subnet_req.get_response(self.api) + self.assertEqual(res.status_int, + webob.exc.HTTPClientError.code) + + def test_create_subnet_bad_hostroutes(self): + with self.network() as network: + # Check hostroutes + hostroute_pools = [[{'destination': '100.0.0.0/24'}], + [{'nexthop': '10.0.2.20'}], + [{'nexthop': '10.0.2.20', + 'destination': '100.0.0.0/8'}, + {'nexthop': '10.0.2.20', + 'destination': '100.0.0.0/8'}]] + tenant_id = network['network']['tenant_id'] + for hostroutes in hostroute_pools: + data = {'subnet': {'network_id': network['network']['id'], + 'cidr': '10.0.2.0/24', + 'ip_version': '4', + 'tenant_id': tenant_id, + 'gateway_ip': '10.0.2.1', + 'host_routes': hostroutes}} + subnet_req = self.new_create_request('subnets', data) + res = subnet_req.get_response(self.api) + self.assertEqual(res.status_int, + webob.exc.HTTPClientError.code) + + def test_create_subnet_defaults(self): + gateway = '10.0.0.1' + cidr = '10.0.0.0/24' + allocation_pools = [{'start': '10.0.0.2', + 'end': '10.0.0.254'}] + enable_dhcp = True + subnet = self._test_create_subnet() + # verify cidr & gw have been correctly generated + self.assertEqual(subnet['subnet']['cidr'], cidr) + self.assertEqual(subnet['subnet']['gateway_ip'], gateway) + self.assertEqual(subnet['subnet']['enable_dhcp'], enable_dhcp) + self.assertEqual(subnet['subnet']['allocation_pools'], + allocation_pools) + + def test_create_subnet_gw_values(self): + # Gateway not in subnet + gateway = '100.0.0.1' + cidr = '10.0.0.0/24' + allocation_pools = [{'start': '10.0.0.1', + 'end': '10.0.0.254'}] + expected = {'gateway_ip': gateway, + 'cidr': cidr, + 'allocation_pools': allocation_pools} + 
self._test_create_subnet(expected=expected, gateway_ip=gateway) + # Gateway is last IP in range + gateway = '10.0.0.254' + allocation_pools = [{'start': '10.0.0.1', + 'end': '10.0.0.253'}] + expected = {'gateway_ip': gateway, + 'cidr': cidr, + 'allocation_pools': allocation_pools} + self._test_create_subnet(expected=expected, gateway_ip=gateway) + # Gateway is first in subnet + gateway = '10.0.0.1' + allocation_pools = [{'start': '10.0.0.2', + 'end': '10.0.0.254'}] + expected = {'gateway_ip': gateway, + 'cidr': cidr, + 'allocation_pools': allocation_pools} + self._test_create_subnet(expected=expected, + gateway_ip=gateway) + + def test_create_subnet_gw_outside_cidr_force_on_returns_400(self): + cfg.CONF.set_override('force_gateway_on_subnet', True) + with self.network() as network: + self._create_subnet(self.fmt, + network['network']['id'], + '10.0.0.0/24', + webob.exc.HTTPClientError.code, + gateway_ip='100.0.0.1') + + def test_create_subnet_gw_of_network_force_on_returns_400(self): + cfg.CONF.set_override('force_gateway_on_subnet', True) + with self.network() as network: + self._create_subnet(self.fmt, + network['network']['id'], + '10.0.0.0/24', + webob.exc.HTTPClientError.code, + gateway_ip='10.0.0.0') + + def test_create_subnet_gw_bcast_force_on_returns_400(self): + cfg.CONF.set_override('force_gateway_on_subnet', True) + with self.network() as network: + self._create_subnet(self.fmt, + network['network']['id'], + '10.0.0.0/24', + webob.exc.HTTPClientError.code, + gateway_ip='10.0.0.255') + + def test_create_subnet_with_allocation_pool(self): + gateway_ip = '10.0.0.1' + cidr = '10.0.0.0/24' + allocation_pools = [{'start': '10.0.0.2', + 'end': '10.0.0.100'}] + self._test_create_subnet(gateway_ip=gateway_ip, + cidr=cidr, + allocation_pools=allocation_pools) + + def test_create_subnet_with_none_gateway(self): + cidr = '10.0.0.0/24' + self._test_create_subnet(gateway_ip=None, + cidr=cidr) + + def test_create_subnet_with_none_gateway_fully_allocated(self): + cidr = 
'10.0.0.0/24' + allocation_pools = [{'start': '10.0.0.1', + 'end': '10.0.0.254'}] + self._test_create_subnet(gateway_ip=None, + cidr=cidr, + allocation_pools=allocation_pools) + + def test_subnet_with_allocation_range(self): + with self.network() as network: + net_id = network['network']['id'] + data = {'subnet': {'network_id': net_id, + 'cidr': '10.0.0.0/24', + 'ip_version': 4, + 'gateway_ip': '10.0.0.1', + 'tenant_id': network['network']['tenant_id'], + 'allocation_pools': [{'start': '10.0.0.100', + 'end': '10.0.0.120'}]}} + subnet_req = self.new_create_request('subnets', data) + subnet = self.deserialize(self.fmt, + subnet_req.get_response(self.api)) + # Check fixed IP not in allocation range + kwargs = {"fixed_ips": [{'subnet_id': subnet['subnet']['id'], + 'ip_address': '10.0.0.10'}]} + res = self._create_port(self.fmt, net_id=net_id, **kwargs) + self.assertEqual(res.status_int, webob.exc.HTTPCreated.code) + port = self.deserialize(self.fmt, res) + # delete the port + self._delete('ports', port['port']['id']) + + # Check when fixed IP is gateway + kwargs = {"fixed_ips": [{'subnet_id': subnet['subnet']['id'], + 'ip_address': '10.0.0.1'}]} + res = self._create_port(self.fmt, net_id=net_id, **kwargs) + self.assertEqual(res.status_int, webob.exc.HTTPCreated.code) + port = self.deserialize(self.fmt, res) + # delete the port + self._delete('ports', port['port']['id']) + + def test_create_subnet_with_none_gateway_allocation_pool(self): + cidr = '10.0.0.0/24' + allocation_pools = [{'start': '10.0.0.2', + 'end': '10.0.0.100'}] + self._test_create_subnet(gateway_ip=None, + cidr=cidr, + allocation_pools=allocation_pools) + + def test_create_subnet_with_v6_allocation_pool(self): + gateway_ip = 'fe80::1' + cidr = 'fe80::/80' + allocation_pools = [{'start': 'fe80::2', + 'end': 'fe80::ffff:fffa:ffff'}] + self._test_create_subnet(gateway_ip=gateway_ip, + cidr=cidr, ip_version=6, + allocation_pools=allocation_pools) + + def test_create_subnet_with_large_allocation_pool(self): + 
gateway_ip = '10.0.0.1' + cidr = '10.0.0.0/8' + allocation_pools = [{'start': '10.0.0.2', + 'end': '10.0.0.100'}, + {'start': '10.1.0.0', + 'end': '10.200.0.100'}] + self._test_create_subnet(gateway_ip=gateway_ip, + cidr=cidr, + allocation_pools=allocation_pools) + + def test_create_subnet_multiple_allocation_pools(self): + gateway_ip = '10.0.0.1' + cidr = '10.0.0.0/24' + allocation_pools = [{'start': '10.0.0.2', + 'end': '10.0.0.100'}, + {'start': '10.0.0.110', + 'end': '10.0.0.150'}] + self._test_create_subnet(gateway_ip=gateway_ip, + cidr=cidr, + allocation_pools=allocation_pools) + + def test_create_subnet_with_dhcp_disabled(self): + enable_dhcp = False + self._test_create_subnet(enable_dhcp=enable_dhcp) + + def test_create_subnet_default_gw_conflict_allocation_pool_returns_409( + self): + cidr = '10.0.0.0/24' + allocation_pools = [{'start': '10.0.0.1', + 'end': '10.0.0.5'}] + with testlib_api.ExpectedException( + webob.exc.HTTPClientError) as ctx_manager: + self._test_create_subnet(cidr=cidr, + allocation_pools=allocation_pools) + self.assertEqual(ctx_manager.exception.code, + webob.exc.HTTPConflict.code) + + def test_create_subnet_gateway_in_allocation_pool_returns_409(self): + gateway_ip = '10.0.0.50' + cidr = '10.0.0.0/24' + allocation_pools = [{'start': '10.0.0.1', + 'end': '10.0.0.100'}] + with testlib_api.ExpectedException( + webob.exc.HTTPClientError) as ctx_manager: + self._test_create_subnet(gateway_ip=gateway_ip, + cidr=cidr, + allocation_pools=allocation_pools) + self.assertEqual(ctx_manager.exception.code, + webob.exc.HTTPConflict.code) + + def test_create_subnet_overlapping_allocation_pools_returns_409(self): + gateway_ip = '10.0.0.1' + cidr = '10.0.0.0/24' + allocation_pools = [{'start': '10.0.0.2', + 'end': '10.0.0.150'}, + {'start': '10.0.0.140', + 'end': '10.0.0.180'}] + with testlib_api.ExpectedException( + webob.exc.HTTPClientError) as ctx_manager: + self._test_create_subnet(gateway_ip=gateway_ip, + cidr=cidr, + 
allocation_pools=allocation_pools) + self.assertEqual(ctx_manager.exception.code, + webob.exc.HTTPConflict.code) + + def test_create_subnet_invalid_allocation_pool_returns_400(self): + gateway_ip = '10.0.0.1' + cidr = '10.0.0.0/24' + allocation_pools = [{'start': '10.0.0.2', + 'end': '10.0.0.256'}] + with testlib_api.ExpectedException( + webob.exc.HTTPClientError) as ctx_manager: + self._test_create_subnet(gateway_ip=gateway_ip, + cidr=cidr, + allocation_pools=allocation_pools) + self.assertEqual(ctx_manager.exception.code, + webob.exc.HTTPClientError.code) + + def test_create_subnet_out_of_range_allocation_pool_returns_400(self): + gateway_ip = '10.0.0.1' + cidr = '10.0.0.0/24' + allocation_pools = [{'start': '10.0.0.2', + 'end': '10.0.1.6'}] + with testlib_api.ExpectedException( + webob.exc.HTTPClientError) as ctx_manager: + self._test_create_subnet(gateway_ip=gateway_ip, + cidr=cidr, + allocation_pools=allocation_pools) + self.assertEqual(ctx_manager.exception.code, + webob.exc.HTTPClientError.code) + + def test_create_subnet_shared_returns_400(self): + cidr = '10.0.0.0/24' + with testlib_api.ExpectedException( + webob.exc.HTTPClientError) as ctx_manager: + self._test_create_subnet(cidr=cidr, + shared=True) + self.assertEqual(ctx_manager.exception.code, + webob.exc.HTTPClientError.code) + + def test_create_subnet_inconsistent_ipv6_cidrv4(self): + with self.network() as network: + data = {'subnet': {'network_id': network['network']['id'], + 'cidr': '10.0.2.0/24', + 'ip_version': 6, + 'tenant_id': network['network']['tenant_id']}} + subnet_req = self.new_create_request('subnets', data) + res = subnet_req.get_response(self.api) + self.assertEqual(res.status_int, webob.exc.HTTPClientError.code) + + def test_create_subnet_inconsistent_ipv4_cidrv6(self): + with self.network() as network: + data = {'subnet': {'network_id': network['network']['id'], + 'cidr': 'fe80::0/80', + 'ip_version': 4, + 'tenant_id': network['network']['tenant_id']}} + subnet_req = 
self.new_create_request('subnets', data) + res = subnet_req.get_response(self.api) + self.assertEqual(res.status_int, webob.exc.HTTPClientError.code) + + def test_create_subnet_inconsistent_ipv4_gatewayv6(self): + with self.network() as network: + data = {'subnet': {'network_id': network['network']['id'], + 'cidr': '10.0.2.0/24', + 'ip_version': 4, + 'gateway_ip': 'fe80::1', + 'tenant_id': network['network']['tenant_id']}} + subnet_req = self.new_create_request('subnets', data) + res = subnet_req.get_response(self.api) + self.assertEqual(res.status_int, webob.exc.HTTPClientError.code) + + def test_create_subnet_inconsistent_ipv6_gatewayv4(self): + with self.network() as network: + data = {'subnet': {'network_id': network['network']['id'], + 'cidr': 'fe80::0/80', + 'ip_version': 6, + 'gateway_ip': '192.168.0.1', + 'tenant_id': network['network']['tenant_id']}} + subnet_req = self.new_create_request('subnets', data) + res = subnet_req.get_response(self.api) + self.assertEqual(res.status_int, webob.exc.HTTPClientError.code) + + def test_create_subnet_inconsistent_ipv6_dns_v4(self): + with self.network() as network: + data = {'subnet': {'network_id': network['network']['id'], + 'cidr': 'fe80::0/80', + 'ip_version': 6, + 'dns_nameservers': ['192.168.0.1'], + 'tenant_id': network['network']['tenant_id']}} + subnet_req = self.new_create_request('subnets', data) + res = subnet_req.get_response(self.api) + self.assertEqual(res.status_int, webob.exc.HTTPClientError.code) + + def test_create_subnet_inconsistent_ipv4_hostroute_dst_v6(self): + host_routes = [{'destination': 'fe80::0/48', + 'nexthop': '10.0.2.20'}] + with self.network() as network: + data = {'subnet': {'network_id': network['network']['id'], + 'cidr': '10.0.2.0/24', + 'ip_version': 4, + 'host_routes': host_routes, + 'tenant_id': network['network']['tenant_id']}} + subnet_req = self.new_create_request('subnets', data) + res = subnet_req.get_response(self.api) + self.assertEqual(res.status_int, 
webob.exc.HTTPClientError.code) + + def test_create_subnet_inconsistent_ipv4_hostroute_np_v6(self): + host_routes = [{'destination': '172.16.0.0/24', + 'nexthop': 'fe80::1'}] + with self.network() as network: + data = {'subnet': {'network_id': network['network']['id'], + 'cidr': '10.0.2.0/24', + 'ip_version': 4, + 'host_routes': host_routes, + 'tenant_id': network['network']['tenant_id']}} + subnet_req = self.new_create_request('subnets', data) + res = subnet_req.get_response(self.api) + self.assertEqual(res.status_int, webob.exc.HTTPClientError.code) + + def test_create_subnet_ipv6_attributes(self): + gateway_ip = 'fe80::1' + cidr = 'fe80::/80' + + for mode in constants.IPV6_MODES: + self._test_create_subnet(gateway_ip=gateway_ip, + cidr=cidr, ip_version=6, + ipv6_ra_mode=mode, + ipv6_address_mode=mode) + + def test_create_subnet_ipv6_attributes_no_dhcp_enabled(self): + gateway_ip = 'fe80::1' + cidr = 'fe80::/80' + with testlib_api.ExpectedException( + webob.exc.HTTPClientError) as ctx_manager: + for mode in constants.IPV6_MODES: + self._test_create_subnet(gateway_ip=gateway_ip, + cidr=cidr, ip_version=6, + enable_dhcp=False, + ipv6_ra_mode=mode, + ipv6_address_mode=mode) + self.assertEqual(ctx_manager.exception.code, + webob.exc.HTTPClientError.code) + + def test_create_subnet_invalid_ipv6_ra_mode(self): + gateway_ip = 'fe80::1' + cidr = 'fe80::/80' + with testlib_api.ExpectedException( + webob.exc.HTTPClientError) as ctx_manager: + self._test_create_subnet(gateway_ip=gateway_ip, + cidr=cidr, ip_version=6, + ipv6_ra_mode='foo', + ipv6_address_mode='slaac') + self.assertEqual(ctx_manager.exception.code, + webob.exc.HTTPClientError.code) + + def test_create_subnet_invalid_ipv6_address_mode(self): + gateway_ip = 'fe80::1' + cidr = 'fe80::/80' + with testlib_api.ExpectedException( + webob.exc.HTTPClientError) as ctx_manager: + self._test_create_subnet(gateway_ip=gateway_ip, + cidr=cidr, ip_version=6, + ipv6_ra_mode='slaac', + ipv6_address_mode='baz') + 
self.assertEqual(ctx_manager.exception.code, + webob.exc.HTTPClientError.code) + + def test_create_subnet_invalid_ipv6_combination(self): + gateway_ip = 'fe80::1' + cidr = 'fe80::/80' + with testlib_api.ExpectedException( + webob.exc.HTTPClientError) as ctx_manager: + self._test_create_subnet(gateway_ip=gateway_ip, + cidr=cidr, ip_version=6, + ipv6_ra_mode='stateful', + ipv6_address_mode='stateless') + self.assertEqual(ctx_manager.exception.code, + webob.exc.HTTPClientError.code) + + def test_create_subnet_ipv6_single_attribute_set(self): + gateway_ip = 'fe80::1' + cidr = 'fe80::/80' + for mode in constants.IPV6_MODES: + self._test_create_subnet(gateway_ip=gateway_ip, + cidr=cidr, ip_version=6, + ipv6_ra_mode=None, + ipv6_address_mode=mode) + self._test_create_subnet(gateway_ip=gateway_ip, + cidr=cidr, ip_version=6, + ipv6_ra_mode=mode, + ipv6_address_mode=None) + + def test_create_subnet_ipv6_ra_mode_ip_version_4(self): + cidr = '10.0.2.0/24' + with testlib_api.ExpectedException( + webob.exc.HTTPClientError) as ctx_manager: + self._test_create_subnet(cidr=cidr, ip_version=4, + ipv6_ra_mode=constants.DHCPV6_STATEFUL) + self.assertEqual(ctx_manager.exception.code, + webob.exc.HTTPClientError.code) + + def test_create_subnet_ipv6_address_mode_ip_version_4(self): + cidr = '10.0.2.0/24' + with testlib_api.ExpectedException( + webob.exc.HTTPClientError) as ctx_manager: + self._test_create_subnet( + cidr=cidr, ip_version=4, + ipv6_address_mode=constants.DHCPV6_STATEFUL) + self.assertEqual(ctx_manager.exception.code, + webob.exc.HTTPClientError.code) + + def test_update_subnet_no_gateway(self): + with self.subnet() as subnet: + data = {'subnet': {'gateway_ip': '11.0.0.1'}} + req = self.new_update_request('subnets', data, + subnet['subnet']['id']) + res = self.deserialize(self.fmt, req.get_response(self.api)) + self.assertEqual(res['subnet']['gateway_ip'], + data['subnet']['gateway_ip']) + data = {'subnet': {'gateway_ip': None}} + req = self.new_update_request('subnets', 
data, + subnet['subnet']['id']) + res = self.deserialize(self.fmt, req.get_response(self.api)) + self.assertIsNone(res['subnet']['gateway_ip']) + + def test_update_subnet(self): + with self.subnet() as subnet: + data = {'subnet': {'gateway_ip': '11.0.0.1'}} + req = self.new_update_request('subnets', data, + subnet['subnet']['id']) + res = self.deserialize(self.fmt, req.get_response(self.api)) + self.assertEqual(res['subnet']['gateway_ip'], + data['subnet']['gateway_ip']) + + def test_update_subnet_adding_additional_host_routes_and_dns(self): + host_routes = [{'destination': '172.16.0.0/24', + 'nexthop': '10.0.2.2'}] + with self.network() as network: + data = {'subnet': {'network_id': network['network']['id'], + 'cidr': '10.0.2.0/24', + 'ip_version': 4, + 'dns_nameservers': ['192.168.0.1'], + 'host_routes': host_routes, + 'tenant_id': network['network']['tenant_id']}} + subnet_req = self.new_create_request('subnets', data) + res = self.deserialize(self.fmt, subnet_req.get_response(self.api)) + + host_routes = [{'destination': '172.16.0.0/24', + 'nexthop': '10.0.2.2'}, + {'destination': '192.168.0.0/24', + 'nexthop': '10.0.2.3'}] + + dns_nameservers = ['192.168.0.1', '192.168.0.2'] + data = {'subnet': {'host_routes': host_routes, + 'dns_nameservers': dns_nameservers}} + req = self.new_update_request('subnets', data, + res['subnet']['id']) + res = self.deserialize(self.fmt, req.get_response(self.api)) + self.assertEqual(sorted(res['subnet']['host_routes']), + sorted(host_routes)) + self.assertEqual(sorted(res['subnet']['dns_nameservers']), + sorted(dns_nameservers)) + + def test_update_subnet_shared_returns_400(self): + with self.network(shared=True) as network: + with self.subnet(network=network) as subnet: + data = {'subnet': {'shared': True}} + req = self.new_update_request('subnets', data, + subnet['subnet']['id']) + res = req.get_response(self.api) + self.assertEqual(res.status_int, + webob.exc.HTTPClientError.code) + + def 
test_update_subnet_gw_outside_cidr_force_on_returns_400(self): + cfg.CONF.set_override('force_gateway_on_subnet', True) + with self.network() as network: + with self.subnet(network=network) as subnet: + data = {'subnet': {'gateway_ip': '100.0.0.1'}} + req = self.new_update_request('subnets', data, + subnet['subnet']['id']) + res = req.get_response(self.api) + self.assertEqual(res.status_int, + webob.exc.HTTPClientError.code) + + def test_update_subnet_gw_ip_in_use_returns_409(self): + with self.network() as network: + with self.subnet( + network=network, + allocation_pools=[{'start': '10.0.0.100', + 'end': '10.0.0.253'}]) as subnet: + subnet_data = subnet['subnet'] + with self.port( + subnet=subnet, + fixed_ips=[{'subnet_id': subnet_data['id'], + 'ip_address': subnet_data['gateway_ip']}]): + data = {'subnet': {'gateway_ip': '10.0.0.99'}} + req = self.new_update_request('subnets', data, + subnet_data['id']) + res = req.get_response(self.api) + self.assertEqual(res.status_int, 409) + + def test_update_subnet_inconsistent_ipv4_gatewayv6(self): + with self.network() as network: + with self.subnet(network=network) as subnet: + data = {'subnet': {'gateway_ip': 'fe80::1'}} + req = self.new_update_request('subnets', data, + subnet['subnet']['id']) + res = req.get_response(self.api) + self.assertEqual(res.status_int, + webob.exc.HTTPClientError.code) + + def test_update_subnet_inconsistent_ipv6_gatewayv4(self): + with self.network() as network: + with self.subnet(network=network, + ip_version=6, cidr='fe80::/48') as subnet: + data = {'subnet': {'gateway_ip': '10.1.1.1'}} + req = self.new_update_request('subnets', data, + subnet['subnet']['id']) + res = req.get_response(self.api) + self.assertEqual(res.status_int, + webob.exc.HTTPClientError.code) + + def test_update_subnet_inconsistent_ipv4_dns_v6(self): + dns_nameservers = ['fe80::1'] + with self.network() as network: + with self.subnet(network=network) as subnet: + data = {'subnet': {'dns_nameservers': dns_nameservers}} + 
req = self.new_update_request('subnets', data, + subnet['subnet']['id']) + res = req.get_response(self.api) + self.assertEqual(res.status_int, + webob.exc.HTTPClientError.code) + + def test_update_subnet_inconsistent_ipv6_hostroute_dst_v4(self): + host_routes = [{'destination': 'fe80::0/48', + 'nexthop': '10.0.2.20'}] + with self.network() as network: + with self.subnet(network=network, + ip_version=6, cidr='fe80::/48') as subnet: + data = {'subnet': {'host_routes': host_routes}} + req = self.new_update_request('subnets', data, + subnet['subnet']['id']) + res = req.get_response(self.api) + self.assertEqual(res.status_int, + webob.exc.HTTPClientError.code) + + def test_update_subnet_inconsistent_ipv6_hostroute_np_v4(self): + host_routes = [{'destination': '172.16.0.0/24', + 'nexthop': 'fe80::1'}] + with self.network() as network: + with self.subnet(network=network, + ip_version=6, cidr='fe80::/48') as subnet: + data = {'subnet': {'host_routes': host_routes}} + req = self.new_update_request('subnets', data, + subnet['subnet']['id']) + res = req.get_response(self.api) + self.assertEqual(res.status_int, + webob.exc.HTTPClientError.code) + + def test_update_subnet_gateway_in_allocation_pool_returns_409(self): + allocation_pools = [{'start': '10.0.0.2', 'end': '10.0.0.254'}] + with self.network() as network: + with self.subnet(network=network, + allocation_pools=allocation_pools, + cidr='10.0.0.0/24') as subnet: + data = {'subnet': {'gateway_ip': '10.0.0.50'}} + req = self.new_update_request('subnets', data, + subnet['subnet']['id']) + res = req.get_response(self.api) + self.assertEqual(res.status_int, + webob.exc.HTTPConflict.code) + + def test_update_subnet_ipv6_attributes(self): + with self.subnet(ip_version=6, cidr='fe80::/80', + ipv6_ra_mode=constants.IPV6_SLAAC, + ipv6_address_mode=constants.IPV6_SLAAC) as subnet: + data = {'subnet': {'ipv6_ra_mode': constants.DHCPV6_STATEFUL, + 'ipv6_address_mode': constants.DHCPV6_STATEFUL}} + req = 
self.new_update_request('subnets', data, + subnet['subnet']['id']) + res = self.deserialize(self.fmt, req.get_response(self.api)) + self.assertEqual(res['subnet']['ipv6_ra_mode'], + data['subnet']['ipv6_ra_mode']) + self.assertEqual(res['subnet']['ipv6_address_mode'], + data['subnet']['ipv6_address_mode']) + + def test_update_subnet_ipv6_inconsistent_ra_attribute(self): + with self.subnet(ip_version=6, cidr='fe80::/80', + ipv6_ra_mode=constants.IPV6_SLAAC, + ipv6_address_mode=constants.IPV6_SLAAC) as subnet: + data = {'subnet': {'ipv6_ra_mode': constants.DHCPV6_STATEFUL}} + req = self.new_update_request('subnets', data, + subnet['subnet']['id']) + res = req.get_response(self.api) + self.assertEqual(res.status_int, + webob.exc.HTTPClientError.code) + + def test_update_subnet_ipv6_inconsistent_address_attribute(self): + with self.subnet(ip_version=6, cidr='fe80::/80', + ipv6_ra_mode=constants.IPV6_SLAAC, + ipv6_address_mode=constants.IPV6_SLAAC) as subnet: + data = {'subnet': {'ipv6_address_mode': constants.DHCPV6_STATEFUL}} + req = self.new_update_request('subnets', data, + subnet['subnet']['id']) + res = req.get_response(self.api) + self.assertEqual(res.status_int, + webob.exc.HTTPClientError.code) + + def test_update_subnet_ipv6_inconsistent_enable_dhcp(self): + with self.subnet(ip_version=6, cidr='fe80::/80', + ipv6_ra_mode=constants.IPV6_SLAAC, + ipv6_address_mode=constants.IPV6_SLAAC) as subnet: + data = {'subnet': {'enable_dhcp': False}} + req = self.new_update_request('subnets', data, + subnet['subnet']['id']) + res = req.get_response(self.api) + self.assertEqual(res.status_int, + webob.exc.HTTPClientError.code) + + def test_update_subnet_ipv6_ra_mode_ip_version_4(self): + with self.network() as network: + with self.subnet(network=network) as subnet: + data = {'subnet': {'ipv6_ra_mode': + constants.DHCPV6_STATEFUL}} + req = self.new_update_request('subnets', data, + subnet['subnet']['id']) + res = req.get_response(self.api) + self.assertEqual(res.status_int, 
+ webob.exc.HTTPClientError.code) + + def test_update_subnet_ipv6_address_mode_ip_version_4(self): + with self.network() as network: + with self.subnet(network=network) as subnet: + data = {'subnet': {'ipv6_address_mode': + constants.DHCPV6_STATEFUL}} + req = self.new_update_request('subnets', data, + subnet['subnet']['id']) + res = req.get_response(self.api) + self.assertEqual(res.status_int, + webob.exc.HTTPClientError.code) + + def test_update_subnet_allocation_pools(self): + """Test that we can successfully update with sane params. + + This will create a subnet with specified allocation_pools + Then issue an update (PUT) to update these using correct + (i.e. non erroneous) params. Finally retrieve the updated + subnet and verify. + """ + allocation_pools = [{'start': '192.168.0.2', 'end': '192.168.0.254'}] + with self.network() as network: + with self.subnet(network=network, + allocation_pools=allocation_pools, + cidr='192.168.0.0/24') as subnet: + data = {'subnet': {'allocation_pools': [ + {'start': '192.168.0.10', 'end': '192.168.0.20'}, + {'start': '192.168.0.30', 'end': '192.168.0.40'}]}} + req = self.new_update_request('subnets', data, + subnet['subnet']['id']) + #check res code but then do GET on subnet for verification + res = req.get_response(self.api) + self.assertEqual(res.status_code, 200) + req = self.new_show_request('subnets', subnet['subnet']['id'], + self.fmt) + res = self.deserialize(self.fmt, req.get_response(self.api)) + self.assertEqual(len(res['subnet']['allocation_pools']), 2) + res_vals = res['subnet']['allocation_pools'][0].values() +\ + res['subnet']['allocation_pools'][1].values() + for pool_val in ['10', '20', '30', '40']: + self.assertTrue('192.168.0.%s' % (pool_val) in res_vals) + + #updating alloc pool to something outside subnet.cidr + def test_update_subnet_allocation_pools_invalid_pool_for_cidr(self): + """Test update alloc pool to something outside subnet.cidr. 
+ + This makes sure that an erroneous allocation_pool specified + in a subnet update (outside subnet cidr) will result in an error. + """ + allocation_pools = [{'start': '192.168.0.2', 'end': '192.168.0.254'}] + with self.network() as network: + with self.subnet(network=network, + allocation_pools=allocation_pools, + cidr='192.168.0.0/24') as subnet: + data = {'subnet': {'allocation_pools': [ + {'start': '10.0.0.10', 'end': '10.0.0.20'}]}} + req = self.new_update_request('subnets', data, + subnet['subnet']['id']) + res = req.get_response(self.api) + self.assertEqual(res.status_int, + webob.exc.HTTPClientError.code) + + def test_show_subnet(self): + with self.network() as network: + with self.subnet(network=network) as subnet: + req = self.new_show_request('subnets', + subnet['subnet']['id']) + res = self.deserialize(self.fmt, req.get_response(self.api)) + self.assertEqual(res['subnet']['id'], + subnet['subnet']['id']) + self.assertEqual(res['subnet']['network_id'], + network['network']['id']) + + def test_list_subnets(self): + with self.network() as network: + with contextlib.nested(self.subnet(network=network, + gateway_ip='10.0.0.1', + cidr='10.0.0.0/24'), + self.subnet(network=network, + gateway_ip='10.0.1.1', + cidr='10.0.1.0/24'), + self.subnet(network=network, + gateway_ip='10.0.2.1', + cidr='10.0.2.0/24')) as subnets: + self._test_list_resources('subnet', subnets) + + def test_list_subnets_shared(self): + with self.network(shared=True) as network: + with self.subnet(network=network, cidr='10.0.0.0/24') as subnet: + with self.subnet(cidr='10.0.1.0/24') as priv_subnet: + # normal user should see only 1 subnet + req = self.new_list_request('subnets') + req.environ['neutron.context'] = context.Context( + '', 'some_tenant') + res = self.deserialize(self.fmt, + req.get_response(self.api)) + self.assertEqual(len(res['subnets']), 1) + self.assertEqual(res['subnets'][0]['cidr'], + subnet['subnet']['cidr']) + # admin will see both subnets + admin_req = 
self.new_list_request('subnets') + admin_res = self.deserialize( + self.fmt, admin_req.get_response(self.api)) + self.assertEqual(len(admin_res['subnets']), 2) + cidrs = [sub['cidr'] for sub in admin_res['subnets']] + self.assertIn(subnet['subnet']['cidr'], cidrs) + self.assertIn(priv_subnet['subnet']['cidr'], cidrs) + + def test_list_subnets_with_parameter(self): + with self.network() as network: + with contextlib.nested(self.subnet(network=network, + gateway_ip='10.0.0.1', + cidr='10.0.0.0/24'), + self.subnet(network=network, + gateway_ip='10.0.1.1', + cidr='10.0.1.0/24') + ) as subnets: + query_params = 'ip_version=4&ip_version=6' + self._test_list_resources('subnet', subnets, + query_params=query_params) + query_params = 'ip_version=6' + self._test_list_resources('subnet', [], + query_params=query_params) + + def test_list_subnets_with_sort_native(self): + if self._skip_native_sorting: + self.skipTest("Skip test for not implemented sorting feature") + with contextlib.nested(self.subnet(enable_dhcp=True, + cidr='10.0.0.0/24'), + self.subnet(enable_dhcp=False, + cidr='11.0.0.0/24'), + self.subnet(enable_dhcp=False, + cidr='12.0.0.0/24') + ) as (subnet1, subnet2, subnet3): + self._test_list_with_sort('subnet', (subnet3, subnet2, subnet1), + [('enable_dhcp', 'asc'), + ('cidr', 'desc')]) + + def test_list_subnets_with_sort_emulated(self): + helper_patcher = mock.patch( + 'neutron.api.v2.base.Controller._get_sorting_helper', + new=_fake_get_sorting_helper) + helper_patcher.start() + with contextlib.nested(self.subnet(enable_dhcp=True, + cidr='10.0.0.0/24'), + self.subnet(enable_dhcp=False, + cidr='11.0.0.0/24'), + self.subnet(enable_dhcp=False, + cidr='12.0.0.0/24') + ) as (subnet1, subnet2, subnet3): + self._test_list_with_sort('subnet', (subnet3, + subnet2, + subnet1), + [('enable_dhcp', 'asc'), + ('cidr', 'desc')]) + + def test_list_subnets_with_pagination_native(self): + if self._skip_native_pagination: + self.skipTest("Skip test for not implemented sorting 
feature") + with contextlib.nested(self.subnet(cidr='10.0.0.0/24'), + self.subnet(cidr='11.0.0.0/24'), + self.subnet(cidr='12.0.0.0/24') + ) as (subnet1, subnet2, subnet3): + self._test_list_with_pagination('subnet', + (subnet1, subnet2, subnet3), + ('cidr', 'asc'), 2, 2) + + def test_list_subnets_with_pagination_emulated(self): + helper_patcher = mock.patch( + 'neutron.api.v2.base.Controller._get_pagination_helper', + new=_fake_get_pagination_helper) + helper_patcher.start() + with contextlib.nested(self.subnet(cidr='10.0.0.0/24'), + self.subnet(cidr='11.0.0.0/24'), + self.subnet(cidr='12.0.0.0/24') + ) as (subnet1, subnet2, subnet3): + self._test_list_with_pagination('subnet', + (subnet1, subnet2, subnet3), + ('cidr', 'asc'), 2, 2) + + def test_list_subnets_with_pagination_reverse_native(self): + if self._skip_native_sorting: + self.skipTest("Skip test for not implemented sorting feature") + with contextlib.nested(self.subnet(cidr='10.0.0.0/24'), + self.subnet(cidr='11.0.0.0/24'), + self.subnet(cidr='12.0.0.0/24') + ) as (subnet1, subnet2, subnet3): + self._test_list_with_pagination_reverse('subnet', + (subnet1, subnet2, + subnet3), + ('cidr', 'asc'), 2, 2) + + def test_list_subnets_with_pagination_reverse_emulated(self): + helper_patcher = mock.patch( + 'neutron.api.v2.base.Controller._get_pagination_helper', + new=_fake_get_pagination_helper) + helper_patcher.start() + with contextlib.nested(self.subnet(cidr='10.0.0.0/24'), + self.subnet(cidr='11.0.0.0/24'), + self.subnet(cidr='12.0.0.0/24') + ) as (subnet1, subnet2, subnet3): + self._test_list_with_pagination_reverse('subnet', + (subnet1, subnet2, + subnet3), + ('cidr', 'asc'), 2, 2) + + def test_invalid_ip_version(self): + with self.network() as network: + data = {'subnet': {'network_id': network['network']['id'], + 'cidr': '10.0.2.0/24', + 'ip_version': 7, + 'tenant_id': network['network']['tenant_id'], + 'gateway_ip': '10.0.2.1'}} + + subnet_req = self.new_create_request('subnets', data) + res = 
subnet_req.get_response(self.api) + self.assertEqual(res.status_int, webob.exc.HTTPClientError.code) + + def test_invalid_subnet(self): + with self.network() as network: + data = {'subnet': {'network_id': network['network']['id'], + 'cidr': 'invalid', + 'ip_version': 4, + 'tenant_id': network['network']['tenant_id'], + 'gateway_ip': '10.0.2.1'}} + + subnet_req = self.new_create_request('subnets', data) + res = subnet_req.get_response(self.api) + self.assertEqual(res.status_int, webob.exc.HTTPClientError.code) + + def test_invalid_ip_address(self): + with self.network() as network: + data = {'subnet': {'network_id': network['network']['id'], + 'cidr': '10.0.2.0/24', + 'ip_version': 4, + 'tenant_id': network['network']['tenant_id'], + 'gateway_ip': 'ipaddress'}} + + subnet_req = self.new_create_request('subnets', data) + res = subnet_req.get_response(self.api) + self.assertEqual(res.status_int, webob.exc.HTTPClientError.code) + + def test_invalid_uuid(self): + with self.network() as network: + data = {'subnet': {'network_id': 'invalid-uuid', + 'cidr': '10.0.2.0/24', + 'ip_version': 4, + 'tenant_id': network['network']['tenant_id'], + 'gateway_ip': '10.0.0.1'}} + + subnet_req = self.new_create_request('subnets', data) + res = subnet_req.get_response(self.api) + self.assertEqual(res.status_int, webob.exc.HTTPClientError.code) + + def test_create_subnet_with_one_dns(self): + gateway_ip = '10.0.0.1' + cidr = '10.0.0.0/24' + allocation_pools = [{'start': '10.0.0.2', + 'end': '10.0.0.100'}] + dns_nameservers = ['1.2.3.4'] + self._test_create_subnet(gateway_ip=gateway_ip, + cidr=cidr, + allocation_pools=allocation_pools, + dns_nameservers=dns_nameservers) + + def test_create_subnet_with_two_dns(self): + gateway_ip = '10.0.0.1' + cidr = '10.0.0.0/24' + allocation_pools = [{'start': '10.0.0.2', + 'end': '10.0.0.100'}] + dns_nameservers = ['1.2.3.4', '4.3.2.1'] + self._test_create_subnet(gateway_ip=gateway_ip, + cidr=cidr, + allocation_pools=allocation_pools, + 
dns_nameservers=dns_nameservers) + + def test_create_subnet_with_too_many_dns(self): + with self.network() as network: + dns_list = ['1.1.1.1', '2.2.2.2', '3.3.3.3'] + data = {'subnet': {'network_id': network['network']['id'], + 'cidr': '10.0.2.0/24', + 'ip_version': 4, + 'tenant_id': network['network']['tenant_id'], + 'gateway_ip': '10.0.0.1', + 'dns_nameservers': dns_list}} + + subnet_req = self.new_create_request('subnets', data) + res = subnet_req.get_response(self.api) + self.assertEqual(res.status_int, webob.exc.HTTPClientError.code) + + def test_create_subnet_with_one_host_route(self): + gateway_ip = '10.0.0.1' + cidr = '10.0.0.0/24' + allocation_pools = [{'start': '10.0.0.2', + 'end': '10.0.0.100'}] + host_routes = [{'destination': '135.207.0.0/16', + 'nexthop': '1.2.3.4'}] + self._test_create_subnet(gateway_ip=gateway_ip, + cidr=cidr, + allocation_pools=allocation_pools, + host_routes=host_routes) + + def test_create_subnet_with_two_host_routes(self): + gateway_ip = '10.0.0.1' + cidr = '10.0.0.0/24' + allocation_pools = [{'start': '10.0.0.2', + 'end': '10.0.0.100'}] + host_routes = [{'destination': '135.207.0.0/16', + 'nexthop': '1.2.3.4'}, + {'destination': '12.0.0.0/8', + 'nexthop': '4.3.2.1'}] + + self._test_create_subnet(gateway_ip=gateway_ip, + cidr=cidr, + allocation_pools=allocation_pools, + host_routes=host_routes) + + def test_create_subnet_with_too_many_routes(self): + with self.network() as network: + host_routes = [{'destination': '135.207.0.0/16', + 'nexthop': '1.2.3.4'}, + {'destination': '12.0.0.0/8', + 'nexthop': '4.3.2.1'}, + {'destination': '141.212.0.0/16', + 'nexthop': '2.2.2.2'}] + + data = {'subnet': {'network_id': network['network']['id'], + 'cidr': '10.0.2.0/24', + 'ip_version': 4, + 'tenant_id': network['network']['tenant_id'], + 'gateway_ip': '10.0.0.1', + 'host_routes': host_routes}} + + subnet_req = self.new_create_request('subnets', data) + res = subnet_req.get_response(self.api) + self.assertEqual(res.status_int, 
webob.exc.HTTPClientError.code) + + def test_update_subnet_dns(self): + with self.subnet() as subnet: + data = {'subnet': {'dns_nameservers': ['11.0.0.1']}} + req = self.new_update_request('subnets', data, + subnet['subnet']['id']) + res = self.deserialize(self.fmt, req.get_response(self.api)) + self.assertEqual(res['subnet']['dns_nameservers'], + data['subnet']['dns_nameservers']) + + def test_update_subnet_dns_to_None(self): + with self.subnet(dns_nameservers=['11.0.0.1']) as subnet: + data = {'subnet': {'dns_nameservers': None}} + req = self.new_update_request('subnets', data, + subnet['subnet']['id']) + res = self.deserialize(self.fmt, req.get_response(self.api)) + self.assertEqual([], res['subnet']['dns_nameservers']) + data = {'subnet': {'dns_nameservers': ['11.0.0.3']}} + req = self.new_update_request('subnets', data, + subnet['subnet']['id']) + res = self.deserialize(self.fmt, req.get_response(self.api)) + self.assertEqual(data['subnet']['dns_nameservers'], + res['subnet']['dns_nameservers']) + + def test_update_subnet_dns_with_too_many_entries(self): + with self.subnet() as subnet: + dns_list = ['1.1.1.1', '2.2.2.2', '3.3.3.3'] + data = {'subnet': {'dns_nameservers': dns_list}} + req = self.new_update_request('subnets', data, + subnet['subnet']['id']) + res = req.get_response(self.api) + self.assertEqual(res.status_int, webob.exc.HTTPClientError.code) + + def test_update_subnet_route(self): + with self.subnet() as subnet: + data = {'subnet': {'host_routes': + [{'destination': '12.0.0.0/8', 'nexthop': '1.2.3.4'}]}} + req = self.new_update_request('subnets', data, + subnet['subnet']['id']) + res = self.deserialize(self.fmt, req.get_response(self.api)) + self.assertEqual(res['subnet']['host_routes'], + data['subnet']['host_routes']) + + def test_update_subnet_route_to_None(self): + with self.subnet(host_routes=[{'destination': '12.0.0.0/8', + 'nexthop': '1.2.3.4'}]) as subnet: + data = {'subnet': {'host_routes': None}} + req = 
self.new_update_request('subnets', data, + subnet['subnet']['id']) + res = self.deserialize(self.fmt, req.get_response(self.api)) + self.assertEqual([], res['subnet']['host_routes']) + data = {'subnet': {'host_routes': [{'destination': '12.0.0.0/8', + 'nexthop': '1.2.3.4'}]}} + req = self.new_update_request('subnets', data, + subnet['subnet']['id']) + res = self.deserialize(self.fmt, req.get_response(self.api)) + self.assertEqual(data['subnet']['host_routes'], + res['subnet']['host_routes']) + + def test_update_subnet_route_with_too_many_entries(self): + with self.subnet() as subnet: + data = {'subnet': {'host_routes': [ + {'destination': '12.0.0.0/8', 'nexthop': '1.2.3.4'}, + {'destination': '13.0.0.0/8', 'nexthop': '1.2.3.5'}, + {'destination': '14.0.0.0/8', 'nexthop': '1.2.3.6'}]}} + req = self.new_update_request('subnets', data, + subnet['subnet']['id']) + res = req.get_response(self.api) + self.assertEqual(res.status_int, webob.exc.HTTPClientError.code) + + def test_delete_subnet_with_dns(self): + gateway_ip = '10.0.0.1' + cidr = '10.0.0.0/24' + dns_nameservers = ['1.2.3.4'] + # Create new network + res = self._create_network(fmt=self.fmt, name='net', + admin_state_up=True) + network = self.deserialize(self.fmt, res) + subnet = self._make_subnet(self.fmt, network, gateway_ip, + cidr, ip_version=4, + dns_nameservers=dns_nameservers) + req = self.new_delete_request('subnets', subnet['subnet']['id']) + res = req.get_response(self.api) + self.assertEqual(res.status_int, webob.exc.HTTPNoContent.code) + + def test_delete_subnet_with_route(self): + gateway_ip = '10.0.0.1' + cidr = '10.0.0.0/24' + host_routes = [{'destination': '135.207.0.0/16', + 'nexthop': '1.2.3.4'}] + # Create new network + res = self._create_network(fmt=self.fmt, name='net', + admin_state_up=True) + network = self.deserialize(self.fmt, res) + subnet = self._make_subnet(self.fmt, network, gateway_ip, + cidr, ip_version=4, + host_routes=host_routes) + req = self.new_delete_request('subnets', 
subnet['subnet']['id']) + res = req.get_response(self.api) + self.assertEqual(res.status_int, webob.exc.HTTPNoContent.code) + + def test_delete_subnet_with_dns_and_route(self): + gateway_ip = '10.0.0.1' + cidr = '10.0.0.0/24' + dns_nameservers = ['1.2.3.4'] + host_routes = [{'destination': '135.207.0.0/16', + 'nexthop': '1.2.3.4'}] + # Create new network + res = self._create_network(fmt=self.fmt, name='net', + admin_state_up=True) + network = self.deserialize(self.fmt, res) + subnet = self._make_subnet(self.fmt, network, gateway_ip, + cidr, ip_version=4, + dns_nameservers=dns_nameservers, + host_routes=host_routes) + req = self.new_delete_request('subnets', subnet['subnet']['id']) + res = req.get_response(self.api) + self.assertEqual(res.status_int, webob.exc.HTTPNoContent.code) + + def _helper_test_validate_subnet(self, option, exception): + cfg.CONF.set_override(option, 0) + with self.network() as network: + subnet = {'network_id': network['network']['id'], + 'cidr': '10.0.2.0/24', + 'ip_version': 4, + 'tenant_id': network['network']['tenant_id'], + 'gateway_ip': '10.0.2.1', + 'dns_nameservers': ['8.8.8.8'], + 'host_routes': [{'destination': '135.207.0.0/16', + 'nexthop': '1.2.3.4'}]} + plugin = manager.NeutronManager.get_plugin() + e = self.assertRaises(exception, + plugin._validate_subnet, + context.get_admin_context( + load_admin_roles=False), + subnet) + self.assertThat( + str(e), + matchers.Not(matchers.Contains('built-in function id'))) + + def test_validate_subnet_dns_nameservers_exhausted(self): + self._helper_test_validate_subnet( + 'max_dns_nameservers', + n_exc.DNSNameServersExhausted) + + def test_validate_subnet_host_routes_exhausted(self): + self._helper_test_validate_subnet( + 'max_subnet_host_routes', + n_exc.HostRoutesExhausted) + + +class DbModelTestCase(base.BaseTestCase): + """DB model tests.""" + def test_repr(self): + """testing the string representation of 'model' classes.""" + network = models_v2.Network(name="net_net", status="OK", + 
admin_state_up=True) + actual_repr_output = repr(network) + exp_start_with = "") + final_exp = exp_start_with + exp_middle + exp_end_with + self.assertEqual(actual_repr_output, final_exp) + + +class TestNeutronDbPluginV2(base.BaseTestCase): + """Unit Tests for NeutronDbPluginV2 IPAM Logic.""" + + def test_generate_ip(self): + with mock.patch.object(db_base_plugin_v2.NeutronDbPluginV2, + '_try_generate_ip') as generate: + with mock.patch.object(db_base_plugin_v2.NeutronDbPluginV2, + '_rebuild_availability_ranges') as rebuild: + + db_base_plugin_v2.NeutronDbPluginV2._generate_ip('c', 's') + + generate.assert_called_once_with('c', 's') + self.assertEqual(0, rebuild.call_count) + + def test_generate_ip_exhausted_pool(self): + with mock.patch.object(db_base_plugin_v2.NeutronDbPluginV2, + '_try_generate_ip') as generate: + with mock.patch.object(db_base_plugin_v2.NeutronDbPluginV2, + '_rebuild_availability_ranges') as rebuild: + + exception = n_exc.IpAddressGenerationFailure(net_id='n') + generate.side_effect = exception + + # I want the side_effect to throw an exception once but I + # didn't see a way to do this. So, let it throw twice and + # catch the second one. Check below to ensure that + # _try_generate_ip was called twice. 
+ try: + db_base_plugin_v2.NeutronDbPluginV2._generate_ip('c', 's') + except n_exc.IpAddressGenerationFailure: + pass + + self.assertEqual(2, generate.call_count) + rebuild.assert_called_once_with('c', 's') + + def test_rebuild_availability_ranges(self): + pools = [{'id': 'a', + 'first_ip': '192.168.1.3', + 'last_ip': '192.168.1.10'}, + {'id': 'b', + 'first_ip': '192.168.1.100', + 'last_ip': '192.168.1.120'}] + + allocations = [{'ip_address': '192.168.1.3'}, + {'ip_address': '192.168.1.78'}, + {'ip_address': '192.168.1.7'}, + {'ip_address': '192.168.1.110'}, + {'ip_address': '192.168.1.11'}, + {'ip_address': '192.168.1.4'}, + {'ip_address': '192.168.1.111'}] + + ip_qry = mock.Mock() + ip_qry.with_lockmode.return_value = ip_qry + ip_qry.filter_by.return_value = allocations + + pool_qry = mock.Mock() + pool_qry.options.return_value = pool_qry + pool_qry.with_lockmode.return_value = pool_qry + pool_qry.filter_by.return_value = pools + + def return_queries_side_effect(*args, **kwargs): + if args[0] == models_v2.IPAllocation: + return ip_qry + if args[0] == models_v2.IPAllocationPool: + return pool_qry + + context = mock.Mock() + context.session.query.side_effect = return_queries_side_effect + subnets = [mock.MagicMock()] + + db_base_plugin_v2.NeutronDbPluginV2._rebuild_availability_ranges( + context, subnets) + + actual = [[args[0].allocation_pool_id, + args[0].first_ip, args[0].last_ip] + for _name, args, _kwargs in context.session.add.mock_calls] + + self.assertEqual([['a', '192.168.1.5', '192.168.1.6'], + ['a', '192.168.1.8', '192.168.1.10'], + ['b', '192.168.1.100', '192.168.1.109'], + ['b', '192.168.1.112', '192.168.1.120']], actual) + + +class NeutronDbPluginV2AsMixinTestCase(base.BaseTestCase): + """Tests for NeutronDbPluginV2 as Mixin. + + While NeutronDbPluginV2TestCase checks NeutronDbPlugin and all plugins as + a complete plugin, this test case verifies abilities of NeutronDbPlugin + which are provided to other plugins (e.g. DB operations). 
This test case + may include tests only for NeutronDbPlugin, so this should not be used in + unit tests for other plugins. + """ + + def setUp(self): + super(NeutronDbPluginV2AsMixinTestCase, self).setUp() + self.plugin = importutils.import_object(DB_PLUGIN_KLASS) + self.context = context.get_admin_context() + self.net_data = {'network': {'id': 'fake-id', + 'name': 'net1', + 'admin_state_up': True, + 'tenant_id': 'test-tenant', + 'shared': False}} + self.addCleanup(db.clear_db) + + def test_create_network_with_default_status(self): + net = self.plugin.create_network(self.context, self.net_data) + default_net_create_status = 'ACTIVE' + expected = [('id', 'fake-id'), ('name', 'net1'), + ('admin_state_up', True), ('tenant_id', 'test-tenant'), + ('shared', False), ('status', default_net_create_status)] + for k, v in expected: + self.assertEqual(net[k], v) + + def test_create_network_with_status_BUILD(self): + self.net_data['network']['status'] = 'BUILD' + net = self.plugin.create_network(self.context, self.net_data) + self.assertEqual(net['status'], 'BUILD') + + +class TestBasicGetXML(TestBasicGet): + fmt = 'xml' + + +class TestNetworksV2XML(TestNetworksV2): + fmt = 'xml' + + +class TestPortsV2XML(TestPortsV2): + fmt = 'xml' + + +class TestSubnetsV2XML(TestSubnetsV2): + fmt = 'xml' + + +class TestV2HTTPResponseXML(TestV2HTTPResponse): + fmt = 'xml' diff --git a/neutron/tests/unit/test_db_rpc_base.py b/neutron/tests/unit/test_db_rpc_base.py new file mode 100644 index 000000000..3ba662313 --- /dev/null +++ b/neutron/tests/unit/test_db_rpc_base.py @@ -0,0 +1,233 @@ +# Copyright (c) 2012 OpenStack Foundation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import mock + +from neutron.common import constants +from neutron.common import exceptions as n_exc +from neutron.db import dhcp_rpc_base +from neutron.openstack.common.db import exception as db_exc +from neutron.tests import base + + +class TestDhcpRpcCallbackMixin(base.BaseTestCase): + + def setUp(self): + super(TestDhcpRpcCallbackMixin, self).setUp() + self.plugin_p = mock.patch('neutron.manager.NeutronManager.get_plugin') + get_plugin = self.plugin_p.start() + self.plugin = mock.MagicMock() + get_plugin.return_value = self.plugin + self.callbacks = dhcp_rpc_base.DhcpRpcCallbackMixin() + self.log_p = mock.patch('neutron.db.dhcp_rpc_base.LOG') + self.log = self.log_p.start() + + def test_get_active_networks(self): + plugin_retval = [dict(id='a'), dict(id='b')] + self.plugin.get_networks.return_value = plugin_retval + + networks = self.callbacks.get_active_networks(mock.Mock(), host='host') + + self.assertEqual(networks, ['a', 'b']) + self.plugin.assert_has_calls( + [mock.call.get_networks(mock.ANY, + filters=dict(admin_state_up=[True]))]) + + self.assertEqual(len(self.log.mock_calls), 1) + + def _test__port_action_with_failures(self, exc=None, action=None): + port = { + 'network_id': 'foo_network_id', + 'device_owner': constants.DEVICE_OWNER_DHCP, + 'fixed_ips': [{'subnet_id': 'foo_subnet_id'}] + } + self.plugin.create_port.side_effect = exc + self.assertIsNone(self.callbacks._port_action(self.plugin, + mock.Mock(), + {'port': port}, + action)) + + def _test__port_action_good_action(self, action, port, expected_call): + 
self.callbacks._port_action(self.plugin, mock.Mock(), + port, action) + self.plugin.assert_has_calls(expected_call) + + def test_port_action_create_port(self): + self._test__port_action_good_action( + 'create_port', mock.Mock(), + mock.call.create_port(mock.ANY, mock.ANY)) + + def test_port_action_update_port(self): + fake_port = {'id': 'foo_port_id', 'port': mock.Mock()} + self._test__port_action_good_action( + 'update_port', fake_port, + mock.call.update_port(mock.ANY, 'foo_port_id', mock.ANY)) + + def test__port_action_bad_action(self): + self.assertRaises( + n_exc.Invalid, + self._test__port_action_with_failures, + exc=None, + action='foo_action') + + def test_create_port_catch_network_not_found(self): + self._test__port_action_with_failures( + exc=n_exc.NetworkNotFound(net_id='foo_network_id'), + action='create_port') + + def test_create_port_catch_subnet_not_found(self): + self._test__port_action_with_failures( + exc=n_exc.SubnetNotFound(subnet_id='foo_subnet_id'), + action='create_port') + + def test_create_port_catch_db_error(self): + self._test__port_action_with_failures(exc=db_exc.DBError(), + action='create_port') + + def test_create_port_catch_ip_generation_failure_reraise(self): + self.assertRaises( + n_exc.IpAddressGenerationFailure, + self._test__port_action_with_failures, + exc=n_exc.IpAddressGenerationFailure(net_id='foo_network_id'), + action='create_port') + + def test_create_port_catch_and_handle_ip_generation_failure(self): + self.plugin.get_subnet.side_effect = ( + n_exc.SubnetNotFound(subnet_id='foo_subnet_id')) + self._test__port_action_with_failures( + exc=n_exc.IpAddressGenerationFailure(net_id='foo_network_id'), + action='create_port') + + def test_get_network_info_return_none_on_not_found(self): + self.plugin.get_network.side_effect = n_exc.NetworkNotFound(net_id='a') + retval = self.callbacks.get_network_info(mock.Mock(), network_id='a') + self.assertIsNone(retval) + + def test_get_network_info(self): + network_retval = dict(id='a') + + 
subnet_retval = mock.Mock() + port_retval = mock.Mock() + + self.plugin.get_network.return_value = network_retval + self.plugin.get_subnets.return_value = subnet_retval + self.plugin.get_ports.return_value = port_retval + + retval = self.callbacks.get_network_info(mock.Mock(), network_id='a') + self.assertEqual(retval, network_retval) + self.assertEqual(retval['subnets'], subnet_retval) + self.assertEqual(retval['ports'], port_retval) + + def _test_get_dhcp_port_helper(self, port_retval, other_expectations=[], + update_port=None, create_port=None): + subnets_retval = [dict(id='a', enable_dhcp=True), + dict(id='b', enable_dhcp=False)] + + self.plugin.get_subnets.return_value = subnets_retval + if port_retval: + self.plugin.get_ports.return_value = [port_retval] + else: + self.plugin.get_ports.return_value = [] + if isinstance(update_port, n_exc.NotFound): + self.plugin.update_port.side_effect = update_port + else: + self.plugin.update_port.return_value = update_port + self.plugin.create_port.return_value = create_port + + retval = self.callbacks.get_dhcp_port(mock.Mock(), + network_id='netid', + device_id='devid', + host='host') + + expected = [mock.call.get_subnets(mock.ANY, + filters=dict(network_id=['netid'])), + mock.call.get_ports(mock.ANY, + filters=dict(network_id=['netid'], + device_id=['devid']))] + + expected.extend(other_expectations) + self.plugin.assert_has_calls(expected) + return retval + + def test_update_dhcp_port(self): + self.callbacks.update_dhcp_port(mock.Mock(), + host='foo_host', + port_id='foo_port_id', + port=mock.Mock()) + self.plugin.assert_has_calls( + mock.call.update_port(mock.ANY, 'foo_port_id', mock.ANY)) + + def test_get_dhcp_port_existing(self): + port_retval = dict(id='port_id', fixed_ips=[dict(subnet_id='a')]) + expectations = [ + mock.call.update_port(mock.ANY, 'port_id', dict(port=port_retval))] + + self._test_get_dhcp_port_helper(port_retval, expectations, + update_port=port_retval) + self.assertEqual(len(self.log.mock_calls), 
1) + + def _test_get_dhcp_port_create_new(self, update_port=None): + self.plugin.get_network.return_value = dict(tenant_id='tenantid') + create_spec = dict(tenant_id='tenantid', device_id='devid', + network_id='netid', name='', + admin_state_up=True, + device_owner=constants.DEVICE_OWNER_DHCP, + mac_address=mock.ANY) + create_retval = create_spec.copy() + create_retval['id'] = 'port_id' + create_retval['fixed_ips'] = [dict(subnet_id='a', enable_dhcp=True)] + + create_spec['fixed_ips'] = [dict(subnet_id='a')] + + expectations = [ + mock.call.get_network(mock.ANY, 'netid'), + mock.call.create_port(mock.ANY, dict(port=create_spec))] + + retval = self._test_get_dhcp_port_helper(None, expectations, + update_port=update_port, + create_port=create_retval) + self.assertEqual(create_retval, retval) + self.assertEqual(len(self.log.mock_calls), 2) + + def test_get_dhcp_port_create_new(self): + self._test_get_dhcp_port_create_new() + + def test_get_dhcp_port_create_new_with_failure_on_port_update(self): + self._test_get_dhcp_port_create_new( + update_port=n_exc.PortNotFound(port_id='foo')) + + def test_release_dhcp_port(self): + port_retval = dict(id='port_id', fixed_ips=[dict(subnet_id='a')]) + self.plugin.get_ports.return_value = [port_retval] + + self.callbacks.release_dhcp_port(mock.ANY, network_id='netid', + device_id='devid') + + self.plugin.assert_has_calls([ + mock.call.delete_ports_by_device_id(mock.ANY, 'devid', 'netid')]) + + def test_release_port_fixed_ip(self): + port_retval = dict(id='port_id', fixed_ips=[dict(subnet_id='a')]) + port_update = dict(id='port_id', fixed_ips=[]) + self.plugin.get_ports.return_value = [port_retval] + + self.callbacks.release_port_fixed_ip(mock.ANY, network_id='netid', + device_id='devid', subnet_id='a') + + self.plugin.assert_has_calls([ + mock.call.get_ports(mock.ANY, filters=dict(network_id=['netid'], + device_id=['devid'])), + mock.call.update_port(mock.ANY, 'port_id', + dict(port=port_update))]) diff --git 
a/neutron/tests/unit/test_debug_commands.py b/neutron/tests/unit/test_debug_commands.py new file mode 100644 index 000000000..16b6c8dda --- /dev/null +++ b/neutron/tests/unit/test_debug_commands.py @@ -0,0 +1,363 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2012, Nachi Ueno, NTT MCL, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import socket + +import mock +from oslo.config import cfg + +from neutron.agent.common import config +from neutron.agent.linux import interface +from neutron.common import config as common_config +from neutron.debug import commands +from neutron.debug import debug_agent +from neutron.tests import base + + +class MyApp(object): + def __init__(self, _stdout): + self.stdout = _stdout + + +class TestDebugCommands(base.BaseTestCase): + def setUp(self): + super(TestDebugCommands, self).setUp() + cfg.CONF.register_opts(interface.OPTS) + cfg.CONF.register_opts(debug_agent.NeutronDebugAgent.OPTS) + common_config.init([]) + config.register_interface_driver_opts_helper(cfg.CONF) + config.register_use_namespaces_opts_helper(cfg.CONF) + config.register_root_helper(cfg.CONF) + cfg.CONF.set_override('use_namespaces', True) + + device_exists_p = mock.patch( + 'neutron.agent.linux.ip_lib.device_exists', return_value=False) + device_exists_p.start() + namespace_p = mock.patch( + 'neutron.agent.linux.ip_lib.IpNetnsCommand') + namespace_p.start() + ensure_namespace_p = mock.patch( + 
'neutron.agent.linux.ip_lib.IPWrapper.ensure_namespace') + ensure_namespace_p.start() + dvr_cls_p = mock.patch('neutron.agent.linux.interface.NullDriver') + driver_cls = dvr_cls_p.start() + mock_driver = mock.MagicMock() + mock_driver.DEV_NAME_LEN = ( + interface.LinuxInterfaceDriver.DEV_NAME_LEN) + mock_driver.get_device_name.return_value = 'tap12345678-12' + driver_cls.return_value = mock_driver + self.driver = mock_driver + + client_cls_p = mock.patch('neutronclient.v2_0.client.Client') + client_cls = client_cls_p.start() + client_inst = mock.Mock() + client_cls.return_value = client_inst + + fake_network = {'network': {'id': 'fake_net', + 'tenant_id': 'fake_tenant', + 'subnets': ['fake_subnet']}} + fake_port = {'port': + {'id': 'fake_port', + 'device_owner': 'fake_device', + 'mac_address': 'aa:bb:cc:dd:ee:ffa', + 'network_id': 'fake_net', + 'fixed_ips': + [{'subnet_id': 'fake_subnet', 'ip_address': '10.0.0.3'}] + }} + fake_ports = {'ports': [fake_port['port']]} + self.fake_ports = fake_ports + allocation_pools = [{'start': '10.0.0.2', + 'end': '10.0.0.254'}] + fake_subnet_v4 = {'subnet': {'name': 'fake_subnet_v4', + 'id': 'fake_subnet', + 'network_id': 'fake_net', + 'gateway_ip': '10.0.0.1', + 'dns_nameservers': ['10.0.0.2'], + 'host_routes': [], + 'cidr': '10.0.0.0/24', + 'allocation_pools': allocation_pools, + 'enable_dhcp': True, + 'ip_version': 4}} + + client_inst.list_ports.return_value = fake_ports + client_inst.create_port.return_value = fake_port + client_inst.show_port.return_value = fake_port + client_inst.show_network.return_value = fake_network + client_inst.show_subnet.return_value = fake_subnet_v4 + self.client = client_inst + mock_std = mock.Mock() + self.app = MyApp(mock_std) + self.app.debug_agent = debug_agent.NeutronDebugAgent(cfg.CONF, + client_inst, + mock_driver) + + def _test_create_probe(self, device_owner): + cmd = commands.CreateProbe(self.app, None) + cmd_parser = cmd.get_parser('create_probe') + if device_owner == 
debug_agent.DEVICE_OWNER_COMPUTE_PROBE: + args = ['fake_net', '--device-owner', 'compute'] + else: + args = ['fake_net'] + parsed_args = cmd_parser.parse_args(args) + cmd.run(parsed_args) + fake_port = {'port': + {'device_owner': device_owner, + 'admin_state_up': True, + 'network_id': 'fake_net', + 'tenant_id': 'fake_tenant', + 'binding:host_id': cfg.CONF.host, + 'fixed_ips': [{'subnet_id': 'fake_subnet'}], + 'device_id': socket.gethostname()}} + namespace = 'qprobe-fake_port' + self.client.assert_has_calls([mock.call.show_network('fake_net'), + mock.call.show_subnet('fake_subnet'), + mock.call.create_port(fake_port), + mock.call.show_subnet('fake_subnet')]) + self.driver.assert_has_calls([mock.call.get_device_name(mock.ANY), + mock.call.plug('fake_net', + 'fake_port', + 'tap12345678-12', + 'aa:bb:cc:dd:ee:ffa', + bridge=None, + namespace=namespace), + mock.call.init_l3('tap12345678-12', + ['10.0.0.3/24'], + namespace=namespace + )]) + + def test_create_network_probe(self): + self._test_create_probe(debug_agent.DEVICE_OWNER_NETWORK_PROBE) + + def test_create_nova_probe(self): + self._test_create_probe(debug_agent.DEVICE_OWNER_COMPUTE_PROBE) + + def _test_create_probe_external(self, device_owner): + fake_network = {'network': {'id': 'fake_net', + 'tenant_id': 'fake_tenant', + 'router:external': True, + 'subnets': ['fake_subnet']}} + self.client.show_network.return_value = fake_network + cmd = commands.CreateProbe(self.app, None) + cmd_parser = cmd.get_parser('create_probe') + if device_owner == debug_agent.DEVICE_OWNER_COMPUTE_PROBE: + args = ['fake_net', '--device-owner', 'compute'] + else: + args = ['fake_net'] + parsed_args = cmd_parser.parse_args(args) + cmd.run(parsed_args) + fake_port = {'port': + {'device_owner': device_owner, + 'admin_state_up': True, + 'network_id': 'fake_net', + 'tenant_id': 'fake_tenant', + 'binding:host_id': cfg.CONF.host, + 'fixed_ips': [{'subnet_id': 'fake_subnet'}], + 'device_id': socket.gethostname()}} + namespace = 
'qprobe-fake_port' + self.client.assert_has_calls([mock.call.show_network('fake_net'), + mock.call.show_subnet('fake_subnet'), + mock.call.create_port(fake_port), + mock.call.show_subnet('fake_subnet')]) + self.driver.assert_has_calls([mock.call.get_device_name(mock.ANY), + mock.call.plug('fake_net', + 'fake_port', + 'tap12345678-12', + 'aa:bb:cc:dd:ee:ffa', + bridge='br-ex', + namespace=namespace), + mock.call.init_l3('tap12345678-12', + ['10.0.0.3/24'], + namespace=namespace + )]) + + def test_create_network_probe_external(self): + self._test_create_probe_external( + debug_agent.DEVICE_OWNER_NETWORK_PROBE) + + def test_create_nova_probe_external(self): + self._test_create_probe_external( + debug_agent.DEVICE_OWNER_COMPUTE_PROBE) + + def test_delete_probe(self): + cmd = commands.DeleteProbe(self.app, None) + cmd_parser = cmd.get_parser('delete_probe') + args = ['fake_port'] + parsed_args = cmd_parser.parse_args(args) + cmd.run(parsed_args) + namespace = 'qprobe-fake_port' + self.client.assert_has_calls([mock.call.show_port('fake_port'), + mock.call.show_network('fake_net'), + mock.call.show_subnet('fake_subnet'), + mock.call.delete_port('fake_port')]) + self.driver.assert_has_calls([mock.call.get_device_name(mock.ANY), + mock.call.unplug('tap12345678-12', + namespace=namespace, + bridge=None)]) + + def test_delete_probe_external(self): + fake_network = {'network': {'id': 'fake_net', + 'tenant_id': 'fake_tenant', + 'router:external': True, + 'subnets': ['fake_subnet']}} + self.client.show_network.return_value = fake_network + cmd = commands.DeleteProbe(self.app, None) + cmd_parser = cmd.get_parser('delete_probe') + args = ['fake_port'] + parsed_args = cmd_parser.parse_args(args) + cmd.run(parsed_args) + namespace = 'qprobe-fake_port' + self.client.assert_has_calls([mock.call.show_port('fake_port'), + mock.call.show_network('fake_net'), + mock.call.show_subnet('fake_subnet'), + mock.call.delete_port('fake_port')]) + 
self.driver.assert_has_calls([mock.call.get_device_name(mock.ANY), + mock.call.unplug('tap12345678-12', + namespace=namespace, + bridge='br-ex')]) + + def test_delete_probe_without_namespace(self): + cfg.CONF.set_override('use_namespaces', False) + cmd = commands.DeleteProbe(self.app, None) + cmd_parser = cmd.get_parser('delete_probe') + args = ['fake_port'] + parsed_args = cmd_parser.parse_args(args) + cmd.run(parsed_args) + self.client.assert_has_calls([mock.call.show_port('fake_port'), + mock.call.show_network('fake_net'), + mock.call.show_subnet('fake_subnet'), + mock.call.delete_port('fake_port')]) + self.driver.assert_has_calls([mock.call.get_device_name(mock.ANY), + mock.call.unplug('tap12345678-12', + bridge=None)]) + + def test_list_probe(self): + cmd = commands.ListProbe(self.app, None) + cmd_parser = cmd.get_parser('list_probe') + args = [] + parsed_args = cmd_parser.parse_args(args) + cmd.run(parsed_args) + self.client.assert_has_calls( + [mock.call.list_ports( + device_owner=[debug_agent.DEVICE_OWNER_NETWORK_PROBE, + debug_agent.DEVICE_OWNER_COMPUTE_PROBE])]) + + def test_exec_command(self): + cmd = commands.ExecProbe(self.app, None) + cmd_parser = cmd.get_parser('exec_command') + args = ['fake_port', 'fake_command'] + parsed_args = cmd_parser.parse_args(args) + with mock.patch('neutron.agent.linux.ip_lib.IpNetnsCommand') as ns: + cmd.run(parsed_args) + ns.assert_has_calls([mock.call.execute(mock.ANY)]) + self.client.assert_has_calls([mock.call.show_port('fake_port')]) + + def test_exec_command_without_namespace(self): + cfg.CONF.set_override('use_namespaces', False) + cmd = commands.ExecProbe(self.app, None) + cmd_parser = cmd.get_parser('exec_command') + args = ['fake_port', 'fake_command'] + parsed_args = cmd_parser.parse_args(args) + with mock.patch('neutron.agent.linux.utils.execute') as exe: + cmd.run(parsed_args) + exe.assert_has_calls([mock.call.execute(mock.ANY)]) + self.client.assert_has_calls([mock.call.show_port('fake_port')]) + + def 
test_clear_probe(self): + cmd = commands.ClearProbe(self.app, None) + cmd_parser = cmd.get_parser('clear_probe') + args = [] + parsed_args = cmd_parser.parse_args(args) + cmd.run(parsed_args) + namespace = 'qprobe-fake_port' + self.client.assert_has_calls( + [mock.call.list_ports( + device_id=socket.gethostname(), + device_owner=[debug_agent.DEVICE_OWNER_NETWORK_PROBE, + debug_agent.DEVICE_OWNER_COMPUTE_PROBE]), + mock.call.show_port('fake_port'), + mock.call.show_network('fake_net'), + mock.call.show_subnet('fake_subnet'), + mock.call.delete_port('fake_port')]) + self.driver.assert_has_calls([mock.call.get_device_name(mock.ANY), + mock.call.unplug('tap12345678-12', + namespace=namespace, + bridge=None)]) + + def test_ping_all_with_ensure_port(self): + fake_ports = self.fake_ports + + def fake_port_list(network_id=None, device_owner=None, device_id=None): + if network_id: + # In order to test ensure_port, return [] + return {'ports': []} + return fake_ports + self.client.list_ports.side_effect = fake_port_list + cmd = commands.PingAll(self.app, None) + cmd_parser = cmd.get_parser('ping_all') + args = [] + parsed_args = cmd_parser.parse_args(args) + namespace = 'qprobe-fake_port' + with mock.patch('neutron.agent.linux.ip_lib.IpNetnsCommand') as ns: + cmd.run(parsed_args) + ns.assert_has_calls([mock.call.execute(mock.ANY)]) + fake_port = {'port': + {'device_owner': debug_agent.DEVICE_OWNER_NETWORK_PROBE, + 'admin_state_up': True, + 'network_id': 'fake_net', + 'tenant_id': 'fake_tenant', + 'binding:host_id': cfg.CONF.host, + 'fixed_ips': [{'subnet_id': 'fake_subnet'}], + 'device_id': socket.gethostname()}} + expected = [mock.call.show_network('fake_net'), + mock.call.show_subnet('fake_subnet'), + mock.call.create_port(fake_port), + mock.call.show_subnet('fake_subnet')] + self.client.assert_has_calls(expected) + self.driver.assert_has_calls([mock.call.init_l3('tap12345678-12', + ['10.0.0.3/24'], + namespace=namespace + )]) + + def test_ping_all(self): + cmd = 
commands.PingAll(self.app, None) + cmd_parser = cmd.get_parser('ping_all') + args = [] + parsed_args = cmd_parser.parse_args(args) + with mock.patch('neutron.agent.linux.ip_lib.IpNetnsCommand') as ns: + cmd.run(parsed_args) + ns.assert_has_calls([mock.call.execute(mock.ANY)]) + expected = [mock.call.list_ports(), + mock.call.list_ports( + network_id='fake_net', + device_owner=debug_agent.DEVICE_OWNER_NETWORK_PROBE, + device_id=socket.gethostname()), + mock.call.show_subnet('fake_subnet'), + mock.call.show_port('fake_port')] + self.client.assert_has_calls(expected) + + def test_ping_all_v6(self): + fake_subnet_v6 = {'subnet': {'name': 'fake_v6', + 'ip_version': 6}} + self.client.show_subnet.return_value = fake_subnet_v6 + cmd = commands.PingAll(self.app, None) + cmd_parser = cmd.get_parser('ping_all') + args = [] + parsed_args = cmd_parser.parse_args(args) + with mock.patch('neutron.agent.linux.ip_lib.IpNetnsCommand') as ns: + cmd.run(parsed_args) + ns.assert_has_calls([mock.call.execute(mock.ANY)]) + self.client.assert_has_calls([mock.call.list_ports()]) diff --git a/neutron/tests/unit/test_dhcp_agent.py b/neutron/tests/unit/test_dhcp_agent.py new file mode 100644 index 000000000..0b0826bb9 --- /dev/null +++ b/neutron/tests/unit/test_dhcp_agent.py @@ -0,0 +1,1466 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import copy +import sys +import uuid + +import eventlet +import mock +from oslo.config import cfg +import testtools + +from neutron.agent.common import config +from neutron.agent import dhcp_agent +from neutron.agent.linux import dhcp +from neutron.agent.linux import interface +from neutron.common import config as common_config +from neutron.common import constants as const +from neutron.common import exceptions +from neutron.common import rpc_compat +from neutron.tests import base + + +HOSTNAME = 'hostname' +dev_man = dhcp.DeviceManager +rpc_api = dhcp_agent.DhcpPluginApi +DEVICE_MANAGER = '%s.%s' % (dev_man.__module__, dev_man.__name__) +DHCP_PLUGIN = '%s.%s' % (rpc_api.__module__, rpc_api.__name__) + + +fake_tenant_id = 'aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa' +fake_subnet1_allocation_pools = dhcp.DictModel(dict(id='', start='172.9.9.2', + end='172.9.9.254')) +fake_subnet1 = dhcp.DictModel(dict(id='bbbbbbbb-bbbb-bbbb-bbbbbbbbbbbb', + network_id='12345678-1234-5678-1234567890ab', + cidr='172.9.9.0/24', enable_dhcp=True, name='', + tenant_id=fake_tenant_id, + gateway_ip='172.9.9.1', host_routes=[], + dns_nameservers=[], ip_version=4, + allocation_pools=fake_subnet1_allocation_pools)) + +fake_subnet2_allocation_pools = dhcp.DictModel(dict(id='', start='172.9.8.2', + end='172.9.8.254')) +fake_subnet2 = dhcp.DictModel(dict(id='dddddddd-dddd-dddd-dddddddddddd', + network_id='12345678-1234-5678-1234567890ab', + cidr='172.9.8.0/24', enable_dhcp=False, name='', + tenant_id=fake_tenant_id, gateway_ip='172.9.8.1', + host_routes=[], dns_nameservers=[], ip_version=4, + allocation_pools=fake_subnet2_allocation_pools)) + +fake_subnet3 = dhcp.DictModel(dict(id='bbbbbbbb-1111-2222-bbbbbbbbbbbb', + network_id='12345678-1234-5678-1234567890ab', + cidr='192.168.1.1/24', enable_dhcp=True)) + +fake_meta_subnet = dhcp.DictModel(dict(id='bbbbbbbb-1111-2222-bbbbbbbbbbbb', + network_id='12345678-1234-5678-1234567890ab', + cidr='169.254.169.252/30', + gateway_ip='169.254.169.253', + 
enable_dhcp=True)) + +fake_fixed_ip1 = dhcp.DictModel(dict(id='', subnet_id=fake_subnet1.id, + ip_address='172.9.9.9')) +fake_meta_fixed_ip = dhcp.DictModel(dict(id='', subnet=fake_meta_subnet, + ip_address='169.254.169.254')) +fake_allocation_pool_subnet1 = dhcp.DictModel(dict(id='', start='172.9.9.2', + end='172.9.9.254')) + +fake_port1 = dhcp.DictModel(dict(id='12345678-1234-aaaa-1234567890ab', + device_id='dhcp-12345678-1234-aaaa-1234567890ab', + allocation_pools=fake_subnet1_allocation_pools, + mac_address='aa:bb:cc:dd:ee:ff', + network_id='12345678-1234-5678-1234567890ab', + fixed_ips=[fake_fixed_ip1])) + +fake_port2 = dhcp.DictModel(dict(id='12345678-1234-aaaa-123456789000', + mac_address='aa:bb:cc:dd:ee:99', + network_id='12345678-1234-5678-1234567890ab', + fixed_ips=[])) + +fake_meta_port = dhcp.DictModel(dict(id='12345678-1234-aaaa-1234567890ab', + mac_address='aa:bb:cc:dd:ee:ff', + network_id='12345678-1234-5678-1234567890ab', + device_owner=const.DEVICE_OWNER_ROUTER_INTF, + device_id='forzanapoli', + fixed_ips=[fake_meta_fixed_ip])) + +fake_network = dhcp.NetModel(True, dict(id='12345678-1234-5678-1234567890ab', + tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa', + admin_state_up=True, + subnets=[fake_subnet1, fake_subnet2], + ports=[fake_port1])) + +fake_meta_network = dhcp.NetModel( + True, dict(id='12345678-1234-5678-1234567890ab', + tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa', + admin_state_up=True, + subnets=[fake_meta_subnet], + ports=[fake_meta_port])) + +fake_down_network = dhcp.NetModel( + True, dict(id='12345678-dddd-dddd-1234567890ab', + tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa', + admin_state_up=False, + subnets=[], + ports=[])) + + +class TestDhcpAgent(base.BaseTestCase): + def setUp(self): + super(TestDhcpAgent, self).setUp() + dhcp_agent.register_options() + cfg.CONF.set_override('interface_driver', + 'neutron.agent.linux.interface.NullDriver') + # disable setting up periodic state reporting + cfg.CONF.set_override('report_interval', 0, 
'AGENT') + + self.driver_cls_p = mock.patch( + 'neutron.agent.dhcp_agent.importutils.import_class') + self.driver = mock.Mock(name='driver') + self.driver.existing_dhcp_networks.return_value = [] + self.driver_cls = self.driver_cls_p.start() + self.driver_cls.return_value = self.driver + self.mock_makedirs_p = mock.patch("os.makedirs") + self.mock_makedirs = self.mock_makedirs_p.start() + + def test_dhcp_agent_manager(self): + state_rpc_str = 'neutron.agent.rpc.PluginReportStateAPI' + # sync_state is needed for this test + cfg.CONF.set_override('report_interval', 1, 'AGENT') + with mock.patch.object(dhcp_agent.DhcpAgentWithStateReport, + 'sync_state', + autospec=True) as mock_sync_state: + with mock.patch.object(dhcp_agent.DhcpAgentWithStateReport, + 'periodic_resync', + autospec=True) as mock_periodic_resync: + with mock.patch(state_rpc_str) as state_rpc: + with mock.patch.object(sys, 'argv') as sys_argv: + sys_argv.return_value = [ + 'dhcp', '--config-file', + base.etcdir('neutron.conf.test')] + cfg.CONF.register_opts(dhcp_agent.DhcpAgent.OPTS) + config.register_interface_driver_opts_helper(cfg.CONF) + config.register_agent_state_opts_helper(cfg.CONF) + config.register_root_helper(cfg.CONF) + cfg.CONF.register_opts(dhcp.OPTS) + cfg.CONF.register_opts(interface.OPTS) + common_config.init(sys.argv[1:]) + agent_mgr = dhcp_agent.DhcpAgentWithStateReport( + 'testhost') + eventlet.greenthread.sleep(1) + agent_mgr.after_start() + mock_sync_state.assert_called_once_with(agent_mgr) + mock_periodic_resync.assert_called_once_with(agent_mgr) + state_rpc.assert_has_calls( + [mock.call(mock.ANY), + mock.call().report_state(mock.ANY, mock.ANY, + mock.ANY)]) + + def test_dhcp_agent_main_agent_manager(self): + logging_str = 'neutron.agent.common.config.setup_logging' + launcher_str = 'neutron.openstack.common.service.ServiceLauncher' + with mock.patch(logging_str): + with mock.patch.object(sys, 'argv') as sys_argv: + with mock.patch(launcher_str) as launcher: + 
sys_argv.return_value = ['dhcp', '--config-file', + base.etcdir('neutron.conf.test')] + dhcp_agent.main() + launcher.assert_has_calls( + [mock.call(), mock.call().launch_service(mock.ANY), + mock.call().wait()]) + + def test_run_completes_single_pass(self): + with mock.patch(DEVICE_MANAGER): + dhcp = dhcp_agent.DhcpAgent(HOSTNAME) + attrs_to_mock = dict( + [(a, mock.DEFAULT) for a in + ['sync_state', 'periodic_resync']]) + with mock.patch.multiple(dhcp, **attrs_to_mock) as mocks: + dhcp.run() + mocks['sync_state'].assert_called_once_with() + mocks['periodic_resync'].assert_called_once_with() + + def test_call_driver(self): + network = mock.Mock() + network.id = '1' + dhcp = dhcp_agent.DhcpAgent(cfg.CONF) + self.assertTrue(dhcp.call_driver('foo', network)) + self.driver.assert_called_once_with(cfg.CONF, + mock.ANY, + 'sudo', + mock.ANY, + mock.ANY) + + def _test_call_driver_failure(self, exc=None, + trace_level='exception', expected_sync=True): + network = mock.Mock() + network.id = '1' + self.driver.return_value.foo.side_effect = exc or Exception + with mock.patch.object(dhcp_agent.LOG, trace_level) as log: + dhcp = dhcp_agent.DhcpAgent(HOSTNAME) + with mock.patch.object(dhcp, + 'schedule_resync') as schedule_resync: + self.assertIsNone(dhcp.call_driver('foo', network)) + self.driver.assert_called_once_with(cfg.CONF, + mock.ANY, + 'sudo', + mock.ANY, + mock.ANY) + self.assertEqual(log.call_count, 1) + self.assertEqual(expected_sync, schedule_resync.called) + + def test_call_driver_failure(self): + self._test_call_driver_failure() + + def test_call_driver_remote_error_net_not_found(self): + self._test_call_driver_failure( + exc=rpc_compat.RemoteError(exc_type='NetworkNotFound'), + trace_level='warning') + + def test_call_driver_network_not_found(self): + self._test_call_driver_failure( + exc=exceptions.NetworkNotFound(net_id='1'), + trace_level='warning') + + def test_call_driver_conflict(self): + self._test_call_driver_failure( + exc=exceptions.Conflict(), + 
trace_level='warning', + expected_sync=False) + + def _test_sync_state_helper(self, known_networks, active_networks): + with mock.patch(DHCP_PLUGIN) as plug: + mock_plugin = mock.Mock() + mock_plugin.get_active_networks_info.return_value = active_networks + plug.return_value = mock_plugin + + dhcp = dhcp_agent.DhcpAgent(HOSTNAME) + + attrs_to_mock = dict( + [(a, mock.DEFAULT) for a in + ['refresh_dhcp_helper', 'disable_dhcp_helper', 'cache']]) + + with mock.patch.multiple(dhcp, **attrs_to_mock) as mocks: + mocks['cache'].get_network_ids.return_value = known_networks + dhcp.sync_state() + + exp_refresh = [ + mock.call(net_id) for net_id in active_networks] + + diff = set(known_networks) - set(active_networks) + exp_disable = [mock.call(net_id) for net_id in diff] + + mocks['cache'].assert_has_calls([mock.call.get_network_ids()]) + mocks['refresh_dhcp_helper'].assert_has_called(exp_refresh) + mocks['disable_dhcp_helper'].assert_has_called(exp_disable) + + def test_sync_state_initial(self): + self._test_sync_state_helper([], ['a']) + + def test_sync_state_same(self): + self._test_sync_state_helper(['a'], ['a']) + + def test_sync_state_disabled_net(self): + self._test_sync_state_helper(['b'], ['a']) + + def test_sync_state_waitall(self): + class mockNetwork(): + id = '0' + admin_state_up = True + subnets = [] + + def __init__(self, id): + self.id = id + with mock.patch.object(dhcp_agent.eventlet.GreenPool, 'waitall') as w: + active_networks = [mockNetwork('1'), mockNetwork('2'), + mockNetwork('3'), mockNetwork('4'), + mockNetwork('5')] + known_networks = ['1', '2', '3', '4', '5'] + self._test_sync_state_helper(known_networks, active_networks) + w.assert_called_once_with() + + def test_sync_state_plugin_error(self): + with mock.patch(DHCP_PLUGIN) as plug: + mock_plugin = mock.Mock() + mock_plugin.get_active_networks_info.side_effect = Exception + plug.return_value = mock_plugin + + with mock.patch.object(dhcp_agent.LOG, 'exception') as log: + dhcp = 
dhcp_agent.DhcpAgent(HOSTNAME) + with mock.patch.object(dhcp, + 'schedule_resync') as schedule_resync: + dhcp.sync_state() + + self.assertTrue(log.called) + self.assertTrue(schedule_resync.called) + + def test_periodic_resync(self): + dhcp = dhcp_agent.DhcpAgent(HOSTNAME) + with mock.patch.object(dhcp_agent.eventlet, 'spawn') as spawn: + dhcp.periodic_resync() + spawn.assert_called_once_with(dhcp._periodic_resync_helper) + + def test_periodoc_resync_helper(self): + with mock.patch.object(dhcp_agent.eventlet, 'sleep') as sleep: + dhcp = dhcp_agent.DhcpAgent(HOSTNAME) + dhcp.needs_resync_reasons = ['reason1', 'reason2'] + with mock.patch.object(dhcp, 'sync_state') as sync_state: + sync_state.side_effect = RuntimeError + with testtools.ExpectedException(RuntimeError): + dhcp._periodic_resync_helper() + sync_state.assert_called_once_with() + sleep.assert_called_once_with(dhcp.conf.resync_interval) + self.assertEqual(len(dhcp.needs_resync_reasons), 0) + + def test_populate_cache_on_start_without_active_networks_support(self): + # emul dhcp driver that doesn't support retrieving of active networks + self.driver.existing_dhcp_networks.side_effect = NotImplementedError + + with mock.patch.object(dhcp_agent.LOG, 'debug') as log: + dhcp = dhcp_agent.DhcpAgent(HOSTNAME) + + self.driver.existing_dhcp_networks.assert_called_once_with( + dhcp.conf, + cfg.CONF.root_helper + ) + + self.assertFalse(dhcp.cache.get_network_ids()) + self.assertTrue(log.called) + + def test_populate_cache_on_start(self): + networks = ['aaa', 'bbb'] + self.driver.existing_dhcp_networks.return_value = networks + + dhcp = dhcp_agent.DhcpAgent(HOSTNAME) + + self.driver.existing_dhcp_networks.assert_called_once_with( + dhcp.conf, + cfg.CONF.root_helper + ) + + self.assertEqual(set(networks), set(dhcp.cache.get_network_ids())) + + def test_none_interface_driver(self): + cfg.CONF.set_override('interface_driver', None) + with mock.patch.object(dhcp, 'LOG') as log: + self.assertRaises(SystemExit, 
dhcp.DeviceManager, + cfg.CONF, 'sudo', None) + msg = 'An interface driver must be specified' + log.error.assert_called_once_with(msg) + + def test_nonexistent_interface_driver(self): + # Temporarily turn off mock, so could use the real import_class + # to import interface_driver. + self.driver_cls_p.stop() + self.addCleanup(self.driver_cls_p.start) + cfg.CONF.set_override('interface_driver', 'foo') + with mock.patch.object(dhcp, 'LOG') as log: + self.assertRaises(SystemExit, dhcp.DeviceManager, + cfg.CONF, 'sudo', None) + self.assertEqual(log.error.call_count, 1) + + +class TestLogArgs(base.BaseTestCase): + + def test_log_args_without_log_dir_and_file(self): + conf_dict = {'debug': True, + 'verbose': False, + 'log_dir': None, + 'log_file': None, + 'use_syslog': True, + 'syslog_log_facility': 'LOG_USER'} + conf = dhcp.DictModel(conf_dict) + expected_args = ['--debug', + '--use-syslog', + '--syslog-log-facility=LOG_USER'] + args = config.get_log_args(conf, 'log_file_name') + self.assertEqual(expected_args, args) + + def test_log_args_without_log_file(self): + conf_dict = {'debug': True, + 'verbose': True, + 'log_dir': '/etc/tests', + 'log_file': None, + 'use_syslog': False, + 'syslog_log_facility': 'LOG_USER'} + conf = dhcp.DictModel(conf_dict) + expected_args = ['--debug', + '--verbose', + '--log-file=log_file_name', + '--log-dir=/etc/tests'] + args = config.get_log_args(conf, 'log_file_name') + self.assertEqual(expected_args, args) + + def test_log_args_with_log_dir_and_file(self): + conf_dict = {'debug': True, + 'verbose': False, + 'log_dir': '/etc/tests', + 'log_file': 'tests/filelog', + 'use_syslog': False, + 'syslog_log_facility': 'LOG_USER'} + conf = dhcp.DictModel(conf_dict) + expected_args = ['--debug', + '--log-file=log_file_name', + '--log-dir=/etc/tests/tests'] + args = config.get_log_args(conf, 'log_file_name') + self.assertEqual(expected_args, args) + + def test_log_args_without_log_dir(self): + conf_dict = {'debug': True, + 'verbose': False, + 
'log_file': 'tests/filelog', + 'log_dir': None, + 'use_syslog': False, + 'syslog_log_facility': 'LOG_USER'} + conf = dhcp.DictModel(conf_dict) + expected_args = ['--debug', + '--log-file=log_file_name', + '--log-dir=tests'] + args = config.get_log_args(conf, 'log_file_name') + self.assertEqual(expected_args, args) + + def test_log_args_with_filelog_and_syslog(self): + conf_dict = {'debug': True, + 'verbose': True, + 'log_file': 'tests/filelog', + 'log_dir': '/etc/tests', + 'use_syslog': True, + 'syslog_log_facility': 'LOG_USER'} + conf = dhcp.DictModel(conf_dict) + expected_args = ['--debug', + '--verbose', + '--log-file=log_file_name', + '--log-dir=/etc/tests/tests'] + args = config.get_log_args(conf, 'log_file_name') + self.assertEqual(expected_args, args) + + +class TestDhcpAgentEventHandler(base.BaseTestCase): + def setUp(self): + super(TestDhcpAgentEventHandler, self).setUp() + config.register_interface_driver_opts_helper(cfg.CONF) + cfg.CONF.register_opts(dhcp.OPTS) + cfg.CONF.set_override('interface_driver', + 'neutron.agent.linux.interface.NullDriver') + config.register_root_helper(cfg.CONF) + cfg.CONF.register_opts(dhcp_agent.DhcpAgent.OPTS) + + self.plugin_p = mock.patch(DHCP_PLUGIN) + plugin_cls = self.plugin_p.start() + self.plugin = mock.Mock() + plugin_cls.return_value = self.plugin + + self.cache_p = mock.patch('neutron.agent.dhcp_agent.NetworkCache') + cache_cls = self.cache_p.start() + self.cache = mock.Mock() + cache_cls.return_value = self.cache + self.mock_makedirs_p = mock.patch("os.makedirs") + self.mock_makedirs = self.mock_makedirs_p.start() + self.mock_init_p = mock.patch('neutron.agent.dhcp_agent.' 
+ 'DhcpAgent._populate_networks_cache') + self.mock_init = self.mock_init_p.start() + with mock.patch.object(dhcp.Dnsmasq, + 'check_version') as check_v: + check_v.return_value = dhcp.Dnsmasq.MINIMUM_VERSION + self.dhcp = dhcp_agent.DhcpAgent(HOSTNAME) + self.call_driver_p = mock.patch.object(self.dhcp, 'call_driver') + self.call_driver = self.call_driver_p.start() + self.schedule_resync_p = mock.patch.object(self.dhcp, + 'schedule_resync') + self.schedule_resync = self.schedule_resync_p.start() + self.external_process_p = mock.patch( + 'neutron.agent.linux.external_process.ProcessManager' + ) + self.external_process = self.external_process_p.start() + + def _enable_dhcp_helper(self, isolated_metadata=False): + if isolated_metadata: + cfg.CONF.set_override('enable_isolated_metadata', True) + self.plugin.get_network_info.return_value = fake_network + self.dhcp.enable_dhcp_helper(fake_network.id) + self.plugin.assert_has_calls( + [mock.call.get_network_info(fake_network.id)]) + self.call_driver.assert_called_once_with('enable', fake_network) + self.cache.assert_has_calls([mock.call.put(fake_network)]) + if isolated_metadata: + self.external_process.assert_has_calls([ + mock.call( + cfg.CONF, + '12345678-1234-5678-1234567890ab', + 'sudo', + 'qdhcp-12345678-1234-5678-1234567890ab'), + mock.call().enable(mock.ANY) + ]) + else: + self.assertFalse(self.external_process.call_count) + + def test_enable_dhcp_helper_enable_isolated_metadata(self): + self._enable_dhcp_helper(isolated_metadata=True) + + def test_enable_dhcp_helper(self): + self._enable_dhcp_helper() + + def test_enable_dhcp_helper_down_network(self): + self.plugin.get_network_info.return_value = fake_down_network + self.dhcp.enable_dhcp_helper(fake_down_network.id) + self.plugin.assert_has_calls( + [mock.call.get_network_info(fake_down_network.id)]) + self.assertFalse(self.call_driver.called) + self.assertFalse(self.cache.called) + self.assertFalse(self.external_process.called) + + def 
test_enable_dhcp_helper_network_none(self): + self.plugin.get_network_info.return_value = None + with mock.patch.object(dhcp_agent.LOG, 'warn') as log: + self.dhcp.enable_dhcp_helper('fake_id') + self.plugin.assert_has_calls( + [mock.call.get_network_info('fake_id')]) + self.assertFalse(self.call_driver.called) + self.assertTrue(log.called) + self.assertFalse(self.dhcp.schedule_resync.called) + + def test_enable_dhcp_helper_exception_during_rpc(self): + self.plugin.get_network_info.side_effect = Exception + with mock.patch.object(dhcp_agent.LOG, 'exception') as log: + self.dhcp.enable_dhcp_helper(fake_network.id) + self.plugin.assert_has_calls( + [mock.call.get_network_info(fake_network.id)]) + self.assertFalse(self.call_driver.called) + self.assertTrue(log.called) + self.assertTrue(self.schedule_resync.called) + self.assertFalse(self.cache.called) + self.assertFalse(self.external_process.called) + + def test_enable_dhcp_helper_driver_failure(self): + self.plugin.get_network_info.return_value = fake_network + self.call_driver.return_value = False + self.dhcp.enable_dhcp_helper(fake_network.id) + self.plugin.assert_has_calls( + [mock.call.get_network_info(fake_network.id)]) + self.call_driver.assert_called_once_with('enable', fake_network) + self.assertFalse(self.cache.called) + self.assertFalse(self.external_process.called) + + def _disable_dhcp_helper_known_network(self, isolated_metadata=False): + if isolated_metadata: + cfg.CONF.set_override('enable_isolated_metadata', True) + self.cache.get_network_by_id.return_value = fake_network + self.dhcp.disable_dhcp_helper(fake_network.id) + self.cache.assert_has_calls( + [mock.call.get_network_by_id(fake_network.id)]) + self.call_driver.assert_called_once_with('disable', fake_network) + if isolated_metadata: + self.external_process.assert_has_calls([ + mock.call( + cfg.CONF, + '12345678-1234-5678-1234567890ab', + 'sudo', + 'qdhcp-12345678-1234-5678-1234567890ab'), + mock.call().disable() + ]) + else: + 
self.assertFalse(self.external_process.call_count) + + def test_disable_dhcp_helper_known_network_isolated_metadata(self): + self._disable_dhcp_helper_known_network(isolated_metadata=True) + + def test_disable_dhcp_helper_known_network(self): + self._disable_dhcp_helper_known_network() + + def test_disable_dhcp_helper_unknown_network(self): + self.cache.get_network_by_id.return_value = None + self.dhcp.disable_dhcp_helper('abcdef') + self.cache.assert_has_calls( + [mock.call.get_network_by_id('abcdef')]) + self.assertEqual(0, self.call_driver.call_count) + self.assertFalse(self.external_process.called) + + def _disable_dhcp_helper_driver_failure(self, isolated_metadata=False): + if isolated_metadata: + cfg.CONF.set_override('enable_isolated_metadata', True) + self.cache.get_network_by_id.return_value = fake_network + self.call_driver.return_value = False + self.dhcp.disable_dhcp_helper(fake_network.id) + self.cache.assert_has_calls( + [mock.call.get_network_by_id(fake_network.id)]) + self.call_driver.assert_called_once_with('disable', fake_network) + self.cache.assert_has_calls( + [mock.call.get_network_by_id(fake_network.id)]) + if isolated_metadata: + self.external_process.assert_has_calls([ + mock.call( + cfg.CONF, + '12345678-1234-5678-1234567890ab', + 'sudo', + 'qdhcp-12345678-1234-5678-1234567890ab'), + mock.call().disable() + ]) + else: + self.assertFalse(self.external_process.call_count) + + def test_disable_dhcp_helper_driver_failure_isolated_metadata(self): + self._disable_dhcp_helper_driver_failure(isolated_metadata=True) + + def test_disable_dhcp_helper_driver_failure(self): + self._disable_dhcp_helper_driver_failure() + + def test_enable_isolated_metadata_proxy(self): + class_path = 'neutron.agent.linux.external_process.ProcessManager' + with mock.patch(class_path) as ext_process: + self.dhcp.enable_isolated_metadata_proxy(fake_network) + ext_process.assert_has_calls([ + mock.call( + cfg.CONF, + '12345678-1234-5678-1234567890ab', + 'sudo', + 
'qdhcp-12345678-1234-5678-1234567890ab'), + mock.call().enable(mock.ANY) + ]) + + def test_disable_isolated_metadata_proxy(self): + class_path = 'neutron.agent.linux.external_process.ProcessManager' + with mock.patch(class_path) as ext_process: + self.dhcp.disable_isolated_metadata_proxy(fake_network) + ext_process.assert_has_calls([ + mock.call( + cfg.CONF, + '12345678-1234-5678-1234567890ab', + 'sudo', + 'qdhcp-12345678-1234-5678-1234567890ab'), + mock.call().disable() + ]) + + def test_enable_isolated_metadata_proxy_with_metadata_network(self): + cfg.CONF.set_override('enable_metadata_network', True) + cfg.CONF.set_override('debug', True) + cfg.CONF.set_override('log_file', 'test.log') + class_path = 'neutron.agent.linux.ip_lib.IPWrapper' + self.external_process_p.stop() + # Ensure the mock is restored if this test fail + try: + with mock.patch(class_path) as ip_wrapper: + self.dhcp.enable_isolated_metadata_proxy(fake_meta_network) + ip_wrapper.assert_has_calls([mock.call( + 'sudo', + 'qdhcp-12345678-1234-5678-1234567890ab'), + mock.call().netns.execute([ + 'neutron-ns-metadata-proxy', + mock.ANY, + mock.ANY, + '--router_id=forzanapoli', + mock.ANY, + mock.ANY, + '--debug', + ('--log-file=neutron-ns-metadata-proxy-%s.log' % + fake_meta_network.id)]) + ]) + finally: + self.external_process_p.start() + + def test_network_create_end(self): + payload = dict(network=dict(id=fake_network.id)) + + with mock.patch.object(self.dhcp, 'enable_dhcp_helper') as enable: + self.dhcp.network_create_end(None, payload) + enable.assertCalledOnceWith(fake_network.id) + + def test_network_update_end_admin_state_up(self): + payload = dict(network=dict(id=fake_network.id, admin_state_up=True)) + with mock.patch.object(self.dhcp, 'enable_dhcp_helper') as enable: + self.dhcp.network_update_end(None, payload) + enable.assertCalledOnceWith(fake_network.id) + + def test_network_update_end_admin_state_down(self): + payload = dict(network=dict(id=fake_network.id, admin_state_up=False)) + 
with mock.patch.object(self.dhcp, 'disable_dhcp_helper') as disable: + self.dhcp.network_update_end(None, payload) + disable.assertCalledOnceWith(fake_network.id) + + def test_network_delete_end(self): + payload = dict(network_id=fake_network.id) + + with mock.patch.object(self.dhcp, 'disable_dhcp_helper') as disable: + self.dhcp.network_delete_end(None, payload) + disable.assertCalledOnceWith(fake_network.id) + + def test_refresh_dhcp_helper_no_dhcp_enabled_networks(self): + network = dhcp.NetModel(True, dict(id='net-id', + tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa', + admin_state_up=True, + subnets=[], + ports=[])) + + self.cache.get_network_by_id.return_value = network + self.plugin.get_network_info.return_value = network + with mock.patch.object(self.dhcp, 'disable_dhcp_helper') as disable: + self.dhcp.refresh_dhcp_helper(network.id) + disable.assert_called_once_with(network.id) + self.assertFalse(self.cache.called) + self.assertFalse(self.call_driver.called) + self.cache.assert_has_calls( + [mock.call.get_network_by_id('net-id')]) + + def test_refresh_dhcp_helper_exception_during_rpc(self): + network = dhcp.NetModel(True, dict(id='net-id', + tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa', + admin_state_up=True, + subnets=[], + ports=[])) + + self.cache.get_network_by_id.return_value = network + self.plugin.get_network_info.side_effect = Exception + with mock.patch.object(dhcp_agent.LOG, 'exception') as log: + self.dhcp.refresh_dhcp_helper(network.id) + self.assertFalse(self.call_driver.called) + self.cache.assert_has_calls( + [mock.call.get_network_by_id('net-id')]) + self.assertTrue(log.called) + self.assertTrue(self.dhcp.schedule_resync.called) + + def test_subnet_update_end(self): + payload = dict(subnet=dict(network_id=fake_network.id)) + self.cache.get_network_by_id.return_value = fake_network + self.plugin.get_network_info.return_value = fake_network + + self.dhcp.subnet_update_end(None, payload) + + 
self.cache.assert_has_calls([mock.call.put(fake_network)]) + self.call_driver.assert_called_once_with('reload_allocations', + fake_network) + + def test_subnet_update_end_restart(self): + new_state = dhcp.NetModel(True, dict(id=fake_network.id, + tenant_id=fake_network.tenant_id, + admin_state_up=True, + subnets=[fake_subnet1, fake_subnet3], + ports=[fake_port1])) + + payload = dict(subnet=dict(network_id=fake_network.id)) + self.cache.get_network_by_id.return_value = fake_network + self.plugin.get_network_info.return_value = new_state + + self.dhcp.subnet_update_end(None, payload) + + self.cache.assert_has_calls([mock.call.put(new_state)]) + self.call_driver.assert_called_once_with('restart', + new_state) + + def test_subnet_update_end_delete_payload(self): + prev_state = dhcp.NetModel(True, dict(id=fake_network.id, + tenant_id=fake_network.tenant_id, + admin_state_up=True, + subnets=[fake_subnet1, fake_subnet3], + ports=[fake_port1])) + + payload = dict(subnet_id=fake_subnet1.id) + self.cache.get_network_by_subnet_id.return_value = prev_state + self.cache.get_network_by_id.return_value = prev_state + self.plugin.get_network_info.return_value = fake_network + + self.dhcp.subnet_delete_end(None, payload) + + self.cache.assert_has_calls([ + mock.call.get_network_by_subnet_id( + 'bbbbbbbb-bbbb-bbbb-bbbbbbbbbbbb'), + mock.call.get_network_by_id('12345678-1234-5678-1234567890ab'), + mock.call.put(fake_network)]) + self.call_driver.assert_called_once_with('restart', + fake_network) + + def test_port_update_end(self): + payload = dict(port=fake_port2) + self.cache.get_network_by_id.return_value = fake_network + self.cache.get_port_by_id.return_value = fake_port2 + self.dhcp.port_update_end(None, payload) + self.cache.assert_has_calls( + [mock.call.get_network_by_id(fake_port2.network_id), + mock.call.put_port(mock.ANY)]) + self.call_driver.assert_called_once_with('reload_allocations', + fake_network) + + def test_port_update_change_ip_on_port(self): + payload = 
dict(port=fake_port1) + self.cache.get_network_by_id.return_value = fake_network + updated_fake_port1 = copy.deepcopy(fake_port1) + updated_fake_port1.fixed_ips[0].ip_address = '172.9.9.99' + self.cache.get_port_by_id.return_value = updated_fake_port1 + self.dhcp.port_update_end(None, payload) + self.cache.assert_has_calls( + [mock.call.get_network_by_id(fake_port1.network_id), + mock.call.put_port(mock.ANY)]) + self.call_driver.assert_has_calls( + [mock.call.call_driver('reload_allocations', fake_network)]) + + def test_port_delete_end(self): + payload = dict(port_id=fake_port2.id) + self.cache.get_network_by_id.return_value = fake_network + self.cache.get_port_by_id.return_value = fake_port2 + + self.dhcp.port_delete_end(None, payload) + self.cache.assert_has_calls( + [mock.call.get_port_by_id(fake_port2.id), + mock.call.get_network_by_id(fake_network.id), + mock.call.remove_port(fake_port2)]) + self.call_driver.assert_has_calls( + [mock.call.call_driver('reload_allocations', fake_network)]) + + def test_port_delete_end_unknown_port(self): + payload = dict(port_id='unknown') + self.cache.get_port_by_id.return_value = None + + self.dhcp.port_delete_end(None, payload) + + self.cache.assert_has_calls([mock.call.get_port_by_id('unknown')]) + self.assertEqual(self.call_driver.call_count, 0) + + +class TestDhcpPluginApiProxy(base.BaseTestCase): + def setUp(self): + super(TestDhcpPluginApiProxy, self).setUp() + self.proxy = dhcp_agent.DhcpPluginApi('foo', {}, None) + self.proxy.host = 'foo' + + self.call_p = mock.patch.object(self.proxy, 'call') + self.call = self.call_p.start() + self.make_msg_p = mock.patch.object(self.proxy, 'make_msg') + self.make_msg = self.make_msg_p.start() + + def test_get_network_info(self): + self.call.return_value = dict(a=1) + retval = self.proxy.get_network_info('netid') + self.assertEqual(retval.a, 1) + self.assertTrue(self.call.called) + self.make_msg.assert_called_once_with('get_network_info', + network_id='netid', + host='foo') + + def 
test_get_dhcp_port(self): + self.call.return_value = dict(a=1) + retval = self.proxy.get_dhcp_port('netid', 'devid') + self.assertEqual(retval.a, 1) + self.assertTrue(self.call.called) + self.make_msg.assert_called_once_with('get_dhcp_port', + network_id='netid', + device_id='devid', + host='foo') + + def test_get_dhcp_port_none(self): + self.call.return_value = None + self.assertIsNone(self.proxy.get_dhcp_port('netid', 'devid')) + + def test_get_active_networks_info(self): + self.proxy.get_active_networks_info() + self.make_msg.assert_called_once_with('get_active_networks_info', + host='foo') + + def test_create_dhcp_port(self): + port_body = ( + {'port': + {'name': '', 'admin_state_up': True, + 'network_id': fake_network.id, + 'tenant_id': fake_network.tenant_id, + 'fixed_ips': [{'subnet_id': fake_fixed_ip1.subnet_id}], + 'device_id': mock.ANY}}) + + self.proxy.create_dhcp_port(port_body) + self.make_msg.assert_called_once_with('create_dhcp_port', + port=port_body, + host='foo') + + def test_create_dhcp_port_none(self): + self.call.return_value = None + port_body = ( + {'port': + {'name': '', 'admin_state_up': True, + 'network_id': fake_network.id, + 'tenant_id': fake_network.tenant_id, + 'fixed_ips': [{'subnet_id': fake_fixed_ip1.subnet_id}], + 'device_id': mock.ANY}}) + self.assertIsNone(self.proxy.create_dhcp_port(port_body)) + + def test_update_dhcp_port_none(self): + self.call.return_value = None + port_body = {'port': {'fixed_ips': + [{'subnet_id': fake_fixed_ip1.subnet_id}]}} + self.assertIsNone(self.proxy.update_dhcp_port(fake_port1.id, + port_body)) + + def test_update_dhcp_port(self): + port_body = {'port': {'fixed_ips': + [{'subnet_id': fake_fixed_ip1.subnet_id}]}} + self.proxy.update_dhcp_port(fake_port1.id, port_body) + self.make_msg.assert_called_once_with('update_dhcp_port', + port_id=fake_port1.id, + port=port_body, + host='foo') + + def test_release_dhcp_port(self): + self.proxy.release_dhcp_port('netid', 'devid') + 
self.assertTrue(self.call.called) + self.make_msg.assert_called_once_with('release_dhcp_port', + network_id='netid', + device_id='devid', + host='foo') + + def test_release_port_fixed_ip(self): + self.proxy.release_port_fixed_ip('netid', 'devid', 'subid') + self.assertTrue(self.call.called) + self.make_msg.assert_called_once_with('release_port_fixed_ip', + network_id='netid', + subnet_id='subid', + device_id='devid', + host='foo') + + +class TestNetworkCache(base.BaseTestCase): + def test_put_network(self): + nc = dhcp_agent.NetworkCache() + nc.put(fake_network) + self.assertEqual(nc.cache, + {fake_network.id: fake_network}) + self.assertEqual(nc.subnet_lookup, + {fake_subnet1.id: fake_network.id, + fake_subnet2.id: fake_network.id}) + self.assertEqual(nc.port_lookup, + {fake_port1.id: fake_network.id}) + + def test_put_network_existing(self): + prev_network_info = mock.Mock() + nc = dhcp_agent.NetworkCache() + with mock.patch.object(nc, 'remove') as remove: + nc.cache[fake_network.id] = prev_network_info + + nc.put(fake_network) + remove.assert_called_once_with(prev_network_info) + self.assertEqual(nc.cache, + {fake_network.id: fake_network}) + self.assertEqual(nc.subnet_lookup, + {fake_subnet1.id: fake_network.id, + fake_subnet2.id: fake_network.id}) + self.assertEqual(nc.port_lookup, + {fake_port1.id: fake_network.id}) + + def test_remove_network(self): + nc = dhcp_agent.NetworkCache() + nc.cache = {fake_network.id: fake_network} + nc.subnet_lookup = {fake_subnet1.id: fake_network.id, + fake_subnet2.id: fake_network.id} + nc.port_lookup = {fake_port1.id: fake_network.id} + nc.remove(fake_network) + + self.assertEqual(len(nc.cache), 0) + self.assertEqual(len(nc.subnet_lookup), 0) + self.assertEqual(len(nc.port_lookup), 0) + + def test_get_network_by_id(self): + nc = dhcp_agent.NetworkCache() + nc.put(fake_network) + + self.assertEqual(nc.get_network_by_id(fake_network.id), fake_network) + + def test_get_network_ids(self): + nc = dhcp_agent.NetworkCache() + 
nc.put(fake_network) + + self.assertEqual(nc.get_network_ids(), [fake_network.id]) + + def test_get_network_by_subnet_id(self): + nc = dhcp_agent.NetworkCache() + nc.put(fake_network) + + self.assertEqual(nc.get_network_by_subnet_id(fake_subnet1.id), + fake_network) + + def test_get_network_by_port_id(self): + nc = dhcp_agent.NetworkCache() + nc.put(fake_network) + + self.assertEqual(nc.get_network_by_port_id(fake_port1.id), + fake_network) + + def test_put_port(self): + fake_net = dhcp.NetModel( + True, dict(id='12345678-1234-5678-1234567890ab', + tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa', + subnets=[fake_subnet1], + ports=[fake_port1])) + nc = dhcp_agent.NetworkCache() + nc.put(fake_net) + nc.put_port(fake_port2) + self.assertEqual(len(nc.port_lookup), 2) + self.assertIn(fake_port2, fake_net.ports) + + def test_put_port_existing(self): + fake_net = dhcp.NetModel( + True, dict(id='12345678-1234-5678-1234567890ab', + tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa', + subnets=[fake_subnet1], + ports=[fake_port1, fake_port2])) + nc = dhcp_agent.NetworkCache() + nc.put(fake_net) + nc.put_port(fake_port2) + + self.assertEqual(len(nc.port_lookup), 2) + self.assertIn(fake_port2, fake_net.ports) + + def test_remove_port_existing(self): + fake_net = dhcp.NetModel( + True, dict(id='12345678-1234-5678-1234567890ab', + tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa', + subnets=[fake_subnet1], + ports=[fake_port1, fake_port2])) + nc = dhcp_agent.NetworkCache() + nc.put(fake_net) + nc.remove_port(fake_port2) + + self.assertEqual(len(nc.port_lookup), 1) + self.assertNotIn(fake_port2, fake_net.ports) + + def test_get_port_by_id(self): + nc = dhcp_agent.NetworkCache() + nc.put(fake_network) + self.assertEqual(nc.get_port_by_id(fake_port1.id), fake_port1) + + +class FakePort1: + id = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee' + + +class FakeV4Subnet: + id = 'dddddddd-dddd-dddd-dddd-dddddddddddd' + ip_version = 4 + cidr = '192.168.0.0/24' + gateway_ip = '192.168.0.1' + enable_dhcp = True + + 
+class FakeV4SubnetNoGateway: + id = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee' + ip_version = 4 + cidr = '192.168.1.0/24' + gateway_ip = None + enable_dhcp = True + + +class FakeV4Network: + id = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa' + subnets = [FakeV4Subnet()] + ports = [FakePort1()] + namespace = 'qdhcp-aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa' + + +class FakeV4NetworkNoSubnet: + id = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa' + subnets = [] + ports = [] + + +class FakeV4NetworkNoGateway: + id = 'cccccccc-cccc-cccc-cccc-cccccccccccc' + subnets = [FakeV4SubnetNoGateway()] + ports = [FakePort1()] + + +class TestDeviceManager(base.BaseTestCase): + def setUp(self): + super(TestDeviceManager, self).setUp() + config.register_interface_driver_opts_helper(cfg.CONF) + config.register_use_namespaces_opts_helper(cfg.CONF) + cfg.CONF.register_opts(dhcp_agent.DhcpAgent.OPTS) + cfg.CONF.register_opts(dhcp.OPTS) + cfg.CONF.set_override('interface_driver', + 'neutron.agent.linux.interface.NullDriver') + config.register_root_helper(cfg.CONF) + cfg.CONF.set_override('use_namespaces', True) + cfg.CONF.set_override('enable_isolated_metadata', True) + + self.ensure_device_is_ready_p = mock.patch( + 'neutron.agent.linux.ip_lib.ensure_device_is_ready') + self.ensure_device_is_ready = (self.ensure_device_is_ready_p.start()) + + self.dvr_cls_p = mock.patch('neutron.agent.linux.interface.NullDriver') + self.iproute_cls_p = mock.patch('neutron.agent.linux.' 
+ 'ip_lib.IpRouteCommand') + driver_cls = self.dvr_cls_p.start() + iproute_cls = self.iproute_cls_p.start() + self.mock_driver = mock.MagicMock() + self.mock_driver.DEV_NAME_LEN = ( + interface.LinuxInterfaceDriver.DEV_NAME_LEN) + self.mock_iproute = mock.MagicMock() + driver_cls.return_value = self.mock_driver + iproute_cls.return_value = self.mock_iproute + + def _test_setup_helper(self, device_is_ready, net=None, port=None): + net = net or fake_network + port = port or fake_port1 + plugin = mock.Mock() + plugin.create_dhcp_port.return_value = port or fake_port1 + plugin.get_dhcp_port.return_value = port or fake_port1 + self.ensure_device_is_ready.return_value = device_is_ready + self.mock_driver.get_device_name.return_value = 'tap12345678-12' + + dh = dhcp.DeviceManager(cfg.CONF, cfg.CONF.root_helper, plugin) + dh._set_default_route = mock.Mock() + interface_name = dh.setup(net) + + self.assertEqual(interface_name, 'tap12345678-12') + + plugin.assert_has_calls([ + mock.call.create_dhcp_port( + {'port': {'name': '', 'admin_state_up': True, + 'network_id': net.id, 'tenant_id': net.tenant_id, + 'fixed_ips': + [{'subnet_id': fake_fixed_ip1.subnet_id}], + 'device_id': mock.ANY}})]) + + expected_ips = ['172.9.9.9/24', '169.254.169.254/16'] + expected = [ + mock.call.get_device_name(port), + mock.call.init_l3( + 'tap12345678-12', + expected_ips, + namespace=net.namespace)] + + if not device_is_ready: + expected.insert(1, + mock.call.plug(net.id, + port.id, + 'tap12345678-12', + 'aa:bb:cc:dd:ee:ff', + namespace=net.namespace)) + self.mock_driver.assert_has_calls(expected) + + dh._set_default_route.assert_called_once_with(net, 'tap12345678-12') + + def test_setup(self): + cfg.CONF.set_override('enable_metadata_network', False) + self._test_setup_helper(False) + cfg.CONF.set_override('enable_metadata_network', True) + self._test_setup_helper(False) + + def test_setup_device_is_ready(self): + self._test_setup_helper(True) + + def test_create_dhcp_port_raise_conflict(self): 
+ plugin = mock.Mock() + dh = dhcp.DeviceManager(cfg.CONF, cfg.CONF.root_helper, plugin) + plugin.create_dhcp_port.return_value = None + self.assertRaises(exceptions.Conflict, + dh.setup_dhcp_port, + fake_network) + + def test_create_dhcp_port_create_new(self): + plugin = mock.Mock() + dh = dhcp.DeviceManager(cfg.CONF, cfg.CONF.root_helper, plugin) + plugin.create_dhcp_port.return_value = fake_network.ports[0] + dh.setup_dhcp_port(fake_network) + plugin.assert_has_calls([ + mock.call.create_dhcp_port( + {'port': {'name': '', 'admin_state_up': True, + 'network_id': + fake_network.id, 'tenant_id': fake_network.tenant_id, + 'fixed_ips': + [{'subnet_id': fake_fixed_ip1.subnet_id}], + 'device_id': mock.ANY}})]) + + def test_create_dhcp_port_update_add_subnet(self): + plugin = mock.Mock() + dh = dhcp.DeviceManager(cfg.CONF, cfg.CONF.root_helper, plugin) + fake_network_copy = copy.deepcopy(fake_network) + fake_network_copy.ports[0].device_id = dh.get_device_id(fake_network) + fake_network_copy.subnets[1].enable_dhcp = True + plugin.update_dhcp_port.return_value = fake_network.ports[0] + dh.setup_dhcp_port(fake_network_copy) + port_body = {'port': { + 'network_id': fake_network.id, + 'fixed_ips': [{'subnet_id': fake_fixed_ip1.subnet_id, + 'ip_address': fake_fixed_ip1.ip_address}, + {'subnet_id': fake_subnet2.id}]}} + + plugin.assert_has_calls([ + mock.call.update_dhcp_port(fake_network_copy.ports[0].id, + port_body)]) + + def test_update_dhcp_port_raises_conflict(self): + plugin = mock.Mock() + dh = dhcp.DeviceManager(cfg.CONF, cfg.CONF.root_helper, plugin) + fake_network_copy = copy.deepcopy(fake_network) + fake_network_copy.ports[0].device_id = dh.get_device_id(fake_network) + fake_network_copy.subnets[1].enable_dhcp = True + plugin.update_dhcp_port.return_value = None + self.assertRaises(exceptions.Conflict, + dh.setup_dhcp_port, + fake_network_copy) + + def test_create_dhcp_port_no_update_or_create(self): + plugin = mock.Mock() + dh = dhcp.DeviceManager(cfg.CONF, 
cfg.CONF.root_helper, plugin) + fake_network_copy = copy.deepcopy(fake_network) + fake_network_copy.ports[0].device_id = dh.get_device_id(fake_network) + dh.setup_dhcp_port(fake_network_copy) + self.assertFalse(plugin.setup_dhcp_port.called) + self.assertFalse(plugin.update_dhcp_port.called) + + def test_destroy(self): + fake_net = dhcp.NetModel( + True, dict(id='12345678-1234-5678-1234567890ab', + tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa')) + + fake_port = dhcp.DictModel( + dict(id='12345678-1234-aaaa-1234567890ab', + mac_address='aa:bb:cc:dd:ee:ff')) + + with mock.patch('neutron.agent.linux.interface.NullDriver') as dvr_cls: + mock_driver = mock.MagicMock() + mock_driver.get_device_name.return_value = 'tap12345678-12' + dvr_cls.return_value = mock_driver + + plugin = mock.Mock() + plugin.get_dhcp_port.return_value = fake_port + + dh = dhcp.DeviceManager(cfg.CONF, cfg.CONF.root_helper, plugin) + dh.destroy(fake_net, 'tap12345678-12') + + dvr_cls.assert_called_once_with(cfg.CONF) + mock_driver.assert_has_calls( + [mock.call.unplug('tap12345678-12', + namespace='qdhcp-' + fake_net.id)]) + plugin.assert_has_calls( + [mock.call.release_dhcp_port(fake_net.id, mock.ANY)]) + + def test_get_interface_name(self): + fake_net = dhcp.NetModel( + True, dict(id='12345678-1234-5678-1234567890ab', + tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa')) + + fake_port = dhcp.DictModel( + dict(id='12345678-1234-aaaa-1234567890ab', + mac_address='aa:bb:cc:dd:ee:ff')) + + with mock.patch('neutron.agent.linux.interface.NullDriver') as dvr_cls: + mock_driver = mock.MagicMock() + mock_driver.get_device_name.return_value = 'tap12345678-12' + dvr_cls.return_value = mock_driver + + plugin = mock.Mock() + plugin.get_dhcp_port.return_value = fake_port + + dh = dhcp.DeviceManager(cfg.CONF, cfg.CONF.root_helper, plugin) + dh.get_interface_name(fake_net, fake_port) + + dvr_cls.assert_called_once_with(cfg.CONF) + mock_driver.assert_has_calls( + [mock.call.get_device_name(fake_port)]) + + 
self.assertEqual(len(plugin.mock_calls), 0) + + def test_get_device_id(self): + fake_net = dhcp.NetModel( + True, dict(id='12345678-1234-5678-1234567890ab', + tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa')) + expected = ('dhcp1ae5f96c-c527-5079-82ea-371a01645457-12345678-1234-' + '5678-1234567890ab') + + with mock.patch('uuid.uuid5') as uuid5: + uuid5.return_value = '1ae5f96c-c527-5079-82ea-371a01645457' + + dh = dhcp.DeviceManager(cfg.CONF, cfg.CONF.root_helper, None) + uuid5.called_once_with(uuid.NAMESPACE_DNS, cfg.CONF.host) + self.assertEqual(dh.get_device_id(fake_net), expected) + + def test_update(self): + # Try with namespaces and no metadata network + cfg.CONF.set_override('use_namespaces', True) + cfg.CONF.set_override('enable_metadata_network', False) + dh = dhcp.DeviceManager(cfg.CONF, cfg.CONF.root_helper, None) + dh._set_default_route = mock.Mock() + network = mock.Mock() + + dh.update(network, 'ns-12345678-12') + + dh._set_default_route.assert_called_once_with(network, + 'ns-12345678-12') + + # No namespaces, shouldn't set default route. + cfg.CONF.set_override('use_namespaces', False) + cfg.CONF.set_override('enable_metadata_network', False) + dh = dhcp.DeviceManager(cfg.CONF, cfg.CONF.root_helper, None) + dh._set_default_route = mock.Mock() + + dh.update(FakeV4Network(), 'tap12345678-12') + + self.assertFalse(dh._set_default_route.called) + + # Meta data network enabled, don't interfere with its gateway. 
+ cfg.CONF.set_override('use_namespaces', True) + cfg.CONF.set_override('enable_metadata_network', True) + dh = dhcp.DeviceManager(cfg.CONF, cfg.CONF.root_helper, None) + dh._set_default_route = mock.Mock() + + dh.update(FakeV4Network(), 'ns-12345678-12') + + self.assertTrue(dh._set_default_route.called) + + # For completeness + cfg.CONF.set_override('use_namespaces', False) + cfg.CONF.set_override('enable_metadata_network', True) + dh = dhcp.DeviceManager(cfg.CONF, cfg.CONF.root_helper, None) + dh._set_default_route = mock.Mock() + + dh.update(FakeV4Network(), 'ns-12345678-12') + + self.assertFalse(dh._set_default_route.called) + + def test_set_default_route(self): + dh = dhcp.DeviceManager(cfg.CONF, cfg.CONF.root_helper, None) + with mock.patch.object(dhcp.ip_lib, 'IPDevice') as mock_IPDevice: + device = mock.Mock() + mock_IPDevice.return_value = device + device.route.get_gateway.return_value = None + # Basic one subnet with gateway. + network = FakeV4Network() + dh._set_default_route(network, 'tap-name') + + self.assertEqual(device.route.get_gateway.call_count, 1) + self.assertFalse(device.route.delete_gateway.called) + device.route.add_gateway.assert_called_once_with('192.168.0.1') + + def test_set_default_route_no_subnet(self): + dh = dhcp.DeviceManager(cfg.CONF, cfg.CONF.root_helper, None) + with mock.patch.object(dhcp.ip_lib, 'IPDevice') as mock_IPDevice: + device = mock.Mock() + mock_IPDevice.return_value = device + device.route.get_gateway.return_value = None + network = FakeV4NetworkNoSubnet() + network.namespace = 'qdhcp-1234' + dh._set_default_route(network, 'tap-name') + + self.assertEqual(device.route.get_gateway.call_count, 1) + self.assertFalse(device.route.delete_gateway.called) + self.assertFalse(device.route.add_gateway.called) + + def test_set_default_route_no_subnet_delete_gateway(self): + dh = dhcp.DeviceManager(cfg.CONF, cfg.CONF.root_helper, None) + with mock.patch.object(dhcp.ip_lib, 'IPDevice') as mock_IPDevice: + device = mock.Mock() + 
mock_IPDevice.return_value = device + device.route.get_gateway.return_value = dict(gateway='192.168.0.1') + network = FakeV4NetworkNoSubnet() + network.namespace = 'qdhcp-1234' + dh._set_default_route(network, 'tap-name') + + self.assertEqual(device.route.get_gateway.call_count, 1) + device.route.delete_gateway.assert_called_once_with('192.168.0.1') + self.assertFalse(device.route.add_gateway.called) + + def test_set_default_route_no_gateway(self): + dh = dhcp.DeviceManager(cfg.CONF, cfg.CONF.root_helper, None) + with mock.patch.object(dhcp.ip_lib, 'IPDevice') as mock_IPDevice: + device = mock.Mock() + mock_IPDevice.return_value = device + device.route.get_gateway.return_value = dict(gateway='192.168.0.1') + network = FakeV4NetworkNoGateway() + network.namespace = 'qdhcp-1234' + dh._set_default_route(network, 'tap-name') + + self.assertEqual(device.route.get_gateway.call_count, 1) + device.route.delete_gateway.assert_called_once_with('192.168.0.1') + self.assertFalse(device.route.add_gateway.called) + + def test_set_default_route_do_nothing(self): + dh = dhcp.DeviceManager(cfg.CONF, cfg.CONF.root_helper, None) + with mock.patch.object(dhcp.ip_lib, 'IPDevice') as mock_IPDevice: + device = mock.Mock() + mock_IPDevice.return_value = device + device.route.get_gateway.return_value = dict(gateway='192.168.0.1') + network = FakeV4Network() + dh._set_default_route(network, 'tap-name') + + self.assertEqual(device.route.get_gateway.call_count, 1) + self.assertFalse(device.route.delete_gateway.called) + self.assertFalse(device.route.add_gateway.called) + + def test_set_default_route_change_gateway(self): + dh = dhcp.DeviceManager(cfg.CONF, cfg.CONF.root_helper, None) + with mock.patch.object(dhcp.ip_lib, 'IPDevice') as mock_IPDevice: + device = mock.Mock() + mock_IPDevice.return_value = device + device.route.get_gateway.return_value = dict(gateway='192.168.0.2') + network = FakeV4Network() + dh._set_default_route(network, 'tap-name') + + 
self.assertEqual(device.route.get_gateway.call_count, 1) + self.assertFalse(device.route.delete_gateway.called) + device.route.add_gateway.assert_called_once_with('192.168.0.1') + + def test_set_default_route_two_subnets(self): + # Try two subnets. Should set gateway from the first. + dh = dhcp.DeviceManager(cfg.CONF, cfg.CONF.root_helper, None) + with mock.patch.object(dhcp.ip_lib, 'IPDevice') as mock_IPDevice: + device = mock.Mock() + mock_IPDevice.return_value = device + device.route.get_gateway.return_value = None + network = FakeV4Network() + subnet2 = FakeV4Subnet() + subnet2.gateway_ip = '192.168.1.1' + network.subnets = [subnet2, FakeV4Subnet()] + dh._set_default_route(network, 'tap-name') + + self.assertEqual(device.route.get_gateway.call_count, 1) + self.assertFalse(device.route.delete_gateway.called) + device.route.add_gateway.assert_called_once_with('192.168.1.1') + + +class TestDictModel(base.BaseTestCase): + def test_basic_dict(self): + d = dict(a=1, b=2) + + m = dhcp.DictModel(d) + self.assertEqual(m.a, 1) + self.assertEqual(m.b, 2) + + def test_dict_has_sub_dict(self): + d = dict(a=dict(b=2)) + m = dhcp.DictModel(d) + self.assertEqual(m.a.b, 2) + + def test_dict_contains_list(self): + d = dict(a=[1, 2]) + + m = dhcp.DictModel(d) + self.assertEqual(m.a, [1, 2]) + + def test_dict_contains_list_of_dicts(self): + d = dict(a=[dict(b=2), dict(c=3)]) + + m = dhcp.DictModel(d) + self.assertEqual(m.a[0].b, 2) + self.assertEqual(m.a[1].c, 3) + + +class TestNetModel(base.BaseTestCase): + def test_ns_name(self): + network = dhcp.NetModel(True, {'id': 'foo'}) + self.assertEqual(network.namespace, 'qdhcp-foo') + + def test_ns_name_false_namespace(self): + network = dhcp.NetModel(False, {'id': 'foo'}) + self.assertIsNone(network.namespace) + + def test_ns_name_none_namespace(self): + network = dhcp.NetModel(None, {'id': 'foo'}) + self.assertIsNone(network.namespace) diff --git a/neutron/tests/unit/test_dhcp_scheduler.py b/neutron/tests/unit/test_dhcp_scheduler.py 
new file mode 100644 index 000000000..fd071f73a --- /dev/null +++ b/neutron/tests/unit/test_dhcp_scheduler.py @@ -0,0 +1,90 @@ +# Copyright 2014 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import mock + +from neutron.common import constants +from neutron.common import topics +from neutron import context +from neutron.db import agents_db +from neutron.db import agentschedulers_db +from neutron.db import api as db +from neutron.db import models_v2 +from neutron.openstack.common import timeutils +from neutron.scheduler import dhcp_agent_scheduler +from neutron.tests import base + + +class DhcpSchedulerTestCase(base.BaseTestCase): + + def setUp(self): + super(DhcpSchedulerTestCase, self).setUp() + db.configure_db() + self.ctx = context.get_admin_context() + self.network_id = 'foo_network_id' + self._save_networks([self.network_id]) + self.addCleanup(db.clear_db) + + def _get_agents(self, hosts): + return [ + agents_db.Agent( + binary='neutron-dhcp-agent', + host=host, + topic=topics.DHCP_AGENT, + configurations="", + agent_type=constants.AGENT_TYPE_DHCP, + created_at=timeutils.utcnow(), + started_at=timeutils.utcnow(), + heartbeat_timestamp=timeutils.utcnow()) + for host in hosts + ] + + def _save_agents(self, agents): + for agent in agents: + with self.ctx.session.begin(subtransactions=True): + self.ctx.session.add(agent) + + def _save_networks(self, networks): + for network_id in networks: + with 
self.ctx.session.begin(subtransactions=True): + self.ctx.session.add(models_v2.Network(id=network_id)) + + def _test__schedule_bind_network(self, agents, network_id): + scheduler = dhcp_agent_scheduler.ChanceScheduler() + scheduler._schedule_bind_network(self.ctx, agents, network_id) + results = ( + self.ctx.session.query(agentschedulers_db.NetworkDhcpAgentBinding). + filter_by(network_id=network_id).all()) + self.assertEqual(len(agents), len(results)) + for result in results: + self.assertEqual(network_id, result.network_id) + + def test__schedule_bind_network_single_agent(self): + agents = self._get_agents(['host-a']) + self._save_agents(agents) + self._test__schedule_bind_network(agents, self.network_id) + + def test__schedule_bind_network_multi_agents(self): + agents = self._get_agents(['host-a', 'host-b']) + self._save_agents(agents) + self._test__schedule_bind_network(agents, self.network_id) + + def test__schedule_bind_network_multi_agent_fail_one(self): + agents = self._get_agents(['host-a']) + self._save_agents(agents) + self._test__schedule_bind_network(agents, self.network_id) + with mock.patch.object(dhcp_agent_scheduler.LOG, 'info') as fake_log: + self._test__schedule_bind_network(agents, self.network_id) + self.assertEqual(1, fake_log.call_count) diff --git a/neutron/tests/unit/test_extension_allowedaddresspairs.py b/neutron/tests/unit/test_extension_allowedaddresspairs.py new file mode 100644 index 000000000..28dcd91b1 --- /dev/null +++ b/neutron/tests/unit/test_extension_allowedaddresspairs.py @@ -0,0 +1,262 @@ +# Copyright (c) 2013 OpenStack Foundation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +from neutron.api.v2 import attributes as attr +from neutron.db import allowedaddresspairs_db as addr_pair_db +from neutron.db import db_base_plugin_v2 +from neutron.db import portsecurity_db +from neutron.extensions import allowedaddresspairs as addr_pair +from neutron.extensions import portsecurity as psec +from neutron import manager +from neutron.tests.unit import test_db_plugin + +DB_PLUGIN_KLASS = ('neutron.tests.unit.test_extension_allowedaddresspairs.' + 'AllowedAddressPairTestPlugin') + + +class AllowedAddressPairTestCase(test_db_plugin.NeutronDbPluginV2TestCase): + def setUp(self, plugin=None, ext_mgr=None): + super(AllowedAddressPairTestCase, self).setUp(plugin) + + # Check if a plugin supports security groups + plugin_obj = manager.NeutronManager.get_plugin() + self._skip_port_security = ('port-security' not in + plugin_obj.supported_extension_aliases) + + +class AllowedAddressPairTestPlugin(portsecurity_db.PortSecurityDbMixin, + db_base_plugin_v2.NeutronDbPluginV2, + addr_pair_db.AllowedAddressPairsMixin): + + """Test plugin that implements necessary calls on create/delete port for + associating ports with port security and allowed address pairs. 
+ """ + + supported_extension_aliases = ["allowed-address-pairs"] + + def create_port(self, context, port): + p = port['port'] + with context.session.begin(subtransactions=True): + neutron_db = super(AllowedAddressPairTestPlugin, self).create_port( + context, port) + p.update(neutron_db) + if attr.is_attr_set(p.get(addr_pair.ADDRESS_PAIRS)): + self._process_create_allowed_address_pairs( + context, p, + p[addr_pair.ADDRESS_PAIRS]) + else: + p[addr_pair.ADDRESS_PAIRS] = None + + return port['port'] + + def update_port(self, context, id, port): + delete_addr_pairs = self._check_update_deletes_allowed_address_pairs( + port) + has_addr_pairs = self._check_update_has_allowed_address_pairs(port) + + with context.session.begin(subtransactions=True): + ret_port = super(AllowedAddressPairTestPlugin, self).update_port( + context, id, port) + # copy values over - but not fixed_ips + port['port'].pop('fixed_ips', None) + ret_port.update(port['port']) + + if (delete_addr_pairs or has_addr_pairs): + # delete address pairds and readd them + self._delete_allowed_address_pairs(context, id) + self._process_create_allowed_address_pairs( + context, ret_port, + ret_port[addr_pair.ADDRESS_PAIRS]) + + return ret_port + + +class AllowedAddressPairDBTestCase(AllowedAddressPairTestCase): + def setUp(self, plugin=None, ext_mgr=None): + plugin = plugin or DB_PLUGIN_KLASS + super(AllowedAddressPairDBTestCase, + self).setUp(plugin=plugin, ext_mgr=ext_mgr) + + +class TestAllowedAddressPairs(AllowedAddressPairDBTestCase): + + def test_create_port_allowed_address_pairs(self): + with self.network() as net: + address_pairs = [{'mac_address': '00:00:00:00:00:01', + 'ip_address': '10.0.0.1'}] + res = self._create_port(self.fmt, net['network']['id'], + arg_list=(addr_pair.ADDRESS_PAIRS,), + allowed_address_pairs=address_pairs) + port = self.deserialize(self.fmt, res) + self.assertEqual(port['port'][addr_pair.ADDRESS_PAIRS], + address_pairs) + self._delete('ports', port['port']['id']) + + def 
test_create_port_security_true_allowed_address_pairs(self): + if self._skip_port_security: + self.skipTest("Plugin does not implement port-security extension") + + with self.network() as net: + address_pairs = [{'mac_address': '00:00:00:00:00:01', + 'ip_address': '10.0.0.1'}] + res = self._create_port(self.fmt, net['network']['id'], + arg_list=('port_security_enabled', + addr_pair.ADDRESS_PAIRS,), + port_security_enabled=True, + allowed_address_pairs=address_pairs) + port = self.deserialize(self.fmt, res) + self.assertEqual(port['port'][psec.PORTSECURITY], True) + self.assertEqual(port['port'][addr_pair.ADDRESS_PAIRS], + address_pairs) + self._delete('ports', port['port']['id']) + + def test_create_port_security_false_allowed_address_pairs(self): + if self._skip_port_security: + self.skipTest("Plugin does not implement port-security extension") + + with self.network() as net: + address_pairs = [{'mac_address': '00:00:00:00:00:01', + 'ip_address': '10.0.0.1'}] + res = self._create_port(self.fmt, net['network']['id'], + arg_list=('port_security_enabled', + addr_pair.ADDRESS_PAIRS,), + port_security_enabled=False, + allowed_address_pairs=address_pairs) + self.deserialize(self.fmt, res) + self.assertEqual(res.status_int, 409) + + def test_create_port_bad_mac(self): + address_pairs = [{'mac_address': 'invalid_mac', + 'ip_address': '10.0.0.1'}] + self._create_port_with_address_pairs(address_pairs, 400) + + def test_create_port_bad_ip(self): + address_pairs = [{'mac_address': '00:00:00:00:00:01', + 'ip_address': '10.0.0.1222'}] + self._create_port_with_address_pairs(address_pairs, 400) + + def test_create_missing_ip_field(self): + address_pairs = [{'mac_address': '00:00:00:00:00:01'}] + self._create_port_with_address_pairs(address_pairs, 400) + + def test_create_duplicate_mac_ip(self): + address_pairs = [{'mac_address': '00:00:00:00:00:01', + 'ip_address': '10.0.0.1'}, + {'mac_address': '00:00:00:00:00:01', + 'ip_address': '10.0.0.1'}] + 
self._create_port_with_address_pairs(address_pairs, 400) + + def test_create_overlap_with_fixed_ip(self): + address_pairs = [{'mac_address': '00:00:00:00:00:01', + 'ip_address': '10.0.0.2'}] + with self.network() as network: + with self.subnet(network=network, cidr='10.0.0.0/24') as subnet: + fixed_ips = [{'subnet_id': subnet['subnet']['id'], + 'ip_address': '10.0.0.2'}] + res = self._create_port(self.fmt, network['network']['id'], + arg_list=(addr_pair.ADDRESS_PAIRS, + 'fixed_ips'), + allowed_address_pairs=address_pairs, + fixed_ips=fixed_ips) + self.assertEqual(res.status_int, 201) + port = self.deserialize(self.fmt, res) + self._delete('ports', port['port']['id']) + + def test_create_port_extra_args(self): + address_pairs = [{'mac_address': '00:00:00:00:00:01', + 'ip_address': '10.0.0.1', + 'icbb': 'agreed'}] + self._create_port_with_address_pairs(address_pairs, 400) + + def _create_port_with_address_pairs(self, address_pairs, ret_code): + with self.network() as net: + res = self._create_port(self.fmt, net['network']['id'], + arg_list=(addr_pair.ADDRESS_PAIRS,), + allowed_address_pairs=address_pairs) + self.deserialize(self.fmt, res) + self.assertEqual(res.status_int, ret_code) + + def test_update_add_address_pairs(self): + with self.network() as net: + res = self._create_port(self.fmt, net['network']['id']) + port = self.deserialize(self.fmt, res) + address_pairs = [{'mac_address': '00:00:00:00:00:01', + 'ip_address': '10.0.0.1'}] + update_port = {'port': {addr_pair.ADDRESS_PAIRS: + address_pairs}} + req = self.new_update_request('ports', update_port, + port['port']['id']) + port = self.deserialize(self.fmt, req.get_response(self.api)) + self.assertEqual(port['port'][addr_pair.ADDRESS_PAIRS], + address_pairs) + self._delete('ports', port['port']['id']) + + def test_create_address_gets_port_mac(self): + with self.network() as net: + address_pairs = [{'ip_address': '23.23.23.23'}] + res = self._create_port(self.fmt, net['network']['id'], + 
arg_list=('port_security_enabled', + addr_pair.ADDRESS_PAIRS,), + allowed_address_pairs=address_pairs) + port = self.deserialize(self.fmt, res)['port'] + port_addr_mac = port[addr_pair.ADDRESS_PAIRS][0]['mac_address'] + self.assertEqual(port_addr_mac, + port['mac_address']) + self._delete('ports', port['id']) + + def test_update_port_security_off_address_pairs(self): + if self._skip_port_security: + self.skipTest("Plugin does not implement port-security extension") + with self.network() as net: + with self.subnet(network=net): + address_pairs = [{'mac_address': '00:00:00:00:00:01', + 'ip_address': '10.0.0.1'}] + res = self._create_port(self.fmt, net['network']['id'], + arg_list=('port_security_enabled', + addr_pair.ADDRESS_PAIRS,), + port_security_enabled=True, + allowed_address_pairs=address_pairs) + port = self.deserialize(self.fmt, res) + update_port = {'port': {psec.PORTSECURITY: False}} + # If plugin implements security groups we also need to remove + # the security group on port. + plugin_obj = manager.NeutronManager.get_plugin() + if 'security-groups' in plugin_obj.supported_extension_aliases: + update_port['port']['security_groups'] = [] + req = self.new_update_request('ports', update_port, + port['port']['id']) + res = req.get_response(self.api) + self.assertEqual(res.status_int, 409) + self._delete('ports', port['port']['id']) + + def test_create_port_remove_allowed_address_pairs(self): + with self.network() as net: + address_pairs = [{'mac_address': '00:00:00:00:00:01', + 'ip_address': '10.0.0.1'}] + res = self._create_port(self.fmt, net['network']['id'], + arg_list=(addr_pair.ADDRESS_PAIRS,), + allowed_address_pairs=address_pairs) + port = self.deserialize(self.fmt, res) + update_port = {'port': {addr_pair.ADDRESS_PAIRS: []}} + req = self.new_update_request('ports', update_port, + port['port']['id']) + port = self.deserialize(self.fmt, req.get_response(self.api)) + self.assertEqual(port['port'][addr_pair.ADDRESS_PAIRS], []) + self._delete('ports', 
port['port']['id']) + + +class TestAllowedAddressPairsXML(TestAllowedAddressPairs): + fmt = 'xml' diff --git a/neutron/tests/unit/test_extension_ext_gw_mode.py b/neutron/tests/unit/test_extension_ext_gw_mode.py new file mode 100644 index 000000000..cabb1428e --- /dev/null +++ b/neutron/tests/unit/test_extension_ext_gw_mode.py @@ -0,0 +1,421 @@ +# Copyright 2013 VMware, Inc. +# All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +import mock +from oslo.config import cfg +from webob import exc + +from neutron.common import constants +from neutron.db import api as db_api +from neutron.db import external_net_db +from neutron.db import l3_db +from neutron.db import l3_gwmode_db +from neutron.db import models_v2 +from neutron.extensions import l3 +from neutron.extensions import l3_ext_gw_mode +from neutron.openstack.common import uuidutils +from neutron.tests import base +from neutron.tests.unit import test_db_plugin +from neutron.tests.unit import test_l3_plugin + +_uuid = uuidutils.generate_uuid +FAKE_GW_PORT_ID = _uuid() +FAKE_GW_PORT_MAC = 'aa:bb:cc:dd:ee:ff' +FAKE_FIP_EXT_PORT_ID = _uuid() +FAKE_FIP_EXT_PORT_MAC = '11:22:33:44:55:66' +FAKE_FIP_INT_PORT_ID = _uuid() +FAKE_FIP_INT_PORT_MAC = 'aa:aa:aa:aa:aa:aa' +FAKE_ROUTER_PORT_ID = _uuid() +FAKE_ROUTER_PORT_MAC = 'bb:bb:bb:bb:bb:bb' + + +class TestExtensionManager(object): + + def get_resources(self): + # Simulate extension of L3 attribute map + for key in l3.RESOURCE_ATTRIBUTE_MAP.keys(): + 
l3.RESOURCE_ATTRIBUTE_MAP[key].update( + l3_ext_gw_mode.EXTENDED_ATTRIBUTES_2_0.get(key, {})) + return l3.L3.get_resources() + + def get_actions(self): + return [] + + def get_request_extensions(self): + return [] + + +# A simple class for making a concrete class out of the mixin +# for the case of a plugin that integrates l3 routing. +class TestDbIntPlugin(test_l3_plugin.TestL3NatIntPlugin, + l3_gwmode_db.L3_NAT_db_mixin): + + supported_extension_aliases = ["external-net", "router", "ext-gw-mode"] + + +# A simple class for making a concrete class out of the mixin +# for the case of a l3 router service plugin +class TestDbSepPlugin(test_l3_plugin.TestL3NatServicePlugin, + l3_gwmode_db.L3_NAT_db_mixin): + + supported_extension_aliases = ["router", "ext-gw-mode"] + + +class TestL3GwModeMixin(base.BaseTestCase): + + def setUp(self): + super(TestL3GwModeMixin, self).setUp() + plugin = __name__ + '.' + TestDbIntPlugin.__name__ + self.setup_coreplugin(plugin) + self.target_object = TestDbIntPlugin() + # Patch the context + ctx_patcher = mock.patch('neutron.context', autospec=True) + mock_context = ctx_patcher.start() + self.addCleanup(db_api.clear_db) + self.context = mock_context.get_admin_context() + # This ensure also calls to elevated work in unit tests + self.context.elevated.return_value = self.context + self.context.session = db_api.get_session() + # Create sample data for tests + self.ext_net_id = _uuid() + self.int_net_id = _uuid() + self.int_sub_id = _uuid() + self.tenant_id = 'the_tenant' + self.network = models_v2.Network( + id=self.ext_net_id, + tenant_id=self.tenant_id, + admin_state_up=True, + status=constants.NET_STATUS_ACTIVE) + self.net_ext = external_net_db.ExternalNetwork( + network_id=self.ext_net_id) + self.context.session.add(self.network) + # The following is to avoid complains from sqlite on + # foreign key violations + self.context.session.flush() + self.context.session.add(self.net_ext) + self.router = l3_db.Router( + id=_uuid(), + name=None, + 
tenant_id=self.tenant_id, + admin_state_up=True, + status=constants.NET_STATUS_ACTIVE, + enable_snat=True, + gw_port_id=None) + self.context.session.add(self.router) + self.context.session.flush() + self.router_gw_port = models_v2.Port( + id=FAKE_GW_PORT_ID, + tenant_id=self.tenant_id, + device_id=self.router.id, + device_owner=l3_db.DEVICE_OWNER_ROUTER_GW, + admin_state_up=True, + status=constants.PORT_STATUS_ACTIVE, + mac_address=FAKE_GW_PORT_MAC, + network_id=self.ext_net_id) + self.router.gw_port_id = self.router_gw_port.id + self.context.session.add(self.router) + self.context.session.add(self.router_gw_port) + self.context.session.flush() + self.fip_ext_port = models_v2.Port( + id=FAKE_FIP_EXT_PORT_ID, + tenant_id=self.tenant_id, + admin_state_up=True, + device_id=self.router.id, + device_owner=l3_db.DEVICE_OWNER_FLOATINGIP, + status=constants.PORT_STATUS_ACTIVE, + mac_address=FAKE_FIP_EXT_PORT_MAC, + network_id=self.ext_net_id) + self.context.session.add(self.fip_ext_port) + self.context.session.flush() + self.int_net = models_v2.Network( + id=self.int_net_id, + tenant_id=self.tenant_id, + admin_state_up=True, + status=constants.NET_STATUS_ACTIVE) + self.int_sub = models_v2.Subnet( + id=self.int_sub_id, + tenant_id=self.tenant_id, + ip_version=4, + cidr='3.3.3.0/24', + gateway_ip='3.3.3.1', + network_id=self.int_net_id) + self.router_port = models_v2.Port( + id=FAKE_ROUTER_PORT_ID, + tenant_id=self.tenant_id, + admin_state_up=True, + device_id=self.router.id, + device_owner=l3_db.DEVICE_OWNER_ROUTER_INTF, + status=constants.PORT_STATUS_ACTIVE, + mac_address=FAKE_ROUTER_PORT_MAC, + network_id=self.int_net_id) + self.router_port_ip_info = models_v2.IPAllocation( + port_id=self.router_port.id, + network_id=self.int_net.id, + subnet_id=self.int_sub_id, + ip_address='3.3.3.1') + self.context.session.add(self.int_net) + self.context.session.add(self.int_sub) + self.context.session.add(self.router_port) + self.context.session.add(self.router_port_ip_info) + 
self.context.session.flush() + self.fip_int_port = models_v2.Port( + id=FAKE_FIP_INT_PORT_ID, + tenant_id=self.tenant_id, + admin_state_up=True, + device_id='something', + device_owner='compute:nova', + status=constants.PORT_STATUS_ACTIVE, + mac_address=FAKE_FIP_INT_PORT_MAC, + network_id=self.int_net_id) + self.fip_int_ip_info = models_v2.IPAllocation( + port_id=self.fip_int_port.id, + network_id=self.int_net.id, + subnet_id=self.int_sub_id, + ip_address='3.3.3.3') + self.fip = l3_db.FloatingIP( + id=_uuid(), + floating_ip_address='1.1.1.2', + floating_network_id=self.ext_net_id, + floating_port_id=FAKE_FIP_EXT_PORT_ID, + fixed_port_id=None, + fixed_ip_address=None, + router_id=None) + self.context.session.add(self.fip_int_port) + self.context.session.add(self.fip_int_ip_info) + self.context.session.add(self.fip) + self.context.session.flush() + self.fip_request = {'port_id': FAKE_FIP_INT_PORT_ID, + 'tenant_id': self.tenant_id} + + def _reset_ext_gw(self): + # Reset external gateway + self.router.gw_port_id = None + self.context.session.add(self.router) + self.context.session.flush() + + def _test_update_router_gw(self, gw_info, expected_enable_snat): + self.target_object._update_router_gw_info( + self.context, self.router.id, gw_info) + router = self.target_object._get_router( + self.context, self.router.id) + try: + self.assertEqual(FAKE_GW_PORT_ID, + router.gw_port.id) + self.assertEqual(FAKE_GW_PORT_MAC, + router.gw_port.mac_address) + except AttributeError: + self.assertIsNone(router.gw_port) + self.assertEqual(expected_enable_snat, router.enable_snat) + + def test_update_router_gw_with_gw_info_none(self): + self._test_update_router_gw(None, True) + + def test_update_router_gw_with_network_only(self): + info = {'network_id': self.ext_net_id} + self._test_update_router_gw(info, True) + + def test_update_router_gw_with_snat_disabled(self): + info = {'network_id': self.ext_net_id, + 'enable_snat': False} + self._test_update_router_gw(info, False) + + def 
test_make_router_dict_no_ext_gw(self): + self._reset_ext_gw() + router_dict = self.target_object._make_router_dict(self.router) + self.assertIsNone(router_dict[l3.EXTERNAL_GW_INFO]) + + def test_make_router_dict_with_ext_gw(self): + router_dict = self.target_object._make_router_dict(self.router) + self.assertEqual({'network_id': self.ext_net_id, + 'enable_snat': True}, + router_dict[l3.EXTERNAL_GW_INFO]) + + def test_make_router_dict_with_ext_gw_snat_disabled(self): + self.router.enable_snat = False + router_dict = self.target_object._make_router_dict(self.router) + self.assertEqual({'network_id': self.ext_net_id, + 'enable_snat': False}, + router_dict[l3.EXTERNAL_GW_INFO]) + + def test_build_routers_list_no_ext_gw(self): + self._reset_ext_gw() + router_dict = self.target_object._make_router_dict(self.router) + routers = self.target_object._build_routers_list([router_dict], []) + self.assertEqual(1, len(routers)) + router = routers[0] + self.assertIsNone(router.get('gw_port')) + self.assertIsNone(router.get('enable_snat')) + + def test_build_routers_list_with_ext_gw(self): + router_dict = self.target_object._make_router_dict(self.router) + routers = self.target_object._build_routers_list( + [router_dict], [self.router.gw_port]) + self.assertEqual(1, len(routers)) + router = routers[0] + self.assertIsNotNone(router.get('gw_port')) + self.assertEqual(FAKE_GW_PORT_ID, router['gw_port']['id']) + self.assertTrue(router.get('enable_snat')) + + def test_build_routers_list_with_ext_gw_snat_disabled(self): + self.router.enable_snat = False + router_dict = self.target_object._make_router_dict(self.router) + routers = self.target_object._build_routers_list( + [router_dict], [self.router.gw_port]) + self.assertEqual(1, len(routers)) + router = routers[0] + self.assertIsNotNone(router.get('gw_port')) + self.assertEqual(FAKE_GW_PORT_ID, router['gw_port']['id']) + self.assertFalse(router.get('enable_snat')) + + +class ExtGwModeIntTestCase(test_db_plugin.NeutronDbPluginV2TestCase, 
+ test_l3_plugin.L3NatTestCaseMixin): + + def setUp(self, plugin=None, svc_plugins=None, ext_mgr=None): + # Store l3 resource attribute map as it will be updated + self._l3_attribute_map_bk = {} + for item in l3.RESOURCE_ATTRIBUTE_MAP: + self._l3_attribute_map_bk[item] = ( + l3.RESOURCE_ATTRIBUTE_MAP[item].copy()) + plugin = plugin or ( + 'neutron.tests.unit.test_extension_ext_gw_mode.TestDbIntPlugin') + # for these tests we need to enable overlapping ips + cfg.CONF.set_default('allow_overlapping_ips', True) + ext_mgr = ext_mgr or TestExtensionManager() + super(ExtGwModeIntTestCase, self).setUp(plugin=plugin, + ext_mgr=ext_mgr, + service_plugins=svc_plugins) + self.addCleanup(self.restore_l3_attribute_map) + + def restore_l3_attribute_map(self): + l3.RESOURCE_ATTRIBUTE_MAP = self._l3_attribute_map_bk + + def tearDown(self): + super(ExtGwModeIntTestCase, self).tearDown() + + def _set_router_external_gateway(self, router_id, network_id, + snat_enabled=None, + expected_code=exc.HTTPOk.code, + neutron_context=None): + ext_gw_info = {'network_id': network_id} + # Need to set enable_snat also if snat_enabled == False + if snat_enabled is not None: + ext_gw_info['enable_snat'] = snat_enabled + return self._update('routers', router_id, + {'router': {'external_gateway_info': + ext_gw_info}}, + expected_code=expected_code, + neutron_context=neutron_context) + + def test_router_create_show_no_ext_gwinfo(self): + name = 'router1' + tenant_id = _uuid() + expected_value = [('name', name), ('tenant_id', tenant_id), + ('admin_state_up', True), ('status', 'ACTIVE'), + ('external_gateway_info', None)] + with self.router(name=name, admin_state_up=True, + tenant_id=tenant_id) as router: + res = self._show('routers', router['router']['id']) + for k, v in expected_value: + self.assertEqual(res['router'][k], v) + + def _test_router_create_show_ext_gwinfo(self, snat_input_value, + snat_expected_value): + name = 'router1' + tenant_id = _uuid() + with self.subnet() as s: + ext_net_id = 
s['subnet']['network_id'] + self._set_net_external(ext_net_id) + input_value = {'network_id': ext_net_id} + if snat_input_value in (True, False): + input_value['enable_snat'] = snat_input_value + expected_value = [('name', name), ('tenant_id', tenant_id), + ('admin_state_up', True), ('status', 'ACTIVE'), + ('external_gateway_info', + {'network_id': ext_net_id, + 'enable_snat': snat_expected_value})] + with self.router( + name=name, admin_state_up=True, tenant_id=tenant_id, + external_gateway_info=input_value) as router: + res = self._show('routers', router['router']['id']) + for k, v in expected_value: + self.assertEqual(res['router'][k], v) + + def test_router_create_show_ext_gwinfo_default(self): + self._test_router_create_show_ext_gwinfo(None, True) + + def test_router_create_show_ext_gwinfo_with_snat_enabled(self): + self._test_router_create_show_ext_gwinfo(True, True) + + def test_router_create_show_ext_gwinfo_with_snat_disabled(self): + self._test_router_create_show_ext_gwinfo(False, False) + + def _test_router_update_ext_gwinfo(self, snat_input_value, + snat_expected_value=False, + expected_http_code=exc.HTTPOk.code): + with self.router() as r: + with self.subnet() as s: + try: + ext_net_id = s['subnet']['network_id'] + self._set_net_external(ext_net_id) + self._set_router_external_gateway( + r['router']['id'], ext_net_id, + snat_enabled=snat_input_value, + expected_code=expected_http_code) + if expected_http_code != exc.HTTPOk.code: + return + body = self._show('routers', r['router']['id']) + res_gw_info = body['router']['external_gateway_info'] + self.assertEqual(res_gw_info['network_id'], ext_net_id) + self.assertEqual(res_gw_info['enable_snat'], + snat_expected_value) + finally: + self._remove_external_gateway_from_router( + r['router']['id'], ext_net_id) + + def test_router_update_ext_gwinfo_default(self): + self._test_router_update_ext_gwinfo(None, True) + + def test_router_update_ext_gwinfo_with_snat_enabled(self): + 
self._test_router_update_ext_gwinfo(True, True) + + def test_router_update_ext_gwinfo_with_snat_disabled(self): + self._test_router_update_ext_gwinfo(False, False) + + def test_router_update_ext_gwinfo_with_invalid_snat_setting(self): + self._test_router_update_ext_gwinfo( + 'xxx', None, expected_http_code=exc.HTTPBadRequest.code) + + +class ExtGwModeSepTestCase(ExtGwModeIntTestCase): + + def setUp(self, plugin=None): + # Store l3 resource attribute map as it will be updated + self._l3_attribute_map_bk = {} + for item in l3.RESOURCE_ATTRIBUTE_MAP: + self._l3_attribute_map_bk[item] = ( + l3.RESOURCE_ATTRIBUTE_MAP[item].copy()) + plugin = plugin or ( + 'neutron.tests.unit.test_l3_plugin.TestNoL3NatPlugin') + # the L3 service plugin + l3_plugin = ('neutron.tests.unit.test_extension_ext_gw_mode.' + 'TestDbSepPlugin') + svc_plugins = {'l3_plugin_name': l3_plugin} + # for these tests we need to enable overlapping ips + cfg.CONF.set_default('allow_overlapping_ips', True) + super(ExtGwModeSepTestCase, self).setUp(plugin=plugin, + svc_plugins=svc_plugins) + self.addCleanup(self.restore_l3_attribute_map) diff --git a/neutron/tests/unit/test_extension_ext_net.py b/neutron/tests/unit/test_extension_ext_net.py new file mode 100644 index 000000000..b525b9dd6 --- /dev/null +++ b/neutron/tests/unit/test_extension_ext_net.py @@ -0,0 +1,176 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2013 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +import contextlib +import itertools + +import mock +import testtools +from webob import exc + +from neutron import context +from neutron.db import models_v2 +from neutron.extensions import external_net as external_net +from neutron import manager +from neutron.openstack.common import log as logging +from neutron.openstack.common import uuidutils +from neutron.tests.unit import test_api_v2 +from neutron.tests.unit import test_db_plugin + + +LOG = logging.getLogger(__name__) + +_uuid = uuidutils.generate_uuid +_get_path = test_api_v2._get_path + + +class ExtNetTestExtensionManager(object): + + def get_resources(self): + return [] + + def get_actions(self): + return [] + + def get_request_extensions(self): + return [] + + +class ExtNetDBTestCase(test_db_plugin.NeutronDbPluginV2TestCase): + + def _create_network(self, fmt, name, admin_state_up, **kwargs): + """Override the routine for allowing the router:external attribute.""" + # attributes containing a colon should be passed with + # a double underscore + new_args = dict(itertools.izip(map(lambda x: x.replace('__', ':'), + kwargs), + kwargs.values())) + arg_list = new_args.pop('arg_list', ()) + (external_net.EXTERNAL,) + return super(ExtNetDBTestCase, self)._create_network( + fmt, name, admin_state_up, arg_list=arg_list, **new_args) + + def setUp(self): + plugin = 'neutron.tests.unit.test_l3_plugin.TestNoL3NatPlugin' + ext_mgr = ExtNetTestExtensionManager() + super(ExtNetDBTestCase, self).setUp(plugin=plugin, ext_mgr=ext_mgr) + + def _set_net_external(self, net_id): + self._update('networks', net_id, + {'network': {external_net.EXTERNAL: True}}) + + def test_list_nets_external(self): + with self.network() as n1: + self._set_net_external(n1['network']['id']) + with self.network(): + body = self._list('networks') + self.assertEqual(len(body['networks']), 2) + + body = self._list('networks', + 
query_params="%s=True" % + external_net.EXTERNAL) + self.assertEqual(len(body['networks']), 1) + + body = self._list('networks', + query_params="%s=False" % + external_net.EXTERNAL) + self.assertEqual(len(body['networks']), 1) + + def test_list_nets_external_pagination(self): + if self._skip_native_pagination: + self.skipTest("Skip test for not implemented pagination feature") + with contextlib.nested(self.network(name='net1'), + self.network(name='net3')) as (n1, n3): + self._set_net_external(n1['network']['id']) + self._set_net_external(n3['network']['id']) + with self.network(name='net2') as n2: + self._test_list_with_pagination( + 'network', (n1, n3), ('name', 'asc'), 1, 3, + query_params='router:external=True') + self._test_list_with_pagination( + 'network', (n2, ), ('name', 'asc'), 1, 2, + query_params='router:external=False') + + def test_get_network_succeeds_without_filter(self): + plugin = manager.NeutronManager.get_plugin() + ctx = context.Context(None, None, is_admin=True) + result = plugin.get_networks(ctx, filters=None) + self.assertEqual(result, []) + + def test_network_filter_hook_admin_context(self): + plugin = manager.NeutronManager.get_plugin() + ctx = context.Context(None, None, is_admin=True) + model = models_v2.Network + conditions = plugin._network_filter_hook(ctx, model, []) + self.assertEqual(conditions, []) + + def test_network_filter_hook_nonadmin_context(self): + plugin = manager.NeutronManager.get_plugin() + ctx = context.Context('edinson', 'cavani') + model = models_v2.Network + txt = "externalnetworks.network_id IS NOT NULL" + conditions = plugin._network_filter_hook(ctx, model, []) + self.assertEqual(conditions.__str__(), txt) + # Try to concatenate conditions + conditions = plugin._network_filter_hook(ctx, model, conditions) + self.assertEqual(conditions.__str__(), "%s OR %s" % (txt, txt)) + + def test_create_port_external_network_non_admin_fails(self): + with self.network(router__external=True) as ext_net: + with 
self.subnet(network=ext_net) as ext_subnet: + with testtools.ExpectedException( + exc.HTTPClientError) as ctx_manager: + with self.port(subnet=ext_subnet, + set_context='True', + tenant_id='noadmin'): + pass + self.assertEqual(ctx_manager.exception.code, 403) + + def test_create_port_external_network_admin_succeeds(self): + with self.network(router__external=True) as ext_net: + with self.subnet(network=ext_net) as ext_subnet: + with self.port(subnet=ext_subnet) as port: + self.assertEqual(port['port']['network_id'], + ext_net['network']['id']) + + def test_create_external_network_non_admin_fails(self): + with testtools.ExpectedException(exc.HTTPClientError) as ctx_manager: + with self.network(router__external=True, + set_context='True', + tenant_id='noadmin'): + pass + self.assertEqual(ctx_manager.exception.code, 403) + + def test_create_external_network_admin_succeeds(self): + with self.network(router__external=True) as ext_net: + self.assertEqual(ext_net['network'][external_net.EXTERNAL], + True) + + def test_delete_network_check_disassociated_floatingips(self): + with mock.patch.object(manager.NeutronManager, + 'get_service_plugins') as srv_plugins: + l3_mock = mock.Mock() + srv_plugins.return_value = {'L3_ROUTER_NAT': l3_mock} + with self.network(do_delete=False) as net: + req = self.new_delete_request('networks', net['network']['id']) + res = req.get_response(self.api) + self.assertEqual(res.status_int, exc.HTTPNoContent.code) + (l3_mock.delete_disassociated_floatingips + .assert_called_once_with(mock.ANY, net['network']['id'])) + + +class ExtNetDBTestCaseXML(ExtNetDBTestCase): + fmt = 'xml' diff --git a/neutron/tests/unit/test_extension_extended_attribute.py b/neutron/tests/unit/test_extension_extended_attribute.py new file mode 100644 index 000000000..5c4ad4038 --- /dev/null +++ b/neutron/tests/unit/test_extension_extended_attribute.py @@ -0,0 +1,156 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 VMware, Inc +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Unit tests for extension extended attribute +""" + +from oslo.config import cfg +import webob.exc as webexc + +import neutron +from neutron.api import extensions +from neutron.api.v2 import attributes +from neutron.common import config +from neutron import manager +from neutron.plugins.common import constants +from neutron.plugins.openvswitch import ovs_neutron_plugin +from neutron import quota +from neutron.tests import base +from neutron.tests.unit.extensions import extendedattribute as extattr +from neutron.tests.unit import test_api_v2 +from neutron.tests.unit import testlib_api +from neutron import wsgi + +_uuid = test_api_v2._uuid +_get_path = test_api_v2._get_path +extensions_path = ':'.join(neutron.tests.unit.extensions.__path__) + + +class ExtensionExtendedAttributeTestPlugin( + ovs_neutron_plugin.OVSNeutronPluginV2): + + supported_extension_aliases = [ + 'ext-obj-test', "extended-ext-attr" + ] + + def __init__(self, configfile=None): + super(ExtensionExtendedAttributeTestPlugin, self) + self.objs = [] + self.objh = {} + + def create_ext_test_resource(self, context, ext_test_resource): + obj = ext_test_resource['ext_test_resource'] + id = _uuid() + obj['id'] = id + self.objs.append(obj) + self.objh.update({id: obj}) + return obj + + def get_ext_test_resources(self, context, filters=None, fields=None): + return self.objs + + def get_ext_test_resource(self, context, id, fields=None): + return self.objh[id] 
+ + +class ExtensionExtendedAttributeTestCase(base.BaseTestCase): + def setUp(self): + super(ExtensionExtendedAttributeTestCase, self).setUp() + plugin = ( + "neutron.tests.unit.test_extension_extended_attribute." + "ExtensionExtendedAttributeTestPlugin" + ) + + # point config file to: neutron/tests/etc/neutron.conf.test + self.config_parse() + + self.setup_coreplugin(plugin) + + ext_mgr = extensions.PluginAwareExtensionManager( + extensions_path, + {constants.CORE: ExtensionExtendedAttributeTestPlugin} + ) + ext_mgr.extend_resources("2.0", {}) + extensions.PluginAwareExtensionManager._instance = ext_mgr + + app = config.load_paste_app('extensions_test_app') + self._api = extensions.ExtensionMiddleware(app, ext_mgr=ext_mgr) + + self._tenant_id = "8c70909f-b081-452d-872b-df48e6c355d1" + # Save the global RESOURCE_ATTRIBUTE_MAP + self.saved_attr_map = {} + for resource, attrs in attributes.RESOURCE_ATTRIBUTE_MAP.iteritems(): + self.saved_attr_map[resource] = attrs.copy() + # Add the resources to the global attribute map + # This is done here as the setup process won't + # initialize the main API router which extends + # the global attribute map + attributes.RESOURCE_ATTRIBUTE_MAP.update( + extattr.EXTENDED_ATTRIBUTES_2_0) + self.agentscheduler_dbMinxin = manager.NeutronManager.get_plugin() + self.addCleanup(self.restore_attribute_map) + + quota.QUOTAS._driver = None + cfg.CONF.set_override('quota_driver', 'neutron.quota.ConfDriver', + group='QUOTAS') + + def restore_attribute_map(self): + # Restore the original RESOURCE_ATTRIBUTE_MAP + attributes.RESOURCE_ATTRIBUTE_MAP = self.saved_attr_map + + def _do_request(self, method, path, data=None, params=None, action=None): + content_type = 'application/json' + body = None + if data is not None: # empty dict is valid + body = wsgi.Serializer().serialize(data, content_type) + + req = testlib_api.create_request( + path, body, content_type, + method, query_string=params) + res = req.get_response(self._api) + if res.status_code 
>= 400: + raise webexc.HTTPClientError(detail=res.body, code=res.status_code) + if res.status_code != webexc.HTTPNoContent.code: + return res.json + + def _ext_test_resource_create(self, attr=None): + data = { + "ext_test_resource": { + "tenant_id": self._tenant_id, + "name": "test", + extattr.EXTENDED_ATTRIBUTE: attr + } + } + + res = self._do_request('POST', _get_path('ext_test_resources'), data) + return res['ext_test_resource'] + + def test_ext_test_resource_create(self): + ext_test_resource = self._ext_test_resource_create() + attr = _uuid() + ext_test_resource = self._ext_test_resource_create(attr) + self.assertEqual(ext_test_resource[extattr.EXTENDED_ATTRIBUTE], attr) + + def test_ext_test_resource_get(self): + attr = _uuid() + obj = self._ext_test_resource_create(attr) + obj_id = obj['id'] + res = self._do_request('GET', _get_path( + 'ext_test_resources/{0}'.format(obj_id))) + obj2 = res['ext_test_resource'] + self.assertEqual(obj2[extattr.EXTENDED_ATTRIBUTE], attr) diff --git a/neutron/tests/unit/test_extension_extradhcpopts.py b/neutron/tests/unit/test_extension_extradhcpopts.py new file mode 100644 index 000000000..8d0f1e26c --- /dev/null +++ b/neutron/tests/unit/test_extension_extradhcpopts.py @@ -0,0 +1,266 @@ +# Copyright (c) 2013 OpenStack Foundation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# @author: D.E. 
Kehn, dekehn@gmail.com +# + +import copy +import webob.exc + +from neutron.db import db_base_plugin_v2 +from neutron.db import extradhcpopt_db as edo_db +from neutron.extensions import extra_dhcp_opt as edo_ext +from neutron.openstack.common import log as logging +from neutron.tests.unit import test_db_plugin + +LOG = logging.getLogger(__name__) + +DB_PLUGIN_KLASS = ( + 'neutron.tests.unit.test_extension_extradhcpopts.ExtraDhcpOptTestPlugin') + + +class ExtraDhcpOptTestPlugin(db_base_plugin_v2.NeutronDbPluginV2, + edo_db.ExtraDhcpOptMixin): + """Test plugin that implements necessary calls on create/delete port for + associating ports with extra dhcp options. + """ + + supported_extension_aliases = ["extra_dhcp_opt"] + + def create_port(self, context, port): + with context.session.begin(subtransactions=True): + edos = port['port'].get(edo_ext.EXTRADHCPOPTS, []) + new_port = super(ExtraDhcpOptTestPlugin, self).create_port( + context, port) + self._process_port_create_extra_dhcp_opts(context, new_port, edos) + return new_port + + def update_port(self, context, id, port): + with context.session.begin(subtransactions=True): + rtn_port = super(ExtraDhcpOptTestPlugin, self).update_port( + context, id, port) + self._update_extra_dhcp_opts_on_port(context, id, port, rtn_port) + return rtn_port + + +class ExtraDhcpOptDBTestCase(test_db_plugin.NeutronDbPluginV2TestCase): + + def setUp(self, plugin=DB_PLUGIN_KLASS): + super(ExtraDhcpOptDBTestCase, self).setUp(plugin=plugin) + + +class TestExtraDhcpOpt(ExtraDhcpOptDBTestCase): + def _check_opts(self, expected, returned): + self.assertEqual(len(expected), len(returned)) + for opt in returned: + name = opt['opt_name'] + for exp in expected: + if name == exp['opt_name']: + val = exp['opt_value'] + break + self.assertEqual(opt['opt_value'], val) + + def test_create_port_with_extradhcpopts(self): + opt_list = [{'opt_name': 'bootfile-name', + 'opt_value': 'pxelinux.0'}, + {'opt_name': 'server-ip-address', + 'opt_value': 
'123.123.123.456'}, + {'opt_name': 'tftp-server', + 'opt_value': '123.123.123.123'}] + + params = {edo_ext.EXTRADHCPOPTS: opt_list, + 'arg_list': (edo_ext.EXTRADHCPOPTS,)} + + with self.port(**params) as port: + self._check_opts(opt_list, + port['port'][edo_ext.EXTRADHCPOPTS]) + + def test_create_port_with_none_extradhcpopts(self): + opt_list = [{'opt_name': 'bootfile-name', + 'opt_value': None}, + {'opt_name': 'server-ip-address', + 'opt_value': '123.123.123.456'}, + {'opt_name': 'tftp-server', + 'opt_value': '123.123.123.123'}] + expected = [{'opt_name': 'server-ip-address', + 'opt_value': '123.123.123.456'}, + {'opt_name': 'tftp-server', + 'opt_value': '123.123.123.123'}] + + params = {edo_ext.EXTRADHCPOPTS: opt_list, + 'arg_list': (edo_ext.EXTRADHCPOPTS,)} + + with self.port(**params) as port: + self._check_opts(expected, + port['port'][edo_ext.EXTRADHCPOPTS]) + + def test_update_port_with_extradhcpopts_with_same(self): + opt_list = [{'opt_name': 'bootfile-name', 'opt_value': 'pxelinux.0'}, + {'opt_name': 'tftp-server', + 'opt_value': '123.123.123.123'}, + {'opt_name': 'server-ip-address', + 'opt_value': '123.123.123.456'}] + upd_opts = [{'opt_name': 'bootfile-name', 'opt_value': 'changeme.0'}] + expected_opts = opt_list[:] + for i in expected_opts: + if i['opt_name'] == upd_opts[0]['opt_name']: + i['opt_value'] = upd_opts[0]['opt_value'] + break + + params = {edo_ext.EXTRADHCPOPTS: opt_list, + 'arg_list': (edo_ext.EXTRADHCPOPTS,)} + + with self.port(**params) as port: + update_port = {'port': {edo_ext.EXTRADHCPOPTS: upd_opts}} + + req = self.new_update_request('ports', update_port, + port['port']['id']) + port = self.deserialize('json', req.get_response(self.api)) + self._check_opts(expected_opts, + port['port'][edo_ext.EXTRADHCPOPTS]) + + def test_update_port_with_additional_extradhcpopt(self): + opt_list = [{'opt_name': 'tftp-server', + 'opt_value': '123.123.123.123'}, + {'opt_name': 'server-ip-address', + 'opt_value': '123.123.123.456'}] + upd_opts = 
[{'opt_name': 'bootfile-name', 'opt_value': 'changeme.0'}] + expected_opts = copy.deepcopy(opt_list) + expected_opts.append(upd_opts[0]) + params = {edo_ext.EXTRADHCPOPTS: opt_list, + 'arg_list': (edo_ext.EXTRADHCPOPTS,)} + + with self.port(**params) as port: + update_port = {'port': {edo_ext.EXTRADHCPOPTS: upd_opts}} + + req = self.new_update_request('ports', update_port, + port['port']['id']) + port = self.deserialize('json', req.get_response(self.api)) + self._check_opts(expected_opts, + port['port'][edo_ext.EXTRADHCPOPTS]) + + def test_update_port_with_extradhcpopts(self): + opt_list = [{'opt_name': 'bootfile-name', 'opt_value': 'pxelinux.0'}, + {'opt_name': 'tftp-server', + 'opt_value': '123.123.123.123'}, + {'opt_name': 'server-ip-address', + 'opt_value': '123.123.123.456'}] + upd_opts = [{'opt_name': 'bootfile-name', 'opt_value': 'changeme.0'}] + expected_opts = copy.deepcopy(opt_list) + for i in expected_opts: + if i['opt_name'] == upd_opts[0]['opt_name']: + i['opt_value'] = upd_opts[0]['opt_value'] + break + + params = {edo_ext.EXTRADHCPOPTS: opt_list, + 'arg_list': (edo_ext.EXTRADHCPOPTS,)} + + with self.port(**params) as port: + update_port = {'port': {edo_ext.EXTRADHCPOPTS: upd_opts}} + + req = self.new_update_request('ports', update_port, + port['port']['id']) + port = self.deserialize('json', req.get_response(self.api)) + self._check_opts(expected_opts, + port['port'][edo_ext.EXTRADHCPOPTS]) + + def test_update_port_with_extradhcpopt_delete(self): + opt_list = [{'opt_name': 'bootfile-name', 'opt_value': 'pxelinux.0'}, + {'opt_name': 'tftp-server', + 'opt_value': '123.123.123.123'}, + {'opt_name': 'server-ip-address', + 'opt_value': '123.123.123.456'}] + upd_opts = [{'opt_name': 'bootfile-name', 'opt_value': None}] + expected_opts = [] + + expected_opts = [opt for opt in opt_list + if opt['opt_name'] != 'bootfile-name'] + + params = {edo_ext.EXTRADHCPOPTS: opt_list, + 'arg_list': (edo_ext.EXTRADHCPOPTS,)} + + with self.port(**params) as port: + 
update_port = {'port': {edo_ext.EXTRADHCPOPTS: upd_opts}} + + req = self.new_update_request('ports', update_port, + port['port']['id']) + port = self.deserialize('json', req.get_response(self.api)) + self._check_opts(expected_opts, + port['port'][edo_ext.EXTRADHCPOPTS]) + + def test_update_port_without_extradhcpopt_delete(self): + upd_opts = [{'opt_name': 'bootfile-name', 'opt_value': None}] + + with self.port() as port: + update_port = {'port': {edo_ext.EXTRADHCPOPTS: upd_opts}} + + req = self.new_update_request('ports', update_port, + port['port']['id']) + port = self.deserialize('json', req.get_response(self.api)) + edo_attr = port['port'].get(edo_ext.EXTRADHCPOPTS) + self.assertEqual(edo_attr, []) + + def test_update_port_adding_extradhcpopts(self): + opt_list = [{'opt_name': 'bootfile-name', 'opt_value': 'pxelinux.0'}, + {'opt_name': 'tftp-server', + 'opt_value': '123.123.123.123'}, + {'opt_name': 'server-ip-address', + 'opt_value': '123.123.123.456'}] + with self.port() as port: + update_port = {'port': {edo_ext.EXTRADHCPOPTS: opt_list}} + + req = self.new_update_request('ports', update_port, + port['port']['id']) + port = self.deserialize('json', req.get_response(self.api)) + self._check_opts(opt_list, + port['port'][edo_ext.EXTRADHCPOPTS]) + + def test_update_port_with_blank_string_extradhcpopt(self): + opt_list = [{'opt_name': 'bootfile-name', 'opt_value': 'pxelinux.0'}, + {'opt_name': 'tftp-server', + 'opt_value': '123.123.123.123'}, + {'opt_name': 'server-ip-address', + 'opt_value': '123.123.123.456'}] + upd_opts = [{'opt_name': 'bootfile-name', 'opt_value': ' '}] + + params = {edo_ext.EXTRADHCPOPTS: opt_list, + 'arg_list': (edo_ext.EXTRADHCPOPTS,)} + + with self.port(**params) as port: + update_port = {'port': {edo_ext.EXTRADHCPOPTS: upd_opts}} + + req = self.new_update_request('ports', update_port, + port['port']['id']) + res = req.get_response(self.api) + self.assertEqual(res.status_int, webob.exc.HTTPBadRequest.code) + + def 
test_update_port_with_blank_name_extradhcpopt(self): + opt_list = [{'opt_name': 'bootfile-name', 'opt_value': 'pxelinux.0'}, + {'opt_name': 'tftp-server', + 'opt_value': '123.123.123.123'}, + {'opt_name': 'server-ip-address', + 'opt_value': '123.123.123.456'}] + upd_opts = [{'opt_name': ' ', 'opt_value': 'pxelinux.0'}] + + params = {edo_ext.EXTRADHCPOPTS: opt_list, + 'arg_list': (edo_ext.EXTRADHCPOPTS,)} + + with self.port(**params) as port: + update_port = {'port': {edo_ext.EXTRADHCPOPTS: upd_opts}} + + req = self.new_update_request('ports', update_port, + port['port']['id']) + res = req.get_response(self.api) + self.assertEqual(res.status_int, webob.exc.HTTPBadRequest.code) diff --git a/neutron/tests/unit/test_extension_extraroute.py b/neutron/tests/unit/test_extension_extraroute.py new file mode 100644 index 000000000..826ca80a2 --- /dev/null +++ b/neutron/tests/unit/test_extension_extraroute.py @@ -0,0 +1,500 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013, Nachi Ueno, NTT MCL, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
class ExtraRouteTestExtensionManager(object):
    """Minimal extension manager exposing L3 resources with extra routes."""

    def get_resources(self):
        # Fold the extraroute attributes into the router resource before
        # handing out the standard L3 resources.
        extended = extraroute.EXTENDED_ATTRIBUTES_2_0['routers']
        l3.RESOURCE_ATTRIBUTE_MAP['routers'].update(extended)
        return l3.L3.get_resources()

    def get_actions(self):
        # This manager contributes no extra actions.
        return []

    def get_request_extensions(self):
        # This manager contributes no request extensions.
        return []
[{'destination': '135.207.0.0/16', 'nexthop': '10.0.1.3'}] + with self.router() as r: + with self.subnet(cidr='10.0.1.0/24') as s: + with self.port(subnet=s, no_delete=True) as p: + body = self._routes_update_prepare(r['router']['id'], + None, p['port']['id'], + routes) + self.assertEqual(body['router']['routes'], routes) + self._routes_update_cleanup(p['port']['id'], + None, r['router']['id'], []) + + def test_route_clear_routes_with_None(self): + routes = [{'destination': '135.207.0.0/16', + 'nexthop': '10.0.1.3'}, + {'destination': '12.0.0.0/8', + 'nexthop': '10.0.1.4'}, + {'destination': '141.212.0.0/16', + 'nexthop': '10.0.1.5'}] + with self.router() as r: + with self.subnet(cidr='10.0.1.0/24') as s: + with self.port(subnet=s, no_delete=True) as p: + self._routes_update_prepare(r['router']['id'], + None, p['port']['id'], routes) + body = self._update('routers', r['router']['id'], + {'router': {'routes': None}}) + self.assertEqual(body['router']['routes'], []) + self._routes_update_cleanup(p['port']['id'], + None, r['router']['id'], []) + + def test_router_interface_in_use_by_route(self): + routes = [{'destination': '135.207.0.0/16', + 'nexthop': '10.0.1.3'}] + with self.router() as r: + with self.subnet(cidr='10.0.1.0/24') as s: + with self.port(subnet=s, no_delete=True) as p: + body = self._routes_update_prepare(r['router']['id'], + None, p['port']['id'], + routes) + self.assertEqual(body['router']['routes'], routes) + self._router_interface_action( + 'remove', + r['router']['id'], + None, + p['port']['id'], + expected_code=exc.HTTPConflict.code) + + self._routes_update_cleanup(p['port']['id'], + None, r['router']['id'], []) + + def test_route_update_with_multi_routes(self): + routes = [{'destination': '135.207.0.0/16', + 'nexthop': '10.0.1.3'}, + {'destination': '12.0.0.0/8', + 'nexthop': '10.0.1.4'}, + {'destination': '141.212.0.0/16', + 'nexthop': '10.0.1.5'}] + with self.router() as r: + with self.subnet(cidr='10.0.1.0/24') as s: + with 
self.port(subnet=s, no_delete=True) as p: + body = self._routes_update_prepare(r['router']['id'], + None, p['port']['id'], + routes) + self.assertEqual(sorted(body['router']['routes']), + sorted(routes)) + self._routes_update_cleanup(p['port']['id'], + None, r['router']['id'], []) + + def test_routes_update_for_multiple_routers(self): + routes1 = [{'destination': '135.207.0.0/16', + 'nexthop': '10.0.0.3'}] + routes2 = [{'destination': '12.0.0.0/8', + 'nexthop': '10.0.0.4'}] + with contextlib.nested( + self.router(), + self.router(), + self.subnet(cidr='10.0.0.0/24')) as (r1, r2, s): + with contextlib.nested( + self.port(subnet=s, no_delete=True), + self.port(subnet=s, no_delete=True)) as (p1, p2): + body = self._routes_update_prepare(r1['router']['id'], + None, p1['port']['id'], + routes1) + self.assertEqual(body['router']['routes'], routes1) + + body = self._routes_update_prepare(r2['router']['id'], + None, p2['port']['id'], + routes2) + self.assertEqual(body['router']['routes'], routes2) + + self._routes_update_cleanup(p1['port']['id'], + None, r1['router']['id'], []) + self._routes_update_cleanup(p2['port']['id'], + None, r2['router']['id'], []) + + def test_router_update_delete_routes(self): + routes_orig = [{'destination': '135.207.0.0/16', + 'nexthop': '10.0.1.3'}, + {'destination': '12.0.0.0/8', + 'nexthop': '10.0.1.4'}, + {'destination': '141.212.0.0/16', + 'nexthop': '10.0.1.5'}] + routes_left = [{'destination': '135.207.0.0/16', + 'nexthop': '10.0.1.3'}, + {'destination': '141.212.0.0/16', + 'nexthop': '10.0.1.5'}] + with self.router() as r: + with self.subnet(cidr='10.0.1.0/24') as s: + with self.port(subnet=s, no_delete=True) as p: + body = self._routes_update_prepare(r['router']['id'], + None, p['port']['id'], + routes_orig) + self.assertEqual(sorted(body['router']['routes']), + sorted(routes_orig)) + body = self._routes_update_prepare(r['router']['id'], + None, p['port']['id'], + routes_left, + skip_add=True) + 
self.assertEqual(sorted(body['router']['routes']), + sorted(routes_left)) + self._routes_update_cleanup(p['port']['id'], + None, r['router']['id'], []) + + def _test_malformed_route(self, routes): + with self.router() as r: + with self.subnet(cidr='10.0.1.0/24') as s: + with self.port(subnet=s, no_delete=True) as p: + self._router_interface_action('add', + r['router']['id'], + None, + p['port']['id']) + + self._update('routers', r['router']['id'], + {'router': {'routes': routes}}, + expected_code=exc.HTTPBadRequest.code) + # clean-up + self._router_interface_action('remove', + r['router']['id'], + None, + p['port']['id']) + + def test_no_destination_route(self): + self._test_malformed_route([{'nexthop': '10.0.1.6'}]) + + def test_no_nexthop_route(self): + self._test_malformed_route({'destination': '135.207.0.0/16'}) + + def test_none_destination(self): + self._test_malformed_route([{'destination': None, + 'nexthop': '10.0.1.3'}]) + + def test_none_nexthop(self): + self._test_malformed_route([{'destination': '135.207.0.0/16', + 'nexthop': None}]) + + def test_nexthop_is_port_ip(self): + with self.router() as r: + with self.subnet(cidr='10.0.1.0/24') as s: + with self.port(subnet=s, no_delete=True) as p: + self._router_interface_action('add', + r['router']['id'], + None, + p['port']['id']) + port_ip = p['port']['fixed_ips'][0]['ip_address'] + routes = [{'destination': '135.207.0.0/16', + 'nexthop': port_ip}] + + self._update('routers', r['router']['id'], + {'router': {'routes': + routes}}, + expected_code=exc.HTTPBadRequest.code) + # clean-up + self._router_interface_action('remove', + r['router']['id'], + None, + p['port']['id']) + + def test_router_update_with_too_many_routes(self): + with self.router() as r: + with self.subnet(cidr='10.0.1.0/24') as s: + with self.port(subnet=s, no_delete=True) as p: + self._router_interface_action('add', + r['router']['id'], + None, + p['port']['id']) + + routes = [{'destination': '135.207.0.0/16', + 'nexthop': '10.0.1.3'}, + 
{'destination': '12.0.0.0/8', + 'nexthop': '10.0.1.4'}, + {'destination': '141.212.0.0/16', + 'nexthop': '10.0.1.5'}, + {'destination': '192.168.0.0/16', + 'nexthop': '10.0.1.6'}] + + self._update('routers', r['router']['id'], + {'router': {'routes': + routes}}, + expected_code=exc.HTTPBadRequest.code) + + # clean-up + self._router_interface_action('remove', + r['router']['id'], + None, + p['port']['id']) + + def test_router_update_with_dup_address(self): + with self.router() as r: + with self.subnet(cidr='10.0.1.0/24') as s: + with self.port(subnet=s, no_delete=True) as p: + self._router_interface_action('add', + r['router']['id'], + None, + p['port']['id']) + + routes = [{'destination': '135.207.0.0/16', + 'nexthop': '10.0.1.3'}, + {'destination': '135.207.0.0/16', + 'nexthop': '10.0.1.3'}] + + self._update('routers', r['router']['id'], + {'router': {'routes': + routes}}, + expected_code=exc.HTTPBadRequest.code) + + # clean-up + self._router_interface_action('remove', + r['router']['id'], + None, + p['port']['id']) + + def test_router_update_with_invalid_ip_address(self): + with self.router() as r: + with self.subnet(cidr='10.0.1.0/24') as s: + with self.port(subnet=s, no_delete=True) as p: + self._router_interface_action('add', + r['router']['id'], + None, + p['port']['id']) + + routes = [{'destination': '512.207.0.0/16', + 'nexthop': '10.0.1.3'}] + + self._update('routers', r['router']['id'], + {'router': {'routes': + routes}}, + expected_code=exc.HTTPBadRequest.code) + + routes = [{'destination': '127.207.0.0/48', + 'nexthop': '10.0.1.3'}] + + self._update('routers', r['router']['id'], + {'router': {'routes': + routes}}, + expected_code=exc.HTTPBadRequest.code) + + routes = [{'destination': 'invalid_ip_address', + 'nexthop': '10.0.1.3'}] + + self._update('routers', r['router']['id'], + {'router': {'routes': + routes}}, + expected_code=exc.HTTPBadRequest.code) + + # clean-up + self._router_interface_action('remove', + r['router']['id'], + None, + 
p['port']['id']) + + def test_router_update_with_invalid_nexthop_ip(self): + with self.router() as r: + with self.subnet(cidr='10.0.1.0/24') as s: + with self.port(subnet=s, no_delete=True) as p: + self._router_interface_action('add', + r['router']['id'], + None, + p['port']['id']) + + routes = [{'destination': '127.207.0.0/16', + 'nexthop': ' 300.10.10.4'}] + + self._update('routers', r['router']['id'], + {'router': {'routes': + routes}}, + expected_code=exc.HTTPBadRequest.code) + + # clean-up + self._router_interface_action('remove', + r['router']['id'], + None, + p['port']['id']) + + def test_router_update_with_nexthop_is_outside_port_subnet(self): + with self.router() as r: + with self.subnet(cidr='10.0.1.0/24') as s: + with self.port(subnet=s, no_delete=True) as p: + self._router_interface_action('add', + r['router']['id'], + None, + p['port']['id']) + + routes = [{'destination': '127.207.0.0/16', + 'nexthop': ' 20.10.10.4'}] + + self._update('routers', r['router']['id'], + {'router': {'routes': + routes}}, + expected_code=exc.HTTPBadRequest.code) + + # clean-up + self._router_interface_action('remove', + r['router']['id'], + None, + p['port']['id']) + + def test_router_update_on_external_port(self): + with self.router() as r: + with self.subnet(cidr='10.0.1.0/24') as s: + self._set_net_external(s['subnet']['network_id']) + self._add_external_gateway_to_router( + r['router']['id'], + s['subnet']['network_id']) + body = self._show('routers', r['router']['id']) + net_id = body['router']['external_gateway_info']['network_id'] + self.assertEqual(net_id, s['subnet']['network_id']) + port_res = self._list_ports( + 'json', + 200, + s['subnet']['network_id'], + tenant_id=r['router']['tenant_id'], + device_own=constants.DEVICE_OWNER_ROUTER_GW) + port_list = self.deserialize('json', port_res) + self.assertEqual(len(port_list['ports']), 1) + + routes = [{'destination': '135.207.0.0/16', + 'nexthop': '10.0.1.3'}] + + body = self._update('routers', r['router']['id'], + 
{'router': {'routes': + routes}}) + + body = self._show('routers', r['router']['id']) + self.assertEqual(body['router']['routes'], + routes) + + self._remove_external_gateway_from_router( + r['router']['id'], + s['subnet']['network_id']) + body = self._show('routers', r['router']['id']) + gw_info = body['router']['external_gateway_info'] + self.assertIsNone(gw_info) + + def test_router_list_with_sort(self): + with contextlib.nested(self.router(name='router1'), + self.router(name='router2'), + self.router(name='router3') + ) as (router1, router2, router3): + self._test_list_with_sort('router', (router3, router2, router1), + [('name', 'desc')]) + + def test_router_list_with_pagination(self): + with contextlib.nested(self.router(name='router1'), + self.router(name='router2'), + self.router(name='router3') + ) as (router1, router2, router3): + self._test_list_with_pagination('router', + (router1, router2, router3), + ('name', 'asc'), 2, 2) + + def test_router_list_with_pagination_reverse(self): + with contextlib.nested(self.router(name='router1'), + self.router(name='router2'), + self.router(name='router3') + ) as (router1, router2, router3): + self._test_list_with_pagination_reverse('router', + (router1, router2, + router3), + ('name', 'asc'), 2, 2) + + +class ExtraRouteDBIntTestCase(test_l3.L3NatDBIntTestCase, + ExtraRouteDBTestCaseBase): + + def setUp(self, plugin=None, ext_mgr=None): + if not plugin: + plugin = ('neutron.tests.unit.test_extension_extraroute.' 
class ExtraRouteDBSepTestCase(test_l3.L3NatDBSepTestCase,
                              ExtraRouteDBTestCaseBase):
    """Extra-route tests run against a separate L3 service plugin."""

    def setUp(self):
        # The core plugin has no L3 support; routing is delegated to the
        # extra-route-capable L3 service plugin below.
        plugin = 'neutron.tests.unit.test_l3_plugin.TestNoL3NatPlugin'
        l3_plugin = ('neutron.tests.unit.test_extension_extraroute.'
                     'TestExtraRouteL3NatServicePlugin')
        service_plugins = {'l3_plugin_name': l3_plugin}

        # These tests rely on overlapping IPs and a small route quota.
        cfg.CONF.set_default('allow_overlapping_ips', True)
        cfg.CONF.set_default('max_routes', 3)

        super(test_l3.L3BaseForSepTests, self).setUp(
            plugin=plugin,
            ext_mgr=ExtraRouteTestExtensionManager(),
            service_plugins=service_plugins)

        self.setup_notification_driver()
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +# @author: Sumit Naiksatam, sumitnaiksatam@gmail.com, Big Switch Networks, Inc. + +import copy + +import mock +from webob import exc +import webtest + +from neutron.extensions import firewall +from neutron.openstack.common import uuidutils +from neutron.plugins.common import constants +from neutron.tests import base +from neutron.tests.unit import test_api_v2 +from neutron.tests.unit import test_api_v2_extension + + +_uuid = uuidutils.generate_uuid +_get_path = test_api_v2._get_path + + +class FirewallExtensionTestCase(test_api_v2_extension.ExtensionTestCase): + fmt = 'json' + + def setUp(self): + super(FirewallExtensionTestCase, self).setUp() + plural_mappings = {'firewall_policy': 'firewall_policies'} + self._setUpExtension( + 'neutron.extensions.firewall.FirewallPluginBase', + constants.FIREWALL, firewall.RESOURCE_ATTRIBUTE_MAP, + firewall.Firewall, 'fw', plural_mappings=plural_mappings) + + def test_create_firewall(self): + fw_id = _uuid() + data = {'firewall': {'description': 'descr_firewall1', + 'name': 'firewall1', + 'admin_state_up': True, + 'firewall_policy_id': _uuid(), + 'shared': False, + 'tenant_id': _uuid()}} + return_value = copy.copy(data['firewall']) + return_value.update({'id': fw_id}) + # since 'shared' is hidden + del return_value['shared'] + + instance = self.plugin.return_value + instance.create_firewall.return_value = return_value + res = self.api.post(_get_path('fw/firewalls', fmt=self.fmt), + self.serialize(data), + content_type='application/%s' % self.fmt) + instance.create_firewall.assert_called_with(mock.ANY, + firewall=data) 
+ self.assertEqual(res.status_int, exc.HTTPCreated.code) + res = self.deserialize(res) + self.assertIn('firewall', res) + self.assertEqual(res['firewall'], return_value) + + def test_firewall_list(self): + fw_id = _uuid() + return_value = [{'tenant_id': _uuid(), + 'id': fw_id}] + + instance = self.plugin.return_value + instance.get_firewalls.return_value = return_value + + res = self.api.get(_get_path('fw/firewalls', fmt=self.fmt)) + + instance.get_firewalls.assert_called_with(mock.ANY, + fields=mock.ANY, + filters=mock.ANY) + self.assertEqual(res.status_int, exc.HTTPOk.code) + + def test_firewall_get(self): + fw_id = _uuid() + return_value = {'tenant_id': _uuid(), + 'id': fw_id} + + instance = self.plugin.return_value + instance.get_firewall.return_value = return_value + + res = self.api.get(_get_path('fw/firewalls', + id=fw_id, fmt=self.fmt)) + + instance.get_firewall.assert_called_with(mock.ANY, + fw_id, + fields=mock.ANY) + self.assertEqual(res.status_int, exc.HTTPOk.code) + res = self.deserialize(res) + self.assertIn('firewall', res) + self.assertEqual(res['firewall'], return_value) + + def test_firewall_update(self): + fw_id = _uuid() + update_data = {'firewall': {'name': 'new_name'}} + return_value = {'tenant_id': _uuid(), + 'id': fw_id} + + instance = self.plugin.return_value + instance.update_firewall.return_value = return_value + + res = self.api.put(_get_path('fw/firewalls', id=fw_id, + fmt=self.fmt), + self.serialize(update_data)) + + instance.update_firewall.assert_called_with(mock.ANY, fw_id, + firewall=update_data) + self.assertEqual(res.status_int, exc.HTTPOk.code) + res = self.deserialize(res) + self.assertIn('firewall', res) + self.assertEqual(res['firewall'], return_value) + + def test_firewall_delete(self): + self._test_entity_delete('firewall') + + def _test_create_firewall_rule(self, src_port, dst_port): + rule_id = _uuid() + data = {'firewall_rule': {'description': 'descr_firewall_rule1', + 'name': 'rule1', + 'shared': False, + 'protocol': 
'tcp', + 'ip_version': 4, + 'source_ip_address': '192.168.0.1', + 'destination_ip_address': '127.0.0.1', + 'source_port': src_port, + 'destination_port': dst_port, + 'action': 'allow', + 'enabled': True, + 'tenant_id': _uuid()}} + expected_ret_val = copy.copy(data['firewall_rule']) + expected_ret_val['source_port'] = str(src_port) + expected_ret_val['destination_port'] = str(dst_port) + expected_call_args = copy.copy(expected_ret_val) + expected_ret_val['id'] = rule_id + instance = self.plugin.return_value + instance.create_firewall_rule.return_value = expected_ret_val + res = self.api.post(_get_path('fw/firewall_rules', fmt=self.fmt), + self.serialize(data), + content_type='application/%s' % self.fmt) + instance.create_firewall_rule.assert_called_with(mock.ANY, + firewall_rule= + {'firewall_rule': + expected_call_args}) + self.assertEqual(res.status_int, exc.HTTPCreated.code) + res = self.deserialize(res) + self.assertIn('firewall_rule', res) + self.assertEqual(res['firewall_rule'], expected_ret_val) + + def test_create_firewall_rule_with_integer_ports(self): + self._test_create_firewall_rule(1, 10) + + def test_create_firewall_rule_with_string_ports(self): + self._test_create_firewall_rule('1', '10') + + def test_create_firewall_rule_with_port_range(self): + self._test_create_firewall_rule('1:20', '30:40') + + def test_firewall_rule_list(self): + rule_id = _uuid() + return_value = [{'tenant_id': _uuid(), + 'id': rule_id}] + + instance = self.plugin.return_value + instance.get_firewall_rules.return_value = return_value + + res = self.api.get(_get_path('fw/firewall_rules', fmt=self.fmt)) + + instance.get_firewall_rules.assert_called_with(mock.ANY, + fields=mock.ANY, + filters=mock.ANY) + self.assertEqual(res.status_int, exc.HTTPOk.code) + + def test_firewall_rule_get(self): + rule_id = _uuid() + return_value = {'tenant_id': _uuid(), + 'id': rule_id} + + instance = self.plugin.return_value + instance.get_firewall_rule.return_value = return_value + + res = 
self.api.get(_get_path('fw/firewall_rules', + id=rule_id, fmt=self.fmt)) + + instance.get_firewall_rule.assert_called_with(mock.ANY, + rule_id, + fields=mock.ANY) + self.assertEqual(res.status_int, exc.HTTPOk.code) + res = self.deserialize(res) + self.assertIn('firewall_rule', res) + self.assertEqual(res['firewall_rule'], return_value) + + def test_firewall_rule_update(self): + rule_id = _uuid() + update_data = {'firewall_rule': {'action': 'deny'}} + return_value = {'tenant_id': _uuid(), + 'id': rule_id} + + instance = self.plugin.return_value + instance.update_firewall_rule.return_value = return_value + + res = self.api.put(_get_path('fw/firewall_rules', id=rule_id, + fmt=self.fmt), + self.serialize(update_data)) + + instance.update_firewall_rule.assert_called_with(mock.ANY, + rule_id, + firewall_rule= + update_data) + self.assertEqual(res.status_int, exc.HTTPOk.code) + res = self.deserialize(res) + self.assertIn('firewall_rule', res) + self.assertEqual(res['firewall_rule'], return_value) + + def test_firewall_rule_delete(self): + self._test_entity_delete('firewall_rule') + + def test_create_firewall_policy(self): + policy_id = _uuid() + data = {'firewall_policy': {'description': 'descr_firewall_policy1', + 'name': 'new_fw_policy1', + 'shared': False, + 'firewall_rules': [_uuid(), _uuid()], + 'audited': False, + 'tenant_id': _uuid()}} + return_value = copy.copy(data['firewall_policy']) + return_value.update({'id': policy_id}) + + instance = self.plugin.return_value + instance.create_firewall_policy.return_value = return_value + res = self.api.post(_get_path('fw/firewall_policies', + fmt=self.fmt), + self.serialize(data), + content_type='application/%s' % self.fmt) + instance.create_firewall_policy.assert_called_with(mock.ANY, + firewall_policy= + data) + self.assertEqual(res.status_int, exc.HTTPCreated.code) + res = self.deserialize(res) + self.assertIn('firewall_policy', res) + self.assertEqual(res['firewall_policy'], return_value) + + def 
test_firewall_policy_list(self): + policy_id = _uuid() + return_value = [{'tenant_id': _uuid(), + 'id': policy_id}] + + instance = self.plugin.return_value + instance.get_firewall_policies.return_value = return_value + + res = self.api.get(_get_path('fw/firewall_policies', + fmt=self.fmt)) + + instance.get_firewall_policies.assert_called_with(mock.ANY, + fields=mock.ANY, + filters=mock.ANY) + self.assertEqual(res.status_int, exc.HTTPOk.code) + + def test_firewall_policy_get(self): + policy_id = _uuid() + return_value = {'tenant_id': _uuid(), + 'id': policy_id} + + instance = self.plugin.return_value + instance.get_firewall_policy.return_value = return_value + + res = self.api.get(_get_path('fw/firewall_policies', + id=policy_id, fmt=self.fmt)) + + instance.get_firewall_policy.assert_called_with(mock.ANY, + policy_id, + fields=mock.ANY) + self.assertEqual(res.status_int, exc.HTTPOk.code) + res = self.deserialize(res) + self.assertIn('firewall_policy', res) + self.assertEqual(res['firewall_policy'], return_value) + + def test_firewall_policy_update(self): + policy_id = _uuid() + update_data = {'firewall_policy': {'audited': True}} + return_value = {'tenant_id': _uuid(), + 'id': policy_id} + + instance = self.plugin.return_value + instance.update_firewall_policy.return_value = return_value + + res = self.api.put(_get_path('fw/firewall_policies', + id=policy_id, + fmt=self.fmt), + self.serialize(update_data)) + + instance.update_firewall_policy.assert_called_with(mock.ANY, + policy_id, + firewall_policy= + update_data) + self.assertEqual(res.status_int, exc.HTTPOk.code) + res = self.deserialize(res) + self.assertIn('firewall_policy', res) + self.assertEqual(res['firewall_policy'], return_value) + + def test_firewall_policy_update_malformed_rules(self): + # emulating client request when no rule uuids are provided for + # --firewall_rules parameter + update_data = {'firewall_policy': {'firewall_rules': True}} + # have to check for generic AppError + self.assertRaises( + 
webtest.AppError, + self.api.put, + _get_path('fw/firewall_policies', id=_uuid(), fmt=self.fmt), + self.serialize(update_data)) + + def test_firewall_policy_delete(self): + self._test_entity_delete('firewall_policy') + + def test_firewall_policy_insert_rule(self): + firewall_policy_id = _uuid() + firewall_rule_id = _uuid() + ref_firewall_rule_id = _uuid() + + insert_data = {'firewall_rule_id': firewall_rule_id, + 'insert_before': ref_firewall_rule_id, + 'insert_after': None} + return_value = {'firewall_policy': + {'tenant_id': _uuid(), + 'id': firewall_policy_id, + 'firewall_rules': [ref_firewall_rule_id, + firewall_rule_id]}} + + instance = self.plugin.return_value + instance.insert_rule.return_value = return_value + + path = _get_path('fw/firewall_policies', id=firewall_policy_id, + action="insert_rule", + fmt=self.fmt) + res = self.api.put(path, self.serialize(insert_data)) + instance.insert_rule.assert_called_with(mock.ANY, firewall_policy_id, + insert_data) + self.assertEqual(res.status_int, exc.HTTPOk.code) + res = self.deserialize(res) + self.assertEqual(res, return_value) + + def test_firewall_policy_remove_rule(self): + firewall_policy_id = _uuid() + firewall_rule_id = _uuid() + + remove_data = {'firewall_rule_id': firewall_rule_id} + return_value = {'firewall_policy': + {'tenant_id': _uuid(), + 'id': firewall_policy_id, + 'firewall_rules': []}} + + instance = self.plugin.return_value + instance.remove_rule.return_value = return_value + + path = _get_path('fw/firewall_policies', id=firewall_policy_id, + action="remove_rule", + fmt=self.fmt) + res = self.api.put(path, self.serialize(remove_data)) + instance.remove_rule.assert_called_with(mock.ANY, firewall_policy_id, + remove_data) + self.assertEqual(res.status_int, exc.HTTPOk.code) + res = self.deserialize(res) + self.assertEqual(res, return_value) + + +class FirewallExtensionTestCaseXML(FirewallExtensionTestCase): + fmt = 'xml' + + +class TestFirewallAttributeValidators(base.BaseTestCase): + + def 
test_validate_port_range(self): + msg = firewall._validate_port_range(None) + self.assertIsNone(msg) + + msg = firewall._validate_port_range('10') + self.assertIsNone(msg) + + msg = firewall._validate_port_range(10) + self.assertIsNone(msg) + + msg = firewall._validate_port_range(-1) + self.assertEqual(msg, "Invalid port '-1'") + + msg = firewall._validate_port_range('66000') + self.assertEqual(msg, "Invalid port '66000'") + + msg = firewall._validate_port_range('10:20') + self.assertIsNone(msg) + + msg = firewall._validate_port_range('1:65535') + self.assertIsNone(msg) + + msg = firewall._validate_port_range('0:65535') + self.assertEqual(msg, "Invalid port '0'") + + msg = firewall._validate_port_range('1:65536') + self.assertEqual(msg, "Invalid port '65536'") + + msg = firewall._validate_port_range('abc:efg') + self.assertEqual(msg, "Port 'abc' is not a valid number") + + msg = firewall._validate_port_range('1:efg') + self.assertEqual(msg, "Port 'efg' is not a valid number") + + msg = firewall._validate_port_range('-1:10') + self.assertEqual(msg, "Invalid port '-1'") + + msg = firewall._validate_port_range('66000:10') + self.assertEqual(msg, "Invalid port '66000'") + + msg = firewall._validate_port_range('10:66000') + self.assertEqual(msg, "Invalid port '66000'") + + msg = firewall._validate_port_range('1:-10') + self.assertEqual(msg, "Invalid port '-10'") + + def test_validate_ip_or_subnet_or_none(self): + msg = firewall._validate_ip_or_subnet_or_none(None) + self.assertIsNone(msg) + + msg = firewall._validate_ip_or_subnet_or_none('1.1.1.1') + self.assertIsNone(msg) + + msg = firewall._validate_ip_or_subnet_or_none('1.1.1.0/24') + self.assertIsNone(msg) + + ip_addr = '1111.1.1.1' + msg = firewall._validate_ip_or_subnet_or_none(ip_addr) + self.assertEqual(msg, ("'%s' is not a valid IP address and " + "'%s' is not a valid IP subnet") % (ip_addr, + ip_addr)) + + ip_addr = '1.1.1.1 has whitespace' + msg = firewall._validate_ip_or_subnet_or_none(ip_addr) + 
self.assertEqual(msg, ("'%s' is not a valid IP address and " + "'%s' is not a valid IP subnet") % (ip_addr, + ip_addr)) + + ip_addr = '111.1.1.1\twhitespace' + msg = firewall._validate_ip_or_subnet_or_none(ip_addr) + self.assertEqual(msg, ("'%s' is not a valid IP address and " + "'%s' is not a valid IP subnet") % (ip_addr, + ip_addr)) + + ip_addr = '111.1.1.1\nwhitespace' + msg = firewall._validate_ip_or_subnet_or_none(ip_addr) + self.assertEqual(msg, ("'%s' is not a valid IP address and " + "'%s' is not a valid IP subnet") % (ip_addr, + ip_addr)) + + # Valid - IPv4 + cidr = "10.0.2.0/24" + msg = firewall._validate_ip_or_subnet_or_none(cidr, None) + self.assertIsNone(msg) + + # Valid - IPv6 without final octets + cidr = "fe80::/24" + msg = firewall._validate_ip_or_subnet_or_none(cidr, None) + self.assertIsNone(msg) + + # Valid - IPv6 with final octets + cidr = "fe80::0/24" + msg = firewall._validate_ip_or_subnet_or_none(cidr, None) + self.assertIsNone(msg) + + cidr = "fe80::" + msg = firewall._validate_ip_or_subnet_or_none(cidr, None) + self.assertIsNone(msg) + + # Invalid - IPv6 with final octets, missing mask + cidr = "fe80::0" + msg = firewall._validate_ip_or_subnet_or_none(cidr, None) + self.assertIsNone(msg) + + # Invalid - Address format error + cidr = 'invalid' + msg = firewall._validate_ip_or_subnet_or_none(cidr, None) + self.assertEqual(msg, ("'%s' is not a valid IP address and " + "'%s' is not a valid IP subnet") % (cidr, + cidr)) diff --git a/neutron/tests/unit/test_extension_pnet.py b/neutron/tests/unit/test_extension_pnet.py new file mode 100644 index 000000000..fa88bbf33 --- /dev/null +++ b/neutron/tests/unit/test_extension_pnet.py @@ -0,0 +1,161 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 VMware +# All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Salvatore Orlando, VMware +# + +import mock +from oslo.config import cfg +from webob import exc as web_exc +import webtest + +from neutron.api import extensions +from neutron.api.v2 import attributes +from neutron.api.v2 import router +from neutron import context +from neutron.extensions import providernet as pnet +from neutron import manager +from neutron.openstack.common import uuidutils +from neutron import quota +from neutron.tests.unit import test_api_v2 +from neutron.tests.unit import test_extensions +from neutron.tests.unit import testlib_api + + +class ProviderExtensionManager(object): + + def get_resources(self): + return [] + + def get_actions(self): + return [] + + def get_request_extensions(self): + return [] + + def get_extended_resources(self, version): + return pnet.get_extended_resources(version) + + +class ProvidernetExtensionTestCase(testlib_api.WebTestCase): + fmt = 'json' + + def setUp(self): + super(ProvidernetExtensionTestCase, self).setUp() + + plugin = 'neutron.neutron_plugin_base_v2.NeutronPluginBaseV2' + + # Ensure existing ExtensionManager is not used + extensions.PluginAwareExtensionManager._instance = None + + # Save the global RESOURCE_ATTRIBUTE_MAP + self.saved_attr_map = {} + for resource, attrs in attributes.RESOURCE_ATTRIBUTE_MAP.iteritems(): + self.saved_attr_map[resource] = attrs.copy() + + # Update the plugin and extensions path + self.setup_coreplugin(plugin) + cfg.CONF.set_override('allow_pagination', True) + cfg.CONF.set_override('allow_sorting', True) + self._plugin_patcher = mock.patch(plugin, 
autospec=True) + self.plugin = self._plugin_patcher.start() + # Ensure Quota checks never fail because of mock + instance = self.plugin.return_value + instance.get_networks_count.return_value = 1 + # Instantiate mock plugin and enable the 'provider' extension + manager.NeutronManager.get_plugin().supported_extension_aliases = ( + ["provider"]) + ext_mgr = ProviderExtensionManager() + self.ext_mdw = test_extensions.setup_extensions_middleware(ext_mgr) + self.addCleanup(self._plugin_patcher.stop) + self.addCleanup(self._restore_attribute_map) + self.api = webtest.TestApp(router.APIRouter()) + + quota.QUOTAS._driver = None + cfg.CONF.set_override('quota_driver', 'neutron.quota.ConfDriver', + group='QUOTAS') + + def _restore_attribute_map(self): + # Restore the global RESOURCE_ATTRIBUTE_MAP + attributes.RESOURCE_ATTRIBUTE_MAP = self.saved_attr_map + + def _prepare_net_data(self): + return {'name': 'net1', + pnet.NETWORK_TYPE: 'sometype', + pnet.PHYSICAL_NETWORK: 'physnet', + pnet.SEGMENTATION_ID: 666} + + def _put_network_with_provider_attrs(self, ctx, expect_errors=False): + data = self._prepare_net_data() + env = {'neutron.context': ctx} + instance = self.plugin.return_value + instance.get_network.return_value = {'tenant_id': ctx.tenant_id, + 'shared': False} + net_id = uuidutils.generate_uuid() + res = self.api.put(test_api_v2._get_path('networks', + id=net_id, + fmt=self.fmt), + self.serialize({'network': data}), + extra_environ=env, + expect_errors=expect_errors) + return res, data, net_id + + def _post_network_with_provider_attrs(self, ctx, expect_errors=False): + data = self._prepare_net_data() + env = {'neutron.context': ctx} + res = self.api.post(test_api_v2._get_path('networks', fmt=self.fmt), + self.serialize({'network': data}), + content_type='application/' + self.fmt, + extra_environ=env, + expect_errors=expect_errors) + return res, data + + def test_network_create_with_provider_attrs(self): + ctx = context.get_admin_context() + ctx.tenant_id = 'an_admin' 
+ res, data = self._post_network_with_provider_attrs(ctx) + instance = self.plugin.return_value + exp_input = {'network': data} + exp_input['network'].update({'admin_state_up': True, + 'tenant_id': 'an_admin', + 'shared': False}) + instance.create_network.assert_called_with(mock.ANY, + network=exp_input) + self.assertEqual(res.status_int, web_exc.HTTPCreated.code) + + def test_network_update_with_provider_attrs(self): + ctx = context.get_admin_context() + ctx.tenant_id = 'an_admin' + res, data, net_id = self._put_network_with_provider_attrs(ctx) + instance = self.plugin.return_value + exp_input = {'network': data} + instance.update_network.assert_called_with(mock.ANY, + net_id, + network=exp_input) + self.assertEqual(res.status_int, web_exc.HTTPOk.code) + + def test_network_create_with_provider_attrs_noadmin_returns_403(self): + tenant_id = 'no_admin' + ctx = context.Context('', tenant_id, is_admin=False) + res, _1 = self._post_network_with_provider_attrs(ctx, True) + self.assertEqual(res.status_int, web_exc.HTTPForbidden.code) + + def test_network_update_with_provider_attrs_noadmin_returns_404(self): + tenant_id = 'no_admin' + ctx = context.Context('', tenant_id, is_admin=False) + res, _1, _2 = self._put_network_with_provider_attrs(ctx, True) + self.assertEqual(res.status_int, web_exc.HTTPNotFound.code) diff --git a/neutron/tests/unit/test_extension_portsecurity.py b/neutron/tests/unit/test_extension_portsecurity.py new file mode 100644 index 000000000..b40166f71 --- /dev/null +++ b/neutron/tests/unit/test_extension_portsecurity.py @@ -0,0 +1,392 @@ +# Copyright (c) 2012 OpenStack Foundation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +from neutron.api.v2 import attributes as attr +from neutron import context +from neutron.db import db_base_plugin_v2 +from neutron.db import portsecurity_db +from neutron.db import securitygroups_db +from neutron.extensions import portsecurity as psec +from neutron.extensions import securitygroup as ext_sg +from neutron import manager +from neutron.tests.unit import test_db_plugin +from neutron.tests.unit import test_extension_security_group + +DB_PLUGIN_KLASS = ('neutron.tests.unit.test_extension_portsecurity.' + 'PortSecurityTestPlugin') + + +class PortSecurityTestCase( + test_extension_security_group.SecurityGroupsTestCase, + test_db_plugin.NeutronDbPluginV2TestCase): + + def setUp(self, plugin=None): + ext_mgr = ( + test_extension_security_group.SecurityGroupTestExtensionManager()) + super(PortSecurityTestCase, self).setUp(plugin=plugin, ext_mgr=ext_mgr) + + # Check if a plugin supports security groups + plugin_obj = manager.NeutronManager.get_plugin() + self._skip_security_group = ('security-group' not in + plugin_obj.supported_extension_aliases) + + def tearDown(self): + super(PortSecurityTestCase, self).tearDown() + self._skip_security_group = None + + +class PortSecurityTestPlugin(db_base_plugin_v2.NeutronDbPluginV2, + securitygroups_db.SecurityGroupDbMixin, + portsecurity_db.PortSecurityDbMixin): + + """Test plugin that implements necessary calls on create/delete port for + associating ports with security groups and port security. 
+ """ + + supported_extension_aliases = ["security-group", "port-security"] + + def create_network(self, context, network): + tenant_id = self._get_tenant_id_for_create(context, network['network']) + self._ensure_default_security_group(context, tenant_id) + with context.session.begin(subtransactions=True): + neutron_db = super(PortSecurityTestPlugin, self).create_network( + context, network) + neutron_db.update(network['network']) + self._process_network_port_security_create( + context, network['network'], neutron_db) + return neutron_db + + def update_network(self, context, id, network): + with context.session.begin(subtransactions=True): + neutron_db = super(PortSecurityTestPlugin, self).update_network( + context, id, network) + if psec.PORTSECURITY in network['network']: + self._process_network_port_security_update( + context, network['network'], neutron_db) + return neutron_db + + def get_network(self, context, id, fields=None): + with context.session.begin(subtransactions=True): + net = super(PortSecurityTestPlugin, self).get_network( + context, id) + return self._fields(net, fields) + + def create_port(self, context, port): + p = port['port'] + with context.session.begin(subtransactions=True): + p[ext_sg.SECURITYGROUPS] = self._get_security_groups_on_port( + context, port) + neutron_db = super(PortSecurityTestPlugin, self).create_port( + context, port) + p.update(neutron_db) + + (port_security, has_ip) = self._determine_port_security_and_has_ip( + context, p) + p[psec.PORTSECURITY] = port_security + self._process_port_port_security_create(context, p, neutron_db) + + if (attr.is_attr_set(p.get(ext_sg.SECURITYGROUPS)) and + not (port_security and has_ip)): + raise psec.PortSecurityAndIPRequiredForSecurityGroups() + + # Port requires ip and port_security enabled for security group + if has_ip and port_security: + self._ensure_default_security_group_on_port(context, port) + + if (p.get(ext_sg.SECURITYGROUPS) and p[psec.PORTSECURITY]): + 
self._process_port_create_security_group( + context, p, p[ext_sg.SECURITYGROUPS]) + + return port['port'] + + def update_port(self, context, id, port): + delete_security_groups = self._check_update_deletes_security_groups( + port) + has_security_groups = self._check_update_has_security_groups(port) + with context.session.begin(subtransactions=True): + ret_port = super(PortSecurityTestPlugin, self).update_port( + context, id, port) + # copy values over - but not fixed_ips + port['port'].pop('fixed_ips', None) + ret_port.update(port['port']) + + # populate port_security setting + if psec.PORTSECURITY not in ret_port: + ret_port[psec.PORTSECURITY] = self._get_port_security_binding( + context, id) + has_ip = self._ip_on_port(ret_port) + # checks if security groups were updated adding/modifying + # security groups, port security is set and port has ip + if (has_security_groups and (not ret_port[psec.PORTSECURITY] + or not has_ip)): + raise psec.PortSecurityAndIPRequiredForSecurityGroups() + + # Port security/IP was updated off. Need to check that no security + # groups are on port. + if ret_port[psec.PORTSECURITY] is not True or not has_ip: + if has_security_groups: + raise psec.PortSecurityAndIPRequiredForSecurityGroups() + + # get security groups on port + filters = {'port_id': [id]} + security_groups = (super(PortSecurityTestPlugin, self). + _get_port_security_group_bindings( + context, filters)) + if security_groups and not delete_security_groups: + raise psec.PortSecurityPortHasSecurityGroup() + + if (delete_security_groups or has_security_groups): + # delete the port binding and read it with the new rules. 
+ self._delete_port_security_group_bindings(context, id) + sgids = self._get_security_groups_on_port(context, port) + # process port create sec groups needs port id + port['id'] = id + self._process_port_create_security_group(context, + ret_port, sgids) + + if psec.PORTSECURITY in port['port']: + self._process_port_port_security_update( + context, port['port'], ret_port) + + return ret_port + + +class PortSecurityDBTestCase(PortSecurityTestCase): + def setUp(self, plugin=None): + plugin = plugin or DB_PLUGIN_KLASS + super(PortSecurityDBTestCase, self).setUp(plugin) + + +class TestPortSecurity(PortSecurityDBTestCase): + def test_create_network_with_portsecurity_mac(self): + res = self._create_network('json', 'net1', True) + net = self.deserialize('json', res) + self.assertEqual(net['network'][psec.PORTSECURITY], True) + + def test_create_network_with_portsecurity_false(self): + res = self._create_network('json', 'net1', True, + arg_list=('port_security_enabled',), + port_security_enabled=False) + net = self.deserialize('json', res) + self.assertEqual(net['network'][psec.PORTSECURITY], False) + + def test_updating_network_port_security(self): + res = self._create_network('json', 'net1', True, + port_security_enabled='True') + net = self.deserialize('json', res) + self.assertEqual(net['network'][psec.PORTSECURITY], True) + update_net = {'network': {psec.PORTSECURITY: False}} + req = self.new_update_request('networks', update_net, + net['network']['id']) + net = self.deserialize('json', req.get_response(self.api)) + self.assertEqual(net['network'][psec.PORTSECURITY], False) + req = self.new_show_request('networks', net['network']['id']) + net = self.deserialize('json', req.get_response(self.api)) + self.assertEqual(net['network'][psec.PORTSECURITY], False) + + def test_create_port_default_true(self): + with self.network() as net: + res = self._create_port('json', net['network']['id']) + port = self.deserialize('json', res) + 
self.assertEqual(port['port'][psec.PORTSECURITY], True) + self._delete('ports', port['port']['id']) + + def test_create_port_passing_true(self): + res = self._create_network('json', 'net1', True, + arg_list=('port_security_enabled',), + port_security_enabled=True) + net = self.deserialize('json', res) + res = self._create_port('json', net['network']['id']) + port = self.deserialize('json', res) + self.assertEqual(port['port'][psec.PORTSECURITY], True) + self._delete('ports', port['port']['id']) + + def test_create_port_on_port_security_false_network(self): + res = self._create_network('json', 'net1', True, + arg_list=('port_security_enabled',), + port_security_enabled=False) + net = self.deserialize('json', res) + res = self._create_port('json', net['network']['id']) + port = self.deserialize('json', res) + self.assertEqual(port['port'][psec.PORTSECURITY], False) + self._delete('ports', port['port']['id']) + + def test_create_port_security_overrides_network_value(self): + res = self._create_network('json', 'net1', True, + arg_list=('port_security_enabled',), + port_security_enabled=False) + net = self.deserialize('json', res) + res = self._create_port('json', net['network']['id'], + arg_list=('port_security_enabled',), + port_security_enabled=True) + port = self.deserialize('json', res) + self.assertEqual(port['port'][psec.PORTSECURITY], True) + self._delete('ports', port['port']['id']) + + def test_create_port_fails_with_secgroup_and_port_security_false(self): + if self._skip_security_group: + self.skipTest("Plugin does not support security groups") + with self.network() as net: + with self.subnet(network=net): + security_group = self.deserialize( + 'json', + self._create_security_group(self.fmt, 'asdf', 'asdf')) + security_group_id = security_group['security_group']['id'] + res = self._create_port('json', net['network']['id'], + arg_list=('security_groups', + 'port_security_enabled'), + security_groups=[security_group_id], + port_security_enabled=False) + 
self.assertEqual(res.status_int, 400) + + def test_create_port_with_default_security_group(self): + if self._skip_security_group: + self.skipTest("Plugin does not support security groups") + with self.network() as net: + with self.subnet(network=net): + res = self._create_port('json', net['network']['id']) + port = self.deserialize('json', res) + self.assertEqual(port['port'][psec.PORTSECURITY], True) + self.assertEqual(len(port['port'][ext_sg.SECURITYGROUPS]), 1) + self._delete('ports', port['port']['id']) + + def test_create_port_with_security_group_and_net_sec_false(self): + # This tests that port_security_enabled is true when creating + # a port on a network that is marked as port_security_enabled=False + # that has a subnet and securiy_groups are passed it. + if self._skip_security_group: + self.skipTest("Plugin does not support security groups") + res = self._create_network('json', 'net1', True, + arg_list=('port_security_enabled',), + port_security_enabled=False) + net = self.deserialize('json', res) + self._create_subnet('json', net['network']['id'], '10.0.0.0/24') + security_group = self.deserialize( + 'json', self._create_security_group(self.fmt, 'asdf', 'asdf')) + security_group_id = security_group['security_group']['id'] + res = self._create_port('json', net['network']['id'], + arg_list=('security_groups',), + security_groups=[security_group_id]) + port = self.deserialize('json', res) + self.assertEqual(port['port'][psec.PORTSECURITY], True) + self.assertEqual(port['port']['security_groups'], [security_group_id]) + self._delete('ports', port['port']['id']) + + def test_update_port_security_off_with_security_group(self): + if self._skip_security_group: + self.skipTest("Plugin does not support security groups") + with self.network() as net: + with self.subnet(network=net): + res = self._create_port('json', net['network']['id']) + port = self.deserialize('json', res) + self.assertEqual(port['port'][psec.PORTSECURITY], True) + + update_port = {'port': 
{psec.PORTSECURITY: False}} + req = self.new_update_request('ports', update_port, + port['port']['id']) + res = req.get_response(self.api) + self.assertEqual(res.status_int, 409) + # remove security group on port + update_port = {'port': {ext_sg.SECURITYGROUPS: None}} + req = self.new_update_request('ports', update_port, + port['port']['id']) + + self.deserialize('json', req.get_response(self.api)) + self._delete('ports', port['port']['id']) + + def test_update_port_remove_port_security_security_group(self): + if self._skip_security_group: + self.skipTest("Plugin does not support security groups") + with self.network() as net: + with self.subnet(network=net): + res = self._create_port('json', net['network']['id'], + arg_list=('port_security_enabled',), + port_security_enabled=True) + port = self.deserialize('json', res) + self.assertEqual(port['port'][psec.PORTSECURITY], True) + + # remove security group on port + update_port = {'port': {ext_sg.SECURITYGROUPS: None, + psec.PORTSECURITY: False}} + req = self.new_update_request('ports', update_port, + port['port']['id']) + port = self.deserialize('json', req.get_response(self.api)) + self.assertEqual(port['port'][psec.PORTSECURITY], False) + self.assertEqual(len(port['port'][ext_sg.SECURITYGROUPS]), 0) + self._delete('ports', port['port']['id']) + + def test_update_port_remove_port_security_security_group_read(self): + if self._skip_security_group: + self.skipTest("Plugin does not support security groups") + with self.network() as net: + with self.subnet(network=net): + res = self._create_port('json', net['network']['id'], + arg_list=('port_security_enabled',), + port_security_enabled=True) + port = self.deserialize('json', res) + self.assertEqual(port['port'][psec.PORTSECURITY], True) + + # remove security group on port + update_port = {'port': {ext_sg.SECURITYGROUPS: None, + psec.PORTSECURITY: False}} + req = self.new_update_request('ports', update_port, + port['port']['id']) + self.deserialize('json', 
req.get_response(self.api)) + + sg_id = port['port'][ext_sg.SECURITYGROUPS] + update_port = {'port': {ext_sg.SECURITYGROUPS: [sg_id[0]], + psec.PORTSECURITY: True}} + + req = self.new_update_request('ports', update_port, + port['port']['id']) + + port = self.deserialize('json', req.get_response(self.api)) + self.assertEqual(port['port'][psec.PORTSECURITY], True) + self.assertEqual(len(port['port'][ext_sg.SECURITYGROUPS]), 1) + self._delete('ports', port['port']['id']) + + def test_create_port_security_off_shared_network(self): + with self.network(shared=True) as net: + with self.subnet(network=net): + res = self._create_port('json', net['network']['id'], + arg_list=('port_security_enabled',), + port_security_enabled=False, + tenant_id='not_network_owner', + set_context=True) + self.deserialize('json', res) + self.assertEqual(res.status_int, 403) + + def test_update_port_security_off_shared_network(self): + with self.network(shared=True, do_delete=False) as net: + with self.subnet(network=net, do_delete=False): + res = self._create_port('json', net['network']['id'], + tenant_id='not_network_owner', + set_context=True) + port = self.deserialize('json', res) + # remove security group on port + update_port = {'port': {ext_sg.SECURITYGROUPS: None, + psec.PORTSECURITY: False}} + req = self.new_update_request('ports', update_port, + port['port']['id']) + req.environ['neutron.context'] = context.Context( + '', 'not_network_owner') + res = req.get_response(self.api) + # TODO(salvatore-orlando): Expected error is 404 because + # the current API controller always returns this error + # code for any policy check failures on update. 
+ # It should be 404 when the caller cannot access the whole + # resource, and 403 when it cannot access a single attribute + self.assertEqual(res.status_int, 404) diff --git a/neutron/tests/unit/test_extension_security_group.py b/neutron/tests/unit/test_extension_security_group.py new file mode 100644 index 000000000..4a38f6da2 --- /dev/null +++ b/neutron/tests/unit/test_extension_security_group.py @@ -0,0 +1,1431 @@ +# Copyright (c) 2012 OpenStack Foundation. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import contextlib + +import mock +import webob.exc + +from neutron.api.v2 import attributes as attr +from neutron.common import constants as const +from neutron.common import exceptions as n_exc +from neutron import context +from neutron.db import db_base_plugin_v2 +from neutron.db import securitygroups_db +from neutron.extensions import securitygroup as ext_sg +from neutron.tests import base +from neutron.tests.unit import test_db_plugin + +DB_PLUGIN_KLASS = ('neutron.tests.unit.test_extension_security_group.' 
+ 'SecurityGroupTestPlugin') + + +class SecurityGroupTestExtensionManager(object): + + def get_resources(self): + # Add the resources to the global attribute map + # This is done here as the setup process won't + # initialize the main API router which extends + # the global attribute map + attr.RESOURCE_ATTRIBUTE_MAP.update( + ext_sg.RESOURCE_ATTRIBUTE_MAP) + return ext_sg.Securitygroup.get_resources() + + def get_actions(self): + return [] + + def get_request_extensions(self): + return [] + + +class SecurityGroupsTestCase(test_db_plugin.NeutronDbPluginV2TestCase): + + def _create_security_group(self, fmt, name, description, **kwargs): + + data = {'security_group': {'name': name, + 'tenant_id': kwargs.get('tenant_id', + 'test_tenant'), + 'description': description}} + security_group_req = self.new_create_request('security-groups', data, + fmt) + if (kwargs.get('set_context') and 'tenant_id' in kwargs): + # create a specific auth context for this request + security_group_req.environ['neutron.context'] = ( + context.Context('', kwargs['tenant_id'])) + return security_group_req.get_response(self.ext_api) + + def _build_security_group_rule(self, security_group_id, direction, proto, + port_range_min=None, port_range_max=None, + remote_ip_prefix=None, remote_group_id=None, + tenant_id='test_tenant', + ethertype=const.IPv4): + + data = {'security_group_rule': {'security_group_id': security_group_id, + 'direction': direction, + 'protocol': proto, + 'ethertype': ethertype, + 'tenant_id': tenant_id, + 'ethertype': ethertype}} + if port_range_min: + data['security_group_rule']['port_range_min'] = port_range_min + + if port_range_max: + data['security_group_rule']['port_range_max'] = port_range_max + + if remote_ip_prefix: + data['security_group_rule']['remote_ip_prefix'] = remote_ip_prefix + + if remote_group_id: + data['security_group_rule']['remote_group_id'] = remote_group_id + + return data + + def _create_security_group_rule(self, fmt, rules, **kwargs): + + 
security_group_rule_req = self.new_create_request( + 'security-group-rules', rules, fmt) + + if (kwargs.get('set_context') and 'tenant_id' in kwargs): + # create a specific auth context for this request + security_group_rule_req.environ['neutron.context'] = ( + context.Context('', kwargs['tenant_id'])) + return security_group_rule_req.get_response(self.ext_api) + + def _make_security_group(self, fmt, name, description, **kwargs): + res = self._create_security_group(fmt, name, description, **kwargs) + if res.status_int >= webob.exc.HTTPBadRequest.code: + raise webob.exc.HTTPClientError(code=res.status_int) + return self.deserialize(fmt, res) + + def _make_security_group_rule(self, fmt, rules, **kwargs): + res = self._create_security_group_rule(self.fmt, rules) + if res.status_int >= webob.exc.HTTPBadRequest.code: + raise webob.exc.HTTPClientError(code=res.status_int) + return self.deserialize(fmt, res) + + @contextlib.contextmanager + def security_group(self, name='webservers', description='webservers', + fmt=None, no_delete=False): + if not fmt: + fmt = self.fmt + security_group = self._make_security_group(fmt, name, description) + yield security_group + if not no_delete: + self._delete('security-groups', + security_group['security_group']['id']) + + @contextlib.contextmanager + def security_group_rule(self, security_group_id='4cd70774-cc67-4a87-9b39-7' + 'd1db38eb087', + direction='ingress', protocol=const.PROTO_NAME_TCP, + port_range_min='22', port_range_max='22', + remote_ip_prefix=None, remote_group_id=None, + fmt=None, no_delete=False, ethertype=const.IPv4): + if not fmt: + fmt = self.fmt + rule = self._build_security_group_rule(security_group_id, + direction, + protocol, port_range_min, + port_range_max, + remote_ip_prefix, + remote_group_id, + ethertype=ethertype) + security_group_rule = self._make_security_group_rule(self.fmt, rule) + yield security_group_rule + if not no_delete: + self._delete('security-group-rules', + 
security_group_rule['security_group_rule']['id']) + + def _delete_default_security_group_egress_rules(self, security_group_id): + """Deletes default egress rules given a security group ID.""" + res = self._list( + 'security-group-rules', + query_params='security_group_id=%s' % security_group_id) + + for r in res['security_group_rules']: + if (r['direction'] == 'egress' and not r['port_range_max'] and + not r['port_range_min'] and not r['protocol'] + and not r['remote_ip_prefix']): + self._delete('security-group-rules', r['id']) + + def _assert_sg_rule_has_kvs(self, security_group_rule, expected_kvs): + """Asserts that the sg rule has expected key/value pairs passed + in as expected_kvs dictionary + """ + for k, v in expected_kvs.iteritems(): + self.assertEqual(security_group_rule[k], v) + + +class SecurityGroupsTestCaseXML(SecurityGroupsTestCase): + fmt = 'xml' + + +class SecurityGroupTestPlugin(db_base_plugin_v2.NeutronDbPluginV2, + securitygroups_db.SecurityGroupDbMixin): + """Test plugin that implements necessary calls on create/delete port for + associating ports with security groups. 
+ """ + + __native_pagination_support = True + __native_sorting_support = True + + supported_extension_aliases = ["security-group"] + + def create_port(self, context, port): + tenant_id = self._get_tenant_id_for_create(context, port['port']) + default_sg = self._ensure_default_security_group(context, tenant_id) + if not attr.is_attr_set(port['port'].get(ext_sg.SECURITYGROUPS)): + port['port'][ext_sg.SECURITYGROUPS] = [default_sg] + session = context.session + with session.begin(subtransactions=True): + sgids = self._get_security_groups_on_port(context, port) + port = super(SecurityGroupTestPlugin, self).create_port(context, + port) + self._process_port_create_security_group(context, port, + sgids) + return port + + def update_port(self, context, id, port): + session = context.session + with session.begin(subtransactions=True): + if ext_sg.SECURITYGROUPS in port['port']: + port['port'][ext_sg.SECURITYGROUPS] = ( + self._get_security_groups_on_port(context, port)) + # delete the port binding and read it with the new rules + self._delete_port_security_group_bindings(context, id) + port['port']['id'] = id + self._process_port_create_security_group( + context, port['port'], + port['port'].get(ext_sg.SECURITYGROUPS)) + port = super(SecurityGroupTestPlugin, self).update_port( + context, id, port) + return port + + def create_network(self, context, network): + tenant_id = self._get_tenant_id_for_create(context, network['network']) + self._ensure_default_security_group(context, tenant_id) + return super(SecurityGroupTestPlugin, self).create_network(context, + network) + + def get_ports(self, context, filters=None, fields=None, + sorts=[], limit=None, marker=None, + page_reverse=False): + neutron_lports = super(SecurityGroupTestPlugin, self).get_ports( + context, filters, sorts=sorts, limit=limit, marker=marker, + page_reverse=page_reverse) + return neutron_lports + + +class SecurityGroupDBTestCase(SecurityGroupsTestCase): + def setUp(self, plugin=None, ext_mgr=None): + 
plugin = plugin or DB_PLUGIN_KLASS + ext_mgr = ext_mgr or SecurityGroupTestExtensionManager() + super(SecurityGroupDBTestCase, + self).setUp(plugin=plugin, ext_mgr=ext_mgr) + + +class TestSecurityGroups(SecurityGroupDBTestCase): + def test_create_security_group(self): + name = 'webservers' + description = 'my webservers' + keys = [('name', name,), ('description', description)] + with self.security_group(name, description) as security_group: + for k, v, in keys: + self.assertEqual(security_group['security_group'][k], v) + + # Verify that default egress rules have been created + + sg_rules = security_group['security_group']['security_group_rules'] + self.assertEqual(len(sg_rules), 2) + + v4_rules = [r for r in sg_rules if r['ethertype'] == const.IPv4] + self.assertEqual(len(v4_rules), 1) + v4_rule = v4_rules[0] + expected = {'direction': 'egress', + 'ethertype': const.IPv4, + 'remote_group_id': None, + 'remote_ip_prefix': None, + 'protocol': None, + 'port_range_max': None, + 'port_range_min': None} + self._assert_sg_rule_has_kvs(v4_rule, expected) + + v6_rules = [r for r in sg_rules if r['ethertype'] == const.IPv6] + self.assertEqual(len(v6_rules), 1) + v6_rule = v6_rules[0] + expected = {'direction': 'egress', + 'ethertype': const.IPv6, + 'remote_group_id': None, + 'remote_ip_prefix': None, + 'protocol': None, + 'port_range_max': None, + 'port_range_min': None} + self._assert_sg_rule_has_kvs(v6_rule, expected) + + def test_update_security_group(self): + with self.security_group() as sg: + data = {'security_group': {'name': 'new_name', + 'description': 'new_desc'}} + req = self.new_update_request('security-groups', + data, + sg['security_group']['id']) + res = self.deserialize(self.fmt, req.get_response(self.ext_api)) + self.assertEqual(res['security_group']['name'], + data['security_group']['name']) + self.assertEqual(res['security_group']['description'], + data['security_group']['description']) + + def test_update_security_group_name_to_default_fail(self): + with 
self.security_group() as sg: + data = {'security_group': {'name': 'default', + 'description': 'new_desc'}} + req = self.new_update_request('security-groups', + data, + sg['security_group']['id']) + req.environ['neutron.context'] = context.Context('', 'somebody') + res = req.get_response(self.ext_api) + self.assertEqual(res.status_int, webob.exc.HTTPConflict.code) + + def test_update_default_security_group_name_fail(self): + with self.network(): + res = self.new_list_request('security-groups') + sg = self.deserialize(self.fmt, res.get_response(self.ext_api)) + data = {'security_group': {'name': 'new_name', + 'description': 'new_desc'}} + req = self.new_update_request('security-groups', + data, + sg['security_groups'][0]['id']) + req.environ['neutron.context'] = context.Context('', 'somebody') + res = req.get_response(self.ext_api) + self.assertEqual(res.status_int, webob.exc.HTTPNotFound.code) + + def test_update_default_security_group_with_description(self): + with self.network(): + res = self.new_list_request('security-groups') + sg = self.deserialize(self.fmt, res.get_response(self.ext_api)) + data = {'security_group': {'description': 'new_desc'}} + req = self.new_update_request('security-groups', + data, + sg['security_groups'][0]['id']) + res = self.deserialize(self.fmt, req.get_response(self.ext_api)) + self.assertEqual(res['security_group']['description'], + data['security_group']['description']) + + def test_default_security_group(self): + with self.network(): + res = self.new_list_request('security-groups') + groups = self.deserialize(self.fmt, res.get_response(self.ext_api)) + self.assertEqual(len(groups['security_groups']), 1) + + def test_create_default_security_group_fail(self): + name = 'default' + description = 'my webservers' + res = self._create_security_group(self.fmt, name, description) + self.deserialize(self.fmt, res) + self.assertEqual(res.status_int, webob.exc.HTTPConflict.code) + + def test_list_security_groups(self): + with 
contextlib.nested(self.security_group(name='sg1', + description='sg'), + self.security_group(name='sg2', + description='sg'), + self.security_group(name='sg3', + description='sg') + ) as security_groups: + self._test_list_resources('security-group', + security_groups, + query_params='description=sg') + + def test_list_security_groups_with_sort(self): + with contextlib.nested(self.security_group(name='sg1', + description='sg'), + self.security_group(name='sg2', + description='sg'), + self.security_group(name='sg3', + description='sg') + ) as (sg1, sg2, sg3): + self._test_list_with_sort('security-group', + (sg3, sg2, sg1), + [('name', 'desc')], + query_params='description=sg') + + def test_list_security_groups_with_pagination(self): + with contextlib.nested(self.security_group(name='sg1', + description='sg'), + self.security_group(name='sg2', + description='sg'), + self.security_group(name='sg3', + description='sg') + ) as (sg1, sg2, sg3): + self._test_list_with_pagination('security-group', + (sg1, sg2, sg3), + ('name', 'asc'), 2, 2, + query_params='description=sg') + + def test_list_security_groups_with_pagination_reverse(self): + with contextlib.nested(self.security_group(name='sg1', + description='sg'), + self.security_group(name='sg2', + description='sg'), + self.security_group(name='sg3', + description='sg') + ) as (sg1, sg2, sg3): + self._test_list_with_pagination_reverse( + 'security-group', (sg1, sg2, sg3), ('name', 'asc'), 2, 2, + query_params='description=sg') + + def test_create_security_group_rule_ethertype_invalid_as_number(self): + name = 'webservers' + description = 'my webservers' + with self.security_group(name, description) as sg: + security_group_id = sg['security_group']['id'] + ethertype = 2 + rule = self._build_security_group_rule( + security_group_id, 'ingress', const.PROTO_NAME_TCP, '22', + '22', None, None, ethertype=ethertype) + res = self._create_security_group_rule(self.fmt, rule) + self.deserialize(self.fmt, res) + 
self.assertEqual(res.status_int, webob.exc.HTTPBadRequest.code) + + def test_create_security_group_rule_invalid_ip_prefix(self): + name = 'webservers' + description = 'my webservers' + for bad_prefix in ['bad_ip', 256, "2001:db8:a::123/129", '172.30./24']: + with self.security_group(name, description) as sg: + sg_id = sg['security_group']['id'] + remote_ip_prefix = bad_prefix + rule = self._build_security_group_rule( + sg_id, + 'ingress', + const.PROTO_NAME_TCP, + '22', '22', + remote_ip_prefix) + res = self._create_security_group_rule(self.fmt, rule) + self.assertEqual(res.status_int, webob.exc.HTTPBadRequest.code) + + def test_create_security_group_rule_invalid_ethertype_for_prefix(self): + name = 'webservers' + description = 'my webservers' + test_addr = {'192.168.1.1/24': 'ipv4', '192.168.1.1/24': 'IPv6', + '2001:db8:1234::/48': 'ipv6', + '2001:db8:1234::/48': 'IPv4'} + for prefix, ether in test_addr.iteritems(): + with self.security_group(name, description) as sg: + sg_id = sg['security_group']['id'] + ethertype = ether + remote_ip_prefix = prefix + rule = self._build_security_group_rule( + sg_id, + 'ingress', + const.PROTO_NAME_TCP, + '22', '22', + remote_ip_prefix, + None, + None, + ethertype) + res = self._create_security_group_rule(self.fmt, rule) + self.assertEqual(res.status_int, webob.exc.HTTPBadRequest.code) + + def test_create_security_group_rule_with_unmasked_prefix(self): + name = 'webservers' + description = 'my webservers' + addr = {'10.1.2.3': {'mask': '32', 'ethertype': 'IPv4'}, + 'fe80::2677:3ff:fe7d:4c': {'mask': '128', 'ethertype': 'IPv6'}} + for ip in addr: + with self.security_group(name, description) as sg: + sg_id = sg['security_group']['id'] + ethertype = addr[ip]['ethertype'] + remote_ip_prefix = ip + rule = self._build_security_group_rule( + sg_id, + 'ingress', + const.PROTO_NAME_TCP, + '22', '22', + remote_ip_prefix, + None, + None, + ethertype) + res = self._create_security_group_rule(self.fmt, rule) + 
self.assertEqual(res.status_int, 201) + res_sg = self.deserialize(self.fmt, res) + prefix = res_sg['security_group_rule']['remote_ip_prefix'] + self.assertEqual(prefix, '%s/%s' % (ip, addr[ip]['mask'])) + + def test_create_security_group_rule_tcp_protocol_as_number(self): + name = 'webservers' + description = 'my webservers' + with self.security_group(name, description) as sg: + security_group_id = sg['security_group']['id'] + protocol = const.PROTO_NUM_TCP # TCP + rule = self._build_security_group_rule( + security_group_id, 'ingress', protocol, '22', '22') + res = self._create_security_group_rule(self.fmt, rule) + self.deserialize(self.fmt, res) + self.assertEqual(res.status_int, webob.exc.HTTPCreated.code) + + def test_create_security_group_rule_protocol_as_number(self): + name = 'webservers' + description = 'my webservers' + with self.security_group(name, description) as sg: + security_group_id = sg['security_group']['id'] + protocol = 2 + rule = self._build_security_group_rule( + security_group_id, 'ingress', protocol) + res = self._create_security_group_rule(self.fmt, rule) + self.deserialize(self.fmt, res) + self.assertEqual(res.status_int, webob.exc.HTTPCreated.code) + + def test_create_security_group_rule_case_insensitive(self): + name = 'webservers' + description = 'my webservers' + with self.security_group(name, description) as sg: + security_group_id = sg['security_group']['id'] + direction = "ingress" + remote_ip_prefix = "10.0.0.0/24" + protocol = 'TCP' + port_range_min = 22 + port_range_max = 22 + ethertype = 'ipV4' + with self.security_group_rule(security_group_id, direction, + protocol, port_range_min, + port_range_max, + remote_ip_prefix, + ethertype=ethertype) as rule: + + # the lower case value will be return + self.assertEqual(rule['security_group_rule']['protocol'], + protocol.lower()) + self.assertEqual(rule['security_group_rule']['ethertype'], + const.IPv4) + + def test_get_security_group(self): + name = 'webservers' + description = 'my 
webservers' + with self.security_group(name, description) as sg: + remote_group_id = sg['security_group']['id'] + res = self.new_show_request('security-groups', remote_group_id) + security_group_id = sg['security_group']['id'] + direction = "ingress" + remote_ip_prefix = "10.0.0.0/24" + protocol = const.PROTO_NAME_TCP + port_range_min = 22 + port_range_max = 22 + keys = [('remote_ip_prefix', remote_ip_prefix), + ('security_group_id', security_group_id), + ('direction', direction), + ('protocol', protocol), + ('port_range_min', port_range_min), + ('port_range_max', port_range_max)] + with self.security_group_rule(security_group_id, direction, + protocol, port_range_min, + port_range_max, + remote_ip_prefix): + + group = self.deserialize( + self.fmt, res.get_response(self.ext_api)) + sg_rule = group['security_group']['security_group_rules'] + self.assertEqual(group['security_group']['id'], + remote_group_id) + self.assertEqual(len(sg_rule), 3) + sg_rule = [r for r in sg_rule if r['direction'] == 'ingress'] + for k, v, in keys: + self.assertEqual(sg_rule[0][k], v) + + def test_delete_security_group(self): + name = 'webservers' + description = 'my webservers' + with self.security_group(name, description, no_delete=True) as sg: + remote_group_id = sg['security_group']['id'] + self._delete('security-groups', remote_group_id, + webob.exc.HTTPNoContent.code) + + def test_delete_default_security_group_admin(self): + with self.network(): + res = self.new_list_request('security-groups') + sg = self.deserialize(self.fmt, res.get_response(self.ext_api)) + self._delete('security-groups', sg['security_groups'][0]['id'], + webob.exc.HTTPNoContent.code) + + def test_delete_default_security_group_nonadmin(self): + with self.network(): + res = self.new_list_request('security-groups') + sg = self.deserialize(self.fmt, res.get_response(self.ext_api)) + neutron_context = context.Context('', 'test-tenant') + self._delete('security-groups', sg['security_groups'][0]['id'], + 
webob.exc.HTTPConflict.code, + neutron_context=neutron_context) + + def test_security_group_list_creates_default_security_group(self): + neutron_context = context.Context('', 'test-tenant') + sg = self._list('security-groups', + neutron_context=neutron_context).get('security_groups') + self.assertEqual(len(sg), 1) + + def test_default_security_group_rules(self): + with self.network(): + res = self.new_list_request('security-groups') + groups = self.deserialize(self.fmt, res.get_response(self.ext_api)) + self.assertEqual(len(groups['security_groups']), 1) + security_group_id = groups['security_groups'][0]['id'] + res = self.new_list_request('security-group-rules') + rules = self.deserialize(self.fmt, res.get_response(self.ext_api)) + self.assertEqual(len(rules['security_group_rules']), 4) + + # Verify default rule for v4 egress + sg_rules = rules['security_group_rules'] + rules = [ + r for r in sg_rules + if r['direction'] == 'egress' and r['ethertype'] == const.IPv4 + ] + self.assertEqual(len(rules), 1) + v4_egress = rules[0] + + expected = {'direction': 'egress', + 'ethertype': const.IPv4, + 'remote_group_id': None, + 'remote_ip_prefix': None, + 'protocol': None, + 'port_range_max': None, + 'port_range_min': None} + self._assert_sg_rule_has_kvs(v4_egress, expected) + + # Verify default rule for v6 egress + rules = [ + r for r in sg_rules + if r['direction'] == 'egress' and r['ethertype'] == const.IPv6 + ] + self.assertEqual(len(rules), 1) + v6_egress = rules[0] + + expected = {'direction': 'egress', + 'ethertype': const.IPv6, + 'remote_group_id': None, + 'remote_ip_prefix': None, + 'protocol': None, + 'port_range_max': None, + 'port_range_min': None} + self._assert_sg_rule_has_kvs(v6_egress, expected) + + # Verify default rule for v4 ingress + rules = [ + r for r in sg_rules + if r['direction'] == 'ingress' and r['ethertype'] == const.IPv4 + ] + self.assertEqual(len(rules), 1) + v4_ingress = rules[0] + + expected = {'direction': 'ingress', + 'ethertype': 
const.IPv4, + 'remote_group_id': security_group_id, + 'remote_ip_prefix': None, + 'protocol': None, + 'port_range_max': None, + 'port_range_min': None} + self._assert_sg_rule_has_kvs(v4_ingress, expected) + + # Verify default rule for v6 ingress + rules = [ + r for r in sg_rules + if r['direction'] == 'ingress' and r['ethertype'] == const.IPv6 + ] + self.assertEqual(len(rules), 1) + v6_ingress = rules[0] + + expected = {'direction': 'ingress', + 'ethertype': const.IPv6, + 'remote_group_id': security_group_id, + 'remote_ip_prefix': None, + 'protocol': None, + 'port_range_max': None, + 'port_range_min': None} + self._assert_sg_rule_has_kvs(v6_ingress, expected) + + def test_create_security_group_rule_remote_ip_prefix(self): + name = 'webservers' + description = 'my webservers' + with self.security_group(name, description) as sg: + security_group_id = sg['security_group']['id'] + direction = "ingress" + remote_ip_prefix = "10.0.0.0/24" + protocol = const.PROTO_NAME_TCP + port_range_min = 22 + port_range_max = 22 + keys = [('remote_ip_prefix', remote_ip_prefix), + ('security_group_id', security_group_id), + ('direction', direction), + ('protocol', protocol), + ('port_range_min', port_range_min), + ('port_range_max', port_range_max)] + with self.security_group_rule(security_group_id, direction, + protocol, port_range_min, + port_range_max, + remote_ip_prefix) as rule: + for k, v, in keys: + self.assertEqual(rule['security_group_rule'][k], v) + + def test_create_security_group_rule_group_id(self): + name = 'webservers' + description = 'my webservers' + with self.security_group(name, description) as sg: + with self.security_group(name, description) as sg2: + security_group_id = sg['security_group']['id'] + direction = "ingress" + remote_group_id = sg2['security_group']['id'] + protocol = const.PROTO_NAME_TCP + port_range_min = 22 + port_range_max = 22 + keys = [('remote_group_id', remote_group_id), + ('security_group_id', security_group_id), + ('direction', direction), + 
('protocol', protocol), + ('port_range_min', port_range_min), + ('port_range_max', port_range_max)] + with self.security_group_rule(security_group_id, direction, + protocol, port_range_min, + port_range_max, + remote_group_id=remote_group_id + ) as rule: + for k, v, in keys: + self.assertEqual(rule['security_group_rule'][k], v) + + def test_create_security_group_rule_icmp_with_type_and_code(self): + name = 'webservers' + description = 'my webservers' + with self.security_group(name, description) as sg: + security_group_id = sg['security_group']['id'] + direction = "ingress" + remote_ip_prefix = "10.0.0.0/24" + protocol = const.PROTO_NAME_ICMP + # port_range_min (ICMP type) is greater than port_range_max + # (ICMP code) in order to confirm min <= max port check is + # not called for ICMP. + port_range_min = 8 + port_range_max = 5 + keys = [('remote_ip_prefix', remote_ip_prefix), + ('security_group_id', security_group_id), + ('direction', direction), + ('protocol', protocol), + ('port_range_min', port_range_min), + ('port_range_max', port_range_max)] + with self.security_group_rule(security_group_id, direction, + protocol, port_range_min, + port_range_max, + remote_ip_prefix) as rule: + for k, v, in keys: + self.assertEqual(rule['security_group_rule'][k], v) + + def test_create_security_group_rule_icmp_with_type_only(self): + name = 'webservers' + description = 'my webservers' + with self.security_group(name, description) as sg: + security_group_id = sg['security_group']['id'] + direction = "ingress" + remote_ip_prefix = "10.0.0.0/24" + protocol = const.PROTO_NAME_ICMP + # ICMP type + port_range_min = 8 + # ICMP code + port_range_max = None + keys = [('remote_ip_prefix', remote_ip_prefix), + ('security_group_id', security_group_id), + ('direction', direction), + ('protocol', protocol), + ('port_range_min', port_range_min), + ('port_range_max', port_range_max)] + with self.security_group_rule(security_group_id, direction, + protocol, port_range_min, + port_range_max, 
+ remote_ip_prefix) as rule: + for k, v, in keys: + self.assertEqual(rule['security_group_rule'][k], v) + + def test_create_security_group_source_group_ip_and_ip_prefix(self): + security_group_id = "4cd70774-cc67-4a87-9b39-7d1db38eb087" + direction = "ingress" + remote_ip_prefix = "10.0.0.0/24" + protocol = const.PROTO_NAME_TCP + port_range_min = 22 + port_range_max = 22 + remote_group_id = "9cd70774-cc67-4a87-9b39-7d1db38eb087" + rule = self._build_security_group_rule(security_group_id, direction, + protocol, port_range_min, + port_range_max, + remote_ip_prefix, + remote_group_id) + res = self._create_security_group_rule(self.fmt, rule) + self.deserialize(self.fmt, res) + self.assertEqual(res.status_int, webob.exc.HTTPBadRequest.code) + + def test_create_security_group_rule_bad_security_group_id(self): + security_group_id = "4cd70774-cc67-4a87-9b39-7d1db38eb087" + direction = "ingress" + remote_ip_prefix = "10.0.0.0/24" + protocol = const.PROTO_NAME_TCP + port_range_min = 22 + port_range_max = 22 + rule = self._build_security_group_rule(security_group_id, direction, + protocol, port_range_min, + port_range_max, + remote_ip_prefix) + res = self._create_security_group_rule(self.fmt, rule) + self.deserialize(self.fmt, res) + self.assertEqual(res.status_int, webob.exc.HTTPNotFound.code) + + def test_create_security_group_rule_bad_tenant(self): + with self.security_group() as sg: + rule = {'security_group_rule': + {'security_group_id': sg['security_group']['id'], + 'direction': 'ingress', + 'protocol': const.PROTO_NAME_TCP, + 'port_range_min': '22', + 'port_range_max': '22', + 'tenant_id': "bad_tenant"}} + + res = self._create_security_group_rule(self.fmt, rule, + tenant_id='bad_tenant', + set_context=True) + self.deserialize(self.fmt, res) + self.assertEqual(res.status_int, webob.exc.HTTPNotFound.code) + + def test_create_security_group_rule_bad_tenant_remote_group_id(self): + with self.security_group() as sg: + res = self._create_security_group(self.fmt, 
'webservers', + 'webservers', + tenant_id='bad_tenant') + sg2 = self.deserialize(self.fmt, res) + rule = {'security_group_rule': + {'security_group_id': sg2['security_group']['id'], + 'direction': 'ingress', + 'protocol': const.PROTO_NAME_TCP, + 'port_range_min': '22', + 'port_range_max': '22', + 'tenant_id': 'bad_tenant', + 'remote_group_id': sg['security_group']['id']}} + + res = self._create_security_group_rule(self.fmt, rule, + tenant_id='bad_tenant', + set_context=True) + self.deserialize(self.fmt, res) + self.assertEqual(res.status_int, webob.exc.HTTPNotFound.code) + + def test_create_security_group_rule_bad_tenant_security_group_rule(self): + with self.security_group() as sg: + res = self._create_security_group(self.fmt, 'webservers', + 'webservers', + tenant_id='bad_tenant') + self.deserialize(self.fmt, res) + rule = {'security_group_rule': + {'security_group_id': sg['security_group']['id'], + 'direction': 'ingress', + 'protocol': const.PROTO_NAME_TCP, + 'port_range_min': '22', + 'port_range_max': '22', + 'tenant_id': 'bad_tenant'}} + + res = self._create_security_group_rule(self.fmt, rule, + tenant_id='bad_tenant', + set_context=True) + self.deserialize(self.fmt, res) + self.assertEqual(res.status_int, webob.exc.HTTPNotFound.code) + + def test_create_security_group_rule_bad_remote_group_id(self): + name = 'webservers' + description = 'my webservers' + with self.security_group(name, description) as sg: + security_group_id = sg['security_group']['id'] + remote_group_id = "4cd70774-cc67-4a87-9b39-7d1db38eb087" + direction = "ingress" + protocol = const.PROTO_NAME_TCP + port_range_min = 22 + port_range_max = 22 + rule = self._build_security_group_rule(security_group_id, direction, + protocol, port_range_min, + port_range_max, + remote_group_id=remote_group_id) + res = self._create_security_group_rule(self.fmt, rule) + self.deserialize(self.fmt, res) + self.assertEqual(res.status_int, webob.exc.HTTPNotFound.code) + + def 
test_create_security_group_rule_duplicate_rules(self): + name = 'webservers' + description = 'my webservers' + with self.security_group(name, description) as sg: + security_group_id = sg['security_group']['id'] + with self.security_group_rule(security_group_id): + rule = self._build_security_group_rule( + sg['security_group']['id'], 'ingress', + const.PROTO_NAME_TCP, '22', '22') + self._create_security_group_rule(self.fmt, rule) + res = self._create_security_group_rule(self.fmt, rule) + self.deserialize(self.fmt, res) + self.assertEqual(res.status_int, webob.exc.HTTPConflict.code) + + def test_create_security_group_rule_min_port_greater_max(self): + name = 'webservers' + description = 'my webservers' + with self.security_group(name, description) as sg: + security_group_id = sg['security_group']['id'] + with self.security_group_rule(security_group_id): + for protocol in [const.PROTO_NAME_TCP, const.PROTO_NAME_UDP, + const.PROTO_NUM_TCP, const.PROTO_NUM_UDP]: + rule = self._build_security_group_rule( + sg['security_group']['id'], + 'ingress', protocol, '50', '22') + res = self._create_security_group_rule(self.fmt, rule) + self.deserialize(self.fmt, res) + self.assertEqual(res.status_int, + webob.exc.HTTPBadRequest.code) + + def test_create_security_group_rule_ports_but_no_protocol(self): + name = 'webservers' + description = 'my webservers' + with self.security_group(name, description) as sg: + security_group_id = sg['security_group']['id'] + with self.security_group_rule(security_group_id): + rule = self._build_security_group_rule( + sg['security_group']['id'], 'ingress', None, '22', '22') + res = self._create_security_group_rule(self.fmt, rule) + self.deserialize(self.fmt, res) + self.assertEqual(res.status_int, webob.exc.HTTPBadRequest.code) + + def test_create_security_group_rule_port_range_min_only(self): + name = 'webservers' + description = 'my webservers' + with self.security_group(name, description) as sg: + security_group_id = sg['security_group']['id'] + 
with self.security_group_rule(security_group_id): + rule = self._build_security_group_rule( + sg['security_group']['id'], 'ingress', + const.PROTO_NAME_TCP, '22', None) + res = self._create_security_group_rule(self.fmt, rule) + self.deserialize(self.fmt, res) + self.assertEqual(res.status_int, webob.exc.HTTPBadRequest.code) + + def test_create_security_group_rule_port_range_max_only(self): + name = 'webservers' + description = 'my webservers' + with self.security_group(name, description) as sg: + security_group_id = sg['security_group']['id'] + with self.security_group_rule(security_group_id): + rule = self._build_security_group_rule( + sg['security_group']['id'], 'ingress', + const.PROTO_NAME_TCP, None, '22') + res = self._create_security_group_rule(self.fmt, rule) + self.deserialize(self.fmt, res) + self.assertEqual(res.status_int, webob.exc.HTTPBadRequest.code) + + def test_create_security_group_rule_icmp_type_too_big(self): + name = 'webservers' + description = 'my webservers' + with self.security_group(name, description) as sg: + security_group_id = sg['security_group']['id'] + with self.security_group_rule(security_group_id): + rule = self._build_security_group_rule( + sg['security_group']['id'], 'ingress', + const.PROTO_NAME_ICMP, '256', None) + res = self._create_security_group_rule(self.fmt, rule) + self.deserialize(self.fmt, res) + self.assertEqual(res.status_int, webob.exc.HTTPBadRequest.code) + + def test_create_security_group_rule_icmp_code_too_big(self): + name = 'webservers' + description = 'my webservers' + with self.security_group(name, description) as sg: + security_group_id = sg['security_group']['id'] + with self.security_group_rule(security_group_id): + rule = self._build_security_group_rule( + sg['security_group']['id'], 'ingress', + const.PROTO_NAME_ICMP, '8', '256') + res = self._create_security_group_rule(self.fmt, rule) + self.deserialize(self.fmt, res) + self.assertEqual(res.status_int, webob.exc.HTTPBadRequest.code) + + def 
test_create_security_group_rule_icmp_with_code_only(self): + name = 'webservers' + description = 'my webservers' + with self.security_group(name, description) as sg: + security_group_id = sg['security_group']['id'] + with self.security_group_rule(security_group_id): + rule = self._build_security_group_rule( + sg['security_group']['id'], 'ingress', + const.PROTO_NAME_ICMP, None, '2') + res = self._create_security_group_rule(self.fmt, rule) + self.deserialize(self.fmt, res) + self.assertEqual(res.status_int, webob.exc.HTTPBadRequest.code) + + def test_list_ports_security_group(self): + with self.network() as n: + with self.subnet(n): + self._create_port(self.fmt, n['network']['id']) + req = self.new_list_request('ports') + res = req.get_response(self.api) + ports = self.deserialize(self.fmt, res) + port = ports['ports'][0] + self.assertEqual(len(port[ext_sg.SECURITYGROUPS]), 1) + self._delete('ports', port['id']) + + def test_list_security_group_rules(self): + with self.security_group(name='sg') as sg: + security_group_id = sg['security_group']['id'] + with contextlib.nested(self.security_group_rule(security_group_id, + direction='egress', + port_range_min=22, + port_range_max=22), + self.security_group_rule(security_group_id, + direction='egress', + port_range_min=23, + port_range_max=23), + self.security_group_rule(security_group_id, + direction='egress', + port_range_min=24, + port_range_max=24) + ) as (sgr1, sgr2, sgr3): + + # Delete default rules as they would fail the following + # assertion at the end. 
+ self._delete_default_security_group_egress_rules( + security_group_id) + + q = 'direction=egress&security_group_id=' + security_group_id + self._test_list_resources('security-group-rule', + [sgr1, sgr2, sgr3], + query_params=q) + + def test_list_security_group_rules_with_sort(self): + with self.security_group(name='sg') as sg: + security_group_id = sg['security_group']['id'] + with contextlib.nested(self.security_group_rule(security_group_id, + direction='egress', + port_range_min=22, + port_range_max=22), + self.security_group_rule(security_group_id, + direction='egress', + port_range_min=23, + port_range_max=23), + self.security_group_rule(security_group_id, + direction='egress', + port_range_min=24, + port_range_max=24) + ) as (sgr1, sgr2, sgr3): + + # Delete default rules as they would fail the following + # assertion at the end. + self._delete_default_security_group_egress_rules( + security_group_id) + + q = 'direction=egress&security_group_id=' + security_group_id + self._test_list_with_sort('security-group-rule', + (sgr3, sgr2, sgr1), + [('port_range_max', 'desc')], + query_params=q) + + def test_list_security_group_rules_with_pagination(self): + with self.security_group(name='sg') as sg: + security_group_id = sg['security_group']['id'] + with contextlib.nested(self.security_group_rule(security_group_id, + direction='egress', + port_range_min=22, + port_range_max=22), + self.security_group_rule(security_group_id, + direction='egress', + port_range_min=23, + port_range_max=23), + self.security_group_rule(security_group_id, + direction='egress', + port_range_min=24, + port_range_max=24) + ) as (sgr1, sgr2, sgr3): + + # Delete default rules as they would fail the following + # assertion at the end. 
+ self._delete_default_security_group_egress_rules( + security_group_id) + + q = 'direction=egress&security_group_id=' + security_group_id + self._test_list_with_pagination( + 'security-group-rule', (sgr3, sgr2, sgr1), + ('port_range_max', 'desc'), 2, 2, + query_params=q) + + def test_list_security_group_rules_with_pagination_reverse(self): + with self.security_group(name='sg') as sg: + security_group_id = sg['security_group']['id'] + with contextlib.nested(self.security_group_rule(security_group_id, + direction='egress', + port_range_min=22, + port_range_max=22), + self.security_group_rule(security_group_id, + direction='egress', + port_range_min=23, + port_range_max=23), + self.security_group_rule(security_group_id, + direction='egress', + port_range_min=24, + port_range_max=24) + ) as (sgr1, sgr2, sgr3): + self._test_list_with_pagination_reverse( + 'security-group-rule', (sgr3, sgr2, sgr1), + ('port_range_max', 'desc'), 2, 2, + query_params='direction=egress') + + def test_update_port_with_security_group(self): + with self.network() as n: + with self.subnet(n): + with self.security_group() as sg: + res = self._create_port(self.fmt, n['network']['id']) + port = self.deserialize(self.fmt, res) + + data = {'port': {'fixed_ips': port['port']['fixed_ips'], + 'name': port['port']['name'], + ext_sg.SECURITYGROUPS: + [sg['security_group']['id']]}} + + req = self.new_update_request('ports', data, + port['port']['id']) + res = self.deserialize(self.fmt, + req.get_response(self.api)) + self.assertEqual(res['port'][ext_sg.SECURITYGROUPS][0], + sg['security_group']['id']) + + # Test update port without security group + data = {'port': {'fixed_ips': port['port']['fixed_ips'], + 'name': port['port']['name']}} + + req = self.new_update_request('ports', data, + port['port']['id']) + res = self.deserialize(self.fmt, + req.get_response(self.api)) + self.assertEqual(res['port'][ext_sg.SECURITYGROUPS][0], + sg['security_group']['id']) + + self._delete('ports', port['port']['id']) + 
+ def test_update_port_with_multiple_security_groups(self): + with self.network() as n: + with self.subnet(n): + with self.security_group() as sg1: + with self.security_group() as sg2: + res = self._create_port( + self.fmt, n['network']['id'], + security_groups=[sg1['security_group']['id'], + sg2['security_group']['id']]) + port = self.deserialize(self.fmt, res) + self.assertEqual(len( + port['port'][ext_sg.SECURITYGROUPS]), 2) + self._delete('ports', port['port']['id']) + + def test_update_port_remove_security_group_empty_list(self): + with self.network() as n: + with self.subnet(n): + with self.security_group() as sg: + res = self._create_port(self.fmt, n['network']['id'], + security_groups=( + [sg['security_group']['id']])) + port = self.deserialize(self.fmt, res) + + data = {'port': {'fixed_ips': port['port']['fixed_ips'], + 'name': port['port']['name'], + 'security_groups': []}} + + req = self.new_update_request('ports', data, + port['port']['id']) + res = self.deserialize(self.fmt, + req.get_response(self.api)) + self.assertEqual(res['port'].get(ext_sg.SECURITYGROUPS), + []) + self._delete('ports', port['port']['id']) + + def test_update_port_remove_security_group_none(self): + with self.network() as n: + with self.subnet(n): + with self.security_group() as sg: + res = self._create_port(self.fmt, n['network']['id'], + security_groups=( + [sg['security_group']['id']])) + port = self.deserialize(self.fmt, res) + + data = {'port': {'fixed_ips': port['port']['fixed_ips'], + 'name': port['port']['name'], + 'security_groups': None}} + + req = self.new_update_request('ports', data, + port['port']['id']) + res = self.deserialize(self.fmt, + req.get_response(self.api)) + self.assertEqual(res['port'].get(ext_sg.SECURITYGROUPS), + []) + self._delete('ports', port['port']['id']) + + def test_create_port_with_bad_security_group(self): + with self.network() as n: + with self.subnet(n): + res = self._create_port(self.fmt, n['network']['id'], + security_groups=['bad_id']) + 
+ self.deserialize(self.fmt, res) + self.assertEqual(res.status_int, webob.exc.HTTPBadRequest.code) + + def test_create_delete_security_group_port_in_use(self): + with self.network() as n: + with self.subnet(n): + with self.security_group() as sg: + res = self._create_port(self.fmt, n['network']['id'], + security_groups=( + [sg['security_group']['id']])) + port = self.deserialize(self.fmt, res) + self.assertEqual(port['port'][ext_sg.SECURITYGROUPS][0], + sg['security_group']['id']) + # try to delete security group that's in use + res = self._delete('security-groups', + sg['security_group']['id'], + webob.exc.HTTPConflict.code) + # delete the blocking port + self._delete('ports', port['port']['id']) + + def test_create_security_group_rule_bulk_native(self): + if self._skip_native_bulk: + self.skipTest("Plugin does not support native bulk " + "security_group_rule create") + with self.security_group() as sg: + rule1 = self._build_security_group_rule(sg['security_group']['id'], + 'ingress', + const.PROTO_NAME_TCP, '22', + '22', '10.0.0.1/24') + rule2 = self._build_security_group_rule(sg['security_group']['id'], + 'ingress', + const.PROTO_NAME_TCP, '23', + '23', '10.0.0.1/24') + rules = {'security_group_rules': [rule1['security_group_rule'], + rule2['security_group_rule']]} + res = self._create_security_group_rule(self.fmt, rules) + self.deserialize(self.fmt, res) + self.assertEqual(res.status_int, webob.exc.HTTPCreated.code) + + def test_create_security_group_rule_bulk_emulated(self): + real_has_attr = hasattr + + #ensures the API choose the emulation code path + def fakehasattr(item, attr): + if attr.endswith('__native_bulk_support'): + return False + return real_has_attr(item, attr) + + with mock.patch('__builtin__.hasattr', + new=fakehasattr): + with self.security_group() as sg: + rule1 = self._build_security_group_rule( + sg['security_group']['id'], 'ingress', + const.PROTO_NAME_TCP, '22', '22', '10.0.0.1/24') + rule2 = self._build_security_group_rule( + 
sg['security_group']['id'], 'ingress', + const.PROTO_NAME_TCP, '23', '23', '10.0.0.1/24') + rules = {'security_group_rules': [rule1['security_group_rule'], + rule2['security_group_rule']] + } + res = self._create_security_group_rule(self.fmt, rules) + self.deserialize(self.fmt, res) + self.assertEqual(res.status_int, webob.exc.HTTPCreated.code) + + def test_create_security_group_rule_allow_all_ipv4(self): + with self.security_group() as sg: + rule = {'security_group_id': sg['security_group']['id'], + 'direction': 'ingress', + 'ethertype': 'IPv4', + 'tenant_id': 'test_tenant'} + + res = self._create_security_group_rule( + self.fmt, {'security_group_rule': rule}) + rule = self.deserialize(self.fmt, res) + self.assertEqual(res.status_int, webob.exc.HTTPCreated.code) + + def test_create_security_group_rule_allow_all_ipv4_v6_bulk(self): + if self._skip_native_bulk: + self.skipTest("Plugin does not support native bulk " + "security_group_rule create") + with self.security_group() as sg: + rule_v4 = {'security_group_id': sg['security_group']['id'], + 'direction': 'ingress', + 'ethertype': 'IPv4', + 'tenant_id': 'test_tenant'} + rule_v6 = {'security_group_id': sg['security_group']['id'], + 'direction': 'ingress', + 'ethertype': 'IPv6', + 'tenant_id': 'test_tenant'} + + rules = {'security_group_rules': [rule_v4, rule_v6]} + res = self._create_security_group_rule(self.fmt, rules) + self.deserialize(self.fmt, res) + self.assertEqual(res.status_int, webob.exc.HTTPCreated.code) + + def test_create_security_group_rule_duplicate_rule_in_post(self): + if self._skip_native_bulk: + self.skipTest("Plugin does not support native bulk " + "security_group_rule create") + with self.security_group() as sg: + rule = self._build_security_group_rule(sg['security_group']['id'], + 'ingress', + const.PROTO_NAME_TCP, '22', + '22', '10.0.0.1/24') + rules = {'security_group_rules': [rule['security_group_rule'], + rule['security_group_rule']]} + res = self._create_security_group_rule(self.fmt, 
rules) + rule = self.deserialize(self.fmt, res) + self.assertEqual(res.status_int, webob.exc.HTTPConflict.code) + + def test_create_security_group_rule_duplicate_rule_in_post_emulated(self): + real_has_attr = hasattr + + #ensures the API choose the emulation code path + def fakehasattr(item, attr): + if attr.endswith('__native_bulk_support'): + return False + return real_has_attr(item, attr) + + with mock.patch('__builtin__.hasattr', + new=fakehasattr): + + with self.security_group() as sg: + rule = self._build_security_group_rule( + sg['security_group']['id'], 'ingress', + const.PROTO_NAME_TCP, '22', '22', '10.0.0.1/24') + rules = {'security_group_rules': [rule['security_group_rule'], + rule['security_group_rule']]} + res = self._create_security_group_rule(self.fmt, rules) + rule = self.deserialize(self.fmt, res) + self.assertEqual(res.status_int, webob.exc.HTTPConflict.code) + + def test_create_security_group_rule_duplicate_rule_db(self): + if self._skip_native_bulk: + self.skipTest("Plugin does not support native bulk " + "security_group_rule create") + with self.security_group() as sg: + rule = self._build_security_group_rule(sg['security_group']['id'], + 'ingress', + const.PROTO_NAME_TCP, '22', + '22', '10.0.0.1/24') + rules = {'security_group_rules': [rule]} + self._create_security_group_rule(self.fmt, rules) + res = self._create_security_group_rule(self.fmt, rules) + rule = self.deserialize(self.fmt, res) + self.assertEqual(res.status_int, webob.exc.HTTPConflict.code) + + def test_create_security_group_rule_duplicate_rule_db_emulated(self): + real_has_attr = hasattr + + #ensures the API choose the emulation code path + def fakehasattr(item, attr): + if attr.endswith('__native_bulk_support'): + return False + return real_has_attr(item, attr) + + with mock.patch('__builtin__.hasattr', + new=fakehasattr): + with self.security_group() as sg: + rule = self._build_security_group_rule( + sg['security_group']['id'], 'ingress', + const.PROTO_NAME_TCP, '22', '22', 
'10.0.0.1/24') + rules = {'security_group_rules': [rule]} + self._create_security_group_rule(self.fmt, rules) + res = self._create_security_group_rule(self.fmt, rule) + self.deserialize(self.fmt, res) + self.assertEqual(res.status_int, webob.exc.HTTPConflict.code) + + def test_create_security_group_rule_different_security_group_ids(self): + if self._skip_native_bulk: + self.skipTest("Plugin does not support native bulk " + "security_group_rule create") + with self.security_group() as sg1: + with self.security_group() as sg2: + rule1 = self._build_security_group_rule( + sg1['security_group']['id'], 'ingress', + const.PROTO_NAME_TCP, '22', '22', '10.0.0.1/24') + rule2 = self._build_security_group_rule( + sg2['security_group']['id'], 'ingress', + const.PROTO_NAME_TCP, '23', '23', '10.0.0.1/24') + + rules = {'security_group_rules': [rule1['security_group_rule'], + rule2['security_group_rule']] + } + res = self._create_security_group_rule(self.fmt, rules) + self.deserialize(self.fmt, res) + self.assertEqual(res.status_int, webob.exc.HTTPBadRequest.code) + + def test_create_security_group_rule_with_invalid_ethertype(self): + security_group_id = "4cd70774-cc67-4a87-9b39-7d1db38eb087" + direction = "ingress" + remote_ip_prefix = "10.0.0.0/24" + protocol = const.PROTO_NAME_TCP + port_range_min = 22 + port_range_max = 22 + remote_group_id = "9cd70774-cc67-4a87-9b39-7d1db38eb087" + rule = self._build_security_group_rule(security_group_id, direction, + protocol, port_range_min, + port_range_max, + remote_ip_prefix, + remote_group_id, + ethertype='IPv5') + res = self._create_security_group_rule(self.fmt, rule) + self.deserialize(self.fmt, res) + self.assertEqual(res.status_int, webob.exc.HTTPBadRequest.code) + + def test_create_security_group_rule_with_invalid_protocol(self): + security_group_id = "4cd70774-cc67-4a87-9b39-7d1db38eb087" + direction = "ingress" + remote_ip_prefix = "10.0.0.0/24" + protocol = 'tcp/ip' + port_range_min = 22 + port_range_max = 22 + remote_group_id = 
"9cd70774-cc67-4a87-9b39-7d1db38eb087" + rule = self._build_security_group_rule(security_group_id, direction, + protocol, port_range_min, + port_range_max, + remote_ip_prefix, + remote_group_id) + res = self._create_security_group_rule(self.fmt, rule) + self.deserialize(self.fmt, res) + self.assertEqual(res.status_int, webob.exc.HTTPBadRequest.code) + + def test_create_port_with_non_uuid(self): + with self.network() as n: + with self.subnet(n): + res = self._create_port(self.fmt, n['network']['id'], + security_groups=['not_valid']) + + self.deserialize(self.fmt, res) + self.assertEqual(res.status_int, webob.exc.HTTPBadRequest.code) + + +class TestConvertIPPrefixToCIDR(base.BaseTestCase): + + def test_convert_bad_ip_prefix_to_cidr(self): + for val in ['bad_ip', 256, "2001:db8:a::123/129"]: + self.assertRaises(n_exc.InvalidCIDR, + ext_sg.convert_ip_prefix_to_cidr, val) + self.assertIsNone(ext_sg.convert_ip_prefix_to_cidr(None)) + + def test_convert_ip_prefix_no_netmask_to_cidr(self): + addr = {'10.1.2.3': '32', 'fe80::2677:3ff:fe7d:4c': '128'} + for k, v in addr.iteritems(): + self.assertEqual(ext_sg.convert_ip_prefix_to_cidr(k), + '%s/%s' % (k, v)) + + def test_convert_ip_prefix_with_netmask_to_cidr(self): + addresses = ['10.1.0.0/16', '10.1.2.3/32', '2001:db8:1234::/48'] + for addr in addresses: + self.assertEqual(ext_sg.convert_ip_prefix_to_cidr(addr), addr) + + +class TestSecurityGroupsXML(TestSecurityGroups): + fmt = 'xml' diff --git a/neutron/tests/unit/test_extensions.py b/neutron/tests/unit/test_extensions.py new file mode 100644 index 000000000..6ab310f75 --- /dev/null +++ b/neutron/tests/unit/test_extensions.py @@ -0,0 +1,685 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import abc + +import mock +import routes +import webob +import webtest + +from neutron.api import extensions +from neutron.common import config +from neutron.common import exceptions +from neutron.db import db_base_plugin_v2 +from neutron.openstack.common import jsonutils +from neutron.openstack.common import log as logging +from neutron.plugins.common import constants +from neutron.tests import base +from neutron.tests.unit import extension_stubs as ext_stubs +import neutron.tests.unit.extensions +from neutron.tests.unit import testlib_api +from neutron import wsgi + + +LOG = logging.getLogger(__name__) +extensions_path = ':'.join(neutron.tests.unit.extensions.__path__) + + +class ExtensionsTestApp(wsgi.Router): + + def __init__(self, options={}): + mapper = routes.Mapper() + controller = ext_stubs.StubBaseAppController() + mapper.resource("dummy_resource", "/dummy_resources", + controller=controller) + super(ExtensionsTestApp, self).__init__(mapper) + + +class FakePluginWithExtension(db_base_plugin_v2.NeutronDbPluginV2): + """A fake plugin used only for extension testing in this file.""" + + supported_extension_aliases = ["FOXNSOX"] + + def method_to_support_foxnsox_extension(self, context): + self._log("method_to_support_foxnsox_extension", context) + + +class PluginInterfaceTest(base.BaseTestCase): + def test_issubclass_hook(self): + class A(object): + def f(self): + pass + + class B(extensions.PluginInterface): + @abc.abstractmethod + def f(self): + pass + + self.assertTrue(issubclass(A, B)) + + def 
test_issubclass_hook_class_without_abstract_methods(self): + class A(object): + def f(self): + pass + + class B(extensions.PluginInterface): + def f(self): + pass + + self.assertFalse(issubclass(A, B)) + + def test_issubclass_hook_not_all_methods_implemented(self): + class A(object): + def f(self): + pass + + class B(extensions.PluginInterface): + @abc.abstractmethod + def f(self): + pass + + @abc.abstractmethod + def g(self): + pass + + self.assertFalse(issubclass(A, B)) + + +class ResourceExtensionTest(base.BaseTestCase): + + class ResourceExtensionController(wsgi.Controller): + + def index(self, request): + return "resource index" + + def show(self, request, id): + return {'data': {'id': id}} + + def notimplemented_function(self, request, id): + return webob.exc.HTTPNotImplemented() + + def custom_member_action(self, request, id): + return {'member_action': 'value'} + + def custom_collection_action(self, request, **kwargs): + return {'collection': 'value'} + + class DummySvcPlugin(wsgi.Controller): + def get_plugin_type(self): + return constants.DUMMY + + def index(self, request, **kwargs): + return "resource index" + + def custom_member_action(self, request, **kwargs): + return {'member_action': 'value'} + + def collection_action(self, request, **kwargs): + return {'collection': 'value'} + + def show(self, request, id): + return {'data': {'id': id}} + + def test_exceptions_notimplemented(self): + controller = self.ResourceExtensionController() + member = {'notimplemented_function': "GET"} + res_ext = extensions.ResourceExtension('tweedles', controller, + member_actions=member) + test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext)) + + # Ideally we would check for a 501 code here but webtest doesn't take + # anything that is below 200 or above 400 so we can't actually check + # it. It throws webtest.AppError instead. 
+ try: + test_app.get("/tweedles/some_id/notimplemented_function") + # Shouldn't be reached + self.assertTrue(False) + except webtest.AppError as e: + self.assertIn('501', e.message) + + def test_resource_can_be_added_as_extension(self): + res_ext = extensions.ResourceExtension( + 'tweedles', self.ResourceExtensionController()) + test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext)) + index_response = test_app.get("/tweedles") + self.assertEqual(200, index_response.status_int) + self.assertEqual("resource index", index_response.body) + + show_response = test_app.get("/tweedles/25266") + self.assertEqual({'data': {'id': "25266"}}, show_response.json) + + def test_resource_gets_prefix_of_plugin(self): + class DummySvcPlugin(wsgi.Controller): + def index(self, request): + return "" + + def get_plugin_type(self): + return constants.DUMMY + + res_ext = extensions.ResourceExtension( + 'tweedles', DummySvcPlugin(), path_prefix="/dummy_svc") + test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext)) + index_response = test_app.get("/dummy_svc/tweedles") + self.assertEqual(200, index_response.status_int) + + def test_resource_extension_with_custom_member_action(self): + controller = self.ResourceExtensionController() + member = {'custom_member_action': "GET"} + res_ext = extensions.ResourceExtension('tweedles', controller, + member_actions=member) + test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext)) + + response = test_app.get("/tweedles/some_id/custom_member_action") + self.assertEqual(200, response.status_int) + self.assertEqual(jsonutils.loads(response.body)['member_action'], + "value") + + def test_resource_ext_with_custom_member_action_gets_plugin_prefix(self): + controller = self.DummySvcPlugin() + member = {'custom_member_action': "GET"} + collections = {'collection_action': "GET"} + res_ext = extensions.ResourceExtension('tweedles', controller, + path_prefix="/dummy_svc", + member_actions=member, + 
collection_actions=collections) + test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext)) + + response = test_app.get("/dummy_svc/tweedles/1/custom_member_action") + self.assertEqual(200, response.status_int) + self.assertEqual(jsonutils.loads(response.body)['member_action'], + "value") + + response = test_app.get("/dummy_svc/tweedles/collection_action") + self.assertEqual(200, response.status_int) + self.assertEqual(jsonutils.loads(response.body)['collection'], + "value") + + def test_plugin_prefix_with_parent_resource(self): + controller = self.DummySvcPlugin() + parent = dict(member_name="tenant", + collection_name="tenants") + member = {'custom_member_action': "GET"} + collections = {'collection_action': "GET"} + res_ext = extensions.ResourceExtension('tweedles', controller, parent, + path_prefix="/dummy_svc", + member_actions=member, + collection_actions=collections) + test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext)) + + index_response = test_app.get("/dummy_svc/tenants/1/tweedles") + self.assertEqual(200, index_response.status_int) + + response = test_app.get("/dummy_svc/tenants/1/" + "tweedles/1/custom_member_action") + self.assertEqual(200, response.status_int) + self.assertEqual(jsonutils.loads(response.body)['member_action'], + "value") + + response = test_app.get("/dummy_svc/tenants/2/" + "tweedles/collection_action") + self.assertEqual(200, response.status_int) + self.assertEqual(jsonutils.loads(response.body)['collection'], + "value") + + def test_resource_extension_for_get_custom_collection_action(self): + controller = self.ResourceExtensionController() + collections = {'custom_collection_action': "GET"} + res_ext = extensions.ResourceExtension('tweedles', controller, + collection_actions=collections) + test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext)) + + response = test_app.get("/tweedles/custom_collection_action") + self.assertEqual(200, response.status_int) + 
LOG.debug(jsonutils.loads(response.body)) + self.assertEqual(jsonutils.loads(response.body)['collection'], "value") + + def test_resource_extension_for_put_custom_collection_action(self): + controller = self.ResourceExtensionController() + collections = {'custom_collection_action': "PUT"} + res_ext = extensions.ResourceExtension('tweedles', controller, + collection_actions=collections) + test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext)) + + response = test_app.put("/tweedles/custom_collection_action") + + self.assertEqual(200, response.status_int) + self.assertEqual(jsonutils.loads(response.body)['collection'], 'value') + + def test_resource_extension_for_post_custom_collection_action(self): + controller = self.ResourceExtensionController() + collections = {'custom_collection_action': "POST"} + res_ext = extensions.ResourceExtension('tweedles', controller, + collection_actions=collections) + test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext)) + + response = test_app.post("/tweedles/custom_collection_action") + + self.assertEqual(200, response.status_int) + self.assertEqual(jsonutils.loads(response.body)['collection'], 'value') + + def test_resource_extension_for_delete_custom_collection_action(self): + controller = self.ResourceExtensionController() + collections = {'custom_collection_action': "DELETE"} + res_ext = extensions.ResourceExtension('tweedles', controller, + collection_actions=collections) + test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext)) + + response = test_app.delete("/tweedles/custom_collection_action") + + self.assertEqual(200, response.status_int) + self.assertEqual(jsonutils.loads(response.body)['collection'], 'value') + + def test_resource_ext_for_formatted_req_on_custom_collection_action(self): + controller = self.ResourceExtensionController() + collections = {'custom_collection_action': "GET"} + res_ext = extensions.ResourceExtension('tweedles', controller, + 
collection_actions=collections) + test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext)) + + response = test_app.get("/tweedles/custom_collection_action.json") + + self.assertEqual(200, response.status_int) + self.assertEqual(jsonutils.loads(response.body)['collection'], "value") + + def test_resource_ext_for_nested_resource_custom_collection_action(self): + controller = self.ResourceExtensionController() + collections = {'custom_collection_action': "GET"} + parent = dict(collection_name='beetles', member_name='beetle') + res_ext = extensions.ResourceExtension('tweedles', controller, + collection_actions=collections, + parent=parent) + test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext)) + + response = test_app.get("/beetles/beetle_id" + "/tweedles/custom_collection_action") + + self.assertEqual(200, response.status_int) + self.assertEqual(jsonutils.loads(response.body)['collection'], "value") + + def test_resource_extension_with_custom_member_action_and_attr_map(self): + controller = self.ResourceExtensionController() + member = {'custom_member_action': "GET"} + params = { + 'tweedles': { + 'id': {'allow_post': False, 'allow_put': False, + 'validate': {'type:uuid': None}, + 'is_visible': True}, + 'name': {'allow_post': True, 'allow_put': True, + 'validate': {'type:string': None}, + 'default': '', 'is_visible': True}, + } + } + res_ext = extensions.ResourceExtension('tweedles', controller, + member_actions=member, + attr_map=params) + test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext)) + + response = test_app.get("/tweedles/some_id/custom_member_action") + self.assertEqual(200, response.status_int) + self.assertEqual(jsonutils.loads(response.body)['member_action'], + "value") + + def test_returns_404_for_non_existent_extension(self): + test_app = _setup_extensions_test_app(SimpleExtensionManager(None)) + + response = test_app.get("/non_extistant_extension", status='*') + + self.assertEqual(404, 
response.status_int) + + +class ActionExtensionTest(base.BaseTestCase): + + def setUp(self): + super(ActionExtensionTest, self).setUp() + self.extension_app = _setup_extensions_test_app() + + def test_extended_action_for_adding_extra_data(self): + action_name = 'FOXNSOX:add_tweedle' + action_params = dict(name='Beetle') + req_body = jsonutils.dumps({action_name: action_params}) + response = self.extension_app.post('/dummy_resources/1/action', + req_body, + content_type='application/json') + self.assertEqual("Tweedle Beetle Added.", response.body) + + def test_extended_action_for_deleting_extra_data(self): + action_name = 'FOXNSOX:delete_tweedle' + action_params = dict(name='Bailey') + req_body = jsonutils.dumps({action_name: action_params}) + response = self.extension_app.post("/dummy_resources/1/action", + req_body, + content_type='application/json') + self.assertEqual("Tweedle Bailey Deleted.", response.body) + + def test_returns_404_for_non_existent_action(self): + non_existent_action = 'blah_action' + action_params = dict(name="test") + req_body = jsonutils.dumps({non_existent_action: action_params}) + + response = self.extension_app.post("/dummy_resources/1/action", + req_body, + content_type='application/json', + status='*') + + self.assertEqual(404, response.status_int) + + def test_returns_404_for_non_existent_resource(self): + action_name = 'add_tweedle' + action_params = dict(name='Beetle') + req_body = jsonutils.dumps({action_name: action_params}) + + response = self.extension_app.post("/asdf/1/action", req_body, + content_type='application/json', + status='*') + self.assertEqual(404, response.status_int) + + +class RequestExtensionTest(base.BaseTestCase): + + def test_headers_can_be_extended(self): + def extend_headers(req, res): + assert req.headers['X-NEW-REQUEST-HEADER'] == "sox" + res.headers['X-NEW-RESPONSE-HEADER'] = "response_header_data" + return res + + app = self._setup_app_with_request_handler(extend_headers, 'GET') + response = 
app.get("/dummy_resources/1", + headers={'X-NEW-REQUEST-HEADER': "sox"}) + + self.assertEqual(response.headers['X-NEW-RESPONSE-HEADER'], + "response_header_data") + + def test_extend_get_resource_response(self): + def extend_response_data(req, res): + data = jsonutils.loads(res.body) + data['FOXNSOX:extended_key'] = req.GET.get('extended_key') + res.body = jsonutils.dumps(data) + return res + + app = self._setup_app_with_request_handler(extend_response_data, 'GET') + response = app.get("/dummy_resources/1?extended_key=extended_data") + + self.assertEqual(200, response.status_int) + response_data = jsonutils.loads(response.body) + self.assertEqual('extended_data', + response_data['FOXNSOX:extended_key']) + self.assertEqual('knox', response_data['fort']) + + def test_get_resources(self): + app = _setup_extensions_test_app() + + response = app.get("/dummy_resources/1?chewing=newblue") + + response_data = jsonutils.loads(response.body) + self.assertEqual('newblue', response_data['FOXNSOX:googoose']) + self.assertEqual("Pig Bands!", response_data['FOXNSOX:big_bands']) + + def test_edit_previously_uneditable_field(self): + + def _update_handler(req, res): + data = jsonutils.loads(res.body) + data['uneditable'] = req.params['uneditable'] + res.body = jsonutils.dumps(data) + return res + + base_app = webtest.TestApp(setup_base_app(self)) + response = base_app.put("/dummy_resources/1", + {'uneditable': "new_value"}) + self.assertEqual(response.json['uneditable'], "original_value") + + ext_app = self._setup_app_with_request_handler(_update_handler, + 'PUT') + ext_response = ext_app.put("/dummy_resources/1", + {'uneditable': "new_value"}) + self.assertEqual(ext_response.json['uneditable'], "new_value") + + def _setup_app_with_request_handler(self, handler, verb): + req_ext = extensions.RequestExtension(verb, + '/dummy_resources/:(id)', + handler) + manager = SimpleExtensionManager(None, None, req_ext) + return _setup_extensions_test_app(manager) + + +class 
ExtensionManagerTest(base.BaseTestCase): + + def test_invalid_extensions_are_not_registered(self): + + class InvalidExtension(object): + """Invalid extension. + + This Extension doesn't implement extension methods : + get_name, get_description, get_namespace and get_updated + """ + def get_alias(self): + return "invalid_extension" + + ext_mgr = extensions.ExtensionManager('') + ext_mgr.add_extension(InvalidExtension()) + ext_mgr.add_extension(ext_stubs.StubExtension("valid_extension")) + + self.assertIn('valid_extension', ext_mgr.extensions) + self.assertNotIn('invalid_extension', ext_mgr.extensions) + + +class PluginAwareExtensionManagerTest(base.BaseTestCase): + + def test_unsupported_extensions_are_not_loaded(self): + stub_plugin = ext_stubs.StubPlugin(supported_extensions=["e1", "e3"]) + plugin_info = {constants.CORE: stub_plugin} + with mock.patch("neutron.api.extensions.PluginAwareExtensionManager." + "check_if_plugin_extensions_loaded"): + ext_mgr = extensions.PluginAwareExtensionManager('', plugin_info) + + ext_mgr.add_extension(ext_stubs.StubExtension("e1")) + ext_mgr.add_extension(ext_stubs.StubExtension("e2")) + ext_mgr.add_extension(ext_stubs.StubExtension("e3")) + + self.assertIn("e1", ext_mgr.extensions) + self.assertNotIn("e2", ext_mgr.extensions) + self.assertIn("e3", ext_mgr.extensions) + + def test_extensions_are_not_loaded_for_plugins_unaware_of_extensions(self): + class ExtensionUnawarePlugin(object): + """This plugin does not implement supports_extension method. + + Extensions will not be loaded when this plugin is used. 
+ """ + pass + + plugin_info = {constants.CORE: ExtensionUnawarePlugin()} + ext_mgr = extensions.PluginAwareExtensionManager('', plugin_info) + ext_mgr.add_extension(ext_stubs.StubExtension("e1")) + + self.assertNotIn("e1", ext_mgr.extensions) + + def test_extensions_not_loaded_for_plugin_without_expected_interface(self): + + class PluginWithoutExpectedIface(object): + """Does not implement get_foo method as expected by extension.""" + supported_extension_aliases = ["supported_extension"] + + plugin_info = {constants.CORE: PluginWithoutExpectedIface()} + with mock.patch("neutron.api.extensions.PluginAwareExtensionManager." + "check_if_plugin_extensions_loaded"): + ext_mgr = extensions.PluginAwareExtensionManager('', plugin_info) + ext_mgr.add_extension(ext_stubs.ExtensionExpectingPluginInterface( + "supported_extension")) + + self.assertNotIn("e1", ext_mgr.extensions) + + def test_extensions_are_loaded_for_plugin_with_expected_interface(self): + + class PluginWithExpectedInterface(object): + """Implements get_foo method as expected by extension.""" + supported_extension_aliases = ["supported_extension"] + + def get_foo(self, bar=None): + pass + + plugin_info = {constants.CORE: PluginWithExpectedInterface()} + with mock.patch("neutron.api.extensions.PluginAwareExtensionManager." + "check_if_plugin_extensions_loaded"): + ext_mgr = extensions.PluginAwareExtensionManager('', plugin_info) + ext_mgr.add_extension(ext_stubs.ExtensionExpectingPluginInterface( + "supported_extension")) + + self.assertIn("supported_extension", ext_mgr.extensions) + + def test_extensions_expecting_neutron_plugin_interface_are_loaded(self): + class ExtensionForQuamtumPluginInterface(ext_stubs.StubExtension): + """This Extension does not implement get_plugin_interface method. 
+ + This will work with any plugin implementing NeutronPluginBase + """ + pass + stub_plugin = ext_stubs.StubPlugin(supported_extensions=["e1"]) + plugin_info = {constants.CORE: stub_plugin} + + with mock.patch("neutron.api.extensions.PluginAwareExtensionManager." + "check_if_plugin_extensions_loaded"): + ext_mgr = extensions.PluginAwareExtensionManager('', plugin_info) + ext_mgr.add_extension(ExtensionForQuamtumPluginInterface("e1")) + + self.assertIn("e1", ext_mgr.extensions) + + def test_extensions_without_need_for__plugin_interface_are_loaded(self): + class ExtensionWithNoNeedForPluginInterface(ext_stubs.StubExtension): + """This Extension does not need any plugin interface. + + This will work with any plugin implementing NeutronPluginBase + """ + def get_plugin_interface(self): + return None + + stub_plugin = ext_stubs.StubPlugin(supported_extensions=["e1"]) + plugin_info = {constants.CORE: stub_plugin} + with mock.patch("neutron.api.extensions.PluginAwareExtensionManager." + "check_if_plugin_extensions_loaded"): + ext_mgr = extensions.PluginAwareExtensionManager('', plugin_info) + ext_mgr.add_extension(ExtensionWithNoNeedForPluginInterface("e1")) + + self.assertIn("e1", ext_mgr.extensions) + + def test_extension_loaded_for_non_core_plugin(self): + class NonCorePluginExtenstion(ext_stubs.StubExtension): + def get_plugin_interface(self): + return None + + stub_plugin = ext_stubs.StubPlugin(supported_extensions=["e1"]) + plugin_info = {constants.DUMMY: stub_plugin} + with mock.patch("neutron.api.extensions.PluginAwareExtensionManager." 
+ "check_if_plugin_extensions_loaded"): + ext_mgr = extensions.PluginAwareExtensionManager('', plugin_info) + ext_mgr.add_extension(NonCorePluginExtenstion("e1")) + + self.assertIn("e1", ext_mgr.extensions) + + def test_unloaded_supported_extensions_raises_exception(self): + stub_plugin = ext_stubs.StubPlugin( + supported_extensions=["unloaded_extension"]) + plugin_info = {constants.CORE: stub_plugin} + self.assertRaises(exceptions.ExtensionsNotFound, + extensions.PluginAwareExtensionManager, + '', plugin_info) + + +class ExtensionControllerTest(testlib_api.WebTestCase): + + def setUp(self): + super(ExtensionControllerTest, self).setUp() + self.test_app = _setup_extensions_test_app() + + def test_index_gets_all_registerd_extensions(self): + response = self.test_app.get("/extensions." + self.fmt) + res_body = self.deserialize(response) + foxnsox = res_body["extensions"][0] + + self.assertEqual(foxnsox["alias"], "FOXNSOX") + self.assertEqual(foxnsox["namespace"], + "http://www.fox.in.socks/api/ext/pie/v1.0") + + def test_extension_can_be_accessed_by_alias(self): + response = self.test_app.get("/extensions/FOXNSOX." 
+ self.fmt) + foxnsox_extension = self.deserialize(response) + foxnsox_extension = foxnsox_extension['extension'] + self.assertEqual(foxnsox_extension["alias"], "FOXNSOX") + self.assertEqual(foxnsox_extension["namespace"], + "http://www.fox.in.socks/api/ext/pie/v1.0") + + def test_show_returns_not_found_for_non_existent_extension(self): + response = self.test_app.get("/extensions/non_existent" + self.fmt, + status="*") + + self.assertEqual(response.status_int, 404) + + +class ExtensionControllerTestXML(ExtensionControllerTest): + fmt = 'xml' + + +def app_factory(global_conf, **local_conf): + conf = global_conf.copy() + conf.update(local_conf) + return ExtensionsTestApp(conf) + + +def setup_base_app(test): + base.BaseTestCase.config_parse() + app = config.load_paste_app('extensions_test_app') + return app + + +def setup_extensions_middleware(extension_manager=None): + extension_manager = (extension_manager or + extensions.PluginAwareExtensionManager( + extensions_path, + {constants.CORE: FakePluginWithExtension()})) + base.BaseTestCase.config_parse() + app = config.load_paste_app('extensions_test_app') + return extensions.ExtensionMiddleware(app, ext_mgr=extension_manager) + + +def _setup_extensions_test_app(extension_manager=None): + return webtest.TestApp(setup_extensions_middleware(extension_manager)) + + +class SimpleExtensionManager(object): + + def __init__(self, resource_ext=None, action_ext=None, request_ext=None): + self.resource_ext = resource_ext + self.action_ext = action_ext + self.request_ext = request_ext + + def get_resources(self): + resource_exts = [] + if self.resource_ext: + resource_exts.append(self.resource_ext) + return resource_exts + + def get_actions(self): + action_exts = [] + if self.action_ext: + action_exts.append(self.action_ext) + return action_exts + + def get_request_extensions(self): + request_extensions = [] + if self.request_ext: + request_extensions.append(self.request_ext) + return request_extensions diff --git 
a/neutron/tests/unit/test_hacking.py b/neutron/tests/unit/test_hacking.py new file mode 100644 index 000000000..a5cebd1aa --- /dev/null +++ b/neutron/tests/unit/test_hacking.py @@ -0,0 +1,43 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from neutron.hacking import checks +from neutron.tests import base + + +class HackingTestCase(base.BaseTestCase): + + def test_log_translations(self): + logs = ['audit', 'error', 'info', 'warn', 'warning', 'critical', + 'exception'] + levels = ['_LI', '_LW', '_LE', '_LC'] + debug = "LOG.debug('OK')" + self.assertEqual( + 0, len(list(checks.validate_log_translations(debug, debug, 'f')))) + for log in logs: + bad = 'LOG.%s("Bad")' % log + self.assertEqual( + 1, len(list(checks.validate_log_translations(bad, bad, 'f')))) + ok = "LOG.%s(_('OK'))" % log + self.assertEqual( + 0, len(list(checks.validate_log_translations(ok, ok, 'f')))) + ok = "LOG.%s('OK') # noqa" % log + self.assertEqual( + 0, len(list(checks.validate_log_translations(ok, ok, 'f')))) + ok = "LOG.%s(variable)" % log + self.assertEqual( + 0, len(list(checks.validate_log_translations(ok, ok, 'f')))) + for level in levels: + ok = "LOG.%s(%s('OK'))" % (log, level) + self.assertEqual( + 0, len(list(checks.validate_log_translations(ok, + ok, 'f')))) diff --git a/neutron/tests/unit/test_iptables_firewall.py b/neutron/tests/unit/test_iptables_firewall.py new file mode 100644 index 000000000..06f490d55 --- /dev/null +++ b/neutron/tests/unit/test_iptables_firewall.py @@ -0,0 
+1,1225 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2012, Nachi Ueno, NTT MCL, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import copy + +import mock +from oslo.config import cfg + +from neutron.agent.common import config as a_cfg +from neutron.agent.linux import iptables_firewall +from neutron.common import constants +from neutron.tests import base +from neutron.tests.unit import test_api_v2 + + +_uuid = test_api_v2._uuid +FAKE_PREFIX = {'IPv4': '10.0.0.0/24', + 'IPv6': 'fe80::/48'} +FAKE_IP = {'IPv4': '10.0.0.1', + 'IPv6': 'fe80::1'} + + +class IptablesFirewallTestCase(base.BaseTestCase): + def setUp(self): + super(IptablesFirewallTestCase, self).setUp() + cfg.CONF.register_opts(a_cfg.ROOT_HELPER_OPTS, 'AGENT') + self.utils_exec_p = mock.patch( + 'neutron.agent.linux.utils.execute') + self.utils_exec = self.utils_exec_p.start() + self.iptables_cls_p = mock.patch( + 'neutron.agent.linux.iptables_manager.IptablesManager') + iptables_cls = self.iptables_cls_p.start() + self.iptables_inst = mock.Mock() + self.v4filter_inst = mock.Mock() + self.v6filter_inst = mock.Mock() + self.iptables_inst.ipv4 = {'filter': self.v4filter_inst} + self.iptables_inst.ipv6 = {'filter': self.v6filter_inst} + iptables_cls.return_value = self.iptables_inst + + self.firewall = iptables_firewall.IptablesFirewallDriver() + self.firewall.iptables = self.iptables_inst + + def _fake_port(self): + return {'device': 'tapfake_dev', + 
'mac_address': 'ff:ff:ff:ff:ff:ff', + 'fixed_ips': [FAKE_IP['IPv4'], + FAKE_IP['IPv6']]} + + def test_prepare_port_filter_with_no_sg(self): + port = self._fake_port() + self.firewall.prepare_port_filter(port) + calls = [mock.call.add_chain('sg-fallback'), + mock.call.add_rule('sg-fallback', '-j DROP'), + mock.call.ensure_remove_chain('sg-chain'), + mock.call.add_chain('sg-chain'), + mock.call.add_chain('ifake_dev'), + mock.call.add_rule('FORWARD', + '-m physdev --physdev-out tapfake_dev ' + '--physdev-is-bridged ' + '-j $sg-chain'), + mock.call.add_rule('sg-chain', + '-m physdev --physdev-out tapfake_dev ' + '--physdev-is-bridged ' + '-j $ifake_dev'), + mock.call.add_rule( + 'ifake_dev', '-m state --state INVALID -j DROP'), + mock.call.add_rule( + 'ifake_dev', + '-m state --state RELATED,ESTABLISHED -j RETURN'), + mock.call.add_rule('ifake_dev', '-j $sg-fallback'), + mock.call.add_chain('ofake_dev'), + mock.call.add_rule('FORWARD', + '-m physdev --physdev-in tapfake_dev ' + '--physdev-is-bridged ' + '-j $sg-chain'), + mock.call.add_rule('sg-chain', + '-m physdev --physdev-in tapfake_dev ' + '--physdev-is-bridged ' + '-j $ofake_dev'), + mock.call.add_rule('INPUT', + '-m physdev --physdev-in tapfake_dev ' + '--physdev-is-bridged ' + '-j $ofake_dev'), + mock.call.add_chain('sfake_dev'), + mock.call.add_rule( + 'sfake_dev', '-m mac --mac-source ff:ff:ff:ff:ff:ff ' + '-s 10.0.0.1 -j RETURN'), + mock.call.add_rule('sfake_dev', '-j DROP'), + mock.call.add_rule( + 'ofake_dev', + '-p udp -m udp --sport 68 --dport 67 -j RETURN'), + mock.call.add_rule('ofake_dev', '-j $sfake_dev'), + mock.call.add_rule( + 'ofake_dev', + '-p udp -m udp --sport 67 --dport 68 -j DROP'), + mock.call.add_rule( + 'ofake_dev', '-m state --state INVALID -j DROP'), + mock.call.add_rule( + 'ofake_dev', + '-m state --state RELATED,ESTABLISHED -j RETURN'), + mock.call.add_rule('ofake_dev', '-j $sg-fallback'), + mock.call.add_rule('sg-chain', '-j ACCEPT')] + + self.v4filter_inst.assert_has_calls(calls) + 
+ def test_filter_ipv4_ingress(self): + rule = {'ethertype': 'IPv4', + 'direction': 'ingress'} + ingress = mock.call.add_rule('ifake_dev', '-j RETURN') + egress = None + self._test_prepare_port_filter(rule, ingress, egress) + + def test_filter_ipv4_ingress_prefix(self): + prefix = FAKE_PREFIX['IPv4'] + rule = {'ethertype': 'IPv4', + 'direction': 'ingress', + 'source_ip_prefix': prefix} + ingress = mock.call.add_rule('ifake_dev', '-s %s -j RETURN' % prefix) + egress = None + self._test_prepare_port_filter(rule, ingress, egress) + + def test_filter_ipv4_ingress_tcp(self): + rule = {'ethertype': 'IPv4', + 'direction': 'ingress', + 'protocol': 'tcp'} + ingress = mock.call.add_rule('ifake_dev', '-p tcp -m tcp -j RETURN') + egress = None + self._test_prepare_port_filter(rule, ingress, egress) + + def test_filter_ipv4_ingress_tcp_prefix(self): + prefix = FAKE_PREFIX['IPv4'] + rule = {'ethertype': 'IPv4', + 'direction': 'ingress', + 'protocol': 'tcp', + 'source_ip_prefix': prefix} + ingress = mock.call.add_rule('ifake_dev', + '-s %s -p tcp -m tcp -j RETURN' % prefix) + egress = None + self._test_prepare_port_filter(rule, ingress, egress) + + def test_filter_ipv4_ingress_icmp(self): + rule = {'ethertype': 'IPv4', + 'direction': 'ingress', + 'protocol': 'icmp'} + ingress = mock.call.add_rule('ifake_dev', '-p icmp -j RETURN') + egress = None + self._test_prepare_port_filter(rule, ingress, egress) + + def test_filter_ipv4_ingress_icmp_prefix(self): + prefix = FAKE_PREFIX['IPv4'] + rule = {'ethertype': 'IPv4', + 'direction': 'ingress', + 'protocol': 'icmp', + 'source_ip_prefix': prefix} + ingress = mock.call.add_rule( + 'ifake_dev', '-s %s -p icmp -j RETURN' % prefix) + egress = None + self._test_prepare_port_filter(rule, ingress, egress) + + def test_filter_ipv4_ingress_tcp_port(self): + rule = {'ethertype': 'IPv4', + 'direction': 'ingress', + 'protocol': 'tcp', + 'port_range_min': 10, + 'port_range_max': 10} + ingress = mock.call.add_rule('ifake_dev', + '-p tcp -m tcp --dport 
10 -j RETURN') + egress = None + self._test_prepare_port_filter(rule, ingress, egress) + + def test_filter_ipv4_ingress_tcp_mport(self): + rule = {'ethertype': 'IPv4', + 'direction': 'ingress', + 'protocol': 'tcp', + 'port_range_min': 10, + 'port_range_max': 100} + ingress = mock.call.add_rule( + 'ifake_dev', + '-p tcp -m tcp -m multiport --dports 10:100 -j RETURN') + egress = None + self._test_prepare_port_filter(rule, ingress, egress) + + def test_filter_ipv4_ingress_tcp_mport_prefix(self): + prefix = FAKE_PREFIX['IPv4'] + rule = {'ethertype': 'IPv4', + 'direction': 'ingress', + 'protocol': 'tcp', + 'port_range_min': 10, + 'port_range_max': 100, + 'source_ip_prefix': prefix} + ingress = mock.call.add_rule( + 'ifake_dev', + '-s %s -p tcp -m tcp -m multiport --dports 10:100 ' + '-j RETURN' % prefix) + egress = None + self._test_prepare_port_filter(rule, ingress, egress) + + def test_filter_ipv4_ingress_udp(self): + rule = {'ethertype': 'IPv4', + 'direction': 'ingress', + 'protocol': 'udp'} + ingress = mock.call.add_rule('ifake_dev', '-p udp -m udp -j RETURN') + egress = None + self._test_prepare_port_filter(rule, ingress, egress) + + def test_filter_ipv4_ingress_udp_prefix(self): + prefix = FAKE_PREFIX['IPv4'] + rule = {'ethertype': 'IPv4', + 'direction': 'ingress', + 'protocol': 'udp', + 'source_ip_prefix': prefix} + ingress = mock.call.add_rule('ifake_dev', + '-s %s -p udp -m udp -j RETURN' % prefix) + egress = None + self._test_prepare_port_filter(rule, ingress, egress) + + def test_filter_ipv4_ingress_udp_port(self): + rule = {'ethertype': 'IPv4', + 'direction': 'ingress', + 'protocol': 'udp', + 'port_range_min': 10, + 'port_range_max': 10} + ingress = mock.call.add_rule('ifake_dev', + '-p udp -m udp --dport 10 -j RETURN') + egress = None + self._test_prepare_port_filter(rule, ingress, egress) + + def test_filter_ipv4_ingress_udp_mport(self): + rule = {'ethertype': 'IPv4', + 'direction': 'ingress', + 'protocol': 'udp', + 'port_range_min': 10, + 
'port_range_max': 100} + ingress = mock.call.add_rule( + 'ifake_dev', + '-p udp -m udp -m multiport --dports 10:100 -j RETURN') + egress = None + self._test_prepare_port_filter(rule, ingress, egress) + + def test_filter_ipv4_ingress_udp_mport_prefix(self): + prefix = FAKE_PREFIX['IPv4'] + rule = {'ethertype': 'IPv4', + 'direction': 'ingress', + 'protocol': 'udp', + 'port_range_min': 10, + 'port_range_max': 100, + 'source_ip_prefix': prefix} + ingress = mock.call.add_rule( + 'ifake_dev', + '-s %s -p udp -m udp -m multiport --dports 10:100 ' + '-j RETURN' % prefix) + egress = None + self._test_prepare_port_filter(rule, ingress, egress) + + def test_filter_ipv4_egress(self): + rule = {'ethertype': 'IPv4', + 'direction': 'egress'} + egress = mock.call.add_rule('ofake_dev', '-j RETURN') + ingress = None + self._test_prepare_port_filter(rule, ingress, egress) + + def test_filter_ipv4_egress_prefix(self): + prefix = FAKE_PREFIX['IPv4'] + rule = {'ethertype': 'IPv4', + 'direction': 'egress', + 'source_ip_prefix': prefix} + egress = mock.call.add_rule('ofake_dev', '-s %s -j RETURN' % prefix) + ingress = None + self._test_prepare_port_filter(rule, ingress, egress) + + def test_filter_ipv4_egress_tcp(self): + rule = {'ethertype': 'IPv4', + 'direction': 'egress', + 'protocol': 'tcp'} + egress = mock.call.add_rule('ofake_dev', '-p tcp -m tcp -j RETURN') + ingress = None + self._test_prepare_port_filter(rule, ingress, egress) + + def test_filter_ipv4_egress_tcp_prefix(self): + prefix = FAKE_PREFIX['IPv4'] + rule = {'ethertype': 'IPv4', + 'direction': 'egress', + 'protocol': 'tcp', + 'source_ip_prefix': prefix} + egress = mock.call.add_rule('ofake_dev', + '-s %s -p tcp -m tcp -j RETURN' % prefix) + ingress = None + self._test_prepare_port_filter(rule, ingress, egress) + + def test_filter_ipv4_egress_icmp(self): + rule = {'ethertype': 'IPv4', + 'direction': 'egress', + 'protocol': 'icmp'} + egress = mock.call.add_rule('ofake_dev', '-p icmp -j RETURN') + ingress = None + 
self._test_prepare_port_filter(rule, ingress, egress) + + def test_filter_ipv4_egress_icmp_prefix(self): + prefix = FAKE_PREFIX['IPv4'] + rule = {'ethertype': 'IPv4', + 'direction': 'egress', + 'protocol': 'icmp', + 'source_ip_prefix': prefix} + egress = mock.call.add_rule( + 'ofake_dev', '-s %s -p icmp -j RETURN' % prefix) + ingress = None + self._test_prepare_port_filter(rule, ingress, egress) + + def test_filter_ipv4_egress_icmp_type(self): + prefix = FAKE_PREFIX['IPv4'] + rule = {'ethertype': 'IPv4', + 'direction': 'egress', + 'protocol': 'icmp', + 'source_port_range_min': 8, + 'source_ip_prefix': prefix} + egress = mock.call.add_rule( + 'ofake_dev', + '-s %s -p icmp --icmp-type 8 -j RETURN' % prefix) + ingress = None + self._test_prepare_port_filter(rule, ingress, egress) + + def test_filter_ipv4_egress_icmp_type_name(self): + prefix = FAKE_PREFIX['IPv4'] + rule = {'ethertype': 'IPv4', + 'direction': 'egress', + 'protocol': 'icmp', + 'source_port_range_min': 'echo-request', + 'source_ip_prefix': prefix} + egress = mock.call.add_rule( + 'ofake_dev', + '-s %s -p icmp --icmp-type echo-request -j RETURN' % prefix) + ingress = None + self._test_prepare_port_filter(rule, ingress, egress) + + def test_filter_ipv4_egress_icmp_type_code(self): + prefix = FAKE_PREFIX['IPv4'] + rule = {'ethertype': 'IPv4', + 'direction': 'egress', + 'protocol': 'icmp', + 'source_port_range_min': 8, + 'source_port_range_max': 0, + 'source_ip_prefix': prefix} + egress = mock.call.add_rule( + 'ofake_dev', + '-s %s -p icmp --icmp-type 8/0 -j RETURN' % prefix) + ingress = None + self._test_prepare_port_filter(rule, ingress, egress) + + def test_filter_ipv4_egress_tcp_port(self): + rule = {'ethertype': 'IPv4', + 'direction': 'egress', + 'protocol': 'tcp', + 'port_range_min': 10, + 'port_range_max': 10} + egress = mock.call.add_rule('ofake_dev', + '-p tcp -m tcp --dport 10 -j RETURN') + ingress = None + self._test_prepare_port_filter(rule, ingress, egress) + + def 
test_filter_ipv4_egress_tcp_mport(self): + rule = {'ethertype': 'IPv4', + 'direction': 'egress', + 'protocol': 'tcp', + 'port_range_min': 10, + 'port_range_max': 100} + egress = mock.call.add_rule( + 'ofake_dev', + '-p tcp -m tcp -m multiport --dports 10:100 -j RETURN') + ingress = None + self._test_prepare_port_filter(rule, ingress, egress) + + def test_filter_ipv4_egress_tcp_mport_prefix(self): + prefix = FAKE_PREFIX['IPv4'] + rule = {'ethertype': 'IPv4', + 'direction': 'egress', + 'protocol': 'tcp', + 'port_range_min': 10, + 'port_range_max': 100, + 'source_ip_prefix': prefix} + egress = mock.call.add_rule( + 'ofake_dev', + '-s %s -p tcp -m tcp -m multiport --dports 10:100 ' + '-j RETURN' % prefix) + ingress = None + self._test_prepare_port_filter(rule, ingress, egress) + + def test_filter_ipv4_egress_udp(self): + rule = {'ethertype': 'IPv4', + 'direction': 'egress', + 'protocol': 'udp'} + egress = mock.call.add_rule('ofake_dev', '-p udp -m udp -j RETURN') + ingress = None + self._test_prepare_port_filter(rule, ingress, egress) + + def test_filter_ipv4_egress_udp_prefix(self): + prefix = FAKE_PREFIX['IPv4'] + rule = {'ethertype': 'IPv4', + 'direction': 'egress', + 'protocol': 'udp', + 'source_ip_prefix': prefix} + egress = mock.call.add_rule('ofake_dev', + '-s %s -p udp -m udp -j RETURN' % prefix) + ingress = None + self._test_prepare_port_filter(rule, ingress, egress) + + def test_filter_ipv4_egress_udp_port(self): + rule = {'ethertype': 'IPv4', + 'direction': 'egress', + 'protocol': 'udp', + 'port_range_min': 10, + 'port_range_max': 10} + egress = mock.call.add_rule('ofake_dev', + '-p udp -m udp --dport 10 -j RETURN') + ingress = None + self._test_prepare_port_filter(rule, ingress, egress) + + def test_filter_ipv4_egress_udp_mport(self): + rule = {'ethertype': 'IPv4', + 'direction': 'egress', + 'protocol': 'udp', + 'port_range_min': 10, + 'port_range_max': 100} + egress = mock.call.add_rule( + 'ofake_dev', + '-p udp -m udp -m multiport --dports 10:100 -j 
RETURN') + ingress = None + self._test_prepare_port_filter(rule, ingress, egress) + + def test_filter_ipv4_egress_udp_mport_prefix(self): + prefix = FAKE_PREFIX['IPv4'] + rule = {'ethertype': 'IPv4', + 'direction': 'egress', + 'protocol': 'udp', + 'port_range_min': 10, + 'port_range_max': 100, + 'source_ip_prefix': prefix} + egress = mock.call.add_rule( + 'ofake_dev', + '-s %s -p udp -m udp -m multiport --dports 10:100 ' + '-j RETURN' % prefix) + ingress = None + self._test_prepare_port_filter(rule, ingress, egress) + + def test_filter_ipv6_ingress(self): + rule = {'ethertype': 'IPv6', + 'direction': 'ingress'} + ingress = mock.call.add_rule('ifake_dev', '-j RETURN') + egress = None + self._test_prepare_port_filter(rule, ingress, egress) + + def test_filter_ipv6_ingress_prefix(self): + prefix = FAKE_PREFIX['IPv6'] + rule = {'ethertype': 'IPv6', + 'direction': 'ingress', + 'source_ip_prefix': prefix} + ingress = mock.call.add_rule('ifake_dev', '-s %s -j RETURN' % prefix) + egress = None + self._test_prepare_port_filter(rule, ingress, egress) + + def test_filter_ipv6_ingress_tcp(self): + rule = {'ethertype': 'IPv6', + 'direction': 'ingress', + 'protocol': 'tcp'} + ingress = mock.call.add_rule('ifake_dev', '-p tcp -m tcp -j RETURN') + egress = None + self._test_prepare_port_filter(rule, ingress, egress) + + def test_filter_ipv6_ingress_tcp_prefix(self): + prefix = FAKE_PREFIX['IPv6'] + rule = {'ethertype': 'IPv6', + 'direction': 'ingress', + 'protocol': 'tcp', + 'source_ip_prefix': prefix} + ingress = mock.call.add_rule('ifake_dev', + '-s %s -p tcp -m tcp -j RETURN' % prefix) + egress = None + self._test_prepare_port_filter(rule, ingress, egress) + + def test_filter_ipv6_ingress_tcp_port(self): + rule = {'ethertype': 'IPv6', + 'direction': 'ingress', + 'protocol': 'tcp', + 'port_range_min': 10, + 'port_range_max': 10} + ingress = mock.call.add_rule('ifake_dev', + '-p tcp -m tcp --dport 10 -j RETURN') + egress = None + self._test_prepare_port_filter(rule, ingress, 
egress) + + def test_filter_ipv6_ingress_icmp(self): + rule = {'ethertype': 'IPv6', + 'direction': 'ingress', + 'protocol': 'icmp'} + ingress = mock.call.add_rule('ifake_dev', '-p icmpv6 -j RETURN') + egress = None + self._test_prepare_port_filter(rule, ingress, egress) + + def test_filter_ipv6_ingress_icmp_prefix(self): + prefix = FAKE_PREFIX['IPv6'] + rule = {'ethertype': 'IPv6', + 'direction': 'ingress', + 'protocol': 'icmp', + 'source_ip_prefix': prefix} + ingress = mock.call.add_rule( + 'ifake_dev', '-s %s -p icmpv6 -j RETURN' % prefix) + egress = None + self._test_prepare_port_filter(rule, ingress, egress) + + def test_filter_ipv6_ingress_tcp_mport(self): + rule = {'ethertype': 'IPv6', + 'direction': 'ingress', + 'protocol': 'tcp', + 'port_range_min': 10, + 'port_range_max': 100} + ingress = mock.call.add_rule( + 'ifake_dev', + '-p tcp -m tcp -m multiport --dports 10:100 -j RETURN') + egress = None + self._test_prepare_port_filter(rule, ingress, egress) + + def test_filter_ipv6_ingress_tcp_mport_prefix(self): + prefix = FAKE_PREFIX['IPv6'] + rule = {'ethertype': 'IPv6', + 'direction': 'ingress', + 'protocol': 'tcp', + 'port_range_min': 10, + 'port_range_max': 100, + 'source_ip_prefix': prefix} + ingress = mock.call.add_rule( + 'ifake_dev', + '-s %s -p tcp -m tcp -m multiport --dports 10:100 ' + '-j RETURN' % prefix) + egress = None + self._test_prepare_port_filter(rule, ingress, egress) + + def test_filter_ipv6_ingress_udp(self): + rule = {'ethertype': 'IPv6', + 'direction': 'ingress', + 'protocol': 'udp'} + ingress = mock.call.add_rule('ifake_dev', '-p udp -m udp -j RETURN') + egress = None + self._test_prepare_port_filter(rule, ingress, egress) + + def test_filter_ipv6_ingress_udp_prefix(self): + prefix = FAKE_PREFIX['IPv6'] + rule = {'ethertype': 'IPv6', + 'direction': 'ingress', + 'protocol': 'udp', + 'source_ip_prefix': prefix} + ingress = mock.call.add_rule('ifake_dev', + '-s %s -p udp -m udp -j RETURN' % prefix) + egress = None + 
self._test_prepare_port_filter(rule, ingress, egress) + + def test_filter_ipv6_ingress_udp_port(self): + rule = {'ethertype': 'IPv6', + 'direction': 'ingress', + 'protocol': 'udp', + 'port_range_min': 10, + 'port_range_max': 10} + ingress = mock.call.add_rule('ifake_dev', + '-p udp -m udp --dport 10 -j RETURN') + egress = None + self._test_prepare_port_filter(rule, ingress, egress) + + def test_filter_ipv6_ingress_udp_mport(self): + rule = {'ethertype': 'IPv6', + 'direction': 'ingress', + 'protocol': 'udp', + 'port_range_min': 10, + 'port_range_max': 100} + ingress = mock.call.add_rule( + 'ifake_dev', + '-p udp -m udp -m multiport --dports 10:100 -j RETURN') + egress = None + self._test_prepare_port_filter(rule, ingress, egress) + + def test_filter_ipv6_ingress_udp_mport_prefix(self): + prefix = FAKE_PREFIX['IPv6'] + rule = {'ethertype': 'IPv6', + 'direction': 'ingress', + 'protocol': 'udp', + 'port_range_min': 10, + 'port_range_max': 100, + 'source_ip_prefix': prefix} + ingress = mock.call.add_rule( + 'ifake_dev', + '-s %s -p udp -m udp -m multiport --dports 10:100 ' + '-j RETURN' % prefix) + egress = None + self._test_prepare_port_filter(rule, ingress, egress) + + def test_filter_ipv6_egress(self): + rule = {'ethertype': 'IPv6', + 'direction': 'egress'} + egress = mock.call.add_rule('ofake_dev', '-j RETURN') + ingress = None + self._test_prepare_port_filter(rule, ingress, egress) + + def test_filter_ipv6_egress_prefix(self): + prefix = FAKE_PREFIX['IPv6'] + rule = {'ethertype': 'IPv6', + 'direction': 'egress', + 'source_ip_prefix': prefix} + egress = mock.call.add_rule('ofake_dev', '-s %s -j RETURN' % prefix) + ingress = None + self._test_prepare_port_filter(rule, ingress, egress) + + def test_filter_ipv6_egress_tcp(self): + rule = {'ethertype': 'IPv6', + 'direction': 'egress', + 'protocol': 'tcp'} + egress = mock.call.add_rule('ofake_dev', '-p tcp -m tcp -j RETURN') + ingress = None + self._test_prepare_port_filter(rule, ingress, egress) + + def 
test_filter_ipv6_egress_tcp_prefix(self): + prefix = FAKE_PREFIX['IPv6'] + rule = {'ethertype': 'IPv6', + 'direction': 'egress', + 'protocol': 'tcp', + 'source_ip_prefix': prefix} + egress = mock.call.add_rule('ofake_dev', + '-s %s -p tcp -m tcp -j RETURN' % prefix) + ingress = None + self._test_prepare_port_filter(rule, ingress, egress) + + def test_filter_ipv6_egress_icmp(self): + rule = {'ethertype': 'IPv6', + 'direction': 'egress', + 'protocol': 'icmp'} + egress = mock.call.add_rule('ofake_dev', '-p icmpv6 -j RETURN') + ingress = None + self._test_prepare_port_filter(rule, ingress, egress) + + def test_filter_ipv6_egress_icmp_prefix(self): + prefix = FAKE_PREFIX['IPv6'] + rule = {'ethertype': 'IPv6', + 'direction': 'egress', + 'protocol': 'icmp', + 'source_ip_prefix': prefix} + egress = mock.call.add_rule( + 'ofake_dev', '-s %s -p icmpv6 -j RETURN' % prefix) + ingress = None + self._test_prepare_port_filter(rule, ingress, egress) + + def test_filter_ipv6_egress_icmp_type(self): + prefix = FAKE_PREFIX['IPv6'] + rule = {'ethertype': 'IPv6', + 'direction': 'egress', + 'protocol': 'icmp', + 'source_port_range_min': 8, + 'source_ip_prefix': prefix} + egress = mock.call.add_rule( + 'ofake_dev', + '-s %s -p icmpv6 --icmpv6-type 8 -j RETURN' % prefix) + ingress = None + self._test_prepare_port_filter(rule, ingress, egress) + + def test_filter_ipv6_egress_icmp_type_name(self): + prefix = FAKE_PREFIX['IPv6'] + rule = {'ethertype': 'IPv6', + 'direction': 'egress', + 'protocol': 'icmp', + 'source_port_range_min': 'echo-request', + 'source_ip_prefix': prefix} + egress = mock.call.add_rule( + 'ofake_dev', + '-s %s -p icmpv6 --icmpv6-type echo-request -j RETURN' % prefix) + ingress = None + self._test_prepare_port_filter(rule, ingress, egress) + + def test_filter_ipv6_egress_icmp_type_code(self): + prefix = FAKE_PREFIX['IPv6'] + rule = {'ethertype': 'IPv6', + 'direction': 'egress', + 'protocol': 'icmp', + 'source_port_range_min': 8, + 'source_port_range_max': 0, + 
'source_ip_prefix': prefix} + egress = mock.call.add_rule( + 'ofake_dev', + '-s %s -p icmpv6 --icmpv6-type 8/0 -j RETURN' % prefix) + ingress = None + self._test_prepare_port_filter(rule, ingress, egress) + + def test_filter_ipv6_egress_tcp_port(self): + rule = {'ethertype': 'IPv6', + 'direction': 'egress', + 'protocol': 'tcp', + 'port_range_min': 10, + 'port_range_max': 10} + egress = mock.call.add_rule('ofake_dev', + '-p tcp -m tcp --dport 10 -j RETURN') + ingress = None + self._test_prepare_port_filter(rule, ingress, egress) + + def test_filter_ipv6_egress_tcp_mport(self): + rule = {'ethertype': 'IPv6', + 'direction': 'egress', + 'protocol': 'tcp', + 'port_range_min': 10, + 'port_range_max': 100} + egress = mock.call.add_rule( + 'ofake_dev', + '-p tcp -m tcp -m multiport --dports 10:100 -j RETURN') + ingress = None + self._test_prepare_port_filter(rule, ingress, egress) + + def test_filter_ipv6_egress_tcp_mport_prefix(self): + prefix = FAKE_PREFIX['IPv6'] + rule = {'ethertype': 'IPv6', + 'direction': 'egress', + 'protocol': 'tcp', + 'port_range_min': 10, + 'port_range_max': 100, + 'source_ip_prefix': prefix} + egress = mock.call.add_rule( + 'ofake_dev', + '-s %s -p tcp -m tcp -m multiport --dports 10:100 ' + '-j RETURN' % prefix) + ingress = None + self._test_prepare_port_filter(rule, ingress, egress) + + def test_filter_ipv6_egress_udp(self): + rule = {'ethertype': 'IPv6', + 'direction': 'egress', + 'protocol': 'udp'} + egress = mock.call.add_rule('ofake_dev', '-p udp -m udp -j RETURN') + ingress = None + self._test_prepare_port_filter(rule, ingress, egress) + + def test_filter_ipv6_egress_udp_prefix(self): + prefix = FAKE_PREFIX['IPv6'] + rule = {'ethertype': 'IPv6', + 'direction': 'egress', + 'protocol': 'udp', + 'source_ip_prefix': prefix} + egress = mock.call.add_rule('ofake_dev', + '-s %s -p udp -m udp -j RETURN' % prefix) + ingress = None + self._test_prepare_port_filter(rule, ingress, egress) + + def test_filter_ipv6_egress_udp_port(self): + rule = 
{'ethertype': 'IPv6', + 'direction': 'egress', + 'protocol': 'udp', + 'port_range_min': 10, + 'port_range_max': 10} + egress = mock.call.add_rule('ofake_dev', + '-p udp -m udp --dport 10 -j RETURN') + ingress = None + self._test_prepare_port_filter(rule, ingress, egress) + + def test_filter_ipv6_egress_udp_mport(self): + rule = {'ethertype': 'IPv6', + 'direction': 'egress', + 'protocol': 'udp', + 'port_range_min': 10, + 'port_range_max': 100} + egress = mock.call.add_rule( + 'ofake_dev', + '-p udp -m udp -m multiport --dports 10:100 -j RETURN') + ingress = None + self._test_prepare_port_filter(rule, ingress, egress) + + def test_filter_ipv6_egress_udp_mport_prefix(self): + prefix = FAKE_PREFIX['IPv6'] + rule = {'ethertype': 'IPv6', + 'direction': 'egress', + 'protocol': 'udp', + 'port_range_min': 10, + 'port_range_max': 100, + 'source_ip_prefix': prefix} + egress = mock.call.add_rule( + 'ofake_dev', + '-s %s -p udp -m udp -m multiport --dports 10:100 ' + '-j RETURN' % prefix) + ingress = None + self._test_prepare_port_filter(rule, ingress, egress) + + def _test_prepare_port_filter(self, + rule, + ingress_expected_call=None, + egress_expected_call=None): + port = self._fake_port() + ethertype = rule['ethertype'] + prefix = FAKE_IP[ethertype] + filter_inst = self.v4filter_inst + dhcp_rule = [mock.call.add_rule( + 'ofake_dev', + '-p udp -m udp --sport 68 --dport 67 -j RETURN')] + + if ethertype == 'IPv6': + filter_inst = self.v6filter_inst + + dhcp_rule = [mock.call.add_rule('ofake_dev', + '-p icmpv6 -j RETURN'), + mock.call.add_rule('ofake_dev', '-p udp -m udp ' + '--sport 546 --dport 547 ' + '-j RETURN')] + sg = [rule] + port['security_group_rules'] = sg + self.firewall.prepare_port_filter(port) + calls = [mock.call.add_chain('sg-fallback'), + mock.call.add_rule('sg-fallback', '-j DROP'), + mock.call.ensure_remove_chain('sg-chain'), + mock.call.add_chain('sg-chain'), + mock.call.add_chain('ifake_dev'), + mock.call.add_rule('FORWARD', + '-m physdev --physdev-out 
tapfake_dev ' + '--physdev-is-bridged ' + '-j $sg-chain'), + mock.call.add_rule('sg-chain', + '-m physdev --physdev-out tapfake_dev ' + '--physdev-is-bridged ' + '-j $ifake_dev'), + ] + if ethertype == 'IPv6': + for icmp6_type in constants.ICMPV6_ALLOWED_TYPES: + calls.append( + mock.call.add_rule('ifake_dev', + '-p icmpv6 --icmpv6-type %s -j RETURN' % + icmp6_type)) + calls += [mock.call.add_rule('ifake_dev', + '-m state --state INVALID -j DROP'), + mock.call.add_rule( + 'ifake_dev', + '-m state --state RELATED,ESTABLISHED -j RETURN')] + + if ingress_expected_call: + calls.append(ingress_expected_call) + + calls += [mock.call.add_rule('ifake_dev', '-j $sg-fallback'), + mock.call.add_chain('ofake_dev'), + mock.call.add_rule('FORWARD', + '-m physdev --physdev-in tapfake_dev ' + '--physdev-is-bridged ' + '-j $sg-chain'), + mock.call.add_rule('sg-chain', + '-m physdev --physdev-in tapfake_dev ' + '--physdev-is-bridged ' + '-j $ofake_dev'), + mock.call.add_rule('INPUT', + '-m physdev --physdev-in tapfake_dev ' + '--physdev-is-bridged ' + '-j $ofake_dev'), + mock.call.add_chain('sfake_dev'), + mock.call.add_rule( + 'sfake_dev', + '-m mac --mac-source ff:ff:ff:ff:ff:ff -s %s -j RETURN' + % prefix), + mock.call.add_rule('sfake_dev', '-j DROP')] + calls += dhcp_rule + calls.append(mock.call.add_rule('ofake_dev', '-j $sfake_dev')) + if ethertype == 'IPv4': + calls.append(mock.call.add_rule( + 'ofake_dev', + '-p udp -m udp --sport 67 --dport 68 -j DROP')) + if ethertype == 'IPv6': + calls.append(mock.call.add_rule( + 'ofake_dev', + '-p udp -m udp --sport 547 --dport 546 -j DROP')) + + calls += [mock.call.add_rule( + 'ofake_dev', '-m state --state INVALID -j DROP'), + mock.call.add_rule( + 'ofake_dev', + '-m state --state RELATED,ESTABLISHED -j RETURN')] + + if egress_expected_call: + calls.append(egress_expected_call) + + calls += [mock.call.add_rule('ofake_dev', '-j $sg-fallback'), + mock.call.add_rule('sg-chain', '-j ACCEPT')] + + filter_inst.assert_has_calls(calls) + + 
def test_update_delete_port_filter(self): + port = self._fake_port() + port['security_group_rules'] = [{'ethertype': 'IPv4', + 'direction': 'ingress'}] + self.firewall.prepare_port_filter(port) + port['security_group_rules'] = [{'ethertype': 'IPv4', + 'direction': 'egress'}] + self.firewall.update_port_filter(port) + self.firewall.update_port_filter({'device': 'no-exist-device'}) + self.firewall.remove_port_filter(port) + self.firewall.remove_port_filter({'device': 'no-exist-device'}) + calls = [mock.call.add_chain('sg-fallback'), + mock.call.add_rule('sg-fallback', '-j DROP'), + mock.call.ensure_remove_chain('sg-chain'), + mock.call.add_chain('sg-chain'), + mock.call.add_chain('ifake_dev'), + mock.call.add_rule( + 'FORWARD', + '-m physdev --physdev-out tapfake_dev ' + '--physdev-is-bridged -j $sg-chain'), + mock.call.add_rule( + 'sg-chain', + '-m physdev --physdev-out tapfake_dev ' + '--physdev-is-bridged -j $ifake_dev'), + mock.call.add_rule( + 'ifake_dev', '-m state --state INVALID -j DROP'), + mock.call.add_rule( + 'ifake_dev', + '-m state --state RELATED,ESTABLISHED -j RETURN'), + mock.call.add_rule('ifake_dev', '-j RETURN'), + mock.call.add_rule('ifake_dev', '-j $sg-fallback'), + mock.call.add_chain('ofake_dev'), + mock.call.add_rule( + 'FORWARD', + '-m physdev --physdev-in tapfake_dev ' + '--physdev-is-bridged -j $sg-chain'), + mock.call.add_rule( + 'sg-chain', + '-m physdev --physdev-in tapfake_dev ' + '--physdev-is-bridged -j $ofake_dev'), + mock.call.add_rule( + 'INPUT', + '-m physdev --physdev-in tapfake_dev ' + '--physdev-is-bridged -j $ofake_dev'), + mock.call.add_chain('sfake_dev'), + mock.call.add_rule( + 'sfake_dev', + '-m mac --mac-source ff:ff:ff:ff:ff:ff -s 10.0.0.1 ' + '-j RETURN'), + mock.call.add_rule('sfake_dev', '-j DROP'), + mock.call.add_rule( + 'ofake_dev', + '-p udp -m udp --sport 68 --dport 67 -j RETURN'), + mock.call.add_rule('ofake_dev', '-j $sfake_dev'), + mock.call.add_rule( + 'ofake_dev', + '-p udp -m udp --sport 67 --dport 68 -j 
DROP'), + mock.call.add_rule( + 'ofake_dev', '-m state --state INVALID -j DROP'), + mock.call.add_rule( + 'ofake_dev', + '-m state --state RELATED,ESTABLISHED -j RETURN'), + mock.call.add_rule('ofake_dev', '-j $sg-fallback'), + mock.call.add_rule('sg-chain', '-j ACCEPT'), + mock.call.ensure_remove_chain('ifake_dev'), + mock.call.ensure_remove_chain('ofake_dev'), + mock.call.ensure_remove_chain('sfake_dev'), + mock.call.ensure_remove_chain('sg-chain'), + mock.call.add_chain('sg-chain'), + mock.call.add_chain('ifake_dev'), + mock.call.add_rule( + 'FORWARD', + '-m physdev --physdev-out tapfake_dev ' + '--physdev-is-bridged -j $sg-chain'), + mock.call.add_rule( + 'sg-chain', + '-m physdev --physdev-out tapfake_dev ' + '--physdev-is-bridged -j $ifake_dev'), + mock.call.add_rule( + 'ifake_dev', + '-m state --state INVALID -j DROP'), + mock.call.add_rule( + 'ifake_dev', + '-m state --state RELATED,ESTABLISHED -j RETURN'), + mock.call.add_rule('ifake_dev', '-j $sg-fallback'), + mock.call.add_chain('ofake_dev'), + mock.call.add_rule( + 'FORWARD', + '-m physdev --physdev-in tapfake_dev ' + '--physdev-is-bridged -j $sg-chain'), + mock.call.add_rule( + 'sg-chain', + '-m physdev --physdev-in tapfake_dev ' + '--physdev-is-bridged -j $ofake_dev'), + mock.call.add_rule( + 'INPUT', + '-m physdev --physdev-in tapfake_dev ' + '--physdev-is-bridged -j $ofake_dev'), + mock.call.add_chain('sfake_dev'), + mock.call.add_rule( + 'sfake_dev', + '-m mac --mac-source ff:ff:ff:ff:ff:ff -s 10.0.0.1 ' + '-j RETURN'), + mock.call.add_rule('sfake_dev', '-j DROP'), + mock.call.add_rule( + 'ofake_dev', + '-p udp -m udp --sport 68 --dport 67 -j RETURN'), + mock.call.add_rule('ofake_dev', '-j $sfake_dev'), + mock.call.add_rule( + 'ofake_dev', + '-p udp -m udp --sport 67 --dport 68 -j DROP'), + mock.call.add_rule( + 'ofake_dev', '-m state --state INVALID -j DROP'), + mock.call.add_rule( + 'ofake_dev', + '-m state --state RELATED,ESTABLISHED -j RETURN'), + mock.call.add_rule('ofake_dev', '-j RETURN'), + 
mock.call.add_rule('ofake_dev', '-j $sg-fallback'), + mock.call.add_rule('sg-chain', '-j ACCEPT'), + mock.call.ensure_remove_chain('ifake_dev'), + mock.call.ensure_remove_chain('ofake_dev'), + mock.call.ensure_remove_chain('sfake_dev'), + mock.call.ensure_remove_chain('sg-chain'), + mock.call.add_chain('sg-chain')] + + self.v4filter_inst.assert_has_calls(calls) + + def test_remove_unknown_port(self): + port = self._fake_port() + self.firewall.remove_port_filter(port) + # checking no exception occures + self.v4filter_inst.assert_has_calls([]) + + def test_defer_apply(self): + with self.firewall.defer_apply(): + pass + self.iptables_inst.assert_has_calls([mock.call.defer_apply_on(), + mock.call.defer_apply_off()]) + + def test_filter_defer_with_exception(self): + try: + with self.firewall.defer_apply(): + raise Exception("same exception") + except Exception: + pass + self.iptables_inst.assert_has_calls([mock.call.defer_apply_on(), + mock.call.defer_apply_off()]) + + def _mock_chain_applies(self): + class CopyingMock(mock.MagicMock): + """Copies arguments so mutable arguments can be asserted on. + + Copied verbatim from unittest.mock documentation. + """ + def __call__(self, *args, **kwargs): + args = copy.deepcopy(args) + kwargs = copy.deepcopy(kwargs) + return super(CopyingMock, self).__call__(*args, **kwargs) + # Need to use CopyingMock because _{setup,remove}_chains_apply are + # usually called with that's modified between calls (i.e., + # self.firewall.filtered_ports). 
+ chain_applies = CopyingMock() + self.firewall._setup_chains_apply = chain_applies.setup + self.firewall._remove_chains_apply = chain_applies.remove + return chain_applies + + def test_mock_chain_applies(self): + chain_applies = self._mock_chain_applies() + port_prepare = {'device': 'd1', 'mac_address': 'prepare'} + port_update = {'device': 'd1', 'mac_address': 'update'} + self.firewall.prepare_port_filter(port_prepare) + self.firewall.update_port_filter(port_update) + self.firewall.remove_port_filter(port_update) + chain_applies.assert_has_calls([mock.call.remove({}), + mock.call.setup({'d1': port_prepare}), + mock.call.remove({'d1': port_prepare}), + mock.call.setup({'d1': port_update}), + mock.call.remove({'d1': port_update}), + mock.call.setup({})]) + + def test_defer_chain_apply_need_pre_defer_copy(self): + chain_applies = self._mock_chain_applies() + port = self._fake_port() + device2port = {port['device']: port} + self.firewall.prepare_port_filter(port) + with self.firewall.defer_apply(): + self.firewall.remove_port_filter(port) + chain_applies.assert_has_calls([mock.call.remove({}), + mock.call.setup(device2port), + mock.call.remove(device2port), + mock.call.setup({})]) + + def test_defer_chain_apply_coalesce_simple(self): + chain_applies = self._mock_chain_applies() + port = self._fake_port() + with self.firewall.defer_apply(): + self.firewall.prepare_port_filter(port) + self.firewall.update_port_filter(port) + self.firewall.remove_port_filter(port) + chain_applies.assert_has_calls([mock.call.remove({}), + mock.call.setup({})]) + + def test_defer_chain_apply_coalesce_multiple_ports(self): + chain_applies = self._mock_chain_applies() + port1 = {'device': 'd1', 'mac_address': 'mac1'} + port2 = {'device': 'd2', 'mac_address': 'mac2'} + device2port = {'d1': port1, 'd2': port2} + with self.firewall.defer_apply(): + self.firewall.prepare_port_filter(port1) + self.firewall.prepare_port_filter(port2) + chain_applies.assert_has_calls([mock.call.remove({}), + 
mock.call.setup(device2port)]) + + def test_ip_spoofing_filter_with_multiple_ips(self): + port = {'device': 'tapfake_dev', + 'mac_address': 'ff:ff:ff:ff:ff:ff', + 'fixed_ips': ['10.0.0.1', 'fe80::1', '10.0.0.2']} + self.firewall.prepare_port_filter(port) + calls = [mock.call.add_chain('sg-fallback'), + mock.call.add_rule('sg-fallback', '-j DROP'), + mock.call.ensure_remove_chain('sg-chain'), + mock.call.add_chain('sg-chain'), + mock.call.add_chain('ifake_dev'), + mock.call.add_rule('FORWARD', + '-m physdev --physdev-out tapfake_dev ' + '--physdev-is-bridged ' + '-j $sg-chain'), + mock.call.add_rule('sg-chain', + '-m physdev --physdev-out tapfake_dev ' + '--physdev-is-bridged ' + '-j $ifake_dev'), + mock.call.add_rule( + 'ifake_dev', '-m state --state INVALID -j DROP'), + mock.call.add_rule( + 'ifake_dev', + '-m state --state RELATED,ESTABLISHED -j RETURN'), + mock.call.add_rule('ifake_dev', '-j $sg-fallback'), + mock.call.add_chain('ofake_dev'), + mock.call.add_rule('FORWARD', + '-m physdev --physdev-in tapfake_dev ' + '--physdev-is-bridged ' + '-j $sg-chain'), + mock.call.add_rule('sg-chain', + '-m physdev --physdev-in tapfake_dev ' + '--physdev-is-bridged ' + '-j $ofake_dev'), + mock.call.add_rule('INPUT', + '-m physdev --physdev-in tapfake_dev ' + '--physdev-is-bridged ' + '-j $ofake_dev'), + mock.call.add_chain('sfake_dev'), + mock.call.add_rule( + 'sfake_dev', + '-m mac --mac-source ff:ff:ff:ff:ff:ff -s 10.0.0.1 ' + '-j RETURN'), + mock.call.add_rule( + 'sfake_dev', + '-m mac --mac-source ff:ff:ff:ff:ff:ff -s 10.0.0.2 ' + '-j RETURN'), + mock.call.add_rule('sfake_dev', '-j DROP'), + mock.call.add_rule( + 'ofake_dev', + '-p udp -m udp --sport 68 --dport 67 -j RETURN'), + mock.call.add_rule('ofake_dev', '-j $sfake_dev'), + mock.call.add_rule( + 'ofake_dev', + '-p udp -m udp --sport 67 --dport 68 -j DROP'), + mock.call.add_rule( + 'ofake_dev', '-m state --state INVALID -j DROP'), + mock.call.add_rule( + 'ofake_dev', + '-m state --state RELATED,ESTABLISHED -j 
RETURN'), + mock.call.add_rule('ofake_dev', '-j $sg-fallback'), + mock.call.add_rule('sg-chain', '-j ACCEPT')] + self.v4filter_inst.assert_has_calls(calls) + + def test_ip_spoofing_no_fixed_ips(self): + port = {'device': 'tapfake_dev', + 'mac_address': 'ff:ff:ff:ff:ff:ff', + 'fixed_ips': []} + self.firewall.prepare_port_filter(port) + calls = [mock.call.add_chain('sg-fallback'), + mock.call.add_rule('sg-fallback', '-j DROP'), + mock.call.ensure_remove_chain('sg-chain'), + mock.call.add_chain('sg-chain'), + mock.call.add_chain('ifake_dev'), + mock.call.add_rule('FORWARD', + '-m physdev --physdev-out tapfake_dev ' + '--physdev-is-bridged ' + '-j $sg-chain'), + mock.call.add_rule('sg-chain', + '-m physdev --physdev-out tapfake_dev ' + '--physdev-is-bridged ' + '-j $ifake_dev'), + mock.call.add_rule( + 'ifake_dev', '-m state --state INVALID -j DROP'), + mock.call.add_rule( + 'ifake_dev', + '-m state --state RELATED,ESTABLISHED -j RETURN'), + mock.call.add_rule('ifake_dev', '-j $sg-fallback'), + mock.call.add_chain('ofake_dev'), + mock.call.add_rule('FORWARD', + '-m physdev --physdev-in tapfake_dev ' + '--physdev-is-bridged ' + '-j $sg-chain'), + mock.call.add_rule('sg-chain', + '-m physdev --physdev-in tapfake_dev ' + '--physdev-is-bridged ' + '-j $ofake_dev'), + mock.call.add_rule('INPUT', + '-m physdev --physdev-in tapfake_dev ' + '--physdev-is-bridged ' + '-j $ofake_dev'), + mock.call.add_chain('sfake_dev'), + mock.call.add_rule( + 'sfake_dev', + '-m mac --mac-source ff:ff:ff:ff:ff:ff -j RETURN'), + mock.call.add_rule('sfake_dev', '-j DROP'), + mock.call.add_rule( + 'ofake_dev', + '-p udp -m udp --sport 68 --dport 67 -j RETURN'), + mock.call.add_rule('ofake_dev', '-j $sfake_dev'), + mock.call.add_rule( + 'ofake_dev', + '-p udp -m udp --sport 67 --dport 68 -j DROP'), + mock.call.add_rule( + 'ofake_dev', '-m state --state INVALID -j DROP'), + mock.call.add_rule( + 'ofake_dev', + '-m state --state RELATED,ESTABLISHED -j RETURN'), + mock.call.add_rule('ofake_dev', '-j 
$sg-fallback'), + mock.call.add_rule('sg-chain', '-j ACCEPT')] + self.v4filter_inst.assert_has_calls(calls) diff --git a/neutron/tests/unit/test_iptables_manager.py b/neutron/tests/unit/test_iptables_manager.py new file mode 100644 index 000000000..5e16406bd --- /dev/null +++ b/neutron/tests/unit/test_iptables_manager.py @@ -0,0 +1,705 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 Locaweb. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# @author: Juliano Martinez, Locaweb. 
+ +import inspect +import os + +import mock + +from neutron.agent.linux import iptables_manager +from neutron.tests import base +from neutron.tests import tools + + +IPTABLES_ARG = {'bn': iptables_manager.binary_name} + +NAT_DUMP = ('# Generated by iptables_manager\n' + '*nat\n' + ':neutron-postrouting-bottom - [0:0]\n' + ':%(bn)s-OUTPUT - [0:0]\n' + ':%(bn)s-snat - [0:0]\n' + ':%(bn)s-PREROUTING - [0:0]\n' + ':%(bn)s-float-snat - [0:0]\n' + ':%(bn)s-POSTROUTING - [0:0]\n' + '[0:0] -A PREROUTING -j %(bn)s-PREROUTING\n' + '[0:0] -A OUTPUT -j %(bn)s-OUTPUT\n' + '[0:0] -A POSTROUTING -j %(bn)s-POSTROUTING\n' + '[0:0] -A POSTROUTING -j neutron-postrouting-bottom\n' + '[0:0] -A neutron-postrouting-bottom -j %(bn)s-snat\n' + '[0:0] -A %(bn)s-snat -j ' + '%(bn)s-float-snat\n' + 'COMMIT\n' + '# Completed by iptables_manager\n' % IPTABLES_ARG) + +FILTER_DUMP = ('# Generated by iptables_manager\n' + '*filter\n' + ':neutron-filter-top - [0:0]\n' + ':%(bn)s-FORWARD - [0:0]\n' + ':%(bn)s-INPUT - [0:0]\n' + ':%(bn)s-local - [0:0]\n' + ':%(bn)s-OUTPUT - [0:0]\n' + '[0:0] -A FORWARD -j neutron-filter-top\n' + '[0:0] -A OUTPUT -j neutron-filter-top\n' + '[0:0] -A neutron-filter-top -j %(bn)s-local\n' + '[0:0] -A INPUT -j %(bn)s-INPUT\n' + '[0:0] -A OUTPUT -j %(bn)s-OUTPUT\n' + '[0:0] -A FORWARD -j %(bn)s-FORWARD\n' + 'COMMIT\n' + '# Completed by iptables_manager\n' % IPTABLES_ARG) + + +class IptablesManagerStateFulTestCase(base.BaseTestCase): + + def setUp(self): + super(IptablesManagerStateFulTestCase, self).setUp() + self.root_helper = 'sudo' + self.iptables = (iptables_manager. + IptablesManager(root_helper=self.root_helper)) + self.execute = mock.patch.object(self.iptables, "execute").start() + + def test_binary_name(self): + self.assertEqual(iptables_manager.binary_name, + os.path.basename(inspect.stack()[-1][1])[:16]) + + def test_get_chain_name(self): + name = '0123456789' * 5 + # 28 chars is the maximum length of iptables chain name. 
+ self.assertEqual(iptables_manager.get_chain_name(name, wrap=False), + name[:28]) + # 11 chars is the maximum length of chain name of iptable_manager + # if binary_name is prepended. + self.assertEqual(iptables_manager.get_chain_name(name, wrap=True), + name[:11]) + + def test_add_and_remove_chain_custom_binary_name(self): + bn = ("abcdef" * 5) + + self.iptables = (iptables_manager. + IptablesManager(root_helper=self.root_helper, + binary_name=bn)) + self.execute = mock.patch.object(self.iptables, "execute").start() + + iptables_args = {'bn': bn[:16]} + + filter_dump = ('# Generated by iptables_manager\n' + '*filter\n' + ':neutron-filter-top - [0:0]\n' + ':%(bn)s-FORWARD - [0:0]\n' + ':%(bn)s-INPUT - [0:0]\n' + ':%(bn)s-local - [0:0]\n' + ':%(bn)s-filter - [0:0]\n' + ':%(bn)s-OUTPUT - [0:0]\n' + '[0:0] -A FORWARD -j neutron-filter-top\n' + '[0:0] -A OUTPUT -j neutron-filter-top\n' + '[0:0] -A neutron-filter-top -j %(bn)s-local\n' + '[0:0] -A INPUT -j %(bn)s-INPUT\n' + '[0:0] -A OUTPUT -j %(bn)s-OUTPUT\n' + '[0:0] -A FORWARD -j %(bn)s-FORWARD\n' + 'COMMIT\n' + '# Completed by iptables_manager\n' % iptables_args) + + filter_dump_mod = ('# Generated by iptables_manager\n' + '*filter\n' + ':neutron-filter-top - [0:0]\n' + ':%(bn)s-FORWARD - [0:0]\n' + ':%(bn)s-INPUT - [0:0]\n' + ':%(bn)s-local - [0:0]\n' + ':%(bn)s-filter - [0:0]\n' + ':%(bn)s-OUTPUT - [0:0]\n' + '[0:0] -A FORWARD -j neutron-filter-top\n' + '[0:0] -A OUTPUT -j neutron-filter-top\n' + '[0:0] -A neutron-filter-top -j %(bn)s-local\n' + '[0:0] -A INPUT -j %(bn)s-INPUT\n' + '[0:0] -A OUTPUT -j %(bn)s-OUTPUT\n' + '[0:0] -A FORWARD -j %(bn)s-FORWARD\n' + 'COMMIT\n' + '# Completed by iptables_manager\n' + % iptables_args) + + nat_dump = ('# Generated by iptables_manager\n' + '*nat\n' + ':neutron-postrouting-bottom - [0:0]\n' + ':%(bn)s-OUTPUT - [0:0]\n' + ':%(bn)s-snat - [0:0]\n' + ':%(bn)s-PREROUTING - [0:0]\n' + ':%(bn)s-float-snat - [0:0]\n' + ':%(bn)s-POSTROUTING - [0:0]\n' + '[0:0] -A PREROUTING -j 
%(bn)s-PREROUTING\n' + '[0:0] -A OUTPUT -j %(bn)s-OUTPUT\n' + '[0:0] -A POSTROUTING -j %(bn)s-POSTROUTING\n' + '[0:0] -A POSTROUTING -j neutron-postrouting-bottom\n' + '[0:0] -A neutron-postrouting-bottom -j %(bn)s-snat\n' + '[0:0] -A %(bn)s-snat -j ' + '%(bn)s-float-snat\n' + 'COMMIT\n' + '# Completed by iptables_manager\n' % iptables_args) + + expected_calls_and_values = [ + (mock.call(['iptables-save', '-c'], + root_helper=self.root_helper), + ''), + (mock.call(['iptables-restore', '-c'], + process_input=nat_dump + filter_dump_mod, + root_helper=self.root_helper), + None), + (mock.call(['iptables-save', '-c'], + root_helper=self.root_helper), + ''), + (mock.call(['iptables-restore', '-c'], + process_input=nat_dump + filter_dump, + root_helper=self.root_helper), + None), + ] + tools.setup_mock_calls(self.execute, expected_calls_and_values) + + self.iptables.ipv4['filter'].add_chain('filter') + self.iptables.apply() + + self.iptables.ipv4['filter'].empty_chain('filter') + self.iptables.apply() + + tools.verify_mock_calls(self.execute, expected_calls_and_values) + + def test_empty_chain_custom_binary_name(self): + bn = ("abcdef" * 5)[:16] + + self.iptables = (iptables_manager. 
+ IptablesManager(root_helper=self.root_helper, + binary_name=bn)) + self.execute = mock.patch.object(self.iptables, "execute").start() + + iptables_args = {'bn': bn} + + filter_dump = ('# Generated by iptables_manager\n' + '*filter\n' + ':neutron-filter-top - [0:0]\n' + ':%(bn)s-FORWARD - [0:0]\n' + ':%(bn)s-INPUT - [0:0]\n' + ':%(bn)s-local - [0:0]\n' + ':%(bn)s-OUTPUT - [0:0]\n' + '[0:0] -A FORWARD -j neutron-filter-top\n' + '[0:0] -A OUTPUT -j neutron-filter-top\n' + '[0:0] -A neutron-filter-top -j %(bn)s-local\n' + '[0:0] -A INPUT -j %(bn)s-INPUT\n' + '[0:0] -A OUTPUT -j %(bn)s-OUTPUT\n' + '[0:0] -A FORWARD -j %(bn)s-FORWARD\n' + 'COMMIT\n' + '# Completed by iptables_manager\n' % iptables_args) + + filter_dump_mod = ('# Generated by iptables_manager\n' + '*filter\n' + ':neutron-filter-top - [0:0]\n' + ':%(bn)s-FORWARD - [0:0]\n' + ':%(bn)s-INPUT - [0:0]\n' + ':%(bn)s-local - [0:0]\n' + ':%(bn)s-filter - [0:0]\n' + ':%(bn)s-OUTPUT - [0:0]\n' + '[0:0] -A FORWARD -j neutron-filter-top\n' + '[0:0] -A OUTPUT -j neutron-filter-top\n' + '[0:0] -A neutron-filter-top -j %(bn)s-local\n' + '[0:0] -A INPUT -j %(bn)s-INPUT\n' + '[0:0] -A OUTPUT -j %(bn)s-OUTPUT\n' + '[0:0] -A FORWARD -j %(bn)s-FORWARD\n' + '[0:0] -A %(bn)s-filter -s 0/0 -d 192.168.0.2\n' + 'COMMIT\n' + '# Completed by iptables_manager\n' + % iptables_args) + + nat_dump = ('# Generated by iptables_manager\n' + '*nat\n' + ':neutron-postrouting-bottom - [0:0]\n' + ':%(bn)s-OUTPUT - [0:0]\n' + ':%(bn)s-snat - [0:0]\n' + ':%(bn)s-PREROUTING - [0:0]\n' + ':%(bn)s-float-snat - [0:0]\n' + ':%(bn)s-POSTROUTING - [0:0]\n' + '[0:0] -A PREROUTING -j %(bn)s-PREROUTING\n' + '[0:0] -A OUTPUT -j %(bn)s-OUTPUT\n' + '[0:0] -A POSTROUTING -j %(bn)s-POSTROUTING\n' + '[0:0] -A POSTROUTING -j neutron-postrouting-bottom\n' + '[0:0] -A neutron-postrouting-bottom -j %(bn)s-snat\n' + '[0:0] -A %(bn)s-snat -j ' + '%(bn)s-float-snat\n' + 'COMMIT\n' + '# Completed by iptables_manager\n' % iptables_args) + + expected_calls_and_values = 
[ + (mock.call(['iptables-save', '-c'], + root_helper=self.root_helper), + ''), + (mock.call(['iptables-restore', '-c'], + process_input=nat_dump + filter_dump_mod, + root_helper=self.root_helper), + None), + (mock.call(['iptables-save', '-c'], + root_helper=self.root_helper), + ''), + (mock.call(['iptables-restore', '-c'], + process_input=nat_dump + filter_dump, + root_helper=self.root_helper), + None), + ] + tools.setup_mock_calls(self.execute, expected_calls_and_values) + + self.iptables.ipv4['filter'].add_chain('filter') + self.iptables.ipv4['filter'].add_rule('filter', + '-s 0/0 -d 192.168.0.2') + self.iptables.apply() + + self.iptables.ipv4['filter'].remove_chain('filter') + self.iptables.apply() + + tools.verify_mock_calls(self.execute, expected_calls_and_values) + + def test_add_and_remove_chain(self): + filter_dump_mod = ('# Generated by iptables_manager\n' + '*filter\n' + ':neutron-filter-top - [0:0]\n' + ':%(bn)s-FORWARD - [0:0]\n' + ':%(bn)s-INPUT - [0:0]\n' + ':%(bn)s-local - [0:0]\n' + ':%(bn)s-filter - [0:0]\n' + ':%(bn)s-OUTPUT - [0:0]\n' + '[0:0] -A FORWARD -j neutron-filter-top\n' + '[0:0] -A OUTPUT -j neutron-filter-top\n' + '[0:0] -A neutron-filter-top -j %(bn)s-local\n' + '[0:0] -A INPUT -j %(bn)s-INPUT\n' + '[0:0] -A OUTPUT -j %(bn)s-OUTPUT\n' + '[0:0] -A FORWARD -j %(bn)s-FORWARD\n' + 'COMMIT\n' + '# Completed by iptables_manager\n' + % IPTABLES_ARG) + + expected_calls_and_values = [ + (mock.call(['iptables-save', '-c'], + root_helper=self.root_helper), + ''), + (mock.call(['iptables-restore', '-c'], + process_input=NAT_DUMP + filter_dump_mod, + root_helper=self.root_helper), + None), + (mock.call(['iptables-save', '-c'], + root_helper=self.root_helper), + ''), + (mock.call(['iptables-restore', '-c'], + process_input=NAT_DUMP + FILTER_DUMP, + root_helper=self.root_helper), + None), + ] + tools.setup_mock_calls(self.execute, expected_calls_and_values) + + self.iptables.ipv4['filter'].add_chain('filter') + self.iptables.apply() + + 
self.iptables.ipv4['filter'].remove_chain('filter') + self.iptables.apply() + + tools.verify_mock_calls(self.execute, expected_calls_and_values) + + def test_add_filter_rule(self): + filter_dump_mod = ('# Generated by iptables_manager\n' + '*filter\n' + ':neutron-filter-top - [0:0]\n' + ':%(bn)s-FORWARD - [0:0]\n' + ':%(bn)s-INPUT - [0:0]\n' + ':%(bn)s-local - [0:0]\n' + ':%(bn)s-filter - [0:0]\n' + ':%(bn)s-OUTPUT - [0:0]\n' + '[0:0] -A FORWARD -j neutron-filter-top\n' + '[0:0] -A OUTPUT -j neutron-filter-top\n' + '[0:0] -A neutron-filter-top -j %(bn)s-local\n' + '[0:0] -A INPUT -j %(bn)s-INPUT\n' + '[0:0] -A OUTPUT -j %(bn)s-OUTPUT\n' + '[0:0] -A FORWARD -j %(bn)s-FORWARD\n' + '[0:0] -A %(bn)s-filter -j DROP\n' + '[0:0] -A %(bn)s-INPUT -s 0/0 -d 192.168.0.2 -j ' + '%(bn)s-filter\n' + 'COMMIT\n' + '# Completed by iptables_manager\n' + % IPTABLES_ARG) + + expected_calls_and_values = [ + (mock.call(['iptables-save', '-c'], + root_helper=self.root_helper), + ''), + (mock.call(['iptables-restore', '-c'], + process_input=NAT_DUMP + filter_dump_mod, + root_helper=self.root_helper), + None), + (mock.call(['iptables-save', '-c'], + root_helper=self.root_helper), + ''), + (mock.call(['iptables-restore', '-c'], + process_input=NAT_DUMP + FILTER_DUMP, + root_helper=self.root_helper + ), + None), + ] + tools.setup_mock_calls(self.execute, expected_calls_and_values) + + self.iptables.ipv4['filter'].add_chain('filter') + self.iptables.ipv4['filter'].add_rule('filter', '-j DROP') + self.iptables.ipv4['filter'].add_rule('INPUT', + '-s 0/0 -d 192.168.0.2 -j' + ' %(bn)s-filter' % IPTABLES_ARG) + self.iptables.apply() + + self.iptables.ipv4['filter'].remove_rule('filter', '-j DROP') + self.iptables.ipv4['filter'].remove_rule('INPUT', + '-s 0/0 -d 192.168.0.2 -j' + ' %(bn)s-filter' + % IPTABLES_ARG) + self.iptables.ipv4['filter'].remove_chain('filter') + + self.iptables.apply() + + tools.verify_mock_calls(self.execute, expected_calls_and_values) + + def 
test_rule_with_wrap_target(self): + name = '0123456789' * 5 + wrap = "%s-%s" % (iptables_manager.binary_name, + iptables_manager.get_chain_name(name)) + + iptables_args = {'bn': iptables_manager.binary_name, + 'wrap': wrap} + + filter_dump_mod = ('# Generated by iptables_manager\n' + '*filter\n' + ':neutron-filter-top - [0:0]\n' + ':%(bn)s-FORWARD - [0:0]\n' + ':%(bn)s-INPUT - [0:0]\n' + ':%(bn)s-local - [0:0]\n' + ':%(wrap)s - [0:0]\n' + ':%(bn)s-OUTPUT - [0:0]\n' + '[0:0] -A FORWARD -j neutron-filter-top\n' + '[0:0] -A OUTPUT -j neutron-filter-top\n' + '[0:0] -A neutron-filter-top -j %(bn)s-local\n' + '[0:0] -A INPUT -j %(bn)s-INPUT\n' + '[0:0] -A OUTPUT -j %(bn)s-OUTPUT\n' + '[0:0] -A FORWARD -j %(bn)s-FORWARD\n' + '[0:0] -A %(bn)s-INPUT -s 0/0 -d 192.168.0.2 -j ' + '%(wrap)s\n' + 'COMMIT\n' + '# Completed by iptables_manager\n' + % iptables_args) + + expected_calls_and_values = [ + (mock.call(['iptables-save', '-c'], + root_helper=self.root_helper), + ''), + (mock.call(['iptables-restore', '-c'], + process_input=NAT_DUMP + filter_dump_mod, + root_helper=self.root_helper), + None), + (mock.call(['iptables-save', '-c'], + root_helper=self.root_helper), + ''), + (mock.call(['iptables-restore', '-c'], + process_input=NAT_DUMP + FILTER_DUMP, + root_helper=self.root_helper), + None), + ] + tools.setup_mock_calls(self.execute, expected_calls_and_values) + + self.iptables.ipv4['filter'].add_chain(name) + self.iptables.ipv4['filter'].add_rule('INPUT', + '-s 0/0 -d 192.168.0.2 -j' + ' $%s' % name) + self.iptables.apply() + + self.iptables.ipv4['filter'].remove_rule('INPUT', + '-s 0/0 -d 192.168.0.2 -j' + ' $%s' % name) + self.iptables.ipv4['filter'].remove_chain(name) + + self.iptables.apply() + + tools.verify_mock_calls(self.execute, expected_calls_and_values) + + def test_add_nat_rule(self): + nat_dump = ('# Generated by iptables_manager\n' + '*nat\n' + ':neutron-postrouting-bottom - [0:0]\n' + ':%(bn)s-float-snat - [0:0]\n' + ':%(bn)s-POSTROUTING - [0:0]\n' + 
':%(bn)s-PREROUTING - [0:0]\n' + ':%(bn)s-OUTPUT - [0:0]\n' + ':%(bn)s-snat - [0:0]\n' + '[0:0] -A PREROUTING -j %(bn)s-PREROUTING\n' + '[0:0] -A OUTPUT -j %(bn)s-OUTPUT\n' + '[0:0] -A POSTROUTING -j %(bn)s-POSTROUTING\n' + '[0:0] -A POSTROUTING -j neutron-postrouting-bottom\n' + '[0:0] -A neutron-postrouting-bottom -j %(bn)s-snat\n' + '[0:0] -A %(bn)s-snat -j %(bn)s-float-snat\n' + 'COMMIT\n' + '# Completed by iptables_manager\n' + % IPTABLES_ARG) + + nat_dump_mod = ('# Generated by iptables_manager\n' + '*nat\n' + ':neutron-postrouting-bottom - [0:0]\n' + ':%(bn)s-float-snat - [0:0]\n' + ':%(bn)s-POSTROUTING - [0:0]\n' + ':%(bn)s-PREROUTING - [0:0]\n' + ':%(bn)s-nat - [0:0]\n' + ':%(bn)s-OUTPUT - [0:0]\n' + ':%(bn)s-snat - [0:0]\n' + '[0:0] -A PREROUTING -j %(bn)s-PREROUTING\n' + '[0:0] -A OUTPUT -j %(bn)s-OUTPUT\n' + '[0:0] -A POSTROUTING -j %(bn)s-POSTROUTING\n' + '[0:0] -A POSTROUTING -j neutron-postrouting-bottom\n' + '[0:0] -A neutron-postrouting-bottom -j %(bn)s-snat\n' + '[0:0] -A %(bn)s-snat -j %(bn)s-float-snat\n' + '[0:0] -A %(bn)s-PREROUTING -d 192.168.0.3 -j ' + '%(bn)s-nat\n' + '[0:0] -A %(bn)s-nat -p tcp --dport 8080 -j ' + 'REDIRECT --to-port 80\n' + 'COMMIT\n' + '# Completed by iptables_manager\n' + % IPTABLES_ARG) + + expected_calls_and_values = [ + (mock.call(['iptables-save', '-c'], + root_helper=self.root_helper), + ''), + (mock.call(['iptables-restore', '-c'], + process_input=nat_dump_mod + FILTER_DUMP, + root_helper=self.root_helper), + None), + (mock.call(['iptables-save', '-c'], + root_helper=self.root_helper), + ''), + (mock.call(['iptables-restore', '-c'], + process_input=nat_dump + FILTER_DUMP, + root_helper=self.root_helper), + None), + ] + tools.setup_mock_calls(self.execute, expected_calls_and_values) + + self.iptables.ipv4['nat'].add_chain('nat') + self.iptables.ipv4['nat'].add_rule('PREROUTING', + '-d 192.168.0.3 -j ' + '%(bn)s-nat' % IPTABLES_ARG) + self.iptables.ipv4['nat'].add_rule('nat', + '-p tcp --dport 8080' + + ' -j 
REDIRECT --to-port 80') + + self.iptables.apply() + + self.iptables.ipv4['nat'].remove_rule('nat', + '-p tcp --dport 8080 -j' + ' REDIRECT --to-port 80') + self.iptables.ipv4['nat'].remove_rule('PREROUTING', + '-d 192.168.0.3 -j ' + '%(bn)s-nat' % IPTABLES_ARG) + self.iptables.ipv4['nat'].remove_chain('nat') + + self.iptables.apply() + + tools.verify_mock_calls(self.execute, expected_calls_and_values) + + def test_add_rule_to_a_nonexistent_chain(self): + self.assertRaises(LookupError, self.iptables.ipv4['filter'].add_rule, + 'nonexistent', '-j DROP') + + def test_remove_nonexistent_chain(self): + with mock.patch.object(iptables_manager, "LOG") as log: + self.iptables.ipv4['filter'].remove_chain('nonexistent') + log.warn.assert_called_once_with( + 'Attempted to remove chain %s which does not exist', + 'nonexistent') + + def test_remove_nonexistent_rule(self): + with mock.patch.object(iptables_manager, "LOG") as log: + self.iptables.ipv4['filter'].remove_rule('nonexistent', '-j DROP') + log.warn.assert_called_once_with( + 'Tried to remove rule that was not there: ' + '%(chain)r %(rule)r %(wrap)r %(top)r', + {'wrap': True, 'top': False, 'rule': '-j DROP', + 'chain': 'nonexistent'}) + + def test_iptables_failure_with_no_failing_line_number(self): + with mock.patch.object(iptables_manager, "LOG") as log: + # generate Runtime errors on iptables-restore calls + def iptables_restore_failer(*args, **kwargs): + if 'iptables-restore' in args[0]: + self.input_lines = kwargs['process_input'].split('\n') + # don't provide a specific failure message so all lines + # are logged + raise RuntimeError() + return FILTER_DUMP + self.execute.side_effect = iptables_restore_failer + # _apply_synchronized calls iptables-restore so it should raise + # a RuntimeError + self.assertRaises(RuntimeError, + self.iptables._apply_synchronized) + # The RuntimeError should have triggered a log of the input to the + # process that it failed to execute. 
Verify by comparing the log + # call to the 'process_input' arg given to the failed iptables-restore + # call. + # Failure without a specific line number in the error should cause + # all lines to be logged with numbers. + logged = ['%7d. %s' % (n, l) + for n, l in enumerate(self.input_lines, 1)] + log.error.assert_called_once_with(_( + 'IPTablesManager.apply failed to apply the ' + 'following set of iptables rules:\n%s'), + '\n'.join(logged) + ) + + def test_iptables_failure_on_specific_line(self): + with mock.patch.object(iptables_manager, "LOG") as log: + # generate Runtime errors on iptables-restore calls + def iptables_restore_failer(*args, **kwargs): + if 'iptables-restore' in args[0]: + self.input_lines = kwargs['process_input'].split('\n') + # pretend line 11 failed + msg = ("Exit code: 1\nStdout: ''\n" + "Stderr: 'iptables-restore: line 11 failed\n'") + raise RuntimeError(msg) + return FILTER_DUMP + self.execute.side_effect = iptables_restore_failer + # _apply_synchronized calls iptables-restore so it should raise + # a RuntimeError + self.assertRaises(RuntimeError, + self.iptables._apply_synchronized) + # The RuntimeError should have triggered a log of the input to the + # process that it failed to execute. Verify by comparing the log + # call to the 'process_input' arg given to the failed iptables-restore + # call. + # Line 11 of the input was marked as failing so lines (11 - context) + # to (11 + context) should be logged + ctx = iptables_manager.IPTABLES_ERROR_LINES_OF_CONTEXT + log_start = max(0, 11 - ctx) + log_end = 11 + ctx + logged = ['%7d. 
%s' % (n, l) + for n, l in enumerate(self.input_lines[log_start:log_end], + log_start + 1)] + log.error.assert_called_once_with(_( + 'IPTablesManager.apply failed to apply the ' + 'following set of iptables rules:\n%s'), + '\n'.join(logged) + ) + + def test_get_traffic_counters_chain_notexists(self): + with mock.patch.object(iptables_manager, "LOG") as log: + acc = self.iptables.get_traffic_counters('chain1') + self.assertIsNone(acc) + self.assertEqual(0, self.execute.call_count) + log.warn.assert_called_once_with( + 'Attempted to get traffic counters of chain %s which ' + 'does not exist', 'chain1') + + def test_get_traffic_counters(self): + iptables_dump = ( + 'Chain OUTPUT (policy ACCEPT 400 packets, 65901 bytes)\n' + ' pkts bytes target prot opt in out source' + ' destination \n' + ' 400 65901 chain1 all -- * * 0.0.0.0/0' + ' 0.0.0.0/0 \n' + ' 400 65901 chain2 all -- * * 0.0.0.0/0' + ' 0.0.0.0/0 \n') + + expected_calls_and_values = [ + (mock.call(['iptables', '-t', 'filter', '-L', 'OUTPUT', + '-n', '-v', '-x'], + root_helper=self.root_helper), + iptables_dump), + (mock.call(['iptables', '-t', 'nat', '-L', 'OUTPUT', '-n', + '-v', '-x'], + root_helper=self.root_helper), + ''), + (mock.call(['ip6tables', '-t', 'filter', '-L', 'OUTPUT', + '-n', '-v', '-x'], + root_helper=self.root_helper), + iptables_dump), + ] + tools.setup_mock_calls(self.execute, expected_calls_and_values) + + acc = self.iptables.get_traffic_counters('OUTPUT') + self.assertEqual(acc['pkts'], 1600) + self.assertEqual(acc['bytes'], 263604) + + tools.verify_mock_calls(self.execute, expected_calls_and_values) + + def test_get_traffic_counters_with_zero(self): + iptables_dump = ( + 'Chain OUTPUT (policy ACCEPT 400 packets, 65901 bytes)\n' + ' pkts bytes target prot opt in out source' + ' destination \n' + ' 400 65901 chain1 all -- * * 0.0.0.0/0' + ' 0.0.0.0/0 \n' + ' 400 65901 chain2 all -- * * 0.0.0.0/0' + ' 0.0.0.0/0 \n') + + expected_calls_and_values = [ + (mock.call(['iptables', '-t', 'filter', 
'-L', 'OUTPUT', + '-n', '-v', '-x', '-Z'], + root_helper=self.root_helper), + iptables_dump), + (mock.call(['iptables', '-t', 'nat', '-L', 'OUTPUT', '-n', + '-v', '-x', '-Z'], + root_helper=self.root_helper), + ''), + (mock.call(['ip6tables', '-t', 'filter', '-L', 'OUTPUT', + '-n', '-v', '-x', '-Z'], + root_helper=self.root_helper), + iptables_dump), + ] + tools.setup_mock_calls(self.execute, expected_calls_and_values) + + acc = self.iptables.get_traffic_counters('OUTPUT', zero=True) + self.assertEqual(acc['pkts'], 1600) + self.assertEqual(acc['bytes'], 263604) + + tools.verify_mock_calls(self.execute, expected_calls_and_values) + + def _test_find_last_entry(self, find_str): + filter_list = [':neutron-filter-top - [0:0]', + ':%(bn)s-FORWARD - [0:0]', + ':%(bn)s-INPUT - [0:0]', + ':%(bn)s-local - [0:0]', + ':%(wrap)s - [0:0]', + ':%(bn)s-OUTPUT - [0:0]', + '[0:0] -A FORWARD -j neutron-filter-top', + '[0:0] -A OUTPUT -j neutron-filter-top' + % IPTABLES_ARG] + + return self.iptables._find_last_entry(filter_list, find_str) + + def test_find_last_entry_old_dup(self): + find_str = 'neutron-filter-top' + match_str = '[0:0] -A OUTPUT -j neutron-filter-top' + ret_str = self._test_find_last_entry(find_str) + self.assertEqual(ret_str, match_str) + + def test_find_last_entry_none(self): + find_str = 'neutron-filter-NOTFOUND' + ret_str = self._test_find_last_entry(find_str) + self.assertIsNone(ret_str) + + +class IptablesManagerStateLessTestCase(base.BaseTestCase): + + def setUp(self): + super(IptablesManagerStateLessTestCase, self).setUp() + self.iptables = (iptables_manager.IptablesManager(state_less=True)) + + def test_nat_not_found(self): + self.assertNotIn('nat', self.iptables.ipv4) diff --git a/neutron/tests/unit/test_ipv6.py b/neutron/tests/unit/test_ipv6.py new file mode 100644 index 000000000..47bfd2a4b --- /dev/null +++ b/neutron/tests/unit/test_ipv6.py @@ -0,0 +1,50 @@ +# Copyright 2013 IBM Corp. +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from neutron.common import ipv6_utils +from neutron.tests import base + + +class IPv6byEUI64TestCase(base.BaseTestCase): + """Unit tests for generate IPv6 by EUI-64 operations.""" + + def test_generate_IPv6_by_EUI64(self): + addr = ipv6_utils.get_ipv6_addr_by_EUI64('2001:db8::', + '00:16:3e:33:44:55') + self.assertEqual('2001:db8::216:3eff:fe33:4455', addr.format()) + + def test_generate_IPv6_with_IPv4_prefix(self): + ipv4_prefix = '10.0.8' + mac = '00:16:3e:33:44:55' + self.assertRaises(TypeError, lambda: + ipv6_utils.get_ipv6_addr_by_EUI64(ipv4_prefix, mac)) + + def test_generate_IPv6_with_bad_mac(self): + bad_mac = '00:16:3e:33:44:5Z' + prefix = '2001:db8::' + self.assertRaises(TypeError, lambda: + ipv6_utils.get_ipv6_addr_by_EUI64(prefix, bad_mac)) + + def test_generate_IPv6_with_bad_prefix(self): + mac = '00:16:3e:33:44:55' + bad_prefix = 'bb' + self.assertRaises(TypeError, lambda: + ipv6_utils.get_ipv6_addr_by_EUI64(bad_prefix, mac)) + + def test_generate_IPv6_with_error_prefix_type(self): + mac = '00:16:3e:33:44:55' + prefix = 123 + self.assertRaises(TypeError, lambda: + ipv6_utils.get_ipv6_addr_by_EUI64(prefix, mac)) diff --git a/neutron/tests/unit/test_l3_agent.py b/neutron/tests/unit/test_l3_agent.py new file mode 100644 index 000000000..841425dfb --- /dev/null +++ b/neutron/tests/unit/test_l3_agent.py @@ -0,0 +1,1379 @@ +# Copyright 2012 VMware, Inc. +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import contextlib +import copy + +import mock +import netaddr +from oslo.config import cfg +from testtools import matchers + +from neutron.agent.common import config as agent_config +from neutron.agent import l3_agent +from neutron.agent.linux import interface +from neutron.common import config as base_config +from neutron.common import constants as l3_constants +from neutron.common import exceptions as n_exc +from neutron.openstack.common import processutils +from neutron.openstack.common import uuidutils +from neutron.tests import base + + +_uuid = uuidutils.generate_uuid +HOSTNAME = 'myhost' +FAKE_ID = _uuid() + + +class TestBasicRouterOperations(base.BaseTestCase): + + def setUp(self): + super(TestBasicRouterOperations, self).setUp() + self.conf = cfg.ConfigOpts() + self.conf.register_opts(base_config.core_opts) + self.conf.register_opts(l3_agent.L3NATAgent.OPTS) + agent_config.register_interface_driver_opts_helper(self.conf) + agent_config.register_use_namespaces_opts_helper(self.conf) + agent_config.register_root_helper(self.conf) + self.conf.register_opts(interface.OPTS) + self.conf.set_override('router_id', 'fake_id') + self.conf.set_override('interface_driver', + 'neutron.agent.linux.interface.NullDriver') + self.conf.root_helper = 'sudo' + + self.device_exists_p = mock.patch( + 'neutron.agent.linux.ip_lib.device_exists') + self.device_exists = self.device_exists_p.start() + + self.utils_exec_p = mock.patch( + 
'neutron.agent.linux.utils.execute') + self.utils_exec = self.utils_exec_p.start() + + self.external_process_p = mock.patch( + 'neutron.agent.linux.external_process.ProcessManager') + self.external_process = self.external_process_p.start() + + self.send_arp_p = mock.patch( + 'neutron.agent.l3_agent.L3NATAgent._send_gratuitous_arp_packet') + self.send_arp = self.send_arp_p.start() + + self.dvr_cls_p = mock.patch('neutron.agent.linux.interface.NullDriver') + driver_cls = self.dvr_cls_p.start() + self.mock_driver = mock.MagicMock() + self.mock_driver.DEV_NAME_LEN = ( + interface.LinuxInterfaceDriver.DEV_NAME_LEN) + driver_cls.return_value = self.mock_driver + + self.ip_cls_p = mock.patch('neutron.agent.linux.ip_lib.IPWrapper') + ip_cls = self.ip_cls_p.start() + self.mock_ip = mock.MagicMock() + ip_cls.return_value = self.mock_ip + + self.l3pluginApi_cls_p = mock.patch( + 'neutron.agent.l3_agent.L3PluginApi') + l3pluginApi_cls = self.l3pluginApi_cls_p.start() + self.plugin_api = mock.Mock() + l3pluginApi_cls.return_value = self.plugin_api + + self.looping_call_p = mock.patch( + 'neutron.openstack.common.loopingcall.FixedIntervalLoopingCall') + self.looping_call_p.start() + + def test_router_info_create(self): + id = _uuid() + ri = l3_agent.RouterInfo(id, self.conf.root_helper, + self.conf.use_namespaces, None) + + self.assertTrue(ri.ns_name.endswith(id)) + + def test_router_info_create_with_router(self): + id = _uuid() + ex_gw_port = {'id': _uuid(), + 'network_id': _uuid(), + 'fixed_ips': [{'ip_address': '19.4.4.4', + 'subnet_id': _uuid()}], + 'subnet': {'cidr': '19.4.4.0/24', + 'gateway_ip': '19.4.4.1'}} + router = { + 'id': _uuid(), + 'enable_snat': True, + 'routes': [], + 'gw_port': ex_gw_port} + ri = l3_agent.RouterInfo(id, self.conf.root_helper, + self.conf.use_namespaces, router) + self.assertTrue(ri.ns_name.endswith(id)) + self.assertEqual(ri.router, router) + + def test_agent_create(self): + l3_agent.L3NATAgent(HOSTNAME, self.conf) + + def 
_test_internal_network_action(self, action): + port_id = _uuid() + router_id = _uuid() + network_id = _uuid() + ri = l3_agent.RouterInfo(router_id, self.conf.root_helper, + self.conf.use_namespaces, None) + agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) + cidr = '99.0.1.9/24' + mac = 'ca:fe:de:ad:be:ef' + interface_name = agent.get_internal_device_name(port_id) + + if action == 'add': + self.device_exists.return_value = False + agent.internal_network_added(ri, network_id, + port_id, cidr, mac) + self.assertEqual(self.mock_driver.plug.call_count, 1) + self.assertEqual(self.mock_driver.init_l3.call_count, 1) + self.send_arp.assert_called_once_with(ri, interface_name, + '99.0.1.9') + elif action == 'remove': + self.device_exists.return_value = True + agent.internal_network_removed(ri, port_id, cidr) + self.assertEqual(self.mock_driver.unplug.call_count, 1) + else: + raise Exception("Invalid action %s" % action) + + def test_agent_add_internal_network(self): + self._test_internal_network_action('add') + + def test_agent_remove_internal_network(self): + self._test_internal_network_action('remove') + + def _test_external_gateway_action(self, action): + router_id = _uuid() + ri = l3_agent.RouterInfo(router_id, self.conf.root_helper, + self.conf.use_namespaces, None) + agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) + internal_cidrs = ['100.0.1.0/24', '200.74.0.0/16'] + ex_gw_port = {'fixed_ips': [{'ip_address': '20.0.0.30', + 'subnet_id': _uuid()}], + 'subnet': {'gateway_ip': '20.0.0.1'}, + 'extra_subnets': [{'cidr': '172.16.0.0/24'}], + 'id': _uuid(), + 'network_id': _uuid(), + 'mac_address': 'ca:fe:de:ad:be:ef', + 'ip_cidr': '20.0.0.30/24'} + interface_name = agent.get_external_device_name(ex_gw_port['id']) + + if action == 'add': + self.device_exists.return_value = False + ri.router = mock.Mock() + ri.router.get.return_value = [{'floating_ip_address': + '192.168.1.34'}] + agent.external_gateway_added(ri, ex_gw_port, + interface_name, internal_cidrs) + 
self.assertEqual(self.mock_driver.plug.call_count, 1) + self.assertEqual(self.mock_driver.init_l3.call_count, 1) + self.send_arp.assert_called_once_with(ri, interface_name, + '20.0.0.30') + kwargs = {'preserve_ips': ['192.168.1.34/32'], + 'namespace': 'qrouter-' + router_id, + 'gateway': '20.0.0.1', + 'extra_subnets': [{'cidr': '172.16.0.0/24'}]} + self.mock_driver.init_l3.assert_called_with(interface_name, + ['20.0.0.30/24'], + **kwargs) + + elif action == 'remove': + self.device_exists.return_value = True + agent.external_gateway_removed(ri, ex_gw_port, + interface_name, internal_cidrs) + self.assertEqual(self.mock_driver.unplug.call_count, 1) + else: + raise Exception("Invalid action %s" % action) + + def test_agent_add_external_gateway(self): + self._test_external_gateway_action('add') + + def _test_arping(self, namespace): + if not namespace: + self.conf.set_override('use_namespaces', False) + + router_id = _uuid() + ri = l3_agent.RouterInfo(router_id, self.conf.root_helper, + self.conf.use_namespaces, None) + agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) + floating_ip = '20.0.0.101' + interface_name = agent.get_external_device_name(router_id) + agent._arping(ri, interface_name, floating_ip) + + arping_cmd = ['arping', '-A', + '-I', interface_name, + '-c', self.conf.send_arp_for_ha, + floating_ip] + self.mock_ip.netns.execute.assert_any_call( + arping_cmd, check_exit_code=True) + + def test_arping_namespace(self): + self._test_arping(namespace=True) + + def test_arping_no_namespace(self): + self._test_arping(namespace=False) + + def test_agent_remove_external_gateway(self): + self._test_external_gateway_action('remove') + + def _check_agent_method_called(self, agent, calls, namespace): + self.mock_ip.netns.execute.assert_has_calls( + [mock.call(call, check_exit_code=False) for call in calls], + any_order=True) + + def _test_routing_table_update(self, namespace): + if not namespace: + self.conf.set_override('use_namespaces', False) + + router_id = _uuid() + 
ri = l3_agent.RouterInfo(router_id, self.conf.root_helper, + self.conf.use_namespaces, + None) + agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) + + fake_route1 = {'destination': '135.207.0.0/16', + 'nexthop': '1.2.3.4'} + fake_route2 = {'destination': '135.207.111.111/32', + 'nexthop': '1.2.3.4'} + + agent._update_routing_table(ri, 'replace', fake_route1) + expected = [['ip', 'route', 'replace', 'to', '135.207.0.0/16', + 'via', '1.2.3.4']] + self._check_agent_method_called(agent, expected, namespace) + + agent._update_routing_table(ri, 'delete', fake_route1) + expected = [['ip', 'route', 'delete', 'to', '135.207.0.0/16', + 'via', '1.2.3.4']] + self._check_agent_method_called(agent, expected, namespace) + + agent._update_routing_table(ri, 'replace', fake_route2) + expected = [['ip', 'route', 'replace', 'to', '135.207.111.111/32', + 'via', '1.2.3.4']] + self._check_agent_method_called(agent, expected, namespace) + + agent._update_routing_table(ri, 'delete', fake_route2) + expected = [['ip', 'route', 'delete', 'to', '135.207.111.111/32', + 'via', '1.2.3.4']] + self._check_agent_method_called(agent, expected, namespace) + + def test_agent_routing_table_updated(self): + self._test_routing_table_update(namespace=True) + + def test_agent_routing_table_updated_no_namespace(self): + self._test_routing_table_update(namespace=False) + + def test_routes_updated(self): + self._test_routes_updated(namespace=True) + + def test_routes_updated_no_namespace(self): + self._test_routes_updated(namespace=False) + + def _test_routes_updated(self, namespace=True): + if not namespace: + self.conf.set_override('use_namespaces', False) + agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) + router_id = _uuid() + + ri = l3_agent.RouterInfo(router_id, self.conf.root_helper, + self.conf.use_namespaces, + None) + ri.router = {} + + fake_old_routes = [] + fake_new_routes = [{'destination': "110.100.31.0/24", + 'nexthop': "10.100.10.30"}, + {'destination': "110.100.30.0/24", + 'nexthop': 
"10.100.10.30"}] + ri.routes = fake_old_routes + ri.router['routes'] = fake_new_routes + agent.routes_updated(ri) + + expected = [['ip', 'route', 'replace', 'to', '110.100.30.0/24', + 'via', '10.100.10.30'], + ['ip', 'route', 'replace', 'to', '110.100.31.0/24', + 'via', '10.100.10.30']] + + self._check_agent_method_called(agent, expected, namespace) + + fake_new_routes = [{'destination': "110.100.30.0/24", + 'nexthop': "10.100.10.30"}] + ri.router['routes'] = fake_new_routes + agent.routes_updated(ri) + expected = [['ip', 'route', 'delete', 'to', '110.100.31.0/24', + 'via', '10.100.10.30']] + + self._check_agent_method_called(agent, expected, namespace) + fake_new_routes = [] + ri.router['routes'] = fake_new_routes + agent.routes_updated(ri) + + expected = [['ip', 'route', 'delete', 'to', '110.100.30.0/24', + 'via', '10.100.10.30']] + self._check_agent_method_called(agent, expected, namespace) + + def _verify_snat_rules(self, rules, router, negate=False): + interfaces = router[l3_constants.INTERFACE_KEY] + source_cidrs = [] + for interface in interfaces: + prefix = interface['subnet']['cidr'].split('/')[1] + source_cidr = "%s/%s" % (interface['fixed_ips'][0]['ip_address'], + prefix) + source_cidrs.append(source_cidr) + source_nat_ip = router['gw_port']['fixed_ips'][0]['ip_address'] + interface_name = ('qg-%s' % router['gw_port']['id'])[:14] + expected_rules = [ + '! -i %s ! -o %s -m conntrack ! 
--ctstate DNAT -j ACCEPT' % + (interface_name, interface_name)] + for source_cidr in source_cidrs: + # Create SNAT rules for IPv4 only + if (netaddr.IPNetwork(source_cidr).version == 4 and + netaddr.IPNetwork(source_nat_ip).version == 4): + value_dict = {'source_cidr': source_cidr, + 'source_nat_ip': source_nat_ip} + expected_rules.append('-s %(source_cidr)s -j SNAT --to-source ' + '%(source_nat_ip)s' % value_dict) + for r in rules: + if negate: + self.assertNotIn(r.rule, expected_rules) + else: + self.assertIn(r.rule, expected_rules) + + def _prepare_router_data(self, ip_version=4, + enable_snat=None, num_internal_ports=1): + if ip_version == 4: + ip_addr = '19.4.4.4' + cidr = '19.4.4.0/24' + gateway_ip = '19.4.4.1' + elif ip_version == 6: + ip_addr = 'fd00::4' + cidr = 'fd00::/64' + gateway_ip = 'fd00::1' + + router_id = _uuid() + ex_gw_port = {'id': _uuid(), + 'network_id': _uuid(), + 'fixed_ips': [{'ip_address': ip_addr, + 'subnet_id': _uuid()}], + 'subnet': {'cidr': cidr, + 'gateway_ip': gateway_ip}} + int_ports = [] + for i in range(num_internal_ports): + int_ports.append({'id': _uuid(), + 'network_id': _uuid(), + 'admin_state_up': True, + 'fixed_ips': [{'ip_address': '35.4.%s.4' % i, + 'subnet_id': _uuid()}], + 'mac_address': 'ca:fe:de:ad:be:ef', + 'subnet': {'cidr': '35.4.%s.0/24' % i, + 'gateway_ip': '35.4.%s.1' % i}}) + + router = { + 'id': router_id, + l3_constants.INTERFACE_KEY: int_ports, + 'routes': [], + 'gw_port': ex_gw_port} + if enable_snat is not None: + router['enable_snat'] = enable_snat + return router + + def test_process_router(self): + agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) + fake_fip_id = 'fake_fip_id' + agent.process_router_floating_ip_addresses = mock.Mock() + agent.process_router_floating_ip_nat_rules = mock.Mock() + agent.process_router_floating_ip_addresses.return_value = { + fake_fip_id: 'ACTIVE'} + agent.external_gateway_added = mock.Mock() + router = self._prepare_router_data() + fake_floatingips1 = {'floatingips': [ + 
{'id': fake_fip_id, + 'floating_ip_address': '8.8.8.8', + 'fixed_ip_address': '7.7.7.7', + 'port_id': _uuid()}]} + ri = l3_agent.RouterInfo(router['id'], self.conf.root_helper, + self.conf.use_namespaces, router=router) + agent.process_router(ri) + ex_gw_port = agent._get_ex_gw_port(ri) + agent.process_router_floating_ip_addresses.assert_called_with( + ri, ex_gw_port) + agent.process_router_floating_ip_addresses.reset_mock() + agent.process_router_floating_ip_nat_rules.assert_called_with(ri) + agent.process_router_floating_ip_nat_rules.reset_mock() + + # remap floating IP to a new fixed ip + fake_floatingips2 = copy.deepcopy(fake_floatingips1) + fake_floatingips2['floatingips'][0]['fixed_ip_address'] = '7.7.7.8' + + router[l3_constants.FLOATINGIP_KEY] = fake_floatingips2['floatingips'] + agent.process_router(ri) + ex_gw_port = agent._get_ex_gw_port(ri) + agent.process_router_floating_ip_addresses.assert_called_with( + ri, ex_gw_port) + agent.process_router_floating_ip_addresses.reset_mock() + agent.process_router_floating_ip_nat_rules.assert_called_with(ri) + agent.process_router_floating_ip_nat_rules.reset_mock() + + # remove just the floating ips + del router[l3_constants.FLOATINGIP_KEY] + agent.process_router(ri) + ex_gw_port = agent._get_ex_gw_port(ri) + agent.process_router_floating_ip_addresses.assert_called_with( + ri, ex_gw_port) + agent.process_router_floating_ip_addresses.reset_mock() + agent.process_router_floating_ip_nat_rules.assert_called_with(ri) + agent.process_router_floating_ip_nat_rules.reset_mock() + + # now no ports so state is torn down + del router[l3_constants.INTERFACE_KEY] + del router['gw_port'] + agent.process_router(ri) + self.assertEqual(self.send_arp.call_count, 1) + self.assertFalse(agent.process_router_floating_ip_addresses.called) + self.assertFalse(agent.process_router_floating_ip_nat_rules.called) + + @mock.patch('neutron.agent.linux.ip_lib.IPDevice') + def test_process_router_floating_ip_addresses_add(self, IPDevice): + fip_id = 
_uuid() + fip = { + 'id': fip_id, 'port_id': _uuid(), + 'floating_ip_address': '15.1.2.3', + 'fixed_ip_address': '192.168.0.1' + } + + IPDevice.return_value = device = mock.Mock() + device.addr.list.return_value = [] + + ri = mock.MagicMock() + ri.router.get.return_value = [fip] + + agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) + + fip_statuses = agent.process_router_floating_ip_addresses( + ri, {'id': _uuid()}) + self.assertEqual({fip_id: l3_constants.FLOATINGIP_STATUS_ACTIVE}, + fip_statuses) + device.addr.add.assert_called_once_with(4, '15.1.2.3/32', '15.1.2.3') + + def test_process_router_floating_ip_nat_rules_add(self): + fip = { + 'id': _uuid(), 'port_id': _uuid(), + 'floating_ip_address': '15.1.2.3', + 'fixed_ip_address': '192.168.0.1' + } + + ri = mock.MagicMock() + ri.router.get.return_value = [fip] + + agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) + + agent.process_router_floating_ip_nat_rules(ri) + + nat = ri.iptables_manager.ipv4['nat'] + nat.clear_rules_by_tag.assert_called_once_with('floating_ip') + rules = agent.floating_forward_rules('15.1.2.3', '192.168.0.1') + for chain, rule in rules: + nat.add_rule.assert_any_call(chain, rule, tag='floating_ip') + + @mock.patch('neutron.agent.linux.ip_lib.IPDevice') + def test_process_router_floating_ip_addresses_remove(self, IPDevice): + IPDevice.return_value = device = mock.Mock() + device.addr.list.return_value = [{'cidr': '15.1.2.3/32'}] + + ri = mock.MagicMock() + ri.router.get.return_value = [] + + agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) + + fip_statuses = agent.process_router_floating_ip_addresses( + ri, {'id': _uuid()}) + self.assertEqual({}, fip_statuses) + device.addr.delete.assert_called_once_with(4, '15.1.2.3/32') + + def test_process_router_floating_ip_nat_rules_remove(self): + ri = mock.MagicMock() + ri.router.get.return_value = [] + + agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) + + agent.process_router_floating_ip_nat_rules(ri) + + nat = ri.iptables_manager.ipv4['nat'] + nat = 
ri.iptables_manager.ipv4['nat'] + nat.clear_rules_by_tag.assert_called_once_with('floating_ip') + + @mock.patch('neutron.agent.linux.ip_lib.IPDevice') + def test_process_router_floating_ip_addresses_remap(self, IPDevice): + fip_id = _uuid() + fip = { + 'id': fip_id, 'port_id': _uuid(), + 'floating_ip_address': '15.1.2.3', + 'fixed_ip_address': '192.168.0.2' + } + + IPDevice.return_value = device = mock.Mock() + device.addr.list.return_value = [{'cidr': '15.1.2.3/32'}] + ri = mock.MagicMock() + + ri.router.get.return_value = [fip] + + agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) + + fip_statuses = agent.process_router_floating_ip_addresses( + ri, {'id': _uuid()}) + self.assertEqual({fip_id: l3_constants.FLOATINGIP_STATUS_ACTIVE}, + fip_statuses) + + self.assertFalse(device.addr.add.called) + self.assertFalse(device.addr.delete.called) + + @mock.patch('neutron.agent.linux.ip_lib.IPDevice') + def test_process_router_with_disabled_floating_ip(self, IPDevice): + fip_id = _uuid() + fip = { + 'id': fip_id, 'port_id': _uuid(), + 'floating_ip_address': '15.1.2.3', + 'fixed_ip_address': '192.168.0.2' + } + + ri = mock.MagicMock() + ri.floating_ips = [fip] + ri.router.get.return_value = [] + + agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) + + fip_statuses = agent.process_router_floating_ip_addresses( + ri, {'id': _uuid()}) + + self.assertIsNone(fip_statuses.get(fip_id)) + + @mock.patch('neutron.agent.linux.ip_lib.IPDevice') + def test_process_router_floating_ip_with_device_add_error(self, IPDevice): + IPDevice.return_value = device = mock.Mock() + device.addr.add.side_effect = processutils.ProcessExecutionError + device.addr.list.return_value = [] + fip_id = _uuid() + fip = { + 'id': fip_id, 'port_id': _uuid(), + 'floating_ip_address': '15.1.2.3', + 'fixed_ip_address': '192.168.0.2' + } + ri = mock.MagicMock() + ri.router.get.return_value = [fip] + + agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) + + fip_statuses = agent.process_router_floating_ip_addresses( + ri, 
{'id': _uuid()}) + + self.assertEqual({fip_id: l3_constants.FLOATINGIP_STATUS_ERROR}, + fip_statuses) + + def test_process_router_snat_disabled(self): + agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) + router = self._prepare_router_data(enable_snat=True) + ri = l3_agent.RouterInfo(router['id'], self.conf.root_helper, + self.conf.use_namespaces, router=router) + agent.external_gateway_added = mock.Mock() + # Process with NAT + agent.process_router(ri) + orig_nat_rules = ri.iptables_manager.ipv4['nat'].rules[:] + # Reprocess without NAT + router['enable_snat'] = False + # Reassign the router object to RouterInfo + ri.router = router + agent.process_router(ri) + # For some reason set logic does not work well with + # IpTablesRule instances + nat_rules_delta = [r for r in orig_nat_rules + if r not in ri.iptables_manager.ipv4['nat'].rules] + self.assertEqual(len(nat_rules_delta), 2) + self._verify_snat_rules(nat_rules_delta, router) + self.assertEqual(self.send_arp.call_count, 1) + + def test_process_router_snat_enabled(self): + agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) + router = self._prepare_router_data(enable_snat=False) + ri = l3_agent.RouterInfo(router['id'], self.conf.root_helper, + self.conf.use_namespaces, router=router) + agent.external_gateway_added = mock.Mock() + # Process without NAT + agent.process_router(ri) + orig_nat_rules = ri.iptables_manager.ipv4['nat'].rules[:] + # Reprocess with NAT + router['enable_snat'] = True + # Reassign the router object to RouterInfo + ri.router = router + agent.process_router(ri) + # For some reason set logic does not work well with + # IpTablesRule instances + nat_rules_delta = [r for r in ri.iptables_manager.ipv4['nat'].rules + if r not in orig_nat_rules] + self.assertEqual(len(nat_rules_delta), 2) + self._verify_snat_rules(nat_rules_delta, router) + self.assertEqual(self.send_arp.call_count, 1) + + def test_process_router_interface_added(self): + agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) + router = 
self._prepare_router_data() + ri = l3_agent.RouterInfo(router['id'], self.conf.root_helper, + self.conf.use_namespaces, router=router) + agent.external_gateway_added = mock.Mock() + # Process with NAT + agent.process_router(ri) + orig_nat_rules = ri.iptables_manager.ipv4['nat'].rules[:] + # Add an interface and reprocess + router[l3_constants.INTERFACE_KEY].append( + {'id': _uuid(), + 'network_id': _uuid(), + 'admin_state_up': True, + 'fixed_ips': [{'ip_address': '35.4.1.4', + 'subnet_id': _uuid()}], + 'mac_address': 'ca:fe:de:ad:be:ef', + 'subnet': {'cidr': '35.4.1.0/24', + 'gateway_ip': '35.4.1.1'}}) + # Reassign the router object to RouterInfo + ri.router = router + agent.process_router(ri) + # For some reason set logic does not work well with + # IpTablesRule instances + nat_rules_delta = [r for r in ri.iptables_manager.ipv4['nat'].rules + if r not in orig_nat_rules] + self.assertEqual(len(nat_rules_delta), 1) + self._verify_snat_rules(nat_rules_delta, router) + # send_arp is called both times process_router is called + self.assertEqual(self.send_arp.call_count, 2) + + def test_process_ipv6_only_gw(self): + agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) + router = self._prepare_router_data(ip_version=6) + # Get NAT rules without the gw_port + gw_port = router['gw_port'] + router['gw_port'] = None + ri = l3_agent.RouterInfo(router['id'], self.conf.root_helper, + self.conf.use_namespaces, router=router) + agent.external_gateway_added = mock.Mock() + agent.process_router(ri) + orig_nat_rules = ri.iptables_manager.ipv4['nat'].rules[:] + + # Get NAT rules with the gw_port + router['gw_port'] = gw_port + ri = l3_agent.RouterInfo(router['id'], self.conf.root_helper, + self.conf.use_namespaces, router=router) + with mock.patch.object( + agent, + 'external_gateway_nat_rules') as external_gateway_nat_rules: + agent.process_router(ri) + new_nat_rules = ri.iptables_manager.ipv4['nat'].rules[:] + + # There should be no change with the NAT rules + 
self.assertFalse(external_gateway_nat_rules.called) + self.assertEqual(orig_nat_rules, new_nat_rules) + + def test_process_router_ipv6_interface_added(self): + agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) + router = self._prepare_router_data() + ri = l3_agent.RouterInfo(router['id'], self.conf.root_helper, + self.conf.use_namespaces, router=router) + agent.external_gateway_added = mock.Mock() + # Process with NAT + agent.process_router(ri) + orig_nat_rules = ri.iptables_manager.ipv4['nat'].rules[:] + # Add an IPv6 interface and reprocess + router[l3_constants.INTERFACE_KEY].append( + {'id': _uuid(), + 'network_id': _uuid(), + 'admin_state_up': True, + 'fixed_ips': [{'ip_address': 'fd00::2', + 'subnet_id': _uuid()}], + 'mac_address': 'ca:fe:de:ad:be:ef', + 'subnet': {'cidr': 'fd00::/64', + 'gateway_ip': 'fd00::1'}}) + # Reassign the router object to RouterInfo + ri.router = router + agent.process_router(ri) + # For some reason set logic does not work well with + # IpTablesRule instances + nat_rules_delta = [r for r in ri.iptables_manager.ipv4['nat'].rules + if r not in orig_nat_rules] + self.assertFalse(nat_rules_delta) + + def test_process_router_ipv6v4_interface_added(self): + agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) + router = self._prepare_router_data() + ri = l3_agent.RouterInfo(router['id'], self.conf.root_helper, + self.conf.use_namespaces, router=router) + agent.external_gateway_added = mock.Mock() + # Process with NAT + agent.process_router(ri) + orig_nat_rules = ri.iptables_manager.ipv4['nat'].rules[:] + # Add an IPv4 and IPv6 interface and reprocess + router[l3_constants.INTERFACE_KEY].append( + {'id': _uuid(), + 'network_id': _uuid(), + 'admin_state_up': True, + 'fixed_ips': [{'ip_address': '35.4.1.4', + 'subnet_id': _uuid()}], + 'mac_address': 'ca:fe:de:ad:be:ef', + 'subnet': {'cidr': '35.4.1.0/24', + 'gateway_ip': '35.4.1.1'}}) + + router[l3_constants.INTERFACE_KEY].append( + {'id': _uuid(), + 'network_id': _uuid(), + 'admin_state_up': 
True, + 'fixed_ips': [{'ip_address': 'fd00::2', + 'subnet_id': _uuid()}], + 'mac_address': 'ca:fe:de:ad:be:ef', + 'subnet': {'cidr': 'fd00::/64', + 'gateway_ip': 'fd00::1'}}) + # Reassign the router object to RouterInfo + ri.router = router + agent.process_router(ri) + # For some reason set logic does not work well with + # IpTablesRule instances + nat_rules_delta = [r for r in ri.iptables_manager.ipv4['nat'].rules + if r not in orig_nat_rules] + self.assertEqual(1, len(nat_rules_delta)) + self._verify_snat_rules(nat_rules_delta, router) + + def test_process_router_interface_removed(self): + agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) + router = self._prepare_router_data(num_internal_ports=2) + ri = l3_agent.RouterInfo(router['id'], self.conf.root_helper, + self.conf.use_namespaces, router=router) + agent.external_gateway_added = mock.Mock() + # Process with NAT + agent.process_router(ri) + orig_nat_rules = ri.iptables_manager.ipv4['nat'].rules[:] + # Add an interface and reprocess + del router[l3_constants.INTERFACE_KEY][1] + # Reassign the router object to RouterInfo + ri.router = router + agent.process_router(ri) + # For some reason set logic does not work well with + # IpTablesRule instances + nat_rules_delta = [r for r in orig_nat_rules + if r not in ri.iptables_manager.ipv4['nat'].rules] + self.assertEqual(len(nat_rules_delta), 1) + self._verify_snat_rules(nat_rules_delta, router, negate=True) + # send_arp is called both times process_router is called + self.assertEqual(self.send_arp.call_count, 2) + + def test_process_router_internal_network_added_unexpected_error(self): + agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) + router = self._prepare_router_data() + ri = l3_agent.RouterInfo(router['id'], self.conf.root_helper, + self.conf.use_namespaces, router=router) + agent.external_gateway_added = mock.Mock() + with mock.patch.object( + l3_agent.L3NATAgent, + 'internal_network_added') as internal_network_added: + # raise RuntimeError to simulate that an 
unexpected exception + # occurrs + internal_network_added.side_effect = RuntimeError + self.assertRaises(RuntimeError, agent.process_router, ri) + self.assertNotIn( + router[l3_constants.INTERFACE_KEY][0], ri.internal_ports) + + # The unexpected exception has been fixed manually + internal_network_added.side_effect = None + + # _sync_routers_task finds out that _rpc_loop failed to process the + # router last time, it will retry in the next run. + agent.process_router(ri) + # We were able to add the port to ri.internal_ports + self.assertIn( + router[l3_constants.INTERFACE_KEY][0], ri.internal_ports) + + def test_process_router_internal_network_removed_unexpected_error(self): + agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) + router = self._prepare_router_data() + ri = l3_agent.RouterInfo(router['id'], self.conf.root_helper, + self.conf.use_namespaces, router=router) + agent.external_gateway_added = mock.Mock() + # add an internal port + agent.process_router(ri) + + with mock.patch.object( + l3_agent.L3NATAgent, + 'internal_network_removed') as internal_net_removed: + # raise RuntimeError to simulate that an unexpected exception + # occurrs + internal_net_removed.side_effect = RuntimeError + ri.internal_ports[0]['admin_state_up'] = False + # The above port is set to down state, remove it. + self.assertRaises(RuntimeError, agent.process_router, ri) + self.assertIn( + router[l3_constants.INTERFACE_KEY][0], ri.internal_ports) + + # The unexpected exception has been fixed manually + internal_net_removed.side_effect = None + + # _sync_routers_task finds out that _rpc_loop failed to process the + # router last time, it will retry in the next run. 
+ agent.process_router(ri) + # We were able to remove the port from ri.internal_ports + self.assertNotIn( + router[l3_constants.INTERFACE_KEY][0], ri.internal_ports) + + def test_process_router_floatingip_disabled(self): + agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) + with mock.patch.object( + agent.plugin_rpc, + 'update_floatingip_statuses') as mock_update_fip_status: + fip_id = _uuid() + router = self._prepare_router_data(num_internal_ports=1) + router[l3_constants.FLOATINGIP_KEY] = [ + {'id': fip_id, + 'floating_ip_address': '8.8.8.8', + 'fixed_ip_address': '7.7.7.7', + 'port_id': router[l3_constants.INTERFACE_KEY][0]['id']}] + + ri = l3_agent.RouterInfo(router['id'], self.conf.root_helper, + self.conf.use_namespaces, router=router) + agent.external_gateway_added = mock.Mock() + agent.process_router(ri) + # Assess the call for putting the floating IP up was performed + mock_update_fip_status.assert_called_once_with( + mock.ANY, ri.router_id, + {fip_id: l3_constants.FLOATINGIP_STATUS_ACTIVE}) + mock_update_fip_status.reset_mock() + # Process the router again, this time without floating IPs + router[l3_constants.FLOATINGIP_KEY] = [] + ri.router = router + agent.process_router(ri) + # Assess the call for putting the floating IP up was performed + mock_update_fip_status.assert_called_once_with( + mock.ANY, ri.router_id, + {fip_id: l3_constants.FLOATINGIP_STATUS_DOWN}) + + def test_process_router_floatingip_exception(self): + agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) + agent.process_router_floating_ip_addresses = mock.Mock() + agent.process_router_floating_ip_addresses.side_effect = RuntimeError + with mock.patch.object( + agent.plugin_rpc, + 'update_floatingip_statuses') as mock_update_fip_status: + fip_id = _uuid() + router = self._prepare_router_data(num_internal_ports=1) + router[l3_constants.FLOATINGIP_KEY] = [ + {'id': fip_id, + 'floating_ip_address': '8.8.8.8', + 'fixed_ip_address': '7.7.7.7', + 'port_id': 
router[l3_constants.INTERFACE_KEY][0]['id']}] + + ri = l3_agent.RouterInfo(router['id'], self.conf.root_helper, + self.conf.use_namespaces, router=router) + agent.external_gateway_added = mock.Mock() + agent.process_router(ri) + # Assess the call for putting the floating IP into Error + # was performed + mock_update_fip_status.assert_called_once_with( + mock.ANY, ri.router_id, + {fip_id: l3_constants.FLOATINGIP_STATUS_ERROR}) + + def test_handle_router_snat_rules_add_back_jump(self): + agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) + ri = mock.MagicMock() + port = {'fixed_ips': [{'ip_address': '192.168.1.4'}]} + + agent._handle_router_snat_rules(ri, port, [], "iface", "add_rules") + + nat = ri.iptables_manager.ipv4['nat'] + nat.empty_chain.assert_any_call('snat') + nat.add_rule.assert_any_call('snat', '-j $float-snat') + for call in nat.mock_calls: + name, args, kwargs = call + if name == 'add_rule': + self.assertEqual(args, ('snat', '-j $float-snat')) + self.assertEqual(kwargs, {}) + break + + def test_handle_router_snat_rules_add_rules(self): + agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) + ri = l3_agent.RouterInfo(_uuid(), self.conf.root_helper, + self.conf.use_namespaces, None) + ex_gw_port = {'fixed_ips': [{'ip_address': '192.168.1.4'}]} + internal_cidrs = ['10.0.0.0/24'] + agent._handle_router_snat_rules(ri, ex_gw_port, internal_cidrs, + "iface", "add_rules") + + nat_rules = map(str, ri.iptables_manager.ipv4['nat'].rules) + wrap_name = ri.iptables_manager.wrap_name + + jump_float_rule = "-A %s-snat -j %s-float-snat" % (wrap_name, + wrap_name) + internal_net_rule = ("-A %s-snat -s %s -j SNAT --to-source %s") % ( + wrap_name, internal_cidrs[0], + ex_gw_port['fixed_ips'][0]['ip_address']) + + self.assertIn(jump_float_rule, nat_rules) + + self.assertIn(internal_net_rule, nat_rules) + self.assertThat(nat_rules.index(jump_float_rule), + matchers.LessThan(nat_rules.index(internal_net_rule))) + + def test_process_router_delete_stale_internal_devices(self): + 
class FakeDev(object): + def __init__(self, name): + self.name = name + + agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) + stale_devlist = [FakeDev('qr-a1b2c3d4-e5'), + FakeDev('qr-b2c3d4e5-f6')] + stale_devnames = [dev.name for dev in stale_devlist] + + get_devices_return = [] + get_devices_return.extend(stale_devlist) + self.mock_ip.get_devices.return_value = get_devices_return + + router = self._prepare_router_data(enable_snat=True, + num_internal_ports=1) + ri = l3_agent.RouterInfo(router['id'], + self.conf.root_helper, + self.conf.use_namespaces, + router=router) + + internal_ports = ri.router.get(l3_constants.INTERFACE_KEY, []) + self.assertEqual(len(internal_ports), 1) + internal_port = internal_ports[0] + + with contextlib.nested(mock.patch.object(l3_agent.L3NATAgent, + 'internal_network_removed'), + mock.patch.object(l3_agent.L3NATAgent, + 'internal_network_added'), + mock.patch.object(l3_agent.L3NATAgent, + 'external_gateway_removed'), + mock.patch.object(l3_agent.L3NATAgent, + 'external_gateway_added') + ) as (internal_network_removed, + internal_network_added, + external_gateway_removed, + external_gateway_added): + + agent.process_router(ri) + + self.assertEqual(external_gateway_added.call_count, 1) + self.assertFalse(external_gateway_removed.called) + self.assertFalse(internal_network_removed.called) + internal_network_added.assert_called_once_with( + ri, + internal_port['network_id'], + internal_port['id'], + internal_port['ip_cidr'], + internal_port['mac_address']) + self.assertEqual(self.mock_driver.unplug.call_count, + len(stale_devnames)) + calls = [mock.call(stale_devname, + namespace=ri.ns_name, + prefix=l3_agent.INTERNAL_DEV_PREFIX) + for stale_devname in stale_devnames] + self.mock_driver.unplug.assert_has_calls(calls, any_order=True) + + def test_process_router_delete_stale_external_devices(self): + class FakeDev(object): + def __init__(self, name): + self.name = name + + agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) + stale_devlist = 
[FakeDev('qg-a1b2c3d4-e5')] + stale_devnames = [dev.name for dev in stale_devlist] + + router = self._prepare_router_data(enable_snat=True, + num_internal_ports=1) + del router['gw_port'] + ri = l3_agent.RouterInfo(router['id'], + self.conf.root_helper, + self.conf.use_namespaces, + router=router) + + self.mock_ip.get_devices.return_value = stale_devlist + + agent.process_router(ri) + + self.mock_driver.unplug.assert_called_with( + stale_devnames[0], + bridge="br-ex", + namespace=ri.ns_name, + prefix=l3_agent.EXTERNAL_DEV_PREFIX) + + def test_router_deleted(self): + agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) + agent.router_deleted(None, FAKE_ID) + # verify that will set fullsync + self.assertIn(FAKE_ID, agent.removed_routers) + + def test_routers_updated(self): + agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) + agent.routers_updated(None, [FAKE_ID]) + # verify that will set fullsync + self.assertIn(FAKE_ID, agent.updated_routers) + + def test_removed_from_agent(self): + agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) + agent.router_removed_from_agent(None, {'router_id': FAKE_ID}) + # verify that will set fullsync + self.assertIn(FAKE_ID, agent.removed_routers) + + def test_added_to_agent(self): + agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) + agent.router_added_to_agent(None, [FAKE_ID]) + # verify that will set fullsync + self.assertIn(FAKE_ID, agent.updated_routers) + + def test_process_router_delete(self): + agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) + ex_gw_port = {'id': _uuid(), + 'network_id': _uuid(), + 'fixed_ips': [{'ip_address': '19.4.4.4', + 'subnet_id': _uuid()}], + 'subnet': {'cidr': '19.4.4.0/24', + 'gateway_ip': '19.4.4.1'}} + router = { + 'id': _uuid(), + 'enable_snat': True, + 'routes': [], + 'gw_port': ex_gw_port} + agent._router_added(router['id'], router) + agent.router_deleted(None, router['id']) + agent._process_router_delete() + self.assertFalse(list(agent.removed_routers)) + + def 
test_destroy_router_namespace_skips_ns_removal(self): + agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) + agent._destroy_router_namespace("fakens") + self.assertEqual(self.mock_ip.netns.delete.call_count, 0) + + def test_destroy_router_namespace_removes_ns(self): + self.conf.set_override('router_delete_namespaces', True) + agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) + agent._destroy_router_namespace("fakens") + self.mock_ip.netns.delete.assert_called_once_with("fakens") + + def _configure_metadata_proxy(self, enableflag=True): + if not enableflag: + self.conf.set_override('enable_metadata_proxy', False) + agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) + router_id = _uuid() + router = {'id': _uuid(), + 'external_gateway_info': {}, + 'routes': []} + with mock.patch.object( + agent, '_destroy_metadata_proxy') as destroy_proxy: + with mock.patch.object( + agent, '_spawn_metadata_proxy') as spawn_proxy: + agent._router_added(router_id, router) + if enableflag: + spawn_proxy.assert_called_with(mock.ANY, mock.ANY) + else: + self.assertFalse(spawn_proxy.call_count) + agent._router_removed(router_id) + if enableflag: + destroy_proxy.assert_called_with(mock.ANY, mock.ANY) + else: + self.assertFalse(destroy_proxy.call_count) + + def test_enable_metadata_proxy(self): + self._configure_metadata_proxy() + + def test_disable_metadata_proxy_spawn(self): + self._configure_metadata_proxy(enableflag=False) + + def test_metadata_nat_rules(self): + self.conf.set_override('enable_metadata_proxy', False) + agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) + self.assertEqual([], agent.metadata_nat_rules()) + + self.conf.set_override('metadata_port', '8775') + self.conf.set_override('enable_metadata_proxy', True) + agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) + rules = ('PREROUTING', '-s 0.0.0.0/0 -d 169.254.169.254/32 ' + '-p tcp -m tcp --dport 80 -j REDIRECT --to-port 8775') + self.assertEqual([rules], agent.metadata_nat_rules()) + + def test_router_id_specified_in_conf(self): 
+ self.conf.set_override('use_namespaces', False) + self.conf.set_override('router_id', '') + self.assertRaises(SystemExit, l3_agent.L3NATAgent, + HOSTNAME, self.conf) + + self.conf.set_override('router_id', '1234') + agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) + self.assertEqual(['1234'], agent._router_ids()) + self.assertFalse(agent._clean_stale_namespaces) + + def test_process_routers_with_no_ext_net_in_conf(self): + agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) + self.plugin_api.get_external_network_id.return_value = 'aaa' + + routers = [ + {'id': _uuid(), + 'routes': [], + 'admin_state_up': True, + 'external_gateway_info': {'network_id': 'aaa'}}] + + agent._process_routers(routers) + self.assertIn(routers[0]['id'], agent.router_info) + self.plugin_api.get_external_network_id.assert_called_with( + agent.context) + + def test_process_routers_with_cached_ext_net(self): + agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) + self.plugin_api.get_external_network_id.return_value = 'aaa' + agent.target_ex_net_id = 'aaa' + + routers = [ + {'id': _uuid(), + 'routes': [], + 'admin_state_up': True, + 'external_gateway_info': {'network_id': 'aaa'}}] + + agent._process_routers(routers) + self.assertIn(routers[0]['id'], agent.router_info) + self.assertFalse(self.plugin_api.get_external_network_id.called) + + def test_process_routers_with_stale_cached_ext_net(self): + agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) + self.plugin_api.get_external_network_id.return_value = 'aaa' + agent.target_ex_net_id = 'bbb' + + routers = [ + {'id': _uuid(), + 'routes': [], + 'admin_state_up': True, + 'external_gateway_info': {'network_id': 'aaa'}}] + + agent._process_routers(routers) + self.assertIn(routers[0]['id'], agent.router_info) + self.plugin_api.get_external_network_id.assert_called_with( + agent.context) + + def test_process_routers_with_no_ext_net_in_conf_and_two_net_plugin(self): + agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) + + routers = [ + {'id': _uuid(), + 
'routes': [], + 'admin_state_up': True, + 'external_gateway_info': {'network_id': 'aaa'}}] + + agent.router_info = {} + self.plugin_api.get_external_network_id.side_effect = ( + n_exc.TooManyExternalNetworks()) + self.assertRaises(n_exc.TooManyExternalNetworks, + agent._process_routers, + routers) + self.assertNotIn(routers[0]['id'], agent.router_info) + + def test_process_routers_with_ext_net_in_conf(self): + agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) + self.plugin_api.get_external_network_id.return_value = 'aaa' + + routers = [ + {'id': _uuid(), + 'routes': [], + 'admin_state_up': True, + 'external_gateway_info': {'network_id': 'aaa'}}, + {'id': _uuid(), + 'routes': [], + 'admin_state_up': True, + 'external_gateway_info': {'network_id': 'bbb'}}] + + agent.router_info = {} + self.conf.set_override('gateway_external_network_id', 'aaa') + agent._process_routers(routers) + self.assertIn(routers[0]['id'], agent.router_info) + self.assertNotIn(routers[1]['id'], agent.router_info) + + def test_process_routers_with_no_bridge_no_ext_net_in_conf(self): + agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) + self.plugin_api.get_external_network_id.return_value = 'aaa' + + routers = [ + {'id': _uuid(), + 'routes': [], + 'admin_state_up': True, + 'external_gateway_info': {'network_id': 'aaa'}}, + {'id': _uuid(), + 'routes': [], + 'admin_state_up': True, + 'external_gateway_info': {'network_id': 'bbb'}}] + + agent.router_info = {} + self.conf.set_override('external_network_bridge', '') + agent._process_routers(routers) + self.assertIn(routers[0]['id'], agent.router_info) + self.assertIn(routers[1]['id'], agent.router_info) + + def test_nonexistent_interface_driver(self): + self.conf.set_override('interface_driver', None) + with mock.patch.object(l3_agent, 'LOG') as log: + self.assertRaises(SystemExit, l3_agent.L3NATAgent, + HOSTNAME, self.conf) + msg = 'An interface driver must be specified' + log.error.assert_called_once_with(msg) + + 
self.conf.set_override('interface_driver', 'wrong_driver') + with mock.patch.object(l3_agent, 'LOG') as log: + self.assertRaises(SystemExit, l3_agent.L3NATAgent, + HOSTNAME, self.conf) + msg = "Error importing interface driver 'wrong_driver'" + log.error.assert_called_once_with(msg) + + def test_metadata_filter_rules(self): + self.conf.set_override('enable_metadata_proxy', False) + agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) + self.assertEqual([], agent.metadata_filter_rules()) + + self.conf.set_override('metadata_port', '8775') + self.conf.set_override('enable_metadata_proxy', True) + agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) + rules = ('INPUT', '-s 0.0.0.0/0 -d 127.0.0.1 ' + '-p tcp -m tcp --dport 8775 -j ACCEPT') + self.assertEqual([rules], agent.metadata_filter_rules()) + + def _cleanup_namespace_test(self, + stale_namespace_list, + router_list, + other_namespaces): + self.conf.set_override('router_delete_namespaces', True) + + good_namespace_list = [l3_agent.NS_PREFIX + r['id'] + for r in router_list] + self.mock_ip.get_namespaces.return_value = (stale_namespace_list + + good_namespace_list + + other_namespaces) + + agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) + + self.assertTrue(agent._clean_stale_namespaces) + + pm = self.external_process.return_value + pm.reset_mock() + + agent._destroy_router_namespace = mock.MagicMock() + agent._cleanup_namespaces(router_list) + + self.assertEqual(pm.disable.call_count, len(stale_namespace_list)) + self.assertEqual(agent._destroy_router_namespace.call_count, + len(stale_namespace_list)) + expected_args = [mock.call(ns) for ns in stale_namespace_list] + agent._destroy_router_namespace.assert_has_calls(expected_args, + any_order=True) + self.assertFalse(agent._clean_stale_namespaces) + + def test_cleanup_namespace(self): + self.conf.set_override('router_id', None) + stale_namespaces = [l3_agent.NS_PREFIX + 'foo', + l3_agent.NS_PREFIX + 'bar'] + other_namespaces = ['unknown'] + + 
self._cleanup_namespace_test(stale_namespaces, + [], + other_namespaces) + + def test_cleanup_namespace_with_registered_router_ids(self): + self.conf.set_override('router_id', None) + stale_namespaces = [l3_agent.NS_PREFIX + 'cccc', + l3_agent.NS_PREFIX + 'eeeee'] + router_list = [{'id': 'foo'}, {'id': 'aaaa'}] + other_namespaces = ['qdhcp-aabbcc', 'unknown'] + + self._cleanup_namespace_test(stale_namespaces, + router_list, + other_namespaces) + + def test_cleanup_namespace_with_conf_router_id(self): + self.conf.set_override('router_id', 'bbbbb') + stale_namespaces = [l3_agent.NS_PREFIX + 'cccc', + l3_agent.NS_PREFIX + 'eeeee', + l3_agent.NS_PREFIX + self.conf.router_id] + router_list = [{'id': 'foo'}, {'id': 'aaaa'}] + other_namespaces = ['qdhcp-aabbcc', 'unknown'] + + self._cleanup_namespace_test(stale_namespaces, + router_list, + other_namespaces) + + +class TestL3AgentEventHandler(base.BaseTestCase): + + def setUp(self): + super(TestL3AgentEventHandler, self).setUp() + cfg.CONF.register_opts(l3_agent.L3NATAgent.OPTS) + agent_config.register_interface_driver_opts_helper(cfg.CONF) + agent_config.register_use_namespaces_opts_helper(cfg.CONF) + cfg.CONF.set_override( + 'interface_driver', 'neutron.agent.linux.interface.NullDriver' + ) + cfg.CONF.set_override('use_namespaces', True) + agent_config.register_root_helper(cfg.CONF) + + device_exists_p = mock.patch( + 'neutron.agent.linux.ip_lib.device_exists') + device_exists_p.start() + + utils_exec_p = mock.patch( + 'neutron.agent.linux.utils.execute') + utils_exec_p.start() + + drv_cls_p = mock.patch('neutron.agent.linux.interface.NullDriver') + driver_cls = drv_cls_p.start() + mock_driver = mock.MagicMock() + mock_driver.DEV_NAME_LEN = ( + interface.LinuxInterfaceDriver.DEV_NAME_LEN) + driver_cls.return_value = mock_driver + + l3_plugin_p = mock.patch( + 'neutron.agent.l3_agent.L3PluginApi') + l3_plugin_cls = l3_plugin_p.start() + l3_plugin_cls.return_value = mock.Mock() + + self.external_process_p = mock.patch( + 
'neutron.agent.linux.external_process.ProcessManager' + ) + self.external_process_p.start() + looping_call_p = mock.patch( + 'neutron.openstack.common.loopingcall.FixedIntervalLoopingCall') + looping_call_p.start() + self.agent = l3_agent.L3NATAgent(HOSTNAME) + + def test_spawn_metadata_proxy(self): + router_id = _uuid() + metadata_port = 8080 + ip_class_path = 'neutron.agent.linux.ip_lib.IPWrapper' + + cfg.CONF.set_override('metadata_port', metadata_port) + cfg.CONF.set_override('log_file', 'test.log') + cfg.CONF.set_override('debug', True) + + self.external_process_p.stop() + ns = 'qrouter-' + router_id + try: + with mock.patch(ip_class_path) as ip_mock: + self.agent._spawn_metadata_proxy(router_id, ns) + ip_mock.assert_has_calls([ + mock.call('sudo', ns), + mock.call().netns.execute([ + 'neutron-ns-metadata-proxy', + mock.ANY, + mock.ANY, + '--router_id=%s' % router_id, + mock.ANY, + '--metadata_port=%s' % metadata_port, + '--debug', + '--log-file=neutron-ns-metadata-proxy-%s.log' % + router_id + ]) + ]) + finally: + self.external_process_p.start() diff --git a/neutron/tests/unit/test_l3_plugin.py b/neutron/tests/unit/test_l3_plugin.py new file mode 100644 index 000000000..4eb80d0d3 --- /dev/null +++ b/neutron/tests/unit/test_l3_plugin.py @@ -0,0 +1,2070 @@ +# Copyright 2012 VMware, Inc. +# All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +import contextlib +import copy + +import mock +import netaddr +from oslo.config import cfg +from webob import exc + +from neutron.api.v2 import attributes +from neutron.common import constants as l3_constants +from neutron.common import exceptions as n_exc +from neutron import context +from neutron.db import api as qdbapi +from neutron.db import db_base_plugin_v2 +from neutron.db import external_net_db +from neutron.db import l3_agentschedulers_db +from neutron.db import l3_db +from neutron.db import l3_rpc_base +from neutron.db import model_base +from neutron.extensions import external_net +from neutron.extensions import l3 +from neutron import manager +from neutron.openstack.common import importutils +from neutron.openstack.common import log as logging +from neutron.openstack.common import uuidutils +from neutron.plugins.common import constants as service_constants +from neutron.tests import fake_notifier +from neutron.tests.unit import test_agent_ext_plugin +from neutron.tests.unit import test_api_v2 +from neutron.tests.unit import test_api_v2_extension +from neutron.tests.unit import test_db_plugin + + +LOG = logging.getLogger(__name__) + +_uuid = uuidutils.generate_uuid +_get_path = test_api_v2._get_path + + +class L3TestExtensionManager(object): + + def get_resources(self): + # Add the resources to the global attribute map + # This is done here as the setup process won't + # initialize the main API router which extends + # the global attribute map + attributes.RESOURCE_ATTRIBUTE_MAP.update( + l3.RESOURCE_ATTRIBUTE_MAP) + return l3.L3.get_resources() + + def get_actions(self): + return [] + + def get_request_extensions(self): + return [] + + +class L3NatExtensionTestCase(test_api_v2_extension.ExtensionTestCase): + fmt = 'json' + + def setUp(self): + super(L3NatExtensionTestCase, self).setUp() + self._setUpExtension( + 'neutron.extensions.l3.RouterPluginBase', None, + l3.RESOURCE_ATTRIBUTE_MAP, l3.L3, '', + allow_pagination=True, allow_sorting=True, + 
supported_extension_aliases=['router'], + use_quota=True) + + def test_router_create(self): + router_id = _uuid() + data = {'router': {'name': 'router1', 'admin_state_up': True, + 'tenant_id': _uuid(), + 'external_gateway_info': None}} + return_value = copy.deepcopy(data['router']) + return_value.update({'status': "ACTIVE", 'id': router_id}) + + instance = self.plugin.return_value + instance.create_router.return_value = return_value + instance.get_routers_count.return_value = 0 + res = self.api.post(_get_path('routers', fmt=self.fmt), + self.serialize(data), + content_type='application/%s' % self.fmt) + instance.create_router.assert_called_with(mock.ANY, + router=data) + self.assertEqual(res.status_int, exc.HTTPCreated.code) + res = self.deserialize(res) + self.assertIn('router', res) + router = res['router'] + self.assertEqual(router['id'], router_id) + self.assertEqual(router['status'], "ACTIVE") + self.assertEqual(router['admin_state_up'], True) + + def test_router_list(self): + router_id = _uuid() + return_value = [{'name': 'router1', 'admin_state_up': True, + 'tenant_id': _uuid(), 'id': router_id}] + + instance = self.plugin.return_value + instance.get_routers.return_value = return_value + + res = self.api.get(_get_path('routers', fmt=self.fmt)) + + instance.get_routers.assert_called_with(mock.ANY, fields=mock.ANY, + filters=mock.ANY, + sorts=mock.ANY, + limit=mock.ANY, + marker=mock.ANY, + page_reverse=mock.ANY) + self.assertEqual(res.status_int, exc.HTTPOk.code) + res = self.deserialize(res) + self.assertIn('routers', res) + self.assertEqual(1, len(res['routers'])) + self.assertEqual(router_id, res['routers'][0]['id']) + + def test_router_update(self): + router_id = _uuid() + update_data = {'router': {'admin_state_up': False}} + return_value = {'name': 'router1', 'admin_state_up': False, + 'tenant_id': _uuid(), + 'status': "ACTIVE", 'id': router_id} + + instance = self.plugin.return_value + instance.update_router.return_value = return_value + + res = 
self.api.put(_get_path('routers', id=router_id, + fmt=self.fmt), + self.serialize(update_data)) + + instance.update_router.assert_called_with(mock.ANY, router_id, + router=update_data) + self.assertEqual(res.status_int, exc.HTTPOk.code) + res = self.deserialize(res) + self.assertIn('router', res) + router = res['router'] + self.assertEqual(router['id'], router_id) + self.assertEqual(router['status'], "ACTIVE") + self.assertEqual(router['admin_state_up'], False) + + def test_router_get(self): + router_id = _uuid() + return_value = {'name': 'router1', 'admin_state_up': False, + 'tenant_id': _uuid(), + 'status': "ACTIVE", 'id': router_id} + + instance = self.plugin.return_value + instance.get_router.return_value = return_value + + res = self.api.get(_get_path('routers', id=router_id, + fmt=self.fmt)) + + instance.get_router.assert_called_with(mock.ANY, router_id, + fields=mock.ANY) + self.assertEqual(res.status_int, exc.HTTPOk.code) + res = self.deserialize(res) + self.assertIn('router', res) + router = res['router'] + self.assertEqual(router['id'], router_id) + self.assertEqual(router['status'], "ACTIVE") + self.assertEqual(router['admin_state_up'], False) + + def test_router_delete(self): + router_id = _uuid() + + res = self.api.delete(_get_path('routers', id=router_id)) + + instance = self.plugin.return_value + instance.delete_router.assert_called_with(mock.ANY, router_id) + self.assertEqual(res.status_int, exc.HTTPNoContent.code) + + def test_router_add_interface(self): + router_id = _uuid() + subnet_id = _uuid() + port_id = _uuid() + + interface_data = {'subnet_id': subnet_id} + return_value = copy.deepcopy(interface_data) + return_value['port_id'] = port_id + + instance = self.plugin.return_value + instance.add_router_interface.return_value = return_value + + path = _get_path('routers', id=router_id, + action="add_router_interface", + fmt=self.fmt) + res = self.api.put(path, self.serialize(interface_data)) + + 
instance.add_router_interface.assert_called_with(mock.ANY, router_id, + interface_data) + self.assertEqual(res.status_int, exc.HTTPOk.code) + res = self.deserialize(res) + self.assertIn('port_id', res) + self.assertEqual(res['port_id'], port_id) + self.assertEqual(res['subnet_id'], subnet_id) + + +class L3NatExtensionTestCaseXML(L3NatExtensionTestCase): + fmt = 'xml' + + +# This base plugin class is for tests. +class TestL3NatBasePlugin(db_base_plugin_v2.NeutronDbPluginV2, + external_net_db.External_net_db_mixin): + + __native_pagination_support = True + __native_sorting_support = True + + def create_network(self, context, network): + session = context.session + with session.begin(subtransactions=True): + net = super(TestL3NatBasePlugin, self).create_network(context, + network) + self._process_l3_create(context, net, network['network']) + return net + + def update_network(self, context, id, network): + + session = context.session + with session.begin(subtransactions=True): + net = super(TestL3NatBasePlugin, self).update_network(context, id, + network) + self._process_l3_update(context, net, network['network']) + return net + + def delete_network(self, context, id): + with context.session.begin(subtransactions=True): + self._process_l3_delete(context, id) + super(TestL3NatBasePlugin, self).delete_network(context, id) + + def delete_port(self, context, id, l3_port_check=True): + plugin = manager.NeutronManager.get_service_plugins().get( + service_constants.L3_ROUTER_NAT) + if plugin: + if l3_port_check: + plugin.prevent_l3_port_deletion(context, id) + plugin.disassociate_floatingips(context, id) + return super(TestL3NatBasePlugin, self).delete_port(context, id) + + +# This plugin class is for tests with plugin that integrates L3. +class TestL3NatIntPlugin(TestL3NatBasePlugin, + l3_db.L3_NAT_db_mixin): + + supported_extension_aliases = ["external-net", "router"] + + +# This plugin class is for tests with plugin that integrates L3 and L3 agent +# scheduling. 
+class TestL3NatIntAgentSchedulingPlugin(TestL3NatIntPlugin, + l3_agentschedulers_db. + L3AgentSchedulerDbMixin): + + supported_extension_aliases = ["external-net", "router", + "l3_agent_scheduler"] + router_scheduler = importutils.import_object( + cfg.CONF.router_scheduler_driver) + + +# This plugin class is for tests with plugin not supporting L3. +class TestNoL3NatPlugin(TestL3NatBasePlugin): + + __native_pagination_support = True + __native_sorting_support = True + + supported_extension_aliases = ["external-net"] + + +# A L3 routing service plugin class for tests with plugins that +# delegate away L3 routing functionality +class TestL3NatServicePlugin(db_base_plugin_v2.CommonDbMixin, + l3_db.L3_NAT_db_mixin): + + supported_extension_aliases = ["router"] + + def __init__(self): + qdbapi.register_models(base=model_base.BASEV2) + + def get_plugin_type(self): + return service_constants.L3_ROUTER_NAT + + def get_plugin_description(self): + return "L3 Routing Service Plugin for testing" + + +# A L3 routing with L3 agent scheduling service plugin class for tests with +# plugins that delegate away L3 routing functionality +class TestL3NatAgentSchedulingServicePlugin(TestL3NatServicePlugin, + l3_agentschedulers_db. 
+ L3AgentSchedulerDbMixin): + + supported_extension_aliases = ["router", "l3_agent_scheduler"] + + +class L3NatTestCaseMixin(object): + + def _create_router(self, fmt, tenant_id, name=None, + admin_state_up=None, set_context=False, + arg_list=None, **kwargs): + data = {'router': {'tenant_id': tenant_id}} + if name: + data['router']['name'] = name + if admin_state_up: + data['router']['admin_state_up'] = admin_state_up + for arg in (('admin_state_up', 'tenant_id') + (arg_list or ())): + # Arg must be present and not empty + if arg in kwargs and kwargs[arg]: + data['router'][arg] = kwargs[arg] + router_req = self.new_create_request('routers', data, fmt) + if set_context and tenant_id: + # create a specific auth context for this request + router_req.environ['neutron.context'] = context.Context( + '', tenant_id) + + return router_req.get_response(self.ext_api) + + def _make_router(self, fmt, tenant_id, name=None, admin_state_up=None, + external_gateway_info=None, set_context=False, + arg_list=None, **kwargs): + if external_gateway_info: + arg_list = ('external_gateway_info', ) + (arg_list or ()) + res = self._create_router(fmt, tenant_id, name, + admin_state_up, set_context, + arg_list=arg_list, + external_gateway_info=external_gateway_info, + **kwargs) + return self.deserialize(fmt, res) + + def _add_external_gateway_to_router(self, router_id, network_id, + expected_code=exc.HTTPOk.code, + neutron_context=None): + return self._update('routers', router_id, + {'router': {'external_gateway_info': + {'network_id': network_id}}}, + expected_code=expected_code, + neutron_context=neutron_context) + + def _remove_external_gateway_from_router(self, router_id, network_id, + expected_code=exc.HTTPOk.code, + external_gw_info=None): + return self._update('routers', router_id, + {'router': {'external_gateway_info': + external_gw_info}}, + expected_code=expected_code) + + def _router_interface_action(self, action, router_id, subnet_id, port_id, + expected_code=exc.HTTPOk.code, + 
expected_body=None, + tenant_id=None): + interface_data = {} + if subnet_id: + interface_data.update({'subnet_id': subnet_id}) + if port_id and (action != 'add' or not subnet_id): + interface_data.update({'port_id': port_id}) + + req = self.new_action_request('routers', interface_data, router_id, + "%s_router_interface" % action) + # if tenant_id was specified, create a tenant context for this request + if tenant_id: + req.environ['neutron.context'] = context.Context( + '', tenant_id) + res = req.get_response(self.ext_api) + self.assertEqual(res.status_int, expected_code) + response = self.deserialize(self.fmt, res) + if expected_body: + self.assertEqual(response, expected_body) + return response + + @contextlib.contextmanager + def router(self, name='router1', admin_state_up=True, + fmt=None, tenant_id=_uuid(), + external_gateway_info=None, set_context=False, + **kwargs): + router = self._make_router(fmt or self.fmt, tenant_id, name, + admin_state_up, external_gateway_info, + set_context, **kwargs) + yield router + self._delete('routers', router['router']['id']) + + def _set_net_external(self, net_id): + self._update('networks', net_id, + {'network': {external_net.EXTERNAL: True}}) + + def _create_floatingip(self, fmt, network_id, port_id=None, + fixed_ip=None, set_context=False): + data = {'floatingip': {'floating_network_id': network_id, + 'tenant_id': self._tenant_id}} + if port_id: + data['floatingip']['port_id'] = port_id + if fixed_ip: + data['floatingip']['fixed_ip_address'] = fixed_ip + floatingip_req = self.new_create_request('floatingips', data, fmt) + if set_context and self._tenant_id: + # create a specific auth context for this request + floatingip_req.environ['neutron.context'] = context.Context( + '', self._tenant_id) + return floatingip_req.get_response(self.ext_api) + + def _make_floatingip(self, fmt, network_id, port_id=None, + fixed_ip=None, set_context=False): + res = self._create_floatingip(fmt, network_id, port_id, + fixed_ip, set_context) + 
self.assertEqual(res.status_int, exc.HTTPCreated.code) + return self.deserialize(fmt, res) + + def _validate_floating_ip(self, fip): + body = self._list('floatingips') + self.assertEqual(len(body['floatingips']), 1) + self.assertEqual(body['floatingips'][0]['id'], + fip['floatingip']['id']) + + body = self._show('floatingips', fip['floatingip']['id']) + self.assertEqual(body['floatingip']['id'], + fip['floatingip']['id']) + + @contextlib.contextmanager + def floatingip_with_assoc(self, port_id=None, fmt=None, fixed_ip=None, + set_context=False): + with self.subnet(cidr='11.0.0.0/24') as public_sub: + self._set_net_external(public_sub['subnet']['network_id']) + private_port = None + if port_id: + private_port = self._show('ports', port_id) + with test_db_plugin.optional_ctx(private_port, + self.port) as private_port: + with self.router() as r: + sid = private_port['port']['fixed_ips'][0]['subnet_id'] + private_sub = {'subnet': {'id': sid}} + floatingip = None + + self._add_external_gateway_to_router( + r['router']['id'], + public_sub['subnet']['network_id']) + self._router_interface_action( + 'add', r['router']['id'], + private_sub['subnet']['id'], None) + + floatingip = self._make_floatingip( + fmt or self.fmt, + public_sub['subnet']['network_id'], + port_id=private_port['port']['id'], + fixed_ip=fixed_ip, + set_context=False) + yield floatingip + + if floatingip: + self._delete('floatingips', + floatingip['floatingip']['id']) + self._router_interface_action( + 'remove', r['router']['id'], + private_sub['subnet']['id'], None) + self._remove_external_gateway_from_router( + r['router']['id'], + public_sub['subnet']['network_id']) + + @contextlib.contextmanager + def floatingip_no_assoc_with_public_sub( + self, private_sub, fmt=None, set_context=False, public_sub=None): + self._set_net_external(public_sub['subnet']['network_id']) + with self.router() as r: + floatingip = None + + self._add_external_gateway_to_router( + r['router']['id'], + 
public_sub['subnet']['network_id']) + self._router_interface_action('add', r['router']['id'], + private_sub['subnet']['id'], + None) + + floatingip = self._make_floatingip( + fmt or self.fmt, + public_sub['subnet']['network_id'], + set_context=set_context) + yield floatingip, r + + if floatingip: + self._delete('floatingips', + floatingip['floatingip']['id']) + self._router_interface_action('remove', r['router']['id'], + private_sub['subnet']['id'], + None) + self._remove_external_gateway_from_router( + r['router']['id'], + public_sub['subnet']['network_id']) + + @contextlib.contextmanager + def floatingip_no_assoc(self, private_sub, fmt=None, set_context=False): + with self.subnet(cidr='12.0.0.0/24') as public_sub: + with self.floatingip_no_assoc_with_public_sub( + private_sub, fmt, set_context, public_sub) as (f, r): + # Yield only the floating ip object + yield f + + +class L3NatTestCaseBase(L3NatTestCaseMixin): + + def test_router_create(self): + name = 'router1' + tenant_id = _uuid() + expected_value = [('name', name), ('tenant_id', tenant_id), + ('admin_state_up', True), ('status', 'ACTIVE'), + ('external_gateway_info', None)] + with self.router(name='router1', admin_state_up=True, + tenant_id=tenant_id) as router: + for k, v in expected_value: + self.assertEqual(router['router'][k], v) + + def test_router_create_call_extensions(self): + self.extension_called = False + + def _extend_router_dict_test_attr(*args, **kwargs): + self.extension_called = True + + db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs( + l3.ROUTERS, [_extend_router_dict_test_attr]) + self.assertFalse(self.extension_called) + with self.router(): + self.assertTrue(self.extension_called) + + def test_router_create_with_gwinfo(self): + with self.subnet() as s: + self._set_net_external(s['subnet']['network_id']) + data = {'router': {'tenant_id': _uuid()}} + data['router']['name'] = 'router1' + data['router']['external_gateway_info'] = { + 'network_id': s['subnet']['network_id']} + 
router_req = self.new_create_request('routers', data, self.fmt) + res = router_req.get_response(self.ext_api) + router = self.deserialize(self.fmt, res) + self.assertEqual( + s['subnet']['network_id'], + router['router']['external_gateway_info']['network_id']) + self._delete('routers', router['router']['id']) + + def test_router_list(self): + with contextlib.nested(self.router(), + self.router(), + self.router() + ) as routers: + self._test_list_resources('router', routers) + + def test_router_list_with_parameters(self): + with contextlib.nested(self.router(name='router1'), + self.router(name='router2'), + ) as (router1, router2): + query_params = 'name=router1' + self._test_list_resources('router', [router1], + query_params=query_params) + query_params = 'name=router2' + self._test_list_resources('router', [router2], + query_params=query_params) + query_params = 'name=router3' + self._test_list_resources('router', [], + query_params=query_params) + + def test_router_list_with_sort(self): + with contextlib.nested(self.router(name='router1'), + self.router(name='router2'), + self.router(name='router3') + ) as (router1, router2, router3): + self._test_list_with_sort('router', (router3, router2, router1), + [('name', 'desc')]) + + def test_router_list_with_pagination(self): + with contextlib.nested(self.router(name='router1'), + self.router(name='router2'), + self.router(name='router3') + ) as (router1, router2, router3): + self._test_list_with_pagination('router', + (router1, router2, router3), + ('name', 'asc'), 2, 2) + + def test_router_list_with_pagination_reverse(self): + with contextlib.nested(self.router(name='router1'), + self.router(name='router2'), + self.router(name='router3') + ) as (router1, router2, router3): + self._test_list_with_pagination_reverse('router', + (router1, router2, + router3), + ('name', 'asc'), 2, 2) + + def test_router_update(self): + rname1 = "yourrouter" + rname2 = "nachorouter" + with self.router(name=rname1) as r: + body = 
self._show('routers', r['router']['id']) + self.assertEqual(body['router']['name'], rname1) + + body = self._update('routers', r['router']['id'], + {'router': {'name': rname2}}) + + body = self._show('routers', r['router']['id']) + self.assertEqual(body['router']['name'], rname2) + + def test_router_update_gateway(self): + with self.router() as r: + with self.subnet() as s1: + with self.subnet() as s2: + self._set_net_external(s1['subnet']['network_id']) + self._add_external_gateway_to_router( + r['router']['id'], + s1['subnet']['network_id']) + body = self._show('routers', r['router']['id']) + net_id = (body['router'] + ['external_gateway_info']['network_id']) + self.assertEqual(net_id, s1['subnet']['network_id']) + self._set_net_external(s2['subnet']['network_id']) + self._add_external_gateway_to_router( + r['router']['id'], + s2['subnet']['network_id']) + body = self._show('routers', r['router']['id']) + net_id = (body['router'] + ['external_gateway_info']['network_id']) + self.assertEqual(net_id, s2['subnet']['network_id']) + # Validate that we can clear the gateway with + # an empty dict, in any other case, we fall back + # on None as default value + self._remove_external_gateway_from_router( + r['router']['id'], + s2['subnet']['network_id'], + external_gw_info={}) + + def test_router_update_gateway_with_existed_floatingip(self): + with self.subnet() as subnet: + self._set_net_external(subnet['subnet']['network_id']) + with self.floatingip_with_assoc() as fip: + self._add_external_gateway_to_router( + fip['floatingip']['router_id'], + subnet['subnet']['network_id'], + expected_code=exc.HTTPConflict.code) + + def test_router_update_gateway_to_empty_with_existed_floatingip(self): + with self.floatingip_with_assoc() as fip: + self._remove_external_gateway_from_router( + fip['floatingip']['router_id'], None, + expected_code=exc.HTTPConflict.code) + + def test_router_add_interface_subnet(self): + exp_notifications = ['router.create.start', + 'router.create.end', + 
'network.create.start', + 'network.create.end', + 'subnet.create.start', + 'subnet.create.end', + 'router.interface.create', + 'router.interface.delete'] + fake_notifier.reset() + with self.router() as r: + with self.subnet() as s: + body = self._router_interface_action('add', + r['router']['id'], + s['subnet']['id'], + None) + self.assertIn('port_id', body) + + # fetch port and confirm device_id + r_port_id = body['port_id'] + body = self._show('ports', r_port_id) + self.assertEqual(body['port']['device_id'], r['router']['id']) + + body = self._router_interface_action('remove', + r['router']['id'], + s['subnet']['id'], + None) + body = self._show('ports', r_port_id, + expected_code=exc.HTTPNotFound.code) + + self.assertEqual( + set(exp_notifications), + set(n['event_type'] for n in fake_notifier.NOTIFICATIONS)) + + for n in fake_notifier.NOTIFICATIONS: + if n['event_type'].startswith('router.interface.'): + payload = n['payload']['router_interface'] + self.assertIn('id', payload) + self.assertEqual(payload['id'], r['router']['id']) + self.assertIn('tenant_id', payload) + stid = s['subnet']['tenant_id'] + # tolerate subnet tenant deliberately to '' in the + # nsx metadata access case + self.assertIn(payload['tenant_id'], [stid, '']) + + def test_router_add_interface_subnet_with_bad_tenant_returns_404(self): + with mock.patch('neutron.context.Context.to_dict') as tdict: + tenant_id = _uuid() + admin_context = {'roles': ['admin']} + tenant_context = {'tenant_id': 'bad_tenant', + 'roles': []} + tdict.return_value = admin_context + with self.router(tenant_id=tenant_id) as r: + with self.network(tenant_id=tenant_id) as n: + with self.subnet(network=n) as s: + tdict.return_value = tenant_context + err_code = exc.HTTPNotFound.code + self._router_interface_action('add', + r['router']['id'], + s['subnet']['id'], + None, + err_code) + tdict.return_value = admin_context + body = self._router_interface_action('add', + r['router']['id'], + s['subnet']['id'], + None) + 
self.assertIn('port_id', body) + tdict.return_value = tenant_context + self._router_interface_action('remove', + r['router']['id'], + s['subnet']['id'], + None, + err_code) + tdict.return_value = admin_context + body = self._router_interface_action('remove', + r['router']['id'], + s['subnet']['id'], + None) + + def test_router_add_interface_subnet_with_port_from_other_tenant(self): + tenant_id = _uuid() + other_tenant_id = _uuid() + with contextlib.nested( + self.router(tenant_id=tenant_id), + self.network(tenant_id=tenant_id), + self.network(tenant_id=other_tenant_id)) as (r, n1, n2): + with contextlib.nested( + self.subnet(network=n1, cidr='10.0.0.0/24'), + self.subnet(network=n2, cidr='10.1.0.0/24')) as (s1, s2): + body = self._router_interface_action( + 'add', + r['router']['id'], + s2['subnet']['id'], + None) + self.assertIn('port_id', body) + self._router_interface_action( + 'add', + r['router']['id'], + s1['subnet']['id'], + None, + tenant_id=tenant_id) + self.assertIn('port_id', body) + self._router_interface_action( + 'remove', + r['router']['id'], + s1['subnet']['id'], + None, + tenant_id=tenant_id) + body = self._router_interface_action( + 'remove', + r['router']['id'], + s2['subnet']['id'], + None) + + def test_router_add_interface_port(self): + with self.router() as r: + with self.port(no_delete=True) as p: + body = self._router_interface_action('add', + r['router']['id'], + None, + p['port']['id']) + self.assertIn('port_id', body) + self.assertEqual(body['port_id'], p['port']['id']) + + # fetch port and confirm device_id + body = self._show('ports', p['port']['id']) + self.assertEqual(body['port']['device_id'], r['router']['id']) + + # clean-up + self._router_interface_action('remove', + r['router']['id'], + None, + p['port']['id']) + + def test_router_add_interface_port_bad_tenant_returns_404(self): + with mock.patch('neutron.context.Context.to_dict') as tdict: + admin_context = {'roles': ['admin']} + tenant_context = {'tenant_id': 'bad_tenant', + 
'roles': []} + tdict.return_value = admin_context + with self.router() as r: + with self.port(no_delete=True) as p: + tdict.return_value = tenant_context + err_code = exc.HTTPNotFound.code + self._router_interface_action('add', + r['router']['id'], + None, + p['port']['id'], + err_code) + tdict.return_value = admin_context + self._router_interface_action('add', + r['router']['id'], + None, + p['port']['id']) + + tdict.return_value = tenant_context + # clean-up + self._router_interface_action('remove', + r['router']['id'], + None, + p['port']['id'], + err_code) + + tdict.return_value = admin_context + self._router_interface_action('remove', + r['router']['id'], + None, + p['port']['id']) + + def test_router_add_interface_dup_subnet1_returns_400(self): + with self.router() as r: + with self.subnet() as s: + self._router_interface_action('add', + r['router']['id'], + s['subnet']['id'], + None) + self._router_interface_action('add', + r['router']['id'], + s['subnet']['id'], + None, + expected_code=exc. + HTTPBadRequest.code) + self._router_interface_action('remove', + r['router']['id'], + s['subnet']['id'], + None) + + def test_router_add_interface_dup_subnet2_returns_400(self): + with self.router() as r: + with self.subnet() as s: + with self.port(subnet=s, no_delete=True) as p1: + with self.port(subnet=s) as p2: + self._router_interface_action('add', + r['router']['id'], + None, + p1['port']['id']) + self._router_interface_action('add', + r['router']['id'], + None, + p2['port']['id'], + expected_code=exc. 
+ HTTPBadRequest.code) + # clean-up + self._router_interface_action('remove', + r['router']['id'], + None, + p1['port']['id']) + + def test_router_add_interface_overlapped_cidr_returns_400(self): + with self.router() as r: + with self.subnet(cidr='10.0.1.0/24') as s1: + self._router_interface_action('add', + r['router']['id'], + s1['subnet']['id'], + None) + + def try_overlapped_cidr(cidr): + with self.subnet(cidr=cidr) as s2: + self._router_interface_action('add', + r['router']['id'], + s2['subnet']['id'], + None, + expected_code=exc. + HTTPBadRequest.code) + # another subnet with same cidr + try_overlapped_cidr('10.0.1.0/24') + # another subnet with overlapped cidr including s1 + try_overlapped_cidr('10.0.0.0/16') + # another subnet with overlapped cidr included by s1 + try_overlapped_cidr('10.0.1.1/32') + # clean-up + self._router_interface_action('remove', + r['router']['id'], + s1['subnet']['id'], + None) + + def test_router_add_interface_no_data_returns_400(self): + with self.router() as r: + self._router_interface_action('add', + r['router']['id'], + None, + None, + expected_code=exc. + HTTPBadRequest.code) + + def test_router_add_gateway_dup_subnet1_returns_400(self): + with self.router() as r: + with self.subnet() as s: + self._router_interface_action('add', + r['router']['id'], + s['subnet']['id'], + None) + self._set_net_external(s['subnet']['network_id']) + self._add_external_gateway_to_router( + r['router']['id'], + s['subnet']['network_id'], + expected_code=exc.HTTPBadRequest.code) + self._router_interface_action('remove', + r['router']['id'], + s['subnet']['id'], + None) + + def test_router_add_gateway_dup_subnet2_returns_400(self): + with self.router() as r: + with self.subnet() as s: + self._set_net_external(s['subnet']['network_id']) + self._add_external_gateway_to_router( + r['router']['id'], + s['subnet']['network_id']) + self._router_interface_action('add', + r['router']['id'], + s['subnet']['id'], + None, + expected_code=exc. 
+ HTTPBadRequest.code) + self._remove_external_gateway_from_router( + r['router']['id'], + s['subnet']['network_id']) + + def test_router_add_gateway(self): + with self.router() as r: + with self.subnet() as s: + self._set_net_external(s['subnet']['network_id']) + self._add_external_gateway_to_router( + r['router']['id'], + s['subnet']['network_id']) + body = self._show('routers', r['router']['id']) + net_id = body['router']['external_gateway_info']['network_id'] + self.assertEqual(net_id, s['subnet']['network_id']) + self._remove_external_gateway_from_router( + r['router']['id'], + s['subnet']['network_id']) + body = self._show('routers', r['router']['id']) + gw_info = body['router']['external_gateway_info'] + self.assertIsNone(gw_info) + + def test_router_add_gateway_tenant_ctx(self): + with self.router(tenant_id='noadmin', + set_context=True) as r: + with self.subnet() as s: + self._set_net_external(s['subnet']['network_id']) + ctx = context.Context('', 'noadmin') + self._add_external_gateway_to_router( + r['router']['id'], + s['subnet']['network_id'], + neutron_context=ctx) + body = self._show('routers', r['router']['id']) + net_id = body['router']['external_gateway_info']['network_id'] + self.assertEqual(net_id, s['subnet']['network_id']) + self._remove_external_gateway_from_router( + r['router']['id'], + s['subnet']['network_id']) + body = self._show('routers', r['router']['id']) + gw_info = body['router']['external_gateway_info'] + self.assertIsNone(gw_info) + + def test_create_router_port_with_device_id_of_other_teants_router(self): + with self.router() as admin_router: + with self.network(tenant_id='tenant_a', + set_context=True) as n: + with self.subnet(network=n): + self._create_port( + self.fmt, n['network']['id'], + tenant_id='tenant_a', + device_id=admin_router['router']['id'], + device_owner='network:router_interface', + set_context=True, + expected_res_status=exc.HTTPConflict.code) + + def 
test_create_non_router_port_device_id_of_other_teants_router_update( + self): + # This tests that HTTPConflict is raised if we create a non-router + # port that matches the device_id of another tenants router and then + # we change the device_owner to be network:router_interface. + with self.router() as admin_router: + with self.network(tenant_id='tenant_a', + set_context=True) as n: + with self.subnet(network=n): + port_res = self._create_port( + self.fmt, n['network']['id'], + tenant_id='tenant_a', + device_id=admin_router['router']['id'], + set_context=True) + port = self.deserialize(self.fmt, port_res) + neutron_context = context.Context('', 'tenant_a') + data = {'port': {'device_owner': + 'network:router_interface'}} + self._update('ports', port['port']['id'], data, + neutron_context=neutron_context, + expected_code=exc.HTTPConflict.code) + self._delete('ports', port['port']['id']) + + def test_update_port_device_id_to_different_tenants_router(self): + with self.router() as admin_router: + with self.router(tenant_id='tenant_a', + set_context=True) as tenant_router: + with self.network(tenant_id='tenant_a', + set_context=True) as n: + with self.subnet(network=n) as s: + port = self._router_interface_action( + 'add', tenant_router['router']['id'], + s['subnet']['id'], None, tenant_id='tenant_a') + neutron_context = context.Context('', 'tenant_a') + data = {'port': + {'device_id': admin_router['router']['id']}} + self._update('ports', port['port_id'], data, + neutron_context=neutron_context, + expected_code=exc.HTTPConflict.code) + self._router_interface_action( + 'remove', tenant_router['router']['id'], + s['subnet']['id'], None, tenant_id='tenant_a') + + def test_router_add_gateway_invalid_network_returns_404(self): + with self.router() as r: + self._add_external_gateway_to_router( + r['router']['id'], + "foobar", expected_code=exc.HTTPNotFound.code) + + def test_router_add_gateway_net_not_external_returns_400(self): + with self.router() as r: + with 
self.subnet() as s: + # intentionally do not set net as external + self._add_external_gateway_to_router( + r['router']['id'], + s['subnet']['network_id'], + expected_code=exc.HTTPBadRequest.code) + + def test_router_add_gateway_no_subnet_returns_400(self): + with self.router() as r: + with self.network() as n: + self._set_net_external(n['network']['id']) + self._add_external_gateway_to_router( + r['router']['id'], + n['network']['id'], expected_code=exc.HTTPBadRequest.code) + + def test_router_remove_interface_inuse_returns_409(self): + with self.router() as r: + with self.subnet() as s: + self._router_interface_action('add', + r['router']['id'], + s['subnet']['id'], + None) + self._delete('routers', r['router']['id'], + expected_code=exc.HTTPConflict.code) + + # remove interface so test can exit without errors + self._router_interface_action('remove', + r['router']['id'], + s['subnet']['id'], + None) + + def test_router_remove_interface_wrong_subnet_returns_400(self): + with self.router() as r: + with self.subnet() as s: + with self.port(no_delete=True) as p: + self._router_interface_action('add', + r['router']['id'], + None, + p['port']['id']) + self._router_interface_action('remove', + r['router']['id'], + s['subnet']['id'], + p['port']['id'], + exc.HTTPBadRequest.code) + #remove properly to clean-up + self._router_interface_action('remove', + r['router']['id'], + None, + p['port']['id']) + + def test_router_remove_interface_returns_200(self): + with self.router() as r: + with self.port(no_delete=True) as p: + body = self._router_interface_action('add', + r['router']['id'], + None, + p['port']['id']) + self._router_interface_action('remove', + r['router']['id'], + None, + p['port']['id'], + expected_body=body) + + def test_router_remove_interface_wrong_port_returns_404(self): + with self.router() as r: + with self.subnet(): + with self.port(no_delete=True) as p: + self._router_interface_action('add', + r['router']['id'], + None, + p['port']['id']) + # create 
another port for testing failure case + res = self._create_port(self.fmt, p['port']['network_id']) + p2 = self.deserialize(self.fmt, res) + self._router_interface_action('remove', + r['router']['id'], + None, + p2['port']['id'], + exc.HTTPNotFound.code) + # remove correct interface to cleanup + self._router_interface_action('remove', + r['router']['id'], + None, + p['port']['id']) + # remove extra port created + self._delete('ports', p2['port']['id']) + + def test_router_delete(self): + with self.router() as router: + router_id = router['router']['id'] + req = self.new_show_request('router', router_id) + res = req.get_response(self._api_for_resource('router')) + self.assertEqual(res.status_int, 404) + + def test_router_delete_with_port_existed_returns_409(self): + with self.subnet() as subnet: + res = self._create_router(self.fmt, _uuid()) + router = self.deserialize(self.fmt, res) + self._router_interface_action('add', + router['router']['id'], + subnet['subnet']['id'], + None) + self._delete('routers', router['router']['id'], + exc.HTTPConflict.code) + self._router_interface_action('remove', + router['router']['id'], + subnet['subnet']['id'], + None) + self._delete('routers', router['router']['id']) + + def test_router_delete_with_floatingip_existed_returns_409(self): + with self.port() as p: + private_sub = {'subnet': {'id': + p['port']['fixed_ips'][0]['subnet_id']}} + with self.subnet(cidr='12.0.0.0/24') as public_sub: + self._set_net_external(public_sub['subnet']['network_id']) + res = self._create_router(self.fmt, _uuid()) + r = self.deserialize(self.fmt, res) + self._add_external_gateway_to_router( + r['router']['id'], + public_sub['subnet']['network_id']) + self._router_interface_action('add', r['router']['id'], + private_sub['subnet']['id'], + None) + res = self._create_floatingip( + self.fmt, public_sub['subnet']['network_id'], + port_id=p['port']['id']) + self.assertEqual(res.status_int, exc.HTTPCreated.code) + floatingip = self.deserialize(self.fmt, 
res) + self._delete('routers', r['router']['id'], + expected_code=exc.HTTPConflict.code) + # Cleanup + self._delete('floatingips', floatingip['floatingip']['id']) + self._router_interface_action('remove', r['router']['id'], + private_sub['subnet']['id'], + None) + self._delete('routers', r['router']['id']) + + def test_router_show(self): + name = 'router1' + tenant_id = _uuid() + expected_value = [('name', name), ('tenant_id', tenant_id), + ('admin_state_up', True), ('status', 'ACTIVE'), + ('external_gateway_info', None)] + with self.router(name='router1', admin_state_up=True, + tenant_id=tenant_id) as router: + res = self._show('routers', router['router']['id']) + for k, v in expected_value: + self.assertEqual(res['router'][k], v) + + def test_network_update_external_failure(self): + with self.router() as r: + with self.subnet() as s1: + self._set_net_external(s1['subnet']['network_id']) + self._add_external_gateway_to_router( + r['router']['id'], + s1['subnet']['network_id']) + self._update('networks', s1['subnet']['network_id'], + {'network': {external_net.EXTERNAL: False}}, + expected_code=exc.HTTPConflict.code) + self._remove_external_gateway_from_router( + r['router']['id'], + s1['subnet']['network_id']) + + def test_network_update_external(self): + with self.router() as r: + with self.network('test_net') as testnet: + self._set_net_external(testnet['network']['id']) + with self.subnet() as s1: + self._set_net_external(s1['subnet']['network_id']) + self._add_external_gateway_to_router( + r['router']['id'], + s1['subnet']['network_id']) + self._update('networks', testnet['network']['id'], + {'network': {external_net.EXTERNAL: False}}) + self._remove_external_gateway_from_router( + r['router']['id'], + s1['subnet']['network_id']) + + def test_floatingip_crd_ops(self): + with self.floatingip_with_assoc() as fip: + self._validate_floating_ip(fip) + + # post-delete, check that it is really gone + body = self._list('floatingips') + 
self.assertEqual(len(body['floatingips']), 0) + + self._show('floatingips', fip['floatingip']['id'], + expected_code=exc.HTTPNotFound.code) + + def _test_floatingip_with_assoc_fails(self, plugin_class): + with self.subnet(cidr='200.0.0.0/24') as public_sub: + self._set_net_external(public_sub['subnet']['network_id']) + with self.port() as private_port: + with self.router() as r: + sid = private_port['port']['fixed_ips'][0]['subnet_id'] + private_sub = {'subnet': {'id': sid}} + self._add_external_gateway_to_router( + r['router']['id'], + public_sub['subnet']['network_id']) + self._router_interface_action('add', r['router']['id'], + private_sub['subnet']['id'], + None) + method = plugin_class + '._update_fip_assoc' + with mock.patch(method) as pl: + pl.side_effect = n_exc.BadRequest( + resource='floatingip', + msg='fake_error') + res = self._create_floatingip( + self.fmt, + public_sub['subnet']['network_id'], + port_id=private_port['port']['id']) + self.assertEqual(res.status_int, 400) + for p in self._list('ports')['ports']: + if (p['device_owner'] == + l3_constants.DEVICE_OWNER_FLOATINGIP): + self.fail('garbage port is not deleted') + self._remove_external_gateway_from_router( + r['router']['id'], + public_sub['subnet']['network_id']) + self._router_interface_action('remove', + r['router']['id'], + private_sub['subnet']['id'], + None) + + def test_floatingip_with_assoc_fails(self): + self._test_floatingip_with_assoc_fails( + 'neutron.db.l3_db.L3_NAT_db_mixin') + + def _test_floatingip_with_ip_generation_failure(self, plugin_class): + with self.subnet(cidr='200.0.0.0/24') as public_sub: + self._set_net_external(public_sub['subnet']['network_id']) + with self.port() as private_port: + with self.router() as r: + sid = private_port['port']['fixed_ips'][0]['subnet_id'] + private_sub = {'subnet': {'id': sid}} + self._add_external_gateway_to_router( + r['router']['id'], + public_sub['subnet']['network_id']) + self._router_interface_action('add', r['router']['id'], + 
private_sub['subnet']['id'], + None) + method = plugin_class + '._update_fip_assoc' + with mock.patch(method) as pl: + pl.side_effect = n_exc.IpAddressGenerationFailure( + net_id='netid') + res = self._create_floatingip( + self.fmt, + public_sub['subnet']['network_id'], + port_id=private_port['port']['id']) + self.assertEqual(res.status_int, exc.HTTPConflict.code) + + for p in self._list('ports')['ports']: + if (p['device_owner'] == + l3_constants.DEVICE_OWNER_FLOATINGIP): + self.fail('garbage port is not deleted') + + self._remove_external_gateway_from_router( + r['router']['id'], + public_sub['subnet']['network_id']) + self._router_interface_action('remove', + r['router']['id'], + private_sub['subnet']['id'], + None) + + def test_floatingip_update( + self, expected_status=l3_constants.FLOATINGIP_STATUS_ACTIVE): + with self.port() as p: + private_sub = {'subnet': {'id': + p['port']['fixed_ips'][0]['subnet_id']}} + with self.floatingip_no_assoc(private_sub) as fip: + body = self._show('floatingips', fip['floatingip']['id']) + self.assertIsNone(body['floatingip']['port_id']) + self.assertIsNone(body['floatingip']['fixed_ip_address']) + self.assertEqual(body['floatingip']['status'], expected_status) + + port_id = p['port']['id'] + ip_address = p['port']['fixed_ips'][0]['ip_address'] + body = self._update('floatingips', fip['floatingip']['id'], + {'floatingip': {'port_id': port_id}}) + self.assertEqual(body['floatingip']['port_id'], port_id) + self.assertEqual(body['floatingip']['fixed_ip_address'], + ip_address) + + def test_floatingip_create_different_fixed_ip_same_port(self): + '''This tests that it is possible to delete a port that has + multiple floating ip addresses associated with it (each floating + address associated with a unique fixed address). 
+ ''' + + with self.router() as r: + with self.subnet(cidr='11.0.0.0/24') as public_sub: + self._set_net_external(public_sub['subnet']['network_id']) + self._add_external_gateway_to_router( + r['router']['id'], + public_sub['subnet']['network_id']) + + with self.subnet() as private_sub: + ip_range = list(netaddr.IPNetwork( + private_sub['subnet']['cidr'])) + fixed_ips = [{'ip_address': str(ip_range[-3])}, + {'ip_address': str(ip_range[-2])}] + + self._router_interface_action( + 'add', r['router']['id'], + private_sub['subnet']['id'], None) + + with self.port(subnet=private_sub, + fixed_ips=fixed_ips) as p: + + fip1 = self._make_floatingip( + self.fmt, + public_sub['subnet']['network_id'], + p['port']['id'], + fixed_ip=str(ip_range[-2])) + fip2 = self._make_floatingip( + self.fmt, + public_sub['subnet']['network_id'], + p['port']['id'], + fixed_ip=str(ip_range[-3])) + + # Test that floating ips are assigned successfully. + body = self._show('floatingips', + fip1['floatingip']['id']) + self.assertEqual( + body['floatingip']['port_id'], + fip1['floatingip']['port_id']) + + body = self._show('floatingips', + fip2['floatingip']['id']) + self.assertEqual( + body['floatingip']['port_id'], + fip2['floatingip']['port_id']) + + # Test that port has been successfully deleted. 
+ body = self._show('ports', p['port']['id'], + expected_code=exc.HTTPNotFound.code) + + for fip in [fip1, fip2]: + self._delete('floatingips', + fip['floatingip']['id']) + + self._router_interface_action( + 'remove', r['router']['id'], + private_sub['subnet']['id'], None) + + self._remove_external_gateway_from_router( + r['router']['id'], + public_sub['subnet']['network_id']) + + def test_floatingip_update_different_fixed_ip_same_port(self): + with self.subnet() as s: + ip_range = list(netaddr.IPNetwork(s['subnet']['cidr'])) + fixed_ips = [{'ip_address': str(ip_range[-3])}, + {'ip_address': str(ip_range[-2])}] + with self.port(subnet=s, fixed_ips=fixed_ips) as p: + with self.floatingip_with_assoc( + port_id=p['port']['id'], + fixed_ip=str(ip_range[-3])) as fip: + body = self._show('floatingips', fip['floatingip']['id']) + self.assertEqual(fip['floatingip']['id'], + body['floatingip']['id']) + self.assertEqual(fip['floatingip']['port_id'], + body['floatingip']['port_id']) + self.assertEqual(str(ip_range[-3]), + body['floatingip']['fixed_ip_address']) + self.assertIsNotNone(body['floatingip']['router_id']) + body_2 = self._update( + 'floatingips', fip['floatingip']['id'], + {'floatingip': {'port_id': p['port']['id'], + 'fixed_ip_address': str(ip_range[-2])} + }) + self.assertEqual(fip['floatingip']['port_id'], + body_2['floatingip']['port_id']) + self.assertEqual(str(ip_range[-2]), + body_2['floatingip']['fixed_ip_address']) + + def test_floatingip_update_different_router(self): + # Create subnet with different CIDRs to account for plugins which + # do not support overlapping IPs + with contextlib.nested(self.subnet(cidr='10.0.0.0/24'), + self.subnet(cidr='10.0.1.0/24')) as ( + s1, s2): + with contextlib.nested(self.port(subnet=s1), + self.port(subnet=s2)) as (p1, p2): + private_sub1 = {'subnet': + {'id': + p1['port']['fixed_ips'][0]['subnet_id']}} + private_sub2 = {'subnet': + {'id': + p2['port']['fixed_ips'][0]['subnet_id']}} + with self.subnet(cidr='12.0.0.0/24') 
as public_sub: + with contextlib.nested( + self.floatingip_no_assoc_with_public_sub( + private_sub1, public_sub=public_sub), + self.floatingip_no_assoc_with_public_sub( + private_sub2, public_sub=public_sub)) as ( + (fip1, r1), (fip2, r2)): + + def assert_no_assoc(fip): + body = self._show('floatingips', + fip['floatingip']['id']) + self.assertIsNone(body['floatingip']['port_id']) + self.assertIsNone( + body['floatingip']['fixed_ip_address']) + + assert_no_assoc(fip1) + assert_no_assoc(fip2) + + def associate_and_assert(fip, port): + port_id = port['port']['id'] + ip_address = (port['port']['fixed_ips'] + [0]['ip_address']) + body = self._update( + 'floatingips', fip['floatingip']['id'], + {'floatingip': {'port_id': port_id}}) + self.assertEqual(body['floatingip']['port_id'], + port_id) + self.assertEqual( + body['floatingip']['fixed_ip_address'], + ip_address) + return body['floatingip']['router_id'] + + fip1_r1_res = associate_and_assert(fip1, p1) + self.assertEqual(fip1_r1_res, r1['router']['id']) + # The following operation will associate the floating + # ip to a different router + fip1_r2_res = associate_and_assert(fip1, p2) + self.assertEqual(fip1_r2_res, r2['router']['id']) + fip2_r1_res = associate_and_assert(fip2, p1) + self.assertEqual(fip2_r1_res, r1['router']['id']) + # disassociate fip1 + self._update( + 'floatingips', fip1['floatingip']['id'], + {'floatingip': {'port_id': None}}) + fip2_r2_res = associate_and_assert(fip2, p2) + self.assertEqual(fip2_r2_res, r2['router']['id']) + + def test_floatingip_with_assoc(self): + with self.floatingip_with_assoc() as fip: + body = self._show('floatingips', fip['floatingip']['id']) + self.assertEqual(body['floatingip']['id'], + fip['floatingip']['id']) + self.assertEqual(body['floatingip']['port_id'], + fip['floatingip']['port_id']) + self.assertIsNotNone(body['floatingip']['fixed_ip_address']) + self.assertIsNotNone(body['floatingip']['router_id']) + + def test_floatingip_port_delete(self): + with self.subnet() 
as private_sub: + with self.floatingip_no_assoc(private_sub) as fip: + with self.port(subnet=private_sub) as p: + body = self._update('floatingips', fip['floatingip']['id'], + {'floatingip': + {'port_id': p['port']['id']}}) + # note: once this port goes out of scope, the port will be + # deleted, which is what we want to test. We want to confirm + # that the fields are set back to None + body = self._show('floatingips', fip['floatingip']['id']) + self.assertEqual(body['floatingip']['id'], + fip['floatingip']['id']) + self.assertIsNone(body['floatingip']['port_id']) + self.assertIsNone(body['floatingip']['fixed_ip_address']) + self.assertIsNone(body['floatingip']['router_id']) + + def test_two_fips_one_port_invalid_return_409(self): + with self.floatingip_with_assoc() as fip1: + res = self._create_floatingip( + self.fmt, + fip1['floatingip']['floating_network_id'], + fip1['floatingip']['port_id']) + self.assertEqual(res.status_int, exc.HTTPConflict.code) + + def test_floating_ip_direct_port_delete_returns_409(self): + found = False + with self.floatingip_with_assoc(): + for p in self._list('ports')['ports']: + if p['device_owner'] == l3_constants.DEVICE_OWNER_FLOATINGIP: + self._delete('ports', p['id'], + expected_code=exc.HTTPConflict.code) + found = True + self.assertTrue(found) + + def _test_floatingip_with_invalid_create_port(self, plugin_class): + with self.port() as p: + private_sub = {'subnet': {'id': + p['port']['fixed_ips'][0]['subnet_id']}} + with self.subnet(cidr='12.0.0.0/24') as public_sub: + self._set_net_external(public_sub['subnet']['network_id']) + res = self._create_router(self.fmt, _uuid()) + r = self.deserialize(self.fmt, res) + self._add_external_gateway_to_router( + r['router']['id'], + public_sub['subnet']['network_id']) + self._router_interface_action( + 'add', r['router']['id'], + private_sub['subnet']['id'], + None) + + with mock.patch(plugin_class + '.create_port') as createport: + createport.return_value = {'fixed_ips': []} + res = 
self._create_floatingip( + self.fmt, public_sub['subnet']['network_id'], + port_id=p['port']['id']) + self.assertEqual(res.status_int, + exc.HTTPBadRequest.code) + self._router_interface_action('remove', + r['router']['id'], + private_sub + ['subnet']['id'], + None) + self._delete('routers', r['router']['id']) + + def test_floatingip_with_invalid_create_port(self): + self._test_floatingip_with_invalid_create_port( + 'neutron.db.db_base_plugin_v2.NeutronDbPluginV2') + + def test_create_floatingip_no_ext_gateway_return_404(self): + with self.subnet() as public_sub: + self._set_net_external(public_sub['subnet']['network_id']) + with self.port() as private_port: + with self.router(): + res = self._create_floatingip( + self.fmt, + public_sub['subnet']['network_id'], + port_id=private_port['port']['id']) + # this should be some kind of error + self.assertEqual(res.status_int, exc.HTTPNotFound.code) + + def test_create_floating_non_ext_network_returns_400(self): + with self.subnet() as public_sub: + # normally we would set the network of public_sub to be + # external, but the point of this test is to handle when + # that is not the case + with self.router(): + res = self._create_floatingip( + self.fmt, + public_sub['subnet']['network_id']) + self.assertEqual(res.status_int, exc.HTTPBadRequest.code) + + def test_create_floatingip_no_public_subnet_returns_400(self): + with self.network() as public_network: + with self.port() as private_port: + with self.router() as r: + sid = private_port['port']['fixed_ips'][0]['subnet_id'] + private_sub = {'subnet': {'id': sid}} + self._router_interface_action('add', r['router']['id'], + private_sub['subnet']['id'], + None) + + res = self._create_floatingip( + self.fmt, + public_network['network']['id'], + port_id=private_port['port']['id']) + self.assertEqual(res.status_int, exc.HTTPBadRequest.code) + # cleanup + self._router_interface_action('remove', + r['router']['id'], + private_sub['subnet']['id'], + None) + + def 
test_create_floatingip_invalid_floating_network_id_returns_400(self): + # API-level test - no need to create all objects for l3 plugin + res = self._create_floatingip(self.fmt, 'iamnotanuuid', + uuidutils.generate_uuid(), '192.168.0.1') + self.assertEqual(res.status_int, 400) + + def test_create_floatingip_invalid_floating_port_id_returns_400(self): + # API-level test - no need to create all objects for l3 plugin + res = self._create_floatingip(self.fmt, uuidutils.generate_uuid(), + 'iamnotanuuid', '192.168.0.1') + self.assertEqual(res.status_int, 400) + + def test_create_floatingip_invalid_fixed_ip_address_returns_400(self): + # API-level test - no need to create all objects for l3 plugin + res = self._create_floatingip(self.fmt, uuidutils.generate_uuid(), + uuidutils.generate_uuid(), 'iamnotnanip') + self.assertEqual(res.status_int, 400) + + def test_floatingip_list_with_sort(self): + with contextlib.nested(self.subnet(cidr="10.0.0.0/24"), + self.subnet(cidr="11.0.0.0/24"), + self.subnet(cidr="12.0.0.0/24") + ) as (s1, s2, s3): + network_id1 = s1['subnet']['network_id'] + network_id2 = s2['subnet']['network_id'] + network_id3 = s3['subnet']['network_id'] + self._set_net_external(network_id1) + self._set_net_external(network_id2) + self._set_net_external(network_id3) + fp1 = self._make_floatingip(self.fmt, network_id1) + fp2 = self._make_floatingip(self.fmt, network_id2) + fp3 = self._make_floatingip(self.fmt, network_id3) + try: + self._test_list_with_sort('floatingip', (fp3, fp2, fp1), + [('floating_ip_address', 'desc')]) + finally: + self._delete('floatingips', fp1['floatingip']['id']) + self._delete('floatingips', fp2['floatingip']['id']) + self._delete('floatingips', fp3['floatingip']['id']) + + def test_floatingip_list_with_port_id(self): + with self.floatingip_with_assoc() as fip: + port_id = fip['floatingip']['port_id'] + res = self._list('floatingips', + query_params="port_id=%s" % port_id) + self.assertEqual(len(res['floatingips']), 1) + res = 
self._list('floatingips', query_params="port_id=aaa") + self.assertEqual(len(res['floatingips']), 0) + + def test_floatingip_list_with_pagination(self): + with contextlib.nested(self.subnet(cidr="10.0.0.0/24"), + self.subnet(cidr="11.0.0.0/24"), + self.subnet(cidr="12.0.0.0/24") + ) as (s1, s2, s3): + network_id1 = s1['subnet']['network_id'] + network_id2 = s2['subnet']['network_id'] + network_id3 = s3['subnet']['network_id'] + self._set_net_external(network_id1) + self._set_net_external(network_id2) + self._set_net_external(network_id3) + fp1 = self._make_floatingip(self.fmt, network_id1) + fp2 = self._make_floatingip(self.fmt, network_id2) + fp3 = self._make_floatingip(self.fmt, network_id3) + try: + self._test_list_with_pagination( + 'floatingip', (fp1, fp2, fp3), + ('floating_ip_address', 'asc'), 2, 2) + finally: + self._delete('floatingips', fp1['floatingip']['id']) + self._delete('floatingips', fp2['floatingip']['id']) + self._delete('floatingips', fp3['floatingip']['id']) + + def test_floatingip_list_with_pagination_reverse(self): + with contextlib.nested(self.subnet(cidr="10.0.0.0/24"), + self.subnet(cidr="11.0.0.0/24"), + self.subnet(cidr="12.0.0.0/24") + ) as (s1, s2, s3): + network_id1 = s1['subnet']['network_id'] + network_id2 = s2['subnet']['network_id'] + network_id3 = s3['subnet']['network_id'] + self._set_net_external(network_id1) + self._set_net_external(network_id2) + self._set_net_external(network_id3) + fp1 = self._make_floatingip(self.fmt, network_id1) + fp2 = self._make_floatingip(self.fmt, network_id2) + fp3 = self._make_floatingip(self.fmt, network_id3) + try: + self._test_list_with_pagination_reverse( + 'floatingip', (fp1, fp2, fp3), + ('floating_ip_address', 'asc'), 2, 2) + finally: + self._delete('floatingips', fp1['floatingip']['id']) + self._delete('floatingips', fp2['floatingip']['id']) + self._delete('floatingips', fp3['floatingip']['id']) + + def test_floatingip_delete_router_intf_with_subnet_id_returns_409(self): + found = False + 
with self.floatingip_with_assoc(): + for p in self._list('ports')['ports']: + if p['device_owner'] == l3_constants.DEVICE_OWNER_ROUTER_INTF: + subnet_id = p['fixed_ips'][0]['subnet_id'] + router_id = p['device_id'] + self._router_interface_action( + 'remove', router_id, subnet_id, None, + expected_code=exc.HTTPConflict.code) + found = True + break + self.assertTrue(found) + + def test_floatingip_delete_router_intf_with_port_id_returns_409(self): + found = False + with self.floatingip_with_assoc(): + for p in self._list('ports')['ports']: + if p['device_owner'] == l3_constants.DEVICE_OWNER_ROUTER_INTF: + router_id = p['device_id'] + self._router_interface_action( + 'remove', router_id, None, p['id'], + expected_code=exc.HTTPConflict.code) + found = True + break + self.assertTrue(found) + + def test_router_delete_subnet_inuse_returns_409(self): + with self.router() as r: + with self.subnet() as s: + self._router_interface_action('add', + r['router']['id'], + s['subnet']['id'], + None) + # subnet cannot be delete as it's attached to a router + self._delete('subnets', s['subnet']['id'], + expected_code=exc.HTTPConflict.code) + # remove interface so test can exit without errors + self._router_interface_action('remove', + r['router']['id'], + s['subnet']['id'], + None) + + def test_delete_ext_net_with_disassociated_floating_ips(self): + with self.network() as net: + net_id = net['network']['id'] + self._set_net_external(net_id) + with self.subnet(network=net, do_delete=False): + self._make_floatingip(self.fmt, net_id) + + +class L3AgentDbTestCaseBase(L3NatTestCaseMixin): + + """Unit tests for methods called by the L3 agent.""" + + def test_l3_agent_routers_query_interfaces(self): + with self.router() as r: + with self.port(no_delete=True) as p: + self._router_interface_action('add', + r['router']['id'], + None, + p['port']['id']) + + routers = self.plugin.get_sync_data( + context.get_admin_context(), None) + self.assertEqual(1, len(routers)) + interfaces = 
routers[0][l3_constants.INTERFACE_KEY] + self.assertEqual(1, len(interfaces)) + subnet_id = interfaces[0]['subnet']['id'] + wanted_subnetid = p['port']['fixed_ips'][0]['subnet_id'] + self.assertEqual(wanted_subnetid, subnet_id) + # clean-up + self._router_interface_action('remove', + r['router']['id'], + None, + p['port']['id']) + + def test_l3_agent_routers_query_ignore_interfaces_with_moreThanOneIp(self): + with self.router() as r: + with self.subnet(cidr='9.0.1.0/24') as subnet: + with self.port(subnet=subnet, + no_delete=True, + fixed_ips=[{'ip_address': '9.0.1.3'}]) as p: + self._router_interface_action('add', + r['router']['id'], + None, + p['port']['id']) + port = {'port': {'fixed_ips': + [{'ip_address': '9.0.1.4', + 'subnet_id': subnet['subnet']['id']}, + {'ip_address': '9.0.1.5', + 'subnet_id': subnet['subnet']['id']}]}} + ctx = context.get_admin_context() + self.core_plugin.update_port(ctx, p['port']['id'], port) + routers = self.plugin.get_sync_data(ctx, None) + self.assertEqual(1, len(routers)) + interfaces = routers[0].get(l3_constants.INTERFACE_KEY, []) + self.assertEqual(1, len(interfaces)) + # clean-up + self._router_interface_action('remove', + r['router']['id'], + None, + p['port']['id']) + + def test_l3_agent_routers_query_gateway(self): + with self.router() as r: + with self.subnet() as s: + self._set_net_external(s['subnet']['network_id']) + self._add_external_gateway_to_router( + r['router']['id'], + s['subnet']['network_id']) + routers = self.plugin.get_sync_data( + context.get_admin_context(), [r['router']['id']]) + self.assertEqual(1, len(routers)) + gw_port = routers[0]['gw_port'] + self.assertEqual(s['subnet']['id'], gw_port['subnet']['id']) + self._remove_external_gateway_from_router( + r['router']['id'], + s['subnet']['network_id']) + + def test_l3_agent_routers_query_floatingips(self): + with self.floatingip_with_assoc() as fip: + routers = self.plugin.get_sync_data( + context.get_admin_context(), [fip['floatingip']['router_id']]) + 
self.assertEqual(1, len(routers)) + floatingips = routers[0][l3_constants.FLOATINGIP_KEY] + self.assertEqual(1, len(floatingips)) + self.assertEqual(floatingips[0]['id'], + fip['floatingip']['id']) + self.assertEqual(floatingips[0]['port_id'], + fip['floatingip']['port_id']) + self.assertIsNotNone(floatingips[0]['fixed_ip_address']) + self.assertIsNotNone(floatingips[0]['router_id']) + + def _test_notify_op_agent(self, target_func, *args): + l3_rpc_agent_api_str = ( + 'neutron.api.rpc.agentnotifiers.l3_rpc_agent_api.L3AgentNotifyAPI') + plugin = manager.NeutronManager.get_service_plugins()[ + service_constants.L3_ROUTER_NAT] + oldNotify = plugin.l3_rpc_notifier + try: + with mock.patch(l3_rpc_agent_api_str) as notifyApi: + plugin.l3_rpc_notifier = notifyApi + kargs = [item for item in args] + kargs.append(notifyApi) + target_func(*kargs) + except Exception: + plugin.l3_rpc_notifier = oldNotify + raise + else: + plugin.l3_rpc_notifier = oldNotify + + def _test_router_gateway_op_agent(self, notifyApi): + with self.router() as r: + with self.subnet() as s: + self._set_net_external(s['subnet']['network_id']) + self._add_external_gateway_to_router( + r['router']['id'], + s['subnet']['network_id']) + self._remove_external_gateway_from_router( + r['router']['id'], + s['subnet']['network_id']) + self.assertEqual( + 2, notifyApi.routers_updated.call_count) + + def test_router_gateway_op_agent(self): + self._test_notify_op_agent(self._test_router_gateway_op_agent) + + def _test_interfaces_op_agent(self, r, notifyApi): + with self.port(no_delete=True) as p: + self._router_interface_action('add', + r['router']['id'], + None, + p['port']['id']) + # clean-up + self._router_interface_action('remove', + r['router']['id'], + None, + p['port']['id']) + self.assertEqual(2, notifyApi.routers_updated.call_count) + + def test_interfaces_op_agent(self): + with self.router() as r: + self._test_notify_op_agent( + self._test_interfaces_op_agent, r) + + def _test_floatingips_op_agent(self, 
notifyApi): + with self.floatingip_with_assoc(): + pass + # add gateway, add interface, associate, deletion of floatingip, + # delete gateway, delete interface + self.assertEqual(6, notifyApi.routers_updated.call_count) + + def test_floatingips_op_agent(self): + self._test_notify_op_agent(self._test_floatingips_op_agent) + + +class L3BaseForIntTests(test_db_plugin.NeutronDbPluginV2TestCase): + + mock_rescheduling = True + + def setUp(self, plugin=None, ext_mgr=None, service_plugins=None): + if not plugin: + plugin = 'neutron.tests.unit.test_l3_plugin.TestL3NatIntPlugin' + # for these tests we need to enable overlapping ips + cfg.CONF.set_default('allow_overlapping_ips', True) + ext_mgr = ext_mgr or L3TestExtensionManager() + + if self.mock_rescheduling: + mock.patch('%s._check_router_needs_rescheduling' % plugin, + new=lambda *a: False).start() + + super(L3BaseForIntTests, self).setUp(plugin=plugin, ext_mgr=ext_mgr, + service_plugins=service_plugins) + + self.setup_notification_driver() + + +class L3BaseForSepTests(test_db_plugin.NeutronDbPluginV2TestCase): + + def setUp(self, plugin=None, ext_mgr=None): + # the plugin without L3 support + if not plugin: + plugin = 'neutron.tests.unit.test_l3_plugin.TestNoL3NatPlugin' + # the L3 service plugin + l3_plugin = ('neutron.tests.unit.test_l3_plugin.' + 'TestL3NatServicePlugin') + service_plugins = {'l3_plugin_name': l3_plugin} + + # for these tests we need to enable overlapping ips + cfg.CONF.set_default('allow_overlapping_ips', True) + if not ext_mgr: + ext_mgr = L3TestExtensionManager() + super(L3BaseForSepTests, self).setUp(plugin=plugin, ext_mgr=ext_mgr, + service_plugins=service_plugins) + + self.setup_notification_driver() + + +class L3NatDBIntAgentSchedulingTestCase(L3BaseForIntTests, + L3NatTestCaseMixin, + test_agent_ext_plugin. + AgentDBTestMixIn): + + """Unit tests for core plugin with L3 routing and scheduling integrated.""" + + def setUp(self, plugin='neutron.tests.unit.test_l3_plugin.' 
+ 'TestL3NatIntAgentSchedulingPlugin', + ext_mgr=None, service_plugins=None): + self.mock_rescheduling = False + super(L3NatDBIntAgentSchedulingTestCase, self).setUp( + plugin, ext_mgr, service_plugins) + self.adminContext = context.get_admin_context() + + def _assert_router_on_agent(self, router_id, agent_host): + plugin = manager.NeutronManager.get_service_plugins().get( + service_constants.L3_ROUTER_NAT) + agents = plugin.list_l3_agents_hosting_router( + self.adminContext, router_id)['agents'] + self.assertEqual(len(agents), 1) + self.assertEqual(agents[0]['host'], agent_host) + + def test_update_gateway_agent_exists_supporting_network(self): + with contextlib.nested(self.router(), + self.subnet(), + self.subnet()) as (r, s1, s2): + self._set_net_external(s1['subnet']['network_id']) + l3_rpc = l3_rpc_base.L3RpcCallbackMixin() + self._register_one_l3_agent( + host='host1', + ext_net_id=s1['subnet']['network_id']) + self._register_one_l3_agent( + host='host2', internal_only=False, + ext_net_id=s2['subnet']['network_id']) + l3_rpc.sync_routers(self.adminContext, + host='host1') + self._assert_router_on_agent(r['router']['id'], 'host1') + + self._add_external_gateway_to_router( + r['router']['id'], + s1['subnet']['network_id']) + self._assert_router_on_agent(r['router']['id'], 'host1') + + self._set_net_external(s2['subnet']['network_id']) + self._add_external_gateway_to_router( + r['router']['id'], + s2['subnet']['network_id']) + self._assert_router_on_agent(r['router']['id'], 'host2') + + self._remove_external_gateway_from_router( + r['router']['id'], + s2['subnet']['network_id']) + + def test_update_gateway_agent_exists_supporting_multiple_network(self): + with contextlib.nested(self.router(), + self.subnet(), + self.subnet()) as (r, s1, s2): + self._set_net_external(s1['subnet']['network_id']) + l3_rpc = l3_rpc_base.L3RpcCallbackMixin() + self._register_one_l3_agent( + host='host1', + ext_net_id=s1['subnet']['network_id']) + self._register_one_l3_agent( + 
host='host2', internal_only=False, + ext_net_id='', ext_bridge='') + l3_rpc.sync_routers(self.adminContext, + host='host1') + self._assert_router_on_agent(r['router']['id'], 'host1') + + self._add_external_gateway_to_router( + r['router']['id'], + s1['subnet']['network_id']) + self._assert_router_on_agent(r['router']['id'], 'host1') + + self._set_net_external(s2['subnet']['network_id']) + self._add_external_gateway_to_router( + r['router']['id'], + s2['subnet']['network_id']) + self._assert_router_on_agent(r['router']['id'], 'host2') + + self._remove_external_gateway_from_router( + r['router']['id'], + s2['subnet']['network_id']) + + def test_router_update_gateway_no_eligible_l3_agent(self): + with self.router() as r: + with self.subnet() as s1: + with self.subnet() as s2: + self._set_net_external(s1['subnet']['network_id']) + self._set_net_external(s2['subnet']['network_id']) + self._add_external_gateway_to_router( + r['router']['id'], + s1['subnet']['network_id'], + expected_code=exc.HTTPBadRequest.code) + + +class L3AgentDbIntTestCase(L3BaseForIntTests, L3AgentDbTestCaseBase): + + """Unit tests for methods called by the L3 agent for + the case where core plugin implements L3 routing. + """ + + def setUp(self): + self.core_plugin = TestL3NatIntPlugin() + # core plugin is also plugin providing L3 routing + self.plugin = self.core_plugin + super(L3AgentDbIntTestCase, self).setUp() + + +class L3AgentDbSepTestCase(L3BaseForSepTests, L3AgentDbTestCaseBase): + + """Unit tests for methods called by the L3 agent for the + case where separate service plugin implements L3 routing. 
+ """ + + def setUp(self): + self.core_plugin = TestNoL3NatPlugin() + # core plugin is also plugin providing L3 routing + self.plugin = TestL3NatServicePlugin() + super(L3AgentDbSepTestCase, self).setUp() + + +class L3NatDBIntTestCase(L3BaseForIntTests, L3NatTestCaseBase): + + """Unit tests for core plugin with L3 routing integrated.""" + pass + + +class L3NatDBSepTestCase(L3BaseForSepTests, L3NatTestCaseBase): + + """Unit tests for a separate L3 routing service plugin.""" + pass + + +class L3NatDBIntTestCaseXML(L3NatDBIntTestCase): + fmt = 'xml' + + +class L3NatDBSepTestCaseXML(L3NatDBSepTestCase): + fmt = 'xml' diff --git a/neutron/tests/unit/test_l3_schedulers.py b/neutron/tests/unit/test_l3_schedulers.py new file mode 100644 index 000000000..99bf8d83e --- /dev/null +++ b/neutron/tests/unit/test_l3_schedulers.py @@ -0,0 +1,206 @@ +# Copyright (c) 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# +# @author: Sylvain Afchain, eNovance SAS +# @author: Emilien Macchi, eNovance SAS + +import contextlib +import uuid + +import mock +from oslo.config import cfg + +from neutron.api.v2 import attributes as attr +from neutron.common import constants +from neutron.common import topics +from neutron import context as q_context +from neutron.db import agents_db +from neutron.db import l3_agentschedulers_db +from neutron.extensions import l3 as ext_l3 +from neutron import manager +from neutron.openstack.common import timeutils +from neutron.tests.unit import test_db_plugin +from neutron.tests.unit import test_l3_plugin + +HOST = 'my_l3_host' +FIRST_L3_AGENT = { + 'binary': 'neutron-l3-agent', + 'host': HOST, + 'topic': topics.L3_AGENT, + 'configurations': {}, + 'agent_type': constants.AGENT_TYPE_L3, + 'start_flag': True +} + +HOST_2 = 'my_l3_host_2' +SECOND_L3_AGENT = { + 'binary': 'neutron-l3-agent', + 'host': HOST_2, + 'topic': topics.L3_AGENT, + 'configurations': {}, + 'agent_type': constants.AGENT_TYPE_L3, + 'start_flag': True +} + +DB_PLUGIN_KLASS = ('neutron.plugins.openvswitch.ovs_neutron_plugin.' 
+ 'OVSNeutronPluginV2') + + +class L3SchedulerTestExtensionManager(object): + + def get_resources(self): + attr.RESOURCE_ATTRIBUTE_MAP.update(ext_l3.RESOURCE_ATTRIBUTE_MAP) + l3_res = ext_l3.L3.get_resources() + return l3_res + + def get_actions(self): + return [] + + def get_request_extensions(self): + return [] + + +class L3SchedulerTestCase(l3_agentschedulers_db.L3AgentSchedulerDbMixin, + test_db_plugin.NeutronDbPluginV2TestCase, + test_l3_plugin.L3NatTestCaseMixin): + + def setUp(self): + ext_mgr = L3SchedulerTestExtensionManager() + super(L3SchedulerTestCase, self).setUp(plugin=DB_PLUGIN_KLASS, + ext_mgr=ext_mgr) + + self.adminContext = q_context.get_admin_context() + self.plugin = manager.NeutronManager.get_plugin() + self._register_l3_agents() + + def _register_l3_agents(self): + callback = agents_db.AgentExtRpcCallback() + callback.report_state(self.adminContext, + agent_state={'agent_state': FIRST_L3_AGENT}, + time=timeutils.strtime()) + agent_db = self.plugin.get_agents_db(self.adminContext, + filters={'host': [HOST]}) + self.agent_id1 = agent_db[0].id + + callback.report_state(self.adminContext, + agent_state={'agent_state': SECOND_L3_AGENT}, + time=timeutils.strtime()) + agent_db = self.plugin.get_agents_db(self.adminContext, + filters={'host': [HOST]}) + self.agent_id2 = agent_db[0].id + + def _set_l3_agent_admin_state(self, context, agent_id, state=True): + update = {'agent': {'admin_state_up': state}} + self.plugin.update_agent(context, agent_id, update) + + @contextlib.contextmanager + def router_with_ext_gw(self, name='router1', admin_state_up=True, + fmt=None, tenant_id=str(uuid.uuid4()), + external_gateway_info=None, + subnet=None, set_context=False, + **kwargs): + router = self._make_router(fmt or self.fmt, tenant_id, name, + admin_state_up, external_gateway_info, + set_context, **kwargs) + self._add_external_gateway_to_router( + router['router']['id'], + subnet['subnet']['network_id']) + + yield router + + 
self._remove_external_gateway_from_router( + router['router']['id'], subnet['subnet']['network_id']) + self._delete('routers', router['router']['id']) + + +class L3AgentChanceSchedulerTestCase(L3SchedulerTestCase): + + def test_random_scheduling(self): + random_patch = mock.patch('random.choice') + random_mock = random_patch.start() + + def side_effect(seq): + return seq[0] + random_mock.side_effect = side_effect + + with self.subnet() as subnet: + self._set_net_external(subnet['subnet']['network_id']) + with self.router_with_ext_gw(name='r1', subnet=subnet) as r1: + agents = self.get_l3_agents_hosting_routers( + self.adminContext, [r1['router']['id']], + admin_state_up=True) + + self.assertEqual(len(agents), 1) + self.assertEqual(random_mock.call_count, 1) + + with self.router_with_ext_gw(name='r2', subnet=subnet) as r2: + agents = self.get_l3_agents_hosting_routers( + self.adminContext, [r2['router']['id']], + admin_state_up=True) + + self.assertEqual(len(agents), 1) + self.assertEqual(random_mock.call_count, 2) + + random_patch.stop() + + +class L3AgentLeastRoutersSchedulerTestCase(L3SchedulerTestCase): + def setUp(self): + cfg.CONF.set_override('router_scheduler_driver', + 'neutron.scheduler.l3_agent_scheduler.' + 'LeastRoutersScheduler') + + super(L3AgentLeastRoutersSchedulerTestCase, self).setUp() + + def test_scheduler(self): + # disable one agent to force the scheduling to the only one. 
+ self._set_l3_agent_admin_state(self.adminContext, + self.agent_id2, False) + + with self.subnet() as subnet: + self._set_net_external(subnet['subnet']['network_id']) + with self.router_with_ext_gw(name='r1', subnet=subnet) as r1: + agents = self.get_l3_agents_hosting_routers( + self.adminContext, [r1['router']['id']], + admin_state_up=True) + self.assertEqual(len(agents), 1) + + agent_id1 = agents[0]['id'] + + with self.router_with_ext_gw(name='r2', subnet=subnet) as r2: + agents = self.get_l3_agents_hosting_routers( + self.adminContext, [r2['router']['id']], + admin_state_up=True) + self.assertEqual(len(agents), 1) + + agent_id2 = agents[0]['id'] + + self.assertEqual(agent_id1, agent_id2) + + # re-enable the second agent to see whether the next router + # spawned will be on this one. + self._set_l3_agent_admin_state(self.adminContext, + self.agent_id2, True) + + with self.router_with_ext_gw(name='r3', + subnet=subnet) as r3: + agents = self.get_l3_agents_hosting_routers( + self.adminContext, [r3['router']['id']], + admin_state_up=True) + self.assertEqual(len(agents), 1) + + agent_id3 = agents[0]['id'] + + self.assertNotEqual(agent_id1, agent_id3) diff --git a/neutron/tests/unit/test_linux_daemon.py b/neutron/tests/unit/test_linux_daemon.py new file mode 100644 index 000000000..ab65a2bb2 --- /dev/null +++ b/neutron/tests/unit/test_linux_daemon.py @@ -0,0 +1,211 @@ +# +# Copyright 2012 New Dream Network, LLC (DreamHost) +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# +# @author: Mark McClain, DreamHost + +import os +import sys + +import mock +import testtools + +from neutron.agent.linux import daemon +from neutron.tests import base + +FAKE_FD = 8 + + +class TestPidfile(base.BaseTestCase): + def setUp(self): + super(TestPidfile, self).setUp() + self.os_p = mock.patch.object(daemon, 'os') + self.os = self.os_p.start() + self.os.open.return_value = FAKE_FD + + self.fcntl_p = mock.patch.object(daemon, 'fcntl') + self.fcntl = self.fcntl_p.start() + self.fcntl.flock.return_value = 0 + + def test_init(self): + self.os.O_CREAT = os.O_CREAT + self.os.O_RDWR = os.O_RDWR + + daemon.Pidfile('thefile', 'python') + self.os.open.assert_called_once_with('thefile', os.O_CREAT | os.O_RDWR) + self.fcntl.flock.assert_called_once_with(FAKE_FD, self.fcntl.LOCK_EX | + self.fcntl.LOCK_NB) + + def test_init_open_fail(self): + self.os.open.side_effect = IOError + + with mock.patch.object(daemon.sys, 'stderr'): + with testtools.ExpectedException(SystemExit): + daemon.Pidfile('thefile', 'python') + sys.assert_has_calls([ + mock.call.stderr.write(mock.ANY), + mock.call.exit(1)] + ) + + def test_unlock(self): + p = daemon.Pidfile('thefile', 'python') + p.unlock() + self.fcntl.flock.assert_has_calls([ + mock.call(FAKE_FD, self.fcntl.LOCK_EX | self.fcntl.LOCK_NB), + mock.call(FAKE_FD, self.fcntl.LOCK_UN)] + ) + + def test_write(self): + p = daemon.Pidfile('thefile', 'python') + p.write(34) + + self.os.assert_has_calls([ + mock.call.ftruncate(FAKE_FD, 0), + mock.call.write(FAKE_FD, '34'), + mock.call.fsync(FAKE_FD)] + ) + + def test_read(self): + self.os.read.return_value = '34' + p = daemon.Pidfile('thefile', 'python') + self.assertEqual(34, p.read()) + + def test_is_running(self): + with mock.patch('__builtin__.open') as mock_open: + p = daemon.Pidfile('thefile', 'python') + mock_open.return_value.__enter__ = lambda s: s + mock_open.return_value.__exit__ = mock.Mock() + mock_open.return_value.readline.return_value = 'python' + + with mock.patch.object(p, 
'read') as read: + read.return_value = 34 + self.assertTrue(p.is_running()) + + mock_open.assert_called_once_with('/proc/34/cmdline', 'r') + + def test_is_running_uuid_true(self): + with mock.patch('__builtin__.open') as mock_open: + p = daemon.Pidfile('thefile', 'python', uuid='1234') + mock_open.return_value.__enter__ = lambda s: s + mock_open.return_value.__exit__ = mock.Mock() + mock_open.return_value.readline.return_value = 'python 1234' + + with mock.patch.object(p, 'read') as read: + read.return_value = 34 + self.assertTrue(p.is_running()) + + mock_open.assert_called_once_with('/proc/34/cmdline', 'r') + + def test_is_running_uuid_false(self): + with mock.patch('__builtin__.open') as mock_open: + p = daemon.Pidfile('thefile', 'python', uuid='6789') + mock_open.return_value.__enter__ = lambda s: s + mock_open.return_value.__exit__ = mock.Mock() + mock_open.return_value.readline.return_value = 'python 1234' + + with mock.patch.object(p, 'read') as read: + read.return_value = 34 + self.assertFalse(p.is_running()) + + mock_open.assert_called_once_with('/proc/34/cmdline', 'r') + + +class TestDaemon(base.BaseTestCase): + def setUp(self): + super(TestDaemon, self).setUp() + self.os_p = mock.patch.object(daemon, 'os') + self.os = self.os_p.start() + + self.pidfile_p = mock.patch.object(daemon, 'Pidfile') + self.pidfile = self.pidfile_p.start() + + def test_init(self): + d = daemon.Daemon('pidfile') + self.assertEqual(d.procname, 'python') + + def test_fork_parent(self): + self.os.fork.return_value = 1 + with testtools.ExpectedException(SystemExit): + d = daemon.Daemon('pidfile') + d._fork() + + def test_fork_child(self): + self.os.fork.return_value = 0 + d = daemon.Daemon('pidfile') + self.assertIsNone(d._fork()) + + def test_fork_error(self): + self.os.fork.side_effect = lambda: OSError(1) + with mock.patch.object(daemon.sys, 'stderr'): + with testtools.ExpectedException(SystemExit): + d = daemon.Daemon('pidfile', 'stdin') + d._fork() + + def test_daemonize(self): + 
d = daemon.Daemon('pidfile') + with mock.patch.object(d, '_fork') as fork: + with mock.patch.object(daemon, 'atexit') as atexit: + with mock.patch.object(daemon, 'signal') as signal: + signal.SIGTERM = 15 + with mock.patch.object(daemon, 'sys') as sys: + sys.stdin.fileno.return_value = 0 + sys.stdout.fileno.return_value = 1 + sys.stderr.fileno.return_value = 2 + d.daemonize() + + signal.signal.assert_called_once_with(15, d.handle_sigterm) + atexit.register.assert_called_once_with(d.delete_pid) + fork.assert_has_calls([mock.call(), mock.call()]) + + self.os.assert_has_calls([ + mock.call.chdir('/'), + mock.call.setsid(), + mock.call.umask(0), + mock.call.dup2(mock.ANY, 0), + mock.call.dup2(mock.ANY, 1), + mock.call.dup2(mock.ANY, 2), + mock.call.getpid()] + ) + + def test_delete_pid(self): + self.pidfile.return_value.__str__.return_value = 'pidfile' + d = daemon.Daemon('pidfile') + d.delete_pid() + self.os.remove.assert_called_once_with('pidfile') + + def test_handle_sigterm(self): + d = daemon.Daemon('pidfile') + with mock.patch.object(daemon, 'sys') as sys: + d.handle_sigterm(15, 1234) + sys.exit.assert_called_once_with(0) + + def test_start(self): + self.pidfile.return_value.is_running.return_value = False + d = daemon.Daemon('pidfile') + + with mock.patch.object(d, 'daemonize') as daemonize: + with mock.patch.object(d, 'run') as run: + d.start() + run.assert_called_once_with() + daemonize.assert_called_once_with() + + def test_start_running(self): + self.pidfile.return_value.is_running.return_value = True + d = daemon.Daemon('pidfile') + + with mock.patch.object(daemon.sys, 'stderr'): + with mock.patch.object(d, 'daemonize') as daemonize: + with testtools.ExpectedException(SystemExit): + d.start() + self.assertFalse(daemonize.called) diff --git a/neutron/tests/unit/test_linux_dhcp.py b/neutron/tests/unit/test_linux_dhcp.py new file mode 100644 index 000000000..bedf1755f --- /dev/null +++ b/neutron/tests/unit/test_linux_dhcp.py @@ -0,0 +1,1252 @@ +# vim: 
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import contextlib
import os

import mock
from oslo.config import cfg

from neutron.agent.common import config
from neutron.agent.linux import dhcp
from neutron.common import config as base_config
from neutron.common import constants
from neutron.openstack.common import log as logging
from neutron.tests import base

LOG = logging.getLogger(__name__)


class FakeIPAllocation:
    """Minimal stand-in for a port's fixed-IP allocation record."""

    def __init__(self, address, subnet_id=None):
        self.ip_address = address
        self.subnet_id = subnet_id


class DhcpOpt(object):
    """Attribute bag representing one extra DHCP option on a port."""

    def __init__(self, **kwargs):
        self.__dict__.update(kwargs)

    def __str__(self):
        return str(self.__dict__)


# The Fake* classes below are hand-built fixtures mimicking the objects the
# DHCP agent receives from the plugin (ports, subnets, networks).  Attribute
# values (ids, MACs, CIDRs) are referenced verbatim by the expected strings
# in the tests further down, so they must not be changed.

class FakePort1:
    id = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
    admin_state_up = True
    device_owner = 'foo1'
    fixed_ips = [FakeIPAllocation('192.168.0.2')]
    mac_address = '00:00:80:aa:bb:cc'

    def __init__(self):
        self.extra_dhcp_opts = []


class FakePort2:
    id = 'ffffffff-ffff-ffff-ffff-ffffffffffff'
    admin_state_up = False
    device_owner = 'foo2'
    fixed_ips = [FakeIPAllocation('fdca:3ba5:a17a:4ba3::2')]
    mac_address = '00:00:f3:aa:bb:cc'

    def __init__(self):
        self.extra_dhcp_opts = []


class FakePort3:
    id = '44444444-4444-4444-4444-444444444444'
    admin_state_up = True
    device_owner = 'foo3'
    fixed_ips = [FakeIPAllocation('192.168.0.3'),
                 FakeIPAllocation('fdca:3ba5:a17a:4ba3::3')]
    mac_address = '00:00:0f:aa:bb:cc'

    def __init__(self):
        self.extra_dhcp_opts = []


class FakeRouterPort:
    id = 'rrrrrrrr-rrrr-rrrr-rrrr-rrrrrrrrrrrr'
    admin_state_up = True
    device_owner = constants.DEVICE_OWNER_ROUTER_INTF
    fixed_ips = [FakeIPAllocation('192.168.0.1',
                                  'dddddddd-dddd-dddd-dddd-dddddddddddd')]
    mac_address = '00:00:0f:rr:rr:rr'

    def __init__(self):
        self.extra_dhcp_opts = []


class FakePortMultipleAgents1:
    id = 'rrrrrrrr-rrrr-rrrr-rrrr-rrrrrrrrrrrr'
    admin_state_up = True
    device_owner = constants.DEVICE_OWNER_DHCP
    fixed_ips = [FakeIPAllocation('192.168.0.5',
                                  'dddddddd-dddd-dddd-dddd-dddddddddddd')]
    mac_address = '00:00:0f:dd:dd:dd'

    def __init__(self):
        self.extra_dhcp_opts = []


class FakePortMultipleAgents2:
    id = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
    admin_state_up = True
    device_owner = constants.DEVICE_OWNER_DHCP
    fixed_ips = [FakeIPAllocation('192.168.0.6',
                                  'dddddddd-dddd-dddd-dddd-dddddddddddd')]
    mac_address = '00:00:0f:ee:ee:ee'

    def __init__(self):
        self.extra_dhcp_opts = []


class FakeV4HostRoute:
    destination = '20.0.0.1/24'
    nexthop = '20.0.0.1'


class FakeV4HostRouteGateway:
    destination = '0.0.0.0/0'
    nexthop = '10.0.0.1'


class FakeV6HostRoute:
    destination = '2001:0200:feed:7ac0::/64'
    nexthop = '2001:0200:feed:7ac0::1'


class FakeV4Subnet:
    id = 'dddddddd-dddd-dddd-dddd-dddddddddddd'
    ip_version = 4
    cidr = '192.168.0.0/24'
    gateway_ip = '192.168.0.1'
    enable_dhcp = True
    host_routes = [FakeV4HostRoute]
    dns_nameservers = ['8.8.8.8']


class FakeV4SubnetGatewayRoute:
    id = 'dddddddd-dddd-dddd-dddd-dddddddddddd'
    ip_version = 4
    cidr = '192.168.0.0/24'
    gateway_ip = '192.168.0.1'
    enable_dhcp = True
    host_routes = [FakeV4HostRouteGateway]
    dns_nameservers = ['8.8.8.8']


class FakeV4SubnetMultipleAgentsWithoutDnsProvided:
    id = 'dddddddd-dddd-dddd-dddd-dddddddddddd'
    ip_version = 4
    cidr = '192.168.0.0/24'
    gateway_ip = '192.168.0.1'
    enable_dhcp = True
    dns_nameservers = []
    host_routes = []


class FakeV4MultipleAgentsWithoutDnsProvided:
    id = 'ffffffff-ffff-ffff-ffff-ffffffffffff'
    subnets = [FakeV4SubnetMultipleAgentsWithoutDnsProvided()]
    ports = [FakePort1(), FakePort2(), FakePort3(), FakeRouterPort(),
             FakePortMultipleAgents1(), FakePortMultipleAgents2()]
    namespace = 'qdhcp-ns'


class FakeV4SubnetMultipleAgentsWithDnsProvided:
    id = 'dddddddd-dddd-dddd-dddd-dddddddddddd'
    ip_version = 4
    cidr = '192.168.0.0/24'
    gateway_ip = '192.168.0.1'
    enable_dhcp = True
    dns_nameservers = ['8.8.8.8']
    host_routes = []


class FakeV4MultipleAgentsWithDnsProvided:
    id = 'ffffffff-ffff-ffff-ffff-ffffffffffff'
    subnets = [FakeV4SubnetMultipleAgentsWithDnsProvided()]
    ports = [FakePort1(), FakePort2(), FakePort3(), FakeRouterPort(),
             FakePortMultipleAgents1(), FakePortMultipleAgents2()]
    namespace = 'qdhcp-ns'


class FakeV6Subnet:
    id = 'ffffffff-ffff-ffff-ffff-ffffffffffff'
    ip_version = 6
    cidr = 'fdca:3ba5:a17a:4ba3::/64'
    gateway_ip = 'fdca:3ba5:a17a:4ba3::1'
    enable_dhcp = True
    host_routes = [FakeV6HostRoute]
    dns_nameservers = ['2001:0200:feed:7ac0::1']


class FakeV4SubnetNoDHCP:
    id = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
    ip_version = 4
    cidr = '192.168.1.0/24'
    gateway_ip = '192.168.1.1'
    enable_dhcp = False
    host_routes = []
    dns_nameservers = []


class FakeV4SubnetNoGateway:
    id = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
    ip_version = 4
    cidr = '192.168.1.0/24'
    gateway_ip = None
    enable_dhcp = True
    host_routes = []
    dns_nameservers = []


class FakeV4SubnetNoRouter:
    id = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
    ip_version = 4
    cidr = '192.168.1.0/24'
    gateway_ip = '192.168.1.1'
    enable_dhcp = True
    host_routes = []
    dns_nameservers = []


class FakeV4Network:
    id = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
    subnets = [FakeV4Subnet()]
    ports = [FakePort1()]
    namespace = 'qdhcp-ns'


class FakeV6Network:
    id = 'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb'
    subnets = [FakeV6Subnet()]
    ports = [FakePort2()]
    namespace = 'qdhcp-ns'


class FakeDualNetwork:
    id = 'cccccccc-cccc-cccc-cccc-cccccccccccc'
    subnets = [FakeV4Subnet(), FakeV6Subnet()]
    ports = [FakePort1(), FakePort2(), FakePort3(), FakeRouterPort()]
    namespace = 'qdhcp-ns'


class FakeDualNetworkGatewayRoute:
    id = 'cccccccc-cccc-cccc-cccc-cccccccccccc'
    subnets = [FakeV4SubnetGatewayRoute(), FakeV6Subnet()]
    ports = [FakePort1(), FakePort2(), FakePort3(), FakeRouterPort()]
    namespace = 'qdhcp-ns'


class FakeDualNetworkSingleDHCP:
    id = 'cccccccc-cccc-cccc-cccc-cccccccccccc'
    subnets = [FakeV4Subnet(), FakeV4SubnetNoDHCP()]
    ports = [FakePort1(), FakePort2(), FakePort3(), FakeRouterPort()]
    namespace = 'qdhcp-ns'


class FakeV4NoGatewayNetwork:
    id = 'cccccccc-cccc-cccc-cccc-cccccccccccc'
    subnets = [FakeV4SubnetNoGateway()]
    ports = [FakePort1()]


class FakeV4NetworkNoRouter:
    id = 'cccccccc-cccc-cccc-cccc-cccccccccccc'
    subnets = [FakeV4SubnetNoRouter()]
    ports = [FakePort1()]


class FakeDualV4Pxe3Ports:
    id = 'cccccccc-cccc-cccc-cccc-cccccccccccc'
    subnets = [FakeV4Subnet(), FakeV4SubnetNoDHCP()]
    ports = [FakePort1(), FakePort2(), FakePort3(), FakeRouterPort()]
    namespace = 'qdhcp-ns'

    def __init__(self, port_detail="portsSame"):
        # NOTE: ports is a class attribute, so the extra_dhcp_opts set here
        # mutate shared state across instances; the tests rely on each test
        # constructing a fresh fixture before reading expectations.
        if port_detail == "portsSame":
            self.ports[0].extra_dhcp_opts = [
                DhcpOpt(opt_name='tftp-server', opt_value='192.168.0.3'),
                DhcpOpt(opt_name='server-ip-address', opt_value='192.168.0.2'),
                DhcpOpt(opt_name='bootfile-name', opt_value='pxelinux.0')]
            self.ports[1].extra_dhcp_opts = [
                DhcpOpt(opt_name='tftp-server', opt_value='192.168.1.3'),
                DhcpOpt(opt_name='server-ip-address', opt_value='192.168.1.2'),
                DhcpOpt(opt_name='bootfile-name', opt_value='pxelinux2.0')]
            self.ports[2].extra_dhcp_opts = [
                DhcpOpt(opt_name='tftp-server', opt_value='192.168.1.3'),
                DhcpOpt(opt_name='server-ip-address', opt_value='192.168.1.2'),
                DhcpOpt(opt_name='bootfile-name', opt_value='pxelinux3.0')]
        else:
            self.ports[0].extra_dhcp_opts = [
                DhcpOpt(opt_name='tftp-server', opt_value='192.168.0.2'),
                DhcpOpt(opt_name='server-ip-address', opt_value='192.168.0.2'),
                DhcpOpt(opt_name='bootfile-name', opt_value='pxelinux.0')]
            self.ports[1].extra_dhcp_opts = [
                DhcpOpt(opt_name='tftp-server', opt_value='192.168.0.5'),
                DhcpOpt(opt_name='server-ip-address', opt_value='192.168.0.5'),
                DhcpOpt(opt_name='bootfile-name', opt_value='pxelinux2.0')]
            self.ports[2].extra_dhcp_opts = [
                DhcpOpt(opt_name='tftp-server', opt_value='192.168.0.7'),
                DhcpOpt(opt_name='server-ip-address', opt_value='192.168.0.7'),
                DhcpOpt(opt_name='bootfile-name', opt_value='pxelinux3.0')]


class FakeV4NetworkPxe2Ports:
    id = 'dddddddd-dddd-dddd-dddd-dddddddddddd'
    subnets = [FakeV4Subnet()]
    ports = [FakePort1(), FakePort2(), FakeRouterPort()]
    namespace = 'qdhcp-ns'

    def __init__(self, port_detail="portsSame"):
        if port_detail == "portsSame":
            self.ports[0].extra_dhcp_opts = [
                DhcpOpt(opt_name='tftp-server', opt_value='192.168.0.3'),
                DhcpOpt(opt_name='server-ip-address', opt_value='192.168.0.2'),
                DhcpOpt(opt_name='bootfile-name', opt_value='pxelinux.0')]
            self.ports[1].extra_dhcp_opts = [
                DhcpOpt(opt_name='tftp-server', opt_value='192.168.0.3'),
                DhcpOpt(opt_name='server-ip-address', opt_value='192.168.0.2'),
                DhcpOpt(opt_name='bootfile-name', opt_value='pxelinux.0')]
        else:
            self.ports[0].extra_dhcp_opts = [
                DhcpOpt(opt_name='tftp-server', opt_value='192.168.0.3'),
                DhcpOpt(opt_name='server-ip-address', opt_value='192.168.0.2'),
                DhcpOpt(opt_name='bootfile-name', opt_value='pxelinux.0')]
            self.ports[1].extra_dhcp_opts = [
                DhcpOpt(opt_name='tftp-server', opt_value='192.168.0.5'),
                DhcpOpt(opt_name='server-ip-address', opt_value='192.168.0.5'),
                DhcpOpt(opt_name='bootfile-name', opt_value='pxelinux.0')]


class FakeV4NetworkPxe3Ports:
    id = 'dddddddd-dddd-dddd-dddd-dddddddddddd'
    subnets = [FakeV4Subnet()]
    ports = [FakePort1(), FakePort2(), FakePort3(), FakeRouterPort()]
    namespace = 'qdhcp-ns'

    def __init__(self, port_detail="portsSame"):
        if port_detail == "portsSame":
            self.ports[0].extra_dhcp_opts = [
                DhcpOpt(opt_name='tftp-server', opt_value='192.168.0.3'),
                DhcpOpt(opt_name='server-ip-address', opt_value='192.168.0.2'),
                DhcpOpt(opt_name='bootfile-name', opt_value='pxelinux.0')]
            self.ports[1].extra_dhcp_opts = [
                DhcpOpt(opt_name='tftp-server', opt_value='192.168.1.3'),
                DhcpOpt(opt_name='server-ip-address', opt_value='192.168.1.2'),
                DhcpOpt(opt_name='bootfile-name', opt_value='pxelinux.0')]
            self.ports[2].extra_dhcp_opts = [
                DhcpOpt(opt_name='tftp-server', opt_value='192.168.1.3'),
                DhcpOpt(opt_name='server-ip-address', opt_value='192.168.1.2'),
                DhcpOpt(opt_name='bootfile-name', opt_value='pxelinux.0')]
        else:
            self.ports[0].extra_dhcp_opts = [
                DhcpOpt(opt_name='tftp-server', opt_value='192.168.0.3'),
                DhcpOpt(opt_name='server-ip-address', opt_value='192.168.0.2'),
                DhcpOpt(opt_name='bootfile-name', opt_value='pxelinux.0')]
            self.ports[1].extra_dhcp_opts = [
                DhcpOpt(opt_name='tftp-server', opt_value='192.168.0.5'),
                DhcpOpt(opt_name='server-ip-address', opt_value='192.168.0.5'),
                DhcpOpt(opt_name='bootfile-name', opt_value='pxelinux2.0')]
            self.ports[2].extra_dhcp_opts = [
                DhcpOpt(opt_name='tftp-server', opt_value='192.168.0.7'),
                DhcpOpt(opt_name='server-ip-address', opt_value='192.168.0.7'),
                DhcpOpt(opt_name='bootfile-name', opt_value='pxelinux3.0')]


class LocalChild(dhcp.DhcpLocalProcess):
    """Concrete DhcpLocalProcess that records which hooks were invoked."""

    PORTS = {4: [4], 6: [6]}

    def __init__(self, *args, **kwargs):
        super(LocalChild, self).__init__(*args, **kwargs)
        self.called = []

    def reload_allocations(self):
        self.called.append('reload')

    def restart(self):
        self.called.append('restart')

    def spawn_process(self):
        self.called.append('spawn')


class TestBase(base.BaseTestCase):
    """Shared setup: config registration plus mocked file/exec helpers."""

    def setUp(self):
        super(TestBase, self).setUp()
        self.conf = config.setup_conf()
        self.conf.register_opts(base_config.core_opts)
        self.conf.register_opts(dhcp.OPTS)
        config.register_interface_driver_opts_helper(self.conf)
        instance = mock.patch("neutron.agent.linux.dhcp.DeviceManager")
        self.mock_mgr = instance.start()
        self.conf.register_opt(cfg.BoolOpt('enable_isolated_metadata',
                                           default=True))
        self.config_parse(self.conf)
        self.conf.set_override('state_path', '')
        self.conf.use_namespaces = True

        # replace_file / execute are patched so no test touches the system.
        self.replace_p = mock.patch('neutron.agent.linux.utils.replace_file')
        self.execute_p = mock.patch('neutron.agent.linux.utils.execute')
        self.safe = self.replace_p.start()
        self.execute = self.execute_p.start()


class TestDhcpBase(TestBase):

    def test_existing_dhcp_networks_abstract_error(self):
        self.assertRaises(NotImplementedError,
                          dhcp.DhcpBase.existing_dhcp_networks,
                          None, None)

    def test_check_version_abstract_error(self):
        self.assertRaises(NotImplementedError,
                          dhcp.DhcpBase.check_version)

    def test_base_abc_error(self):
        self.assertRaises(TypeError, dhcp.DhcpBase, None)

    def test_restart(self):
        class SubClass(dhcp.DhcpBase):
            def __init__(self):
                dhcp.DhcpBase.__init__(self, cfg.CONF, FakeV4Network(), None)
                self.called = []

            def enable(self):
                self.called.append('enable')

            def disable(self, retain_port=False):
                self.called.append('disable %s' % retain_port)

            def reload_allocations(self):
                pass

            @property
            def active(self):
                return True

        c = SubClass()
        c.restart()
        self.assertEqual(c.called, ['disable True', 'enable'])


class TestDhcpLocalProcess(TestBase):
    def test_active(self):
        with mock.patch('__builtin__.open') as mock_open:
            mock_open.return_value.__enter__ = lambda s: s
            mock_open.return_value.__exit__ = mock.Mock()
            mock_open.return_value.readline.return_value = \
                'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'

            with mock.patch.object(LocalChild, 'pid') as pid:
                pid.__get__ = mock.Mock(return_value=4)
                lp = LocalChild(self.conf, FakeV4Network())
                self.assertTrue(lp.active)

            mock_open.assert_called_once_with('/proc/4/cmdline', 'r')

    def test_active_none(self):
        dummy_cmd_line = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
        self.execute.return_value = (dummy_cmd_line, '')
        with mock.patch.object(LocalChild, 'pid') as pid:
            pid.__get__ = mock.Mock(return_value=None)
            lp = LocalChild(self.conf, FakeV4Network())
            self.assertFalse(lp.active)

    def test_active_cmd_mismatch(self):
        with mock.patch('__builtin__.open') as mock_open:
            mock_open.return_value.__enter__ = lambda s: s
            mock_open.return_value.__exit__ = mock.Mock()
            mock_open.return_value.readline.return_value = \
                'bbbbbbbb-bbbb-bbbb-aaaa-aaaaaaaaaaaa'

            with mock.patch.object(LocalChild, 'pid') as pid:
                pid.__get__ = mock.Mock(return_value=4)
                lp = LocalChild(self.conf, FakeV4Network())
                self.assertFalse(lp.active)

            mock_open.assert_called_once_with('/proc/4/cmdline', 'r')

    def test_get_conf_file_name(self):
        tpl = '/dhcp/aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa/dev'
        with mock.patch('os.path.isdir') as isdir:
            isdir.return_value = False
            with mock.patch('os.makedirs') as makedirs:
                lp = LocalChild(self.conf, FakeV4Network())
                self.assertEqual(lp.get_conf_file_name('dev'), tpl)
                self.assertFalse(makedirs.called)

    def test_get_conf_file_name_ensure_dir(self):
        tpl = '/dhcp/aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa/dev'
        with mock.patch('os.path.isdir') as isdir:
            isdir.return_value = False
            with mock.patch('os.makedirs') as makedirs:
                lp = LocalChild(self.conf, FakeV4Network())
                self.assertEqual(lp.get_conf_file_name('dev', True), tpl)
                self.assertTrue(makedirs.called)

    def test_enable_already_active(self):
        with mock.patch.object(LocalChild, 'active') as patched:
            patched.__get__ = mock.Mock(return_value=True)
            lp = LocalChild(self.conf, FakeV4Network())
            lp.enable()

            self.assertEqual(lp.called, ['restart'])
+ def test_enable(self): + attrs_to_mock = dict( + [(a, mock.DEFAULT) for a in + ['active', 'get_conf_file_name', 'interface_name']] + ) + + with mock.patch.multiple(LocalChild, **attrs_to_mock) as mocks: + mocks['active'].__get__ = mock.Mock(return_value=False) + mocks['get_conf_file_name'].return_value = '/dir' + mocks['interface_name'].__set__ = mock.Mock() + lp = LocalChild(self.conf, + FakeDualNetwork()) + lp.enable() + + self.mock_mgr.assert_has_calls( + [mock.call(self.conf, 'sudo', None), + mock.call().setup(mock.ANY)]) + self.assertEqual(lp.called, ['spawn']) + self.assertTrue(mocks['interface_name'].__set__.called) + + def test_disable_not_active(self): + attrs_to_mock = dict([(a, mock.DEFAULT) for a in + ['active', 'interface_name', 'pid']]) + with mock.patch.multiple(LocalChild, **attrs_to_mock) as mocks: + mocks['active'].__get__ = mock.Mock(return_value=False) + mocks['pid'].__get__ = mock.Mock(return_value=5) + mocks['interface_name'].__get__ = mock.Mock(return_value='tap0') + with mock.patch.object(dhcp.LOG, 'debug') as log: + network = FakeDualNetwork() + lp = LocalChild(self.conf, network) + lp.device_manager = mock.Mock() + lp.disable() + msg = log.call_args[0][0] + self.assertIn('does not exist', msg) + lp.device_manager.destroy.assert_called_once_with( + network, 'tap0') + + def test_disable_unknown_network(self): + attrs_to_mock = dict([(a, mock.DEFAULT) for a in + ['active', 'interface_name', 'pid']]) + with mock.patch.multiple(LocalChild, **attrs_to_mock) as mocks: + mocks['active'].__get__ = mock.Mock(return_value=False) + mocks['pid'].__get__ = mock.Mock(return_value=None) + mocks['interface_name'].__get__ = mock.Mock(return_value='tap0') + with mock.patch.object(dhcp.LOG, 'debug') as log: + lp = LocalChild(self.conf, FakeDualNetwork()) + lp.disable() + msg = log.call_args[0][0] + self.assertIn('No DHCP', msg) + + def test_disable_retain_port(self): + attrs_to_mock = dict([(a, mock.DEFAULT) for a in + ['active', 'interface_name', 'pid']]) 
+ network = FakeDualNetwork() + with mock.patch.multiple(LocalChild, **attrs_to_mock) as mocks: + mocks['active'].__get__ = mock.Mock(return_value=True) + mocks['pid'].__get__ = mock.Mock(return_value=5) + mocks['interface_name'].__get__ = mock.Mock(return_value='tap0') + lp = LocalChild(self.conf, network) + lp.disable(retain_port=True) + + exp_args = ['kill', '-9', 5] + self.execute.assert_called_once_with(exp_args, 'sudo') + + def test_disable(self): + attrs_to_mock = dict([(a, mock.DEFAULT) for a in + ['active', 'interface_name', 'pid']]) + network = FakeDualNetwork() + with mock.patch.multiple(LocalChild, **attrs_to_mock) as mocks: + mocks['active'].__get__ = mock.Mock(return_value=True) + mocks['pid'].__get__ = mock.Mock(return_value=5) + mocks['interface_name'].__get__ = mock.Mock(return_value='tap0') + lp = LocalChild(self.conf, network) + with mock.patch('neutron.agent.linux.ip_lib.IPWrapper') as ip: + lp.disable() + + self.mock_mgr.assert_has_calls([mock.call(self.conf, 'sudo', None), + mock.call().destroy(network, 'tap0')]) + exp_args = ['kill', '-9', 5] + self.execute.assert_called_once_with(exp_args, 'sudo') + + self.assertEqual(ip.return_value.netns.delete.call_count, 0) + + def test_disable_delete_ns(self): + self.conf.set_override('dhcp_delete_namespaces', True) + attrs_to_mock = dict([(a, mock.DEFAULT) for a in ['active', 'pid']]) + + with mock.patch.multiple(LocalChild, **attrs_to_mock) as mocks: + mocks['active'].__get__ = mock.Mock(return_value=False) + mocks['pid'].__get__ = mock.Mock(return_value=False) + lp = LocalChild(self.conf, FakeDualNetwork()) + with mock.patch('neutron.agent.linux.ip_lib.IPWrapper') as ip: + lp.disable() + + ip.return_value.netns.delete.assert_called_with('qdhcp-ns') + + def test_pid(self): + with mock.patch('__builtin__.open') as mock_open: + mock_open.return_value.__enter__ = lambda s: s + mock_open.return_value.__exit__ = mock.Mock() + mock_open.return_value.read.return_value = '5' + lp = LocalChild(self.conf, 
FakeDualNetwork()) + self.assertEqual(lp.pid, 5) + + def test_pid_no_an_int(self): + with mock.patch('__builtin__.open') as mock_open: + mock_open.return_value.__enter__ = lambda s: s + mock_open.return_value.__exit__ = mock.Mock() + mock_open.return_value.read.return_value = 'foo' + lp = LocalChild(self.conf, FakeDualNetwork()) + self.assertIsNone(lp.pid) + + def test_pid_invalid_file(self): + with mock.patch.object(LocalChild, 'get_conf_file_name') as conf_file: + conf_file.return_value = '.doesnotexist/pid' + lp = LocalChild(self.conf, FakeDualNetwork()) + self.assertIsNone(lp.pid) + + def test_get_interface_name(self): + with mock.patch('__builtin__.open') as mock_open: + mock_open.return_value.__enter__ = lambda s: s + mock_open.return_value.__exit__ = mock.Mock() + mock_open.return_value.read.return_value = 'tap0' + lp = LocalChild(self.conf, FakeDualNetwork()) + self.assertEqual(lp.interface_name, 'tap0') + + def test_set_interface_name(self): + with mock.patch('neutron.agent.linux.utils.replace_file') as replace: + lp = LocalChild(self.conf, FakeDualNetwork()) + with mock.patch.object(lp, 'get_conf_file_name') as conf_file: + conf_file.return_value = '/interface' + lp.interface_name = 'tap0' + conf_file.assert_called_once_with('interface', + ensure_conf_dir=True) + replace.assert_called_once_with(mock.ANY, 'tap0') + + +class TestDnsmasq(TestBase): + def _test_spawn(self, extra_options, network=FakeDualNetwork(), + max_leases=16777216, lease_duration=86400): + def mock_get_conf_file_name(kind, ensure_conf_dir=False): + return '/dhcp/%s/%s' % (network.id, kind) + + def fake_argv(index): + if index == 0: + return '/usr/local/bin/neutron-dhcp-agent' + else: + raise IndexError + + expected = [ + 'ip', + 'netns', + 'exec', + 'qdhcp-ns', + 'env', + 'NEUTRON_NETWORK_ID=%s' % network.id, + 'dnsmasq', + '--no-hosts', + '--no-resolv', + '--strict-order', + '--bind-interfaces', + '--interface=tap0', + '--except-interface=lo', + '--pid-file=/dhcp/%s/pid' % network.id, + 
'--dhcp-hostsfile=/dhcp/%s/host' % network.id, + '--addn-hosts=/dhcp/%s/addn_hosts' % network.id, + '--dhcp-optsfile=/dhcp/%s/opts' % network.id, + '--leasefile-ro'] + + seconds = '' + if lease_duration == -1: + lease_duration = 'infinite' + else: + seconds = 's' + expected.extend('--dhcp-range=set:tag%d,%s,static,%s%s' % + (i, s.cidr.split('/')[0], lease_duration, seconds) + for i, s in enumerate(network.subnets)) + + expected.append('--dhcp-lease-max=%d' % max_leases) + expected.extend(extra_options) + + self.execute.return_value = ('', '') + + attrs_to_mock = dict( + [(a, mock.DEFAULT) for a in + ['_output_opts_file', 'get_conf_file_name', 'interface_name']] + ) + + with mock.patch.multiple(dhcp.Dnsmasq, **attrs_to_mock) as mocks: + mocks['get_conf_file_name'].side_effect = mock_get_conf_file_name + mocks['_output_opts_file'].return_value = ( + '/dhcp/%s/opts' % network.id + ) + mocks['interface_name'].__get__ = mock.Mock(return_value='tap0') + + with mock.patch.object(dhcp.sys, 'argv') as argv: + argv.__getitem__.side_effect = fake_argv + dm = dhcp.Dnsmasq(self.conf, network, version=float(2.59)) + dm.spawn_process() + self.assertTrue(mocks['_output_opts_file'].called) + self.execute.assert_called_once_with(expected, + root_helper='sudo', + check_exit_code=True) + + def test_spawn(self): + self._test_spawn(['--conf-file=', '--domain=openstacklocal']) + + def test_spawn_infinite_lease_duration(self): + self.conf.set_override('dhcp_lease_duration', -1) + self._test_spawn(['--conf-file=', '--domain=openstacklocal'], + FakeDualNetwork(), 16777216, -1) + + def test_spawn_cfg_config_file(self): + self.conf.set_override('dnsmasq_config_file', '/foo') + self._test_spawn(['--conf-file=/foo', '--domain=openstacklocal']) + + def test_spawn_no_dhcp_domain(self): + self.conf.set_override('dhcp_domain', '') + self._test_spawn(['--conf-file=']) + + def test_spawn_cfg_dns_server(self): + self.conf.set_override('dnsmasq_dns_servers', ['8.8.8.8']) + 
self._test_spawn(['--conf-file=', + '--server=8.8.8.8', + '--domain=openstacklocal']) + + def test_spawn_cfg_multiple_dns_server(self): + self.conf.set_override('dnsmasq_dns_servers', ['8.8.8.8', + '9.9.9.9']) + self._test_spawn(['--conf-file=', + '--server=8.8.8.8', + '--server=9.9.9.9', + '--domain=openstacklocal']) + + def test_spawn_max_leases_is_smaller_than_cap(self): + self._test_spawn( + ['--conf-file=', '--domain=openstacklocal'], + network=FakeV4Network(), + max_leases=256) + + def test_output_opts_file(self): + fake_v6 = '2001:0200:feed:7ac0::1' + fake_v6_cidr = '2001:0200:feed:7ac0::/64' + expected = ( + 'tag:tag0,option:dns-server,8.8.8.8\n' + 'tag:tag0,option:classless-static-route,20.0.0.1/24,20.0.0.1,' + '0.0.0.0/0,192.168.0.1\n' + 'tag:tag0,249,20.0.0.1/24,20.0.0.1,0.0.0.0/0,192.168.0.1\n' + 'tag:tag0,option:router,192.168.0.1\n' + 'tag:tag1,option:dns-server,%s\n' + 'tag:tag1,option:classless-static-route,%s,%s\n' + 'tag:tag1,249,%s,%s').lstrip() % (fake_v6, + fake_v6_cidr, fake_v6, + fake_v6_cidr, fake_v6) + + with mock.patch.object(dhcp.Dnsmasq, 'get_conf_file_name') as conf_fn: + conf_fn.return_value = '/foo/opts' + dm = dhcp.Dnsmasq(self.conf, FakeDualNetwork(), + version=float(2.59)) + dm._output_opts_file() + + self.safe.assert_called_once_with('/foo/opts', expected) + + def test_output_opts_file_gateway_route(self): + fake_v6 = '2001:0200:feed:7ac0::1' + fake_v6_cidr = '2001:0200:feed:7ac0::/64' + expected = """ +tag:tag0,option:dns-server,8.8.8.8 +tag:tag0,option:router,192.168.0.1 +tag:tag1,option:dns-server,%s +tag:tag1,option:classless-static-route,%s,%s +tag:tag1,249,%s,%s""".lstrip() % (fake_v6, + fake_v6_cidr, fake_v6, + fake_v6_cidr, fake_v6) + + with mock.patch.object(dhcp.Dnsmasq, 'get_conf_file_name') as conf_fn: + conf_fn.return_value = '/foo/opts' + dm = dhcp.Dnsmasq(self.conf, FakeDualNetworkGatewayRoute(), + version=float(2.59)) + dm._output_opts_file() + + self.safe.assert_called_once_with('/foo/opts', expected) + + def 
test_output_opts_file_multiple_agents_without_dns_provided(self): + expected = """ +tag:tag0,option:router,192.168.0.1 +tag:tag0,option:dns-server,192.168.0.5,192.168.0.6""".lstrip() + with mock.patch.object(dhcp.Dnsmasq, 'get_conf_file_name') as conf_fn: + conf_fn.return_value = '/foo/opts' + dm = dhcp.Dnsmasq(self.conf, + FakeV4MultipleAgentsWithoutDnsProvided(), + version=float(2.59)) + dm._output_opts_file() + self.safe.assert_called_once_with('/foo/opts', expected) + + def test_output_opts_file_multiple_agents_with_dns_provided(self): + expected = """ +tag:tag0,option:dns-server,8.8.8.8 +tag:tag0,option:router,192.168.0.1""".lstrip() + with mock.patch.object(dhcp.Dnsmasq, 'get_conf_file_name') as conf_fn: + conf_fn.return_value = '/foo/opts' + dm = dhcp.Dnsmasq(self.conf, + FakeV4MultipleAgentsWithDnsProvided(), + version=float(2.59)) + dm._output_opts_file() + self.safe.assert_called_once_with('/foo/opts', expected) + + def test_output_opts_file_single_dhcp(self): + expected = ( + 'tag:tag0,option:dns-server,8.8.8.8\n' + 'tag:tag0,option:classless-static-route,20.0.0.1/24,20.0.0.1,' + '0.0.0.0/0,192.168.0.1\n' + 'tag:tag0,249,20.0.0.1/24,20.0.0.1,0.0.0.0/0,192.168.0.1\n' + 'tag:tag0,option:router,192.168.0.1').lstrip() + with mock.patch.object(dhcp.Dnsmasq, 'get_conf_file_name') as conf_fn: + conf_fn.return_value = '/foo/opts' + dm = dhcp.Dnsmasq(self.conf, FakeDualNetworkSingleDHCP(), + version=float(2.59)) + dm._output_opts_file() + + self.safe.assert_called_once_with('/foo/opts', expected) + + def test_output_opts_file_single_dhcp_ver2_48(self): + expected = ( + 'tag0,option:dns-server,8.8.8.8\n' + 'tag0,option:classless-static-route,20.0.0.1/24,20.0.0.1,' + '0.0.0.0/0,192.168.0.1\n' + 'tag0,249,20.0.0.1/24,20.0.0.1,0.0.0.0/0,192.168.0.1\n' + 'tag0,option:router,192.168.0.1').lstrip() + with mock.patch.object(dhcp.Dnsmasq, 'get_conf_file_name') as conf_fn: + conf_fn.return_value = '/foo/opts' + dm = dhcp.Dnsmasq(self.conf, FakeDualNetworkSingleDHCP(), + 
version=float(2.48)) + dm._output_opts_file() + + self.safe.assert_called_once_with('/foo/opts', expected) + + def test_output_opts_file_no_gateway(self): + expected = """ +tag:tag0,option:classless-static-route,169.254.169.254/32,192.168.1.1 +tag:tag0,249,169.254.169.254/32,192.168.1.1 +tag:tag0,option:router""".lstrip() + + with mock.patch.object(dhcp.Dnsmasq, 'get_conf_file_name') as conf_fn: + conf_fn.return_value = '/foo/opts' + dm = dhcp.Dnsmasq(self.conf, FakeV4NoGatewayNetwork(), + version=float(2.59)) + with mock.patch.object(dm, '_make_subnet_interface_ip_map') as ipm: + ipm.return_value = {FakeV4SubnetNoGateway.id: '192.168.1.1'} + + dm._output_opts_file() + self.assertTrue(ipm.called) + + self.safe.assert_called_once_with('/foo/opts', expected) + + def test_output_opts_file_no_neutron_router_on_subnet(self): + expected = ( + 'tag:tag0,option:classless-static-route,' + '169.254.169.254/32,192.168.1.2,0.0.0.0/0,192.168.1.1\n' + 'tag:tag0,249,169.254.169.254/32,192.168.1.2,' + '0.0.0.0/0,192.168.1.1\n' + 'tag:tag0,option:router,192.168.1.1').lstrip() + + with mock.patch.object(dhcp.Dnsmasq, 'get_conf_file_name') as conf_fn: + conf_fn.return_value = '/foo/opts' + dm = dhcp.Dnsmasq(self.conf, FakeV4NetworkNoRouter(), + version=float(2.59)) + with mock.patch.object(dm, '_make_subnet_interface_ip_map') as ipm: + ipm.return_value = {FakeV4SubnetNoRouter.id: '192.168.1.2'} + + dm._output_opts_file() + self.assertTrue(ipm.called) + + self.safe.assert_called_once_with('/foo/opts', expected) + + def test_output_opts_file_pxe_2port_1net(self): + expected = ( + 'tag:tag0,option:dns-server,8.8.8.8\n' + 'tag:tag0,option:classless-static-route,20.0.0.1/24,20.0.0.1,' + '0.0.0.0/0,192.168.0.1\n' + 'tag:tag0,249,20.0.0.1/24,20.0.0.1,0.0.0.0/0,192.168.0.1\n' + 'tag:tag0,option:router,192.168.0.1\n' + 'tag:eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee,' + 'option:tftp-server,192.168.0.3\n' + 'tag:eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee,' + 'option:server-ip-address,192.168.0.2\n' + 
'tag:eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee,' + 'option:bootfile-name,pxelinux.0\n' + 'tag:ffffffff-ffff-ffff-ffff-ffffffffffff,' + 'option:tftp-server,192.168.0.3\n' + 'tag:ffffffff-ffff-ffff-ffff-ffffffffffff,' + 'option:server-ip-address,192.168.0.2\n' + 'tag:ffffffff-ffff-ffff-ffff-ffffffffffff,' + 'option:bootfile-name,pxelinux.0') + expected = expected.lstrip() + + with mock.patch.object(dhcp.Dnsmasq, 'get_conf_file_name') as conf_fn: + conf_fn.return_value = '/foo/opts' + fp = FakeV4NetworkPxe2Ports() + dm = dhcp.Dnsmasq(self.conf, fp, version=float(2.59)) + dm._output_opts_file() + + self.safe.assert_called_once_with('/foo/opts', expected) + + def test_output_opts_file_pxe_2port_1net_diff_details(self): + expected = ( + 'tag:tag0,option:dns-server,8.8.8.8\n' + 'tag:tag0,option:classless-static-route,20.0.0.1/24,20.0.0.1,' + '0.0.0.0/0,192.168.0.1\n' + 'tag:tag0,249,20.0.0.1/24,20.0.0.1,0.0.0.0/0,192.168.0.1\n' + 'tag:tag0,option:router,192.168.0.1\n' + 'tag:eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee,' + 'option:tftp-server,192.168.0.3\n' + 'tag:eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee,' + 'option:server-ip-address,192.168.0.2\n' + 'tag:eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee,' + 'option:bootfile-name,pxelinux.0\n' + 'tag:ffffffff-ffff-ffff-ffff-ffffffffffff,' + 'option:tftp-server,192.168.0.5\n' + 'tag:ffffffff-ffff-ffff-ffff-ffffffffffff,' + 'option:server-ip-address,192.168.0.5\n' + 'tag:ffffffff-ffff-ffff-ffff-ffffffffffff,' + 'option:bootfile-name,pxelinux.0') + expected = expected.lstrip() + + with mock.patch.object(dhcp.Dnsmasq, 'get_conf_file_name') as conf_fn: + conf_fn.return_value = '/foo/opts' + dm = dhcp.Dnsmasq(self.conf, FakeV4NetworkPxe2Ports("portsDiff"), + version=float(2.59)) + dm._output_opts_file() + + self.safe.assert_called_once_with('/foo/opts', expected) + + def test_output_opts_file_pxe_3port_1net_diff_details(self): + expected = ( + 'tag:tag0,option:dns-server,8.8.8.8\n' + 'tag:tag0,option:classless-static-route,20.0.0.1/24,20.0.0.1,' + 
'0.0.0.0/0,192.168.0.1\n' + 'tag:tag0,249,20.0.0.1/24,20.0.0.1,0.0.0.0/0,192.168.0.1\n' + 'tag:tag0,option:router,192.168.0.1\n' + 'tag:eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee,' + 'option:tftp-server,192.168.0.3\n' + 'tag:eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee,' + 'option:server-ip-address,192.168.0.2\n' + 'tag:eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee,' + 'option:bootfile-name,pxelinux.0\n' + 'tag:ffffffff-ffff-ffff-ffff-ffffffffffff,' + 'option:tftp-server,192.168.0.5\n' + 'tag:ffffffff-ffff-ffff-ffff-ffffffffffff,' + 'option:server-ip-address,192.168.0.5\n' + 'tag:ffffffff-ffff-ffff-ffff-ffffffffffff,' + 'option:bootfile-name,pxelinux2.0\n' + 'tag:44444444-4444-4444-4444-444444444444,' + 'option:tftp-server,192.168.0.7\n' + 'tag:44444444-4444-4444-4444-444444444444,' + 'option:server-ip-address,192.168.0.7\n' + 'tag:44444444-4444-4444-4444-444444444444,' + 'option:bootfile-name,pxelinux3.0') + expected = expected.lstrip() + + with mock.patch.object(dhcp.Dnsmasq, 'get_conf_file_name') as conf_fn: + conf_fn.return_value = '/foo/opts' + dm = dhcp.Dnsmasq(self.conf, + FakeV4NetworkPxe3Ports("portsDifferent"), + version=float(2.59)) + dm._output_opts_file() + + self.safe.assert_called_once_with('/foo/opts', expected) + + def test_output_opts_file_pxe_3port_2net(self): + expected = ( + 'tag:tag0,option:dns-server,8.8.8.8\n' + 'tag:tag0,option:classless-static-route,20.0.0.1/24,20.0.0.1,' + '0.0.0.0/0,192.168.0.1\n' + 'tag:tag0,249,20.0.0.1/24,20.0.0.1,0.0.0.0/0,192.168.0.1\n' + 'tag:tag0,option:router,192.168.0.1\n' + 'tag:eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee,' + 'option:tftp-server,192.168.0.3\n' + 'tag:eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee,' + 'option:server-ip-address,192.168.0.2\n' + 'tag:eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee,' + 'option:bootfile-name,pxelinux.0\n' + 'tag:ffffffff-ffff-ffff-ffff-ffffffffffff,' + 'option:tftp-server,192.168.1.3\n' + 'tag:ffffffff-ffff-ffff-ffff-ffffffffffff,' + 'option:server-ip-address,192.168.1.2\n' + 
'tag:ffffffff-ffff-ffff-ffff-ffffffffffff,' + 'option:bootfile-name,pxelinux2.0\n' + 'tag:44444444-4444-4444-4444-444444444444,' + 'option:tftp-server,192.168.1.3\n' + 'tag:44444444-4444-4444-4444-444444444444,' + 'option:server-ip-address,192.168.1.2\n' + 'tag:44444444-4444-4444-4444-444444444444,' + 'option:bootfile-name,pxelinux3.0') + expected = expected.lstrip() + + with mock.patch.object(dhcp.Dnsmasq, 'get_conf_file_name') as conf_fn: + conf_fn.return_value = '/foo/opts' + dm = dhcp.Dnsmasq(self.conf, FakeDualV4Pxe3Ports(), + version=float(2.59)) + dm._output_opts_file() + + self.safe.assert_called_once_with('/foo/opts', expected) + + @property + def _test_reload_allocation_data(self): + exp_host_name = '/dhcp/cccccccc-cccc-cccc-cccc-cccccccccccc/host' + exp_host_data = ('00:00:80:aa:bb:cc,host-192-168-0-2.openstacklocal,' + '192.168.0.2\n' + '00:00:f3:aa:bb:cc,host-fdca-3ba5-a17a-4ba3--2.' + 'openstacklocal,[fdca:3ba5:a17a:4ba3::2]\n' + '00:00:0f:aa:bb:cc,host-192-168-0-3.openstacklocal,' + '192.168.0.3\n' + '00:00:0f:aa:bb:cc,host-fdca-3ba5-a17a-4ba3--3.' 
+ 'openstacklocal,[fdca:3ba5:a17a:4ba3::3]\n' + '00:00:0f:rr:rr:rr,host-192-168-0-1.openstacklocal,' + '192.168.0.1\n').lstrip() + exp_addn_name = '/dhcp/cccccccc-cccc-cccc-cccc-cccccccccccc/addn_hosts' + exp_addn_data = ( + '192.168.0.2\t' + 'host-192-168-0-2.openstacklocal host-192-168-0-2\n' + 'fdca:3ba5:a17a:4ba3::2\t' + 'host-fdca-3ba5-a17a-4ba3--2.openstacklocal ' + 'host-fdca-3ba5-a17a-4ba3--2\n' + '192.168.0.3\thost-192-168-0-3.openstacklocal ' + 'host-192-168-0-3\n' + 'fdca:3ba5:a17a:4ba3::3\t' + 'host-fdca-3ba5-a17a-4ba3--3.openstacklocal ' + 'host-fdca-3ba5-a17a-4ba3--3\n' + '192.168.0.1\t' + 'host-192-168-0-1.openstacklocal ' + 'host-192-168-0-1\n' + ).lstrip() + exp_opt_name = '/dhcp/cccccccc-cccc-cccc-cccc-cccccccccccc/opts' + fake_v6 = '2001:0200:feed:7ac0::1' + fake_v6_cidr = '2001:0200:feed:7ac0::/64' + exp_opt_data = ( + 'tag:tag0,option:dns-server,8.8.8.8\n' + 'tag:tag0,option:classless-static-route,20.0.0.1/24,20.0.0.1,' + '0.0.0.0/0,192.168.0.1\n' + 'tag:tag0,249,20.0.0.1/24,20.0.0.1,0.0.0.0/0,192.168.0.1\n' + 'tag:tag0,option:router,192.168.0.1\n' + 'tag:tag1,option:dns-server,%s\n' + 'tag:tag1,option:classless-static-route,%s,%s\n' + 'tag:tag1,249,%s,%s').lstrip() % (fake_v6, + fake_v6_cidr, fake_v6, + fake_v6_cidr, fake_v6) + return (exp_host_name, exp_host_data, + exp_addn_name, exp_addn_data, + exp_opt_name, exp_opt_data,) + + def test_reload_allocations(self): + (exp_host_name, exp_host_data, + exp_addn_name, exp_addn_data, + exp_opt_name, exp_opt_data,) = self._test_reload_allocation_data + + exp_args = ['kill', '-HUP', 5] + + fake_net = FakeDualNetwork() + dm = dhcp.Dnsmasq(self.conf, fake_net, version=float(2.59)) + + with contextlib.nested( + mock.patch('os.path.isdir', return_value=True), + mock.patch.object(dhcp.Dnsmasq, 'active'), + mock.patch.object(dhcp.Dnsmasq, 'pid'), + mock.patch.object(dhcp.Dnsmasq, 'interface_name'), + mock.patch.object(dhcp.Dnsmasq, '_make_subnet_interface_ip_map'), + mock.patch.object(dm, 'device_manager') 
+ ) as (isdir, active, pid, interface_name, ip_map, device_manager): + active.__get__ = mock.Mock(return_value=True) + pid.__get__ = mock.Mock(return_value=5) + interface_name.__get__ = mock.Mock(return_value='tap12345678-12') + ip_map.return_value = {} + dm.reload_allocations() + + self.assertTrue(ip_map.called) + self.safe.assert_has_calls([mock.call(exp_host_name, exp_host_data), + mock.call(exp_addn_name, exp_addn_data), + mock.call(exp_opt_name, exp_opt_data)]) + self.execute.assert_called_once_with(exp_args, 'sudo') + device_manager.update.assert_called_with(fake_net, 'tap12345678-12') + + def test_reload_allocations_stale_pid(self): + (exp_host_name, exp_host_data, + exp_addn_name, exp_addn_data, + exp_opt_name, exp_opt_data,) = self._test_reload_allocation_data + + with mock.patch('__builtin__.open') as mock_open: + mock_open.return_value.__enter__ = lambda s: s + mock_open.return_value.__exit__ = mock.Mock() + mock_open.return_value.readline.return_value = None + + with mock.patch('os.path.isdir') as isdir: + isdir.return_value = True + with mock.patch.object(dhcp.Dnsmasq, 'pid') as pid: + pid.__get__ = mock.Mock(return_value=5) + dm = dhcp.Dnsmasq(self.conf, FakeDualNetwork(), + version=float(2.59)) + + method_name = '_make_subnet_interface_ip_map' + with mock.patch.object(dhcp.Dnsmasq, method_name) as ipmap: + ipmap.return_value = {} + with mock.patch.object(dhcp.Dnsmasq, 'interface_name'): + dm.reload_allocations() + self.assertTrue(ipmap.called) + + self.safe.assert_has_calls([ + mock.call(exp_host_name, exp_host_data), + mock.call(exp_addn_name, exp_addn_data), + mock.call(exp_opt_name, exp_opt_data), + ]) + mock_open.assert_called_once_with('/proc/5/cmdline', 'r') + + def test_release_unused_leases(self): + dnsmasq = dhcp.Dnsmasq(self.conf, FakeDualNetwork()) + + ip1 = '192.168.1.2' + mac1 = '00:00:80:aa:bb:cc' + ip2 = '192.168.1.3' + mac2 = '00:00:80:cc:bb:aa' + + old_leases = set([(ip1, mac1), (ip2, mac2)]) + dnsmasq._read_hosts_file_leases = 
mock.Mock(return_value=old_leases) + dnsmasq._output_hosts_file = mock.Mock() + dnsmasq._release_lease = mock.Mock() + dnsmasq.network.ports = [] + + dnsmasq._release_unused_leases() + + dnsmasq._release_lease.assert_has_calls([mock.call(mac1, ip1), + mock.call(mac2, ip2)], + any_order=True) + + def test_release_unused_leases_one_lease(self): + dnsmasq = dhcp.Dnsmasq(self.conf, FakeDualNetwork()) + + ip1 = '192.168.0.2' + mac1 = '00:00:80:aa:bb:cc' + ip2 = '192.168.0.3' + mac2 = '00:00:80:cc:bb:aa' + + old_leases = set([(ip1, mac1), (ip2, mac2)]) + dnsmasq._read_hosts_file_leases = mock.Mock(return_value=old_leases) + dnsmasq._output_hosts_file = mock.Mock() + dnsmasq._release_lease = mock.Mock() + dnsmasq.network.ports = [FakePort1()] + + dnsmasq._release_unused_leases() + + dnsmasq._release_lease.assert_has_calls([mock.call(mac2, ip2)], + any_order=True) + + def test_read_hosts_file_leases(self): + filename = '/path/to/file' + with mock.patch('os.path.exists') as mock_exists: + mock_exists.return_value = True + with mock.patch('__builtin__.open') as mock_open: + mock_open.return_value.__enter__ = lambda s: s + mock_open.return_value.__exit__ = mock.Mock() + lines = ["00:00:80:aa:bb:cc,inst-name,192.168.0.1"] + mock_open.return_value.readlines.return_value = lines + + dnsmasq = dhcp.Dnsmasq(self.conf, FakeDualNetwork()) + leases = dnsmasq._read_hosts_file_leases(filename) + + self.assertEqual(set([("192.168.0.1", "00:00:80:aa:bb:cc")]), leases) + mock_exists.assert_called_once_with(filename) + mock_open.assert_called_once_with(filename) + + def test_make_subnet_interface_ip_map(self): + with mock.patch('neutron.agent.linux.ip_lib.IPDevice') as ip_dev: + ip_dev.return_value.addr.list.return_value = [ + {'cidr': '192.168.0.1/24'} + ] + + dm = dhcp.Dnsmasq(self.conf, + FakeDualNetwork()) + + self.assertEqual( + dm._make_subnet_interface_ip_map(), + {FakeV4Subnet.id: '192.168.0.1'} + ) + + def test_remove_config_files(self): + net = FakeV4Network() + path = 
'/opt/data/neutron/dhcp' + self.conf.dhcp_confs = path + + with mock.patch('shutil.rmtree') as rmtree: + lp = LocalChild(self.conf, net) + lp._remove_config_files() + + rmtree.assert_called_once_with(os.path.join(path, net.id), + ignore_errors=True) + + def test_existing_dhcp_networks(self): + path = '/opt/data/neutron/dhcp' + self.conf.dhcp_confs = path + + cases = { + # network_uuid --> is_dhcp_alive? + 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa': True, + 'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb': False, + 'not_uuid_like_name': True + } + + def active_fake(self, instance, cls): + return cases[instance.network.id] + + with mock.patch('os.listdir') as mock_listdir: + with mock.patch.object(dhcp.Dnsmasq, 'active') as mock_active: + mock_active.__get__ = active_fake + mock_listdir.return_value = cases.keys() + + result = dhcp.Dnsmasq.existing_dhcp_networks(self.conf, 'sudo') + + mock_listdir.assert_called_once_with(path) + self.assertEqual(['aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', + 'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb'], + result) + + def _check_version(self, cmd_out, expected_value): + with mock.patch('neutron.agent.linux.utils.execute') as cmd: + cmd.return_value = cmd_out + result = dhcp.Dnsmasq.check_version() + self.assertEqual(result, expected_value) + + def test_check_minimum_version(self): + self._check_version('Dnsmasq version 2.59 Copyright (c)...', + float(2.59)) + + def test_check_future_version(self): + self._check_version('Dnsmasq version 2.65 Copyright (c)...', + float(2.65)) + + def test_check_fail_version(self): + self._check_version('Dnsmasq version 2.48 Copyright (c)...', + float(2.48)) + + def test_check_version_failed_cmd_execution(self): + self._check_version('Error while executing command', 0) diff --git a/neutron/tests/unit/test_linux_external_process.py b/neutron/tests/unit/test_linux_external_process.py new file mode 100644 index 000000000..9207e5df0 --- /dev/null +++ b/neutron/tests/unit/test_linux_external_process.py @@ -0,0 +1,202 @@ +# vim: 
tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2012 New Dream Network, LLC (DreamHost) +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Mark McClain, DreamHost + +import mock + +from neutron.agent.linux import external_process as ep +from neutron.tests import base + + +class TestProcessManager(base.BaseTestCase): + def setUp(self): + super(TestProcessManager, self).setUp() + self.execute_p = mock.patch('neutron.agent.linux.utils.execute') + self.execute = self.execute_p.start() + self.conf = mock.Mock() + self.conf.external_pids = '/var/path' + + def test_enable_no_namespace(self): + callback = mock.Mock() + callback.return_value = ['the', 'cmd'] + + with mock.patch.object(ep.ProcessManager, 'get_pid_file_name') as name: + name.return_value = 'pidfile' + with mock.patch.object(ep.ProcessManager, 'active') as active: + active.__get__ = mock.Mock(return_value=False) + + manager = ep.ProcessManager(self.conf, 'uuid') + manager.enable(callback) + callback.assert_called_once_with('pidfile') + name.assert_called_once_with(ensure_pids_dir=True) + self.execute.assert_called_once_with(['the', 'cmd'], + root_helper='sudo', + check_exit_code=True) + + def test_enable_with_namespace(self): + callback = mock.Mock() + callback.return_value = ['the', 'cmd'] + + with mock.patch.object(ep.ProcessManager, 'get_pid_file_name') as name: + name.return_value = 'pidfile' + with mock.patch.object(ep.ProcessManager, 'active') as active: + active.__get__ = 
mock.Mock(return_value=False) + + manager = ep.ProcessManager(self.conf, 'uuid', namespace='ns') + with mock.patch.object(ep, 'ip_lib') as ip_lib: + manager.enable(callback) + callback.assert_called_once_with('pidfile') + name.assert_called_once_with(ensure_pids_dir=True) + ip_lib.assert_has_calls([ + mock.call.IPWrapper('sudo', 'ns'), + mock.call.IPWrapper().netns.execute(['the', 'cmd'])] + ) + + def test_enable_with_namespace_process_active(self): + callback = mock.Mock() + callback.return_value = ['the', 'cmd'] + + with mock.patch.object(ep.ProcessManager, 'active') as active: + active.__get__ = mock.Mock(return_value=True) + + manager = ep.ProcessManager(self.conf, 'uuid', namespace='ns') + with mock.patch.object(ep, 'ip_lib'): + manager.enable(callback) + self.assertFalse(callback.called) + + def test_disable_no_namespace(self): + with mock.patch.object(ep.ProcessManager, 'pid') as pid: + pid.__get__ = mock.Mock(return_value=4) + with mock.patch.object(ep.ProcessManager, 'active') as active: + active.__get__ = mock.Mock(return_value=True) + + manager = ep.ProcessManager(self.conf, 'uuid') + manager.disable() + self.execute(['kill', '-9', 4], 'sudo') + + def test_disable_namespace(self): + with mock.patch.object(ep.ProcessManager, 'pid') as pid: + pid.__get__ = mock.Mock(return_value=4) + with mock.patch.object(ep.ProcessManager, 'active') as active: + active.__get__ = mock.Mock(return_value=True) + + manager = ep.ProcessManager(self.conf, 'uuid', namespace='ns') + + with mock.patch.object(ep, 'utils') as utils: + manager.disable() + utils.assert_has_calls( + mock.call.execute(['kill', '-9', 4], 'sudo')) + + def test_disable_not_active(self): + with mock.patch.object(ep.ProcessManager, 'pid') as pid: + pid.__get__ = mock.Mock(return_value=4) + with mock.patch.object(ep.ProcessManager, 'active') as active: + active.__get__ = mock.Mock(return_value=False) + with mock.patch.object(ep.LOG, 'debug') as debug: + manager = ep.ProcessManager(self.conf, 'uuid') + 
manager.disable() + debug.assert_called_once_with(mock.ANY, mock.ANY) + + def test_disable_no_pid(self): + with mock.patch.object(ep.ProcessManager, 'pid') as pid: + pid.__get__ = mock.Mock(return_value=None) + with mock.patch.object(ep.ProcessManager, 'active') as active: + active.__get__ = mock.Mock(return_value=False) + with mock.patch.object(ep.LOG, 'debug') as debug: + manager = ep.ProcessManager(self.conf, 'uuid') + manager.disable() + debug.assert_called_once_with(mock.ANY, mock.ANY) + + def test_get_pid_file_name_existing(self): + with mock.patch.object(ep.os.path, 'isdir') as isdir: + isdir.return_value = True + manager = ep.ProcessManager(self.conf, 'uuid') + retval = manager.get_pid_file_name(ensure_pids_dir=True) + self.assertEqual(retval, '/var/path/uuid.pid') + + def test_get_pid_file_name_not_existing(self): + with mock.patch.object(ep.os.path, 'isdir') as isdir: + with mock.patch.object(ep.os, 'makedirs') as makedirs: + isdir.return_value = False + manager = ep.ProcessManager(self.conf, 'uuid') + retval = manager.get_pid_file_name(ensure_pids_dir=True) + self.assertEqual(retval, '/var/path/uuid.pid') + makedirs.assert_called_once_with('/var/path', 0o755) + + def test_get_pid_file_name_default(self): + with mock.patch.object(ep.os.path, 'isdir') as isdir: + isdir.return_value = True + manager = ep.ProcessManager(self.conf, 'uuid') + retval = manager.get_pid_file_name(ensure_pids_dir=False) + self.assertEqual(retval, '/var/path/uuid.pid') + self.assertFalse(isdir.called) + + def test_pid(self): + with mock.patch('__builtin__.open') as mock_open: + mock_open.return_value.__enter__ = lambda s: s + mock_open.return_value.__exit__ = mock.Mock() + mock_open.return_value.read.return_value = '5' + manager = ep.ProcessManager(self.conf, 'uuid') + self.assertEqual(manager.pid, 5) + + def test_pid_no_an_int(self): + with mock.patch('__builtin__.open') as mock_open: + mock_open.return_value.__enter__ = lambda s: s + mock_open.return_value.__exit__ = mock.Mock() 
+ mock_open.return_value.read.return_value = 'foo' + manager = ep.ProcessManager(self.conf, 'uuid') + self.assertIsNone(manager.pid, 5) + + def test_pid_invalid_file(self): + with mock.patch.object(ep.ProcessManager, 'get_pid_file_name') as name: + name.return_value = '.doesnotexist/pid' + manager = ep.ProcessManager(self.conf, 'uuid') + self.assertIsNone(manager.pid) + + def test_active(self): + with mock.patch('__builtin__.open') as mock_open: + mock_open.return_value.__enter__ = lambda s: s + mock_open.return_value.__exit__ = mock.Mock() + mock_open.return_value.readline.return_value = \ + 'python foo --router_id=uuid' + with mock.patch.object(ep.ProcessManager, 'pid') as pid: + pid.__get__ = mock.Mock(return_value=4) + manager = ep.ProcessManager(self.conf, 'uuid') + self.assertTrue(manager.active) + + mock_open.assert_called_once_with('/proc/4/cmdline', 'r') + + def test_active_none(self): + dummy_cmd_line = 'python foo --router_id=uuid' + self.execute.return_value = dummy_cmd_line + with mock.patch.object(ep.ProcessManager, 'pid') as pid: + pid.__get__ = mock.Mock(return_value=None) + manager = ep.ProcessManager(self.conf, 'uuid') + self.assertFalse(manager.active) + + def test_active_cmd_mismatch(self): + with mock.patch('__builtin__.open') as mock_open: + mock_open.return_value.__enter__ = lambda s: s + mock_open.return_value.__exit__ = mock.Mock() + mock_open.return_value.readline.return_value = \ + 'python foo --router_id=anotherid' + with mock.patch.object(ep.ProcessManager, 'pid') as pid: + pid.__get__ = mock.Mock(return_value=4) + manager = ep.ProcessManager(self.conf, 'uuid') + self.assertFalse(manager.active) + + mock_open.assert_called_once_with('/proc/4/cmdline', 'r') diff --git a/neutron/tests/unit/test_linux_interface.py b/neutron/tests/unit/test_linux_interface.py new file mode 100644 index 000000000..ffdec24bc --- /dev/null +++ b/neutron/tests/unit/test_linux_interface.py @@ -0,0 +1,620 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# 
Copyright 2012 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import mock + +from neutron.agent.common import config +from neutron.agent.linux import dhcp +from neutron.agent.linux import interface +from neutron.agent.linux import ip_lib +from neutron.agent.linux import utils +from neutron.extensions import flavor +from neutron.openstack.common import uuidutils +from neutron.tests import base + + +class BaseChild(interface.LinuxInterfaceDriver): + def plug(*args): + pass + + def unplug(*args): + pass + + +class FakeNetwork: + id = '12345678-1234-5678-90ab-ba0987654321' + + +class FakeSubnet: + cidr = '192.168.1.1/24' + + +class FakeAllocation: + subnet = FakeSubnet() + ip_address = '192.168.1.2' + ip_version = 4 + + +class FakePort: + id = 'abcdef01-1234-5678-90ab-ba0987654321' + fixed_ips = [FakeAllocation] + device_id = 'cccccccc-cccc-cccc-cccc-cccccccccccc' + network = FakeNetwork() + network_id = network.id + + +class TestBase(base.BaseTestCase): + def setUp(self): + super(TestBase, self).setUp() + self.conf = config.setup_conf() + self.conf.register_opts(interface.OPTS) + config.register_root_helper(self.conf) + self.ip_dev_p = mock.patch.object(ip_lib, 'IPDevice') + self.ip_dev = self.ip_dev_p.start() + self.ip_p = mock.patch.object(ip_lib, 'IPWrapper') + self.ip = self.ip_p.start() + self.device_exists_p = mock.patch.object(ip_lib, 'device_exists') + self.device_exists = self.device_exists_p.start() + + +class 
TestABCDriver(TestBase): + def test_get_device_name(self): + bc = BaseChild(self.conf) + device_name = bc.get_device_name(FakePort()) + self.assertEqual('tapabcdef01-12', device_name) + + def test_l3_init(self): + addresses = [dict(ip_version=4, scope='global', + dynamic=False, cidr='172.16.77.240/24')] + self.ip_dev().addr.list = mock.Mock(return_value=addresses) + self.ip_dev().route.list_onlink_routes.return_value = [] + + bc = BaseChild(self.conf) + ns = '12345678-1234-5678-90ab-ba0987654321' + bc.init_l3('tap0', ['192.168.1.2/24'], namespace=ns, + extra_subnets=[{'cidr': '172.20.0.0/24'}]) + self.ip_dev.assert_has_calls( + [mock.call('tap0', 'sudo', namespace=ns), + mock.call().addr.list(scope='global', filters=['permanent']), + mock.call().addr.add(4, '192.168.1.2/24', '192.168.1.255'), + mock.call().addr.delete(4, '172.16.77.240/24'), + mock.call().route.list_onlink_routes(), + mock.call().route.add_onlink_route('172.20.0.0/24')]) + + def test_l3_init_delete_onlink_routes(self): + addresses = [dict(ip_version=4, scope='global', + dynamic=False, cidr='172.16.77.240/24')] + self.ip_dev().addr.list = mock.Mock(return_value=addresses) + self.ip_dev().route.list_onlink_routes.return_value = ['172.20.0.0/24'] + + bc = BaseChild(self.conf) + ns = '12345678-1234-5678-90ab-ba0987654321' + bc.init_l3('tap0', ['192.168.1.2/24'], namespace=ns) + self.ip_dev.assert_has_calls( + [mock.call().route.list_onlink_routes(), + mock.call().route.delete_onlink_route('172.20.0.0/24')]) + + def test_l3_init_with_preserve(self): + addresses = [dict(ip_version=4, scope='global', + dynamic=False, cidr='192.168.1.3/32')] + self.ip_dev().addr.list = mock.Mock(return_value=addresses) + + bc = BaseChild(self.conf) + ns = '12345678-1234-5678-90ab-ba0987654321' + bc.init_l3('tap0', ['192.168.1.2/24'], namespace=ns, + preserve_ips=['192.168.1.3/32']) + self.ip_dev.assert_has_calls( + [mock.call('tap0', 'sudo', namespace=ns), + mock.call().addr.list(scope='global', filters=['permanent']), + 
mock.call().addr.add(4, '192.168.1.2/24', '192.168.1.255')]) + self.assertFalse(self.ip_dev().addr.delete.called) + + def test_l3_init_with_ipv6(self): + addresses = [dict(ip_version=6, + scope='global', + dynamic=False, + cidr='2001:db8:a::123/64')] + self.ip_dev().addr.list = mock.Mock(return_value=addresses) + bc = BaseChild(self.conf) + ns = '12345678-1234-5678-90ab-ba0987654321' + bc.init_l3('tap0', ['2001:db8:a::124/64'], namespace=ns) + self.ip_dev.assert_has_calls( + [mock.call('tap0', 'sudo', namespace=ns), + mock.call().addr.list(scope='global', filters=['permanent']), + mock.call().addr.add(6, '2001:db8:a::124/64', + '2001:db8:a:0:ffff:ffff:ffff:ffff'), + mock.call().addr.delete(6, '2001:db8:a::123/64')]) + + def test_l3_init_with_duplicated_ipv6(self): + addresses = [dict(ip_version=6, + scope='global', + dynamic=False, + cidr='2001:db8:a::123/64')] + self.ip_dev().addr.list = mock.Mock(return_value=addresses) + bc = BaseChild(self.conf) + ns = '12345678-1234-5678-90ab-ba0987654321' + bc.init_l3('tap0', ['2001:db8:a::123/64'], namespace=ns) + self.assertFalse(self.ip_dev().addr.add.called) + + def test_l3_init_with_duplicated_ipv6_uncompact(self): + addresses = [dict(ip_version=6, + scope='global', + dynamic=False, + cidr='2001:db8:a::123/64')] + self.ip_dev().addr.list = mock.Mock(return_value=addresses) + bc = BaseChild(self.conf) + ns = '12345678-1234-5678-90ab-ba0987654321' + bc.init_l3('tap0', + ['2001:db8:a:0000:0000:0000:0000:0123/64'], + namespace=ns) + self.assertFalse(self.ip_dev().addr.add.called) + + +class TestOVSInterfaceDriver(TestBase): + + def test_get_device_name(self): + br = interface.OVSInterfaceDriver(self.conf) + device_name = br.get_device_name(FakePort()) + self.assertEqual('tapabcdef01-12', device_name) + + def test_plug_no_ns(self): + self._test_plug() + + def test_plug_with_ns(self): + self._test_plug(namespace='01234567-1234-1234-99') + + def test_plug_alt_bridge(self): + self._test_plug(bridge='br-foo') + + def 
test_plug_configured_bridge(self): + br = 'br-v' + self.conf.set_override('ovs_use_veth', False) + self.conf.set_override('ovs_integration_bridge', br) + self.assertEqual(self.conf.ovs_integration_bridge, br) + + def device_exists(dev, root_helper=None, namespace=None): + return dev == br + + ovs = interface.OVSInterfaceDriver(self.conf) + with mock.patch.object(ovs, '_ovs_add_port') as add_port: + self.device_exists.side_effect = device_exists + ovs.plug('01234567-1234-1234-99', + 'port-1234', + 'tap0', + 'aa:bb:cc:dd:ee:ff', + bridge=None, + namespace=None) + + add_port.assert_called_once_with('br-v', + 'tap0', + 'port-1234', + 'aa:bb:cc:dd:ee:ff', + internal=True) + + def _test_plug(self, additional_expectation=[], bridge=None, + namespace=None): + + if not bridge: + bridge = 'br-int' + + def device_exists(dev, root_helper=None, namespace=None): + return dev == bridge + + vsctl_cmd = ['ovs-vsctl', '--', '--if-exists', 'del-port', + 'tap0', '--', 'add-port', + bridge, 'tap0', '--', 'set', 'Interface', 'tap0', + 'type=internal', '--', 'set', 'Interface', 'tap0', + 'external-ids:iface-id=port-1234', '--', 'set', + 'Interface', 'tap0', + 'external-ids:iface-status=active', '--', 'set', + 'Interface', 'tap0', + 'external-ids:attached-mac=aa:bb:cc:dd:ee:ff'] + + with mock.patch.object(utils, 'execute') as execute: + ovs = interface.OVSInterfaceDriver(self.conf) + self.device_exists.side_effect = device_exists + ovs.plug('01234567-1234-1234-99', + 'port-1234', + 'tap0', + 'aa:bb:cc:dd:ee:ff', + bridge=bridge, + namespace=namespace) + execute.assert_called_once_with(vsctl_cmd, 'sudo') + + expected = [mock.call('sudo'), + mock.call().device('tap0'), + mock.call().device().link.set_address('aa:bb:cc:dd:ee:ff')] + expected.extend(additional_expectation) + if namespace: + expected.extend( + [mock.call().ensure_namespace(namespace), + mock.call().ensure_namespace().add_device_to_namespace( + mock.ANY)]) + expected.extend([mock.call().device().link.set_up()]) + + 
self.ip.assert_has_calls(expected) + + def test_mtu_int(self): + self.assertIsNone(self.conf.network_device_mtu) + self.conf.set_override('network_device_mtu', 9000) + self.assertEqual(self.conf.network_device_mtu, 9000) + + def test_plug_mtu(self): + self.conf.set_override('network_device_mtu', 9000) + self._test_plug([mock.call().device().link.set_mtu(9000)]) + + def test_unplug(self, bridge=None): + if not bridge: + bridge = 'br-int' + with mock.patch('neutron.agent.linux.ovs_lib.OVSBridge') as ovs_br: + ovs = interface.OVSInterfaceDriver(self.conf) + ovs.unplug('tap0') + ovs_br.assert_has_calls([mock.call(bridge, 'sudo'), + mock.call().delete_port('tap0')]) + + +class TestOVSInterfaceDriverWithVeth(TestOVSInterfaceDriver): + + def setUp(self): + super(TestOVSInterfaceDriverWithVeth, self).setUp() + self.conf.set_override('ovs_use_veth', True) + + def test_get_device_name(self): + br = interface.OVSInterfaceDriver(self.conf) + device_name = br.get_device_name(FakePort()) + self.assertEqual('ns-abcdef01-12', device_name) + + def test_plug_with_prefix(self): + self._test_plug(devname='qr-0', prefix='qr-') + + def _test_plug(self, devname=None, bridge=None, namespace=None, + prefix=None, mtu=None): + + if not devname: + devname = 'ns-0' + if not bridge: + bridge = 'br-int' + + def device_exists(dev, root_helper=None, namespace=None): + return dev == bridge + + ovs = interface.OVSInterfaceDriver(self.conf) + self.device_exists.side_effect = device_exists + + root_dev = mock.Mock() + ns_dev = mock.Mock() + self.ip().add_veth = mock.Mock(return_value=(root_dev, ns_dev)) + expected = [mock.call('sudo'), + mock.call().add_veth('tap0', devname, + namespace2=namespace)] + + vsctl_cmd = ['ovs-vsctl', '--', '--if-exists', 'del-port', + 'tap0', '--', 'add-port', + bridge, 'tap0', '--', 'set', 'Interface', 'tap0', + 'external-ids:iface-id=port-1234', '--', 'set', + 'Interface', 'tap0', + 'external-ids:iface-status=active', '--', 'set', + 'Interface', 'tap0', + 
'external-ids:attached-mac=aa:bb:cc:dd:ee:ff'] + with mock.patch.object(utils, 'execute') as execute: + ovs.plug('01234567-1234-1234-99', + 'port-1234', + devname, + 'aa:bb:cc:dd:ee:ff', + bridge=bridge, + namespace=namespace, + prefix=prefix) + execute.assert_called_once_with(vsctl_cmd, 'sudo') + + ns_dev.assert_has_calls( + [mock.call.link.set_address('aa:bb:cc:dd:ee:ff')]) + if mtu: + ns_dev.assert_has_calls([mock.call.link.set_mtu(mtu)]) + root_dev.assert_has_calls([mock.call.link.set_mtu(mtu)]) + + self.ip.assert_has_calls(expected) + root_dev.assert_has_calls([mock.call.link.set_up()]) + ns_dev.assert_has_calls([mock.call.link.set_up()]) + + def test_plug_mtu(self): + self.conf.set_override('network_device_mtu', 9000) + self._test_plug(mtu=9000) + + def test_unplug(self, bridge=None): + if not bridge: + bridge = 'br-int' + with mock.patch('neutron.agent.linux.ovs_lib.OVSBridge') as ovs_br: + ovs = interface.OVSInterfaceDriver(self.conf) + ovs.unplug('ns-0', bridge=bridge) + ovs_br.assert_has_calls([mock.call(bridge, 'sudo'), + mock.call().delete_port('tap0')]) + self.ip_dev.assert_has_calls([mock.call('ns-0', 'sudo', None), + mock.call().link.delete()]) + + +class TestBridgeInterfaceDriver(TestBase): + def test_get_device_name(self): + br = interface.BridgeInterfaceDriver(self.conf) + device_name = br.get_device_name(FakePort()) + self.assertEqual('ns-abcdef01-12', device_name) + + def test_plug_no_ns(self): + self._test_plug() + + def test_plug_with_ns(self): + self._test_plug(namespace='01234567-1234-1234-99') + + def _test_plug(self, namespace=None, mtu=None): + def device_exists(device, root_helper=None, namespace=None): + return device.startswith('brq') + + root_veth = mock.Mock() + ns_veth = mock.Mock() + + self.ip().add_veth = mock.Mock(return_value=(root_veth, ns_veth)) + + self.device_exists.side_effect = device_exists + br = interface.BridgeInterfaceDriver(self.conf) + mac_address = 'aa:bb:cc:dd:ee:ff' + br.plug('01234567-1234-1234-99', + 
'port-1234', + 'ns-0', + mac_address, + namespace=namespace) + + ip_calls = [mock.call('sudo'), + mock.call().add_veth('tap0', 'ns-0', namespace2=namespace)] + ns_veth.assert_has_calls([mock.call.link.set_address(mac_address)]) + if mtu: + ns_veth.assert_has_calls([mock.call.link.set_mtu(mtu)]) + root_veth.assert_has_calls([mock.call.link.set_mtu(mtu)]) + + self.ip.assert_has_calls(ip_calls) + + root_veth.assert_has_calls([mock.call.link.set_up()]) + ns_veth.assert_has_calls([mock.call.link.set_up()]) + + def test_plug_dev_exists(self): + self.device_exists.return_value = True + with mock.patch('neutron.agent.linux.interface.LOG.info') as log: + br = interface.BridgeInterfaceDriver(self.conf) + br.plug('01234567-1234-1234-99', + 'port-1234', + 'tap0', + 'aa:bb:cc:dd:ee:ff') + self.ip_dev.assert_has_calls([]) + self.assertEqual(log.call_count, 1) + + def test_plug_mtu(self): + self.device_exists.return_value = False + self.conf.set_override('network_device_mtu', 9000) + self._test_plug(mtu=9000) + + def test_unplug_no_device(self): + self.device_exists.return_value = False + self.ip_dev().link.delete.side_effect = RuntimeError + with mock.patch('neutron.agent.linux.interface.LOG') as log: + br = interface.BridgeInterfaceDriver(self.conf) + br.unplug('tap0') + [mock.call(), mock.call('tap0', 'sudo'), mock.call().link.delete()] + self.assertEqual(log.error.call_count, 1) + + def test_unplug(self): + self.device_exists.return_value = True + with mock.patch('neutron.agent.linux.interface.LOG.debug') as log: + br = interface.BridgeInterfaceDriver(self.conf) + br.unplug('tap0') + self.assertEqual(log.call_count, 1) + + self.ip_dev.assert_has_calls([mock.call('tap0', 'sudo', None), + mock.call().link.delete()]) + + +class TestMetaInterfaceDriver(TestBase): + def setUp(self): + super(TestMetaInterfaceDriver, self).setUp() + config.register_interface_driver_opts_helper(self.conf) + self.conf.register_opts(dhcp.OPTS) + self.client_cls_p = 
mock.patch('neutronclient.v2_0.client.Client') + client_cls = self.client_cls_p.start() + self.client_inst = mock.Mock() + client_cls.return_value = self.client_inst + + fake_network = {'network': {flavor.FLAVOR_NETWORK: 'fake1'}} + fake_port = {'ports': + [{'mac_address': + 'aa:bb:cc:dd:ee:ffa', 'network_id': 'test'}]} + + self.client_inst.list_ports.return_value = fake_port + self.client_inst.show_network.return_value = fake_network + + self.conf.set_override('auth_url', 'http://localhost:35357/v2.0') + self.conf.set_override('auth_region', 'RegionOne') + self.conf.set_override('admin_user', 'neutron') + self.conf.set_override('admin_password', 'password') + self.conf.set_override('admin_tenant_name', 'service') + self.conf.set_override( + 'meta_flavor_driver_mappings', + 'fake1:neutron.agent.linux.interface.OVSInterfaceDriver,' + 'fake2:neutron.agent.linux.interface.BridgeInterfaceDriver') + + def test_get_driver_by_network_id(self): + meta_interface = interface.MetaInterfaceDriver(self.conf) + driver = meta_interface._get_driver_by_network_id('test') + self.assertIsInstance(driver, interface.OVSInterfaceDriver) + + def test_set_device_plugin_tag(self): + meta_interface = interface.MetaInterfaceDriver(self.conf) + driver = meta_interface._get_driver_by_network_id('test') + meta_interface._set_device_plugin_tag(driver, + 'tap0', + namespace=None) + expected = [mock.call('tap0', 'sudo', None), + mock.call().link.set_alias('fake1')] + self.ip_dev.assert_has_calls(expected) + namespace = '01234567-1234-1234-99' + meta_interface._set_device_plugin_tag(driver, + 'tap1', + namespace=namespace) + expected = [mock.call('tap1', 'sudo', '01234567-1234-1234-99'), + mock.call().link.set_alias('fake1')] + self.ip_dev.assert_has_calls(expected) + + def test_get_device_plugin_tag(self): + meta_interface = interface.MetaInterfaceDriver(self.conf) + self.ip_dev().link.alias = 'fake1' + plugin_tag0 = meta_interface._get_device_plugin_tag('tap0', + namespace=None) + expected = 
[mock.call('tap0', 'sudo', None)] + self.ip_dev.assert_has_calls(expected) + self.assertEqual('fake1', plugin_tag0) + namespace = '01234567-1234-1234-99' + expected = [mock.call('tap1', 'sudo', '01234567-1234-1234-99')] + plugin_tag1 = meta_interface._get_device_plugin_tag( + 'tap1', + namespace=namespace) + self.ip_dev.assert_has_calls(expected) + self.assertEqual('fake1', plugin_tag1) + + +class TestIVSInterfaceDriver(TestBase): + + def setUp(self): + super(TestIVSInterfaceDriver, self).setUp() + + def test_get_device_name(self): + br = interface.IVSInterfaceDriver(self.conf) + device_name = br.get_device_name(FakePort()) + self.assertEqual('ns-abcdef01-12', device_name) + + def test_plug_with_prefix(self): + self._test_plug(devname='qr-0', prefix='qr-') + + def _test_plug(self, devname=None, namespace=None, + prefix=None, mtu=None): + + if not devname: + devname = 'ns-0' + + def device_exists(dev, root_helper=None, namespace=None): + return dev == 'indigo' + + ivs = interface.IVSInterfaceDriver(self.conf) + self.device_exists.side_effect = device_exists + + root_dev = mock.Mock() + _ns_dev = mock.Mock() + ns_dev = mock.Mock() + self.ip().add_veth = mock.Mock(return_value=(root_dev, _ns_dev)) + self.ip().device = mock.Mock(return_value=(ns_dev)) + expected = [mock.call('sudo'), mock.call().add_veth('tap0', devname), + mock.call().device(devname)] + + ivsctl_cmd = ['ivs-ctl', 'add-port', 'tap0'] + + with mock.patch.object(utils, 'execute') as execute: + ivs.plug('01234567-1234-1234-99', + 'port-1234', + devname, + 'aa:bb:cc:dd:ee:ff', + namespace=namespace, + prefix=prefix) + execute.assert_called_once_with(ivsctl_cmd, 'sudo') + + ns_dev.assert_has_calls( + [mock.call.link.set_address('aa:bb:cc:dd:ee:ff')]) + if mtu: + ns_dev.assert_has_calls([mock.call.link.set_mtu(mtu)]) + root_dev.assert_has_calls([mock.call.link.set_mtu(mtu)]) + if namespace: + expected.extend( + [mock.call().ensure_namespace(namespace), + 
mock.call().ensure_namespace().add_device_to_namespace( + mock.ANY)]) + + self.ip.assert_has_calls(expected) + root_dev.assert_has_calls([mock.call.link.set_up()]) + ns_dev.assert_has_calls([mock.call.link.set_up()]) + + def test_plug_mtu(self): + self.conf.set_override('network_device_mtu', 9000) + self._test_plug(mtu=9000) + + def test_plug_namespace(self): + self._test_plug(namespace='mynamespace') + + def test_unplug(self): + ivs = interface.IVSInterfaceDriver(self.conf) + ivsctl_cmd = ['ivs-ctl', 'del-port', 'tap0'] + with mock.patch.object(utils, 'execute') as execute: + ivs.unplug('ns-0') + execute.assert_called_once_with(ivsctl_cmd, 'sudo') + self.ip_dev.assert_has_calls([mock.call('ns-0', 'sudo', None), + mock.call().link.delete()]) + + +class TestMidonetInterfaceDriver(TestBase): + def setUp(self): + self.conf = config.setup_conf() + self.conf.register_opts(interface.OPTS) + config.register_root_helper(self.conf) + self.driver = interface.MidonetInterfaceDriver(self.conf) + self.network_id = uuidutils.generate_uuid() + self.port_id = uuidutils.generate_uuid() + self.device_name = "tap0" + self.mac_address = "aa:bb:cc:dd:ee:ff" + self.bridge = "br-test" + self.namespace = "ns-test" + super(TestMidonetInterfaceDriver, self).setUp() + + def test_plug(self): + cmd = ['mm-ctl', '--bind-port', self.port_id, 'tap0'] + self.device_exists.return_value = False + + root_dev = mock.Mock() + ns_dev = mock.Mock() + self.ip().add_veth = mock.Mock(return_value=(root_dev, ns_dev)) + with mock.patch.object(utils, 'execute') as execute: + self.driver.plug( + self.network_id, self.port_id, + self.device_name, self.mac_address, + self.bridge, self.namespace) + execute.assert_called_once_with(cmd, 'sudo') + + expected = [mock.call(), mock.call('sudo'), + mock.call().add_veth(self.device_name, + self.device_name, + namespace2=self.namespace), + mock.call().ensure_namespace(self.namespace), + mock.call().ensure_namespace().add_device_to_namespace( + mock.ANY)] + + 
ns_dev.assert_has_calls( + [mock.call.link.set_address(self.mac_address)]) + + root_dev.assert_has_calls([mock.call.link.set_up()]) + ns_dev.assert_has_calls([mock.call.link.set_up()]) + self.ip.assert_has_calls(expected, True) + + def test_unplug(self): + self.driver.unplug(self.device_name, self.bridge, self.namespace) + + self.ip_dev.assert_has_calls([ + mock.call(self.device_name, self.driver.root_helper, + self.namespace), + mock.call().link.delete()]) + self.ip.assert_has_calls(mock.call().garbage_collect_namespace()) diff --git a/neutron/tests/unit/test_linux_ip_lib.py b/neutron/tests/unit/test_linux_ip_lib.py new file mode 100644 index 000000000..a002c1d29 --- /dev/null +++ b/neutron/tests/unit/test_linux_ip_lib.py @@ -0,0 +1,860 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import mock + +from neutron.agent.linux import ip_lib +from neutron.common import exceptions +from neutron.tests import base + +NETNS_SAMPLE = [ + '12345678-1234-5678-abcd-1234567890ab', + 'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb', + 'cccccccc-cccc-cccc-cccc-cccccccccccc'] + +LINK_SAMPLE = [ + '1: lo: mtu 16436 qdisc noqueue state UNKNOWN \\' + 'link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 promiscuity 0', + '2: eth0: mtu 1500 qdisc mq state UP ' + 'qlen 1000\ link/ether cc:dd:ee:ff:ab:cd brd ff:ff:ff:ff:ff:ff' + '\ alias openvswitch', + '3: br-int: mtu 1500 qdisc noop state DOWN ' + '\ link/ether aa:bb:cc:dd:ee:ff brd ff:ff:ff:ff:ff:ff promiscuity 0', + '4: gw-ddc717df-49: mtu 1500 qdisc noop ' + 'state DOWN \ link/ether fe:dc:ba:fe:dc:ba brd ff:ff:ff:ff:ff:ff ' + 'promiscuity 0', + '5: foo:foo: mtu 1500 qdisc mq state ' + 'UP qlen 1000\ link/ether cc:dd:ee:ff:ab:cd brd ff:ff:ff:ff:ff:ff ' + 'promiscuity 0', + '6: foo@foo: mtu 1500 qdisc mq state ' + 'UP qlen 1000\ link/ether cc:dd:ee:ff:ab:cd brd ff:ff:ff:ff:ff:ff ' + 'promiscuity 0', + '7: foo:foo@foo: mtu 1500 qdisc mq ' + 'state UP qlen 1000' + '\ link/ether cc:dd:ee:ff:ab:cd brd ff:ff:ff:ff:ff:ff promiscuity 0', + '8: foo@foo:foo: mtu 1500 qdisc mq ' + 'state UP qlen 1000' + '\ link/ether cc:dd:ee:ff:ab:cd brd ff:ff:ff:ff:ff:ff promiscuity 0', + '9: bar.9@eth0: mtu 1500 qdisc ' + ' noqueue master brq0b24798c-07 state UP mode DEFAULT' + '\ link/ether ab:04:49:b6:ab:a0 brd ff:ff:ff:ff:ff:ff promiscuity 0' + '\ vlan protocol 802.1q id 9 ', + '10: bar@eth0: mtu 1500 qdisc ' + ' noqueue master brq0b24798c-07 state UP mode DEFAULT' + '\ link/ether ab:04:49:b6:ab:a0 brd ff:ff:ff:ff:ff:ff promiscuity 0' + '\ vlan protocol 802.1Q id 10 ', + '11: bar:bar@eth0: mtu 1500 qdisc mq ' + 'state UP qlen 1000' + '\ link/ether cc:dd:ee:ff:ab:cd brd ff:ff:ff:ff:ff:ff promiscuity 0' + '\ vlan id 11 ', + '12: bar@bar@eth0: mtu 1500 qdisc mq ' + 'state UP qlen 1000' + '\ link/ether cc:dd:ee:ff:ab:cd brd ff:ff:ff:ff:ff:ff 
promiscuity 0' + '\ vlan id 12 ', + '13: bar:bar@bar@eth0: mtu 1500 ' + 'qdisc mq state UP qlen 1000' + '\ link/ether cc:dd:ee:ff:ab:cd brd ff:ff:ff:ff:ff:ff promiscuity 0' + '\ vlan protocol 802.1q id 13 ', + '14: bar@bar:bar@eth0: mtu 1500 ' + 'qdisc mq state UP qlen 1000' + '\ link/ether cc:dd:ee:ff:ab:cd brd ff:ff:ff:ff:ff:ff promiscuity 0' + '\ vlan protocol 802.1Q id 14 '] + +ADDR_SAMPLE = (""" +2: eth0: mtu 1500 qdisc mq state UP qlen 1000 + link/ether dd:cc:aa:b9:76:ce brd ff:ff:ff:ff:ff:ff + inet 172.16.77.240/24 brd 172.16.77.255 scope global eth0 + inet6 2001:470:9:1224:5595:dd51:6ba2:e788/64 scope global temporary dynamic + valid_lft 14187sec preferred_lft 3387sec + inet6 2001:470:9:1224:fd91:272:581e:3a32/64 scope global temporary """ + """deprecated dynamic + valid_lft 14187sec preferred_lft 0sec + inet6 2001:470:9:1224:4508:b885:5fb:740b/64 scope global temporary """ + """deprecated dynamic + valid_lft 14187sec preferred_lft 0sec + inet6 2001:470:9:1224:dfcc:aaff:feb9:76ce/64 scope global dynamic + valid_lft 14187sec preferred_lft 3387sec + inet6 fe80::dfcc:aaff:feb9:76ce/64 scope link + valid_lft forever preferred_lft forever +""") + +ADDR_SAMPLE2 = (""" +2: eth0: mtu 1500 qdisc mq state UP qlen 1000 + link/ether dd:cc:aa:b9:76:ce brd ff:ff:ff:ff:ff:ff + inet 172.16.77.240/24 scope global eth0 + inet6 2001:470:9:1224:5595:dd51:6ba2:e788/64 scope global temporary dynamic + valid_lft 14187sec preferred_lft 3387sec + inet6 2001:470:9:1224:fd91:272:581e:3a32/64 scope global temporary """ + """deprecated dynamic + valid_lft 14187sec preferred_lft 0sec + inet6 2001:470:9:1224:4508:b885:5fb:740b/64 scope global temporary """ + """deprecated dynamic + valid_lft 14187sec preferred_lft 0sec + inet6 2001:470:9:1224:dfcc:aaff:feb9:76ce/64 scope global dynamic + valid_lft 14187sec preferred_lft 3387sec + inet6 fe80::dfcc:aaff:feb9:76ce/64 scope link + valid_lft forever preferred_lft forever +""") + +GATEWAY_SAMPLE1 = (""" +default via 10.35.19.254 metric 100 
+10.35.16.0/22 proto kernel scope link src 10.35.17.97 +""") + +GATEWAY_SAMPLE2 = (""" +default via 10.35.19.254 metric 100 +""") + +GATEWAY_SAMPLE3 = (""" +10.35.16.0/22 proto kernel scope link src 10.35.17.97 +""") + +GATEWAY_SAMPLE4 = (""" +default via 10.35.19.254 +""") + +GATEWAY_SAMPLE5 = (""" +default via 192.168.99.1 proto static +""") + +GATEWAY_SAMPLE6 = (""" +default via 192.168.99.1 proto static metric 100 +""") + +DEVICE_ROUTE_SAMPLE = ("10.0.0.0/24 scope link src 10.0.0.2") + +SUBNET_SAMPLE1 = ("10.0.0.0/24 dev qr-23380d11-d2 scope link src 10.0.0.1\n" + "10.0.0.0/24 dev tap1d7888a7-10 scope link src 10.0.0.2") +SUBNET_SAMPLE2 = ("10.0.0.0/24 dev tap1d7888a7-10 scope link src 10.0.0.2\n" + "10.0.0.0/24 dev qr-23380d11-d2 scope link src 10.0.0.1") + + +class TestSubProcessBase(base.BaseTestCase): + def setUp(self): + super(TestSubProcessBase, self).setUp() + self.execute_p = mock.patch('neutron.agent.linux.utils.execute') + self.execute = self.execute_p.start() + + def test_execute_wrapper(self): + ip_lib.SubProcessBase._execute('o', 'link', ('list',), 'sudo') + + self.execute.assert_called_once_with(['ip', '-o', 'link', 'list'], + root_helper='sudo') + + def test_execute_wrapper_int_options(self): + ip_lib.SubProcessBase._execute([4], 'link', ('list',)) + + self.execute.assert_called_once_with(['ip', '-4', 'link', 'list'], + root_helper=None) + + def test_execute_wrapper_no_options(self): + ip_lib.SubProcessBase._execute([], 'link', ('list',)) + + self.execute.assert_called_once_with(['ip', 'link', 'list'], + root_helper=None) + + def test_run_no_namespace(self): + base = ip_lib.SubProcessBase('sudo') + base._run([], 'link', ('list',)) + self.execute.assert_called_once_with(['ip', 'link', 'list'], + root_helper=None) + + def test_run_namespace(self): + base = ip_lib.SubProcessBase('sudo', 'ns') + base._run([], 'link', ('list',)) + self.execute.assert_called_once_with(['ip', 'netns', 'exec', 'ns', + 'ip', 'link', 'list'], + root_helper='sudo') + + def 
test_as_root_namespace(self): + base = ip_lib.SubProcessBase('sudo', 'ns') + base._as_root([], 'link', ('list',)) + self.execute.assert_called_once_with(['ip', 'netns', 'exec', 'ns', + 'ip', 'link', 'list'], + root_helper='sudo') + + def test_as_root_no_root_helper(self): + base = ip_lib.SubProcessBase() + self.assertRaises(exceptions.SudoRequired, + base._as_root, + [], 'link', ('list',)) + + +class TestIpWrapper(base.BaseTestCase): + def setUp(self): + super(TestIpWrapper, self).setUp() + self.execute_p = mock.patch.object(ip_lib.IPWrapper, '_execute') + self.execute = self.execute_p.start() + + def test_get_devices(self): + self.execute.return_value = '\n'.join(LINK_SAMPLE) + retval = ip_lib.IPWrapper('sudo').get_devices() + self.assertEqual(retval, + [ip_lib.IPDevice('lo'), + ip_lib.IPDevice('eth0'), + ip_lib.IPDevice('br-int'), + ip_lib.IPDevice('gw-ddc717df-49'), + ip_lib.IPDevice('foo:foo'), + ip_lib.IPDevice('foo@foo'), + ip_lib.IPDevice('foo:foo@foo'), + ip_lib.IPDevice('foo@foo:foo'), + ip_lib.IPDevice('bar.9'), + ip_lib.IPDevice('bar'), + ip_lib.IPDevice('bar:bar'), + ip_lib.IPDevice('bar@bar'), + ip_lib.IPDevice('bar:bar@bar'), + ip_lib.IPDevice('bar@bar:bar')]) + + self.execute.assert_called_once_with(['o', 'd'], 'link', ('list',), + 'sudo', None) + + def test_get_devices_malformed_line(self): + self.execute.return_value = '\n'.join(LINK_SAMPLE + ['gibberish']) + retval = ip_lib.IPWrapper('sudo').get_devices() + self.assertEqual(retval, + [ip_lib.IPDevice('lo'), + ip_lib.IPDevice('eth0'), + ip_lib.IPDevice('br-int'), + ip_lib.IPDevice('gw-ddc717df-49'), + ip_lib.IPDevice('foo:foo'), + ip_lib.IPDevice('foo@foo'), + ip_lib.IPDevice('foo:foo@foo'), + ip_lib.IPDevice('foo@foo:foo'), + ip_lib.IPDevice('bar.9'), + ip_lib.IPDevice('bar'), + ip_lib.IPDevice('bar:bar'), + ip_lib.IPDevice('bar@bar'), + ip_lib.IPDevice('bar:bar@bar'), + ip_lib.IPDevice('bar@bar:bar')]) + + self.execute.assert_called_once_with(['o', 'd'], 'link', ('list',), + 'sudo', None) + + def 
test_get_namespaces(self): + self.execute.return_value = '\n'.join(NETNS_SAMPLE) + retval = ip_lib.IPWrapper.get_namespaces('sudo') + self.assertEqual(retval, + ['12345678-1234-5678-abcd-1234567890ab', + 'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb', + 'cccccccc-cccc-cccc-cccc-cccccccccccc']) + + self.execute.assert_called_once_with('', 'netns', ('list',), + root_helper='sudo') + + def test_add_tuntap(self): + ip_lib.IPWrapper('sudo').add_tuntap('tap0') + self.execute.assert_called_once_with('', 'tuntap', + ('add', 'tap0', 'mode', 'tap'), + 'sudo', None) + + def test_add_veth(self): + ip_lib.IPWrapper('sudo').add_veth('tap0', 'tap1') + self.execute.assert_called_once_with('', 'link', + ('add', 'tap0', 'type', 'veth', + 'peer', 'name', 'tap1'), + 'sudo', None) + + def test_add_veth_with_namespaces(self): + ns2 = 'ns2' + with mock.patch.object(ip_lib.IPWrapper, 'ensure_namespace') as en: + ip_lib.IPWrapper('sudo').add_veth('tap0', 'tap1', namespace2=ns2) + en.assert_has_calls([mock.call(ns2)]) + self.execute.assert_called_once_with('', 'link', + ('add', 'tap0', 'type', 'veth', + 'peer', 'name', 'tap1', + 'netns', ns2), + 'sudo', None) + + def test_get_device(self): + dev = ip_lib.IPWrapper('sudo', 'ns').device('eth0') + self.assertEqual(dev.root_helper, 'sudo') + self.assertEqual(dev.namespace, 'ns') + self.assertEqual(dev.name, 'eth0') + + def test_ensure_namespace(self): + with mock.patch.object(ip_lib, 'IPDevice') as ip_dev: + ip = ip_lib.IPWrapper('sudo') + with mock.patch.object(ip.netns, 'exists') as ns_exists: + ns_exists.return_value = False + ip.ensure_namespace('ns') + self.execute.assert_has_calls( + [mock.call([], 'netns', ('add', 'ns'), 'sudo', None)]) + ip_dev.assert_has_calls([mock.call('lo', 'sudo', 'ns'), + mock.call().link.set_up()]) + + def test_ensure_namespace_existing(self): + with mock.patch.object(ip_lib, 'IpNetnsCommand') as ip_ns_cmd: + ip_ns_cmd.exists.return_value = True + ns = ip_lib.IPWrapper('sudo').ensure_namespace('ns') + 
self.assertFalse(self.execute.called) + self.assertEqual(ns.namespace, 'ns') + + def test_namespace_is_empty_no_devices(self): + ip = ip_lib.IPWrapper('sudo', 'ns') + with mock.patch.object(ip, 'get_devices') as get_devices: + get_devices.return_value = [] + + self.assertTrue(ip.namespace_is_empty()) + get_devices.assert_called_once_with(exclude_loopback=True) + + def test_namespace_is_empty(self): + ip = ip_lib.IPWrapper('sudo', 'ns') + with mock.patch.object(ip, 'get_devices') as get_devices: + get_devices.return_value = [mock.Mock()] + + self.assertFalse(ip.namespace_is_empty()) + get_devices.assert_called_once_with(exclude_loopback=True) + + def test_garbage_collect_namespace_does_not_exist(self): + with mock.patch.object(ip_lib, 'IpNetnsCommand') as ip_ns_cmd_cls: + ip_ns_cmd_cls.return_value.exists.return_value = False + ip = ip_lib.IPWrapper('sudo', 'ns') + with mock.patch.object(ip, 'namespace_is_empty') as mock_is_empty: + + self.assertFalse(ip.garbage_collect_namespace()) + ip_ns_cmd_cls.assert_has_calls([mock.call().exists('ns')]) + self.assertNotIn(mock.call().delete('ns'), + ip_ns_cmd_cls.return_value.mock_calls) + self.assertEqual(mock_is_empty.mock_calls, []) + + def test_garbage_collect_namespace_existing_empty_ns(self): + with mock.patch.object(ip_lib, 'IpNetnsCommand') as ip_ns_cmd_cls: + ip_ns_cmd_cls.return_value.exists.return_value = True + + ip = ip_lib.IPWrapper('sudo', 'ns') + + with mock.patch.object(ip, 'namespace_is_empty') as mock_is_empty: + mock_is_empty.return_value = True + self.assertTrue(ip.garbage_collect_namespace()) + + mock_is_empty.assert_called_once_with() + expected = [mock.call().exists('ns'), + mock.call().delete('ns')] + ip_ns_cmd_cls.assert_has_calls(expected) + + def test_garbage_collect_namespace_existing_not_empty(self): + lo_device = mock.Mock() + lo_device.name = 'lo' + tap_device = mock.Mock() + tap_device.name = 'tap1' + + with mock.patch.object(ip_lib, 'IpNetnsCommand') as ip_ns_cmd_cls: + 
ip_ns_cmd_cls.return_value.exists.return_value = True + + ip = ip_lib.IPWrapper('sudo', 'ns') + + with mock.patch.object(ip, 'namespace_is_empty') as mock_is_empty: + mock_is_empty.return_value = False + + self.assertFalse(ip.garbage_collect_namespace()) + + mock_is_empty.assert_called_once_with() + expected = [mock.call(ip), + mock.call().exists('ns')] + self.assertEqual(ip_ns_cmd_cls.mock_calls, expected) + self.assertNotIn(mock.call().delete('ns'), + ip_ns_cmd_cls.mock_calls) + + def test_add_vxlan_valid_port_length(self): + retval = ip_lib.IPWrapper('sudo').add_vxlan('vxlan0', 'vni0', + group='group0', + dev='dev0', ttl='ttl0', + tos='tos0', + local='local0', proxy=True, + port=('1', '2')) + self.assertIsInstance(retval, ip_lib.IPDevice) + self.assertEqual(retval.name, 'vxlan0') + self.execute.assert_called_once_with('', 'link', + ['add', 'vxlan0', 'type', + 'vxlan', 'id', 'vni0', 'group', + 'group0', 'dev', 'dev0', + 'ttl', 'ttl0', 'tos', 'tos0', + 'local', 'local0', 'proxy', + 'port', '1', '2'], + 'sudo', None) + + def test_add_vxlan_invalid_port_length(self): + wrapper = ip_lib.IPWrapper('sudo') + self.assertRaises(exceptions.NetworkVxlanPortRangeError, + wrapper.add_vxlan, 'vxlan0', 'vni0', group='group0', + dev='dev0', ttl='ttl0', tos='tos0', + local='local0', proxy=True, + port=('1', '2', '3')) + + def test_add_device_to_namespace(self): + dev = mock.Mock() + ip_lib.IPWrapper('sudo', 'ns').add_device_to_namespace(dev) + dev.assert_has_calls([mock.call.link.set_netns('ns')]) + + def test_add_device_to_namespace_is_none(self): + dev = mock.Mock() + ip_lib.IPWrapper('sudo').add_device_to_namespace(dev) + self.assertEqual(dev.mock_calls, []) + + +class TestIpRule(base.BaseTestCase): + def setUp(self): + super(TestIpRule, self).setUp() + self.execute_p = mock.patch.object(ip_lib.IpRule, '_execute') + self.execute = self.execute_p.start() + + def test_add_rule_from(self): + ip_lib.IpRule('sudo').add_rule_from('192.168.45.100', 2, 100) + 
self.execute.assert_called_once_with('', 'rule', + ('add', 'from', '192.168.45.100', + 'lookup', 2, 'priority', 100), + 'sudo', None) + + def test_delete_rule_priority(self): + ip_lib.IpRule('sudo').delete_rule_priority(100) + self.execute.assert_called_once_with('', 'rule', + ('del', 'priority', 100), + 'sudo', None) + + +class TestIPDevice(base.BaseTestCase): + def test_eq_same_name(self): + dev1 = ip_lib.IPDevice('tap0') + dev2 = ip_lib.IPDevice('tap0') + self.assertEqual(dev1, dev2) + + def test_eq_diff_name(self): + dev1 = ip_lib.IPDevice('tap0') + dev2 = ip_lib.IPDevice('tap1') + self.assertNotEqual(dev1, dev2) + + def test_eq_same_namespace(self): + dev1 = ip_lib.IPDevice('tap0', 'ns1') + dev2 = ip_lib.IPDevice('tap0', 'ns1') + self.assertEqual(dev1, dev2) + + def test_eq_diff_namespace(self): + dev1 = ip_lib.IPDevice('tap0', 'sudo', 'ns1') + dev2 = ip_lib.IPDevice('tap0', 'sudo', 'ns2') + self.assertNotEqual(dev1, dev2) + + def test_eq_other_is_none(self): + dev1 = ip_lib.IPDevice('tap0', 'sudo', 'ns1') + self.assertIsNotNone(dev1) + + def test_str(self): + self.assertEqual(str(ip_lib.IPDevice('tap0')), 'tap0') + + +class TestIPCommandBase(base.BaseTestCase): + def setUp(self): + super(TestIPCommandBase, self).setUp() + self.ip = mock.Mock() + self.ip.root_helper = 'sudo' + self.ip.namespace = 'namespace' + self.ip_cmd = ip_lib.IpCommandBase(self.ip) + self.ip_cmd.COMMAND = 'foo' + + def test_run(self): + self.ip_cmd._run('link', 'show') + self.ip.assert_has_calls([mock.call._run([], 'foo', ('link', 'show'))]) + + def test_run_with_options(self): + self.ip_cmd._run('link', options='o') + self.ip.assert_has_calls([mock.call._run('o', 'foo', ('link', ))]) + + def test_as_root(self): + self.ip_cmd._as_root('link') + self.ip.assert_has_calls( + [mock.call._as_root([], 'foo', ('link', ), False)]) + + def test_as_root_with_options(self): + self.ip_cmd._as_root('link', options='o') + self.ip.assert_has_calls( + [mock.call._as_root('o', 'foo', ('link', ), False)]) 
+ + +class TestIPDeviceCommandBase(base.BaseTestCase): + def setUp(self): + super(TestIPDeviceCommandBase, self).setUp() + self.ip_dev = mock.Mock() + self.ip_dev.name = 'eth0' + self.ip_dev.root_helper = 'sudo' + self.ip_dev._execute = mock.Mock(return_value='executed') + self.ip_cmd = ip_lib.IpDeviceCommandBase(self.ip_dev) + self.ip_cmd.COMMAND = 'foo' + + def test_name_property(self): + self.assertEqual(self.ip_cmd.name, 'eth0') + + +class TestIPCmdBase(base.BaseTestCase): + def setUp(self): + super(TestIPCmdBase, self).setUp() + self.parent = mock.Mock() + self.parent.name = 'eth0' + self.parent.root_helper = 'sudo' + + def _assert_call(self, options, args): + self.parent.assert_has_calls([ + mock.call._run(options, self.command, args)]) + + def _assert_sudo(self, options, args, force_root_namespace=False): + self.parent.assert_has_calls( + [mock.call._as_root(options, self.command, args, + force_root_namespace)]) + + +class TestIpLinkCommand(TestIPCmdBase): + def setUp(self): + super(TestIpLinkCommand, self).setUp() + self.parent._run.return_value = LINK_SAMPLE[1] + self.command = 'link' + self.link_cmd = ip_lib.IpLinkCommand(self.parent) + + def test_set_address(self): + self.link_cmd.set_address('aa:bb:cc:dd:ee:ff') + self._assert_sudo([], ('set', 'eth0', 'address', 'aa:bb:cc:dd:ee:ff')) + + def test_set_mtu(self): + self.link_cmd.set_mtu(1500) + self._assert_sudo([], ('set', 'eth0', 'mtu', 1500)) + + def test_set_up(self): + self.link_cmd.set_up() + self._assert_sudo([], ('set', 'eth0', 'up')) + + def test_set_down(self): + self.link_cmd.set_down() + self._assert_sudo([], ('set', 'eth0', 'down')) + + def test_set_netns(self): + self.link_cmd.set_netns('foo') + self._assert_sudo([], ('set', 'eth0', 'netns', 'foo')) + self.assertEqual(self.parent.namespace, 'foo') + + def test_set_name(self): + self.link_cmd.set_name('tap1') + self._assert_sudo([], ('set', 'eth0', 'name', 'tap1')) + self.assertEqual(self.parent.name, 'tap1') + + def test_set_alias(self): + 
self.link_cmd.set_alias('openvswitch') + self._assert_sudo([], ('set', 'eth0', 'alias', 'openvswitch')) + + def test_delete(self): + self.link_cmd.delete() + self._assert_sudo([], ('delete', 'eth0')) + + def test_address_property(self): + self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1]) + self.assertEqual(self.link_cmd.address, 'cc:dd:ee:ff:ab:cd') + + def test_mtu_property(self): + self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1]) + self.assertEqual(self.link_cmd.mtu, 1500) + + def test_qdisc_property(self): + self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1]) + self.assertEqual(self.link_cmd.qdisc, 'mq') + + def test_qlen_property(self): + self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1]) + self.assertEqual(self.link_cmd.qlen, 1000) + + def test_alias_property(self): + self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1]) + self.assertEqual(self.link_cmd.alias, 'openvswitch') + + def test_state_property(self): + self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1]) + self.assertEqual(self.link_cmd.state, 'UP') + + def test_settings_property(self): + expected = {'mtu': 1500, + 'qlen': 1000, + 'state': 'UP', + 'qdisc': 'mq', + 'brd': 'ff:ff:ff:ff:ff:ff', + 'link/ether': 'cc:dd:ee:ff:ab:cd', + 'alias': 'openvswitch'} + self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1]) + self.assertEqual(self.link_cmd.attributes, expected) + self._assert_call('o', ('show', 'eth0')) + + +class TestIpAddrCommand(TestIPCmdBase): + def setUp(self): + super(TestIpAddrCommand, self).setUp() + self.parent.name = 'tap0' + self.command = 'addr' + self.addr_cmd = ip_lib.IpAddrCommand(self.parent) + + def test_add_address(self): + self.addr_cmd.add(4, '192.168.45.100/24', '192.168.45.255') + self._assert_sudo([4], + ('add', '192.168.45.100/24', 'brd', '192.168.45.255', + 'scope', 'global', 'dev', 'tap0')) + + def test_add_address_scoped(self): + self.addr_cmd.add(4, '192.168.45.100/24', '192.168.45.255', + scope='link') 
+ self._assert_sudo([4], + ('add', '192.168.45.100/24', 'brd', '192.168.45.255', + 'scope', 'link', 'dev', 'tap0')) + + def test_del_address(self): + self.addr_cmd.delete(4, '192.168.45.100/24') + self._assert_sudo([4], + ('del', '192.168.45.100/24', 'dev', 'tap0')) + + def test_flush(self): + self.addr_cmd.flush() + self._assert_sudo([], ('flush', 'tap0')) + + def test_list(self): + expected = [ + dict(ip_version=4, scope='global', + dynamic=False, cidr='172.16.77.240/24', + broadcast='172.16.77.255'), + dict(ip_version=6, scope='global', + dynamic=True, cidr='2001:470:9:1224:5595:dd51:6ba2:e788/64', + broadcast='::'), + dict(ip_version=6, scope='global', + dynamic=True, cidr='2001:470:9:1224:fd91:272:581e:3a32/64', + broadcast='::'), + dict(ip_version=6, scope='global', + dynamic=True, cidr='2001:470:9:1224:4508:b885:5fb:740b/64', + broadcast='::'), + dict(ip_version=6, scope='global', + dynamic=True, cidr='2001:470:9:1224:dfcc:aaff:feb9:76ce/64', + broadcast='::'), + dict(ip_version=6, scope='link', + dynamic=False, cidr='fe80::dfcc:aaff:feb9:76ce/64', + broadcast='::')] + + test_cases = [ADDR_SAMPLE, ADDR_SAMPLE2] + + for test_case in test_cases: + self.parent._run = mock.Mock(return_value=test_case) + self.assertEqual(self.addr_cmd.list(), expected) + self._assert_call([], ('show', 'tap0')) + + def test_list_filtered(self): + expected = [ + dict(ip_version=4, scope='global', + dynamic=False, cidr='172.16.77.240/24', + broadcast='172.16.77.255')] + + test_cases = [ADDR_SAMPLE, ADDR_SAMPLE2] + + for test_case in test_cases: + output = '\n'.join(test_case.split('\n')[0:4]) + self.parent._run.return_value = output + self.assertEqual(self.addr_cmd.list('global', + filters=['permanent']), expected) + self._assert_call([], ('show', 'tap0', 'permanent', 'scope', + 'global')) + + +class TestIpRouteCommand(TestIPCmdBase): + def setUp(self): + super(TestIpRouteCommand, self).setUp() + self.parent.name = 'eth0' + self.command = 'route' + self.route_cmd = 
ip_lib.IpRouteCommand(self.parent) + + def test_add_gateway(self): + gateway = '192.168.45.100' + metric = 100 + table = 14 + self.route_cmd.add_gateway(gateway, metric, table) + self._assert_sudo([], + ('replace', 'default', 'via', gateway, + 'metric', metric, + 'dev', self.parent.name, 'table', table)) + + def test_del_gateway(self): + gateway = '192.168.45.100' + table = 14 + self.route_cmd.delete_gateway(gateway, table) + self._assert_sudo([], + ('del', 'default', 'via', gateway, + 'dev', self.parent.name, 'table', table)) + + def test_get_gateway(self): + test_cases = [{'sample': GATEWAY_SAMPLE1, + 'expected': {'gateway': '10.35.19.254', + 'metric': 100}}, + {'sample': GATEWAY_SAMPLE2, + 'expected': {'gateway': '10.35.19.254', + 'metric': 100}}, + {'sample': GATEWAY_SAMPLE3, + 'expected': None}, + {'sample': GATEWAY_SAMPLE4, + 'expected': {'gateway': '10.35.19.254'}}, + {'sample': GATEWAY_SAMPLE5, + 'expected': {'gateway': '192.168.99.1'}}, + {'sample': GATEWAY_SAMPLE6, + 'expected': {'gateway': '192.168.99.1', + 'metric': 100}}] + for test_case in test_cases: + self.parent._run = mock.Mock(return_value=test_case['sample']) + self.assertEqual(self.route_cmd.get_gateway(), + test_case['expected']) + + def test_pullup_route(self): + # interface is not the first in the list - requires + # deleting and creating existing entries + output = [DEVICE_ROUTE_SAMPLE, SUBNET_SAMPLE1] + + def pullup_side_effect(self, *args): + result = output.pop(0) + return result + + self.parent._run = mock.Mock(side_effect=pullup_side_effect) + self.route_cmd.pullup_route('tap1d7888a7-10') + self._assert_sudo([], ('del', '10.0.0.0/24', 'dev', 'qr-23380d11-d2')) + self._assert_sudo([], ('append', '10.0.0.0/24', 'proto', 'kernel', + 'src', '10.0.0.1', 'dev', 'qr-23380d11-d2')) + + def test_pullup_route_first(self): + # interface is first in the list - no changes + output = [DEVICE_ROUTE_SAMPLE, SUBNET_SAMPLE2] + + def pullup_side_effect(self, *args): + result = output.pop(0) + return 
result + + self.parent._run = mock.Mock(side_effect=pullup_side_effect) + self.route_cmd.pullup_route('tap1d7888a7-10') + # Check two calls - device get and subnet get + self.assertEqual(len(self.parent._run.mock_calls), 2) + + def test_add_route(self): + cidr = '192.168.45.100/24' + ip = '10.0.0.1' + table = 14 + self.route_cmd.add_route(cidr, ip, table) + self._assert_sudo([], + ('replace', cidr, 'via', ip, + 'dev', self.parent.name, 'table', table)) + + def test_delete_route(self): + cidr = '192.168.45.100/24' + ip = '10.0.0.1' + table = 14 + self.route_cmd.delete_route(cidr, ip, table) + self._assert_sudo([], + ('del', cidr, 'via', ip, + 'dev', self.parent.name, 'table', table)) + + +class TestIpNetnsCommand(TestIPCmdBase): + def setUp(self): + super(TestIpNetnsCommand, self).setUp() + self.command = 'netns' + self.netns_cmd = ip_lib.IpNetnsCommand(self.parent) + + def test_add_namespace(self): + ns = self.netns_cmd.add('ns') + self._assert_sudo([], ('add', 'ns'), force_root_namespace=True) + self.assertEqual(ns.namespace, 'ns') + + def test_delete_namespace(self): + with mock.patch('neutron.agent.linux.utils.execute'): + self.netns_cmd.delete('ns') + self._assert_sudo([], ('delete', 'ns'), force_root_namespace=True) + + def test_namespace_exists(self): + retval = '\n'.join(NETNS_SAMPLE) + # need another instance to avoid mocking + netns_cmd = ip_lib.IpNetnsCommand(ip_lib.SubProcessBase()) + with mock.patch('neutron.agent.linux.utils.execute') as execute: + execute.return_value = retval + self.assertTrue( + netns_cmd.exists('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb')) + execute.assert_called_once_with(['ip', '-o', 'netns', 'list'], + root_helper=None) + + def test_namespace_doest_not_exist(self): + retval = '\n'.join(NETNS_SAMPLE) + # need another instance to avoid mocking + netns_cmd = ip_lib.IpNetnsCommand(ip_lib.SubProcessBase()) + with mock.patch('neutron.agent.linux.utils.execute') as execute: + execute.return_value = retval + self.assertFalse( + 
netns_cmd.exists('bbbbbbbb-1111-2222-3333-bbbbbbbbbbbb')) + execute.assert_called_once_with(['ip', '-o', 'netns', 'list'], + root_helper=None) + + def test_execute(self): + self.parent.namespace = 'ns' + with mock.patch('neutron.agent.linux.utils.execute') as execute: + self.netns_cmd.execute(['ip', 'link', 'list']) + execute.assert_called_once_with(['ip', 'netns', 'exec', 'ns', 'ip', + 'link', 'list'], + root_helper='sudo', + check_exit_code=True) + + def test_execute_env_var_prepend(self): + self.parent.namespace = 'ns' + with mock.patch('neutron.agent.linux.utils.execute') as execute: + env = dict(FOO=1, BAR=2) + self.netns_cmd.execute(['ip', 'link', 'list'], env) + execute.assert_called_once_with( + ['ip', 'netns', 'exec', 'ns', 'env', 'FOO=1', 'BAR=2', + 'ip', 'link', 'list'], + root_helper='sudo', check_exit_code=True) + + +class TestDeviceExists(base.BaseTestCase): + def test_device_exists(self): + with mock.patch.object(ip_lib.IPDevice, '_execute') as _execute: + _execute.return_value = LINK_SAMPLE[1] + self.assertTrue(ip_lib.device_exists('eth0')) + _execute.assert_called_once_with('o', 'link', ('show', 'eth0')) + + def test_device_does_not_exist(self): + with mock.patch.object(ip_lib.IPDevice, '_execute') as _execute: + _execute.return_value = '' + _execute.side_effect = RuntimeError + self.assertFalse(ip_lib.device_exists('eth0')) + + def test_ensure_device_is_ready(self): + ip_lib_mock = mock.Mock() + with mock.patch.object(ip_lib, 'IPDevice', return_value=ip_lib_mock): + self.assertTrue(ip_lib.ensure_device_is_ready("eth0")) + self.assertTrue(ip_lib_mock.link.set_up.called) + ip_lib_mock.reset_mock() + # device doesn't exists + ip_lib_mock.link.set_up.side_effect = RuntimeError + self.assertFalse(ip_lib.ensure_device_is_ready("eth0")) + + +class TestIpNeighCommand(TestIPCmdBase): + def setUp(self): + super(TestIpNeighCommand, self).setUp() + self.parent.name = 'tap0' + self.command = 'neigh' + self.neigh_cmd = ip_lib.IpNeighCommand(self.parent) + + def 
test_add_entry(self): + self.neigh_cmd.add(4, '192.168.45.100', 'cc:dd:ee:ff:ab:cd') + self._assert_sudo([4], ('replace', '192.168.45.100', 'lladdr', + 'cc:dd:ee:ff:ab:cd', 'nud', 'permanent', + 'dev', 'tap0')) + + def test_delete_entry(self): + self.neigh_cmd.delete(4, '192.168.45.100', 'cc:dd:ee:ff:ab:cd') + self._assert_sudo([4], ('del', '192.168.45.100', 'lladdr', + 'cc:dd:ee:ff:ab:cd', 'dev', 'tap0')) diff --git a/neutron/tests/unit/test_metadata_agent.py b/neutron/tests/unit/test_metadata_agent.py new file mode 100644 index 000000000..872adaef3 --- /dev/null +++ b/neutron/tests/unit/test_metadata_agent.py @@ -0,0 +1,581 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2012 New Dream Network, LLC (DreamHost) +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
# @author: Mark McClain, DreamHost

import contextlib
import socket

import mock
import testtools
import webob

from neutron.agent.metadata import agent
from neutron.common import constants
from neutron.common import utils
from neutron.tests import base


class FakeConf(object):
    """Minimal stand-in for the metadata agent's configuration object."""
    admin_user = 'neutron'
    admin_password = 'password'
    admin_tenant_name = 'tenant'
    auth_url = 'http://127.0.0.1'
    auth_strategy = 'keystone'
    auth_region = 'region'
    auth_insecure = False
    auth_ca_cert = None
    endpoint_type = 'adminURL'
    nova_metadata_ip = '9.9.9.9'
    nova_metadata_port = 8775
    metadata_proxy_shared_secret = 'secret'
    nova_metadata_protocol = 'http'
    nova_metadata_insecure = True
    nova_client_cert = 'nova_cert'
    nova_client_priv_key = 'nova_priv_key'
    cache_url = ''


class FakeConfCache(FakeConf):
    """Same configuration but with the in-memory lookup cache enabled."""
    cache_url = 'memory://?default_ttl=5'


class TestMetadataProxyHandlerCache(base.BaseTestCase):
    """MetadataProxyHandler tests, run with the lookup cache enabled.

    The subclass TestMetadataProxyHandlerNoCache re-runs the same tests
    with caching disabled and overrides the call-count expectations.
    """

    fake_conf = FakeConfCache

    def setUp(self):
        super(TestMetadataProxyHandlerCache, self).setUp()
        # Replace the neutron client and the module logger so no real
        # API calls or log output happen during the tests.
        self.qclient_p = mock.patch('neutronclient.v2_0.client.Client')
        self.qclient = self.qclient_p.start()

        self.log_p = mock.patch.object(agent, 'LOG')
        self.log = self.log_p.start()

        self.handler = agent.MetadataProxyHandler(self.fake_conf)

    def test_call(self):
        req = mock.Mock()
        with mock.patch.object(self.handler,
                               '_get_instance_and_tenant_id') as mock_ids:
            mock_ids.return_value = ('instance_id', 'tenant_id')
            with mock.patch.object(self.handler, '_proxy_request') as proxy:
                proxy.return_value = 'value'

                result = self.handler(req)
                self.assertEqual(result, 'value')

    def test_call_no_instance_match(self):
        # An unresolvable request maps to 404.
        req = mock.Mock()
        with mock.patch.object(self.handler,
                               '_get_instance_and_tenant_id') as mock_ids:
            mock_ids.return_value = None, None
            result = self.handler(req)
            self.assertIsInstance(result, webob.exc.HTTPNotFound)

    def test_call_internal_server_error(self):
        # Any unexpected exception maps to 500 and is logged.
        req = mock.Mock()
        with mock.patch.object(self.handler,
                               '_get_instance_and_tenant_id') as mock_ids:
            mock_ids.side_effect = Exception
            result = self.handler(req)
            self.assertIsInstance(result, webob.exc.HTTPInternalServerError)
            # Exactly two log calls are recorded for the failure.
            self.assertEqual(len(self.log.mock_calls), 2)

    def test_get_router_networks(self):
        router_id = 'router-id'
        expected = ('network_id1', 'network_id2')
        ports = {'ports': [{'network_id': 'network_id1', 'something': 42},
                           {'network_id': 'network_id2',
                            'something_else': 32}],
                 'not_used': [1, 2, 3]}
        mock_list_ports = self.qclient.return_value.list_ports
        mock_list_ports.return_value = ports
        networks = self.handler._get_router_networks(router_id)
        # Only router-interface ports of this router may be queried.
        mock_list_ports.assert_called_once_with(
            device_id=router_id,
            device_owner=constants.DEVICE_OWNER_ROUTER_INTF)
        self.assertEqual(expected, networks)

    def _test_get_router_networks_twice_helper(self):
        # Perform the same lookup twice at a frozen timestamp; with the
        # cache enabled the second call must be served from the cache.
        router_id = 'router-id'
        ports = {'ports': [{'network_id': 'network_id1', 'something': 42}],
                 'not_used': [1, 2, 3]}
        expected_networks = ('network_id1',)
        with mock.patch(
            'neutron.openstack.common.timeutils.utcnow_ts', return_value=0):
            mock_list_ports = self.qclient.return_value.list_ports
            mock_list_ports.return_value = ports
            networks = self.handler._get_router_networks(router_id)
            mock_list_ports.assert_called_once_with(
                device_id=router_id,
                device_owner=constants.DEVICE_OWNER_ROUTER_INTF)
            self.assertEqual(expected_networks, networks)
            networks = self.handler._get_router_networks(router_id)

    def test_get_router_networks_twice(self):
        self._test_get_router_networks_twice_helper()
        self.assertEqual(
            1, self.qclient.return_value.list_ports.call_count)

    def test_get_ports_for_remote_address(self):
        remote_address = 'remote_address'
        networks = 'networks'
        fixed_ips = ["ip_address=%s" % remote_address]
        ports = self.handler._get_ports_for_remote_address(remote_address,
                                                           networks)
        mock_list_ports = self.qclient.return_value.list_ports
        mock_list_ports.assert_called_once_with(
            network_id=networks, fixed_ips=fixed_ips)
        self.assertEqual(mock_list_ports.return_value.__getitem__('ports'),
                         ports)

    def _get_ports_for_remote_address_cache_hit_helper(self):
        remote_address = 'remote_address'
        networks = ('net1', 'net2')
        fixed_ips = ["ip_address=%s" % remote_address]
        ports = self.handler._get_ports_for_remote_address(remote_address,
                                                           networks)
        mock_list_ports = self.qclient.return_value.list_ports
        mock_list_ports.assert_called_once_with(
            network_id=networks, fixed_ips=fixed_ips)
        self.assertEqual(
            mock_list_ports.return_value.__getitem__('ports'), ports)
        self.assertEqual(1, mock_list_ports.call_count)
        # Second, identical lookup — must hit the cache when enabled.
        self.handler._get_ports_for_remote_address(remote_address,
                                                   networks)

    def test_get_ports_for_remote_address_cache_hit(self):
        self._get_ports_for_remote_address_cache_hit_helper()
        self.assertEqual(
            1, self.qclient.return_value.list_ports.call_count)

    def test_get_ports_network_id(self):
        # When a network id is supplied the router lookup is skipped.
        network_id = 'network-id'
        router_id = 'router-id'
        remote_address = 'remote-address'
        expected = ['port1']
        networks = (network_id,)
        with contextlib.nested(
            mock.patch.object(self.handler, '_get_ports_for_remote_address'),
            mock.patch.object(self.handler, '_get_router_networks')
        ) as (mock_get_ip_addr, mock_get_router_networks):
            mock_get_ip_addr.return_value = expected
            ports = self.handler._get_ports(remote_address, network_id,
                                            router_id)
            mock_get_ip_addr.assert_called_once_with(remote_address,
                                                     networks)
            self.assertFalse(mock_get_router_networks.called)
        self.assertEqual(expected, ports)

    def test_get_ports_router_id(self):
        # With only a router id, the router's networks are resolved first.
        router_id = 'router-id'
        remote_address = 'remote-address'
        expected = ['port1']
        networks = ('network1', 'network2')
        with contextlib.nested(
            mock.patch.object(self.handler,
                              '_get_ports_for_remote_address',
                              return_value=expected),
            mock.patch.object(self.handler,
                              '_get_router_networks',
                              return_value=networks)
        ) as (mock_get_ip_addr, mock_get_router_networks):
            ports = self.handler._get_ports(remote_address,
                                            router_id=router_id)
            mock_get_router_networks.called_once_with(router_id)
            mock_get_ip_addr.assert_called_once_with(remote_address, networks)
            self.assertEqual(expected, ports)

    def test_get_ports_no_id(self):
        # _get_ports requires either a network id or a router id.
        self.assertRaises(TypeError, self.handler._get_ports, 'remote_address')

    def _get_instance_and_tenant_id_helper(self, headers, list_ports_retval,
                                           networks=None, router_id=None):
        """Drive _get_instance_and_tenant_id and verify the client calls.

        list_ports_retval is consumed one element per list_ports call, so
        callers supply one list per expected API round-trip.
        """
        remote_address = '192.168.1.1'
        headers['X-Forwarded-For'] = remote_address
        req = mock.Mock(headers=headers)

        def mock_list_ports(*args, **kwargs):
            return {'ports': list_ports_retval.pop(0)}

        self.qclient.return_value.list_ports.side_effect = mock_list_ports
        instance_id, tenant_id = self.handler._get_instance_and_tenant_id(req)
        new_qclient_call = mock.call(
            username=FakeConf.admin_user,
            tenant_name=FakeConf.admin_tenant_name,
            region_name=FakeConf.auth_region,
            auth_url=FakeConf.auth_url,
            password=FakeConf.admin_password,
            auth_strategy=FakeConf.auth_strategy,
            token=None,
            insecure=FakeConf.auth_insecure,
            ca_cert=FakeConf.auth_ca_cert,
            endpoint_url=None,
            endpoint_type=FakeConf.endpoint_type)
        expected = [new_qclient_call]

        if router_id:
            expected.extend([
                new_qclient_call,
                mock.call().list_ports(
                    device_id=router_id,
                    device_owner=constants.DEVICE_OWNER_ROUTER_INTF
                )
            ])

        expected.extend([
            new_qclient_call,
            mock.call().list_ports(
                network_id=networks or tuple(),
                fixed_ips=['ip_address=192.168.1.1'])
        ])

        self.qclient.assert_has_calls(expected)

        return (instance_id, tenant_id)

    def test_get_instance_id_router_id(self):
        router_id = 'the_id'
        headers = {
            'X-Neutron-Router-ID': router_id
        }

        networks = ('net1', 'net2')
        ports = [
            [{'network_id': 'net1'}, {'network_id': 'net2'}],
            [{'device_id': 'device_id', 'tenant_id': 'tenant_id'}]
        ]

        self.assertEqual(
            self._get_instance_and_tenant_id_helper(headers, ports,
                                                    networks=networks,
                                                    router_id=router_id),
            ('device_id', 'tenant_id')
        )

    def test_get_instance_id_router_id_no_match(self):
        router_id = 'the_id'
        headers = {
            'X-Neutron-Router-ID': router_id
        }

        networks = ('net1', 'net2')
        ports = [
            [{'network_id': 'net1'}, {'network_id': 'net2'}],
            []
        ]
        self.assertEqual(
            self._get_instance_and_tenant_id_helper(headers, ports,
                                                    networks=networks,
                                                    router_id=router_id),
            (None, None)
        )

    def test_get_instance_id_network_id(self):
        network_id = 'the_id'
        headers = {
            'X-Neutron-Network-ID': network_id
        }

        ports = [
            [{'device_id': 'device_id',
              'tenant_id': 'tenant_id'}]
        ]

        self.assertEqual(
            self._get_instance_and_tenant_id_helper(headers, ports,
                                                    networks=('the_id',)),
            ('device_id', 'tenant_id')
        )

    def test_get_instance_id_network_id_no_match(self):
        network_id = 'the_id'
        headers = {
            'X-Neutron-Network-ID': network_id
        }

        ports = [[]]

        self.assertEqual(
            self._get_instance_and_tenant_id_helper(headers, ports,
                                                    networks=('the_id',)),
            (None, None)
        )

    def _proxy_request_test_helper(self, response_code=200, method='GET'):
        """Proxy one request to the (mocked) nova metadata service and
        return whatever _proxy_request produced for the given status."""
        hdrs = {'X-Forwarded-For': '8.8.8.8'}
        body = 'body'

        req = mock.Mock(path_info='/the_path', query_string='', headers=hdrs,
                        method=method, body=body)
        resp = mock.MagicMock(status=response_code)
        req.response = resp
        with mock.patch.object(self.handler, '_sign_instance_id') as sign:
            sign.return_value = 'signed'
            with mock.patch('httplib2.Http') as mock_http:
                resp.__getitem__.return_value = "text/plain"
                mock_http.return_value.request.return_value = (resp, 'content')

                retval = self.handler._proxy_request('the_id', 'tenant_id',
                                                     req)
                # TLS settings come straight from the (insecure) FakeConf.
                mock_http.assert_called_once_with(
                    ca_certs=None, disable_ssl_certificate_validation=True)
                mock_http.assert_has_calls([
                    mock.call().add_certificate(
                        FakeConf.nova_client_priv_key,
                        FakeConf.nova_client_cert,
                        "%s:%s" % (FakeConf.nova_metadata_ip,
                                   FakeConf.nova_metadata_port)
                    ),
                    mock.call().request(
                        'http://9.9.9.9:8775/the_path',
                        method=method,
                        headers={
                            'X-Forwarded-For': '8.8.8.8',
                            'X-Instance-ID-Signature': 'signed',
                            'X-Instance-ID': 'the_id',
                            'X-Tenant-ID': 'tenant_id'
                        },
                        body=body
                    )]
                )

                return retval

    def test_proxy_request_post(self):
        response = self._proxy_request_test_helper(method='POST')
        self.assertEqual(response.content_type, "text/plain")
        self.assertEqual(response.body, 'content')

    def test_proxy_request_200(self):
        response = self._proxy_request_test_helper(200)
        self.assertEqual(response.content_type, "text/plain")
        self.assertEqual(response.body, 'content')

    def test_proxy_request_403(self):
        self.assertIsInstance(self._proxy_request_test_helper(403),
                              webob.exc.HTTPForbidden)

    def test_proxy_request_404(self):
        self.assertIsInstance(self._proxy_request_test_helper(404),
                              webob.exc.HTTPNotFound)

    def test_proxy_request_409(self):
        self.assertIsInstance(self._proxy_request_test_helper(409),
                              webob.exc.HTTPConflict)

    def test_proxy_request_500(self):
        self.assertIsInstance(self._proxy_request_test_helper(500),
                              webob.exc.HTTPInternalServerError)

    def test_proxy_request_other_code(self):
        # Unhandled status codes are surfaced as an exception.
        with testtools.ExpectedException(Exception):
            self._proxy_request_test_helper(302)

    def test_sign_instance_id(self):
        # HMAC-SHA256 of 'foo' with the shared secret 'secret'.
        self.assertEqual(
            self.handler._sign_instance_id('foo'),
            '773ba44693c7553d6ee20f61ea5d2757a9a4f4a44d2841ae4e95b52e4cd62db4'
        )


class TestMetadataProxyHandlerNoCache(TestMetadataProxyHandlerCache):
    """Re-run the handler tests with the cache disabled: repeated lookups
    must each hit the neutron API."""

    fake_conf = FakeConf

    def test_get_router_networks_twice(self):
        self._test_get_router_networks_twice_helper()
        self.assertEqual(
            2, self.qclient.return_value.list_ports.call_count)

    def test_get_ports_for_remote_address_cache_hit(self):
        self._get_ports_for_remote_address_cache_hit_helper()
        self.assertEqual(
            2, self.qclient.return_value.list_ports.call_count)
class TestUnixDomainHttpProtocol(base.BaseTestCase):
    """The protocol must tolerate the empty client address that eventlet
    passes for Unix domain sockets."""

    def test_init_empty_client(self):
        u = agent.UnixDomainHttpProtocol(mock.Mock(), '', mock.Mock())
        self.assertEqual(u.client_address, ('', 0))

    def test_init_with_client(self):
        u = agent.UnixDomainHttpProtocol(mock.Mock(), 'foo', mock.Mock())
        self.assertEqual(u.client_address, 'foo')


class TestUnixDomainWSGIServer(base.BaseTestCase):
    """Tests for the WSGI server bound to a Unix domain socket."""

    def setUp(self):
        super(TestUnixDomainWSGIServer, self).setUp()
        self.eventlet_p = mock.patch.object(agent, 'eventlet')
        self.eventlet = self.eventlet_p.start()
        self.server = agent.UnixDomainWSGIServer('test')

    def test_start(self):
        app = mock.Mock()
        with mock.patch.object(self.server, 'pool') as pool:
            # workers=0 means "serve in-process from the green pool".
            self.server.start(app, '/the/path', workers=0, backlog=128)
            self.eventlet.assert_has_calls([
                mock.call.listen(
                    '/the/path',
                    family=socket.AF_UNIX,
                    backlog=128
                )]
            )
            pool.spawn_n.assert_called_once_with(
                self.server._run,
                app,
                self.eventlet.listen.return_value
            )

    @mock.patch('neutron.openstack.common.service.ProcessLauncher')
    def test_start_multiple_workers(self, process_launcher):
        launcher = process_launcher.return_value

        app = mock.Mock()
        self.server.start(app, '/the/path', workers=2, backlog=128)
        launcher.running = True
        launcher.launch_service.assert_called_once_with(self.server._server,
                                                        workers=2)

        self.server.stop()
        self.assertFalse(launcher.running)

        self.server.wait()
        launcher.wait.assert_called_once_with()

    def test_run(self):
        with mock.patch.object(agent, 'logging') as logging:
            self.server._run('app', 'sock')

            self.eventlet.wsgi.server.assert_called_once_with(
                'sock',
                'app',
                protocol=agent.UnixDomainHttpProtocol,
                log=mock.ANY,
                custom_pool=self.server.pool
            )
            # At least one log call must have happened.
            self.assertTrue(len(logging.mock_calls))


class TestUnixDomainMetadataProxy(base.BaseTestCase):
    """Tests for socket-directory setup, startup and state reporting."""

    def setUp(self):
        super(TestUnixDomainMetadataProxy, self).setUp()
        self.cfg_p = mock.patch.object(agent, 'cfg')
        self.cfg = self.cfg_p.start()
        looping_call_p = mock.patch(
            'neutron.openstack.common.loopingcall.FixedIntervalLoopingCall')
        self.looping_mock = looping_call_p.start()
        self.cfg.CONF.metadata_proxy_socket = '/the/path'
        self.cfg.CONF.metadata_workers = 0
        self.cfg.CONF.metadata_backlog = 128

    def test_init_doesnot_exists(self):
        # Missing socket directory is created with mode 0755.
        with mock.patch('os.path.isdir') as isdir, \
                mock.patch('os.makedirs') as makedirs:
            isdir.return_value = False
            agent.UnixDomainMetadataProxy(mock.Mock())

            isdir.assert_called_once_with('/the')
            makedirs.assert_called_once_with('/the', 0o755)

    def test_init_exists(self):
        # An existing directory triggers removal of a stale socket file.
        with mock.patch('os.path.isdir') as isdir, \
                mock.patch('os.unlink') as unlink:
            isdir.return_value = True
            agent.UnixDomainMetadataProxy(mock.Mock())

            isdir.assert_called_once_with('/the')
            unlink.assert_called_once_with('/the/path')

    def test_init_exists_unlink_no_file(self):
        # unlink() failing because the file is already gone is tolerated.
        with mock.patch('os.path.isdir') as isdir, \
                mock.patch('os.unlink') as unlink, \
                mock.patch('os.path.exists') as exists:
            isdir.return_value = True
            exists.return_value = False
            unlink.side_effect = OSError

            agent.UnixDomainMetadataProxy(mock.Mock())

            isdir.assert_called_once_with('/the')
            unlink.assert_called_once_with('/the/path')
            exists.assert_called_once_with('/the/path')

    def test_init_exists_unlink_fails_file_still_exists(self):
        # unlink() failing while the file persists must propagate.
        with mock.patch('os.path.isdir') as isdir, \
                mock.patch('os.unlink') as unlink, \
                mock.patch('os.path.exists') as exists:
            isdir.return_value = True
            exists.return_value = True
            unlink.side_effect = OSError

            with testtools.ExpectedException(OSError):
                agent.UnixDomainMetadataProxy(mock.Mock())

            isdir.assert_called_once_with('/the')
            unlink.assert_called_once_with('/the/path')
            exists.assert_called_once_with('/the/path')

    def test_run(self):
        with mock.patch.object(agent, 'MetadataProxyHandler') as handler, \
                mock.patch.object(agent, 'UnixDomainWSGIServer') as server, \
                mock.patch('os.path.isdir') as isdir, \
                mock.patch('os.makedirs') as makedirs:
            isdir.return_value = False

            p = agent.UnixDomainMetadataProxy(self.cfg.CONF)
            p.run()

            isdir.assert_called_once_with('/the')
            makedirs.assert_called_once_with('/the', 0o755)
            server.assert_has_calls([
                mock.call('neutron-metadata-agent'),
                mock.call().start(handler.return_value,
                                  '/the/path', workers=0,
                                  backlog=128),
                mock.call().wait()]
            )

    def test_main(self):
        with mock.patch.object(agent, 'UnixDomainMetadataProxy') as proxy, \
                mock.patch.object(agent, 'config') as config, \
                mock.patch.object(agent, 'cfg') as cfg, \
                mock.patch.object(utils, 'cfg'):
            agent.main()

            self.assertTrue(config.setup_logging.called)
            proxy.assert_has_calls([
                mock.call(cfg.CONF),
                mock.call().run()]
            )

    def test_init_state_reporting(self):
        with mock.patch('os.makedirs'):
            proxy = agent.UnixDomainMetadataProxy(mock.Mock())
            self.looping_mock.assert_called_once_with(proxy._report_state)
            self.looping_mock.return_value.start.assert_called_once_with(
                interval=mock.ANY)

    def test_report_state(self):
        with mock.patch('neutron.agent.rpc.PluginReportStateAPI') as state_api:
            with mock.patch('os.makedirs'):
                proxy = agent.UnixDomainMetadataProxy(mock.Mock())
                # start_flag is sent once, then dropped from agent_state.
                self.assertTrue(proxy.agent_state['start_flag'])
                proxy._report_state()
                self.assertNotIn('start_flag', proxy.agent_state)
                state_api_inst = state_api.return_value
                state_api_inst.report_state.assert_called_once_with(
                    proxy.context, proxy.agent_state, use_call=True)
# @author: Mark McClain, DreamHost

import socket

import mock
import testtools
import webob

from neutron.agent.metadata import namespace_proxy as ns_proxy
from neutron.common import utils
from neutron.tests import base


class FakeConf(object):
    """Minimal stand-in for the namespace proxy's configuration."""
    admin_user = 'neutron'
    admin_password = 'password'
    admin_tenant_name = 'tenant'
    auth_url = 'http://127.0.0.1'
    auth_strategy = 'keystone'
    auth_region = 'region'
    nova_metadata_ip = '9.9.9.9'
    nova_metadata_port = 8775
    metadata_proxy_shared_secret = 'secret'


class TestUnixDomainHttpConnection(base.BaseTestCase):
    def test_connect(self):
        # The connection must open a Unix socket at the configured path
        # with the requested timeout, ignoring the (dummy) host argument.
        with mock.patch.object(ns_proxy, 'cfg') as cfg:
            cfg.CONF.metadata_proxy_socket = '/the/path'
            with mock.patch('socket.socket') as socket_create:
                conn = ns_proxy.UnixDomainHTTPConnection('169.254.169.254',
                                                         timeout=3)

                conn.connect()

                socket_create.assert_has_calls([
                    mock.call(socket.AF_UNIX, socket.SOCK_STREAM),
                    mock.call().settimeout(3),
                    mock.call().connect('/the/path')]
                )
                self.assertEqual(conn.timeout, 3)


class TestNetworkMetadataProxyHandler(base.BaseTestCase):
    """Tests for the in-namespace proxy that forwards metadata requests
    over the agent's Unix domain socket."""

    def setUp(self):
        super(TestNetworkMetadataProxyHandler, self).setUp()
        self.log_p = mock.patch.object(ns_proxy, 'LOG')
        self.log = self.log_p.start()

        self.handler = ns_proxy.NetworkMetadataProxyHandler('router_id')

    def test_call(self):
        req = mock.Mock(headers={})
        with mock.patch.object(self.handler, '_proxy_request') as proxy_req:
            proxy_req.return_value = 'value'

            retval = self.handler(req)
            self.assertEqual(retval, 'value')
            proxy_req.assert_called_once_with(req.remote_addr,
                                              req.method,
                                              req.path_info,
                                              req.query_string,
                                              req.body)

    def test_no_argument_passed_to_init(self):
        # The handler needs a network id or a router id.
        with testtools.ExpectedException(ValueError):
            ns_proxy.NetworkMetadataProxyHandler()

    def test_call_internal_server_error(self):
        req = mock.Mock(headers={})
        with mock.patch.object(self.handler, '_proxy_request') as proxy_req:
            proxy_req.side_effect = Exception
            retval = self.handler(req)
            self.assertIsInstance(retval, webob.exc.HTTPInternalServerError)
            self.assertEqual(len(self.log.mock_calls), 2)
            self.assertTrue(proxy_req.called)

    def test_proxy_request_router_200(self):
        self.handler.router_id = 'router_id'

        resp = mock.MagicMock(status=200)
        with mock.patch('httplib2.Http') as mock_http:
            resp.__getitem__.return_value = "text/plain"
            mock_http.return_value.request.return_value = (resp, 'content')

            retval = self.handler._proxy_request('192.168.1.1',
                                                 'GET',
                                                 '/latest/meta-data',
                                                 '',
                                                 '')

            mock_http.assert_has_calls([
                mock.call().request(
                    'http://169.254.169.254/latest/meta-data',
                    method='GET',
                    headers={
                        'X-Forwarded-For': '192.168.1.1',
                        'X-Neutron-Router-ID': 'router_id'
                    },
                    connection_type=ns_proxy.UnixDomainHTTPConnection,
                    body=''
                )]
            )

            self.assertEqual(retval.headers['Content-Type'], 'text/plain')
            self.assertEqual(retval.body, 'content')

    def test_proxy_request_network_200(self):
        self.handler.network_id = 'network_id'

        resp = mock.MagicMock(status=200)
        with mock.patch('httplib2.Http') as mock_http:
            resp.__getitem__.return_value = "application/json"
            mock_http.return_value.request.return_value = (resp, '{}')

            retval = self.handler._proxy_request('192.168.1.1',
                                                 'GET',
                                                 '/latest/meta-data',
                                                 '',
                                                 '')

            mock_http.assert_has_calls([
                mock.call().request(
                    'http://169.254.169.254/latest/meta-data',
                    method='GET',
                    headers={
                        'X-Forwarded-For': '192.168.1.1',
                        'X-Neutron-Network-ID': 'network_id'
                    },
                    connection_type=ns_proxy.UnixDomainHTTPConnection,
                    body=''
                )]
            )

            self.assertEqual(retval.headers['Content-Type'],
                             'application/json')
            self.assertEqual(retval.body, '{}')

    def _test_proxy_request_network_error_helper(self, status, method,
                                                 exc_cls):
        """Proxy one request in network mode and assert that HTTP status
        `status` is translated into the webob exception `exc_cls`."""
        self.handler.network_id = 'network_id'

        resp = mock.Mock(status=status)
        with mock.patch('httplib2.Http') as mock_http:
            mock_http.return_value.request.return_value = (resp, '')

            retval = self.handler._proxy_request('192.168.1.1',
                                                 method,
                                                 '/latest/meta-data',
                                                 '',
                                                 '')

            mock_http.assert_has_calls([
                mock.call().request(
                    'http://169.254.169.254/latest/meta-data',
                    method=method,
                    headers={
                        'X-Forwarded-For': '192.168.1.1',
                        'X-Neutron-Network-ID': 'network_id'
                    },
                    connection_type=ns_proxy.UnixDomainHTTPConnection,
                    body=''
                )]
            )

            self.assertIsInstance(retval, exc_cls)

    def test_proxy_request_network_404(self):
        self._test_proxy_request_network_error_helper(
            404, 'GET', webob.exc.HTTPNotFound)

    def test_proxy_request_network_409(self):
        self._test_proxy_request_network_error_helper(
            409, 'POST', webob.exc.HTTPConflict)

    def test_proxy_request_network_500(self):
        self._test_proxy_request_network_error_helper(
            500, 'GET', webob.exc.HTTPInternalServerError)

    def test_proxy_request_network_418(self):
        # Status codes without an explicit mapping raise.
        self.handler.network_id = 'network_id'

        resp = mock.Mock(status=418)
        with mock.patch('httplib2.Http') as mock_http:
            mock_http.return_value.request.return_value = (resp, '')

            with testtools.ExpectedException(Exception):
                self.handler._proxy_request('192.168.1.1',
                                            'GET',
                                            '/latest/meta-data',
                                            '',
                                            '')

            mock_http.assert_has_calls([
                mock.call().request(
                    'http://169.254.169.254/latest/meta-data',
                    method='GET',
                    headers={
                        'X-Forwarded-For': '192.168.1.1',
                        'X-Neutron-Network-ID': 'network_id'
                    },
                    connection_type=ns_proxy.UnixDomainHTTPConnection,
                    body=''
                )]
            )

    def test_proxy_request_network_exception(self):
        # A transport-level failure propagates out of _proxy_request.
        self.handler.network_id = 'network_id'

        mock.Mock(status=500)
        with mock.patch('httplib2.Http') as mock_http:
            mock_http.return_value.request.side_effect = Exception

            with testtools.ExpectedException(Exception):
                self.handler._proxy_request('192.168.1.1',
                                            'GET',
                                            '/latest/meta-data',
                                            '',
                                            '')

            mock_http.assert_has_calls([
                mock.call().request(
                    'http://169.254.169.254/latest/meta-data',
                    method='GET',
                    headers={
                        'X-Forwarded-For': '192.168.1.1',
                        'X-Neutron-Network-ID': 'network_id'
                    },
                    connection_type=ns_proxy.UnixDomainHTTPConnection,
                    body=''
                )]
            )


class TestProxyDaemon(base.BaseTestCase):
    """Tests for the daemonized per-network/per-router proxy process."""

    def test_init(self):
        with mock.patch('neutron.agent.linux.daemon.Pidfile'):
            pd = ns_proxy.ProxyDaemon('pidfile', 9697, 'net_id', 'router_id')
            self.assertEqual(pd.router_id, 'router_id')
            self.assertEqual(pd.network_id, 'net_id')

    def test_run(self):
        with mock.patch('neutron.agent.linux.daemon.Pidfile'), \
                mock.patch('neutron.wsgi.Server') as Server:
            pd = ns_proxy.ProxyDaemon('pidfile', 9697, 'net_id',
                                      'router_id')
            pd.run()
            Server.assert_has_calls([
                mock.call('neutron-network-metadata-proxy'),
                mock.call().start(mock.ANY, 9697),
                mock.call().wait()]
            )

    def test_main(self):
        with mock.patch.object(ns_proxy, 'ProxyDaemon') as daemon, \
                mock.patch.object(ns_proxy, 'config') as config, \
                mock.patch.object(ns_proxy, 'cfg') as cfg, \
                mock.patch.object(utils, 'cfg') as utils_cfg:
            cfg.CONF.router_id = 'router_id'
            cfg.CONF.network_id = None
            cfg.CONF.metadata_port = 9697
            cfg.CONF.pid_file = 'pidfile'
            cfg.CONF.daemonize = True
            utils_cfg.CONF.log_opt_values.return_value = None
            ns_proxy.main()

            self.assertTrue(config.setup_logging.called)
            # daemonize=True starts the detached daemon.
            daemon.assert_has_calls([
                mock.call('pidfile', 9697,
                          router_id='router_id',
                          network_id=None),
                mock.call().start()]
            )

    def test_main_dont_fork(self):
        with mock.patch.object(ns_proxy, 'ProxyDaemon') as daemon, \
                mock.patch.object(ns_proxy, 'config') as config, \
                mock.patch.object(ns_proxy, 'cfg') as cfg, \
                mock.patch.object(utils, 'cfg') as utils_cfg:
            cfg.CONF.router_id = 'router_id'
            cfg.CONF.network_id = None
            cfg.CONF.metadata_port = 9697
            cfg.CONF.pid_file = 'pidfile'
            cfg.CONF.daemonize = False
            utils_cfg.CONF.log_opt_values.return_value = None
            ns_proxy.main()

            self.assertTrue(config.setup_logging.called)
            # daemonize=False runs in the foreground.
            daemon.assert_has_calls([
                mock.call('pidfile', 9697,
                          router_id='router_id',
                          network_id=None),
                mock.call().run()]
            )
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import mock +from testtools import matchers + +from neutron import context +from neutron.openstack.common import local +from neutron.tests import base + + +class TestNeutronContext(base.BaseTestCase): + + def setUp(self): + super(TestNeutronContext, self).setUp() + db_api = 'neutron.db.api.get_session' + self._db_api_session_patcher = mock.patch(db_api) + self.db_api_session = self._db_api_session_patcher.start() + + def test_neutron_context_create(self): + ctx = context.Context('user_id', 'tenant_id') + self.assertEqual('user_id', ctx.user_id) + self.assertEqual('tenant_id', ctx.project_id) + self.assertEqual('tenant_id', ctx.tenant_id) + self.assertThat(ctx.request_id, matchers.StartsWith('req-')) + self.assertEqual('user_id', ctx.user) + self.assertEqual('tenant_id', ctx.tenant) + self.assertIsNone(ctx.user_name) + self.assertIsNone(ctx.tenant_name) + + def test_neutron_context_create_logs_unknown_kwarg(self): + with mock.patch.object(context.LOG, 'debug') as mock_log: + context.Context('user_id', 'tenant_id', foo=None) + self.assertEqual(mock_log.call_count, 1) + + def test_neutron_context_create_with_name(self): + ctx = context.Context('user_id', 'tenant_id', + tenant_name='tenant_name', user_name='user_name') + # Check name is set + self.assertEqual('user_name', ctx.user_name) + self.assertEqual('tenant_name', ctx.tenant_name) + # Check user/tenant contains its ID even if user/tenant_name is passed + self.assertEqual('user_id', ctx.user) + self.assertEqual('tenant_id', ctx.tenant) + + def test_neutron_context_create_with_request_id(self): + ctx = 
context.Context('user_id', 'tenant_id', request_id='req_id_xxx') + self.assertEqual('req_id_xxx', ctx.request_id) + + def test_neutron_context_to_dict(self): + ctx = context.Context('user_id', 'tenant_id') + ctx_dict = ctx.to_dict() + self.assertEqual('user_id', ctx_dict['user_id']) + self.assertEqual('tenant_id', ctx_dict['project_id']) + self.assertEqual(ctx.request_id, ctx_dict['request_id']) + self.assertEqual('user_id', ctx_dict['user']) + self.assertEqual('tenant_id', ctx_dict['tenant']) + self.assertIsNone(ctx_dict['user_name']) + self.assertIsNone(ctx_dict['tenant_name']) + self.assertIsNone(ctx_dict['project_name']) + + def test_neutron_context_to_dict_with_name(self): + ctx = context.Context('user_id', 'tenant_id', + tenant_name='tenant_name', user_name='user_name') + ctx_dict = ctx.to_dict() + self.assertEqual('user_name', ctx_dict['user_name']) + self.assertEqual('tenant_name', ctx_dict['tenant_name']) + self.assertEqual('tenant_name', ctx_dict['project_name']) + + def test_neutron_context_admin_to_dict(self): + self.db_api_session.return_value = 'fakesession' + ctx = context.get_admin_context() + ctx_dict = ctx.to_dict() + self.assertIsNone(ctx_dict['user_id']) + self.assertIsNone(ctx_dict['tenant_id']) + self.assertIsNotNone(ctx.session) + self.assertNotIn('session', ctx_dict) + + def test_neutron_context_admin_without_session_to_dict(self): + ctx = context.get_admin_context_without_session() + ctx_dict = ctx.to_dict() + self.assertIsNone(ctx_dict['user_id']) + self.assertIsNone(ctx_dict['tenant_id']) + self.assertFalse(hasattr(ctx, 'session')) + + def test_neutron_context_with_load_roles_true(self): + ctx = context.get_admin_context() + self.assertIn('admin', ctx.roles) + + def test_neutron_context_with_load_roles_false(self): + ctx = context.get_admin_context(load_admin_roles=False) + self.assertFalse(ctx.roles) + + def test_neutron_context_elevated_retains_request_id(self): + ctx = context.Context('user_id', 'tenant_id') + 
self.assertFalse(ctx.is_admin) + req_id_before = ctx.request_id + + elevated_ctx = ctx.elevated() + self.assertTrue(elevated_ctx.is_admin) + self.assertEqual(req_id_before, elevated_ctx.request_id) + + def test_neutron_context_overwrite(self): + ctx1 = context.Context('user_id', 'tenant_id') + self.assertEqual(ctx1.request_id, local.store.context.request_id) + + # If overwrite is not specified, request_id should be updated. + ctx2 = context.Context('user_id', 'tenant_id') + self.assertNotEqual(ctx2.request_id, ctx1.request_id) + self.assertEqual(ctx2.request_id, local.store.context.request_id) + + # If overwrite is specified, request_id should be kept. + ctx3 = context.Context('user_id', 'tenant_id', overwrite=False) + self.assertNotEqual(ctx3.request_id, ctx2.request_id) + self.assertEqual(ctx2.request_id, local.store.context.request_id) + + def test_neutron_context_get_admin_context_not_update_local_store(self): + ctx = context.Context('user_id', 'tenant_id') + req_id_before = local.store.context.request_id + self.assertEqual(ctx.request_id, req_id_before) + + ctx_admin = context.get_admin_context() + self.assertEqual(req_id_before, local.store.context.request_id) + self.assertNotEqual(req_id_before, ctx_admin.request_id) diff --git a/neutron/tests/unit/test_neutron_manager.py b/neutron/tests/unit/test_neutron_manager.py new file mode 100644 index 000000000..4a8eb0e6d --- /dev/null +++ b/neutron/tests/unit/test_neutron_manager.py @@ -0,0 +1,146 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2012 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import types + +import fixtures + +from oslo.config import cfg + +from neutron import manager +from neutron.openstack.common import log as logging +from neutron.plugins.common import constants +from neutron.tests import base +from neutron.tests.unit import dummy_plugin + + +LOG = logging.getLogger(__name__) +DB_PLUGIN_KLASS = 'neutron.db.db_base_plugin_v2.NeutronDbPluginV2' + + +class MultiServiceCorePlugin(object): + supported_extension_aliases = ['lbaas', 'dummy'] + + +class CorePluginWithAgentNotifiers(object): + agent_notifiers = {'l3': 'l3_agent_notifier', + 'dhcp': 'dhcp_agent_notifier'} + + +class NeutronManagerTestCase(base.BaseTestCase): + + def setUp(self): + super(NeutronManagerTestCase, self).setUp() + self.config_parse() + self.setup_coreplugin() + self.useFixture( + fixtures.MonkeyPatch('neutron.manager.NeutronManager._instance')) + + def test_service_plugin_is_loaded(self): + cfg.CONF.set_override("core_plugin", DB_PLUGIN_KLASS) + cfg.CONF.set_override("service_plugins", + ["neutron.tests.unit.dummy_plugin." 
+ "DummyServicePlugin"]) + mgr = manager.NeutronManager.get_instance() + plugin = mgr.get_service_plugins()[constants.DUMMY] + + self.assertTrue( + isinstance(plugin, + (dummy_plugin.DummyServicePlugin, types.ClassType)), + "loaded plugin should be of type neutronDummyPlugin") + + def test_service_plugin_by_name_is_loaded(self): + cfg.CONF.set_override("core_plugin", DB_PLUGIN_KLASS) + cfg.CONF.set_override("service_plugins", ["dummy"]) + mgr = manager.NeutronManager.get_instance() + plugin = mgr.get_service_plugins()[constants.DUMMY] + + self.assertTrue( + isinstance(plugin, + (dummy_plugin.DummyServicePlugin, types.ClassType)), + "loaded plugin should be of type neutronDummyPlugin") + + def test_multiple_plugins_specified_for_service_type(self): + cfg.CONF.set_override("service_plugins", + ["neutron.tests.unit.dummy_plugin." + "DummyServicePlugin", + "neutron.tests.unit.dummy_plugin." + "DummyServicePlugin"]) + cfg.CONF.set_override("core_plugin", DB_PLUGIN_KLASS) + self.assertRaises(ValueError, manager.NeutronManager.get_instance) + + def test_multiple_plugins_by_name_specified_for_service_type(self): + cfg.CONF.set_override("service_plugins", ["dummy", "dummy"]) + cfg.CONF.set_override("core_plugin", DB_PLUGIN_KLASS) + self.assertRaises(ValueError, manager.NeutronManager.get_instance) + + def test_multiple_plugins_mixed_specified_for_service_type(self): + cfg.CONF.set_override("service_plugins", + ["neutron.tests.unit.dummy_plugin." + "DummyServicePlugin", "dummy"]) + cfg.CONF.set_override("core_plugin", DB_PLUGIN_KLASS) + self.assertRaises(ValueError, manager.NeutronManager.get_instance) + + def test_service_plugin_conflicts_with_core_plugin(self): + cfg.CONF.set_override("service_plugins", + ["neutron.tests.unit.dummy_plugin." + "DummyServicePlugin"]) + cfg.CONF.set_override("core_plugin", + "neutron.tests.unit.test_neutron_manager." 
+ "MultiServiceCorePlugin") + self.assertRaises(ValueError, manager.NeutronManager.get_instance) + + def test_core_plugin_supports_services(self): + cfg.CONF.set_override("core_plugin", + "neutron.tests.unit.test_neutron_manager." + "MultiServiceCorePlugin") + mgr = manager.NeutronManager.get_instance() + svc_plugins = mgr.get_service_plugins() + self.assertEqual(3, len(svc_plugins)) + self.assertIn(constants.CORE, svc_plugins.keys()) + self.assertIn(constants.LOADBALANCER, svc_plugins.keys()) + self.assertIn(constants.DUMMY, svc_plugins.keys()) + + def test_post_plugin_validation(self): + cfg.CONF.import_opt('dhcp_agents_per_network', + 'neutron.db.agentschedulers_db') + + self.assertIsNone(manager.validate_post_plugin_load()) + cfg.CONF.set_override('dhcp_agents_per_network', 2) + self.assertIsNone(manager.validate_post_plugin_load()) + cfg.CONF.set_override('dhcp_agents_per_network', 0) + self.assertIsNotNone(manager.validate_post_plugin_load()) + cfg.CONF.set_override('dhcp_agents_per_network', -1) + self.assertIsNotNone(manager.validate_post_plugin_load()) + + def test_pre_plugin_validation(self): + self.assertIsNotNone(manager.validate_pre_plugin_load()) + cfg.CONF.set_override('core_plugin', 'dummy.plugin') + self.assertIsNone(manager.validate_pre_plugin_load()) + + def test_manager_gathers_agent_notifiers_from_service_plugins(self): + cfg.CONF.set_override("service_plugins", + ["neutron.tests.unit.dummy_plugin." + "DummyServicePlugin"]) + cfg.CONF.set_override("core_plugin", + "neutron.tests.unit.test_neutron_manager." 
+ "CorePluginWithAgentNotifiers") + expected = {'l3': 'l3_agent_notifier', + 'dhcp': 'dhcp_agent_notifier', + 'dummy': 'dummy_agent_notifier'} + core_plugin = manager.NeutronManager.get_plugin() + self.assertEqual(expected, core_plugin.agent_notifiers) diff --git a/neutron/tests/unit/test_policy.py b/neutron/tests/unit/test_policy.py new file mode 100644 index 000000000..a17ac9aeb --- /dev/null +++ b/neutron/tests/unit/test_policy.py @@ -0,0 +1,553 @@ +# Copyright (c) 2012 OpenStack Foundation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Test of Policy Engine For Neutron""" + +import urllib2 + +import fixtures +import mock +import six + +import neutron +from neutron.api.v2 import attributes +from neutron.common import exceptions +from neutron import context +from neutron import manager +from neutron.openstack.common import importutils +from neutron.openstack.common import jsonutils as json +from neutron.openstack.common import policy as common_policy +from neutron import policy +from neutron.tests import base + + +class PolicyFileTestCase(base.BaseTestCase): + def setUp(self): + super(PolicyFileTestCase, self).setUp() + policy.reset() + self.addCleanup(policy.reset) + self.context = context.Context('fake', 'fake', is_admin=False) + self.target = {} + self.tempdir = self.useFixture(fixtures.TempDir()) + + def test_modified_policy_reloads(self): + def fake_find_config_file(_1, _2): + return self.tempdir.join('policy') + + with mock.patch.object(neutron.common.utils, + 'find_config_file', + new=fake_find_config_file): + tmpfilename = fake_find_config_file(None, None) + action = "example:test" + with open(tmpfilename, "w") as policyfile: + policyfile.write("""{"example:test": ""}""") + policy.init() + policy.enforce(self.context, action, self.target) + with open(tmpfilename, "w") as policyfile: + policyfile.write("""{"example:test": "!"}""") + # NOTE(vish): reset stored policy cache so we don't have to + # sleep(1) + policy._POLICY_CACHE = {} + policy.init() + self.assertRaises(exceptions.PolicyNotAuthorized, + policy.enforce, + self.context, + action, + self.target) + + +class PolicyTestCase(base.BaseTestCase): + def setUp(self): + super(PolicyTestCase, self).setUp() + policy.reset() + self.addCleanup(policy.reset) + # NOTE(vish): preload rules to circumvent reloading from file + policy.init() + rules = { + "true": '@', + "example:allowed": '@', + "example:denied": '!', + "example:get_http": "http:http://www.example.com", + "example:my_file": "role:compute_admin or tenant_id:%(tenant_id)s", + 
"example:early_and_fail": "! and @", + "example:early_or_success": "@ or !", + "example:lowercase_admin": "role:admin or role:sysadmin", + "example:uppercase_admin": "role:ADMIN or role:sysadmin", + } + # NOTE(vish): then overload underlying rules + common_policy.set_rules(common_policy.Rules( + dict((k, common_policy.parse_rule(v)) + for k, v in rules.items()))) + self.context = context.Context('fake', 'fake', roles=['member']) + self.target = {} + + def test_enforce_nonexistent_action_throws(self): + action = "example:noexist" + self.assertRaises(exceptions.PolicyNotAuthorized, policy.enforce, + self.context, action, self.target) + + def test_enforce_bad_action_throws(self): + action = "example:denied" + self.assertRaises(exceptions.PolicyNotAuthorized, policy.enforce, + self.context, action, self.target) + + def test_check_bad_action_noraise(self): + action = "example:denied" + result = policy.check(self.context, action, self.target) + self.assertEqual(result, False) + + def test_check_non_existent_action(self): + action = "example:idonotexist" + result_1 = policy.check(self.context, action, self.target) + self.assertFalse(result_1) + result_2 = policy.check(self.context, action, self.target, + might_not_exist=True) + self.assertTrue(result_2) + + def test_enforce_good_action(self): + action = "example:allowed" + result = policy.enforce(self.context, action, self.target) + self.assertEqual(result, True) + + def test_enforce_http_true(self): + + def fakeurlopen(url, post_data): + return six.StringIO("True") + + with mock.patch.object(urllib2, 'urlopen', new=fakeurlopen): + action = "example:get_http" + target = {} + result = policy.enforce(self.context, action, target) + self.assertEqual(result, True) + + def test_enforce_http_false(self): + + def fakeurlopen(url, post_data): + return six.StringIO("False") + + with mock.patch.object(urllib2, 'urlopen', new=fakeurlopen): + action = "example:get_http" + target = {} + 
self.assertRaises(exceptions.PolicyNotAuthorized, policy.enforce, + self.context, action, target) + + def test_templatized_enforcement(self): + target_mine = {'tenant_id': 'fake'} + target_not_mine = {'tenant_id': 'another'} + action = "example:my_file" + policy.enforce(self.context, action, target_mine) + self.assertRaises(exceptions.PolicyNotAuthorized, policy.enforce, + self.context, action, target_not_mine) + + def test_early_AND_enforcement(self): + action = "example:early_and_fail" + self.assertRaises(exceptions.PolicyNotAuthorized, policy.enforce, + self.context, action, self.target) + + def test_early_OR_enforcement(self): + action = "example:early_or_success" + policy.enforce(self.context, action, self.target) + + def test_ignore_case_role_check(self): + lowercase_action = "example:lowercase_admin" + uppercase_action = "example:uppercase_admin" + # NOTE(dprince) we mix case in the Admin role here to ensure + # case is ignored + admin_context = context.Context('admin', 'fake', roles=['AdMiN']) + policy.enforce(admin_context, lowercase_action, self.target) + policy.enforce(admin_context, uppercase_action, self.target) + + +class DefaultPolicyTestCase(base.BaseTestCase): + + def setUp(self): + super(DefaultPolicyTestCase, self).setUp() + policy.reset() + policy.init() + self.addCleanup(policy.reset) + + self.rules = { + "default": '', + "example:exist": '!', + } + + self._set_rules('default') + + self.context = context.Context('fake', 'fake') + + def _set_rules(self, default_rule): + rules = common_policy.Rules( + dict((k, common_policy.parse_rule(v)) + for k, v in self.rules.items()), default_rule) + common_policy.set_rules(rules) + + def test_policy_called(self): + self.assertRaises(exceptions.PolicyNotAuthorized, policy.enforce, + self.context, "example:exist", {}) + + def test_not_found_policy_calls_default(self): + policy.enforce(self.context, "example:noexist", {}) + + def test_default_not_found(self): + self._set_rules("default_noexist") + 
self.assertRaises(exceptions.PolicyNotAuthorized, policy.enforce, + self.context, "example:noexist", {}) + + +FAKE_RESOURCE_NAME = 'something' +FAKE_RESOURCE = {"%ss" % FAKE_RESOURCE_NAME: + {'attr': {'allow_post': True, + 'allow_put': True, + 'is_visible': True, + 'default': None, + 'enforce_policy': True, + 'validate': {'type:dict': + {'sub_attr_1': {'type:string': None}, + 'sub_attr_2': {'type:string': None}}} + }}} + + +class NeutronPolicyTestCase(base.BaseTestCase): + + def setUp(self): + super(NeutronPolicyTestCase, self).setUp() + policy.reset() + policy.init() + self.addCleanup(policy.reset) + self.admin_only_legacy = "role:admin" + self.admin_or_owner_legacy = "role:admin or tenant_id:%(tenant_id)s" + # Add a Fake 'something' resource to RESOURCE_ATTRIBUTE_MAP + attributes.RESOURCE_ATTRIBUTE_MAP.update(FAKE_RESOURCE) + self.rules = dict((k, common_policy.parse_rule(v)) for k, v in { + "context_is_admin": "role:admin", + "admin_or_network_owner": "rule:context_is_admin or " + "tenant_id:%(network:tenant_id)s", + "admin_or_owner": ("rule:context_is_admin or " + "tenant_id:%(tenant_id)s"), + "admin_only": "rule:context_is_admin", + "regular_user": "role:user", + "shared": "field:networks:shared=True", + "external": "field:networks:router:external=True", + "default": '@', + + "create_network": "rule:admin_or_owner", + "create_network:shared": "rule:admin_only", + "update_network": '@', + "update_network:shared": "rule:admin_only", + + "get_network": "rule:admin_or_owner or " + "rule:shared or " + "rule:external", + "create_port:mac": "rule:admin_or_network_owner", + "create_something": "rule:admin_or_owner", + "create_something:attr": "rule:admin_or_owner", + "create_something:attr:sub_attr_1": "rule:admin_or_owner", + "create_something:attr:sub_attr_2": "rule:admin_only", + + "get_firewall_policy": "rule:admin_or_owner or " + "rule:shared", + "get_firewall_rule": "rule:admin_or_owner or " + "rule:shared" + }.items()) + + def fakepolicyinit(): + 
common_policy.set_rules(common_policy.Rules(self.rules)) + + def remove_fake_resource(): + del attributes.RESOURCE_ATTRIBUTE_MAP["%ss" % FAKE_RESOURCE_NAME] + + self.patcher = mock.patch.object(neutron.policy, + 'init', + new=fakepolicyinit) + self.patcher.start() + self.addCleanup(remove_fake_resource) + self.context = context.Context('fake', 'fake', roles=['user']) + plugin_klass = importutils.import_class( + "neutron.db.db_base_plugin_v2.NeutronDbPluginV2") + self.manager_patcher = mock.patch('neutron.manager.NeutronManager') + fake_manager = self.manager_patcher.start() + fake_manager_instance = fake_manager.return_value + fake_manager_instance.plugin = plugin_klass() + + def _test_action_on_attr(self, context, action, attr, value, + exception=None): + action = "%s_network" % action + target = {'tenant_id': 'the_owner', attr: value} + if exception: + self.assertRaises(exception, policy.enforce, + context, action, target) + else: + result = policy.enforce(context, action, target) + self.assertEqual(result, True) + + def _test_nonadmin_action_on_attr(self, action, attr, value, + exception=None): + user_context = context.Context('', "user", roles=['user']) + self._test_action_on_attr(user_context, action, attr, + value, exception) + + def test_nonadmin_write_on_private_fails(self): + self._test_nonadmin_action_on_attr('create', 'shared', False, + exceptions.PolicyNotAuthorized) + + def test_nonadmin_read_on_private_fails(self): + self._test_nonadmin_action_on_attr('get', 'shared', False, + exceptions.PolicyNotAuthorized) + + def test_nonadmin_write_on_shared_fails(self): + self._test_nonadmin_action_on_attr('create', 'shared', True, + exceptions.PolicyNotAuthorized) + + def test_nonadmin_read_on_shared_succeeds(self): + self._test_nonadmin_action_on_attr('get', 'shared', True) + + def _test_enforce_adminonly_attribute(self, action): + admin_context = context.get_admin_context() + target = {'shared': True} + result = policy.enforce(admin_context, action, target) + 
self.assertEqual(result, True) + + def test_enforce_adminonly_attribute_create(self): + self._test_enforce_adminonly_attribute('create_network') + + def test_enforce_adminonly_attribute_update(self): + self._test_enforce_adminonly_attribute('update_network') + + def test_enforce_adminonly_attribute_no_context_is_admin_policy(self): + del self.rules[policy.ADMIN_CTX_POLICY] + self.rules['admin_only'] = common_policy.parse_rule( + self.admin_only_legacy) + self.rules['admin_or_owner'] = common_policy.parse_rule( + self.admin_or_owner_legacy) + self._test_enforce_adminonly_attribute('create_network') + + def test_enforce_adminonly_attribute_nonadminctx_returns_403(self): + action = "create_network" + target = {'shared': True, 'tenant_id': 'somebody_else'} + self.assertRaises(exceptions.PolicyNotAuthorized, policy.enforce, + self.context, action, target) + + def test_enforce_adminonly_nonadminctx_no_ctx_is_admin_policy_403(self): + del self.rules[policy.ADMIN_CTX_POLICY] + self.rules['admin_only'] = common_policy.parse_rule( + self.admin_only_legacy) + self.rules['admin_or_owner'] = common_policy.parse_rule( + self.admin_or_owner_legacy) + action = "create_network" + target = {'shared': True, 'tenant_id': 'somebody_else'} + self.assertRaises(exceptions.PolicyNotAuthorized, policy.enforce, + self.context, action, target) + + def _test_build_subattribute_match_rule(self, validate_value): + bk = FAKE_RESOURCE['%ss' % FAKE_RESOURCE_NAME]['attr']['validate'] + FAKE_RESOURCE['%ss' % FAKE_RESOURCE_NAME]['attr']['validate'] = ( + validate_value) + action = "create_something" + target = {'tenant_id': 'fake', 'attr': {'sub_attr_1': 'x'}} + self.assertFalse(policy._build_subattr_match_rule( + 'attr', + FAKE_RESOURCE['%ss' % FAKE_RESOURCE_NAME]['attr'], + action, + target)) + FAKE_RESOURCE['%ss' % FAKE_RESOURCE_NAME]['attr']['validate'] = bk + + def test_build_subattribute_match_rule_empty_dict_validator(self): + self._test_build_subattribute_match_rule({}) + + def 
test_build_subattribute_match_rule_wrong_validation_info(self): + self._test_build_subattribute_match_rule( + {'type:dict': 'wrong_stuff'}) + + def test_enforce_subattribute(self): + action = "create_something" + target = {'tenant_id': 'fake', 'attr': {'sub_attr_1': 'x'}} + result = policy.enforce(self.context, action, target, None) + self.assertEqual(result, True) + + def test_enforce_admin_only_subattribute(self): + action = "create_something" + target = {'tenant_id': 'fake', 'attr': {'sub_attr_1': 'x', + 'sub_attr_2': 'y'}} + result = policy.enforce(context.get_admin_context(), + action, target, None) + self.assertEqual(result, True) + + def test_enforce_admin_only_subattribute_nonadminctx_returns_403(self): + action = "create_something" + target = {'tenant_id': 'fake', 'attr': {'sub_attr_1': 'x', + 'sub_attr_2': 'y'}} + self.assertRaises(exceptions.PolicyNotAuthorized, policy.enforce, + self.context, action, target, None) + + def test_enforce_regularuser_on_read(self): + action = "get_network" + target = {'shared': True, 'tenant_id': 'somebody_else'} + result = policy.enforce(self.context, action, target) + self.assertTrue(result) + + def test_enforce_firewall_policy_shared(self): + action = "get_firewall_policy" + target = {'shared': True, 'tenant_id': 'somebody_else'} + result = policy.enforce(self.context, action, target) + self.assertTrue(result) + + def test_enforce_firewall_rule_shared(self): + action = "get_firewall_rule" + target = {'shared': True, 'tenant_id': 'somebody_else'} + result = policy.enforce(self.context, action, target) + self.assertTrue(result) + + def test_enforce_tenant_id_check(self): + # Trigger a policy with rule admin_or_owner + action = "create_network" + target = {'tenant_id': 'fake'} + result = policy.enforce(self.context, action, target) + self.assertTrue(result) + + def test_enforce_tenant_id_check_parent_resource(self): + + def fakegetnetwork(*args, **kwargs): + return {'tenant_id': 'fake'} + + action = "create_port:mac" + with 
mock.patch.object(manager.NeutronManager.get_instance().plugin, + 'get_network', new=fakegetnetwork): + target = {'network_id': 'whatever'} + result = policy.enforce(self.context, action, target) + self.assertTrue(result) + + def test_enforce_plugin_failure(self): + + def fakegetnetwork(*args, **kwargs): + raise NotImplementedError('Blast!') + + # the policy check and plugin method we use in this test are irrelevant + # so long that we verify that, if *f* blows up, the behavior of the + # policy engine to propagate the exception is preserved + action = "create_port:mac" + with mock.patch.object(manager.NeutronManager.get_instance().plugin, + 'get_network', new=fakegetnetwork): + target = {'network_id': 'whatever'} + self.assertRaises(NotImplementedError, + policy.enforce, + self.context, + action, + target) + + def test_enforce_tenant_id_check_parent_resource_bw_compatibility(self): + + def fakegetnetwork(*args, **kwargs): + return {'tenant_id': 'fake'} + + del self.rules['admin_or_network_owner'] + self.rules['admin_or_network_owner'] = common_policy.parse_rule( + "role:admin or tenant_id:%(network_tenant_id)s") + action = "create_port:mac" + with mock.patch.object(manager.NeutronManager.get_instance().plugin, + 'get_network', new=fakegetnetwork): + target = {'network_id': 'whatever'} + result = policy.enforce(self.context, action, target) + self.assertTrue(result) + + def test_tenant_id_check_no_target_field_raises(self): + # Try and add a bad rule + self.assertRaises( + exceptions.PolicyInitError, + common_policy.parse_rule, + 'tenant_id:(wrong_stuff)') + + def _test_enforce_tenant_id_raises(self, bad_rule): + self.rules['admin_or_owner'] = common_policy.parse_rule(bad_rule) + # Trigger a policy with rule admin_or_owner + action = "create_network" + target = {'tenant_id': 'fake'} + policy.init() + self.assertRaises(exceptions.PolicyCheckError, + policy.enforce, + self.context, action, target) + + def 
test_enforce_tenant_id_check_malformed_target_field_raises(self): + self._test_enforce_tenant_id_raises('tenant_id:%(malformed_field)s') + + def test_enforce_tenant_id_check_invalid_parent_resource_raises(self): + self._test_enforce_tenant_id_raises('tenant_id:%(foobaz_tenant_id)s') + + def test_get_roles_context_is_admin_rule_missing(self): + rules = dict((k, common_policy.parse_rule(v)) for k, v in { + "some_other_rule": "role:admin", + }.items()) + common_policy.set_rules(common_policy.Rules(rules)) + # 'admin' role is expected for bw compatibility + self.assertEqual(['admin'], policy.get_admin_roles()) + + def test_get_roles_with_role_check(self): + rules = dict((k, common_policy.parse_rule(v)) for k, v in { + policy.ADMIN_CTX_POLICY: "role:admin", + }.items()) + common_policy.set_rules(common_policy.Rules(rules)) + self.assertEqual(['admin'], policy.get_admin_roles()) + + def test_get_roles_with_rule_check(self): + rules = dict((k, common_policy.parse_rule(v)) for k, v in { + policy.ADMIN_CTX_POLICY: "rule:some_other_rule", + "some_other_rule": "role:admin", + }.items()) + common_policy.set_rules(common_policy.Rules(rules)) + self.assertEqual(['admin'], policy.get_admin_roles()) + + def test_get_roles_with_or_check(self): + self.rules = dict((k, common_policy.parse_rule(v)) for k, v in { + policy.ADMIN_CTX_POLICY: "rule:rule1 or rule:rule2", + "rule1": "role:admin_1", + "rule2": "role:admin_2" + }.items()) + self.assertEqual(['admin_1', 'admin_2'], + policy.get_admin_roles()) + + def test_get_roles_with_other_rules(self): + self.rules = dict((k, common_policy.parse_rule(v)) for k, v in { + policy.ADMIN_CTX_POLICY: "role:xxx or other:value", + }.items()) + self.assertEqual(['xxx'], policy.get_admin_roles()) + + def _test_set_rules_with_deprecated_policy(self, input_rules, + expected_rules): + policy._set_rules(json.dumps(input_rules)) + # verify deprecated policy has been removed + for pol in input_rules.keys(): + self.assertNotIn(pol, common_policy._rules) + # 
verify deprecated policy was correctly translated. Iterate + # over items for compatibility with unittest2 in python 2.6 + for rule in expected_rules: + self.assertIn(rule, common_policy._rules) + self.assertEqual(str(common_policy._rules[rule]), + expected_rules[rule]) + + def test_set_rules_with_deprecated_view_policy(self): + self._test_set_rules_with_deprecated_policy( + {'extension:router:view': 'rule:admin_or_owner'}, + {'get_network:router:external': 'rule:admin_or_owner'}) + + def test_set_rules_with_deprecated_set_policy(self): + expected_policies = ['create_network:provider:network_type', + 'create_network:provider:physical_network', + 'create_network:provider:segmentation_id', + 'update_network:provider:network_type', + 'update_network:provider:physical_network', + 'update_network:provider:segmentation_id'] + self._test_set_rules_with_deprecated_policy( + {'extension:provider_network:set': 'rule:admin_only'}, + dict((policy, 'rule:admin_only') for policy in + expected_policies)) diff --git a/neutron/tests/unit/test_post_mortem_debug.py b/neutron/tests/unit/test_post_mortem_debug.py new file mode 100644 index 000000000..582c99756 --- /dev/null +++ b/neutron/tests/unit/test_post_mortem_debug.py @@ -0,0 +1,101 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import sys + +import mock +from six import moves + +from neutron.tests import base +from neutron.tests import post_mortem_debug + + +class TestTesttoolsExceptionHandler(base.BaseTestCase): + + def test_exception_handler(self): + try: + self.assertTrue(False) + except Exception: + exc_info = sys.exc_info() + with mock.patch('traceback.print_exception') as mock_print_exception: + with mock.patch('pdb.post_mortem') as mock_post_mortem: + with mock.patch.object(post_mortem_debug, + 'get_ignored_traceback', + return_value=mock.Mock()): + post_mortem_debug.exception_handler(exc_info) + + # traceback will become post_mortem_debug.FilteredTraceback + filtered_exc_info = (exc_info[0], exc_info[1], mock.ANY) + mock_print_exception.assert_called_once_with(*filtered_exc_info) + mock_post_mortem.assert_called_once_with(mock.ANY) + + +class TestFilteredTraceback(base.BaseTestCase): + + def test_filter_traceback(self): + tb1 = mock.Mock() + tb2 = mock.Mock() + tb1.tb_next = tb2 + tb2.tb_next = None + ftb1 = post_mortem_debug.FilteredTraceback(tb1, tb2) + for attr in ['lasti', 'lineno', 'frame']: + attr_name = 'tb_%s' % attr + self.assertEqual(getattr(tb1, attr_name, None), + getattr(ftb1, attr_name, None)) + self.assertIsNone(ftb1.tb_next) + + +class TestGetIgnoredTraceback(base.BaseTestCase): + + def _test_get_ignored_traceback(self, ignored_bit_array, expected): + root_tb = mock.Mock() + + tb = root_tb + tracebacks = [tb] + for x in moves.xrange(len(ignored_bit_array) - 1): + tb.tb_next = mock.Mock() + tb = tb.tb_next + tracebacks.append(tb) + tb.tb_next = None + + tb = root_tb + for ignored in ignored_bit_array: + if ignored: + tb.tb_frame.f_globals = ['__unittest'] + else: + tb.tb_frame.f_globals = [] + tb = tb.tb_next + + actual = post_mortem_debug.get_ignored_traceback(root_tb) + if expected is not None: + expected = tracebacks[expected] + self.assertEqual(actual, expected) + + def test_no_ignored_tracebacks(self): + self._test_get_ignored_traceback([0, 0, 0], None) + + 
def test_single_member_trailing_chain(self): + self._test_get_ignored_traceback([0, 0, 1], 2) + + def test_two_member_trailing_chain(self): + self._test_get_ignored_traceback([0, 1, 1], 1) + + def test_first_traceback_ignored(self): + self._test_get_ignored_traceback([1, 0, 0], None) + + def test_middle_traceback_ignored(self): + self._test_get_ignored_traceback([0, 1, 0], None) diff --git a/neutron/tests/unit/test_provider_configuration.py b/neutron/tests/unit/test_provider_configuration.py new file mode 100644 index 000000000..17fb41fcd --- /dev/null +++ b/neutron/tests/unit/test_provider_configuration.py @@ -0,0 +1,201 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 VMware, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from oslo.config import cfg + +from neutron.common import exceptions as n_exc + +from neutron.plugins.common import constants +from neutron.services import provider_configuration as provconf +from neutron.tests import base + + +class ParseServiceProviderConfigurationTestCase(base.BaseTestCase): + def test_default_service_provider_configuration(self): + providers = cfg.CONF.service_providers.service_provider + self.assertEqual(providers, []) + + def test_parse_single_service_provider_opt(self): + cfg.CONF.set_override('service_provider', + [constants.LOADBALANCER + + ':lbaas:driver_path'], + 'service_providers') + expected = {'service_type': constants.LOADBALANCER, + 'name': 'lbaas', + 'driver': 'driver_path', + 'default': False} + res = provconf.parse_service_provider_opt() + self.assertEqual(len(res), 1) + self.assertEqual(res, [expected]) + + def test_parse_single_default_service_provider_opt(self): + cfg.CONF.set_override('service_provider', + [constants.LOADBALANCER + + ':lbaas:driver_path:default'], + 'service_providers') + expected = {'service_type': constants.LOADBALANCER, + 'name': 'lbaas', + 'driver': 'driver_path', + 'default': True} + res = provconf.parse_service_provider_opt() + self.assertEqual(len(res), 1) + self.assertEqual(res, [expected]) + + def test_parse_multi_service_provider_opt(self): + cfg.CONF.set_override('service_provider', + [constants.LOADBALANCER + + ':lbaas:driver_path', + constants.LOADBALANCER + ':name1:path1', + constants.LOADBALANCER + + ':name2:path2:default'], + 'service_providers') + expected = {'service_type': constants.LOADBALANCER, + 'name': 'lbaas', + 'driver': 'driver_path', + 'default': False} + res = provconf.parse_service_provider_opt() + self.assertEqual(len(res), 3) + self.assertEqual(res, [expected, + {'service_type': constants.LOADBALANCER, + 'name': 'name1', + 'driver': 'path1', + 'default': False}, + {'service_type': constants.LOADBALANCER, + 'name': 'name2', + 'driver': 'path2', + 'default': True}]) + + def 
test_parse_service_provider_opt_not_allowed_raises(self): + cfg.CONF.set_override('service_provider', + [constants.LOADBALANCER + + ':lbaas:driver_path', + 'svc_type:name1:path1'], + 'service_providers') + self.assertRaises(n_exc.Invalid, provconf.parse_service_provider_opt) + + def test_parse_service_provider_invalid_format(self): + cfg.CONF.set_override('service_provider', + [constants.LOADBALANCER + + ':lbaas:driver_path', + 'svc_type:name1:path1:def'], + 'service_providers') + self.assertRaises(n_exc.Invalid, provconf.parse_service_provider_opt) + cfg.CONF.set_override('service_provider', + [constants.LOADBALANCER + + ':', + 'svc_type:name1:path1:def'], + 'service_providers') + self.assertRaises(n_exc.Invalid, provconf.parse_service_provider_opt) + + def test_parse_service_provider_name_too_long(self): + name = 'a' * 256 + cfg.CONF.set_override('service_provider', + [constants.LOADBALANCER + + ':' + name + ':driver_path', + 'svc_type:name1:path1:def'], + 'service_providers') + self.assertRaises(n_exc.Invalid, provconf.parse_service_provider_opt) + + +class ProviderConfigurationTestCase(base.BaseTestCase): + def setUp(self): + super(ProviderConfigurationTestCase, self).setUp() + + def test_ensure_driver_unique(self): + pconf = provconf.ProviderConfiguration([]) + pconf.providers[('svctype', 'name')] = {'driver': 'driver', + 'default': True} + self.assertRaises(n_exc.Invalid, + pconf._ensure_driver_unique, 'driver') + self.assertIsNone(pconf._ensure_driver_unique('another_driver1')) + + def test_ensure_default_unique(self): + pconf = provconf.ProviderConfiguration([]) + pconf.providers[('svctype', 'name')] = {'driver': 'driver', + 'default': True} + self.assertRaises(n_exc.Invalid, + pconf._ensure_default_unique, + 'svctype', True) + self.assertIsNone(pconf._ensure_default_unique('svctype', False)) + self.assertIsNone(pconf._ensure_default_unique('svctype1', True)) + self.assertIsNone(pconf._ensure_default_unique('svctype1', False)) + + def 
test_add_provider(self): + pconf = provconf.ProviderConfiguration([]) + prov = {'service_type': constants.LOADBALANCER, + 'name': 'name', + 'driver': 'path', + 'default': False} + pconf.add_provider(prov) + self.assertEqual(len(pconf.providers), 1) + self.assertEqual(pconf.providers.keys(), + [(constants.LOADBALANCER, 'name')]) + self.assertEqual(pconf.providers.values(), + [{'driver': 'path', 'default': False}]) + + def test_add_duplicate_provider(self): + pconf = provconf.ProviderConfiguration([]) + prov = {'service_type': constants.LOADBALANCER, + 'name': 'name', + 'driver': 'path', + 'default': False} + pconf.add_provider(prov) + self.assertRaises(n_exc.Invalid, pconf.add_provider, prov) + self.assertEqual(len(pconf.providers), 1) + + def test_get_service_providers(self): + provs = [{'service_type': constants.LOADBALANCER, + 'name': 'name', + 'driver': 'path', + 'default': False}, + {'service_type': constants.LOADBALANCER, + 'name': 'name2', + 'driver': 'path2', + 'default': False}, + {'service_type': 'st2', + 'name': 'name', + 'driver': 'driver', + 'default': True + }, + {'service_type': 'st3', + 'name': 'name2', + 'driver': 'driver2', + 'default': True}] + pconf = provconf.ProviderConfiguration(provs) + for prov in provs: + p = pconf.get_service_providers( + filters={'name': [prov['name']], + 'service_type': prov['service_type']} + ) + self.assertEqual(p, [prov]) + + def test_get_service_providers_with_fields(self): + provs = [{'service_type': constants.LOADBALANCER, + 'name': 'name', + 'driver': 'path', + 'default': False}, + {'service_type': constants.LOADBALANCER, + 'name': 'name2', + 'driver': 'path2', + 'default': False}] + pconf = provconf.ProviderConfiguration(provs) + for prov in provs: + p = pconf.get_service_providers( + filters={'name': [prov['name']], + 'service_type': prov['service_type']}, + fields=['name'] + ) + self.assertEqual(p, [{'name': prov['name']}]) diff --git a/neutron/tests/unit/test_quota_ext.py b/neutron/tests/unit/test_quota_ext.py 
new file mode 100644 index 000000000..c95e4d3e0 --- /dev/null +++ b/neutron/tests/unit/test_quota_ext.py @@ -0,0 +1,432 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import sys + +import mock +from oslo.config import cfg +import testtools +import webtest + +from neutron.api import extensions +from neutron.api.v2 import attributes +from neutron.common import config +from neutron.common import exceptions +from neutron import context +from neutron.db import api as db +from neutron.db import quota_db +from neutron import quota +from neutron.tests import base +from neutron.tests.unit import test_api_v2 +from neutron.tests.unit import testlib_api + +TARGET_PLUGIN = ('neutron.plugins.linuxbridge.lb_neutron_plugin' + '.LinuxBridgePluginV2') + +_get_path = test_api_v2._get_path + + +class QuotaExtensionTestCase(testlib_api.WebTestCase): + + def setUp(self): + super(QuotaExtensionTestCase, self).setUp() + # Ensure existing ExtensionManager is not used + extensions.PluginAwareExtensionManager._instance = None + + # Save the global RESOURCE_ATTRIBUTE_MAP + self.saved_attr_map = {} + for resource, attrs in attributes.RESOURCE_ATTRIBUTE_MAP.iteritems(): + self.saved_attr_map[resource] = attrs.copy() + + # Create the default configurations + self.config_parse() + + # Update the plugin and extensions path + self.setup_coreplugin(TARGET_PLUGIN) + cfg.CONF.set_override( 
+ 'quota_items', + ['network', 'subnet', 'port', 'extra1'], + group='QUOTAS') + quota.QUOTAS = quota.QuotaEngine() + quota.register_resources_from_config() + self._plugin_patcher = mock.patch(TARGET_PLUGIN, autospec=True) + self.plugin = self._plugin_patcher.start() + self.plugin.return_value.supported_extension_aliases = ['quotas'] + # QUOTAS will register the items in conf when starting + # extra1 here is added later, so have to do it manually + quota.QUOTAS.register_resource_by_name('extra1') + ext_mgr = extensions.PluginAwareExtensionManager.get_instance() + db.configure_db() + app = config.load_paste_app('extensions_test_app') + ext_middleware = extensions.ExtensionMiddleware(app, ext_mgr=ext_mgr) + self.api = webtest.TestApp(ext_middleware) + + def tearDown(self): + self.api = None + self.plugin = None + db.clear_db() + + # Restore the global RESOURCE_ATTRIBUTE_MAP + attributes.RESOURCE_ATTRIBUTE_MAP = self.saved_attr_map + super(QuotaExtensionTestCase, self).tearDown() + + +class QuotaExtensionDbTestCase(QuotaExtensionTestCase): + fmt = 'json' + + def setUp(self): + cfg.CONF.set_override( + 'quota_driver', + 'neutron.db.quota_db.DbQuotaDriver', + group='QUOTAS') + super(QuotaExtensionDbTestCase, self).setUp() + + def test_quotas_loaded_right(self): + res = self.api.get(_get_path('quotas', fmt=self.fmt)) + quota = self.deserialize(res) + self.assertEqual([], quota['quotas']) + self.assertEqual(200, res.status_int) + + def test_quotas_default_values(self): + tenant_id = 'tenant_id1' + env = {'neutron.context': context.Context('', tenant_id)} + res = self.api.get(_get_path('quotas', id=tenant_id, fmt=self.fmt), + extra_environ=env) + quota = self.deserialize(res) + self.assertEqual(10, quota['quota']['network']) + self.assertEqual(10, quota['quota']['subnet']) + self.assertEqual(50, quota['quota']['port']) + self.assertEqual(-1, quota['quota']['extra1']) + + def test_show_quotas_with_admin(self): + tenant_id = 'tenant_id1' + env = {'neutron.context': 
context.Context('', tenant_id + '2', + is_admin=True)} + res = self.api.get(_get_path('quotas', id=tenant_id, fmt=self.fmt), + extra_environ=env) + self.assertEqual(200, res.status_int) + quota = self.deserialize(res) + self.assertEqual(10, quota['quota']['network']) + self.assertEqual(10, quota['quota']['subnet']) + self.assertEqual(50, quota['quota']['port']) + + def test_show_quotas_without_admin_forbidden_returns_403(self): + tenant_id = 'tenant_id1' + env = {'neutron.context': context.Context('', tenant_id + '2', + is_admin=False)} + res = self.api.get(_get_path('quotas', id=tenant_id, fmt=self.fmt), + extra_environ=env, expect_errors=True) + self.assertEqual(403, res.status_int) + + def test_show_quotas_with_owner_tenant(self): + tenant_id = 'tenant_id1' + env = {'neutron.context': context.Context('', tenant_id, + is_admin=False)} + res = self.api.get(_get_path('quotas', id=tenant_id, fmt=self.fmt), + extra_environ=env) + self.assertEqual(200, res.status_int) + quota = self.deserialize(res) + self.assertEqual(10, quota['quota']['network']) + self.assertEqual(10, quota['quota']['subnet']) + self.assertEqual(50, quota['quota']['port']) + + def test_list_quotas_with_admin(self): + tenant_id = 'tenant_id1' + env = {'neutron.context': context.Context('', tenant_id, + is_admin=True)} + res = self.api.get(_get_path('quotas', fmt=self.fmt), + extra_environ=env) + self.assertEqual(200, res.status_int) + quota = self.deserialize(res) + self.assertEqual([], quota['quotas']) + + def test_list_quotas_without_admin_forbidden_returns_403(self): + tenant_id = 'tenant_id1' + env = {'neutron.context': context.Context('', tenant_id, + is_admin=False)} + res = self.api.get(_get_path('quotas', fmt=self.fmt), + extra_environ=env, expect_errors=True) + self.assertEqual(403, res.status_int) + + def test_update_quotas_without_admin_forbidden_returns_403(self): + tenant_id = 'tenant_id1' + env = {'neutron.context': context.Context('', tenant_id, + is_admin=False)} + quotas = {'quota': 
{'network': 100}} + res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt), + self.serialize(quotas), extra_environ=env, + expect_errors=True) + self.assertEqual(403, res.status_int) + + def test_update_quotas_with_non_integer_returns_400(self): + tenant_id = 'tenant_id1' + env = {'neutron.context': context.Context('', tenant_id, + is_admin=True)} + quotas = {'quota': {'network': 'abc'}} + res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt), + self.serialize(quotas), extra_environ=env, + expect_errors=True) + self.assertEqual(400, res.status_int) + + def test_update_quotas_with_negative_integer_returns_400(self): + tenant_id = 'tenant_id1' + env = {'neutron.context': context.Context('', tenant_id, + is_admin=True)} + quotas = {'quota': {'network': -2}} + res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt), + self.serialize(quotas), extra_environ=env, + expect_errors=True) + self.assertEqual(400, res.status_int) + + def test_update_quotas_to_unlimited(self): + tenant_id = 'tenant_id1' + env = {'neutron.context': context.Context('', tenant_id, + is_admin=True)} + quotas = {'quota': {'network': -1}} + res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt), + self.serialize(quotas), extra_environ=env, + expect_errors=False) + self.assertEqual(200, res.status_int) + + def test_update_quotas_exceeding_current_limit(self): + tenant_id = 'tenant_id1' + env = {'neutron.context': context.Context('', tenant_id, + is_admin=True)} + quotas = {'quota': {'network': 120}} + res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt), + self.serialize(quotas), extra_environ=env, + expect_errors=False) + self.assertEqual(200, res.status_int) + + def test_update_quotas_with_non_support_resource_returns_400(self): + tenant_id = 'tenant_id1' + env = {'neutron.context': context.Context('', tenant_id, + is_admin=True)} + quotas = {'quota': {'abc': 100}} + res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt), + 
self.serialize(quotas), extra_environ=env, + expect_errors=True) + self.assertEqual(400, res.status_int) + + def test_update_quotas_with_admin(self): + tenant_id = 'tenant_id1' + env = {'neutron.context': context.Context('', tenant_id + '2', + is_admin=True)} + quotas = {'quota': {'network': 100}} + res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt), + self.serialize(quotas), extra_environ=env) + self.assertEqual(200, res.status_int) + env2 = {'neutron.context': context.Context('', tenant_id)} + res = self.api.get(_get_path('quotas', id=tenant_id, fmt=self.fmt), + extra_environ=env2) + quota = self.deserialize(res) + self.assertEqual(100, quota['quota']['network']) + self.assertEqual(10, quota['quota']['subnet']) + self.assertEqual(50, quota['quota']['port']) + + def test_update_attributes(self): + tenant_id = 'tenant_id1' + env = {'neutron.context': context.Context('', tenant_id + '2', + is_admin=True)} + quotas = {'quota': {'extra1': 100}} + res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt), + self.serialize(quotas), extra_environ=env) + self.assertEqual(200, res.status_int) + env2 = {'neutron.context': context.Context('', tenant_id)} + res = self.api.get(_get_path('quotas', id=tenant_id, fmt=self.fmt), + extra_environ=env2) + quota = self.deserialize(res) + self.assertEqual(100, quota['quota']['extra1']) + + def test_delete_quotas_with_admin(self): + tenant_id = 'tenant_id1' + env = {'neutron.context': context.Context('', tenant_id + '2', + is_admin=True)} + res = self.api.delete(_get_path('quotas', id=tenant_id, fmt=self.fmt), + extra_environ=env) + self.assertEqual(204, res.status_int) + + def test_delete_quotas_without_admin_forbidden_returns_403(self): + tenant_id = 'tenant_id1' + env = {'neutron.context': context.Context('', tenant_id, + is_admin=False)} + res = self.api.delete(_get_path('quotas', id=tenant_id, fmt=self.fmt), + extra_environ=env, expect_errors=True) + self.assertEqual(403, res.status_int) + + def 
test_quotas_loaded_bad_returns_404(self): + try: + res = self.api.get(_get_path('quotas'), expect_errors=True) + self.assertEqual(404, res.status_int) + except Exception: + pass + + def test_quotas_limit_check(self): + tenant_id = 'tenant_id1' + env = {'neutron.context': context.Context('', tenant_id, + is_admin=True)} + quotas = {'quota': {'network': 5}} + res = self.api.put(_get_path('quotas', id=tenant_id, + fmt=self.fmt), + self.serialize(quotas), extra_environ=env) + self.assertEqual(200, res.status_int) + quota.QUOTAS.limit_check(context.Context('', tenant_id), + tenant_id, + network=4) + + def test_quotas_limit_check_with_invalid_quota_value(self): + tenant_id = 'tenant_id1' + with testtools.ExpectedException(exceptions.InvalidQuotaValue): + quota.QUOTAS.limit_check(context.Context('', tenant_id), + tenant_id, + network=-2) + + def test_quotas_get_tenant_from_request_context(self): + tenant_id = 'tenant_id1' + env = {'neutron.context': context.Context('', tenant_id, + is_admin=True)} + res = self.api.get(_get_path('quotas/tenant', fmt=self.fmt), + extra_environ=env) + self.assertEqual(200, res.status_int) + quota = self.deserialize(res) + self.assertEqual(quota['tenant']['tenant_id'], tenant_id) + + def test_quotas_get_tenant_from_empty_request_context_returns_400(self): + env = {'neutron.context': context.Context('', '', + is_admin=True)} + res = self.api.get(_get_path('quotas/tenant', fmt=self.fmt), + extra_environ=env, expect_errors=True) + self.assertEqual(400, res.status_int) + + +class QuotaExtensionDbTestCaseXML(QuotaExtensionDbTestCase): + fmt = 'xml' + + +class QuotaExtensionCfgTestCase(QuotaExtensionTestCase): + fmt = 'json' + + def setUp(self): + cfg.CONF.set_override( + 'quota_driver', + 'neutron.quota.ConfDriver', + group='QUOTAS') + super(QuotaExtensionCfgTestCase, self).setUp() + + def test_quotas_default_values(self): + tenant_id = 'tenant_id1' + env = {'neutron.context': context.Context('', tenant_id)} + res = 
self.api.get(_get_path('quotas', id=tenant_id, fmt=self.fmt), + extra_environ=env) + quota = self.deserialize(res) + self.assertEqual(10, quota['quota']['network']) + self.assertEqual(10, quota['quota']['subnet']) + self.assertEqual(50, quota['quota']['port']) + self.assertEqual(-1, quota['quota']['extra1']) + + def test_show_quotas_with_admin(self): + tenant_id = 'tenant_id1' + env = {'neutron.context': context.Context('', tenant_id + '2', + is_admin=True)} + res = self.api.get(_get_path('quotas', id=tenant_id, fmt=self.fmt), + extra_environ=env) + self.assertEqual(200, res.status_int) + + def test_show_quotas_without_admin_forbidden(self): + tenant_id = 'tenant_id1' + env = {'neutron.context': context.Context('', tenant_id + '2', + is_admin=False)} + res = self.api.get(_get_path('quotas', id=tenant_id, fmt=self.fmt), + extra_environ=env, expect_errors=True) + self.assertEqual(403, res.status_int) + + def test_update_quotas_forbidden(self): + tenant_id = 'tenant_id1' + quotas = {'quota': {'network': 100}} + res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt), + self.serialize(quotas), + expect_errors=True) + self.assertEqual(403, res.status_int) + + def test_delete_quotas_forbidden(self): + tenant_id = 'tenant_id1' + env = {'neutron.context': context.Context('', tenant_id, + is_admin=False)} + res = self.api.delete(_get_path('quotas', id=tenant_id, fmt=self.fmt), + extra_environ=env, expect_errors=True) + self.assertEqual(403, res.status_int) + + +class QuotaExtensionCfgTestCaseXML(QuotaExtensionCfgTestCase): + fmt = 'xml' + + +class TestDbQuotaDriver(base.BaseTestCase): + """Test for neutron.db.quota_db.DbQuotaDriver.""" + + def test_get_tenant_quotas_arg(self): + """Call neutron.db.quota_db.DbQuotaDriver._get_quotas.""" + + driver = quota_db.DbQuotaDriver() + ctx = context.Context('', 'bar') + + foo_quotas = {'network': 5} + default_quotas = {'network': 10} + target_tenant = 'foo' + + with mock.patch.object(quota_db.DbQuotaDriver, + 
'get_tenant_quotas', + return_value=foo_quotas) as get_tenant_quotas: + + quotas = driver._get_quotas(ctx, + target_tenant, + default_quotas, + ['network']) + + self.assertEqual(quotas, foo_quotas) + get_tenant_quotas.assert_called_once_with(ctx, + default_quotas, + target_tenant) + + +class TestQuotaDriverLoad(base.BaseTestCase): + def setUp(self): + super(TestQuotaDriverLoad, self).setUp() + # Make sure QuotaEngine is reinitialized in each test. + quota.QUOTAS._driver = None + + def _test_quota_driver(self, cfg_driver, loaded_driver, + with_quota_db_module=True): + cfg.CONF.set_override('quota_driver', cfg_driver, group='QUOTAS') + with mock.patch.dict(sys.modules, {}): + if (not with_quota_db_module and + 'neutron.db.quota_db' in sys.modules): + del sys.modules['neutron.db.quota_db'] + driver = quota.QUOTAS.get_driver() + self.assertEqual(loaded_driver, driver.__class__.__name__) + + def test_quota_db_driver_with_quotas_table(self): + self._test_quota_driver('neutron.db.quota_db.DbQuotaDriver', + 'DbQuotaDriver', True) + + def test_quota_db_driver_fallback_conf_driver(self): + self._test_quota_driver('neutron.db.quota_db.DbQuotaDriver', + 'ConfDriver', False) + + def test_quota_conf_driver(self): + self._test_quota_driver('neutron.quota.ConfDriver', + 'ConfDriver', True) diff --git a/neutron/tests/unit/test_routerserviceinsertion.py b/neutron/tests/unit/test_routerserviceinsertion.py new file mode 100644 index 000000000..d84d0db6e --- /dev/null +++ b/neutron/tests/unit/test_routerserviceinsertion.py @@ -0,0 +1,490 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 VMware, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo.config import cfg +import webob.exc as webexc + +import neutron +from neutron.api import extensions +from neutron.api.v2 import attributes +from neutron.api.v2 import router +from neutron.common import config +from neutron import context as q_context +from neutron.db import api as db +from neutron.db import db_base_plugin_v2 +from neutron.db import l3_db +from neutron.db.loadbalancer import loadbalancer_db as lb_db +from neutron.db import routedserviceinsertion_db as rsi_db +from neutron.db import routerservicetype_db as rst_db +from neutron.db import servicetype_db as st_db +from neutron.extensions import routedserviceinsertion as rsi +from neutron.extensions import routerservicetype as rst +from neutron.plugins.common import constants +from neutron.tests import base +from neutron.tests.unit import test_api_v2 +from neutron.tests.unit import testlib_api +from neutron import wsgi + +_uuid = test_api_v2._uuid +_get_path = test_api_v2._get_path +extensions_path = ':'.join(neutron.extensions.__path__) + + +class RouterServiceInsertionTestPlugin( + rst_db.RouterServiceTypeDbMixin, + rsi_db.RoutedServiceInsertionDbMixin, + st_db.ServiceTypeManager, + lb_db.LoadBalancerPluginDb, + l3_db.L3_NAT_db_mixin, + db_base_plugin_v2.NeutronDbPluginV2): + + supported_extension_aliases = [ + "router", "router-service-type", "routed-service-insertion", + "service-type", "lbaas" + ] + + def create_router(self, context, router): + with context.session.begin(subtransactions=True): + r = super(RouterServiceInsertionTestPlugin, self).create_router( + context, router) 
+ service_type_id = router['router'].get(rst.SERVICE_TYPE_ID) + if service_type_id is not None: + r[rst.SERVICE_TYPE_ID] = service_type_id + self._process_create_router_service_type_id( + context, r) + return r + + def get_router(self, context, id, fields=None): + with context.session.begin(subtransactions=True): + r = super(RouterServiceInsertionTestPlugin, self).get_router( + context, id, fields) + rsbind = self._get_router_service_type_id_binding(context, id) + if rsbind: + r[rst.SERVICE_TYPE_ID] = rsbind['service_type_id'] + return r + + def delete_router(self, context, id): + with context.session.begin(subtransactions=True): + super(RouterServiceInsertionTestPlugin, self).delete_router( + context, id) + rsbind = self._get_router_service_type_id_binding(context, id) + if rsbind: + raise Exception('Router service-type binding is not deleted') + + def create_resource(self, res, context, resource, model): + with context.session.begin(subtransactions=True): + method_name = "create_{0}".format(res) + method = getattr(super(RouterServiceInsertionTestPlugin, self), + method_name) + o = method(context, resource) + router_id = resource[res].get(rsi.ROUTER_ID) + if router_id is not None: + o[rsi.ROUTER_ID] = router_id + self._process_create_resource_router_id( + context, o, model) + return o + + def get_resource(self, res, context, id, fields, model): + method_name = "get_{0}".format(res) + method = getattr(super(RouterServiceInsertionTestPlugin, self), + method_name) + o = method(context, id, fields) + if fields is None or rsi.ROUTER_ID in fields: + rsbind = self._get_resource_router_id_binding( + context, model, id) + if rsbind: + o[rsi.ROUTER_ID] = rsbind['router_id'] + return o + + def delete_resource(self, res, context, id, model): + method_name = "delete_{0}".format(res) + with context.session.begin(subtransactions=True): + method = getattr(super(RouterServiceInsertionTestPlugin, self), + method_name) + method(context, id) + 
self._delete_resource_router_id_binding(context, id, model) + if self._get_resource_router_id_binding(context, model, id): + raise Exception("{0}-router binding is not deleted".format(res)) + + def create_pool(self, context, pool): + return self.create_resource('pool', context, pool, lb_db.Pool) + + def get_pool(self, context, id, fields=None): + return self.get_resource('pool', context, id, fields, lb_db.Pool) + + def delete_pool(self, context, id): + return self.delete_resource('pool', context, id, lb_db.Pool) + + def create_health_monitor(self, context, health_monitor): + return self.create_resource('health_monitor', context, health_monitor, + lb_db.HealthMonitor) + + def get_health_monitor(self, context, id, fields=None): + return self.get_resource('health_monitor', context, id, fields, + lb_db.HealthMonitor) + + def delete_health_monitor(self, context, id): + return self.delete_resource('health_monitor', context, id, + lb_db.HealthMonitor) + + def create_vip(self, context, vip): + return self.create_resource('vip', context, vip, lb_db.Vip) + + def get_vip(self, context, id, fields=None): + return self.get_resource( + 'vip', context, id, fields, lb_db.Vip) + + def delete_vip(self, context, id): + return self.delete_resource('vip', context, id, lb_db.Vip) + + def stats(self, context, pool_id): + pass + + +class RouterServiceInsertionTestCase(base.BaseTestCase): + def setUp(self): + super(RouterServiceInsertionTestCase, self).setUp() + plugin = ( + "neutron.tests.unit.test_routerserviceinsertion." 
+ "RouterServiceInsertionTestPlugin" + ) + + # point config file to: neutron/tests/etc/neutron.conf.test + self.config_parse() + + #just stubbing core plugin with LoadBalancer plugin + self.setup_coreplugin(plugin) + cfg.CONF.set_override('service_plugins', []) + cfg.CONF.set_override('quota_router', -1, group='QUOTAS') + + # Ensure existing ExtensionManager is not used + + ext_mgr = extensions.PluginAwareExtensionManager( + extensions_path, + {constants.LOADBALANCER: RouterServiceInsertionTestPlugin()} + ) + extensions.PluginAwareExtensionManager._instance = ext_mgr + router.APIRouter() + + app = config.load_paste_app('extensions_test_app') + self._api = extensions.ExtensionMiddleware(app, ext_mgr=ext_mgr) + + self._tenant_id = "8c70909f-b081-452d-872b-df48e6c355d1" + + self._service_type_id = _uuid() + + self._setup_core_resources() + + # FIXME (markmcclain): The test setup makes it difficult to add core + # via the api. In the interim we'll create directly using the plugin with + # the side effect of polluting the fixture database until tearDown. 
+ + def tearDown(self): + self.api = None + db.clear_db() + super(RouterServiceInsertionTestCase, self).tearDown() + + def _setup_core_resources(self): + core_plugin = neutron.manager.NeutronManager.get_plugin() + + self._network = core_plugin.create_network( + q_context.get_admin_context(), + { + 'network': + { + 'tenant_id': self._tenant_id, + 'name': 'test net', + 'admin_state_up': True, + 'shared': False, + } + } + ) + + self._subnet = core_plugin.create_subnet( + q_context.get_admin_context(), + { + 'subnet': + { + 'network_id': self._network['id'], + 'name': 'test subnet', + 'cidr': '192.168.1.0/24', + 'ip_version': 4, + 'gateway_ip': '192.168.1.1', + 'allocation_pools': attributes.ATTR_NOT_SPECIFIED, + 'dns_nameservers': attributes.ATTR_NOT_SPECIFIED, + 'host_routes': attributes.ATTR_NOT_SPECIFIED, + 'enable_dhcp': True, + } + } + ) + + self._subnet_id = self._subnet['id'] + + def _do_request(self, method, path, data=None, params=None, action=None): + content_type = 'application/json' + body = None + if data is not None: # empty dict is valid + body = wsgi.Serializer().serialize(data, content_type) + + req = testlib_api.create_request( + path, body, content_type, + method, query_string=params) + res = req.get_response(self._api) + if res.status_code >= 400: + raise webexc.HTTPClientError(detail=res.body, code=res.status_code) + if res.status_code != webexc.HTTPNoContent.code: + return res.json + + def _router_create(self, service_type_id=None): + data = { + "router": { + "tenant_id": self._tenant_id, + "name": "test", + "admin_state_up": True, + "service_type_id": service_type_id, + } + } + + res = self._do_request('POST', _get_path('routers'), data) + return res['router'] + + def test_router_create_no_service_type_id(self): + router = self._router_create() + self.assertIsNone(router.get('service_type_id')) + + def test_router_create_with_service_type_id(self): + router = self._router_create(self._service_type_id) + 
self.assertEqual(router['service_type_id'], self._service_type_id) + + def test_router_get(self): + router = self._router_create(self._service_type_id) + res = self._do_request('GET', + _get_path('routers/{0}'.format(router['id']))) + self.assertEqual(res['router']['service_type_id'], + self._service_type_id) + + def _test_router_update(self, update_service_type_id): + router = self._router_create(self._service_type_id) + router_id = router['id'] + new_name = _uuid() + data = { + "router": { + "name": new_name, + "admin_state_up": router['admin_state_up'], + } + } + if update_service_type_id: + data["router"]["service_type_id"] = _uuid() + with testlib_api.ExpectedException( + webexc.HTTPClientError) as ctx_manager: + res = self._do_request( + 'PUT', _get_path('routers/{0}'.format(router_id)), data) + self.assertEqual(ctx_manager.exception.code, 400) + else: + res = self._do_request( + 'PUT', _get_path('routers/{0}'.format(router_id)), data) + res = self._do_request( + 'GET', _get_path('routers/{0}'.format(router['id']))) + self.assertEqual(res['router']['name'], new_name) + + def test_router_update_with_service_type_id(self): + self._test_router_update(True) + + def test_router_update_without_service_type_id(self): + self._test_router_update(False) + + def test_router_delete(self): + router = self._router_create(self._service_type_id) + self._do_request( + 'DELETE', _get_path('routers/{0}'.format(router['id']))) + + def _test_lb_setup(self): + router = self._router_create(self._service_type_id) + self._router_id = router['id'] + + def _test_pool_setup(self): + self._test_lb_setup() + + def _test_health_monitor_setup(self): + self._test_lb_setup() + + def _test_vip_setup(self): + self._test_pool_setup() + pool = self._pool_create(self._router_id) + self._pool_id = pool['id'] + + def _create_resource(self, res, data): + resp = self._do_request('POST', _get_path('lb/{0}s'.format(res)), data) + return resp[res] + + def _pool_create(self, router_id=None): + data = { + 
"pool": { + "tenant_id": self._tenant_id, + "name": "test", + "protocol": "HTTP", + "subnet_id": self._subnet_id, + "lb_method": "ROUND_ROBIN", + "router_id": router_id + } + } + + return self._create_resource('pool', data) + + def _pool_update_attrs(self, pool): + uattr = {} + fields = [ + 'name', 'description', 'lb_method', + 'health_monitors', 'admin_state_up' + ] + for field in fields: + uattr[field] = pool[field] + return uattr + + def _health_monitor_create(self, router_id=None): + data = { + "health_monitor": { + "tenant_id": self._tenant_id, + "type": "HTTP", + "delay": 1, + "timeout": 1, + "max_retries": 1, + "router_id": router_id + } + } + + return self._create_resource('health_monitor', data) + + def _health_monitor_update_attrs(self, hm): + uattr = {} + fields = ['delay', 'timeout', 'max_retries'] + for field in fields: + uattr[field] = hm[field] + return uattr + + def _vip_create(self, router_id=None): + data = { + "vip": { + "tenant_id": self._tenant_id, + "name": "test", + "protocol": "HTTP", + "protocol_port": 80, + "subnet_id": self._subnet_id, + "pool_id": self._pool_id, + "address": "192.168.1.102", + "connection_limit": 100, + "admin_state_up": True, + "router_id": router_id + } + } + + return self._create_resource('vip', data) + + def _vip_update_attrs(self, vip): + uattr = {} + fields = [ + 'name', 'description', 'pool_id', 'connection_limit', + 'admin_state_up' + ] + for field in fields: + uattr[field] = vip[field] + return uattr + + def _test_resource_create(self, res): + getattr(self, "_test_{0}_setup".format(res))() + obj = getattr(self, "_{0}_create".format(res))(self._router_id) + self.assertEqual(obj['router_id'], self._router_id) + + def _test_resource_update(self, res, update_router_id, + update_attr, update_value): + getattr(self, "_test_{0}_setup".format(res))() + obj = getattr(self, "_{0}_create".format(res))(self._router_id) + uattrs = getattr(self, "_{0}_update_attrs".format(res))(obj) + uattrs[update_attr] = update_value + data 
= {res: uattrs} + if update_router_id: + uattrs['router_id'] = self._router_id + with testlib_api.ExpectedException( + webexc.HTTPClientError) as ctx_manager: + self._do_request( + 'PUT', + _get_path('lb/{0}s/{1}'.format(res, obj['id'])), data) + self.assertEqual(ctx_manager.exception.code, 400) + else: + self._do_request( + 'PUT', + _get_path('lb/{0}s/{1}'.format(res, obj['id'])), data) + updated = self._do_request( + 'GET', + _get_path('lb/{0}s/{1}'.format(res, obj['id']))) + self.assertEqual(updated[res][update_attr], update_value) + + def _test_resource_delete(self, res, with_router_id): + getattr(self, "_test_{0}_setup".format(res))() + + func = getattr(self, "_{0}_create".format(res)) + + if with_router_id: + obj = func(self._router_id) + else: + obj = func() + self._do_request( + 'DELETE', _get_path('lb/{0}s/{1}'.format(res, obj['id']))) + + def test_pool_create(self): + self._test_resource_create('pool') + + def test_pool_update_with_router_id(self): + self._test_resource_update('pool', True, 'name', _uuid()) + + def test_pool_update_without_router_id(self): + self._test_resource_update('pool', False, 'name', _uuid()) + + def test_pool_delete_with_router_id(self): + self._test_resource_delete('pool', True) + + def test_pool_delete_without_router_id(self): + self._test_resource_delete('pool', False) + + def test_health_monitor_create(self): + self._test_resource_create('health_monitor') + + def test_health_monitor_update_with_router_id(self): + self._test_resource_update('health_monitor', True, 'timeout', 2) + + def test_health_monitor_update_without_router_id(self): + self._test_resource_update('health_monitor', False, 'timeout', 2) + + def test_health_monitor_delete_with_router_id(self): + self._test_resource_delete('health_monitor', True) + + def test_health_monitor_delete_without_router_id(self): + self._test_resource_delete('health_monitor', False) + + def test_vip_create(self): + self._test_resource_create('vip') + + def 
test_vip_update_with_router_id(self): + self._test_resource_update('vip', True, 'name', _uuid()) + + def test_vip_update_without_router_id(self): + self._test_resource_update('vip', False, 'name', _uuid()) + + def test_vip_delete_with_router_id(self): + self._test_resource_delete('vip', True) + + def test_vip_delete_without_router_id(self): + self._test_resource_delete('vip', False) diff --git a/neutron/tests/unit/test_security_groups_rpc.py b/neutron/tests/unit/test_security_groups_rpc.py new file mode 100644 index 000000000..12b6f6ee9 --- /dev/null +++ b/neutron/tests/unit/test_security_groups_rpc.py @@ -0,0 +1,2047 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2012, Nachi Ueno, NTT MCL, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import contextlib + +import mock +from oslo.config import cfg +from testtools import matchers +import webob.exc + +from neutron.agent.common import config +from neutron.agent import firewall as firewall_base +from neutron.agent.linux import iptables_manager +from neutron.agent import rpc as agent_rpc +from neutron.agent import securitygroups_rpc as sg_rpc +from neutron.common import constants as const +from neutron.common import ipv6_utils as ipv6 +from neutron.common import rpc_compat +from neutron import context +from neutron.db import securitygroups_rpc_base as sg_db_rpc +from neutron.extensions import allowedaddresspairs as addr_pair +from neutron.extensions import securitygroup as ext_sg +from neutron import manager +from neutron.tests import base +from neutron.tests.unit import test_extension_security_group as test_sg + + +FAKE_PREFIX = {const.IPv4: '10.0.0.0/24', + const.IPv6: '2001:db8::/64'} +FAKE_IP = {const.IPv4: '10.0.0.1', + const.IPv6: 'fe80::1', + 'IPv6_GLOBAL': '2001:0db8::1', + 'IPv6_LLA': 'fe80::123'} + + +class FakeSGCallback(sg_db_rpc.SecurityGroupServerRpcCallbackMixin): + def get_port_from_device(self, device): + device = self.devices.get(device) + if device: + device['security_group_rules'] = [] + device['security_group_source_groups'] = [] + device['fixed_ips'] = [ip['ip_address'] + for ip in device['fixed_ips']] + return device + + +class SGServerRpcCallBackMixinTestCase(test_sg.SecurityGroupDBTestCase): + def setUp(self, plugin=None): + cfg.CONF.set_default('firewall_driver', + 'neutron.agent.firewall.NoopFirewallDriver', + group='SECURITYGROUP') + super(SGServerRpcCallBackMixinTestCase, self).setUp(plugin) + self.rpc = FakeSGCallback() + + def test_security_group_rules_for_devices_ipv4_ingress(self): + fake_prefix = FAKE_PREFIX[const.IPv4] + with self.network() as n: + with contextlib.nested(self.subnet(n), + self.security_group()) as (subnet_v4, + sg1): + sg1_id = sg1['security_group']['id'] + rule1 = self._build_security_group_rule( 
+ sg1_id, + 'ingress', const.PROTO_NAME_TCP, '22', + '22') + rule2 = self._build_security_group_rule( + sg1_id, + 'ingress', const.PROTO_NAME_TCP, '23', + '23', fake_prefix) + rules = { + 'security_group_rules': [rule1['security_group_rule'], + rule2['security_group_rule']]} + res = self._create_security_group_rule(self.fmt, rules) + self.deserialize(self.fmt, res) + self.assertEqual(res.status_int, webob.exc.HTTPCreated.code) + + res1 = self._create_port( + self.fmt, n['network']['id'], + security_groups=[sg1_id]) + ports_rest1 = self.deserialize(self.fmt, res1) + port_id1 = ports_rest1['port']['id'] + self.rpc.devices = {port_id1: ports_rest1['port']} + devices = [port_id1, 'no_exist_device'] + ctx = context.get_admin_context() + ports_rpc = self.rpc.security_group_rules_for_devices( + ctx, devices=devices) + port_rpc = ports_rpc[port_id1] + expected = [{'direction': 'egress', 'ethertype': const.IPv4, + 'security_group_id': sg1_id}, + {'direction': 'egress', 'ethertype': const.IPv6, + 'security_group_id': sg1_id}, + {'direction': 'ingress', + 'protocol': const.PROTO_NAME_TCP, + 'ethertype': const.IPv4, + 'port_range_max': 22, + 'security_group_id': sg1_id, + 'port_range_min': 22}, + {'direction': 'ingress', + 'protocol': const.PROTO_NAME_TCP, + 'ethertype': const.IPv4, + 'port_range_max': 23, 'security_group_id': sg1_id, + 'port_range_min': 23, + 'source_ip_prefix': fake_prefix}, + ] + self.assertEqual(port_rpc['security_group_rules'], + expected) + self._delete('ports', port_id1) + + def test_security_group_rules_for_devices_ipv4_ingress_addr_pair(self): + plugin_obj = manager.NeutronManager.get_plugin() + if ('allowed-address-pairs' + not in plugin_obj.supported_extension_aliases): + self.skipTest("Test depeneds on allowed-address-pairs extension") + fake_prefix = FAKE_PREFIX['IPv4'] + with self.network() as n: + with contextlib.nested(self.subnet(n), + self.security_group()) as (subnet_v4, + sg1): + sg1_id = sg1['security_group']['id'] + rule1 = 
self._build_security_group_rule( + sg1_id, + 'ingress', 'tcp', '22', + '22') + rule2 = self._build_security_group_rule( + sg1_id, + 'ingress', 'tcp', '23', + '23', fake_prefix) + rules = { + 'security_group_rules': [rule1['security_group_rule'], + rule2['security_group_rule']]} + res = self._create_security_group_rule(self.fmt, rules) + self.deserialize(self.fmt, res) + self.assertEqual(res.status_int, 201) + address_pairs = [{'mac_address': '00:00:00:00:00:01', + 'ip_address': '10.0.0.0/24'}, + {'mac_address': '00:00:00:00:00:01', + 'ip_address': '11.0.0.1'}] + res1 = self._create_port( + self.fmt, n['network']['id'], + security_groups=[sg1_id], + arg_list=(addr_pair.ADDRESS_PAIRS,), + allowed_address_pairs=address_pairs) + ports_rest1 = self.deserialize(self.fmt, res1) + port_id1 = ports_rest1['port']['id'] + self.rpc.devices = {port_id1: ports_rest1['port']} + devices = [port_id1, 'no_exist_device'] + ctx = context.get_admin_context() + ports_rpc = self.rpc.security_group_rules_for_devices( + ctx, devices=devices) + port_rpc = ports_rpc[port_id1] + expected = [{'direction': 'egress', 'ethertype': 'IPv4', + 'security_group_id': sg1_id}, + {'direction': 'egress', 'ethertype': 'IPv6', + 'security_group_id': sg1_id}, + {'direction': 'ingress', + 'protocol': 'tcp', 'ethertype': 'IPv4', + 'port_range_max': 22, + 'security_group_id': sg1_id, + 'port_range_min': 22}, + {'direction': 'ingress', 'protocol': 'tcp', + 'ethertype': 'IPv4', + 'port_range_max': 23, 'security_group_id': sg1_id, + 'port_range_min': 23, + 'source_ip_prefix': fake_prefix}, + ] + self.assertEqual(port_rpc['security_group_rules'], + expected) + self.assertEqual(port_rpc['allowed_address_pairs'], + address_pairs) + self._delete('ports', port_id1) + + def test_security_group_rules_for_devices_ipv4_egress(self): + fake_prefix = FAKE_PREFIX[const.IPv4] + with self.network() as n: + with contextlib.nested(self.subnet(n), + self.security_group()) as (subnet_v4, + sg1): + sg1_id = 
sg1['security_group']['id'] + rule1 = self._build_security_group_rule( + sg1_id, + 'egress', const.PROTO_NAME_TCP, '22', + '22') + rule2 = self._build_security_group_rule( + sg1_id, + 'egress', const.PROTO_NAME_UDP, '23', + '23', fake_prefix) + rules = { + 'security_group_rules': [rule1['security_group_rule'], + rule2['security_group_rule']]} + res = self._create_security_group_rule(self.fmt, rules) + self.deserialize(self.fmt, res) + self.assertEqual(res.status_int, webob.exc.HTTPCreated.code) + + res1 = self._create_port( + self.fmt, n['network']['id'], + security_groups=[sg1_id]) + ports_rest1 = self.deserialize(self.fmt, res1) + port_id1 = ports_rest1['port']['id'] + self.rpc.devices = {port_id1: ports_rest1['port']} + devices = [port_id1, 'no_exist_device'] + ctx = context.get_admin_context() + ports_rpc = self.rpc.security_group_rules_for_devices( + ctx, devices=devices) + port_rpc = ports_rpc[port_id1] + expected = [{'direction': 'egress', 'ethertype': const.IPv4, + 'security_group_id': sg1_id}, + {'direction': 'egress', 'ethertype': const.IPv6, + 'security_group_id': sg1_id}, + {'direction': 'egress', + 'protocol': const.PROTO_NAME_TCP, + 'ethertype': const.IPv4, + 'port_range_max': 22, + 'security_group_id': sg1_id, + 'port_range_min': 22}, + {'direction': 'egress', + 'protocol': const.PROTO_NAME_UDP, + 'ethertype': const.IPv4, + 'port_range_max': 23, 'security_group_id': sg1_id, + 'port_range_min': 23, + 'dest_ip_prefix': fake_prefix}, + ] + self.assertEqual(port_rpc['security_group_rules'], + expected) + self._delete('ports', port_id1) + + def test_security_group_rules_for_devices_ipv4_source_group(self): + + with self.network() as n: + with contextlib.nested(self.subnet(n), + self.security_group(), + self.security_group()) as (subnet_v4, + sg1, + sg2): + sg1_id = sg1['security_group']['id'] + sg2_id = sg2['security_group']['id'] + rule1 = self._build_security_group_rule( + sg1_id, + 'ingress', const.PROTO_NAME_TCP, '24', + '25', 
remote_group_id=sg2['security_group']['id']) + rules = { + 'security_group_rules': [rule1['security_group_rule']]} + res = self._create_security_group_rule(self.fmt, rules) + self.deserialize(self.fmt, res) + self.assertEqual(res.status_int, webob.exc.HTTPCreated.code) + + res1 = self._create_port( + self.fmt, n['network']['id'], + security_groups=[sg1_id, + sg2_id]) + ports_rest1 = self.deserialize(self.fmt, res1) + port_id1 = ports_rest1['port']['id'] + self.rpc.devices = {port_id1: ports_rest1['port']} + devices = [port_id1, 'no_exist_device'] + + res2 = self._create_port( + self.fmt, n['network']['id'], + security_groups=[sg2_id]) + ports_rest2 = self.deserialize(self.fmt, res2) + port_id2 = ports_rest2['port']['id'] + ctx = context.get_admin_context() + ports_rpc = self.rpc.security_group_rules_for_devices( + ctx, devices=devices) + port_rpc = ports_rpc[port_id1] + expected = [{'direction': 'egress', 'ethertype': const.IPv4, + 'security_group_id': sg1_id}, + {'direction': 'egress', 'ethertype': const.IPv6, + 'security_group_id': sg1_id}, + {'direction': 'egress', 'ethertype': const.IPv4, + 'security_group_id': sg2_id}, + {'direction': 'egress', 'ethertype': const.IPv6, + 'security_group_id': sg2_id}, + {'direction': u'ingress', + 'source_ip_prefix': u'10.0.0.3/32', + 'protocol': const.PROTO_NAME_TCP, + 'ethertype': const.IPv4, + 'port_range_max': 25, 'port_range_min': 24, + 'remote_group_id': sg2_id, + 'security_group_id': sg1_id}, + ] + self.assertEqual(port_rpc['security_group_rules'], + expected) + self._delete('ports', port_id1) + self._delete('ports', port_id2) + + def test_security_group_rules_for_devices_ipv6_ingress(self): + fake_prefix = FAKE_PREFIX[const.IPv6] + fake_gateway = FAKE_IP[const.IPv6] + with self.network() as n: + with contextlib.nested(self.subnet(n, + gateway_ip=fake_gateway, + cidr=fake_prefix, + ip_version=6), + self.security_group()) as (subnet_v6, + sg1): + sg1_id = sg1['security_group']['id'] + rule1 = 
self._build_security_group_rule( + sg1_id, + 'ingress', const.PROTO_NAME_TCP, '22', + '22', + ethertype=const.IPv6) + rule2 = self._build_security_group_rule( + sg1_id, + 'ingress', const.PROTO_NAME_UDP, '23', + '23', fake_prefix, + ethertype=const.IPv6) + rules = { + 'security_group_rules': [rule1['security_group_rule'], + rule2['security_group_rule']]} + res = self._create_security_group_rule(self.fmt, rules) + self.deserialize(self.fmt, res) + self.assertEqual(res.status_int, webob.exc.HTTPCreated.code) + + res1 = self._create_port( + self.fmt, n['network']['id'], + fixed_ips=[{'subnet_id': subnet_v6['subnet']['id']}], + security_groups=[sg1_id]) + ports_rest1 = self.deserialize(self.fmt, res1) + port_id1 = ports_rest1['port']['id'] + self.rpc.devices = {port_id1: ports_rest1['port']} + devices = [port_id1, 'no_exist_device'] + ctx = context.get_admin_context() + ports_rpc = self.rpc.security_group_rules_for_devices( + ctx, devices=devices) + port_rpc = ports_rpc[port_id1] + expected = [{'direction': 'egress', 'ethertype': const.IPv4, + 'security_group_id': sg1_id}, + {'direction': 'egress', 'ethertype': const.IPv6, + 'security_group_id': sg1_id}, + {'direction': 'ingress', + 'protocol': const.PROTO_NAME_TCP, + 'ethertype': const.IPv6, + 'port_range_max': 22, + 'security_group_id': sg1_id, + 'port_range_min': 22}, + {'direction': 'ingress', + 'protocol': const.PROTO_NAME_UDP, + 'ethertype': const.IPv6, + 'port_range_max': 23, + 'security_group_id': sg1_id, + 'port_range_min': 23, + 'source_ip_prefix': fake_prefix}, + {'direction': 'ingress', + 'protocol': const.PROTO_NAME_ICMP_V6, + 'ethertype': const.IPv6, + 'source_ip_prefix': fake_gateway, + 'source_port_range_min': const.ICMPV6_TYPE_RA}, + ] + self.assertEqual(port_rpc['security_group_rules'], + expected) + self._delete('ports', port_id1) + + def test_security_group_ra_rules_for_devices_ipv6_gateway_global(self): + fake_prefix = FAKE_PREFIX[const.IPv6] + fake_gateway = FAKE_IP['IPv6_GLOBAL'] + with 
self.network() as n: + with contextlib.nested(self.subnet(n, + gateway_ip=fake_gateway, + cidr=fake_prefix, + ip_version=6, + ipv6_ra_mode=const.IPV6_SLAAC), + self.security_group()) as (subnet_v6, + sg1): + sg1_id = sg1['security_group']['id'] + rule1 = self._build_security_group_rule( + sg1_id, + 'ingress', const.PROTO_NAME_TCP, '22', + '22', + ethertype=const.IPv6) + rules = { + 'security_group_rules': [rule1['security_group_rule']]} + self._make_security_group_rule(self.fmt, rules) + + # Create gateway port + gateway_res = self._make_port( + self.fmt, n['network']['id'], + fixed_ips=[{'subnet_id': subnet_v6['subnet']['id'], + 'ip_address': fake_gateway}], + device_owner='network:router_interface') + gateway_mac = gateway_res['port']['mac_address'] + gateway_port_id = gateway_res['port']['id'] + gateway_lla_ip = str(ipv6.get_ipv6_addr_by_EUI64( + const.IPV6_LLA_PREFIX, + gateway_mac)) + + ports_rest1 = self._make_port( + self.fmt, n['network']['id'], + fixed_ips=[{'subnet_id': subnet_v6['subnet']['id']}], + security_groups=[sg1_id]) + port_id1 = ports_rest1['port']['id'] + self.rpc.devices = {port_id1: ports_rest1['port']} + devices = [port_id1, 'no_exist_device'] + ctx = context.get_admin_context() + ports_rpc = self.rpc.security_group_rules_for_devices( + ctx, devices=devices) + port_rpc = ports_rpc[port_id1] + expected = [{'direction': 'egress', 'ethertype': const.IPv4, + 'security_group_id': sg1_id}, + {'direction': 'egress', 'ethertype': const.IPv6, + 'security_group_id': sg1_id}, + {'direction': 'ingress', + 'protocol': const.PROTO_NAME_TCP, + 'ethertype': const.IPv6, + 'port_range_max': 22, + 'security_group_id': sg1_id, + 'port_range_min': 22}, + {'direction': 'ingress', + 'protocol': const.PROTO_NAME_ICMP_V6, + 'ethertype': const.IPv6, + 'source_ip_prefix': gateway_lla_ip, + 'source_port_range_min': const.ICMPV6_TYPE_RA}, + ] + self.assertEqual(port_rpc['security_group_rules'], + expected) + self._delete('ports', port_id1) + # Note(xuhanp): remove 
gateway port's fixed_ips or gateway port + # deletion will be prevented. + data = {'port': {'fixed_ips': []}} + req = self.new_update_request('ports', data, gateway_port_id) + self.deserialize(self.fmt, req.get_response(self.api)) + self._delete('ports', gateway_port_id) + + def test_security_group_rule_for_device_ipv6_multi_router_interfaces(self): + fake_prefix = FAKE_PREFIX[const.IPv6] + fake_gateway = FAKE_IP['IPv6_GLOBAL'] + with self.network() as n: + with contextlib.nested(self.subnet(n, + gateway_ip=fake_gateway, + cidr=fake_prefix, + ip_version=6, + ipv6_ra_mode=const.IPV6_SLAAC), + self.security_group()) as (subnet_v6, + sg1): + sg1_id = sg1['security_group']['id'] + rule1 = self._build_security_group_rule( + sg1_id, + 'ingress', const.PROTO_NAME_TCP, '22', + '22', + ethertype=const.IPv6) + rules = { + 'security_group_rules': [rule1['security_group_rule']]} + self._make_security_group_rule(self.fmt, rules) + + # Create gateway port + gateway_res = self._make_port( + self.fmt, n['network']['id'], + fixed_ips=[{'subnet_id': subnet_v6['subnet']['id'], + 'ip_address': fake_gateway}], + device_owner='network:router_interface') + gateway_mac = gateway_res['port']['mac_address'] + gateway_port_id = gateway_res['port']['id'] + gateway_lla_ip = str(ipv6.get_ipv6_addr_by_EUI64( + const.IPV6_LLA_PREFIX, + gateway_mac)) + # Create another router interface port + interface_res = self._make_port( + self.fmt, n['network']['id'], + fixed_ips=[{'subnet_id': subnet_v6['subnet']['id']}], + device_owner='network:router_interface') + interface_port_id = interface_res['port']['id'] + + ports_rest1 = self._make_port( + self.fmt, n['network']['id'], + fixed_ips=[{'subnet_id': subnet_v6['subnet']['id']}], + security_groups=[sg1_id]) + port_id1 = ports_rest1['port']['id'] + self.rpc.devices = {port_id1: ports_rest1['port']} + devices = [port_id1, 'no_exist_device'] + ctx = context.get_admin_context() + ports_rpc = self.rpc.security_group_rules_for_devices( + ctx, devices=devices) 
+ port_rpc = ports_rpc[port_id1] + expected = [{'direction': 'egress', 'ethertype': const.IPv4, + 'security_group_id': sg1_id}, + {'direction': 'egress', 'ethertype': const.IPv6, + 'security_group_id': sg1_id}, + {'direction': 'ingress', + 'protocol': const.PROTO_NAME_TCP, + 'ethertype': const.IPv6, + 'port_range_max': 22, + 'security_group_id': sg1_id, + 'port_range_min': 22}, + {'direction': 'ingress', + 'protocol': const.PROTO_NAME_ICMP_V6, + 'ethertype': const.IPv6, + 'source_ip_prefix': gateway_lla_ip, + 'source_port_range_min': const.ICMPV6_TYPE_RA}, + ] + self.assertEqual(port_rpc['security_group_rules'], + expected) + self._delete('ports', port_id1) + data = {'port': {'fixed_ips': []}} + req = self.new_update_request('ports', data, gateway_port_id) + self.deserialize(self.fmt, req.get_response(self.api)) + req = self.new_update_request('ports', data, interface_port_id) + self.deserialize(self.fmt, req.get_response(self.api)) + self._delete('ports', gateway_port_id) + self._delete('ports', interface_port_id) + + def test_security_group_ra_rules_for_devices_ipv6_gateway_lla(self): + fake_prefix = FAKE_PREFIX[const.IPv6] + fake_gateway = FAKE_IP['IPv6_LLA'] + with self.network() as n: + with contextlib.nested(self.subnet(n, + gateway_ip=fake_gateway, + cidr=fake_prefix, + ip_version=6, + ipv6_ra_mode=const.IPV6_SLAAC), + self.security_group()) as (subnet_v6, + sg1): + sg1_id = sg1['security_group']['id'] + rule1 = self._build_security_group_rule( + sg1_id, + 'ingress', const.PROTO_NAME_TCP, '22', + '22', + ethertype=const.IPv6) + rules = { + 'security_group_rules': [rule1['security_group_rule']]} + self._make_security_group_rule(self.fmt, rules) + + ports_rest1 = self._make_port( + self.fmt, n['network']['id'], + fixed_ips=[{'subnet_id': subnet_v6['subnet']['id']}], + security_groups=[sg1_id]) + port_id1 = ports_rest1['port']['id'] + self.rpc.devices = {port_id1: ports_rest1['port']} + devices = [port_id1, 'no_exist_device'] + ctx = context.get_admin_context() 
+ ports_rpc = self.rpc.security_group_rules_for_devices( + ctx, devices=devices) + port_rpc = ports_rpc[port_id1] + expected = [{'direction': 'egress', 'ethertype': const.IPv4, + 'security_group_id': sg1_id}, + {'direction': 'egress', 'ethertype': const.IPv6, + 'security_group_id': sg1_id}, + {'direction': 'ingress', + 'protocol': const.PROTO_NAME_TCP, + 'ethertype': const.IPv6, + 'port_range_max': 22, + 'security_group_id': sg1_id, + 'port_range_min': 22}, + {'direction': 'ingress', + 'protocol': const.PROTO_NAME_ICMP_V6, + 'ethertype': const.IPv6, + 'source_ip_prefix': fake_gateway, + 'source_port_range_min': const.ICMPV6_TYPE_RA}, + ] + self.assertEqual(port_rpc['security_group_rules'], + expected) + self._delete('ports', port_id1) + + def test_security_group_ra_rules_for_devices_ipv6_no_gateway_port(self): + fake_prefix = FAKE_PREFIX[const.IPv6] + with self.network() as n: + with contextlib.nested(self.subnet(n, + gateway_ip=None, + cidr=fake_prefix, + ip_version=6, + ipv6_ra_mode=const.IPV6_SLAAC), + self.security_group()) as (subnet_v6, + sg1): + sg1_id = sg1['security_group']['id'] + rule1 = self._build_security_group_rule( + sg1_id, + 'ingress', const.PROTO_NAME_TCP, '22', + '22', + ethertype=const.IPv6) + rules = { + 'security_group_rules': [rule1['security_group_rule']]} + self._make_security_group_rule(self.fmt, rules) + + ports_rest1 = self._make_port( + self.fmt, n['network']['id'], + fixed_ips=[{'subnet_id': subnet_v6['subnet']['id']}], + security_groups=[sg1_id]) + port_id1 = ports_rest1['port']['id'] + self.rpc.devices = {port_id1: ports_rest1['port']} + devices = [port_id1, 'no_exist_device'] + ctx = context.get_admin_context() + ports_rpc = self.rpc.security_group_rules_for_devices( + ctx, devices=devices) + port_rpc = ports_rpc[port_id1] + expected = [{'direction': 'egress', 'ethertype': const.IPv4, + 'security_group_id': sg1_id}, + {'direction': 'egress', 'ethertype': const.IPv6, + 'security_group_id': sg1_id}, + {'direction': 'ingress', + 
'protocol': const.PROTO_NAME_TCP, + 'ethertype': const.IPv6, + 'port_range_max': 22, + 'security_group_id': sg1_id, + 'port_range_min': 22}, + ] + self.assertEqual(port_rpc['security_group_rules'], + expected) + self._delete('ports', port_id1) + + def test_security_group_rules_for_devices_ipv6_egress(self): + fake_prefix = FAKE_PREFIX[const.IPv6] + fake_gateway = FAKE_IP[const.IPv6] + with self.network() as n: + with contextlib.nested(self.subnet(n, + gateway_ip=fake_gateway, + cidr=fake_prefix, + ip_version=6), + self.security_group()) as (subnet_v6, + sg1): + sg1_id = sg1['security_group']['id'] + rule1 = self._build_security_group_rule( + sg1_id, + 'egress', const.PROTO_NAME_TCP, '22', + '22', + ethertype=const.IPv6) + rule2 = self._build_security_group_rule( + sg1_id, + 'egress', const.PROTO_NAME_UDP, '23', + '23', fake_prefix, + ethertype=const.IPv6) + rules = { + 'security_group_rules': [rule1['security_group_rule'], + rule2['security_group_rule']]} + self._make_security_group_rule(self.fmt, rules) + + ports_rest1 = self._make_port( + self.fmt, n['network']['id'], + fixed_ips=[{'subnet_id': subnet_v6['subnet']['id']}], + security_groups=[sg1_id]) + port_id1 = ports_rest1['port']['id'] + self.rpc.devices = {port_id1: ports_rest1['port']} + devices = [port_id1, 'no_exist_device'] + + ctx = context.get_admin_context() + ports_rpc = self.rpc.security_group_rules_for_devices( + ctx, devices=devices) + port_rpc = ports_rpc[port_id1] + expected = [{'direction': 'egress', 'ethertype': const.IPv4, + 'security_group_id': sg1_id}, + {'direction': 'egress', 'ethertype': const.IPv6, + 'security_group_id': sg1_id}, + {'direction': 'egress', + 'protocol': const.PROTO_NAME_TCP, + 'ethertype': const.IPv6, + 'port_range_max': 22, + 'security_group_id': sg1_id, + 'port_range_min': 22}, + {'direction': 'egress', + 'protocol': const.PROTO_NAME_UDP, + 'ethertype': const.IPv6, + 'port_range_max': 23, + 'security_group_id': sg1_id, + 'port_range_min': 23, + 'dest_ip_prefix': 
fake_prefix}, + {'direction': 'ingress', + 'protocol': const.PROTO_NAME_ICMP_V6, + 'ethertype': const.IPv6, + 'source_ip_prefix': fake_gateway, + 'source_port_range_min': const.ICMPV6_TYPE_RA}, + ] + self.assertEqual(port_rpc['security_group_rules'], + expected) + self._delete('ports', port_id1) + + def test_security_group_rules_for_devices_ipv6_source_group(self): + fake_prefix = FAKE_PREFIX[const.IPv6] + fake_gateway = FAKE_IP[const.IPv6] + with self.network() as n: + with contextlib.nested(self.subnet(n, + gateway_ip=fake_gateway, + cidr=fake_prefix, + ip_version=6), + self.security_group(), + self.security_group()) as (subnet_v6, + sg1, + sg2): + sg1_id = sg1['security_group']['id'] + sg2_id = sg2['security_group']['id'] + rule1 = self._build_security_group_rule( + sg1_id, + 'ingress', const.PROTO_NAME_TCP, '24', + '25', + ethertype=const.IPv6, + remote_group_id=sg2['security_group']['id']) + rules = { + 'security_group_rules': [rule1['security_group_rule']]} + self._make_security_group_rule(self.fmt, rules) + + ports_rest1 = self._make_port( + self.fmt, n['network']['id'], + fixed_ips=[{'subnet_id': subnet_v6['subnet']['id']}], + security_groups=[sg1_id, + sg2_id]) + port_id1 = ports_rest1['port']['id'] + self.rpc.devices = {port_id1: ports_rest1['port']} + devices = [port_id1, 'no_exist_device'] + + ports_rest2 = self._make_port( + self.fmt, n['network']['id'], + fixed_ips=[{'subnet_id': subnet_v6['subnet']['id']}], + security_groups=[sg2_id]) + port_id2 = ports_rest2['port']['id'] + + ctx = context.get_admin_context() + ports_rpc = self.rpc.security_group_rules_for_devices( + ctx, devices=devices) + port_rpc = ports_rpc[port_id1] + expected = [{'direction': 'egress', 'ethertype': const.IPv4, + 'security_group_id': sg1_id}, + {'direction': 'egress', 'ethertype': const.IPv6, + 'security_group_id': sg1_id}, + {'direction': 'egress', 'ethertype': const.IPv4, + 'security_group_id': sg2_id}, + {'direction': 'egress', 'ethertype': const.IPv6, + 'security_group_id': 
sg2_id}, + {'direction': 'ingress', + 'source_ip_prefix': '2001:db8::2/128', + 'protocol': const.PROTO_NAME_TCP, + 'ethertype': const.IPv6, + 'port_range_max': 25, 'port_range_min': 24, + 'remote_group_id': sg2_id, + 'security_group_id': sg1_id}, + {'direction': 'ingress', + 'protocol': const.PROTO_NAME_ICMP_V6, + 'ethertype': const.IPv6, + 'source_ip_prefix': fake_gateway, + 'source_port_range_min': const.ICMPV6_TYPE_RA}, + ] + self.assertEqual(port_rpc['security_group_rules'], + expected) + self._delete('ports', port_id1) + self._delete('ports', port_id2) + + +class SGServerRpcCallBackMixinTestCaseXML(SGServerRpcCallBackMixinTestCase): + fmt = 'xml' + + +class SGAgentRpcCallBackMixinTestCase(base.BaseTestCase): + def setUp(self): + super(SGAgentRpcCallBackMixinTestCase, self).setUp() + self.rpc = sg_rpc.SecurityGroupAgentRpcCallbackMixin() + self.rpc.sg_agent = mock.Mock() + + def test_security_groups_rule_updated(self): + self.rpc.security_groups_rule_updated(None, + security_groups=['fake_sgid']) + self.rpc.sg_agent.assert_has_calls( + [mock.call.security_groups_rule_updated(['fake_sgid'])]) + + def test_security_groups_member_updated(self): + self.rpc.security_groups_member_updated(None, + security_groups=['fake_sgid']) + self.rpc.sg_agent.assert_has_calls( + [mock.call.security_groups_member_updated(['fake_sgid'])]) + + def test_security_groups_provider_updated(self): + self.rpc.security_groups_provider_updated(None) + self.rpc.sg_agent.assert_has_calls( + [mock.call.security_groups_provider_updated()]) + + +class SecurityGroupAgentRpcTestCaseForNoneDriver(base.BaseTestCase): + def test_init_firewall_with_none_driver(self): + cfg.CONF.set_override( + 'enable_security_group', False, + group='SECURITYGROUP') + agent = sg_rpc.SecurityGroupAgentRpcMixin() + agent.init_firewall() + self.assertEqual(agent.firewall.__class__.__name__, + 'NoopFirewallDriver') + + +class SecurityGroupAgentRpcTestCase(base.BaseTestCase): + def setUp(self, defer_refresh_firewall=False): 
+ super(SecurityGroupAgentRpcTestCase, self).setUp() + cfg.CONF.set_default('firewall_driver', + 'neutron.agent.firewall.NoopFirewallDriver', + group='SECURITYGROUP') + self.agent = sg_rpc.SecurityGroupAgentRpcMixin() + self.agent.context = None + mock.patch('neutron.agent.linux.iptables_manager').start() + self.agent.root_helper = 'sudo' + self.agent.init_firewall(defer_refresh_firewall=defer_refresh_firewall) + self.firewall = mock.Mock() + firewall_object = firewall_base.FirewallDriver() + self.firewall.defer_apply.side_effect = firewall_object.defer_apply + self.agent.firewall = self.firewall + rpc = mock.Mock() + self.agent.plugin_rpc = rpc + self.fake_device = {'device': 'fake_device', + 'security_groups': ['fake_sgid1', 'fake_sgid2'], + 'security_group_source_groups': ['fake_sgid2'], + 'security_group_rules': [{'security_group_id': + 'fake_sgid1', + 'remote_group_id': + 'fake_sgid2'}]} + fake_devices = {'fake_device': self.fake_device} + self.firewall.ports = fake_devices + rpc.security_group_rules_for_devices.return_value = fake_devices + + def test_prepare_and_remove_devices_filter(self): + self.agent.prepare_devices_filter(['fake_device']) + self.agent.remove_devices_filter(['fake_device']) + # ignore device which is not filtered + self.firewall.assert_has_calls([mock.call.defer_apply(), + mock.call.prepare_port_filter( + self.fake_device), + mock.call.defer_apply(), + mock.call.remove_port_filter( + self.fake_device), + ]) + + def test_security_groups_rule_updated(self): + self.agent.refresh_firewall = mock.Mock() + self.agent.prepare_devices_filter(['fake_port_id']) + self.agent.security_groups_rule_updated(['fake_sgid1', 'fake_sgid3']) + self.agent.refresh_firewall.assert_has_calls( + [mock.call.refresh_firewall([self.fake_device['device']])]) + + def test_security_groups_rule_not_updated(self): + self.agent.refresh_firewall = mock.Mock() + self.agent.prepare_devices_filter(['fake_port_id']) + self.agent.security_groups_rule_updated(['fake_sgid3', 
'fake_sgid4']) + self.agent.refresh_firewall.assert_has_calls([]) + + def test_security_groups_member_updated(self): + self.agent.refresh_firewall = mock.Mock() + self.agent.prepare_devices_filter(['fake_port_id']) + self.agent.security_groups_member_updated(['fake_sgid2', 'fake_sgid3']) + self.agent.refresh_firewall.assert_has_calls( + [mock.call.refresh_firewall([self.fake_device['device']])]) + + def test_security_groups_member_not_updated(self): + self.agent.refresh_firewall = mock.Mock() + self.agent.prepare_devices_filter(['fake_port_id']) + self.agent.security_groups_member_updated(['fake_sgid3', 'fake_sgid4']) + self.agent.refresh_firewall.assert_has_calls([]) + + def test_security_groups_provider_updated(self): + self.agent.refresh_firewall = mock.Mock() + self.agent.security_groups_provider_updated() + self.agent.refresh_firewall.assert_has_calls( + [mock.call.refresh_firewall()]) + + def test_refresh_firewall(self): + self.agent.prepare_devices_filter(['fake_port_id']) + self.agent.refresh_firewall() + calls = [mock.call.defer_apply(), + mock.call.prepare_port_filter(self.fake_device), + mock.call.defer_apply(), + mock.call.update_port_filter(self.fake_device)] + self.firewall.assert_has_calls(calls) + + def test_refresh_firewall_devices(self): + self.agent.prepare_devices_filter(['fake_port_id']) + self.agent.refresh_firewall([self.fake_device]) + calls = [mock.call.defer_apply(), + mock.call.prepare_port_filter(self.fake_device), + mock.call.defer_apply(), + mock.call.update_port_filter(self.fake_device)] + self.firewall.assert_has_calls(calls) + + def test_refresh_firewall_none(self): + self.agent.refresh_firewall([]) + self.firewall.assert_has_calls([]) + + +class SecurityGroupAgentRpcWithDeferredRefreshTestCase( + SecurityGroupAgentRpcTestCase): + + def setUp(self): + super(SecurityGroupAgentRpcWithDeferredRefreshTestCase, self).setUp( + defer_refresh_firewall=True) + + @contextlib.contextmanager + def add_fake_device(self, device, sec_groups, 
source_sec_groups=None): + fake_device = {'device': device, + 'security_groups': sec_groups, + 'security_group_source_groups': source_sec_groups or [], + 'security_group_rules': [{'security_group_id': + 'fake_sgid1', + 'remote_group_id': + 'fake_sgid2'}]} + self.firewall.ports[device] = fake_device + yield + del self.firewall.ports[device] + + def test_security_groups_rule_updated(self): + self.agent.security_groups_rule_updated(['fake_sgid1', 'fake_sgid3']) + self.assertIn('fake_device', self.agent.devices_to_refilter) + + def test_multiple_security_groups_rule_updated_same_port(self): + with self.add_fake_device(device='fake_device_2', + sec_groups=['fake_sgidX']): + self.agent.refresh_firewall = mock.Mock() + self.agent.security_groups_rule_updated(['fake_sgid1']) + self.agent.security_groups_rule_updated(['fake_sgid2']) + self.assertIn('fake_device', self.agent.devices_to_refilter) + self.assertNotIn('fake_device_2', self.agent.devices_to_refilter) + + def test_security_groups_rule_updated_multiple_ports(self): + with self.add_fake_device(device='fake_device_2', + sec_groups=['fake_sgid2']): + self.agent.refresh_firewall = mock.Mock() + self.agent.security_groups_rule_updated(['fake_sgid1', + 'fake_sgid2']) + self.assertIn('fake_device', self.agent.devices_to_refilter) + self.assertIn('fake_device_2', self.agent.devices_to_refilter) + + def test_multiple_security_groups_rule_updated_multiple_ports(self): + with self.add_fake_device(device='fake_device_2', + sec_groups=['fake_sgid2']): + self.agent.refresh_firewall = mock.Mock() + self.agent.security_groups_rule_updated(['fake_sgid1']) + self.agent.security_groups_rule_updated(['fake_sgid2']) + self.assertIn('fake_device', self.agent.devices_to_refilter) + self.assertIn('fake_device_2', self.agent.devices_to_refilter) + + def test_security_groups_member_updated(self): + self.agent.security_groups_member_updated(['fake_sgid2', 'fake_sgid3']) + self.assertIn('fake_device', self.agent.devices_to_refilter) + + def 
test_multiple_security_groups_member_updated_same_port(self): + with self.add_fake_device(device='fake_device_2', + sec_groups=['fake_sgid1', 'fake_sgid1B'], + source_sec_groups=['fake_sgidX']): + self.agent.refresh_firewall = mock.Mock() + self.agent.security_groups_member_updated(['fake_sgid1', + 'fake_sgid3']) + self.agent.security_groups_member_updated(['fake_sgid2', + 'fake_sgid3']) + self.assertIn('fake_device', self.agent.devices_to_refilter) + self.assertNotIn('fake_device_2', self.agent.devices_to_refilter) + + def test_security_groups_member_updated_multiple_ports(self): + with self.add_fake_device(device='fake_device_2', + sec_groups=['fake_sgid1', 'fake_sgid1B'], + source_sec_groups=['fake_sgid2']): + self.agent.security_groups_member_updated(['fake_sgid2']) + self.assertIn('fake_device', self.agent.devices_to_refilter) + self.assertIn('fake_device_2', self.agent.devices_to_refilter) + + def test_multiple_security_groups_member_updated_multiple_ports(self): + with self.add_fake_device(device='fake_device_2', + sec_groups=['fake_sgid1', 'fake_sgid1B'], + source_sec_groups=['fake_sgid1B']): + self.agent.security_groups_member_updated(['fake_sgid1B']) + self.agent.security_groups_member_updated(['fake_sgid2']) + self.assertIn('fake_device', self.agent.devices_to_refilter) + self.assertIn('fake_device_2', self.agent.devices_to_refilter) + + def test_security_groups_provider_updated(self): + self.agent.security_groups_provider_updated() + self.assertTrue(self.agent.global_refresh_firewall) + + def test_setup_port_filters_new_ports_only(self): + self.agent.prepare_devices_filter = mock.Mock() + self.agent.refresh_firewall = mock.Mock() + self.agent.devices_to_refilter = set() + self.agent.global_refresh_firewall = False + self.agent.setup_port_filters(set(['fake_new_device']), set()) + self.assertFalse(self.agent.devices_to_refilter) + self.assertFalse(self.agent.global_refresh_firewall) + self.agent.prepare_devices_filter.assert_called_once_with( + 
set(['fake_new_device'])) + self.assertFalse(self.agent.refresh_firewall.called) + + def test_setup_port_filters_updated_ports_only(self): + self.agent.prepare_devices_filter = mock.Mock() + self.agent.refresh_firewall = mock.Mock() + self.agent.devices_to_refilter = set() + self.agent.global_refresh_firewall = False + self.agent.setup_port_filters(set(), set(['fake_updated_device'])) + self.assertFalse(self.agent.devices_to_refilter) + self.assertFalse(self.agent.global_refresh_firewall) + self.agent.refresh_firewall.assert_called_once_with( + set(['fake_updated_device'])) + self.assertFalse(self.agent.prepare_devices_filter.called) + + def test_setup_port_filter_new_and_updated_ports(self): + self.agent.prepare_devices_filter = mock.Mock() + self.agent.refresh_firewall = mock.Mock() + self.agent.devices_to_refilter = set() + self.agent.global_refresh_firewall = False + self.agent.setup_port_filters(set(['fake_new_device']), + set(['fake_updated_device'])) + self.assertFalse(self.agent.devices_to_refilter) + self.assertFalse(self.agent.global_refresh_firewall) + self.agent.prepare_devices_filter.assert_called_once_with( + set(['fake_new_device'])) + self.agent.refresh_firewall.assert_called_once_with( + set(['fake_updated_device'])) + + def test_setup_port_filters_sg_updates_only(self): + self.agent.prepare_devices_filter = mock.Mock() + self.agent.refresh_firewall = mock.Mock() + self.agent.devices_to_refilter = set(['fake_device']) + self.agent.global_refresh_firewall = False + self.agent.setup_port_filters(set(), set()) + self.assertFalse(self.agent.devices_to_refilter) + self.assertFalse(self.agent.global_refresh_firewall) + self.agent.refresh_firewall.assert_called_once_with( + set(['fake_device'])) + self.assertFalse(self.agent.prepare_devices_filter.called) + + def test_setup_port_filters_sg_updates_and_new_ports(self): + self.agent.prepare_devices_filter = mock.Mock() + self.agent.refresh_firewall = mock.Mock() + self.agent.devices_to_refilter = 
set(['fake_device']) + self.agent.global_refresh_firewall = False + self.agent.setup_port_filters(set(['fake_new_device']), set()) + self.assertFalse(self.agent.devices_to_refilter) + self.assertFalse(self.agent.global_refresh_firewall) + self.agent.prepare_devices_filter.assert_called_once_with( + set(['fake_new_device'])) + self.agent.refresh_firewall.assert_called_once_with( + set(['fake_device'])) + + def test_setup_port_filters_sg_updates_and_updated_ports(self): + self.agent.prepare_devices_filter = mock.Mock() + self.agent.refresh_firewall = mock.Mock() + self.agent.devices_to_refilter = set(['fake_device', 'fake_device_2']) + self.agent.global_refresh_firewall = False + self.agent.setup_port_filters( + set(), set(['fake_device', 'fake_updated_device'])) + self.assertFalse(self.agent.devices_to_refilter) + self.assertFalse(self.agent.global_refresh_firewall) + self.agent.refresh_firewall.assert_called_once_with( + set(['fake_device', 'fake_device_2', 'fake_updated_device'])) + self.assertFalse(self.agent.prepare_devices_filter.called) + + def test_setup_port_filters_all_updates(self): + self.agent.prepare_devices_filter = mock.Mock() + self.agent.refresh_firewall = mock.Mock() + self.agent.devices_to_refilter = set(['fake_device', 'fake_device_2']) + self.agent.global_refresh_firewall = False + self.agent.setup_port_filters( + set(['fake_new_device']), + set(['fake_device', 'fake_updated_device'])) + self.assertFalse(self.agent.devices_to_refilter) + self.assertFalse(self.agent.global_refresh_firewall) + self.agent.prepare_devices_filter.assert_called_once_with( + set(['fake_new_device'])) + self.agent.refresh_firewall.assert_called_once_with( + set(['fake_device', 'fake_device_2', 'fake_updated_device'])) + + def test_setup_port_filters_no_update(self): + self.agent.prepare_devices_filter = mock.Mock() + self.agent.refresh_firewall = mock.Mock() + self.agent.devices_to_refilter = set() + self.agent.global_refresh_firewall = False + 
self.agent.setup_port_filters(set(), set()) + self.assertFalse(self.agent.devices_to_refilter) + self.assertFalse(self.agent.global_refresh_firewall) + self.assertFalse(self.agent.refresh_firewall.called) + self.assertFalse(self.agent.prepare_devices_filter.called) + + def test_setup_port_filters_with_global_refresh(self): + self.agent.prepare_devices_filter = mock.Mock() + self.agent.refresh_firewall = mock.Mock() + self.agent.devices_to_refilter = set() + self.agent.global_refresh_firewall = True + self.agent.setup_port_filters(set(), set()) + self.assertFalse(self.agent.devices_to_refilter) + self.assertFalse(self.agent.global_refresh_firewall) + self.agent.refresh_firewall.assert_called_once_with() + self.assertFalse(self.agent.prepare_devices_filter.called) + + +class FakeSGRpcApi(agent_rpc.PluginApi, + sg_rpc.SecurityGroupServerRpcApiMixin): + pass + + +class SecurityGroupServerRpcApiTestCase(base.BaseTestCase): + def setUp(self): + super(SecurityGroupServerRpcApiTestCase, self).setUp() + self.rpc = FakeSGRpcApi('fake_topic') + self.rpc.call = mock.Mock() + + def test_security_group_rules_for_devices(self): + self.rpc.security_group_rules_for_devices(None, ['fake_device']) + self.rpc.call.assert_has_calls( + [mock.call(None, + {'args': + {'devices': ['fake_device']}, + 'method': 'security_group_rules_for_devices', + 'namespace': None}, + version=sg_rpc.SG_RPC_VERSION, + topic='fake_topic')]) + + +class FakeSGNotifierAPI(rpc_compat.RpcProxy, + sg_rpc.SecurityGroupAgentRpcApiMixin): + pass + + +class SecurityGroupAgentRpcApiTestCase(base.BaseTestCase): + def setUp(self): + super(SecurityGroupAgentRpcApiTestCase, self).setUp() + self.notifier = FakeSGNotifierAPI(topic='fake', + default_version='1.0') + self.notifier.fanout_cast = mock.Mock() + + def test_security_groups_rule_updated(self): + self.notifier.security_groups_rule_updated( + None, security_groups=['fake_sgid']) + self.notifier.fanout_cast.assert_has_calls( + [mock.call(None, + {'args': + 
{'security_groups': ['fake_sgid']}, + 'method': 'security_groups_rule_updated', + 'namespace': None}, + version=sg_rpc.SG_RPC_VERSION, + topic='fake-security_group-update')]) + + def test_security_groups_member_updated(self): + self.notifier.security_groups_member_updated( + None, security_groups=['fake_sgid']) + self.notifier.fanout_cast.assert_has_calls( + [mock.call(None, + {'args': + {'security_groups': ['fake_sgid']}, + 'method': 'security_groups_member_updated', + 'namespace': None}, + version=sg_rpc.SG_RPC_VERSION, + topic='fake-security_group-update')]) + + def test_security_groups_rule_not_updated(self): + self.notifier.security_groups_rule_updated( + None, security_groups=[]) + self.assertEqual(False, self.notifier.fanout_cast.called) + + def test_security_groups_member_not_updated(self): + self.notifier.security_groups_member_updated( + None, security_groups=[]) + self.assertEqual(False, self.notifier.fanout_cast.called) + +#Note(nati) bn -> binary_name +# id -> device_id + +PHYSDEV_MOD = '-m physdev' +PHYSDEV_IS_BRIDGED = '--physdev-is-bridged' + +IPTABLES_ARG = {'bn': iptables_manager.binary_name, + 'physdev_mod': PHYSDEV_MOD, + 'physdev_is_bridged': PHYSDEV_IS_BRIDGED} + +CHAINS_NAT = 'OUTPUT|POSTROUTING|PREROUTING|float-snat|snat' +IPTABLES_ARG['chains'] = CHAINS_NAT + +IPTABLES_NAT = """# Generated by iptables_manager +*nat +:neutron-postrouting-bottom - [0:0] +:%(bn)s-(%(chains)s) - [0:0] +:%(bn)s-(%(chains)s) - [0:0] +:%(bn)s-(%(chains)s) - [0:0] +:%(bn)s-(%(chains)s) - [0:0] +:%(bn)s-(%(chains)s) - [0:0] +[0:0] -A PREROUTING -j %(bn)s-PREROUTING +[0:0] -A OUTPUT -j %(bn)s-OUTPUT +[0:0] -A POSTROUTING -j %(bn)s-POSTROUTING +[0:0] -A POSTROUTING -j neutron-postrouting-bottom +[0:0] -A neutron-postrouting-bottom -j %(bn)s-snat +[0:0] -A %(bn)s-snat -j %(bn)s-float-snat +COMMIT +# Completed by iptables_manager +""" % IPTABLES_ARG + +CHAINS_EMPTY = 'FORWARD|INPUT|OUTPUT|local|sg-chain|sg-fallback' +CHAINS_1 = CHAINS_EMPTY + '|i_port1|o_port1|s_port1' 
+CHAINS_2 = CHAINS_1 + '|i_port2|o_port2|s_port2' + +IPTABLES_ARG['chains'] = CHAINS_1 + +IPTABLES_FILTER_1 = """# Generated by iptables_manager +*filter +:neutron-filter-top - [0:0] +:%(bn)s-(%(chains)s) - [0:0] +:%(bn)s-(%(chains)s) - [0:0] +:%(bn)s-(%(chains)s) - [0:0] +:%(bn)s-(%(chains)s) - [0:0] +:%(bn)s-(%(chains)s) - [0:0] +:%(bn)s-(%(chains)s) - [0:0] +:%(bn)s-(%(chains)s) - [0:0] +:%(bn)s-(%(chains)s) - [0:0] +:%(bn)s-(%(chains)s) - [0:0] +[0:0] -A FORWARD -j neutron-filter-top +[0:0] -A OUTPUT -j neutron-filter-top +[0:0] -A neutron-filter-top -j %(bn)s-local +[0:0] -A INPUT -j %(bn)s-INPUT +[0:0] -A OUTPUT -j %(bn)s-OUTPUT +[0:0] -A FORWARD -j %(bn)s-FORWARD +[0:0] -A %(bn)s-sg-fallback -j DROP +[0:0] -A %(bn)s-FORWARD %(physdev_mod)s --physdev-INGRESS tap_port1 \ +%(physdev_is_bridged)s -j %(bn)s-sg-chain +[0:0] -A %(bn)s-sg-chain %(physdev_mod)s --physdev-INGRESS tap_port1 \ +%(physdev_is_bridged)s -j %(bn)s-i_port1 +[0:0] -A %(bn)s-i_port1 -m state --state INVALID -j DROP +[0:0] -A %(bn)s-i_port1 -m state --state RELATED,ESTABLISHED -j RETURN +[0:0] -A %(bn)s-i_port1 -s 10.0.0.2 -p udp -m udp --sport 67 --dport 68 -j \ +RETURN +[0:0] -A %(bn)s-i_port1 -p tcp -m tcp --dport 22 -j RETURN +[0:0] -A %(bn)s-i_port1 -j %(bn)s-sg-fallback +[0:0] -A %(bn)s-FORWARD %(physdev_mod)s --physdev-EGRESS tap_port1 \ +%(physdev_is_bridged)s -j %(bn)s-sg-chain +[0:0] -A %(bn)s-sg-chain %(physdev_mod)s --physdev-EGRESS tap_port1 \ +%(physdev_is_bridged)s -j %(bn)s-o_port1 +[0:0] -A %(bn)s-INPUT %(physdev_mod)s --physdev-EGRESS tap_port1 \ +%(physdev_is_bridged)s -j %(bn)s-o_port1 +[0:0] -A %(bn)s-s_port1 -m mac --mac-source 12:34:56:78:9a:bc -s 10.0.0.3 -j \ +RETURN +[0:0] -A %(bn)s-s_port1 -j DROP +[0:0] -A %(bn)s-o_port1 -p udp -m udp --sport 68 --dport 67 -j RETURN +[0:0] -A %(bn)s-o_port1 -j %(bn)s-s_port1 +[0:0] -A %(bn)s-o_port1 -p udp -m udp --sport 67 --dport 68 -j DROP +[0:0] -A %(bn)s-o_port1 -m state --state INVALID -j DROP +[0:0] -A %(bn)s-o_port1 -m state 
--state RELATED,ESTABLISHED -j RETURN +[0:0] -A %(bn)s-o_port1 -j RETURN +[0:0] -A %(bn)s-o_port1 -j %(bn)s-sg-fallback +[0:0] -A %(bn)s-sg-chain -j ACCEPT +COMMIT +# Completed by iptables_manager +""" % IPTABLES_ARG + + +IPTABLES_FILTER_1_2 = """# Generated by iptables_manager +*filter +:neutron-filter-top - [0:0] +:%(bn)s-(%(chains)s) - [0:0] +:%(bn)s-(%(chains)s) - [0:0] +:%(bn)s-(%(chains)s) - [0:0] +:%(bn)s-(%(chains)s) - [0:0] +:%(bn)s-(%(chains)s) - [0:0] +:%(bn)s-(%(chains)s) - [0:0] +:%(bn)s-(%(chains)s) - [0:0] +:%(bn)s-(%(chains)s) - [0:0] +:%(bn)s-(%(chains)s) - [0:0] +[0:0] -A FORWARD -j neutron-filter-top +[0:0] -A OUTPUT -j neutron-filter-top +[0:0] -A neutron-filter-top -j %(bn)s-local +[0:0] -A INPUT -j %(bn)s-INPUT +[0:0] -A OUTPUT -j %(bn)s-OUTPUT +[0:0] -A FORWARD -j %(bn)s-FORWARD +[0:0] -A %(bn)s-sg-fallback -j DROP +[0:0] -A %(bn)s-FORWARD %(physdev_mod)s --physdev-INGRESS tap_port1 \ +%(physdev_is_bridged)s -j %(bn)s-sg-chain +[0:0] -A %(bn)s-sg-chain %(physdev_mod)s --physdev-INGRESS tap_port1 \ +%(physdev_is_bridged)s -j %(bn)s-i_port1 +[0:0] -A %(bn)s-i_port1 -m state --state INVALID -j DROP +[0:0] -A %(bn)s-i_port1 -m state --state RELATED,ESTABLISHED -j RETURN +[0:0] -A %(bn)s-i_port1 -s 10.0.0.2 -p udp -m udp --sport 67 --dport 68 -j \ +RETURN +[0:0] -A %(bn)s-i_port1 -p tcp -m tcp --dport 22 -j RETURN +[0:0] -A %(bn)s-i_port1 -s 10.0.0.4 -j RETURN +[0:0] -A %(bn)s-i_port1 -j %(bn)s-sg-fallback +[0:0] -A %(bn)s-FORWARD %(physdev_mod)s --physdev-EGRESS tap_port1 \ +%(physdev_is_bridged)s -j %(bn)s-sg-chain +[0:0] -A %(bn)s-sg-chain %(physdev_mod)s --physdev-EGRESS tap_port1 \ +%(physdev_is_bridged)s -j %(bn)s-o_port1 +[0:0] -A %(bn)s-INPUT %(physdev_mod)s --physdev-EGRESS tap_port1 \ +%(physdev_is_bridged)s -j %(bn)s-o_port1 +[0:0] -A %(bn)s-s_port1 -m mac --mac-source 12:34:56:78:9a:bc -s 10.0.0.3 -j \ +RETURN +[0:0] -A %(bn)s-s_port1 -j DROP +[0:0] -A %(bn)s-o_port1 -p udp -m udp --sport 68 --dport 67 -j RETURN +[0:0] -A 
%(bn)s-o_port1 -j %(bn)s-s_port1 +[0:0] -A %(bn)s-o_port1 -p udp -m udp --sport 67 --dport 68 -j DROP +[0:0] -A %(bn)s-o_port1 -m state --state INVALID -j DROP +[0:0] -A %(bn)s-o_port1 -m state --state RELATED,ESTABLISHED -j RETURN +[0:0] -A %(bn)s-o_port1 -j RETURN +[0:0] -A %(bn)s-o_port1 -j %(bn)s-sg-fallback +[0:0] -A %(bn)s-sg-chain -j ACCEPT +COMMIT +# Completed by iptables_manager +""" % IPTABLES_ARG + +IPTABLES_ARG['chains'] = CHAINS_2 + +IPTABLES_FILTER_2 = """# Generated by iptables_manager +*filter +:neutron-filter-top - [0:0] +:%(bn)s-(%(chains)s) - [0:0] +:%(bn)s-(%(chains)s) - [0:0] +:%(bn)s-(%(chains)s) - [0:0] +:%(bn)s-(%(chains)s) - [0:0] +:%(bn)s-(%(chains)s) - [0:0] +:%(bn)s-(%(chains)s) - [0:0] +:%(bn)s-(%(chains)s) - [0:0] +:%(bn)s-(%(chains)s) - [0:0] +:%(bn)s-(%(chains)s) - [0:0] +:%(bn)s-(%(chains)s) - [0:0] +:%(bn)s-(%(chains)s) - [0:0] +:%(bn)s-(%(chains)s) - [0:0] +[0:0] -A FORWARD -j neutron-filter-top +[0:0] -A OUTPUT -j neutron-filter-top +[0:0] -A neutron-filter-top -j %(bn)s-local +[0:0] -A INPUT -j %(bn)s-INPUT +[0:0] -A OUTPUT -j %(bn)s-OUTPUT +[0:0] -A FORWARD -j %(bn)s-FORWARD +[0:0] -A %(bn)s-sg-fallback -j DROP +[0:0] -A %(bn)s-FORWARD %(physdev_mod)s --physdev-INGRESS tap_port1 \ +%(physdev_is_bridged)s -j %(bn)s-sg-chain +[0:0] -A %(bn)s-sg-chain %(physdev_mod)s --physdev-INGRESS tap_port1 \ +%(physdev_is_bridged)s -j %(bn)s-i_port1 +[0:0] -A %(bn)s-i_port1 -m state --state INVALID -j DROP +[0:0] -A %(bn)s-i_port1 -m state --state RELATED,ESTABLISHED -j RETURN +[0:0] -A %(bn)s-i_port1 -s 10.0.0.2 -p udp -m udp --sport 67 --dport 68 -j \ +RETURN +[0:0] -A %(bn)s-i_port1 -p tcp -m tcp --dport 22 -j RETURN +[0:0] -A %(bn)s-i_port1 -s 10.0.0.4 -j RETURN +[0:0] -A %(bn)s-i_port1 -j %(bn)s-sg-fallback +[0:0] -A %(bn)s-FORWARD %(physdev_mod)s --physdev-EGRESS tap_port1 \ +%(physdev_is_bridged)s -j %(bn)s-sg-chain +[0:0] -A %(bn)s-sg-chain %(physdev_mod)s --physdev-EGRESS tap_port1 \ +%(physdev_is_bridged)s -j %(bn)s-o_port1 +[0:0] 
-A %(bn)s-INPUT %(physdev_mod)s --physdev-EGRESS tap_port1 \ +%(physdev_is_bridged)s -j %(bn)s-o_port1 +[0:0] -A %(bn)s-s_port1 -m mac --mac-source 12:34:56:78:9a:bc -s 10.0.0.3 \ +-j RETURN +[0:0] -A %(bn)s-s_port1 -j DROP +[0:0] -A %(bn)s-o_port1 -p udp -m udp --sport 68 --dport 67 -j RETURN +[0:0] -A %(bn)s-o_port1 -j %(bn)s-s_port1 +[0:0] -A %(bn)s-o_port1 -p udp -m udp --sport 67 --dport 68 -j DROP +[0:0] -A %(bn)s-o_port1 -m state --state INVALID -j DROP +[0:0] -A %(bn)s-o_port1 -m state --state RELATED,ESTABLISHED -j RETURN +[0:0] -A %(bn)s-o_port1 -j RETURN +[0:0] -A %(bn)s-o_port1 -j %(bn)s-sg-fallback +[0:0] -A %(bn)s-FORWARD %(physdev_mod)s --physdev-INGRESS tap_port2 \ +%(physdev_is_bridged)s -j %(bn)s-sg-chain +[0:0] -A %(bn)s-sg-chain %(physdev_mod)s --physdev-INGRESS tap_port2 \ +%(physdev_is_bridged)s -j %(bn)s-i_port2 +[0:0] -A %(bn)s-i_port2 -m state --state INVALID -j DROP +[0:0] -A %(bn)s-i_port2 -m state --state RELATED,ESTABLISHED -j RETURN +[0:0] -A %(bn)s-i_port2 -s 10.0.0.2 -p udp -m udp --sport 67 --dport 68 -j \ +RETURN +[0:0] -A %(bn)s-i_port2 -p tcp -m tcp --dport 22 -j RETURN +[0:0] -A %(bn)s-i_port2 -s 10.0.0.3 -j RETURN +[0:0] -A %(bn)s-i_port2 -j %(bn)s-sg-fallback +[0:0] -A %(bn)s-FORWARD %(physdev_mod)s --physdev-EGRESS tap_port2 \ +%(physdev_is_bridged)s -j %(bn)s-sg-chain +[0:0] -A %(bn)s-sg-chain %(physdev_mod)s --physdev-EGRESS tap_port2 \ +%(physdev_is_bridged)s -j %(bn)s-o_port2 +[0:0] -A %(bn)s-INPUT %(physdev_mod)s --physdev-EGRESS tap_port2 \ +%(physdev_is_bridged)s -j %(bn)s-o_port2 +[0:0] -A %(bn)s-s_port2 -m mac --mac-source 12:34:56:78:9a:bd -s 10.0.0.4 \ +-j RETURN +[0:0] -A %(bn)s-s_port2 -j DROP +[0:0] -A %(bn)s-o_port2 -p udp -m udp --sport 68 --dport 67 -j RETURN +[0:0] -A %(bn)s-o_port2 -j %(bn)s-s_port2 +[0:0] -A %(bn)s-o_port2 -p udp -m udp --sport 67 --dport 68 -j DROP +[0:0] -A %(bn)s-o_port2 -m state --state INVALID -j DROP +[0:0] -A %(bn)s-o_port2 -m state --state RELATED,ESTABLISHED -j RETURN +[0:0] -A 
%(bn)s-o_port2 -j RETURN +[0:0] -A %(bn)s-o_port2 -j %(bn)s-sg-fallback +[0:0] -A %(bn)s-sg-chain -j ACCEPT +COMMIT +# Completed by iptables_manager +""" % IPTABLES_ARG + +IPTABLES_FILTER_2_2 = """# Generated by iptables_manager +*filter +:neutron-filter-top - [0:0] +:%(bn)s-(%(chains)s) - [0:0] +:%(bn)s-(%(chains)s) - [0:0] +:%(bn)s-(%(chains)s) - [0:0] +:%(bn)s-(%(chains)s) - [0:0] +:%(bn)s-(%(chains)s) - [0:0] +:%(bn)s-(%(chains)s) - [0:0] +:%(bn)s-(%(chains)s) - [0:0] +:%(bn)s-(%(chains)s) - [0:0] +:%(bn)s-(%(chains)s) - [0:0] +:%(bn)s-(%(chains)s) - [0:0] +:%(bn)s-(%(chains)s) - [0:0] +:%(bn)s-(%(chains)s) - [0:0] +[0:0] -A FORWARD -j neutron-filter-top +[0:0] -A OUTPUT -j neutron-filter-top +[0:0] -A neutron-filter-top -j %(bn)s-local +[0:0] -A INPUT -j %(bn)s-INPUT +[0:0] -A OUTPUT -j %(bn)s-OUTPUT +[0:0] -A FORWARD -j %(bn)s-FORWARD +[0:0] -A %(bn)s-sg-fallback -j DROP +[0:0] -A %(bn)s-FORWARD %(physdev_mod)s --physdev-INGRESS tap_port1 \ +%(physdev_is_bridged)s -j %(bn)s-sg-chain +[0:0] -A %(bn)s-sg-chain %(physdev_mod)s --physdev-INGRESS tap_port1 \ +%(physdev_is_bridged)s -j %(bn)s-i_port1 +[0:0] -A %(bn)s-i_port1 -m state --state INVALID -j DROP +[0:0] -A %(bn)s-i_port1 -m state --state RELATED,ESTABLISHED -j RETURN +[0:0] -A %(bn)s-i_port1 -s 10.0.0.2 -p udp -m udp --sport 67 --dport 68 -j \ +RETURN +[0:0] -A %(bn)s-i_port1 -p tcp -m tcp --dport 22 -j RETURN +[0:0] -A %(bn)s-i_port1 -j %(bn)s-sg-fallback +[0:0] -A %(bn)s-FORWARD %(physdev_mod)s --physdev-EGRESS tap_port1 \ +%(physdev_is_bridged)s -j %(bn)s-sg-chain +[0:0] -A %(bn)s-sg-chain %(physdev_mod)s --physdev-EGRESS tap_port1 \ +%(physdev_is_bridged)s -j %(bn)s-o_port1 +[0:0] -A %(bn)s-INPUT %(physdev_mod)s --physdev-EGRESS tap_port1 \ +%(physdev_is_bridged)s -j %(bn)s-o_port1 +[0:0] -A %(bn)s-s_port1 -m mac --mac-source 12:34:56:78:9a:bc -s 10.0.0.3 -j \ +RETURN +[0:0] -A %(bn)s-s_port1 -j DROP +[0:0] -A %(bn)s-o_port1 -p udp -m udp --sport 68 --dport 67 -j RETURN +[0:0] -A %(bn)s-o_port1 -j 
%(bn)s-s_port1 +[0:0] -A %(bn)s-o_port1 -p udp -m udp --sport 67 --dport 68 -j DROP +[0:0] -A %(bn)s-o_port1 -m state --state INVALID -j DROP +[0:0] -A %(bn)s-o_port1 -m state --state RELATED,ESTABLISHED -j RETURN +[0:0] -A %(bn)s-o_port1 -j RETURN +[0:0] -A %(bn)s-o_port1 -j %(bn)s-sg-fallback +[0:0] -A %(bn)s-FORWARD %(physdev_mod)s --physdev-INGRESS tap_port2 \ +%(physdev_is_bridged)s -j %(bn)s-sg-chain +[0:0] -A %(bn)s-sg-chain %(physdev_mod)s --physdev-INGRESS tap_port2 \ +%(physdev_is_bridged)s -j %(bn)s-i_port2 +[0:0] -A %(bn)s-i_port2 -m state --state INVALID -j DROP +[0:0] -A %(bn)s-i_port2 -m state --state RELATED,ESTABLISHED -j RETURN +[0:0] -A %(bn)s-i_port2 -s 10.0.0.2 -p udp -m udp --sport 67 --dport 68 -j \ +RETURN +[0:0] -A %(bn)s-i_port2 -p tcp -m tcp --dport 22 -j RETURN +[0:0] -A %(bn)s-i_port2 -s 10.0.0.3 -j RETURN +[0:0] -A %(bn)s-i_port2 -j %(bn)s-sg-fallback +[0:0] -A %(bn)s-FORWARD %(physdev_mod)s --physdev-EGRESS tap_port2 \ +%(physdev_is_bridged)s -j %(bn)s-sg-chain +[0:0] -A %(bn)s-sg-chain %(physdev_mod)s --physdev-EGRESS tap_port2 \ +%(physdev_is_bridged)s -j %(bn)s-o_port2 +[0:0] -A %(bn)s-INPUT %(physdev_mod)s --physdev-EGRESS tap_port2 \ +%(physdev_is_bridged)s -j %(bn)s-o_port2 +[0:0] -A %(bn)s-s_port2 -m mac --mac-source 12:34:56:78:9a:bd -s 10.0.0.4 -j \ +RETURN +[0:0] -A %(bn)s-s_port2 -j DROP +[0:0] -A %(bn)s-o_port2 -p udp -m udp --sport 68 --dport 67 -j RETURN +[0:0] -A %(bn)s-o_port2 -j %(bn)s-s_port2 +[0:0] -A %(bn)s-o_port2 -p udp -m udp --sport 67 --dport 68 -j DROP +[0:0] -A %(bn)s-o_port2 -m state --state INVALID -j DROP +[0:0] -A %(bn)s-o_port2 -m state --state RELATED,ESTABLISHED -j RETURN +[0:0] -A %(bn)s-o_port2 -j RETURN +[0:0] -A %(bn)s-o_port2 -j %(bn)s-sg-fallback +[0:0] -A %(bn)s-sg-chain -j ACCEPT +COMMIT +# Completed by iptables_manager +""" % IPTABLES_ARG + +IPTABLES_FILTER_2_3 = """# Generated by iptables_manager +*filter +:neutron-filter-top - [0:0] +:%(bn)s-(%(chains)s) - [0:0] +:%(bn)s-(%(chains)s) - 
[0:0] +:%(bn)s-(%(chains)s) - [0:0] +:%(bn)s-(%(chains)s) - [0:0] +:%(bn)s-(%(chains)s) - [0:0] +:%(bn)s-(%(chains)s) - [0:0] +:%(bn)s-(%(chains)s) - [0:0] +:%(bn)s-(%(chains)s) - [0:0] +:%(bn)s-(%(chains)s) - [0:0] +:%(bn)s-(%(chains)s) - [0:0] +:%(bn)s-(%(chains)s) - [0:0] +:%(bn)s-(%(chains)s) - [0:0] +[0:0] -A FORWARD -j neutron-filter-top +[0:0] -A OUTPUT -j neutron-filter-top +[0:0] -A neutron-filter-top -j %(bn)s-local +[0:0] -A INPUT -j %(bn)s-INPUT +[0:0] -A OUTPUT -j %(bn)s-OUTPUT +[0:0] -A FORWARD -j %(bn)s-FORWARD +[0:0] -A %(bn)s-sg-fallback -j DROP +[0:0] -A %(bn)s-FORWARD %(physdev_mod)s --physdev-INGRESS tap_port1 \ +%(physdev_is_bridged)s -j %(bn)s-sg-chain +[0:0] -A %(bn)s-sg-chain %(physdev_mod)s --physdev-INGRESS tap_port1 \ +%(physdev_is_bridged)s -j %(bn)s-i_port1 +[0:0] -A %(bn)s-i_port1 -m state --state INVALID -j DROP +[0:0] -A %(bn)s-i_port1 -m state --state RELATED,ESTABLISHED -j RETURN +[0:0] -A %(bn)s-i_port1 -s 10.0.0.2 -p udp -m udp --sport 67 --dport 68 -j \ +RETURN +[0:0] -A %(bn)s-i_port1 -p tcp -m tcp --dport 22 -j RETURN +[0:0] -A %(bn)s-i_port1 -s 10.0.0.4 -j RETURN +[0:0] -A %(bn)s-i_port1 -p icmp -j RETURN +[0:0] -A %(bn)s-i_port1 -j %(bn)s-sg-fallback +[0:0] -A %(bn)s-FORWARD %(physdev_mod)s --physdev-EGRESS tap_port1 \ +%(physdev_is_bridged)s -j %(bn)s-sg-chain +[0:0] -A %(bn)s-sg-chain %(physdev_mod)s --physdev-EGRESS tap_port1 \ +%(physdev_is_bridged)s -j %(bn)s-o_port1 +[0:0] -A %(bn)s-INPUT %(physdev_mod)s --physdev-EGRESS tap_port1 \ +%(physdev_is_bridged)s -j %(bn)s-o_port1 +[0:0] -A %(bn)s-s_port1 -m mac --mac-source 12:34:56:78:9a:bc -s 10.0.0.3 -j \ +RETURN +[0:0] -A %(bn)s-s_port1 -j DROP +[0:0] -A %(bn)s-o_port1 -p udp -m udp --sport 68 --dport 67 -j RETURN +[0:0] -A %(bn)s-o_port1 -j %(bn)s-s_port1 +[0:0] -A %(bn)s-o_port1 -p udp -m udp --sport 67 --dport 68 -j DROP +[0:0] -A %(bn)s-o_port1 -m state --state INVALID -j DROP +[0:0] -A %(bn)s-o_port1 -m state --state RELATED,ESTABLISHED -j RETURN +[0:0] -A 
%(bn)s-o_port1 -j RETURN +[0:0] -A %(bn)s-o_port1 -j %(bn)s-sg-fallback +[0:0] -A %(bn)s-FORWARD %(physdev_mod)s --physdev-INGRESS tap_port2 \ +%(physdev_is_bridged)s -j %(bn)s-sg-chain +[0:0] -A %(bn)s-sg-chain %(physdev_mod)s --physdev-INGRESS tap_port2 \ +%(physdev_is_bridged)s -j %(bn)s-i_port2 +[0:0] -A %(bn)s-i_port2 -m state --state INVALID -j DROP +[0:0] -A %(bn)s-i_port2 -m state --state RELATED,ESTABLISHED -j RETURN +[0:0] -A %(bn)s-i_port2 -s 10.0.0.2 -p udp -m udp --sport 67 --dport 68 -j \ +RETURN +[0:0] -A %(bn)s-i_port2 -p tcp -m tcp --dport 22 -j RETURN +[0:0] -A %(bn)s-i_port2 -s 10.0.0.3 -j RETURN +[0:0] -A %(bn)s-i_port2 -p icmp -j RETURN +[0:0] -A %(bn)s-i_port2 -j %(bn)s-sg-fallback +[0:0] -A %(bn)s-FORWARD %(physdev_mod)s --physdev-EGRESS tap_port2 \ +%(physdev_is_bridged)s -j %(bn)s-sg-chain +[0:0] -A %(bn)s-sg-chain %(physdev_mod)s --physdev-EGRESS tap_port2 \ +%(physdev_is_bridged)s -j %(bn)s-o_port2 +[0:0] -A %(bn)s-INPUT %(physdev_mod)s --physdev-EGRESS tap_port2 \ +%(physdev_is_bridged)s -j %(bn)s-o_port2 +[0:0] -A %(bn)s-s_port2 -m mac --mac-source 12:34:56:78:9a:bd -s 10.0.0.4 -j \ +RETURN +[0:0] -A %(bn)s-s_port2 -j DROP +[0:0] -A %(bn)s-o_port2 -p udp -m udp --sport 68 --dport 67 -j RETURN +[0:0] -A %(bn)s-o_port2 -j %(bn)s-s_port2 +[0:0] -A %(bn)s-o_port2 -p udp -m udp --sport 67 --dport 68 -j DROP +[0:0] -A %(bn)s-o_port2 -m state --state INVALID -j DROP +[0:0] -A %(bn)s-o_port2 -m state --state RELATED,ESTABLISHED -j RETURN +[0:0] -A %(bn)s-o_port2 -j RETURN +[0:0] -A %(bn)s-o_port2 -j %(bn)s-sg-fallback +[0:0] -A %(bn)s-sg-chain -j ACCEPT +COMMIT +# Completed by iptables_manager +""" % IPTABLES_ARG + + +IPTABLES_ARG['chains'] = CHAINS_EMPTY +IPTABLES_FILTER_EMPTY = """# Generated by iptables_manager +*filter +:neutron-filter-top - [0:0] +:%(bn)s-(%(chains)s) - [0:0] +:%(bn)s-(%(chains)s) - [0:0] +:%(bn)s-(%(chains)s) - [0:0] +:%(bn)s-(%(chains)s) - [0:0] +:%(bn)s-(%(chains)s) - [0:0] +:%(bn)s-(%(chains)s) - [0:0] +[0:0] -A 
FORWARD -j neutron-filter-top +[0:0] -A OUTPUT -j neutron-filter-top +[0:0] -A neutron-filter-top -j %(bn)s-local +[0:0] -A INPUT -j %(bn)s-INPUT +[0:0] -A OUTPUT -j %(bn)s-OUTPUT +[0:0] -A FORWARD -j %(bn)s-FORWARD +[0:0] -A %(bn)s-sg-fallback -j DROP +COMMIT +# Completed by iptables_manager +""" % IPTABLES_ARG + +IPTABLES_ARG['chains'] = CHAINS_1 +IPTABLES_FILTER_V6_1 = """# Generated by iptables_manager +*filter +:neutron-filter-top - [0:0] +:%(bn)s-(%(chains)s) - [0:0] +:%(bn)s-(%(chains)s) - [0:0] +:%(bn)s-(%(chains)s) - [0:0] +:%(bn)s-(%(chains)s) - [0:0] +:%(bn)s-(%(chains)s) - [0:0] +:%(bn)s-(%(chains)s) - [0:0] +:%(bn)s-(%(chains)s) - [0:0] +:%(bn)s-(%(chains)s) - [0:0] +[0:0] -A FORWARD -j neutron-filter-top +[0:0] -A OUTPUT -j neutron-filter-top +[0:0] -A neutron-filter-top -j %(bn)s-local +[0:0] -A INPUT -j %(bn)s-INPUT +[0:0] -A OUTPUT -j %(bn)s-OUTPUT +[0:0] -A FORWARD -j %(bn)s-FORWARD +[0:0] -A %(bn)s-sg-fallback -j DROP +[0:0] -A %(bn)s-FORWARD %(physdev_mod)s --physdev-INGRESS tap_port1 \ +%(physdev_is_bridged)s -j %(bn)s-sg-chain +[0:0] -A %(bn)s-sg-chain %(physdev_mod)s --physdev-INGRESS tap_port1 \ +%(physdev_is_bridged)s -j %(bn)s-i_port1 +[0:0] -A %(bn)s-i_port1 -p icmpv6 --icmpv6-type 130 -j RETURN +[0:0] -A %(bn)s-i_port1 -p icmpv6 --icmpv6-type 131 -j RETURN +[0:0] -A %(bn)s-i_port1 -p icmpv6 --icmpv6-type 132 -j RETURN +[0:0] -A %(bn)s-i_port1 -p icmpv6 --icmpv6-type 135 -j RETURN +[0:0] -A %(bn)s-i_port1 -p icmpv6 --icmpv6-type 136 -j RETURN +[0:0] -A %(bn)s-i_port1 -m state --state INVALID -j DROP +[0:0] -A %(bn)s-i_port1 -m state --state RELATED,ESTABLISHED -j RETURN +[0:0] -A %(bn)s-i_port1 -j %(bn)s-sg-fallback +[0:0] -A %(bn)s-FORWARD %(physdev_mod)s --physdev-EGRESS tap_port1 \ +%(physdev_is_bridged)s -j %(bn)s-sg-chain +[0:0] -A %(bn)s-sg-chain %(physdev_mod)s --physdev-EGRESS tap_port1 \ +%(physdev_is_bridged)s -j %(bn)s-o_port1 +[0:0] -A %(bn)s-INPUT %(physdev_mod)s --physdev-EGRESS tap_port1 \ +%(physdev_is_bridged)s -j 
%(bn)s-o_port1 +[0:0] -A %(bn)s-o_port1 -p icmpv6 -j RETURN +[0:0] -A %(bn)s-o_port1 -p udp -m udp --sport 546 --dport 547 -j RETURN +[0:0] -A %(bn)s-o_port1 -p udp -m udp --sport 547 --dport 546 -j DROP +[0:0] -A %(bn)s-o_port1 -m state --state INVALID -j DROP +[0:0] -A %(bn)s-o_port1 -m state --state RELATED,ESTABLISHED -j RETURN +[0:0] -A %(bn)s-o_port1 -j %(bn)s-sg-fallback +[0:0] -A %(bn)s-sg-chain -j ACCEPT +COMMIT +# Completed by iptables_manager +""" % IPTABLES_ARG + + +IPTABLES_ARG['chains'] = CHAINS_2 + +IPTABLES_FILTER_V6_2 = """# Generated by iptables_manager +*filter +:neutron-filter-top - [0:0] +:%(bn)s-(%(chains)s) - [0:0] +:%(bn)s-(%(chains)s) - [0:0] +:%(bn)s-(%(chains)s) - [0:0] +:%(bn)s-(%(chains)s) - [0:0] +:%(bn)s-(%(chains)s) - [0:0] +:%(bn)s-(%(chains)s) - [0:0] +:%(bn)s-(%(chains)s) - [0:0] +:%(bn)s-(%(chains)s) - [0:0] +:%(bn)s-(%(chains)s) - [0:0] +:%(bn)s-(%(chains)s) - [0:0] +[0:0] -A FORWARD -j neutron-filter-top +[0:0] -A OUTPUT -j neutron-filter-top +[0:0] -A neutron-filter-top -j %(bn)s-local +[0:0] -A INPUT -j %(bn)s-INPUT +[0:0] -A OUTPUT -j %(bn)s-OUTPUT +[0:0] -A FORWARD -j %(bn)s-FORWARD +[0:0] -A %(bn)s-sg-fallback -j DROP +[0:0] -A %(bn)s-FORWARD %(physdev_mod)s --physdev-INGRESS tap_port1 \ +%(physdev_is_bridged)s -j %(bn)s-sg-chain +[0:0] -A %(bn)s-sg-chain %(physdev_mod)s --physdev-INGRESS tap_port1 \ +%(physdev_is_bridged)s -j %(bn)s-i_port1 +[0:0] -A %(bn)s-i_port1 -p icmpv6 --icmpv6-type 130 -j RETURN +[0:0] -A %(bn)s-i_port1 -p icmpv6 --icmpv6-type 131 -j RETURN +[0:0] -A %(bn)s-i_port1 -p icmpv6 --icmpv6-type 132 -j RETURN +[0:0] -A %(bn)s-i_port1 -p icmpv6 --icmpv6-type 135 -j RETURN +[0:0] -A %(bn)s-i_port1 -p icmpv6 --icmpv6-type 136 -j RETURN +[0:0] -A %(bn)s-i_port1 -m state --state INVALID -j DROP +[0:0] -A %(bn)s-i_port1 -m state --state RELATED,ESTABLISHED -j RETURN +[0:0] -A %(bn)s-i_port1 -j %(bn)s-sg-fallback +[0:0] -A %(bn)s-FORWARD %(physdev_mod)s --physdev-EGRESS tap_port1 \ +%(physdev_is_bridged)s -j 
%(bn)s-sg-chain +[0:0] -A %(bn)s-sg-chain %(physdev_mod)s --physdev-EGRESS tap_port1 \ +%(physdev_is_bridged)s -j %(bn)s-o_port1 +[0:0] -A %(bn)s-INPUT %(physdev_mod)s --physdev-EGRESS tap_port1 \ +%(physdev_is_bridged)s -j %(bn)s-o_port1 +[0:0] -A %(bn)s-o_port1 -p icmpv6 -j RETURN +[0:0] -A %(bn)s-o_port1 -p udp -m udp --sport 546 --dport 547 -j RETURN +[0:0] -A %(bn)s-o_port1 -p udp -m udp --sport 547 --dport 546 -j DROP +[0:0] -A %(bn)s-o_port1 -m state --state INVALID -j DROP +[0:0] -A %(bn)s-o_port1 -m state --state RELATED,ESTABLISHED -j RETURN +[0:0] -A %(bn)s-o_port1 -j %(bn)s-sg-fallback +[0:0] -A %(bn)s-FORWARD %(physdev_mod)s --physdev-INGRESS tap_port2 \ +%(physdev_is_bridged)s -j %(bn)s-sg-chain +[0:0] -A %(bn)s-sg-chain %(physdev_mod)s --physdev-INGRESS tap_port2 \ +%(physdev_is_bridged)s -j %(bn)s-i_port2 +[0:0] -A %(bn)s-i_port2 -p icmpv6 --icmpv6-type 130 -j RETURN +[0:0] -A %(bn)s-i_port2 -p icmpv6 --icmpv6-type 131 -j RETURN +[0:0] -A %(bn)s-i_port2 -p icmpv6 --icmpv6-type 132 -j RETURN +[0:0] -A %(bn)s-i_port2 -p icmpv6 --icmpv6-type 135 -j RETURN +[0:0] -A %(bn)s-i_port2 -p icmpv6 --icmpv6-type 136 -j RETURN +[0:0] -A %(bn)s-i_port2 -m state --state INVALID -j DROP +[0:0] -A %(bn)s-i_port2 -m state --state RELATED,ESTABLISHED -j RETURN +[0:0] -A %(bn)s-i_port2 -j %(bn)s-sg-fallback +[0:0] -A %(bn)s-FORWARD %(physdev_mod)s --physdev-EGRESS tap_port2 \ +%(physdev_is_bridged)s -j %(bn)s-sg-chain +[0:0] -A %(bn)s-sg-chain %(physdev_mod)s --physdev-EGRESS tap_port2 \ +%(physdev_is_bridged)s -j %(bn)s-o_port2 +[0:0] -A %(bn)s-INPUT %(physdev_mod)s --physdev-EGRESS tap_port2 \ +%(physdev_is_bridged)s -j %(bn)s-o_port2 +[0:0] -A %(bn)s-o_port2 -p icmpv6 -j RETURN +[0:0] -A %(bn)s-o_port2 -p udp -m udp --sport 546 --dport 547 -j RETURN +[0:0] -A %(bn)s-o_port2 -p udp -m udp --sport 547 --dport 546 -j DROP +[0:0] -A %(bn)s-o_port2 -m state --state INVALID -j DROP +[0:0] -A %(bn)s-o_port2 -m state --state RELATED,ESTABLISHED -j RETURN +[0:0] -A 
%(bn)s-o_port2 -j %(bn)s-sg-fallback +[0:0] -A %(bn)s-sg-chain -j ACCEPT +COMMIT +# Completed by iptables_manager +""" % IPTABLES_ARG + +IPTABLES_ARG['chains'] = CHAINS_EMPTY +IPTABLES_FILTER_V6_EMPTY = """# Generated by iptables_manager +*filter +:neutron-filter-top - [0:0] +:%(bn)s-(%(chains)s) - [0:0] +:%(bn)s-(%(chains)s) - [0:0] +:%(bn)s-(%(chains)s) - [0:0] +:%(bn)s-(%(chains)s) - [0:0] +:%(bn)s-(%(chains)s) - [0:0] +:%(bn)s-(%(chains)s) - [0:0] +[0:0] -A FORWARD -j neutron-filter-top +[0:0] -A OUTPUT -j neutron-filter-top +[0:0] -A neutron-filter-top -j %(bn)s-local +[0:0] -A INPUT -j %(bn)s-INPUT +[0:0] -A OUTPUT -j %(bn)s-OUTPUT +[0:0] -A FORWARD -j %(bn)s-FORWARD +[0:0] -A %(bn)s-sg-fallback -j DROP +COMMIT +# Completed by iptables_manager +""" % IPTABLES_ARG + +FIREWALL_BASE_PACKAGE = 'neutron.agent.linux.iptables_firewall.' +FIREWALL_IPTABLES_DRIVER = FIREWALL_BASE_PACKAGE + 'IptablesFirewallDriver' +FIREWALL_HYBRID_DRIVER = (FIREWALL_BASE_PACKAGE + + 'OVSHybridIptablesFirewallDriver') +FIREWALL_NOOP_DRIVER = 'neutron.agent.firewall.NoopFirewallDriver' + + +def set_firewall_driver(firewall_driver): + cfg.CONF.set_override('firewall_driver', firewall_driver, + group='SECURITYGROUP') + + +class TestSecurityGroupAgentWithIptables(base.BaseTestCase): + FIREWALL_DRIVER = FIREWALL_IPTABLES_DRIVER + PHYSDEV_INGRESS = 'physdev-out' + PHYSDEV_EGRESS = 'physdev-in' + + def setUp(self, defer_refresh_firewall=False): + super(TestSecurityGroupAgentWithIptables, self).setUp() + config.register_root_helper(cfg.CONF) + cfg.CONF.set_override( + 'lock_path', + '$state_path/lock') + cfg.CONF.set_override( + 'firewall_driver', + self.FIREWALL_DRIVER, + group='SECURITYGROUP') + + self.agent = sg_rpc.SecurityGroupAgentRpcMixin() + self.agent.context = None + + self.root_helper = 'sudo' + self.agent.root_helper = 'sudo' + self.agent.init_firewall(defer_refresh_firewall=defer_refresh_firewall) + + self.iptables = self.agent.firewall.iptables + self.iptables_execute = 
mock.patch.object(self.iptables, + "execute").start() + self.iptables_execute_return_values = [] + self.expected_call_count = 0 + self.expected_calls = [] + self.expected_process_inputs = [] + self.iptables_execute.side_effect = self.iptables_execute_return_values + + self.rpc = mock.Mock() + self.agent.plugin_rpc = self.rpc + rule1 = [{'direction': 'ingress', + 'protocol': const.PROTO_NAME_UDP, + 'ethertype': const.IPv4, + 'source_ip_prefix': '10.0.0.2', + 'source_port_range_min': 67, + 'source_port_range_max': 67, + 'port_range_min': 68, + 'port_range_max': 68}, + {'direction': 'ingress', + 'protocol': const.PROTO_NAME_TCP, + 'ethertype': const.IPv4, + 'port_range_min': 22, + 'port_range_max': 22}, + {'direction': 'egress', + 'ethertype': const.IPv4}] + rule2 = rule1[:] + rule2 += [{'direction': 'ingress', + 'source_ip_prefix': '10.0.0.4', + 'ethertype': const.IPv4}] + rule3 = rule2[:] + rule3 += [{'direction': 'ingress', + 'protocol': const.PROTO_NAME_ICMP, + 'ethertype': const.IPv4}] + rule4 = rule1[:] + rule4 += [{'direction': 'ingress', + 'source_ip_prefix': '10.0.0.3', + 'ethertype': const.IPv4}] + rule5 = rule4[:] + rule5 += [{'direction': 'ingress', + 'protocol': const.PROTO_NAME_ICMP, + 'ethertype': const.IPv4}] + self.devices1 = {'tap_port1': self._device('tap_port1', + '10.0.0.3', + '12:34:56:78:9a:bc', + rule1)} + self.devices2 = {'tap_port1': self._device('tap_port1', + '10.0.0.3', + '12:34:56:78:9a:bc', + rule2), + 'tap_port2': self._device('tap_port2', + '10.0.0.4', + '12:34:56:78:9a:bd', + rule4)} + self.devices3 = {'tap_port1': self._device('tap_port1', + '10.0.0.3', + '12:34:56:78:9a:bc', + rule3), + 'tap_port2': self._device('tap_port2', + '10.0.0.4', + '12:34:56:78:9a:bd', + rule5)} + + def _device(self, device, ip, mac_address, rule): + return {'device': device, + 'fixed_ips': [ip], + 'mac_address': mac_address, + 'security_groups': ['security_group1'], + 'security_group_rules': rule, + 'security_group_source_groups': [ + 'security_group1']} + 
+ def _regex(self, value): + value = value.replace('physdev-INGRESS', self.PHYSDEV_INGRESS) + value = value.replace('physdev-EGRESS', self.PHYSDEV_EGRESS) + value = value.replace('\n', '\\n') + value = value.replace('[', '\[') + value = value.replace(']', '\]') + value = value.replace('*', '\*') + return value + + def _register_mock_call(self, *args, **kwargs): + return_value = kwargs.pop('return_value', None) + self.iptables_execute_return_values.append(return_value) + + has_process_input = 'process_input' in kwargs + process_input = kwargs.get('process_input') + self.expected_process_inputs.append((has_process_input, process_input)) + + if has_process_input: + kwargs['process_input'] = mock.ANY + self.expected_calls.append(mock.call(*args, **kwargs)) + self.expected_call_count += 1 + + def _verify_mock_calls(self): + self.assertEqual(self.expected_call_count, + self.iptables_execute.call_count) + self.iptables_execute.assert_has_calls(self.expected_calls) + + for i, expected in enumerate(self.expected_process_inputs): + check, expected_regex = expected + if not check: + continue + # The second or later arguments of self.iptables.execute + # are keyword parameter, so keyword argument is extracted by [1] + kwargs = self.iptables_execute.call_args_list[i][1] + self.assertThat(kwargs['process_input'], + matchers.MatchesRegex(expected_regex)) + + def _replay_iptables(self, v4_filter, v6_filter): + self._register_mock_call( + ['iptables-save', '-c'], + root_helper=self.root_helper, + return_value='') + self._register_mock_call( + ['iptables-restore', '-c'], + process_input=self._regex(IPTABLES_NAT + v4_filter), + root_helper=self.root_helper, + return_value='') + self._register_mock_call( + ['ip6tables-save', '-c'], + root_helper=self.root_helper, + return_value='') + self._register_mock_call( + ['ip6tables-restore', '-c'], + process_input=self._regex(v6_filter), + root_helper=self.root_helper, + return_value='') + + def test_prepare_remove_port(self): + 
self.rpc.security_group_rules_for_devices.return_value = self.devices1 + self._replay_iptables(IPTABLES_FILTER_1, IPTABLES_FILTER_V6_1) + self._replay_iptables(IPTABLES_FILTER_EMPTY, IPTABLES_FILTER_V6_EMPTY) + + self.agent.prepare_devices_filter(['tap_port1']) + self.agent.remove_devices_filter(['tap_port1']) + + self._verify_mock_calls() + + def test_security_group_member_updated(self): + self.rpc.security_group_rules_for_devices.return_value = self.devices1 + self._replay_iptables(IPTABLES_FILTER_1, IPTABLES_FILTER_V6_1) + self._replay_iptables(IPTABLES_FILTER_1_2, IPTABLES_FILTER_V6_1) + self._replay_iptables(IPTABLES_FILTER_2, IPTABLES_FILTER_V6_2) + self._replay_iptables(IPTABLES_FILTER_2_2, IPTABLES_FILTER_V6_2) + self._replay_iptables(IPTABLES_FILTER_1, IPTABLES_FILTER_V6_1) + self._replay_iptables(IPTABLES_FILTER_EMPTY, IPTABLES_FILTER_V6_EMPTY) + + self.agent.prepare_devices_filter(['tap_port1']) + self.rpc.security_group_rules_for_devices.return_value = self.devices2 + self.agent.security_groups_member_updated(['security_group1']) + self.agent.prepare_devices_filter(['tap_port2']) + self.rpc.security_group_rules_for_devices.return_value = self.devices1 + self.agent.security_groups_member_updated(['security_group1']) + self.agent.remove_devices_filter(['tap_port2']) + self.agent.remove_devices_filter(['tap_port1']) + + self._verify_mock_calls() + + def test_security_group_rule_updated(self): + self.rpc.security_group_rules_for_devices.return_value = self.devices2 + self._replay_iptables(IPTABLES_FILTER_2, IPTABLES_FILTER_V6_2) + self._replay_iptables(IPTABLES_FILTER_2_3, IPTABLES_FILTER_V6_2) + + self.agent.prepare_devices_filter(['tap_port1', 'tap_port3']) + self.rpc.security_group_rules_for_devices.return_value = self.devices3 + self.agent.security_groups_rule_updated(['security_group1']) + + self._verify_mock_calls() + + +class SGNotificationTestMixin(): + def test_security_group_rule_updated(self): + name = 'webservers' + description = 'my webservers' 
+ with self.security_group(name, description) as sg: + with self.security_group(name, description) as sg2: + security_group_id = sg['security_group']['id'] + direction = "ingress" + remote_group_id = sg2['security_group']['id'] + protocol = const.PROTO_NAME_TCP + port_range_min = 88 + port_range_max = 88 + with self.security_group_rule(security_group_id, direction, + protocol, port_range_min, + port_range_max, + remote_group_id=remote_group_id + ): + pass + self.notifier.assert_has_calls( + [mock.call.security_groups_rule_updated(mock.ANY, + [security_group_id]), + mock.call.security_groups_rule_updated(mock.ANY, + [security_group_id])]) + + def test_security_group_member_updated(self): + with self.network() as n: + with self.subnet(n): + with self.security_group() as sg: + security_group_id = sg['security_group']['id'] + res = self._create_port(self.fmt, n['network']['id']) + port = self.deserialize(self.fmt, res) + + data = {'port': {'fixed_ips': port['port']['fixed_ips'], + 'name': port['port']['name'], + ext_sg.SECURITYGROUPS: + [security_group_id]}} + + req = self.new_update_request('ports', data, + port['port']['id']) + res = self.deserialize(self.fmt, + req.get_response(self.api)) + self.assertEqual(res['port'][ext_sg.SECURITYGROUPS][0], + security_group_id) + self._delete('ports', port['port']['id']) + self.notifier.assert_has_calls( + [mock.call.security_groups_member_updated( + mock.ANY, [mock.ANY])]) + + +class TestSecurityGroupAgentWithOVSIptables( + TestSecurityGroupAgentWithIptables): + + FIREWALL_DRIVER = FIREWALL_HYBRID_DRIVER + + def _regex(self, value): + #Note(nati): tap is prefixed on the device + # in the OVSHybridIptablesFirewallDriver + + value = value.replace('tap_port', 'taptap_port') + value = value.replace('o_port', 'otap_port') + value = value.replace('i_port', 'itap_port') + value = value.replace('s_port', 'stap_port') + return super( + TestSecurityGroupAgentWithOVSIptables, + self)._regex(value) + + +class 
TestSecurityGroupExtensionControl(base.BaseTestCase): + def test_disable_security_group_extension_by_config(self): + cfg.CONF.set_override( + 'enable_security_group', False, + group='SECURITYGROUP') + exp_aliases = ['dummy1', 'dummy2'] + ext_aliases = ['dummy1', 'security-group', 'dummy2'] + sg_rpc.disable_security_group_extension_by_config(ext_aliases) + self.assertEqual(ext_aliases, exp_aliases) + + def test_enable_security_group_extension_by_config(self): + cfg.CONF.set_override( + 'enable_security_group', True, + group='SECURITYGROUP') + exp_aliases = ['dummy1', 'security-group', 'dummy2'] + ext_aliases = ['dummy1', 'security-group', 'dummy2'] + sg_rpc.disable_security_group_extension_by_config(ext_aliases) + self.assertEqual(ext_aliases, exp_aliases) + + def test_is_invalid_drvier_combination_sg_enabled(self): + cfg.CONF.set_override( + 'enable_security_group', True, + group='SECURITYGROUP') + cfg.CONF.set_override( + 'firewall_driver', 'neutron.agent.firewall.NoopFirewallDriver', + group='SECURITYGROUP') + self.assertFalse(sg_rpc._is_valid_driver_combination()) + + def test_is_invalid_drvier_combination_sg_enabled_with_none(self): + cfg.CONF.set_override( + 'enable_security_group', True, + group='SECURITYGROUP') + cfg.CONF.set_override( + 'firewall_driver', None, + group='SECURITYGROUP') + self.assertFalse(sg_rpc._is_valid_driver_combination()) + + def test_is_invalid_drvier_combination_sg_disabled(self): + cfg.CONF.set_override( + 'enable_security_group', False, + group='SECURITYGROUP') + cfg.CONF.set_override( + 'firewall_driver', 'NonNoopDriver', + group='SECURITYGROUP') + self.assertFalse(sg_rpc._is_valid_driver_combination()) + + def test_is_valid_drvier_combination_sg_enabled(self): + cfg.CONF.set_override( + 'enable_security_group', True, + group='SECURITYGROUP') + cfg.CONF.set_override( + 'firewall_driver', 'NonNoopDriver', + group='SECURITYGROUP') + self.assertTrue(sg_rpc._is_valid_driver_combination()) + + def 
test_is_valid_drvier_combination_sg_disabled(self): + cfg.CONF.set_override( + 'enable_security_group', False, + group='SECURITYGROUP') + cfg.CONF.set_override( + 'firewall_driver', 'neutron.agent.firewall.NoopFirewallDriver', + group='SECURITYGROUP') + self.assertTrue(sg_rpc._is_valid_driver_combination()) + + def test_is_valid_drvier_combination_sg_disabled_with_none(self): + cfg.CONF.set_override( + 'enable_security_group', False, + group='SECURITYGROUP') + cfg.CONF.set_override( + 'firewall_driver', None, + group='SECURITYGROUP') + self.assertTrue(sg_rpc._is_valid_driver_combination()) diff --git a/neutron/tests/unit/test_servicetype.py b/neutron/tests/unit/test_servicetype.py new file mode 100644 index 000000000..135c87b0b --- /dev/null +++ b/neutron/tests/unit/test_servicetype.py @@ -0,0 +1,241 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# Copyright 2012 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# +# @author: Salvatore Orlando, VMware +# + +import logging + +import mock +from oslo.config import cfg +import webob.exc as webexc +import webtest + +from neutron.api import extensions +from neutron.common import exceptions as n_exc +from neutron import context +from neutron.db import api as db_api +from neutron.db import servicetype_db as st_db +from neutron.extensions import servicetype +from neutron.plugins.common import constants +from neutron.services import provider_configuration as provconf +from neutron.tests import base +from neutron.tests.unit import dummy_plugin as dp +from neutron.tests.unit import test_api_v2 +from neutron.tests.unit import test_db_plugin +from neutron.tests.unit import test_extensions +from neutron.tests.unit import testlib_api + + +LOG = logging.getLogger(__name__) +DEFAULT_SERVICE_DEFS = [{'service_class': constants.DUMMY, + 'plugin': dp.DUMMY_PLUGIN_NAME}] + +_uuid = test_api_v2._uuid +_get_path = test_api_v2._get_path + + +class ServiceTypeManagerTestCase(base.BaseTestCase): + def setUp(self): + super(ServiceTypeManagerTestCase, self).setUp() + st_db.ServiceTypeManager._instance = None + self.manager = st_db.ServiceTypeManager.get_instance() + self.ctx = context.get_admin_context() + + def test_service_provider_driver_not_unique(self): + cfg.CONF.set_override('service_provider', + [constants.LOADBALANCER + + ':lbaas:driver'], + 'service_providers') + prov = {'service_type': constants.LOADBALANCER, + 'name': 'name2', + 'driver': 'driver', + 'default': False} + self.manager._load_conf() + self.assertRaises( + n_exc.Invalid, self.manager.conf.add_provider, prov) + + def test_get_service_providers(self): + cfg.CONF.set_override('service_provider', + [constants.LOADBALANCER + + ':lbaas:driver_path', + constants.DUMMY + ':dummy:dummy_dr'], + 'service_providers') + ctx = context.get_admin_context() + provconf.parse_service_provider_opt() + self.manager._load_conf() + res = self.manager.get_service_providers(ctx) + 
self.assertEqual(len(res), 2) + + res = self.manager.get_service_providers( + ctx, + filters=dict(service_type=[constants.DUMMY]) + ) + self.assertEqual(len(res), 1) + + res = self.manager.get_service_providers( + ctx, + filters=dict(service_type=[constants.LOADBALANCER]) + ) + self.assertEqual(len(res), 1) + + def test_multiple_default_providers_specified_for_service(self): + cfg.CONF.set_override('service_provider', + [constants.LOADBALANCER + + ':lbaas1:driver_path:default', + constants.LOADBALANCER + + ':lbaas2:driver_path:default'], + 'service_providers') + self.assertRaises(n_exc.Invalid, self.manager._load_conf) + + def test_get_default_provider(self): + cfg.CONF.set_override('service_provider', + [constants.LOADBALANCER + + ':lbaas1:driver_path:default', + constants.DUMMY + + ':lbaas2:driver_path2'], + 'service_providers') + self.manager._load_conf() + # can pass None as a context + p = self.manager.get_default_service_provider(None, + constants.LOADBALANCER) + self.assertEqual(p, {'service_type': constants.LOADBALANCER, + 'name': 'lbaas1', + 'driver': 'driver_path', + 'default': True}) + + self.assertRaises( + provconf.DefaultServiceProviderNotFound, + self.manager.get_default_service_provider, + None, constants.DUMMY + ) + + def test_add_resource_association(self): + cfg.CONF.set_override('service_provider', + [constants.LOADBALANCER + + ':lbaas1:driver_path:default', + constants.DUMMY + + ':lbaas2:driver_path2'], + 'service_providers') + self.manager._load_conf() + ctx = context.get_admin_context() + self.manager.add_resource_association(ctx, + constants.LOADBALANCER, + 'lbaas1', '123-123') + self.assertEqual(ctx.session. 
+ query(st_db.ProviderResourceAssociation).count(), + 1) + assoc = ctx.session.query(st_db.ProviderResourceAssociation).one() + ctx.session.delete(assoc) + + def test_invalid_resource_association(self): + cfg.CONF.set_override('service_provider', + [constants.LOADBALANCER + + ':lbaas1:driver_path:default', + constants.DUMMY + + ':lbaas2:driver_path2'], + 'service_providers') + self.manager._load_conf() + ctx = context.get_admin_context() + self.assertRaises(provconf.ServiceProviderNotFound, + self.manager.add_resource_association, + ctx, 'BLABLA_svc', 'name', '123-123') + + +class TestServiceTypeExtensionManager(object): + """Mock extensions manager.""" + def get_resources(self): + return (servicetype.Servicetype.get_resources() + + dp.Dummy.get_resources()) + + def get_actions(self): + return [] + + def get_request_extensions(self): + return [] + + +class ServiceTypeExtensionTestCaseBase(testlib_api.WebTestCase): + fmt = 'json' + + def setUp(self): + # This is needed because otherwise a failure will occur due to + # nonexisting core_plugin + self.setup_coreplugin(test_db_plugin.DB_PLUGIN_KLASS) + + cfg.CONF.set_override('service_plugins', + ["%s.%s" % (dp.__name__, + dp.DummyServicePlugin.__name__)]) + # Ensure existing ExtensionManager is not used + extensions.PluginAwareExtensionManager._instance = None + ext_mgr = TestServiceTypeExtensionManager() + self.ext_mdw = test_extensions.setup_extensions_middleware(ext_mgr) + self.api = webtest.TestApp(self.ext_mdw) + self.resource_name = servicetype.RESOURCE_NAME.replace('-', '_') + super(ServiceTypeExtensionTestCaseBase, self).setUp() + + +class ServiceTypeExtensionTestCase(ServiceTypeExtensionTestCaseBase): + + def setUp(self): + self._patcher = mock.patch( + "neutron.db.servicetype_db.ServiceTypeManager", + autospec=True) + self.mock_mgr = self._patcher.start() + self.mock_mgr.get_instance.return_value = self.mock_mgr.return_value + super(ServiceTypeExtensionTestCase, self).setUp() + + def 
test_service_provider_list(self): + instance = self.mock_mgr.return_value + + res = self.api.get(_get_path('service-providers', fmt=self.fmt)) + + instance.get_service_providers.assert_called_with(mock.ANY, + filters={}, + fields=[]) + self.assertEqual(res.status_int, webexc.HTTPOk.code) + + +class ServiceTypeExtensionTestCaseXML(ServiceTypeExtensionTestCase): + fmt = 'xml' + + +class ServiceTypeManagerExtTestCase(ServiceTypeExtensionTestCaseBase): + """Tests ServiceTypemanager as a public API.""" + def setUp(self): + # Blank out service type manager instance + st_db.ServiceTypeManager._instance = None + cfg.CONF.set_override('service_provider', + [constants.LOADBALANCER + + ':lbaas:driver_path', + constants.DUMMY + ':dummy:dummy_dr'], + 'service_providers') + self.addCleanup(db_api.clear_db) + super(ServiceTypeManagerExtTestCase, self).setUp() + + def _list_service_providers(self): + return self.api.get(_get_path('service-providers', fmt=self.fmt)) + + def test_list_service_providers(self): + res = self._list_service_providers() + self.assertEqual(res.status_int, webexc.HTTPOk.code) + data = self.deserialize(res) + self.assertIn('service_providers', data) + self.assertEqual(len(data['service_providers']), 2) + + +class ServiceTypeManagerExtTestCaseXML(ServiceTypeManagerExtTestCase): + fmt = 'xml' diff --git a/neutron/tests/unit/test_wsgi.py b/neutron/tests/unit/test_wsgi.py new file mode 100644 index 000000000..1de0d53f8 --- /dev/null +++ b/neutron/tests/unit/test_wsgi.py @@ -0,0 +1,1136 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import os +import socket +import urllib2 + +import mock +from oslo.config import cfg +import testtools +import webob +import webob.exc + +from neutron.api.v2 import attributes +from neutron.common import constants +from neutron.common import exceptions as exception +from neutron.tests import base +from neutron import wsgi + +CONF = cfg.CONF + +TEST_VAR_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), + '..', 'var')) + + +class TestWSGIServer(base.BaseTestCase): + """WSGI server tests.""" + + def test_start_random_port(self): + server = wsgi.Server("test_random_port") + server.start(None, 0, host="127.0.0.1") + self.assertNotEqual(0, server.port) + server.stop() + server.wait() + + @mock.patch('neutron.openstack.common.service.ProcessLauncher') + def test_start_multiple_workers(self, ProcessLauncher): + launcher = ProcessLauncher.return_value + + server = wsgi.Server("test_multiple_processes") + server.start(None, 0, host="127.0.0.1", workers=2) + launcher.running = True + launcher.launch_service.assert_called_once_with(server._server, + workers=2) + + server.stop() + self.assertFalse(launcher.running) + + server.wait() + launcher.wait.assert_called_once_with() + + def test_start_random_port_with_ipv6(self): + server = wsgi.Server("test_random_port") + server.start(None, 0, host="::1") + self.assertEqual("::1", server.host) + self.assertNotEqual(0, server.port) + server.stop() + server.wait() + + def test_ipv6_listen_called_with_scope(self): + server = wsgi.Server("test_app") + + with mock.patch.object(wsgi.eventlet, 'listen') as mock_listen: 
+ with mock.patch.object(socket, 'getaddrinfo') as mock_get_addr: + mock_get_addr.return_value = [ + (socket.AF_INET6, + socket.SOCK_STREAM, + socket.IPPROTO_TCP, + '', + ('fe80::204:acff:fe96:da87%eth0', 1234, 0, 2)) + ] + with mock.patch.object(server, 'pool') as mock_pool: + server.start(None, + 1234, + host="fe80::204:acff:fe96:da87%eth0") + + mock_get_addr.assert_called_once_with( + "fe80::204:acff:fe96:da87%eth0", + 1234, + socket.AF_UNSPEC, + socket.SOCK_STREAM + ) + + mock_listen.assert_called_once_with( + ('fe80::204:acff:fe96:da87%eth0', 1234, 0, 2), + family=socket.AF_INET6, + backlog=cfg.CONF.backlog + ) + + mock_pool.spawn.assert_has_calls([ + mock.call( + server._run, + None, + mock_listen.return_value) + ]) + + def test_app(self): + greetings = 'Hello, World!!!' + + def hello_world(env, start_response): + if env['PATH_INFO'] != '/': + start_response('404 Not Found', + [('Content-Type', 'text/plain')]) + return ['Not Found\r\n'] + start_response('200 OK', [('Content-Type', 'text/plain')]) + return [greetings] + + server = wsgi.Server("test_app") + server.start(hello_world, 0, host="127.0.0.1") + + response = urllib2.urlopen('http://127.0.0.1:%d/' % server.port) + self.assertEqual(greetings, response.read()) + + server.stop() + + +class SerializerTest(base.BaseTestCase): + def test_serialize_unknown_content_type(self): + """Verify that exception InvalidContentType is raised.""" + input_dict = {'servers': {'test': 'pass'}} + content_type = 'application/unknown' + serializer = wsgi.Serializer() + + self.assertRaises( + exception.InvalidContentType, serializer.serialize, + input_dict, content_type) + + def test_get_deserialize_handler_unknown_content_type(self): + """Verify that exception InvalidContentType is raised.""" + content_type = 'application/unknown' + serializer = wsgi.Serializer() + + self.assertRaises( + exception.InvalidContentType, + serializer.get_deserialize_handler, content_type) + + def test_serialize_content_type_json(self): + """Test 
serialize with content type json.""" + input_data = {'servers': ['test=pass']} + content_type = 'application/json' + serializer = wsgi.Serializer(default_xmlns="fake") + result = serializer.serialize(input_data, content_type) + + self.assertEqual('{"servers": ["test=pass"]}', result) + + def test_serialize_content_type_xml(self): + """Test serialize with content type xml.""" + input_data = {'servers': ['test=pass']} + content_type = 'application/xml' + serializer = wsgi.Serializer(default_xmlns="fake") + result = serializer.serialize(input_data, content_type) + expected = ( + '\n' + '' + 'test=pass' + ) + + self.assertEqual(expected, result) + + def test_deserialize_raise_bad_request(self): + """Test serialize verifies that exception is raises.""" + content_type = 'application/unknown' + data_string = 'test' + serializer = wsgi.Serializer(default_xmlns="fake") + + self.assertRaises( + webob.exc.HTTPBadRequest, + serializer.deserialize, data_string, content_type) + + def test_deserialize_json_content_type(self): + """Test Serializer.deserialize with content type json.""" + content_type = 'application/json' + data_string = '{"servers": ["test=pass"]}' + serializer = wsgi.Serializer(default_xmlns="fake") + result = serializer.deserialize(data_string, content_type) + + self.assertEqual({'body': {u'servers': [u'test=pass']}}, result) + + def test_deserialize_xml_content_type(self): + """Test deserialize with content type xml.""" + content_type = 'application/xml' + data_string = ( + '' + 'test=pass' + '' + ) + serializer = wsgi.Serializer( + default_xmlns="fake", metadata={'xmlns': 'fake'}) + result = serializer.deserialize(data_string, content_type) + expected = {'body': {'servers': {'server': 'test=pass'}}} + + self.assertEqual(expected, result) + + def test_deserialize_xml_content_type_with_meta(self): + """Test deserialize with content type xml with meta.""" + content_type = 'application/xml' + data_string = ( + '' + '' + 'passed' + '' + '' + ) + + metadata = 
{'plurals': {'servers': 'server'}, 'xmlns': 'fake'} + serializer = wsgi.Serializer( + default_xmlns="fake", metadata=metadata) + result = serializer.deserialize(data_string, content_type) + expected = {'body': {'servers': [{'name': 's1', 'test': 'passed'}]}} + + self.assertEqual(expected, result) + + def test_serialize_xml_root_key_is_dict(self): + """Test Serializer.serialize with content type xml with meta dict.""" + content_type = 'application/xml' + data = {'servers': {'network': (2, 3)}} + metadata = {'xmlns': 'fake'} + + serializer = wsgi.Serializer(default_xmlns="fake", metadata=metadata) + result = serializer.serialize(data, content_type) + result = result.replace('\n', '') + expected = ( + '' + '' + '(2, 3)' + ) + + self.assertEqual(result, expected) + + def test_serialize_xml_root_key_is_list(self): + """Test serialize with content type xml with meta list.""" + input_dict = {'servers': ['test=pass']} + content_type = 'application/xml' + metadata = {'application/xml': { + 'xmlns': 'fake'}} + serializer = wsgi.Serializer(default_xmlns="fake", metadata=metadata) + result = serializer.serialize(input_dict, content_type) + result = result.replace('\n', '').replace(' ', '') + expected = ( + '' + '' + 'test=pass' + ) + + self.assertEqual(result, expected) + + def test_serialize_xml_root_is_None(self): + input_dict = {'test': 'pass'} + content_type = 'application/xml' + serializer = wsgi.Serializer(default_xmlns="fake") + result = serializer.serialize(input_dict, content_type) + result = result.replace('\n', '').replace(' ', '') + expected = ( + '' + '' + 'pass' + ) + + self.assertEqual(result, expected) + + +class RequestDeserializerTest(testtools.TestCase): + def setUp(self): + super(RequestDeserializerTest, self).setUp() + + class JSONDeserializer(object): + def deserialize(self, data, action='default'): + return 'pew_json' + + class XMLDeserializer(object): + def deserialize(self, data, action='default'): + return 'pew_xml' + + self.body_deserializers = { + 
'application/json': JSONDeserializer(), + 'application/xml': XMLDeserializer()} + + self.deserializer = wsgi.RequestDeserializer(self.body_deserializers) + + def test_get_deserializer(self): + """Test RequestDeserializer.get_body_deserializer.""" + expected_json_serializer = self.deserializer.get_body_deserializer( + 'application/json') + expected_xml_serializer = self.deserializer.get_body_deserializer( + 'application/xml') + + self.assertEqual( + expected_json_serializer, + self.body_deserializers['application/json']) + self.assertEqual( + expected_xml_serializer, + self.body_deserializers['application/xml']) + + def test_get_expected_content_type(self): + """Test RequestDeserializer.get_expected_content_type.""" + request = wsgi.Request.blank('/') + request.headers['Accept'] = 'application/json' + + self.assertEqual( + self.deserializer.get_expected_content_type(request), + 'application/json') + + def test_get_action_args(self): + """Test RequestDeserializer.get_action_args.""" + env = { + 'wsgiorg.routing_args': [None, { + 'controller': None, + 'format': None, + 'action': 'update', + 'id': 12}]} + expected = {'action': 'update', 'id': 12} + + self.assertEqual( + self.deserializer.get_action_args(env), expected) + + def test_deserialize(self): + """Test RequestDeserializer.deserialize.""" + with mock.patch.object( + self.deserializer, 'get_action_args') as mock_method: + mock_method.return_value = {'action': 'create'} + request = wsgi.Request.blank('/') + request.headers['Accept'] = 'application/xml' + deserialized = self.deserializer.deserialize(request) + expected = ('create', {}, 'application/xml') + + self.assertEqual(expected, deserialized) + + def test_get_body_deserializer_unknown_content_type(self): + """Verify that exception InvalidContentType is raised.""" + content_type = 'application/unknown' + deserializer = wsgi.RequestDeserializer() + self.assertRaises( + exception.InvalidContentType, + deserializer.get_body_deserializer, content_type) + + +class 
ResponseSerializerTest(testtools.TestCase): + def setUp(self): + super(ResponseSerializerTest, self).setUp() + + class JSONSerializer(object): + def serialize(self, data, action='default'): + return 'pew_json' + + class XMLSerializer(object): + def serialize(self, data, action='default'): + return 'pew_xml' + + class HeadersSerializer(object): + def serialize(self, response, data, action): + response.status_int = 404 + + self.body_serializers = { + 'application/json': JSONSerializer(), + 'application/xml': XMLSerializer()} + + self.serializer = wsgi.ResponseSerializer( + self.body_serializers, HeadersSerializer()) + + def test_serialize_unknown_content_type(self): + """Verify that exception InvalidContentType is raised.""" + self.assertRaises( + exception.InvalidContentType, + self.serializer.serialize, + {}, 'application/unknown') + + def test_get_body_serializer(self): + """Verify that exception InvalidContentType is raised.""" + self.assertRaises( + exception.InvalidContentType, + self.serializer.get_body_serializer, 'application/unknown') + + def test_get_serializer(self): + """Test ResponseSerializer.get_body_serializer.""" + content_type = 'application/json' + self.assertEqual( + self.serializer.get_body_serializer(content_type), + self.body_serializers[content_type]) + + def test_serialize_json_response(self): + response = self.serializer.serialize({}, 'application/json') + + self.assertEqual(response.headers['Content-Type'], 'application/json') + self.assertEqual(response.body, 'pew_json') + self.assertEqual(response.status_int, 404) + + def test_serialize_xml_response(self): + response = self.serializer.serialize({}, 'application/xml') + + self.assertEqual(response.headers['Content-Type'], 'application/xml') + self.assertEqual(response.body, 'pew_xml') + self.assertEqual(response.status_int, 404) + + def test_serialize_response_None(self): + response = self.serializer.serialize( + None, 'application/json') + + 
self.assertEqual(response.headers['Content-Type'], 'application/json') + self.assertEqual(response.body, '') + self.assertEqual(response.status_int, 404) + + +class RequestTest(base.BaseTestCase): + + def test_content_type_missing(self): + request = wsgi.Request.blank('/tests/123', method='POST') + request.body = "" + + self.assertIsNone(request.get_content_type()) + + def test_content_type_unsupported(self): + request = wsgi.Request.blank('/tests/123', method='POST') + request.headers["Content-Type"] = "text/html" + request.body = "fake
" + + self.assertIsNone(request.get_content_type()) + + def test_content_type_with_charset(self): + request = wsgi.Request.blank('/tests/123') + request.headers["Content-Type"] = "application/json; charset=UTF-8" + result = request.get_content_type() + + self.assertEqual(result, "application/json") + + def test_content_type_with_given_content_types(self): + request = wsgi.Request.blank('/tests/123') + request.headers["Content-Type"] = "application/new-type;" + + self.assertIsNone(request.get_content_type()) + + def test_content_type_from_accept(self): + request = wsgi.Request.blank('/tests/123') + request.headers["Accept"] = "application/xml" + result = request.best_match_content_type() + + self.assertEqual(result, "application/xml") + + request = wsgi.Request.blank('/tests/123') + request.headers["Accept"] = "application/json" + result = request.best_match_content_type() + + self.assertEqual(result, "application/json") + + request = wsgi.Request.blank('/tests/123') + request.headers["Accept"] = "application/xml, application/json" + result = request.best_match_content_type() + + self.assertEqual(result, "application/json") + + request = wsgi.Request.blank('/tests/123') + request.headers["Accept"] = ("application/json; q=0.3, " + "application/xml; q=0.9") + result = request.best_match_content_type() + + self.assertEqual(result, "application/xml") + + def test_content_type_from_query_extension(self): + request = wsgi.Request.blank('/tests/123.xml') + result = request.best_match_content_type() + + self.assertEqual(result, "application/xml") + + request = wsgi.Request.blank('/tests/123.json') + result = request.best_match_content_type() + + self.assertEqual(result, "application/json") + + request = wsgi.Request.blank('/tests/123.invalid') + result = request.best_match_content_type() + + self.assertEqual(result, "application/json") + + def test_content_type_accept_and_query_extension(self): + request = wsgi.Request.blank('/tests/123.xml') + request.headers["Accept"] = 
"application/json" + result = request.best_match_content_type() + + self.assertEqual(result, "application/xml") + + def test_content_type_accept_default(self): + request = wsgi.Request.blank('/tests/123.unsupported') + request.headers["Accept"] = "application/unsupported1" + result = request.best_match_content_type() + + self.assertEqual(result, "application/json") + + def test_content_type_accept_with_given_content_types(self): + request = wsgi.Request.blank('/tests/123') + request.headers["Accept"] = "application/new_type" + result = request.best_match_content_type() + + self.assertEqual(result, 'application/json') + + +class ActionDispatcherTest(base.BaseTestCase): + def test_dispatch(self): + """Test ActionDispatcher.dispatch.""" + serializer = wsgi.ActionDispatcher() + serializer.create = lambda x: x + + self.assertEqual( + serializer.dispatch('pants', action='create'), + 'pants') + + def test_dispatch_action_None(self): + """Test ActionDispatcher.dispatch with none action.""" + serializer = wsgi.ActionDispatcher() + serializer.create = lambda x: x + ' pants' + serializer.default = lambda x: x + ' trousers' + + self.assertEqual( + serializer.dispatch('Two', action=None), + 'Two trousers') + + def test_dispatch_default(self): + serializer = wsgi.ActionDispatcher() + serializer.create = lambda x: x + ' pants' + serializer.default = lambda x: x + ' trousers' + + self.assertEqual( + serializer.dispatch('Two', action='update'), + 'Two trousers') + + +class ResponseHeadersSerializerTest(base.BaseTestCase): + def test_default(self): + serializer = wsgi.ResponseHeaderSerializer() + response = webob.Response() + serializer.serialize(response, {'v': '123'}, 'fake') + + self.assertEqual(response.status_int, 200) + + def test_custom(self): + class Serializer(wsgi.ResponseHeaderSerializer): + def update(self, response, data): + response.status_int = 404 + response.headers['X-Custom-Header'] = data['v'] + serializer = Serializer() + response = webob.Response() + 
serializer.serialize(response, {'v': '123'}, 'update') + + self.assertEqual(response.status_int, 404) + self.assertEqual(response.headers['X-Custom-Header'], '123') + + +class DictSerializerTest(base.BaseTestCase): + + def test_dispatch_default(self): + serializer = wsgi.DictSerializer() + self.assertEqual( + serializer.serialize({}, 'NonExistentAction'), '') + + +class JSONDictSerializerTest(base.BaseTestCase): + + def test_json(self): + input_dict = dict(servers=dict(a=(2, 3))) + expected_json = '{"servers":{"a":[2,3]}}' + serializer = wsgi.JSONDictSerializer() + result = serializer.serialize(input_dict) + result = result.replace('\n', '').replace(' ', '') + + self.assertEqual(result, expected_json) + + def test_json_with_utf8(self): + input_dict = dict(servers=dict(a=(2, '\xe7\xbd\x91\xe7\xbb\x9c'))) + expected_json = '{"servers":{"a":[2,"\\u7f51\\u7edc"]}}' + serializer = wsgi.JSONDictSerializer() + result = serializer.serialize(input_dict) + result = result.replace('\n', '').replace(' ', '') + + self.assertEqual(result, expected_json) + + def test_json_with_unicode(self): + input_dict = dict(servers=dict(a=(2, u'\u7f51\u7edc'))) + expected_json = '{"servers":{"a":[2,"\\u7f51\\u7edc"]}}' + serializer = wsgi.JSONDictSerializer() + result = serializer.serialize(input_dict) + result = result.replace('\n', '').replace(' ', '') + + self.assertEqual(result, expected_json) + + +class TextDeserializerTest(base.BaseTestCase): + + def test_dispatch_default(self): + deserializer = wsgi.TextDeserializer() + self.assertEqual( + deserializer.deserialize({}, 'update'), {}) + + +class JSONDeserializerTest(base.BaseTestCase): + def test_json(self): + data = """{"a": { + "a1": "1", + "a2": "2", + "bs": ["1", "2", "3", {"c": {"c1": "1"}}], + "d": {"e": "1"}, + "f": "1"}}""" + as_dict = { + 'body': { + 'a': { + 'a1': '1', + 'a2': '2', + 'bs': ['1', '2', '3', {'c': {'c1': '1'}}], + 'd': {'e': '1'}, + 'f': '1'}}} + deserializer = wsgi.JSONDeserializer() + self.assertEqual( + 
deserializer.deserialize(data), as_dict) + + def test_default_raise_Malformed_Exception(self): + """Test JsonDeserializer.default. + + Test verifies JsonDeserializer.default raises exception + MalformedRequestBody correctly. + """ + data_string = "" + deserializer = wsgi.JSONDeserializer() + + self.assertRaises( + exception.MalformedRequestBody, deserializer.default, data_string) + + def test_json_with_utf8(self): + data = '{"a": "\xe7\xbd\x91\xe7\xbb\x9c"}' + as_dict = {'body': {'a': u'\u7f51\u7edc'}} + deserializer = wsgi.JSONDeserializer() + self.assertEqual( + deserializer.deserialize(data), as_dict) + + def test_json_with_unicode(self): + data = '{"a": "\u7f51\u7edc"}' + as_dict = {'body': {'a': u'\u7f51\u7edc'}} + deserializer = wsgi.JSONDeserializer() + self.assertEqual( + deserializer.deserialize(data), as_dict) + + +class XMLDeserializerTest(base.BaseTestCase): + def test_xml_empty(self): + xml = '' + as_dict = {'body': {'a': ''}} + deserializer = wsgi.XMLDeserializer() + + self.assertEqual( + deserializer.deserialize(xml), as_dict) + + def test_initialization(self): + xml = 'test' + deserializer = wsgi.XMLDeserializer() + + self.assertEqual( + {'body': {u'a': {u'b': u'test'}}}, deserializer(xml)) + + def test_default_raise_Malformed_Exception(self): + """Verify that exception MalformedRequestBody is raised.""" + data_string = "" + deserializer = wsgi.XMLDeserializer() + + self.assertRaises( + exception.MalformedRequestBody, deserializer.default, data_string) + + def test_xml_with_utf8(self): + xml = '\xe7\xbd\x91\xe7\xbb\x9c' + as_dict = {'body': {'a': u'\u7f51\u7edc'}} + deserializer = wsgi.XMLDeserializer() + + self.assertEqual( + deserializer.deserialize(xml), as_dict) + + +class RequestHeadersDeserializerTest(base.BaseTestCase): + + def test_default(self): + deserializer = wsgi.RequestHeadersDeserializer() + req = wsgi.Request.blank('/') + + self.assertEqual( + deserializer.deserialize(req, 'nonExistent'), {}) + + def test_custom(self): + class 
Deserializer(wsgi.RequestHeadersDeserializer): + def update(self, request): + return {'a': request.headers['X-Custom-Header']} + deserializer = Deserializer() + req = wsgi.Request.blank('/') + req.headers['X-Custom-Header'] = 'b' + self.assertEqual( + deserializer.deserialize(req, 'update'), {'a': 'b'}) + + +class ResourceTest(base.BaseTestCase): + def test_dispatch(self): + class Controller(object): + def index(self, request, index=None): + return index + + def my_fault_body_function(): + return 'off' + + resource = wsgi.Resource(Controller(), my_fault_body_function) + actual = resource.dispatch( + resource.controller, 'index', action_args={'index': 'off'}) + expected = 'off' + + self.assertEqual(actual, expected) + + def test_dispatch_unknown_controller_action(self): + class Controller(object): + def index(self, request, pants=None): + return pants + + def my_fault_body_function(): + return 'off' + + resource = wsgi.Resource(Controller(), my_fault_body_function) + self.assertRaises( + AttributeError, resource.dispatch, + resource.controller, 'create', {}) + + def test_malformed_request_body_throws_bad_request(self): + def my_fault_body_function(): + return 'off' + + resource = wsgi.Resource(None, my_fault_body_function) + request = wsgi.Request.blank( + "/", body="{mal:formed", method='POST', + headers={'Content-Type': "application/json"}) + + response = resource(request) + self.assertEqual(response.status_int, 400) + + def test_wrong_content_type_throws_unsupported_media_type_error(self): + def my_fault_body_function(): + return 'off' + resource = wsgi.Resource(None, my_fault_body_function) + request = wsgi.Request.blank( + "/", body="{some:json}", method='POST', + headers={'Content-Type': "xxx"}) + + response = resource(request) + self.assertEqual(response.status_int, 400) + + def test_wrong_content_type_server_error(self): + def my_fault_body_function(): + return 'off' + resource = wsgi.Resource(None, my_fault_body_function) + request = wsgi.Request.blank( + 
"/", method='POST', headers={'Content-Type': "unknow"}) + + response = resource(request) + self.assertEqual(response.status_int, 500) + + def test_call_resource_class_bad_request(self): + class Controller(object): + def index(self, request, index=None): + return index + + def my_fault_body_function(): + return 'off' + + class FakeRequest(): + def __init__(self): + self.url = 'http://where.no' + self.environ = 'environ' + self.body = 'body' + + def method(self): + pass + + def best_match_content_type(self): + return 'best_match_content_type' + + resource = wsgi.Resource(Controller(), my_fault_body_function) + request = FakeRequest() + result = resource(request) + self.assertEqual(400, result.status_int) + + def test_type_error(self): + class Controller(object): + def index(self, request, index=None): + return index + + def my_fault_body_function(): + return 'off' + resource = wsgi.Resource(Controller(), my_fault_body_function) + request = wsgi.Request.blank( + "/", method='POST', headers={'Content-Type': "xml"}) + + response = resource.dispatch( + request, action='index', action_args='test') + self.assertEqual(400, response.status_int) + + def test_call_resource_class_internal_error(self): + class Controller(object): + def index(self, request, index=None): + return index + + def my_fault_body_function(): + return 'off' + + class FakeRequest(): + def __init__(self): + self.url = 'http://where.no' + self.environ = 'environ' + self.body = '{"Content-Type": "xml"}' + + def method(self): + pass + + def best_match_content_type(self): + return 'application/json' + + resource = wsgi.Resource(Controller(), my_fault_body_function) + request = FakeRequest() + result = resource(request) + self.assertEqual(500, result.status_int) + + +class MiddlewareTest(base.BaseTestCase): + def test_process_response(self): + def application(environ, start_response): + response = 'Success' + return response + response = application('test', 'fake') + result = 
wsgi.Middleware(application).process_response(response) + self.assertEqual('Success', result) + + +class FaultTest(base.BaseTestCase): + def test_call_fault(self): + class MyException(object): + status_int = 415 + explanation = 'test' + + my_exceptions = MyException() + my_fault = wsgi.Fault(exception=my_exceptions) + request = wsgi.Request.blank( + "/", method='POST', headers={'Content-Type': "unknow"}) + response = my_fault(request) + self.assertEqual(415, response.status_int) + + +class XMLDictSerializerTest(base.BaseTestCase): + def test_xml(self): + NETWORK = {'network': {'test': None, + 'tenant_id': 'test-tenant', + 'name': 'net1', + 'admin_state_up': True, + 'subnets': [], + 'dict': {}, + 'int': 3, + 'long': 4L, + 'float': 5.0, + 'prefix:external': True, + 'tests': [{'test1': 'value1'}, + {'test2': 2, 'test3': 3}]}} + # XML is: + # + # # Empty List + # 3 # Integer text + # 4 # Long text + # 5.0 # Float text + # # Empty Dict + # net1 + # True # Bool + # # None + # test-tenant + # # We must have a namespace defined in root for prefix:external + # True + # # List + # value1 + # 3 + # 2 + # + # + + metadata = attributes.get_attr_metadata() + ns = {'prefix': 'http://xxxx.yy.com'} + metadata[constants.EXT_NS] = ns + metadata['plurals'] = {'tests': 'test'} + serializer = wsgi.XMLDictSerializer(metadata) + result = serializer.serialize(NETWORK) + deserializer = wsgi.XMLDeserializer(metadata) + new_net = deserializer.deserialize(result)['body'] + self.assertEqual(NETWORK, new_net) + + def test_None(self): + data = None + # Since it is None, we use xsi:nil='true'. + # In addition, we use an + # virtual XML root _v_root to wrap the XML doc. 
+ # XML is: + # <_v_root xsi:nil="true" + # xmlns="http://openstack.org/quantum/api/v2.0" + # xmlns:quantum="http://openstack.org/quantum/api/v2.0" + # xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" /> + serializer = wsgi.XMLDictSerializer(attributes.get_attr_metadata()) + result = serializer.serialize(data) + deserializer = wsgi.XMLDeserializer(attributes.get_attr_metadata()) + new_data = deserializer.deserialize(result)['body'] + self.assertIsNone(new_data) + + def test_empty_dic_xml(self): + data = {} + # Since it is an empty dict, we use quantum:type='dict' and + # an empty XML element to represent it. In addition, we use an + # virtual XML root _v_root to wrap the XML doc. + # XML is: + # <_v_root quantum:type="dict" + # xmlns="http://openstack.org/quantum/api/v2.0" + # xmlns:quantum="http://openstack.org/quantum/api/v2.0" + # xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" /> + serializer = wsgi.XMLDictSerializer(attributes.get_attr_metadata()) + result = serializer.serialize(data) + deserializer = wsgi.XMLDeserializer(attributes.get_attr_metadata()) + new_data = deserializer.deserialize(result)['body'] + self.assertEqual(data, new_data) + + def test_non_root_one_item_dic_xml(self): + data = {'test1': 1} + # We have a key in this dict, and its value is an integer. + # XML is: + # + # 1 + + serializer = wsgi.XMLDictSerializer(attributes.get_attr_metadata()) + result = serializer.serialize(data) + deserializer = wsgi.XMLDeserializer(attributes.get_attr_metadata()) + new_data = deserializer.deserialize(result)['body'] + self.assertEqual(data, new_data) + + def test_non_root_two_items_dic_xml(self): + data = {'test1': 1, 'test2': '2'} + # We have no root element in this data, We will use a virtual + # root element _v_root to wrap the doct. 
+ # The XML is: + # <_v_root xmlns="http://openstack.org/quantum/api/v2.0" + # xmlns:quantum="http://openstack.org/quantum/api/v2.0" + # xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"> + # 12 + # + + serializer = wsgi.XMLDictSerializer(attributes.get_attr_metadata()) + result = serializer.serialize(data) + deserializer = wsgi.XMLDeserializer(attributes.get_attr_metadata()) + new_data = deserializer.deserialize(result)['body'] + self.assertEqual(data, new_data) + + def test_xml_root_key_is_list(self): + input_dict = {'servers': ['test-pass']} + serializer = wsgi.XMLDictSerializer(xmlns="fake") + result = serializer.default(input_dict) + result = result.replace('\n', '').replace(' ', '') + expected = ( + '' + '' + 'test-pass' + ) + + self.assertEqual(result, expected) + + def test_xml_meta_contains_node_name_list(self): + input_dict = {'servers': ['test-pass']} + servers = {'nodename': 'test', + 'item_name': 'test', + 'item_key': 'test'} + metadata = {'list_collections': {'servers': servers}} + serializer = wsgi.XMLDictSerializer(xmlns="fake", metadata=metadata) + result = serializer.default(input_dict) + result = result.replace('\n', '').replace(' ', '') + expected = ( + '' + '' + 'test-pass' + ) + + self.assertEqual(result, expected) + + def test_xml_meta_contains_node_name_dict(self): + input_dict = {'servers': {'a': {'2': '3'}}} + servers = {'servers': { + 'nodename': 'test', + 'item_name': 'test', + 'item_key': 'test'}} + metadata = {'dict_collections': servers} + serializer = wsgi.XMLDictSerializer(xmlns="fake", metadata=metadata) + result = serializer.default(input_dict) + result = result.replace('\n', '').replace(' ', '') + expected = ( + '' + '' + '<2>3' + ) + + self.assertEqual(result, expected) + + def test_call(self): + data = {'servers': {'a': {'2': '3'}}} + serializer = wsgi.XMLDictSerializer() + expected = ( + '' + '' + '<2>3' + ) + result = serializer(data) + result = result.replace('\n', '').replace(' ', '') + self.assertEqual(expected, 
result) + + def test_xml_with_utf8(self): + data = {'servers': '\xe7\xbd\x91\xe7\xbb\x9c'} + serializer = wsgi.XMLDictSerializer() + expected = ( + '' + '' + '\xe7\xbd\x91\xe7\xbb\x9c' + ) + result = serializer(data) + result = result.replace('\n', '').replace(' ', '') + self.assertEqual(expected, result) + + def test_xml_with_unicode(self): + data = {'servers': u'\u7f51\u7edc'} + serializer = wsgi.XMLDictSerializer() + expected = ( + '' + '' + '\xe7\xbd\x91\xe7\xbb\x9c' + ) + result = serializer(data) + result = result.replace('\n', '').replace(' ', '') + self.assertEqual(expected, result) + + +class TestWSGIServerWithSSL(base.BaseTestCase): + """WSGI server tests.""" + + def test_app_using_ssl(self): + CONF.set_default('use_ssl', True) + CONF.set_default("ssl_cert_file", + os.path.join(TEST_VAR_DIR, 'certificate.crt')) + CONF.set_default("ssl_key_file", + os.path.join(TEST_VAR_DIR, 'privatekey.key')) + + greetings = 'Hello, World!!!' + + @webob.dec.wsgify + def hello_world(req): + return greetings + + server = wsgi.Server("test_app") + server.start(hello_world, 0, host="127.0.0.1") + + response = urllib2.urlopen('https://127.0.0.1:%d/' % server.port) + self.assertEqual(greetings, response.read()) + + server.stop() + + def test_app_using_ssl_combined_cert_and_key(self): + CONF.set_default('use_ssl', True) + CONF.set_default("ssl_cert_file", + os.path.join(TEST_VAR_DIR, 'certandkey.pem')) + + greetings = 'Hello, World!!!' 
+ + @webob.dec.wsgify + def hello_world(req): + return greetings + + server = wsgi.Server("test_app") + server.start(hello_world, 0, host="127.0.0.1") + + response = urllib2.urlopen('https://127.0.0.1:%d/' % server.port) + self.assertEqual(greetings, response.read()) + + server.stop() + + def test_app_using_ipv6_and_ssl(self): + CONF.set_default('use_ssl', True) + CONF.set_default("ssl_cert_file", + os.path.join(TEST_VAR_DIR, 'certificate.crt')) + CONF.set_default("ssl_key_file", + os.path.join(TEST_VAR_DIR, 'privatekey.key')) + + greetings = 'Hello, World!!!' + + @webob.dec.wsgify + def hello_world(req): + return greetings + + server = wsgi.Server("test_app") + server.start(hello_world, 0, host="::1") + + response = urllib2.urlopen('https://[::1]:%d/' % server.port) + self.assertEqual(greetings, response.read()) + + server.stop() diff --git a/neutron/tests/unit/testlib_api.py b/neutron/tests/unit/testlib_api.py new file mode 100644 index 000000000..1407ab0b8 --- /dev/null +++ b/neutron/tests/unit/testlib_api.py @@ -0,0 +1,84 @@ +# Copyright 2012 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import testtools + +from neutron.api.v2 import attributes +from neutron.tests import base +from neutron import wsgi + + +class ExpectedException(testtools.ExpectedException): + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, traceback): + if super(ExpectedException, self).__exit__(exc_type, + exc_value, + traceback): + self.exception = exc_value + return True + return False + + +def create_request(path, body, content_type, method='GET', + query_string=None, context=None): + if query_string: + url = "%s?%s" % (path, query_string) + else: + url = path + req = wsgi.Request.blank(url) + req.method = method + req.headers = {} + req.headers['Accept'] = content_type + req.body = body + if context: + req.environ['neutron.context'] = context + return req + + +class WebTestCase(base.BaseTestCase): + fmt = 'json' + + def setUp(self): + super(WebTestCase, self).setUp() + json_deserializer = wsgi.JSONDeserializer() + xml_deserializer = wsgi.XMLDeserializer( + attributes.get_attr_metadata()) + self._deserializers = { + 'application/json': json_deserializer, + 'application/xml': xml_deserializer, + } + + def deserialize(self, response): + ctype = 'application/%s' % self.fmt + data = self._deserializers[ctype].deserialize(response.body)['body'] + return data + + def serialize(self, data): + ctype = 'application/%s' % self.fmt + result = wsgi.Serializer( + attributes.get_attr_metadata()).serialize(data, ctype) + return result + + +class SubDictMatch(object): + + def __init__(self, sub_dict): + self.sub_dict = sub_dict + + def __eq__(self, super_dict): + return all(item in super_dict.items() + for item in self.sub_dict.items()) diff --git a/neutron/tests/unit/vmware/__init__.py b/neutron/tests/unit/vmware/__init__.py new file mode 100644 index 000000000..df9667860 --- /dev/null +++ b/neutron/tests/unit/vmware/__init__.py @@ -0,0 +1,53 @@ +# Copyright 2013 OpenStack Foundation. +# +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import os + +from neutron.plugins.vmware.api_client import client as nsx_client +from neutron.plugins.vmware.api_client import eventlet_client +from neutron.plugins.vmware import extensions +import neutron.plugins.vmware.plugin as neutron_plugin +from neutron.plugins.vmware.vshield.common import VcnsApiClient as vcnsapi +from neutron.plugins.vmware.vshield import vcns +import neutron.plugins.vmware.vshield.vcns_driver as vcnsdriver + + +plugin = neutron_plugin.NsxPlugin +service_plugin = neutron_plugin.NsxServicePlugin +api_client = nsx_client.NsxApiClient +evt_client = eventlet_client.EventletApiClient +vcns_class = vcns.Vcns +vcns_driver = vcnsdriver.VcnsDriver +vcns_api_helper = vcnsapi.VcnsApiHelper + +STUBS_PATH = os.path.join(os.path.dirname(__file__), 'etc') +NSXEXT_PATH = os.path.dirname(extensions.__file__) +NSXAPI_NAME = '%s.%s' % (api_client.__module__, api_client.__name__) +PLUGIN_NAME = '%s.%s' % (plugin.__module__, plugin.__name__) +SERVICE_PLUGIN_NAME = '%s.%s' % (service_plugin.__module__, + service_plugin.__name__) +CLIENT_NAME = '%s.%s' % (evt_client.__module__, evt_client.__name__) +VCNS_NAME = '%s.%s' % (vcns_class.__module__, vcns_class.__name__) +VCNS_DRIVER_NAME = '%s.%s' % (vcns_driver.__module__, vcns_driver.__name__) +VCNSAPI_NAME = '%s.%s' % (vcns_api_helper.__module__, vcns_api_helper.__name__) + + +def get_fake_conf(filename): + return os.path.join(STUBS_PATH, filename) + + +def 
nsx_method(method_name, module_name='nsxlib'): + return '%s.%s.%s' % ('neutron.plugins.vmware', module_name, method_name) diff --git a/neutron/tests/unit/vmware/apiclient/__init__.py b/neutron/tests/unit/vmware/apiclient/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/neutron/tests/unit/vmware/apiclient/fake.py b/neutron/tests/unit/vmware/apiclient/fake.py new file mode 100644 index 000000000..eb1f62f49 --- /dev/null +++ b/neutron/tests/unit/vmware/apiclient/fake.py @@ -0,0 +1,660 @@ +# Copyright 2012 VMware, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import six.moves.urllib.parse as urlparse + +from neutron.openstack.common import jsonutils as json +from neutron.openstack.common import log as logging +from neutron.openstack.common import uuidutils +from neutron.plugins.vmware.api_client import exception as api_exc + + +LOG = logging.getLogger(__name__) +MAX_NAME_LEN = 40 + + +def _validate_name(name): + if name and len(name) > MAX_NAME_LEN: + raise Exception("Logical switch name exceeds %d characters", + MAX_NAME_LEN) + + +def _validate_resource(body): + _validate_name(body.get('display_name')) + + +class FakeClient: + + LSWITCH_RESOURCE = 'lswitch' + LPORT_RESOURCE = 'lport' + LROUTER_RESOURCE = 'lrouter' + NAT_RESOURCE = 'nat' + LQUEUE_RESOURCE = 'lqueue' + SECPROF_RESOURCE = 'securityprofile' + LSWITCH_STATUS = 'lswitchstatus' + LROUTER_STATUS = 'lrouterstatus' + LSWITCH_LPORT_RESOURCE = 'lswitch_lport' + LROUTER_LPORT_RESOURCE = 'lrouter_lport' + LROUTER_NAT_RESOURCE = 'lrouter_nat' + LSWITCH_LPORT_STATUS = 'lswitch_lportstatus' + LSWITCH_LPORT_ATT = 'lswitch_lportattachment' + LROUTER_LPORT_STATUS = 'lrouter_lportstatus' + LROUTER_LPORT_ATT = 'lrouter_lportattachment' + GWSERVICE_RESOURCE = 'gatewayservice' + + RESOURCES = [LSWITCH_RESOURCE, LROUTER_RESOURCE, LQUEUE_RESOURCE, + LPORT_RESOURCE, NAT_RESOURCE, SECPROF_RESOURCE, + GWSERVICE_RESOURCE] + + FAKE_GET_RESPONSES = { + LSWITCH_RESOURCE: "fake_get_lswitch.json", + LSWITCH_LPORT_RESOURCE: "fake_get_lswitch_lport.json", + LSWITCH_LPORT_STATUS: "fake_get_lswitch_lport_status.json", + LSWITCH_LPORT_ATT: "fake_get_lswitch_lport_att.json", + LROUTER_RESOURCE: "fake_get_lrouter.json", + LROUTER_LPORT_RESOURCE: "fake_get_lrouter_lport.json", + LROUTER_LPORT_STATUS: "fake_get_lrouter_lport_status.json", + LROUTER_LPORT_ATT: "fake_get_lrouter_lport_att.json", + LROUTER_STATUS: "fake_get_lrouter_status.json", + LROUTER_NAT_RESOURCE: "fake_get_lrouter_nat.json", + SECPROF_RESOURCE: "fake_get_security_profile.json", + LQUEUE_RESOURCE: "fake_get_lqueue.json", + 
GWSERVICE_RESOURCE: "fake_get_gwservice.json" + } + + FAKE_POST_RESPONSES = { + LSWITCH_RESOURCE: "fake_post_lswitch.json", + LROUTER_RESOURCE: "fake_post_lrouter.json", + LSWITCH_LPORT_RESOURCE: "fake_post_lswitch_lport.json", + LROUTER_LPORT_RESOURCE: "fake_post_lrouter_lport.json", + LROUTER_NAT_RESOURCE: "fake_post_lrouter_nat.json", + SECPROF_RESOURCE: "fake_post_security_profile.json", + LQUEUE_RESOURCE: "fake_post_lqueue.json", + GWSERVICE_RESOURCE: "fake_post_gwservice.json" + } + + FAKE_PUT_RESPONSES = { + LSWITCH_RESOURCE: "fake_post_lswitch.json", + LROUTER_RESOURCE: "fake_post_lrouter.json", + LSWITCH_LPORT_RESOURCE: "fake_post_lswitch_lport.json", + LROUTER_LPORT_RESOURCE: "fake_post_lrouter_lport.json", + LROUTER_NAT_RESOURCE: "fake_post_lrouter_nat.json", + LSWITCH_LPORT_ATT: "fake_put_lswitch_lport_att.json", + LROUTER_LPORT_ATT: "fake_put_lrouter_lport_att.json", + SECPROF_RESOURCE: "fake_post_security_profile.json", + LQUEUE_RESOURCE: "fake_post_lqueue.json", + GWSERVICE_RESOURCE: "fake_post_gwservice.json" + } + + MANAGED_RELATIONS = { + LSWITCH_RESOURCE: [], + LROUTER_RESOURCE: [], + LSWITCH_LPORT_RESOURCE: ['LogicalPortAttachment'], + LROUTER_LPORT_RESOURCE: ['LogicalPortAttachment'], + } + + _validators = { + LSWITCH_RESOURCE: _validate_resource, + LSWITCH_LPORT_RESOURCE: _validate_resource, + LROUTER_LPORT_RESOURCE: _validate_resource, + SECPROF_RESOURCE: _validate_resource, + LQUEUE_RESOURCE: _validate_resource, + GWSERVICE_RESOURCE: _validate_resource + } + + def __init__(self, fake_files_path): + self.fake_files_path = fake_files_path + self._fake_lswitch_dict = {} + self._fake_lrouter_dict = {} + self._fake_lswitch_lport_dict = {} + self._fake_lrouter_lport_dict = {} + self._fake_lrouter_nat_dict = {} + self._fake_lswitch_lportstatus_dict = {} + self._fake_lrouter_lportstatus_dict = {} + self._fake_securityprofile_dict = {} + self._fake_lqueue_dict = {} + self._fake_gatewayservice_dict = {} + + def _get_tag(self, resource, scope): + tags 
= [tag['tag'] for tag in resource['tags'] + if tag['scope'] == scope] + return len(tags) > 0 and tags[0] + + def _get_filters(self, querystring): + if not querystring: + return (None, None, None, None) + params = urlparse.parse_qs(querystring) + tag_filter = None + attr_filter = None + if 'tag' in params and 'tag_scope' in params: + tag_filter = {'scope': params['tag_scope'][0], + 'tag': params['tag'][0]} + elif 'uuid' in params: + attr_filter = {'uuid': params['uuid'][0]} + # Handle page length and page cursor parameter + page_len = params.get('_page_length') + page_cursor = params.get('_page_cursor') + if page_len: + page_len = int(page_len[0]) + else: + # Explicitly set it to None (avoid 0 or empty list) + page_len = None + return (tag_filter, attr_filter, page_len, page_cursor) + + def _add_lswitch(self, body): + fake_lswitch = json.loads(body) + fake_lswitch['uuid'] = uuidutils.generate_uuid() + self._fake_lswitch_dict[fake_lswitch['uuid']] = fake_lswitch + # put the tenant_id and the zone_uuid in the main dict + # for simplyfying templating + zone_uuid = fake_lswitch['transport_zones'][0]['zone_uuid'] + fake_lswitch['zone_uuid'] = zone_uuid + fake_lswitch['tenant_id'] = self._get_tag(fake_lswitch, 'os_tid') + fake_lswitch['lport_count'] = 0 + # set status value + fake_lswitch['status'] = 'true' + return fake_lswitch + + def _build_lrouter(self, body, uuid=None): + fake_lrouter = json.loads(body) + if uuid: + fake_lrouter['uuid'] = uuid + fake_lrouter['tenant_id'] = self._get_tag(fake_lrouter, 'os_tid') + default_nexthop = fake_lrouter['routing_config'].get( + 'default_route_next_hop') + if default_nexthop: + fake_lrouter['default_next_hop'] = default_nexthop.get( + 'gateway_ip_address', '0.0.0.0') + else: + fake_lrouter['default_next_hop'] = '0.0.0.0' + # NOTE(salv-orlando): We won't make the Fake NSX API client + # aware of NSX version. 
The long term plan is to replace it + # with behavioral mocking of NSX API requests + if 'distributed' not in fake_lrouter: + fake_lrouter['distributed'] = False + distributed_json = ('"distributed": %s,' % + str(fake_lrouter['distributed']).lower()) + fake_lrouter['distributed_json'] = distributed_json + return fake_lrouter + + def _add_lrouter(self, body): + fake_lrouter = self._build_lrouter(body, + uuidutils.generate_uuid()) + self._fake_lrouter_dict[fake_lrouter['uuid']] = fake_lrouter + fake_lrouter['lport_count'] = 0 + # set status value + fake_lrouter['status'] = 'true' + return fake_lrouter + + def _add_lqueue(self, body): + fake_lqueue = json.loads(body) + fake_lqueue['uuid'] = uuidutils.generate_uuid() + self._fake_lqueue_dict[fake_lqueue['uuid']] = fake_lqueue + return fake_lqueue + + def _add_lswitch_lport(self, body, ls_uuid): + fake_lport = json.loads(body) + new_uuid = uuidutils.generate_uuid() + fake_lport['uuid'] = new_uuid + # put the tenant_id and the ls_uuid in the main dict + # for simplyfying templating + fake_lport['ls_uuid'] = ls_uuid + fake_lport['tenant_id'] = self._get_tag(fake_lport, 'os_tid') + fake_lport['neutron_port_id'] = self._get_tag(fake_lport, + 'q_port_id') + fake_lport['neutron_device_id'] = self._get_tag(fake_lport, 'vm_id') + fake_lport['att_type'] = "NoAttachment" + fake_lport['att_info_json'] = '' + self._fake_lswitch_lport_dict[fake_lport['uuid']] = fake_lport + + fake_lswitch = self._fake_lswitch_dict[ls_uuid] + fake_lswitch['lport_count'] += 1 + fake_lport_status = fake_lport.copy() + fake_lport_status['ls_tenant_id'] = fake_lswitch['tenant_id'] + fake_lport_status['ls_uuid'] = fake_lswitch['uuid'] + fake_lport_status['ls_name'] = fake_lswitch['display_name'] + fake_lport_status['ls_zone_uuid'] = fake_lswitch['zone_uuid'] + # set status value + fake_lport['status'] = 'true' + self._fake_lswitch_lportstatus_dict[new_uuid] = fake_lport_status + return fake_lport + + def _build_lrouter_lport(self, body, new_uuid=None, 
lr_uuid=None): + fake_lport = json.loads(body) + if new_uuid: + fake_lport['uuid'] = new_uuid + # put the tenant_id and the le_uuid in the main dict + # for simplyfying templating + if lr_uuid: + fake_lport['lr_uuid'] = lr_uuid + fake_lport['tenant_id'] = self._get_tag(fake_lport, 'os_tid') + fake_lport['neutron_port_id'] = self._get_tag(fake_lport, + 'q_port_id') + # replace ip_address with its json dump + if 'ip_addresses' in fake_lport: + ip_addresses_json = json.dumps(fake_lport['ip_addresses']) + fake_lport['ip_addresses_json'] = ip_addresses_json + return fake_lport + + def _add_lrouter_lport(self, body, lr_uuid): + new_uuid = uuidutils.generate_uuid() + fake_lport = self._build_lrouter_lport(body, new_uuid, lr_uuid) + self._fake_lrouter_lport_dict[fake_lport['uuid']] = fake_lport + try: + fake_lrouter = self._fake_lrouter_dict[lr_uuid] + except KeyError: + raise api_exc.ResourceNotFound() + fake_lrouter['lport_count'] += 1 + fake_lport_status = fake_lport.copy() + fake_lport_status['lr_tenant_id'] = fake_lrouter['tenant_id'] + fake_lport_status['lr_uuid'] = fake_lrouter['uuid'] + fake_lport_status['lr_name'] = fake_lrouter['display_name'] + self._fake_lrouter_lportstatus_dict[new_uuid] = fake_lport_status + return fake_lport + + def _add_securityprofile(self, body): + fake_securityprofile = json.loads(body) + fake_securityprofile['uuid'] = uuidutils.generate_uuid() + fake_securityprofile['tenant_id'] = self._get_tag( + fake_securityprofile, 'os_tid') + + fake_securityprofile['nova_spid'] = self._get_tag(fake_securityprofile, + 'nova_spid') + self._fake_securityprofile_dict[fake_securityprofile['uuid']] = ( + fake_securityprofile) + return fake_securityprofile + + def _add_lrouter_nat(self, body, lr_uuid): + fake_nat = json.loads(body) + new_uuid = uuidutils.generate_uuid() + fake_nat['uuid'] = new_uuid + fake_nat['lr_uuid'] = lr_uuid + self._fake_lrouter_nat_dict[fake_nat['uuid']] = fake_nat + if 'match' in fake_nat: + match_json = 
json.dumps(fake_nat['match']) + fake_nat['match_json'] = match_json + return fake_nat + + def _add_gatewayservice(self, body): + fake_gwservice = json.loads(body) + fake_gwservice['uuid'] = str(uuidutils.generate_uuid()) + fake_gwservice['tenant_id'] = self._get_tag( + fake_gwservice, 'os_tid') + # FIXME(salvatore-orlando): For simplicity we're managing only a + # single device. Extend the fake client for supporting multiple devices + first_gw = fake_gwservice['gateways'][0] + fake_gwservice['transport_node_uuid'] = first_gw['transport_node_uuid'] + fake_gwservice['device_id'] = first_gw['device_id'] + self._fake_gatewayservice_dict[fake_gwservice['uuid']] = ( + fake_gwservice) + return fake_gwservice + + def _build_relation(self, src, dst, resource_type, relation): + if relation not in self.MANAGED_RELATIONS[resource_type]: + return # Relation is not desired in output + if not '_relations' in src or not src['_relations'].get(relation): + return # Item does not have relation + relation_data = src['_relations'].get(relation) + dst_relations = dst.get('_relations', {}) + dst_relations[relation] = relation_data + dst['_relations'] = dst_relations + + def _fill_attachment(self, att_data, ls_uuid=None, + lr_uuid=None, lp_uuid=None): + new_data = att_data.copy() + for k in ('ls_uuid', 'lr_uuid', 'lp_uuid'): + if locals().get(k): + new_data[k] = locals()[k] + + def populate_field(field_name): + if field_name in att_data: + new_data['%s_field' % field_name] = ('"%s" : "%s",' + % (field_name, + att_data[field_name])) + del new_data[field_name] + else: + new_data['%s_field' % field_name] = "" + + for field in ['vif_uuid', 'peer_port_href', 'vlan_id', + 'peer_port_uuid', 'l3_gateway_service_uuid']: + populate_field(field) + return new_data + + def _get_resource_type(self, path): + """Get resource type. 
+ + Identifies resource type and relevant uuids in the uri + + /ws.v1/lswitch/xxx + /ws.v1/lswitch/xxx/status + /ws.v1/lswitch/xxx/lport/yyy + /ws.v1/lswitch/xxx/lport/yyy/status + /ws.v1/lrouter/zzz + /ws.v1/lrouter/zzz/status + /ws.v1/lrouter/zzz/lport/www + /ws.v1/lrouter/zzz/lport/www/status + /ws.v1/lqueue/xxx + """ + # The first element will always be 'ws.v1' - so we just discard it + uri_split = path.split('/')[1:] + # parse uri_split backwards + suffix = "" + idx = len(uri_split) - 1 + if 'status' in uri_split[idx]: + suffix = "status" + idx = idx - 1 + elif 'attachment' in uri_split[idx]: + suffix = "attachment" + idx = idx - 1 + # then check if we have an uuid + uuids = [] + if uri_split[idx].replace('-', '') not in self.RESOURCES: + uuids.append(uri_split[idx]) + idx = idx - 1 + resource_type = "%s%s" % (uri_split[idx], suffix) + if idx > 1: + uuids.insert(0, uri_split[idx - 1]) + resource_type = "%s_%s" % (uri_split[idx - 2], resource_type) + return (resource_type.replace('-', ''), uuids) + + def _list(self, resource_type, response_file, + parent_uuid=None, query=None, relations=None): + (tag_filter, attr_filter, + page_len, page_cursor) = self._get_filters(query) + # result_count attribute in response should appear only when + # page_cursor is not specified + do_result_count = not page_cursor + with open("%s/%s" % (self.fake_files_path, response_file)) as f: + response_template = f.read() + res_dict = getattr(self, '_fake_%s_dict' % resource_type) + if parent_uuid == '*': + parent_uuid = None + # NSX raises ResourceNotFound if lswitch doesn't exist and is not * + elif not res_dict and resource_type == self.LSWITCH_LPORT_RESOURCE: + raise api_exc.ResourceNotFound() + + def _attr_match(res_uuid): + if not attr_filter: + return True + item = res_dict[res_uuid] + for (attr, value) in attr_filter.iteritems(): + if item.get(attr) != value: + return False + return True + + def _tag_match(res_uuid): + if not tag_filter: + return True + return any([x['scope'] 
== tag_filter['scope'] and + x['tag'] == tag_filter['tag'] + for x in res_dict[res_uuid]['tags']]) + + def _lswitch_match(res_uuid): + # verify that the switch exist + if parent_uuid and not parent_uuid in self._fake_lswitch_dict: + raise Exception(_("lswitch:%s not found") % parent_uuid) + if (not parent_uuid + or res_dict[res_uuid].get('ls_uuid') == parent_uuid): + return True + return False + + def _lrouter_match(res_uuid): + # verify that the router exist + if parent_uuid and not parent_uuid in self._fake_lrouter_dict: + raise Exception(_("lrouter:%s not found") % parent_uuid) + if (not parent_uuid or + res_dict[res_uuid].get('lr_uuid') == parent_uuid): + return True + return False + + def _cursor_match(res_uuid, page_cursor): + if not page_cursor: + return True + if page_cursor == res_uuid: + # always return True once page_cursor has been found + page_cursor = None + return True + return False + + def _build_item(resource): + item = json.loads(response_template % resource) + if relations: + for relation in relations: + self._build_relation(resource, item, + resource_type, relation) + return item + + for item in res_dict.itervalues(): + if 'tags' in item: + item['tags_json'] = json.dumps(item['tags']) + if resource_type in (self.LSWITCH_LPORT_RESOURCE, + self.LSWITCH_LPORT_ATT, + self.LSWITCH_LPORT_STATUS): + parent_func = _lswitch_match + elif resource_type in (self.LROUTER_LPORT_RESOURCE, + self.LROUTER_LPORT_ATT, + self.LROUTER_NAT_RESOURCE, + self.LROUTER_LPORT_STATUS): + parent_func = _lrouter_match + else: + parent_func = lambda x: True + + items = [_build_item(res_dict[res_uuid]) + for res_uuid in res_dict + if (parent_func(res_uuid) and + _tag_match(res_uuid) and + _attr_match(res_uuid) and + _cursor_match(res_uuid, page_cursor))] + # Rather inefficient, but hey this is just a mock! 
+ next_cursor = None + total_items = len(items) + if page_len: + try: + next_cursor = items[page_len]['uuid'] + except IndexError: + next_cursor = None + items = items[:page_len] + response_dict = {'results': items} + if next_cursor: + response_dict['page_cursor'] = next_cursor + if do_result_count: + response_dict['result_count'] = total_items + return json.dumps(response_dict) + + def _show(self, resource_type, response_file, + uuid1, uuid2=None, relations=None): + target_uuid = uuid2 or uuid1 + if resource_type.endswith('attachment'): + resource_type = resource_type[:resource_type.index('attachment')] + with open("%s/%s" % (self.fake_files_path, response_file)) as f: + response_template = f.read() + res_dict = getattr(self, '_fake_%s_dict' % resource_type) + for item in res_dict.itervalues(): + if 'tags' in item: + item['tags_json'] = json.dumps(item['tags']) + + # replace sec prof rules with their json dump + def jsonify_rules(rule_key): + if rule_key in item: + rules_json = json.dumps(item[rule_key]) + item['%s_json' % rule_key] = rules_json + jsonify_rules('logical_port_egress_rules') + jsonify_rules('logical_port_ingress_rules') + + items = [json.loads(response_template % res_dict[res_uuid]) + for res_uuid in res_dict if res_uuid == target_uuid] + if items: + return json.dumps(items[0]) + raise api_exc.ResourceNotFound() + + def handle_get(self, url): + #TODO(salvatore-orlando): handle field selection + parsedurl = urlparse.urlparse(url) + (res_type, uuids) = self._get_resource_type(parsedurl.path) + relations = urlparse.parse_qs(parsedurl.query).get('relations') + response_file = self.FAKE_GET_RESPONSES.get(res_type) + if not response_file: + raise api_exc.NsxApiException() + if 'lport' in res_type or 'nat' in res_type: + if len(uuids) > 1: + return self._show(res_type, response_file, uuids[0], + uuids[1], relations=relations) + else: + return self._list(res_type, response_file, uuids[0], + query=parsedurl.query, relations=relations) + elif ('lswitch' in 
res_type or + 'lrouter' in res_type or + self.SECPROF_RESOURCE in res_type or + self.LQUEUE_RESOURCE in res_type or + 'gatewayservice' in res_type): + LOG.debug("UUIDS:%s", uuids) + if uuids: + return self._show(res_type, response_file, uuids[0], + relations=relations) + else: + return self._list(res_type, response_file, + query=parsedurl.query, + relations=relations) + else: + raise Exception("unknown resource:%s" % res_type) + + def handle_post(self, url, body): + parsedurl = urlparse.urlparse(url) + (res_type, uuids) = self._get_resource_type(parsedurl.path) + response_file = self.FAKE_POST_RESPONSES.get(res_type) + if not response_file: + raise Exception("resource not found") + with open("%s/%s" % (self.fake_files_path, response_file)) as f: + response_template = f.read() + add_resource = getattr(self, '_add_%s' % res_type) + body_json = json.loads(body) + val_func = self._validators.get(res_type) + if val_func: + val_func(body_json) + args = [body] + if uuids: + args.append(uuids[0]) + response = response_template % add_resource(*args) + return response + + def handle_put(self, url, body): + parsedurl = urlparse.urlparse(url) + (res_type, uuids) = self._get_resource_type(parsedurl.path) + response_file = self.FAKE_PUT_RESPONSES.get(res_type) + if not response_file: + raise Exception("resource not found") + with open("%s/%s" % (self.fake_files_path, response_file)) as f: + response_template = f.read() + # Manage attachment operations + is_attachment = False + if res_type.endswith('attachment'): + is_attachment = True + res_type = res_type[:res_type.index('attachment')] + res_dict = getattr(self, '_fake_%s_dict' % res_type) + body_json = json.loads(body) + val_func = self._validators.get(res_type) + if val_func: + val_func(body_json) + try: + resource = res_dict[uuids[-1]] + except KeyError: + raise api_exc.ResourceNotFound() + if not is_attachment: + edit_resource = getattr(self, '_build_%s' % res_type, None) + if edit_resource: + body_json = 
edit_resource(body) + resource.update(body_json) + else: + relations = resource.get("_relations", {}) + body_2 = json.loads(body) + resource['att_type'] = body_2['type'] + relations['LogicalPortAttachment'] = body_2 + resource['_relations'] = relations + if body_2['type'] == "PatchAttachment": + # We need to do a trick here + if self.LROUTER_RESOURCE in res_type: + res_type_2 = res_type.replace(self.LROUTER_RESOURCE, + self.LSWITCH_RESOURCE) + elif self.LSWITCH_RESOURCE in res_type: + res_type_2 = res_type.replace(self.LSWITCH_RESOURCE, + self.LROUTER_RESOURCE) + res_dict_2 = getattr(self, '_fake_%s_dict' % res_type_2) + body_2['peer_port_uuid'] = uuids[-1] + resource_2 = res_dict_2[json.loads(body)['peer_port_uuid']] + relations_2 = resource_2.get("_relations") + if not relations_2: + relations_2 = {} + relations_2['LogicalPortAttachment'] = body_2 + resource_2['_relations'] = relations_2 + resource['peer_port_uuid'] = body_2['peer_port_uuid'] + resource['att_info_json'] = ( + "\"peer_port_uuid\": \"%s\"," % + resource_2['uuid']) + resource_2['att_info_json'] = ( + "\"peer_port_uuid\": \"%s\"," % + body_2['peer_port_uuid']) + elif body_2['type'] == "L3GatewayAttachment": + resource['attachment_gwsvc_uuid'] = ( + body_2['l3_gateway_service_uuid']) + resource['vlan_id'] = body_2.get('vlan_id') + elif body_2['type'] == "L2GatewayAttachment": + resource['attachment_gwsvc_uuid'] = ( + body_2['l2_gateway_service_uuid']) + elif body_2['type'] == "VifAttachment": + resource['vif_uuid'] = body_2['vif_uuid'] + resource['att_info_json'] = ( + "\"vif_uuid\": \"%s\"," % body_2['vif_uuid']) + + if not is_attachment: + response = response_template % resource + else: + if res_type == self.LROUTER_LPORT_RESOURCE: + lr_uuid = uuids[0] + ls_uuid = None + elif res_type == self.LSWITCH_LPORT_RESOURCE: + ls_uuid = uuids[0] + lr_uuid = None + lp_uuid = uuids[1] + response = response_template % self._fill_attachment( + json.loads(body), ls_uuid, lr_uuid, lp_uuid) + return response + + 
def handle_delete(self, url): + parsedurl = urlparse.urlparse(url) + (res_type, uuids) = self._get_resource_type(parsedurl.path) + response_file = self.FAKE_PUT_RESPONSES.get(res_type) + if not response_file: + raise Exception("resource not found") + res_dict = getattr(self, '_fake_%s_dict' % res_type) + try: + del res_dict[uuids[-1]] + except KeyError: + raise api_exc.ResourceNotFound() + return "" + + def fake_request(self, *args, **kwargs): + method = args[0] + handler = getattr(self, "handle_%s" % method.lower()) + return handler(*args[1:]) + + def reset_all(self): + self._fake_lswitch_dict.clear() + self._fake_lrouter_dict.clear() + self._fake_lswitch_lport_dict.clear() + self._fake_lrouter_lport_dict.clear() + self._fake_lswitch_lportstatus_dict.clear() + self._fake_lrouter_lportstatus_dict.clear() + self._fake_lqueue_dict.clear() + self._fake_securityprofile_dict.clear() + self._fake_gatewayservice_dict.clear() diff --git a/neutron/tests/unit/vmware/apiclient/test_api_common.py b/neutron/tests/unit/vmware/apiclient/test_api_common.py new file mode 100644 index 000000000..5ea40d0d8 --- /dev/null +++ b/neutron/tests/unit/vmware/apiclient/test_api_common.py @@ -0,0 +1,35 @@ +# Copyright 2011 VMware, Inc. +# +# All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import httplib + +from neutron.plugins.vmware import api_client +from neutron.tests import base + + +class ApiCommonTest(base.BaseTestCase): + + def test_ctrl_conn_to_str(self): + conn = httplib.HTTPSConnection('localhost', 4242, timeout=0) + self.assertTrue( + api_client.ctrl_conn_to_str(conn) == 'https://localhost:4242') + + conn = httplib.HTTPConnection('localhost', 4242, timeout=0) + self.assertTrue( + api_client.ctrl_conn_to_str(conn) == 'http://localhost:4242') + + self.assertRaises(TypeError, api_client.ctrl_conn_to_str, + ('not an httplib.HTTPSConnection')) diff --git a/neutron/tests/unit/vmware/apiclient/test_api_eventlet_request.py b/neutron/tests/unit/vmware/apiclient/test_api_eventlet_request.py new file mode 100644 index 000000000..b3e036909 --- /dev/null +++ b/neutron/tests/unit/vmware/apiclient/test_api_eventlet_request.py @@ -0,0 +1,331 @@ +# Copyright (C) 2009-2012 VMware, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import httplib +import logging +import new +import random + +import eventlet +from eventlet.green import urllib2 +import mock + +from neutron.plugins.vmware.api_client import eventlet_client as client +from neutron.plugins.vmware.api_client import eventlet_request as request +from neutron.tests import base +from neutron.tests.unit import vmware + + +logging.basicConfig(level=logging.DEBUG) +LOG = logging.getLogger("test_api_request_eventlet") + + +REQUEST_TIMEOUT = 1 + + +def fetch(url): + return urllib2.urlopen(url).read() + + +class ApiRequestEventletTest(base.BaseTestCase): + + def setUp(self): + + super(ApiRequestEventletTest, self).setUp() + self.client = client.EventletApiClient( + [("127.0.0.1", 4401, True)], "admin", "admin") + self.url = "/ws.v1/_debug" + self.req = request.EventletApiRequest(self.client, self.url) + + def tearDown(self): + self.client = None + self.req = None + super(ApiRequestEventletTest, self).tearDown() + + def test_construct_eventlet_api_request(self): + e = request.EventletApiRequest(self.client, self.url) + self.assertIsNotNone(e) + + def test_apirequest_spawn(self): + def x(id): + eventlet.greenthread.sleep(random.random()) + LOG.info('spawned: %d' % id) + + for i in range(10): + request.EventletApiRequest._spawn(x, i) + + def test_apirequest_start(self): + for i in range(10): + a = request.EventletApiRequest( + self.client, self.url, request_timeout=0.1) + a._handle_request = mock.Mock() + a.start() + eventlet.greenthread.sleep(0.1) + logging.info('_handle_request called: %s' % + a._handle_request.called) + request.EventletApiRequest.joinall() + + def test_join_with_handle_request(self): + self.req._handle_request = mock.Mock() + self.req.start() + self.req.join() + self.assertTrue(self.req._handle_request.called) + + def test_join_without_handle_request(self): + self.req._handle_request = mock.Mock() + self.req.join() + self.assertFalse(self.req._handle_request.called) + + def test_copy(self): + req = self.req.copy() + for 
att in [ + '_api_client', '_url', '_method', '_body', '_headers', + '_http_timeout', '_request_timeout', '_retries', + '_redirects', '_auto_login']: + self.assertTrue(getattr(req, att) is getattr(self.req, att)) + + def test_request_error(self): + self.assertIsNone(self.req.request_error) + + def test_run_and_handle_request(self): + self.req._request_timeout = None + self.req._handle_request = mock.Mock() + self.req.start() + self.req.join() + self.assertTrue(self.req._handle_request.called) + + def test_run_and_timeout(self): + def my_handle_request(self): + LOG.info('my_handle_request() self: %s' % self) + LOG.info('my_handle_request() dir(self): %s' % dir(self)) + eventlet.greenthread.sleep(REQUEST_TIMEOUT * 2) + + self.req._request_timeout = REQUEST_TIMEOUT + self.req._handle_request = new.instancemethod( + my_handle_request, self.req, request.EventletApiRequest) + self.req.start() + self.assertIsNone(self.req.join()) + + def prep_issue_request(self): + mysock = mock.Mock() + mysock.gettimeout.return_value = 4242 + + myresponse = mock.Mock() + myresponse.read.return_value = 'body' + myresponse.getheaders.return_value = 'headers' + myresponse.status = httplib.MOVED_PERMANENTLY + + myconn = mock.Mock() + myconn.request.return_value = None + myconn.sock = mysock + myconn.getresponse.return_value = myresponse + myconn.__str__ = mock.Mock() + myconn.__str__.return_value = 'myconn string' + + req = self.req + req._redirect_params = mock.Mock() + req._redirect_params.return_value = (myconn, 'url') + req._request_str = mock.Mock() + req._request_str.return_value = 'http://cool/cool' + + client = self.client + client.need_login = False + client._auto_login = False + client._auth_cookie = False + client.acquire_connection = mock.Mock() + client.acquire_connection.return_value = myconn + client.release_connection = mock.Mock() + + return (mysock, myresponse, myconn) + + def test_issue_request_trigger_exception(self): + (mysock, myresponse, myconn) = 
self.prep_issue_request() + self.client.acquire_connection.return_value = None + + self.req._issue_request() + self.assertIsInstance(self.req._request_error, Exception) + self.assertTrue(self.client.acquire_connection.called) + + def test_issue_request_handle_none_sock(self): + (mysock, myresponse, myconn) = self.prep_issue_request() + myconn.sock = None + self.req.start() + self.assertIsNone(self.req.join()) + self.assertTrue(self.client.acquire_connection.called) + + def test_issue_request_exceed_maximum_retries(self): + (mysock, myresponse, myconn) = self.prep_issue_request() + self.req.start() + self.assertIsNone(self.req.join()) + self.assertTrue(self.client.acquire_connection.called) + + def test_issue_request_trigger_non_redirect(self): + (mysock, myresponse, myconn) = self.prep_issue_request() + myresponse.status = httplib.OK + self.req.start() + self.assertIsNone(self.req.join()) + self.assertTrue(self.client.acquire_connection.called) + + def test_issue_request_trigger_internal_server_error(self): + (mysock, myresponse, myconn) = self.prep_issue_request() + self.req._redirect_params.return_value = (myconn, None) + self.req.start() + self.assertIsNone(self.req.join()) + self.assertTrue(self.client.acquire_connection.called) + + def test_redirect_params_break_on_location(self): + myconn = mock.Mock() + (conn, retval) = self.req._redirect_params( + myconn, [('location', None)]) + self.assertIsNone(retval) + + def test_redirect_params_parse_a_url(self): + myconn = mock.Mock() + (conn, retval) = self.req._redirect_params( + myconn, [('location', '/path/a/b/c')]) + self.assertIsNotNone(retval) + + def test_redirect_params_invalid_redirect_location(self): + myconn = mock.Mock() + (conn, retval) = self.req._redirect_params( + myconn, [('location', '+path/a/b/c')]) + self.assertIsNone(retval) + + def test_redirect_params_invalid_scheme(self): + myconn = mock.Mock() + (conn, retval) = self.req._redirect_params( + myconn, [('location', 
'invalidscheme://hostname:1/path')]) + self.assertIsNone(retval) + + def test_redirect_params_setup_https_with_cooki(self): + with mock.patch(vmware.CLIENT_NAME) as mock_client: + api_client = mock_client.return_value + self.req._api_client = api_client + myconn = mock.Mock() + (conn, retval) = self.req._redirect_params( + myconn, [('location', 'https://host:1/path')]) + + self.assertIsNotNone(retval) + self.assertTrue(api_client.acquire_redirect_connection.called) + + def test_redirect_params_setup_htttps_and_query(self): + with mock.patch(vmware.CLIENT_NAME) as mock_client: + api_client = mock_client.return_value + self.req._api_client = api_client + myconn = mock.Mock() + (conn, retval) = self.req._redirect_params(myconn, [ + ('location', 'https://host:1/path?q=1')]) + + self.assertIsNotNone(retval) + self.assertTrue(api_client.acquire_redirect_connection.called) + + def test_redirect_params_setup_https_connection_no_cookie(self): + with mock.patch(vmware.CLIENT_NAME) as mock_client: + api_client = mock_client.return_value + self.req._api_client = api_client + myconn = mock.Mock() + (conn, retval) = self.req._redirect_params(myconn, [ + ('location', 'https://host:1/path')]) + + self.assertIsNotNone(retval) + self.assertTrue(api_client.acquire_redirect_connection.called) + + def test_redirect_params_setup_https_and_query_no_cookie(self): + with mock.patch(vmware.CLIENT_NAME) as mock_client: + api_client = mock_client.return_value + self.req._api_client = api_client + myconn = mock.Mock() + (conn, retval) = self.req._redirect_params( + myconn, [('location', 'https://host:1/path?q=1')]) + self.assertIsNotNone(retval) + self.assertTrue(api_client.acquire_redirect_connection.called) + + def test_redirect_params_path_only_with_query(self): + with mock.patch(vmware.CLIENT_NAME) as mock_client: + api_client = mock_client.return_value + api_client.wait_for_login.return_value = None + api_client.auth_cookie = None + api_client.acquire_connection.return_value = True + 
myconn = mock.Mock() + (conn, retval) = self.req._redirect_params(myconn, [ + ('location', '/path?q=1')]) + self.assertIsNotNone(retval) + + def test_handle_request_auto_login(self): + self.req._auto_login = True + self.req._api_client = mock.Mock() + self.req._api_client.need_login = True + self.req._request_str = mock.Mock() + self.req._request_str.return_value = 'http://cool/cool' + self.req.spawn = mock.Mock() + self.req._handle_request() + + def test_handle_request_auto_login_unauth(self): + self.req._auto_login = True + self.req._api_client = mock.Mock() + self.req._api_client.need_login = True + self.req._request_str = mock.Mock() + self.req._request_str.return_value = 'http://cool/cool' + + import socket + resp = httplib.HTTPResponse(socket.socket()) + resp.status = httplib.UNAUTHORIZED + mywaiter = mock.Mock() + mywaiter.wait = mock.Mock() + mywaiter.wait.return_value = resp + self.req.spawn = mock.Mock(return_value=mywaiter) + self.req._handle_request() + + def test_construct_eventlet_login_request(self): + r = request.LoginRequestEventlet(self.client, 'user', 'password') + self.assertIsNotNone(r) + + def test_session_cookie_session_cookie_retrieval(self): + r = request.LoginRequestEventlet(self.client, 'user', 'password') + r.successful = mock.Mock() + r.successful.return_value = True + r.value = mock.Mock() + r.value.get_header = mock.Mock() + r.value.get_header.return_value = 'cool' + self.assertIsNotNone(r.session_cookie()) + + def test_session_cookie_not_retrieved(self): + r = request.LoginRequestEventlet(self.client, 'user', 'password') + r.successful = mock.Mock() + r.successful.return_value = False + r.value = mock.Mock() + r.value.get_header = mock.Mock() + r.value.get_header.return_value = 'cool' + self.assertIsNone(r.session_cookie()) + + def test_construct_eventlet_get_api_providers_request(self): + r = request.GetApiProvidersRequestEventlet(self.client) + self.assertIsNotNone(r) + + def test_api_providers_none_api_providers(self): + r = 
request.GetApiProvidersRequestEventlet(self.client) + r.successful = mock.Mock(return_value=False) + self.assertIsNone(r.api_providers()) + + def test_api_providers_non_none_api_providers(self): + r = request.GetApiProvidersRequestEventlet(self.client) + r.value = mock.Mock() + r.value.body = """{ + "results": [ + { "roles": [ + { "role": "api_provider", + "listen_addr": "pssl:1.1.1.1:1" }]}]}""" + r.successful = mock.Mock(return_value=True) + LOG.info('%s' % r.api_providers()) + self.assertIsNotNone(r.api_providers()) diff --git a/neutron/tests/unit/vmware/db/__init__.py b/neutron/tests/unit/vmware/db/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/neutron/tests/unit/vmware/db/test_lsn_db.py b/neutron/tests/unit/vmware/db/test_lsn_db.py new file mode 100644 index 000000000..34641de53 --- /dev/null +++ b/neutron/tests/unit/vmware/db/test_lsn_db.py @@ -0,0 +1,103 @@ +# Copyright 2014 VMware, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from sqlalchemy import orm + +from neutron import context +from neutron.db import api as db +from neutron.plugins.vmware.common import exceptions as p_exc +from neutron.plugins.vmware.dbexts import lsn_db +from neutron.tests import base + + +class LSNTestCase(base.BaseTestCase): + + def setUp(self): + super(LSNTestCase, self).setUp() + db.configure_db() + self.ctx = context.get_admin_context() + self.addCleanup(db.clear_db) + self.net_id = 'foo_network_id' + self.lsn_id = 'foo_lsn_id' + self.lsn_port_id = 'foo_port_id' + self.subnet_id = 'foo_subnet_id' + self.mac_addr = 'aa:bb:cc:dd:ee:ff' + + def test_lsn_add(self): + lsn_db.lsn_add(self.ctx, self.net_id, self.lsn_id) + lsn = (self.ctx.session.query(lsn_db.Lsn). + filter_by(lsn_id=self.lsn_id).one()) + self.assertEqual(self.lsn_id, lsn.lsn_id) + + def test_lsn_remove(self): + lsn_db.lsn_add(self.ctx, self.net_id, self.lsn_id) + lsn_db.lsn_remove(self.ctx, self.lsn_id) + q = self.ctx.session.query(lsn_db.Lsn).filter_by(lsn_id=self.lsn_id) + self.assertRaises(orm.exc.NoResultFound, q.one) + + def test_lsn_remove_for_network(self): + lsn_db.lsn_add(self.ctx, self.net_id, self.lsn_id) + lsn_db.lsn_remove_for_network(self.ctx, self.net_id) + q = self.ctx.session.query(lsn_db.Lsn).filter_by(lsn_id=self.lsn_id) + self.assertRaises(orm.exc.NoResultFound, q.one) + + def test_lsn_get_for_network(self): + result = lsn_db.lsn_get_for_network(self.ctx, self.net_id, + raise_on_err=False) + self.assertIsNone(result) + + def test_lsn_get_for_network_raise_not_found(self): + self.assertRaises(p_exc.LsnNotFound, + lsn_db.lsn_get_for_network, + self.ctx, self.net_id) + + def test_lsn_port_add(self): + lsn_db.lsn_add(self.ctx, self.net_id, self.lsn_id) + lsn_db.lsn_port_add_for_lsn(self.ctx, self.lsn_port_id, + self.subnet_id, self.mac_addr, self.lsn_id) + result = (self.ctx.session.query(lsn_db.LsnPort). 
+ filter_by(lsn_port_id=self.lsn_port_id).one()) + self.assertEqual(self.lsn_port_id, result.lsn_port_id) + + def test_lsn_port_get_for_mac(self): + lsn_db.lsn_add(self.ctx, self.net_id, self.lsn_id) + lsn_db.lsn_port_add_for_lsn(self.ctx, self.lsn_port_id, + self.subnet_id, self.mac_addr, self.lsn_id) + result = lsn_db.lsn_port_get_for_mac(self.ctx, self.mac_addr) + self.assertEqual(self.mac_addr, result.mac_addr) + + def test_lsn_port_get_for_mac_raise_not_found(self): + self.assertRaises(p_exc.LsnPortNotFound, + lsn_db.lsn_port_get_for_mac, + self.ctx, self.mac_addr) + + def test_lsn_port_get_for_subnet(self): + lsn_db.lsn_add(self.ctx, self.net_id, self.lsn_id) + lsn_db.lsn_port_add_for_lsn(self.ctx, self.lsn_port_id, + self.subnet_id, self.mac_addr, self.lsn_id) + result = lsn_db.lsn_port_get_for_subnet(self.ctx, self.subnet_id) + self.assertEqual(self.subnet_id, result.sub_id) + + def test_lsn_port_get_for_subnet_raise_not_found(self): + self.assertRaises(p_exc.LsnPortNotFound, + lsn_db.lsn_port_get_for_subnet, + self.ctx, self.mac_addr) + + def test_lsn_port_remove(self): + lsn_db.lsn_add(self.ctx, self.net_id, self.lsn_id) + lsn_db.lsn_port_remove(self.ctx, self.lsn_port_id) + q = (self.ctx.session.query(lsn_db.LsnPort). + filter_by(lsn_port_id=self.lsn_port_id)) + self.assertRaises(orm.exc.NoResultFound, q.one) diff --git a/neutron/tests/unit/vmware/db/test_nsx_db.py b/neutron/tests/unit/vmware/db/test_nsx_db.py new file mode 100644 index 000000000..722e22a76 --- /dev/null +++ b/neutron/tests/unit/vmware/db/test_nsx_db.py @@ -0,0 +1,86 @@ +# Copyright 2013 VMware, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from neutron import context +from neutron.db import api as db +from neutron.db import models_v2 +from neutron.openstack.common.db import exception as d_exc +from neutron.plugins.vmware.dbexts import db as nsx_db +from neutron.plugins.vmware.dbexts import models +from neutron.tests import base + + +class NsxDBTestCase(base.BaseTestCase): + + def setUp(self): + super(NsxDBTestCase, self).setUp() + db.configure_db() + self.ctx = context.get_admin_context() + self.addCleanup(db.clear_db) + + def _setup_neutron_network_and_port(self, network_id, port_id): + with self.ctx.session.begin(subtransactions=True): + self.ctx.session.add(models_v2.Network(id=network_id)) + port = models_v2.Port(id=port_id, + network_id=network_id, + mac_address='foo_mac_address', + admin_state_up=True, + status='ACTIVE', + device_id='', + device_owner='') + self.ctx.session.add(port) + + def test_add_neutron_nsx_port_mapping_handle_duplicate_constraint(self): + neutron_net_id = 'foo_neutron_network_id' + neutron_port_id = 'foo_neutron_port_id' + nsx_port_id = 'foo_nsx_port_id' + nsx_switch_id = 'foo_nsx_switch_id' + self._setup_neutron_network_and_port(neutron_net_id, neutron_port_id) + + nsx_db.add_neutron_nsx_port_mapping( + self.ctx.session, neutron_port_id, nsx_switch_id, nsx_port_id) + # Call the method twice to trigger a db duplicate constraint error + nsx_db.add_neutron_nsx_port_mapping( + self.ctx.session, neutron_port_id, nsx_switch_id, nsx_port_id) + result = (self.ctx.session.query(models.NeutronNsxPortMapping). 
+ filter_by(neutron_id=neutron_port_id).one()) + self.assertEqual(nsx_port_id, result.nsx_port_id) + self.assertEqual(neutron_port_id, result.neutron_id) + + def test_add_neutron_nsx_port_mapping_raise_on_duplicate_constraint(self): + neutron_net_id = 'foo_neutron_network_id' + neutron_port_id = 'foo_neutron_port_id' + nsx_port_id_1 = 'foo_nsx_port_id_1' + nsx_port_id_2 = 'foo_nsx_port_id_2' + nsx_switch_id = 'foo_nsx_switch_id' + self._setup_neutron_network_and_port(neutron_net_id, neutron_port_id) + + nsx_db.add_neutron_nsx_port_mapping( + self.ctx.session, neutron_port_id, nsx_switch_id, nsx_port_id_1) + # Call the method twice to trigger a db duplicate constraint error, + # this time with a different nsx port id! + self.assertRaises(d_exc.DBDuplicateEntry, + nsx_db.add_neutron_nsx_port_mapping, + self.ctx.session, neutron_port_id, + nsx_switch_id, nsx_port_id_2) + + def test_add_neutron_nsx_port_mapping_raise_integrity_constraint(self): + neutron_port_id = 'foo_neutron_port_id' + nsx_port_id = 'foo_nsx_port_id' + nsx_switch_id = 'foo_nsx_switch_id' + self.assertRaises(d_exc.DBError, + nsx_db.add_neutron_nsx_port_mapping, + self.ctx.session, neutron_port_id, + nsx_switch_id, nsx_port_id) diff --git a/neutron/tests/unit/vmware/etc/fake_get_gwservice.json b/neutron/tests/unit/vmware/etc/fake_get_gwservice.json new file mode 100644 index 000000000..5c8f9a376 --- /dev/null +++ b/neutron/tests/unit/vmware/etc/fake_get_gwservice.json @@ -0,0 +1,15 @@ +{ + "display_name": "%(display_name)s", + "_href": "/ws.v1/gateway-service/%(uuid)s", + "tags": %(tags_json)s, + "_schema": "/ws.v1/schema/L2GatewayServiceConfig", + "gateways": [ + { + "transport_node_uuid": "%(transport_node_uuid)s", + "type": "L2Gateway", + "device_id": "%(device_id)s" + } + ], + "type": "L2GatewayServiceConfig", + "uuid": "%(uuid)s" +} diff --git a/neutron/tests/unit/vmware/etc/fake_get_lqueue.json b/neutron/tests/unit/vmware/etc/fake_get_lqueue.json new file mode 100644 index 000000000..414945bb6 
--- /dev/null +++ b/neutron/tests/unit/vmware/etc/fake_get_lqueue.json @@ -0,0 +1,11 @@ +{ + "display_name": "%(display_name)s", + "uuid": "%(uuid)s", + "type": "LogicalSwitchConfig", + "_schema": "/ws.v1/schema/LogicalQueueConfig", + "dscp": "%(dscp)s", + "max_bandwidth_rate": "%(max_bandwidth_rate)s", + "min_bandwidth_rate": "%(min_bandwidth_rate)s", + "qos_marking": "%(qos_marking)s", + "_href": "/ws.v1/lqueue/%(uuid)s" +} diff --git a/neutron/tests/unit/vmware/etc/fake_get_lrouter.json b/neutron/tests/unit/vmware/etc/fake_get_lrouter.json new file mode 100644 index 000000000..9425ad654 --- /dev/null +++ b/neutron/tests/unit/vmware/etc/fake_get_lrouter.json @@ -0,0 +1,29 @@ +{ + "display_name": "%(display_name)s", + %(distributed_json)s + "uuid": "%(uuid)s", + "tags": %(tags_json)s, + "routing_config": { + "type": "SingleDefaultRouteImplicitRoutingConfig", + "_schema": "/ws.v1/schema/SingleDefaultRouteImplicitRoutingConfig", + "default_route_next_hop": { + "type": "RouterNextHop", + "_schema": "/ws.v1/schema/RouterNextHop", + "gateway_ip_address": "%(default_next_hop)s" + } + }, + "_schema": "/ws.v1/schema/LogicalRouterConfig", + "_relations": { + "LogicalRouterStatus": { + "_href": "/ws.v1/lrouter/%(uuid)s/status", + "lport_admin_up_count": %(lport_count)d, + "_schema": "/ws.v1/schema/LogicalRouterStatus", + "lport_count": %(lport_count)d, + "fabric_status": %(status)s, + "type": "LogicalRouterStatus", + "lport_link_up_count": %(lport_count)d + } + }, + "type": "LogicalRouterConfig", + "_href": "/ws.v1/lrouter/%(uuid)s" +} \ No newline at end of file diff --git a/neutron/tests/unit/vmware/etc/fake_get_lrouter_lport.json b/neutron/tests/unit/vmware/etc/fake_get_lrouter_lport.json new file mode 100644 index 000000000..df9fcbea7 --- /dev/null +++ b/neutron/tests/unit/vmware/etc/fake_get_lrouter_lport.json @@ -0,0 +1,12 @@ +{ + "display_name": "%(display_name)s", + "admin_status_enabled": "%(admin_status_enabled)s", + "_href": 
"/ws.v1/lrouter/%(lr_uuid)s/lport/%(uuid)s", + "tags": + [{"scope": "q_port_id", "tag": "%(neutron_port_id)s"}, + {"scope": "os_tid", "tag": "%(tenant_id)s"}], + "ip_addresses": %(ip_addresses_json)s, + "_schema": "/ws.v1/schema/LogicalRouterPortConfig", + "type": "LogicalRouterPortConfig", + "uuid": "%(uuid)s" +} diff --git a/neutron/tests/unit/vmware/etc/fake_get_lrouter_lport_att.json b/neutron/tests/unit/vmware/etc/fake_get_lrouter_lport_att.json new file mode 100644 index 000000000..bc5723d11 --- /dev/null +++ b/neutron/tests/unit/vmware/etc/fake_get_lrouter_lport_att.json @@ -0,0 +1,11 @@ +{ + "LogicalPortAttachment": + { + %(peer_port_href_field)s + %(peer_port_uuid_field)s + %(l3_gateway_service_uuid_field)s + %(vlan_id)s + "type": "%(type)s", + "schema": "/ws.v1/schema/%(type)s" + } +} \ No newline at end of file diff --git a/neutron/tests/unit/vmware/etc/fake_get_lrouter_nat.json b/neutron/tests/unit/vmware/etc/fake_get_lrouter_nat.json new file mode 100644 index 000000000..5f7c8baac --- /dev/null +++ b/neutron/tests/unit/vmware/etc/fake_get_lrouter_nat.json @@ -0,0 +1,6 @@ +{ + "_href": "/ws.v1/lrouter/%(lr_uuid)s/nat/%(uuid)s", + "type": "%(type)s", + "match": %(match_json)s, + "uuid": "%(uuid)s" +} \ No newline at end of file diff --git a/neutron/tests/unit/vmware/etc/fake_get_lswitch.json b/neutron/tests/unit/vmware/etc/fake_get_lswitch.json new file mode 100644 index 000000000..a55d508c7 --- /dev/null +++ b/neutron/tests/unit/vmware/etc/fake_get_lswitch.json @@ -0,0 +1,12 @@ +{"display_name": "%(display_name)s", + "_href": "/ws.v1/lswitch/%(uuid)s", + "_schema": "/ws.v1/schema/LogicalSwitchConfig", + "_relations": {"LogicalSwitchStatus": + {"fabric_status": %(status)s, + "type": "LogicalSwitchStatus", + "lport_count": %(lport_count)d, + "_href": "/ws.v1/lswitch/%(uuid)s/status", + "_schema": "/ws.v1/schema/LogicalSwitchStatus"}}, + "type": "LogicalSwitchConfig", + "tags": %(tags_json)s, + "uuid": "%(uuid)s"} diff --git 
a/neutron/tests/unit/vmware/etc/fake_get_lswitch_lport.json b/neutron/tests/unit/vmware/etc/fake_get_lswitch_lport.json new file mode 100644 index 000000000..3e5cb90c2 --- /dev/null +++ b/neutron/tests/unit/vmware/etc/fake_get_lswitch_lport.json @@ -0,0 +1,28 @@ +{"display_name": "%(display_name)s", + "_relations": + {"LogicalPortStatus": + {"type": "LogicalSwitchPortStatus", + "admin_status_enabled": true, + "fabric_status_up": %(status)s, + "link_status_up": %(status)s, + "_href": "/ws.v1/lswitch/%(ls_uuid)s/lport/%(uuid)s/status", + "_schema": "/ws.v1/schema/LogicalSwitchPortStatus"}, + "LogicalSwitchConfig": + {"uuid": "%(ls_uuid)s"}, + "LogicalPortAttachment": + { + "type": "%(att_type)s", + %(att_info_json)s + "schema": "/ws.v1/schema/%(att_type)s" + } + }, + "tags": + [{"scope": "q_port_id", "tag": "%(neutron_port_id)s"}, + {"scope": "vm_id", "tag": "%(neutron_device_id)s"}, + {"scope": "os_tid", "tag": "%(tenant_id)s"}], + "uuid": "%(uuid)s", + "admin_status_enabled": "%(admin_status_enabled)s", + "type": "LogicalSwitchPortConfig", + "_schema": "/ws.v1/schema/LogicalSwitchPortConfig", + "_href": "/ws.v1/lswitch/%(ls_uuid)s/lport/%(uuid)s" + } diff --git a/neutron/tests/unit/vmware/etc/fake_get_lswitch_lport_att.json b/neutron/tests/unit/vmware/etc/fake_get_lswitch_lport_att.json new file mode 100644 index 000000000..cd1788b02 --- /dev/null +++ b/neutron/tests/unit/vmware/etc/fake_get_lswitch_lport_att.json @@ -0,0 +1,7 @@ +{ + "LogicalPortAttachment": + { + "type": "%(att_type)s", + "schema": "/ws.v1/schema/%(att_type)s" + } +} \ No newline at end of file diff --git a/neutron/tests/unit/vmware/etc/fake_get_lswitch_lport_status.json b/neutron/tests/unit/vmware/etc/fake_get_lswitch_lport_status.json new file mode 100644 index 000000000..0df7971b0 --- /dev/null +++ b/neutron/tests/unit/vmware/etc/fake_get_lswitch_lport_status.json @@ -0,0 +1,23 @@ +{"_href": "/ws.v1/lswitch/%(ls_uuid)s/lport/%(uuid)s", + "lswitch": + {"display_name": "%(ls_name)s", + "uuid": 
"%(ls_uuid)s", + "tags": [ + {"scope": "os_tid", + "tag": "%(ls_tenant_id)s"} + ], + "type": "LogicalSwitchConfig", + "_schema": "/ws.v1/schema/LogicalSwitchConfig", + "port_isolation_enabled": false, + "transport_zones": [ + {"zone_uuid": "%(ls_zone_uuid)s", + "transport_type": "stt"} + ], + "_href": "/ws.v1/lswitch/%(ls_uuid)s"}, + "link_status_up": false, + "_schema": "/ws.v1/schema/LogicalSwitchPortStatus", + "admin_status_enabled": true, + "fabric_status_up": true, + "link_status_up": true, + "type": "LogicalSwitchPortStatus" +} diff --git a/neutron/tests/unit/vmware/etc/fake_get_security_profile.json b/neutron/tests/unit/vmware/etc/fake_get_security_profile.json new file mode 100644 index 000000000..898e4937b --- /dev/null +++ b/neutron/tests/unit/vmware/etc/fake_get_security_profile.json @@ -0,0 +1,10 @@ +{ + "display_name": "%(display_name)s", + "_href": "/ws.v1/security-profile/%(uuid)s", + "tags": [{"scope": "os_tid", "tag": "%(tenant_id)s"}, + {"scope": "nova_spid", "tag": "%(nova_spid)s"}], + "logical_port_egress_rules": %(logical_port_egress_rules_json)s, + "_schema": "/ws.v1/schema/SecurityProfileConfig", + "logical_port_ingress_rules": %(logical_port_ingress_rules_json)s, + "uuid": "%(uuid)s" +} diff --git a/neutron/tests/unit/vmware/etc/fake_post_gwservice.json b/neutron/tests/unit/vmware/etc/fake_post_gwservice.json new file mode 100644 index 000000000..72292fddc --- /dev/null +++ b/neutron/tests/unit/vmware/etc/fake_post_gwservice.json @@ -0,0 +1,13 @@ +{ + "display_name": "%(display_name)s", + "tags": [{"scope": "os_tid", "tag": "%(tenant_id)s"}], + "gateways": [ + { + "transport_node_uuid": "%(transport_node_uuid)s", + "device_id": "%(device_id)s", + "type": "L2Gateway" + } + ], + "type": "L2GatewayServiceConfig", + "uuid": "%(uuid)s" +} diff --git a/neutron/tests/unit/vmware/etc/fake_post_lqueue.json b/neutron/tests/unit/vmware/etc/fake_post_lqueue.json new file mode 100644 index 000000000..414945bb6 --- /dev/null +++ 
b/neutron/tests/unit/vmware/etc/fake_post_lqueue.json @@ -0,0 +1,11 @@ +{ + "display_name": "%(display_name)s", + "uuid": "%(uuid)s", + "type": "LogicalSwitchConfig", + "_schema": "/ws.v1/schema/LogicalQueueConfig", + "dscp": "%(dscp)s", + "max_bandwidth_rate": "%(max_bandwidth_rate)s", + "min_bandwidth_rate": "%(min_bandwidth_rate)s", + "qos_marking": "%(qos_marking)s", + "_href": "/ws.v1/lqueue/%(uuid)s" +} diff --git a/neutron/tests/unit/vmware/etc/fake_post_lrouter.json b/neutron/tests/unit/vmware/etc/fake_post_lrouter.json new file mode 100644 index 000000000..dbe2811b0 --- /dev/null +++ b/neutron/tests/unit/vmware/etc/fake_post_lrouter.json @@ -0,0 +1,23 @@ +{ + "display_name": "%(display_name)s", + %(distributed_json)s + "uuid": "%(uuid)s", + "tags": [ + { + "scope": "os_tid", + "tag": "%(tenant_id)s" + } + ], + "routing_config": { + "type": "SingleDefaultRouteImplicitRoutingConfig", + "_schema": "/ws.v1/schema/SingleDefaultRouteImplicitRoutingConfig", + "default_route_next_hop": { + "type": "RouterNextHop", + "_schema": "/ws.v1/schema/RouterNextHop", + "gateway_ip_address": "%(default_next_hop)s" + } + }, + "_schema": "/ws.v1/schema/LogicalRouterConfig", + "type": "LogicalRouterConfig", + "_href": "/ws.v1/lrouter/%(uuid)s" +} \ No newline at end of file diff --git a/neutron/tests/unit/vmware/etc/fake_post_lrouter_lport.json b/neutron/tests/unit/vmware/etc/fake_post_lrouter_lport.json new file mode 100644 index 000000000..bcb13ae07 --- /dev/null +++ b/neutron/tests/unit/vmware/etc/fake_post_lrouter_lport.json @@ -0,0 +1,10 @@ +{ + "display_name": "%(display_name)s", + "_href": "/ws.v1/lrouter/%(lr_uuid)s/lport/%(uuid)s", + "_schema": "/ws.v1/schema/LogicalRouterPortConfig", + "mac_address": "00:00:00:00:00:00", + "admin_status_enabled": true, + "ip_addresses": %(ip_addresses_json)s, + "type": "LogicalRouterPortConfig", + "uuid": "%(uuid)s" +} \ No newline at end of file diff --git a/neutron/tests/unit/vmware/etc/fake_post_lrouter_nat.json 
b/neutron/tests/unit/vmware/etc/fake_post_lrouter_nat.json new file mode 100644 index 000000000..5f7c8baac --- /dev/null +++ b/neutron/tests/unit/vmware/etc/fake_post_lrouter_nat.json @@ -0,0 +1,6 @@ +{ + "_href": "/ws.v1/lrouter/%(lr_uuid)s/nat/%(uuid)s", + "type": "%(type)s", + "match": %(match_json)s, + "uuid": "%(uuid)s" +} \ No newline at end of file diff --git a/neutron/tests/unit/vmware/etc/fake_post_lswitch.json b/neutron/tests/unit/vmware/etc/fake_post_lswitch.json new file mode 100644 index 000000000..7d8f9e38c --- /dev/null +++ b/neutron/tests/unit/vmware/etc/fake_post_lswitch.json @@ -0,0 +1,12 @@ +{ + "display_name": "%(display_name)s", + "uuid": "%(uuid)s", + "tags": [{"scope": "os_tid", "tag": "%(tenant_id)s"}], + "type": "LogicalSwitchConfig", + "_schema": "/ws.v1/schema/LogicalSwitchConfig", + "port_isolation_enabled": false, + "transport_zones": [ + {"zone_uuid": "%(zone_uuid)s", + "transport_type": "stt"}], + "_href": "/ws.v1/lswitch/%(uuid)s" +} diff --git a/neutron/tests/unit/vmware/etc/fake_post_lswitch_lport.json b/neutron/tests/unit/vmware/etc/fake_post_lswitch_lport.json new file mode 100644 index 000000000..cc8decf26 --- /dev/null +++ b/neutron/tests/unit/vmware/etc/fake_post_lswitch_lport.json @@ -0,0 +1,17 @@ +{ + "display_name": "%(uuid)s", + "_href": "/ws.v1/lswitch/%(ls_uuid)s/lport/%(uuid)s", + "security_profiles": [], + "tags": + [{"scope": "q_port_id", "tag": "%(neutron_port_id)s"}, + {"scope": "vm_id", "tag": "%(neutron_device_id)s"}, + {"scope": "os_tid", "tag": "%(tenant_id)s"}], + "portno": 1, + "queue_uuid": null, + "_schema": "/ws.v1/schema/LogicalSwitchPortConfig", + "mirror_targets": [], + "allowed_address_pairs": [], + "admin_status_enabled": true, + "type": "LogicalSwitchPortConfig", + "uuid": "%(uuid)s" +} diff --git a/neutron/tests/unit/vmware/etc/fake_post_security_profile.json b/neutron/tests/unit/vmware/etc/fake_post_security_profile.json new file mode 100644 index 000000000..594da3310 --- /dev/null +++ 
b/neutron/tests/unit/vmware/etc/fake_post_security_profile.json @@ -0,0 +1,10 @@ +{ + "display_name": "%(display_name)s", + "_href": "/ws.v1/security-profile/%(uuid)s", + "tags": [{"scope": "os_tid", "tag": "%(tenant_id)s"}, + {"scope": "nova_spid", "tag": "%(nova_spid)s"}], + "logical_port_egress_rules": [], + "_schema": "/ws.v1/schema/SecurityProfileConfig", + "logical_port_ingress_rules": [], + "uuid": "%(uuid)s" +} diff --git a/neutron/tests/unit/vmware/etc/fake_put_lrouter_lport_att.json b/neutron/tests/unit/vmware/etc/fake_put_lrouter_lport_att.json new file mode 100644 index 000000000..c58fa41c7 --- /dev/null +++ b/neutron/tests/unit/vmware/etc/fake_put_lrouter_lport_att.json @@ -0,0 +1,12 @@ +{ + "LogicalPortAttachment": + { + %(peer_port_href_field)s + %(peer_port_uuid_field)s + %(l3_gateway_service_uuid_field)s + %(vlan_id_field)s + "_href": "/ws.v1/lrouter/%(lr_uuid)s/lport/%(lp_uuid)s/attachment", + "type": "%(type)s", + "schema": "/ws.v1/schema/%(type)s" + } +} \ No newline at end of file diff --git a/neutron/tests/unit/vmware/etc/fake_put_lswitch_lport_att.json b/neutron/tests/unit/vmware/etc/fake_put_lswitch_lport_att.json new file mode 100644 index 000000000..dd0daa336 --- /dev/null +++ b/neutron/tests/unit/vmware/etc/fake_put_lswitch_lport_att.json @@ -0,0 +1,11 @@ +{ + "LogicalPortAttachment": + { + %(peer_port_href_field)s + %(peer_port_uuid_field)s + %(vif_uuid_field)s + "_href": "/ws.v1/lswitch/%(ls_uuid)s/lport/%(lp_uuid)s/attachment", + "type": "%(type)s", + "schema": "/ws.v1/schema/%(type)s" + } +} \ No newline at end of file diff --git a/neutron/tests/unit/vmware/etc/neutron.conf.test b/neutron/tests/unit/vmware/etc/neutron.conf.test new file mode 100644 index 000000000..9eff4405d --- /dev/null +++ b/neutron/tests/unit/vmware/etc/neutron.conf.test @@ -0,0 +1,26 @@ +[DEFAULT] +# Show more verbose log output (sets INFO log level output) +verbose = True + +# Show debugging output in logs (sets DEBUG log level output) +debug = False + +# 
Address to bind the API server +bind_host = 0.0.0.0 + +# Port the bind the API server to +bind_port = 9696 + +# MISSING Path to the extensions +# api_extensions_path = + +# Paste configuration file +api_paste_config = api-paste.ini.test + +# The messaging module to use, defaults to kombu. +rpc_backend = neutron.openstack.common.rpc.impl_fake + +lock_path = $state_path/lock + +[database] +connection = 'sqlite://' diff --git a/neutron/tests/unit/vmware/etc/nsx.ini.agentless.test b/neutron/tests/unit/vmware/etc/nsx.ini.agentless.test new file mode 100644 index 000000000..ee129f1d1 --- /dev/null +++ b/neutron/tests/unit/vmware/etc/nsx.ini.agentless.test @@ -0,0 +1,17 @@ +[DEFAULT] +default_tz_uuid = fake_tz_uuid +nova_zone_id = whatever +nsx_controllers = fake_1, fake_2 +nsx_user = foo +nsx_password = bar +default_l3_gw_service_uuid = whatever +default_l2_gw_service_uuid = whatever +default_service_cluster_uuid = whatever +default_interface_name = whatever +req_timeout = 14 +http_timeout = 13 +redirects = 12 +retries = 11 + +[NSX] +agent_mode = agentless diff --git a/neutron/tests/unit/vmware/etc/nsx.ini.basic.test b/neutron/tests/unit/vmware/etc/nsx.ini.basic.test new file mode 100644 index 000000000..c8fb9886e --- /dev/null +++ b/neutron/tests/unit/vmware/etc/nsx.ini.basic.test @@ -0,0 +1,5 @@ +[DEFAULT] +default_tz_uuid = fake_tz_uuid +nsx_controllers=fake_1,fake_2 +nsx_user=foo +nsx_password=bar diff --git a/neutron/tests/unit/vmware/etc/nsx.ini.combined.test b/neutron/tests/unit/vmware/etc/nsx.ini.combined.test new file mode 100644 index 000000000..2a6f8307c --- /dev/null +++ b/neutron/tests/unit/vmware/etc/nsx.ini.combined.test @@ -0,0 +1,17 @@ +[DEFAULT] +default_tz_uuid = fake_tz_uuid +nova_zone_id = whatever +nsx_controllers = fake_1, fake_2 +nsx_user = foo +nsx_password = bar +default_l3_gw_service_uuid = whatever +default_l2_gw_service_uuid = whatever +default_service_cluster_uuid = whatever +default_interface_name = whatever +req_timeout = 14 +http_timeout 
= 13 +redirects = 12 +retries = 11 + +[NSX] +agent_mode = combined diff --git a/neutron/tests/unit/vmware/etc/nsx.ini.full.test b/neutron/tests/unit/vmware/etc/nsx.ini.full.test new file mode 100644 index 000000000..7ca29bd42 --- /dev/null +++ b/neutron/tests/unit/vmware/etc/nsx.ini.full.test @@ -0,0 +1,13 @@ +[DEFAULT] +default_tz_uuid = fake_tz_uuid +nova_zone_id = whatever +nsx_controllers = fake_1, fake_2 +nsx_user = foo +nsx_password = bar +default_l3_gw_service_uuid = whatever +default_l2_gw_service_uuid = whatever +default_interface_name = whatever +req_timeout = 14 +http_timeout = 13 +redirects = 12 +retries = 11 diff --git a/neutron/tests/unit/vmware/etc/nsx.ini.test b/neutron/tests/unit/vmware/etc/nsx.ini.test new file mode 100644 index 000000000..1bb959be3 --- /dev/null +++ b/neutron/tests/unit/vmware/etc/nsx.ini.test @@ -0,0 +1,7 @@ +[DEFAULT] +default_tz_uuid = fake_tz_uuid +nsx_controllers=fake_1, fake_2 +nsx_user=foo +nsx_password=bar +default_l3_gw_service_uuid = whatever +default_l2_gw_service_uuid = whatever diff --git a/neutron/tests/unit/vmware/etc/nvp.ini.full.test b/neutron/tests/unit/vmware/etc/nvp.ini.full.test new file mode 100644 index 000000000..500cc0eed --- /dev/null +++ b/neutron/tests/unit/vmware/etc/nvp.ini.full.test @@ -0,0 +1,13 @@ +[DEFAULT] +default_tz_uuid = fake_tz_uuid +nova_zone_id = whatever +nvp_controllers = fake_1, fake_2 +nvp_user = foo +nvp_password = bar +default_l3_gw_service_uuid = whatever +default_l2_gw_service_uuid = whatever +default_interface_name = whatever +req_timeout = 4 +http_timeout = 3 +redirects = 2 +retries = 2 diff --git a/neutron/tests/unit/vmware/etc/vcns.ini.test b/neutron/tests/unit/vmware/etc/vcns.ini.test new file mode 100644 index 000000000..38b3361ed --- /dev/null +++ b/neutron/tests/unit/vmware/etc/vcns.ini.test @@ -0,0 +1,9 @@ +[vcns] +manager_uri = https://fake-host +user = fake-user +passwordd = fake-password +datacenter_moid = fake-moid +resource_pool_id = fake-resgroup +datastore_id = 
fake-datastore +external_network = fake-ext-net +task_status_check_interval = 100 diff --git a/neutron/tests/unit/vmware/extensions/__init__.py b/neutron/tests/unit/vmware/extensions/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/neutron/tests/unit/vmware/extensions/test_addresspairs.py b/neutron/tests/unit/vmware/extensions/test_addresspairs.py new file mode 100644 index 000000000..be30b5bc4 --- /dev/null +++ b/neutron/tests/unit/vmware/extensions/test_addresspairs.py @@ -0,0 +1,22 @@ +# Copyright (c) 2014 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from neutron.tests.unit import test_extension_allowedaddresspairs as ext_pairs +from neutron.tests.unit.vmware import test_nsx_plugin + + +class TestAllowedAddressPairs(test_nsx_plugin.NsxPluginV2TestCase, + ext_pairs.TestAllowedAddressPairs): + pass diff --git a/neutron/tests/unit/vmware/extensions/test_maclearning.py b/neutron/tests/unit/vmware/extensions/test_maclearning.py new file mode 100644 index 000000000..70d65731a --- /dev/null +++ b/neutron/tests/unit/vmware/extensions/test_maclearning.py @@ -0,0 +1,139 @@ +# Copyright (c) 2013 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import contextlib +import mock + +from oslo.config import cfg + +from neutron.api.v2 import attributes +from neutron.common import test_lib +from neutron import context +from neutron.extensions import agent +from neutron.plugins.vmware.api_client import version +from neutron.plugins.vmware.common import sync +from neutron.tests.unit import test_db_plugin +from neutron.tests.unit import vmware +from neutron.tests.unit.vmware.apiclient import fake + + +class MacLearningExtensionManager(object): + + def get_resources(self): + # Add the resources to the global attribute map + # This is done here as the setup process won't + # initialize the main API router which extends + # the global attribute map + attributes.RESOURCE_ATTRIBUTE_MAP.update( + agent.RESOURCE_ATTRIBUTE_MAP) + return agent.Agent.get_resources() + + def get_actions(self): + return [] + + def get_request_extensions(self): + return [] + + +class MacLearningDBTestCase(test_db_plugin.NeutronDbPluginV2TestCase): + fmt = 'json' + + def setUp(self): + self.adminContext = context.get_admin_context() + test_lib.test_config['config_files'] = [ + vmware.get_fake_conf('nsx.ini.full.test')] + cfg.CONF.set_override('api_extensions_path', vmware.NSXEXT_PATH) + # Save the original RESOURCE_ATTRIBUTE_MAP + self.saved_attr_map = {} + for resource, attrs in attributes.RESOURCE_ATTRIBUTE_MAP.iteritems(): + self.saved_attr_map[resource] = attrs.copy() + ext_mgr = MacLearningExtensionManager() + # mock api client + self.fc = fake.FakeClient(vmware.STUBS_PATH) + self.mock_nsx = mock.patch(vmware.NSXAPI_NAME, 
autospec=True) + instance = self.mock_nsx.start() + # Avoid runs of the synchronizer looping call + patch_sync = mock.patch.object(sync, '_start_loopingcall') + patch_sync.start() + + # Emulate tests against NSX 2.x + instance.return_value.get_version.return_value = version.Version("3.0") + instance.return_value.request.side_effect = self.fc.fake_request + cfg.CONF.set_override('metadata_mode', None, 'NSX') + self.addCleanup(self.fc.reset_all) + self.addCleanup(self.restore_resource_attribute_map) + super(MacLearningDBTestCase, self).setUp(plugin=vmware.PLUGIN_NAME, + ext_mgr=ext_mgr) + + def restore_resource_attribute_map(self): + # Restore the original RESOURCE_ATTRIBUTE_MAP + attributes.RESOURCE_ATTRIBUTE_MAP = self.saved_attr_map + + def test_create_with_mac_learning(self): + with self.port(arg_list=('mac_learning_enabled',), + mac_learning_enabled=True) as port: + # Validate create operation response + self.assertEqual(True, port['port']['mac_learning_enabled']) + # Verify that db operation successfully set mac learning state + req = self.new_show_request('ports', port['port']['id'], self.fmt) + sport = self.deserialize(self.fmt, req.get_response(self.api)) + self.assertEqual(True, sport['port']['mac_learning_enabled']) + + def test_create_and_show_port_without_mac_learning(self): + with self.port() as port: + req = self.new_show_request('ports', port['port']['id'], self.fmt) + sport = self.deserialize(self.fmt, req.get_response(self.api)) + self.assertNotIn('mac_learning_enabled', sport['port']) + + def test_update_port_with_mac_learning(self): + with self.port(arg_list=('mac_learning_enabled',), + mac_learning_enabled=False) as port: + data = {'port': {'mac_learning_enabled': True}} + req = self.new_update_request('ports', data, port['port']['id']) + res = self.deserialize(self.fmt, req.get_response(self.api)) + self.assertEqual(True, res['port']['mac_learning_enabled']) + + def test_update_preexisting_port_with_mac_learning(self): + with self.port() as 
port: + req = self.new_show_request('ports', port['port']['id'], self.fmt) + sport = self.deserialize(self.fmt, req.get_response(self.api)) + self.assertNotIn('mac_learning_enabled', sport['port']) + data = {'port': {'mac_learning_enabled': True}} + req = self.new_update_request('ports', data, port['port']['id']) + # Validate update operation response + res = self.deserialize(self.fmt, req.get_response(self.api)) + self.assertEqual(True, res['port']['mac_learning_enabled']) + # Verify that db operation successfully updated mac learning state + req = self.new_show_request('ports', port['port']['id'], self.fmt) + sport = self.deserialize(self.fmt, req.get_response(self.api)) + self.assertEqual(True, sport['port']['mac_learning_enabled']) + + def test_list_ports(self): + # for this test we need to enable overlapping ips + cfg.CONF.set_default('allow_overlapping_ips', True) + with contextlib.nested(self.port(arg_list=('mac_learning_enabled',), + mac_learning_enabled=True), + self.port(arg_list=('mac_learning_enabled',), + mac_learning_enabled=True), + self.port(arg_list=('mac_learning_enabled',), + mac_learning_enabled=True)): + for port in self._list('ports')['ports']: + self.assertEqual(True, port['mac_learning_enabled']) + + def test_show_port(self): + with self.port(arg_list=('mac_learning_enabled',), + mac_learning_enabled=True) as p: + port_res = self._show('ports', p['port']['id'])['port'] + self.assertEqual(True, port_res['mac_learning_enabled']) diff --git a/neutron/tests/unit/vmware/extensions/test_networkgw.py b/neutron/tests/unit/vmware/extensions/test_networkgw.py new file mode 100644 index 000000000..ac4caaee7 --- /dev/null +++ b/neutron/tests/unit/vmware/extensions/test_networkgw.py @@ -0,0 +1,1074 @@ +# Copyright 2012 VMware, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import contextlib +import mock + +from oslo.config import cfg +from webob import exc +import webtest + +from neutron.api import extensions +from neutron.api.v2 import attributes +from neutron import context +from neutron.db import api as db_api +from neutron.db import db_base_plugin_v2 +from neutron import manager +from neutron.plugins.vmware.api_client import exception as api_exc +from neutron.plugins.vmware.common import exceptions as nsx_exc +from neutron.plugins.vmware.dbexts import networkgw_db +from neutron.plugins.vmware.extensions import networkgw +from neutron.plugins.vmware import nsxlib +from neutron.plugins.vmware.nsxlib import l2gateway as l2gwlib +from neutron import quota +from neutron.tests import base +from neutron.tests.unit import test_api_v2 +from neutron.tests.unit import test_db_plugin +from neutron.tests.unit import test_extensions +from neutron.tests.unit import vmware +from neutron.tests.unit.vmware import test_nsx_plugin + +_uuid = test_api_v2._uuid +_get_path = test_api_v2._get_path + + +class TestExtensionManager(object): + + def get_resources(self): + # Add the resources to the global attribute map + # This is done here as the setup process won't + # initialize the main API router which extends + # the global attribute map + attributes.RESOURCE_ATTRIBUTE_MAP.update( + networkgw.RESOURCE_ATTRIBUTE_MAP) + return networkgw.Networkgw.get_resources() + + def get_actions(self): + return [] + + def get_request_extensions(self): + return [] + + +class NetworkGatewayExtensionTestCase(base.BaseTestCase): + + def setUp(self): + 
super(NetworkGatewayExtensionTestCase, self).setUp() + plugin = '%s.%s' % (networkgw.__name__, + networkgw.NetworkGatewayPluginBase.__name__) + self._gw_resource = networkgw.GATEWAY_RESOURCE_NAME + self._dev_resource = networkgw.DEVICE_RESOURCE_NAME + + # Ensure existing ExtensionManager is not used + extensions.PluginAwareExtensionManager._instance = None + + # Create the default configurations + self.config_parse() + + # Update the plugin and extensions path + self.setup_coreplugin(plugin) + + _plugin_patcher = mock.patch(plugin, autospec=True) + self.plugin = _plugin_patcher.start() + + # Instantiate mock plugin and enable extensions + manager.NeutronManager.get_plugin().supported_extension_aliases = ( + [networkgw.EXT_ALIAS]) + ext_mgr = TestExtensionManager() + extensions.PluginAwareExtensionManager._instance = ext_mgr + self.ext_mdw = test_extensions.setup_extensions_middleware(ext_mgr) + self.api = webtest.TestApp(self.ext_mdw) + + quota.QUOTAS._driver = None + cfg.CONF.set_override('quota_driver', 'neutron.quota.ConfDriver', + group='QUOTAS') + + def test_network_gateway_create(self): + nw_gw_id = _uuid() + data = {self._gw_resource: {'name': 'nw-gw', + 'tenant_id': _uuid(), + 'devices': [{'id': _uuid(), + 'interface_name': 'xxx'}]}} + return_value = data[self._gw_resource].copy() + return_value.update({'id': nw_gw_id}) + instance = self.plugin.return_value + instance.create_network_gateway.return_value = return_value + res = self.api.post_json(_get_path(networkgw.NETWORK_GATEWAYS), data) + instance.create_network_gateway.assert_called_with( + mock.ANY, network_gateway=data) + self.assertEqual(res.status_int, exc.HTTPCreated.code) + self.assertIn(self._gw_resource, res.json) + nw_gw = res.json[self._gw_resource] + self.assertEqual(nw_gw['id'], nw_gw_id) + + def _test_network_gateway_create_with_error( + self, data, error_code=exc.HTTPBadRequest.code): + res = self.api.post_json(_get_path(networkgw.NETWORK_GATEWAYS), data, + expect_errors=True) + 
self.assertEqual(res.status_int, error_code) + + def test_network_gateway_create_invalid_device_spec(self): + data = {self._gw_resource: {'name': 'nw-gw', + 'tenant_id': _uuid(), + 'devices': [{'id': _uuid(), + 'invalid': 'xxx'}]}} + self._test_network_gateway_create_with_error(data) + + def test_network_gateway_create_extra_attr_in_device_spec(self): + data = {self._gw_resource: {'name': 'nw-gw', + 'tenant_id': _uuid(), + 'devices': + [{'id': _uuid(), + 'interface_name': 'xxx', + 'extra_attr': 'onetoomany'}]}} + self._test_network_gateway_create_with_error(data) + + def test_network_gateway_update(self): + nw_gw_name = 'updated' + data = {self._gw_resource: {'name': nw_gw_name}} + nw_gw_id = _uuid() + return_value = {'id': nw_gw_id, + 'name': nw_gw_name} + + instance = self.plugin.return_value + instance.update_network_gateway.return_value = return_value + res = self.api.put_json( + _get_path('%s/%s' % (networkgw.NETWORK_GATEWAYS, nw_gw_id)), data) + instance.update_network_gateway.assert_called_with( + mock.ANY, nw_gw_id, network_gateway=data) + self.assertEqual(res.status_int, exc.HTTPOk.code) + self.assertIn(self._gw_resource, res.json) + nw_gw = res.json[self._gw_resource] + self.assertEqual(nw_gw['id'], nw_gw_id) + self.assertEqual(nw_gw['name'], nw_gw_name) + + def test_network_gateway_delete(self): + nw_gw_id = _uuid() + instance = self.plugin.return_value + res = self.api.delete(_get_path('%s/%s' % (networkgw.NETWORK_GATEWAYS, + nw_gw_id))) + + instance.delete_network_gateway.assert_called_with(mock.ANY, + nw_gw_id) + self.assertEqual(res.status_int, exc.HTTPNoContent.code) + + def test_network_gateway_get(self): + nw_gw_id = _uuid() + return_value = {self._gw_resource: {'name': 'test', + 'devices': + [{'id': _uuid(), + 'interface_name': 'xxx'}], + 'id': nw_gw_id}} + instance = self.plugin.return_value + instance.get_network_gateway.return_value = return_value + + res = self.api.get(_get_path('%s/%s' % (networkgw.NETWORK_GATEWAYS, + nw_gw_id))) + + 
instance.get_network_gateway.assert_called_with(mock.ANY, + nw_gw_id, + fields=mock.ANY) + self.assertEqual(res.status_int, exc.HTTPOk.code) + + def test_network_gateway_list(self): + nw_gw_id = _uuid() + return_value = [{self._gw_resource: {'name': 'test', + 'devices': + [{'id': _uuid(), + 'interface_name': 'xxx'}], + 'id': nw_gw_id}}] + instance = self.plugin.return_value + instance.get_network_gateways.return_value = return_value + + res = self.api.get(_get_path(networkgw.NETWORK_GATEWAYS)) + + instance.get_network_gateways.assert_called_with(mock.ANY, + fields=mock.ANY, + filters=mock.ANY) + self.assertEqual(res.status_int, exc.HTTPOk.code) + + def test_network_gateway_connect(self): + nw_gw_id = _uuid() + nw_id = _uuid() + gw_port_id = _uuid() + mapping_data = {'network_id': nw_id, + 'segmentation_type': 'vlan', + 'segmentation_id': '999'} + return_value = {'connection_info': { + 'network_gateway_id': nw_gw_id, + 'port_id': gw_port_id, + 'network_id': nw_id}} + instance = self.plugin.return_value + instance.connect_network.return_value = return_value + res = self.api.put_json(_get_path('%s/%s/connect_network' % + (networkgw.NETWORK_GATEWAYS, + nw_gw_id)), + mapping_data) + instance.connect_network.assert_called_with(mock.ANY, + nw_gw_id, + mapping_data) + self.assertEqual(res.status_int, exc.HTTPOk.code) + nw_conn_res = res.json['connection_info'] + self.assertEqual(nw_conn_res['port_id'], gw_port_id) + self.assertEqual(nw_conn_res['network_id'], nw_id) + + def test_network_gateway_disconnect(self): + nw_gw_id = _uuid() + nw_id = _uuid() + mapping_data = {'network_id': nw_id} + instance = self.plugin.return_value + res = self.api.put_json(_get_path('%s/%s/disconnect_network' % + (networkgw.NETWORK_GATEWAYS, + nw_gw_id)), + mapping_data) + instance.disconnect_network.assert_called_with(mock.ANY, + nw_gw_id, + mapping_data) + self.assertEqual(res.status_int, exc.HTTPOk.code) + + def test_gateway_device_get(self): + gw_dev_id = _uuid() + return_value = 
{self._dev_resource: {'name': 'test', + 'connector_type': 'stt', + 'connector_ip': '1.1.1.1', + 'id': gw_dev_id}} + instance = self.plugin.return_value + instance.get_gateway_device.return_value = return_value + + res = self.api.get(_get_path('%s/%s' % (networkgw.GATEWAY_DEVICES, + gw_dev_id))) + + instance.get_gateway_device.assert_called_with(mock.ANY, + gw_dev_id, + fields=mock.ANY) + self.assertEqual(res.status_int, exc.HTTPOk.code) + + def test_gateway_device_list(self): + gw_dev_id = _uuid() + return_value = [{self._dev_resource: {'name': 'test', + 'connector_type': 'stt', + 'connector_ip': '1.1.1.1', + 'id': gw_dev_id}}] + instance = self.plugin.return_value + instance.get_gateway_devices.return_value = return_value + + res = self.api.get(_get_path(networkgw.GATEWAY_DEVICES)) + + instance.get_gateway_devices.assert_called_with(mock.ANY, + fields=mock.ANY, + filters=mock.ANY) + self.assertEqual(res.status_int, exc.HTTPOk.code) + + def test_gateway_device_create(self): + gw_dev_id = _uuid() + data = {self._dev_resource: {'name': 'test-dev', + 'tenant_id': _uuid(), + 'client_certificate': 'xyz', + 'connector_type': 'stt', + 'connector_ip': '1.1.1.1'}} + return_value = data[self._dev_resource].copy() + return_value.update({'id': gw_dev_id}) + instance = self.plugin.return_value + instance.create_gateway_device.return_value = return_value + res = self.api.post_json(_get_path(networkgw.GATEWAY_DEVICES), data) + instance.create_gateway_device.assert_called_with( + mock.ANY, gateway_device=data) + self.assertEqual(res.status_int, exc.HTTPCreated.code) + self.assertIn(self._dev_resource, res.json) + gw_dev = res.json[self._dev_resource] + self.assertEqual(gw_dev['id'], gw_dev_id) + + def _test_gateway_device_create_with_error( + self, data, error_code=exc.HTTPBadRequest.code): + res = self.api.post_json(_get_path(networkgw.GATEWAY_DEVICES), data, + expect_errors=True) + self.assertEqual(res.status_int, error_code) + + def 
test_gateway_device_create_invalid_connector_type(self): + data = {self._gw_resource: {'name': 'test-dev', + 'client_certificate': 'xyz', + 'tenant_id': _uuid(), + 'connector_type': 'invalid', + 'connector_ip': '1.1.1.1'}} + self._test_gateway_device_create_with_error(data) + + def test_gateway_device_create_invalid_connector_ip(self): + data = {self._gw_resource: {'name': 'test-dev', + 'client_certificate': 'xyz', + 'tenant_id': _uuid(), + 'connector_type': 'stt', + 'connector_ip': 'invalid'}} + self._test_gateway_device_create_with_error(data) + + def test_gateway_device_create_extra_attr_in_device_spec(self): + data = {self._gw_resource: {'name': 'test-dev', + 'client_certificate': 'xyz', + 'tenant_id': _uuid(), + 'alien_attribute': 'E.T.', + 'connector_type': 'stt', + 'connector_ip': '1.1.1.1'}} + self._test_gateway_device_create_with_error(data) + + def test_gateway_device_update(self): + gw_dev_name = 'updated' + data = {self._dev_resource: {'name': gw_dev_name}} + gw_dev_id = _uuid() + return_value = {'id': gw_dev_id, + 'name': gw_dev_name} + + instance = self.plugin.return_value + instance.update_gateway_device.return_value = return_value + res = self.api.put_json( + _get_path('%s/%s' % (networkgw.GATEWAY_DEVICES, gw_dev_id)), data) + instance.update_gateway_device.assert_called_with( + mock.ANY, gw_dev_id, gateway_device=data) + self.assertEqual(res.status_int, exc.HTTPOk.code) + self.assertIn(self._dev_resource, res.json) + gw_dev = res.json[self._dev_resource] + self.assertEqual(gw_dev['id'], gw_dev_id) + self.assertEqual(gw_dev['name'], gw_dev_name) + + def test_gateway_device_delete(self): + gw_dev_id = _uuid() + instance = self.plugin.return_value + res = self.api.delete(_get_path('%s/%s' % (networkgw.GATEWAY_DEVICES, + gw_dev_id))) + instance.delete_gateway_device.assert_called_with(mock.ANY, gw_dev_id) + self.assertEqual(res.status_int, exc.HTTPNoContent.code) + + +class NetworkGatewayDbTestCase(test_db_plugin.NeutronDbPluginV2TestCase): + """Unit 
tests for Network Gateway DB support.""" + + def setUp(self, plugin=None, ext_mgr=None): + if not plugin: + plugin = '%s.%s' % (__name__, TestNetworkGatewayPlugin.__name__) + if not ext_mgr: + ext_mgr = TestExtensionManager() + self.gw_resource = networkgw.GATEWAY_RESOURCE_NAME + self.dev_resource = networkgw.DEVICE_RESOURCE_NAME + + super(NetworkGatewayDbTestCase, self).setUp(plugin=plugin, + ext_mgr=ext_mgr) + + def _create_network_gateway(self, fmt, tenant_id, name=None, + devices=None, arg_list=None, **kwargs): + data = {self.gw_resource: {'tenant_id': tenant_id, + 'devices': devices}} + if name: + data[self.gw_resource]['name'] = name + for arg in arg_list or (): + # Arg must be present and not empty + if arg in kwargs and kwargs[arg]: + data[self.gw_resource][arg] = kwargs[arg] + nw_gw_req = self.new_create_request(networkgw.NETWORK_GATEWAYS, + data, fmt) + if (kwargs.get('set_context') and tenant_id): + # create a specific auth context for this request + nw_gw_req.environ['neutron.context'] = context.Context( + '', tenant_id) + return nw_gw_req.get_response(self.ext_api) + + @contextlib.contextmanager + def _network_gateway(self, name='gw1', devices=None, + fmt='json', tenant_id=_uuid()): + device = None + if not devices: + device_res = self._create_gateway_device( + fmt, tenant_id, 'stt', '1.1.1.1', 'xxxxxx', + name='whatever') + if device_res.status_int >= 400: + raise exc.HTTPClientError(code=device_res.status_int) + device = self.deserialize(fmt, device_res) + devices = [{'id': device[self.dev_resource]['id'], + 'interface_name': 'xyz'}] + + res = self._create_network_gateway(fmt, tenant_id, name=name, + devices=devices) + if res.status_int >= 400: + raise exc.HTTPClientError(code=res.status_int) + network_gateway = self.deserialize(fmt, res) + yield network_gateway + + self._delete(networkgw.NETWORK_GATEWAYS, + network_gateway[self.gw_resource]['id']) + if device: + self._delete(networkgw.GATEWAY_DEVICES, + device[self.dev_resource]['id']) + + def 
_create_gateway_device(self, fmt, tenant_id, + connector_type, connector_ip, + client_certificate, name=None, + set_context=False): + data = {self.dev_resource: {'tenant_id': tenant_id, + 'connector_type': connector_type, + 'connector_ip': connector_ip, + 'client_certificate': client_certificate}} + if name: + data[self.dev_resource]['name'] = name + gw_dev_req = self.new_create_request(networkgw.GATEWAY_DEVICES, + data, fmt) + if (set_context and tenant_id): + # create a specific auth context for this request + gw_dev_req.environ['neutron.context'] = context.Context( + '', tenant_id) + return gw_dev_req.get_response(self.ext_api) + + def _update_gateway_device(self, fmt, gateway_device_id, + connector_type=None, connector_ip=None, + client_certificate=None, name=None, + set_context=False, tenant_id=None): + data = {self.dev_resource: {}} + if connector_type: + data[self.dev_resource]['connector_type'] = connector_type + if connector_ip: + data[self.dev_resource]['connector_ip'] = connector_ip + if client_certificate: + data[self.dev_resource]['client_certificate'] = client_certificate + if name: + data[self.dev_resource]['name'] = name + gw_dev_req = self.new_update_request(networkgw.GATEWAY_DEVICES, + data, gateway_device_id, fmt) + if (set_context and tenant_id): + # create a specific auth context for this request + gw_dev_req.environ['neutron.context'] = context.Context( + '', tenant_id) + return gw_dev_req.get_response(self.ext_api) + + @contextlib.contextmanager + def _gateway_device(self, name='gw_dev', + connector_type='stt', + connector_ip='1.1.1.1', + client_certificate='xxxxxxxxxxxxxxx', + fmt='json', tenant_id=_uuid()): + res = self._create_gateway_device( + fmt, + tenant_id, + connector_type=connector_type, + connector_ip=connector_ip, + client_certificate=client_certificate, + name=name) + if res.status_int >= 400: + raise exc.HTTPClientError(code=res.status_int) + gateway_device = self.deserialize(fmt, res) + yield gateway_device + + 
self._delete(networkgw.GATEWAY_DEVICES, + gateway_device[self.dev_resource]['id']) + + def _gateway_action(self, action, network_gateway_id, network_id, + segmentation_type, segmentation_id=None, + expected_status=exc.HTTPOk.code): + connection_data = {'network_id': network_id, + 'segmentation_type': segmentation_type} + if segmentation_id: + connection_data['segmentation_id'] = segmentation_id + + req = self.new_action_request(networkgw.NETWORK_GATEWAYS, + connection_data, + network_gateway_id, + "%s_network" % action) + res = req.get_response(self.ext_api) + self.assertEqual(res.status_int, expected_status) + return self.deserialize('json', res) + + def _test_connect_and_disconnect_network(self, segmentation_type, + segmentation_id=None): + with self._network_gateway() as gw: + with self.network() as net: + body = self._gateway_action('connect', + gw[self.gw_resource]['id'], + net['network']['id'], + segmentation_type, + segmentation_id) + self.assertIn('connection_info', body) + connection_info = body['connection_info'] + for attr in ('network_id', 'port_id', + 'network_gateway_id'): + self.assertIn(attr, connection_info) + # fetch port and confirm device_id + gw_port_id = connection_info['port_id'] + port_body = self._show('ports', gw_port_id) + self.assertEqual(port_body['port']['device_id'], + gw[self.gw_resource]['id']) + # Clean up - otherwise delete will fail + body = self._gateway_action('disconnect', + gw[self.gw_resource]['id'], + net['network']['id'], + segmentation_type, + segmentation_id) + # Check associated port has been deleted too + body = self._show('ports', gw_port_id, + expected_code=exc.HTTPNotFound.code) + + def test_create_network_gateway(self): + tenant_id = _uuid() + with contextlib.nested( + self._gateway_device(name='dev_1', + tenant_id=tenant_id), + self._gateway_device(name='dev_2', + tenant_id=tenant_id)) as (dev_1, dev_2): + name = 'test-gw' + dev_1_id = dev_1[self.dev_resource]['id'] + dev_2_id = dev_2[self.dev_resource]['id'] + 
devices = [{'id': dev_1_id, 'interface_name': 'xxx'}, + {'id': dev_2_id, 'interface_name': 'yyy'}] + keys = [('devices', devices), ('name', name)] + with self._network_gateway(name=name, + devices=devices, + tenant_id=tenant_id) as gw: + for k, v in keys: + self.assertEqual(gw[self.gw_resource][k], v) + + def test_create_network_gateway_no_interface_name(self): + tenant_id = _uuid() + with self._gateway_device(tenant_id=tenant_id) as dev: + name = 'test-gw' + devices = [{'id': dev[self.dev_resource]['id']}] + exp_devices = devices + exp_devices[0]['interface_name'] = 'breth0' + keys = [('devices', exp_devices), ('name', name)] + with self._network_gateway(name=name, + devices=devices, + tenant_id=tenant_id) as gw: + for k, v in keys: + self.assertEqual(gw[self.gw_resource][k], v) + + def test_create_network_gateway_not_owned_device_raises_404(self): + # Create a device with a different tenant identifier + with self._gateway_device(name='dev', tenant_id=_uuid()) as dev: + name = 'test-gw' + dev_id = dev[self.dev_resource]['id'] + devices = [{'id': dev_id, 'interface_name': 'xxx'}] + res = self._create_network_gateway( + 'json', _uuid(), name=name, devices=devices) + self.assertEqual(404, res.status_int) + + def test_delete_network_gateway(self): + tenant_id = _uuid() + with self._gateway_device(tenant_id=tenant_id) as dev: + name = 'test-gw' + device_id = dev[self.dev_resource]['id'] + devices = [{'id': device_id, + 'interface_name': 'xxx'}] + with self._network_gateway(name=name, + devices=devices, + tenant_id=tenant_id) as gw: + # Nothing to do here - just let the gateway go + gw_id = gw[self.gw_resource]['id'] + # Verify nothing left on db + session = db_api.get_session() + dev_query = session.query( + networkgw_db.NetworkGatewayDevice).filter( + networkgw_db.NetworkGatewayDevice.id == device_id) + self.assertIsNone(dev_query.first()) + gw_query = session.query(networkgw_db.NetworkGateway).filter( + networkgw_db.NetworkGateway.id == gw_id) + 
self.assertIsNone(gw_query.first()) + + def test_update_network_gateway(self): + with self._network_gateway() as gw: + data = {self.gw_resource: {'name': 'new_name'}} + req = self.new_update_request(networkgw.NETWORK_GATEWAYS, + data, + gw[self.gw_resource]['id']) + res = self.deserialize('json', req.get_response(self.ext_api)) + self.assertEqual(res[self.gw_resource]['name'], + data[self.gw_resource]['name']) + + def test_get_network_gateway(self): + with self._network_gateway(name='test-gw') as gw: + req = self.new_show_request(networkgw.NETWORK_GATEWAYS, + gw[self.gw_resource]['id']) + res = self.deserialize('json', req.get_response(self.ext_api)) + self.assertEqual(res[self.gw_resource]['name'], + gw[self.gw_resource]['name']) + + def test_list_network_gateways(self): + with self._network_gateway(name='test-gw-1') as gw1: + with self._network_gateway(name='test_gw_2') as gw2: + req = self.new_list_request(networkgw.NETWORK_GATEWAYS) + res = self.deserialize('json', req.get_response(self.ext_api)) + key = self.gw_resource + 's' + self.assertEqual(len(res[key]), 2) + self.assertEqual(res[key][0]['name'], + gw1[self.gw_resource]['name']) + self.assertEqual(res[key][1]['name'], + gw2[self.gw_resource]['name']) + + def _test_list_network_gateway_with_multiple_connections( + self, expected_gateways=1): + with self._network_gateway() as gw: + with self.network() as net_1: + self._gateway_action('connect', + gw[self.gw_resource]['id'], + net_1['network']['id'], + 'vlan', 555) + self._gateway_action('connect', + gw[self.gw_resource]['id'], + net_1['network']['id'], + 'vlan', 777) + req = self.new_list_request(networkgw.NETWORK_GATEWAYS) + res = self.deserialize('json', req.get_response(self.ext_api)) + key = self.gw_resource + 's' + self.assertEqual(len(res[key]), expected_gateways) + for item in res[key]: + self.assertIn('ports', item) + if item['id'] == gw[self.gw_resource]['id']: + gw_ports = item['ports'] + self.assertEqual(len(gw_ports), 2) + segmentation_ids = 
[555, 777] + for gw_port in gw_ports: + self.assertEqual('vlan', gw_port['segmentation_type']) + self.assertIn(gw_port['segmentation_id'], segmentation_ids) + segmentation_ids.remove(gw_port['segmentation_id']) + # Required cleanup + self._gateway_action('disconnect', + gw[self.gw_resource]['id'], + net_1['network']['id'], + 'vlan', 555) + self._gateway_action('disconnect', + gw[self.gw_resource]['id'], + net_1['network']['id'], + 'vlan', 777) + + def test_list_network_gateway_with_multiple_connections(self): + self._test_list_network_gateway_with_multiple_connections() + + def test_connect_and_disconnect_network(self): + self._test_connect_and_disconnect_network('flat') + + def test_connect_and_disconnect_network_no_seg_type(self): + self._test_connect_and_disconnect_network(None) + + def test_connect_and_disconnect_network_with_segmentation_id(self): + self._test_connect_and_disconnect_network('vlan', 999) + + def test_connect_network_multiple_times(self): + with self._network_gateway() as gw: + with self.network() as net_1: + self._gateway_action('connect', + gw[self.gw_resource]['id'], + net_1['network']['id'], + 'vlan', 555) + self._gateway_action('connect', + gw[self.gw_resource]['id'], + net_1['network']['id'], + 'vlan', 777) + self._gateway_action('disconnect', + gw[self.gw_resource]['id'], + net_1['network']['id'], + 'vlan', 555) + self._gateway_action('disconnect', + gw[self.gw_resource]['id'], + net_1['network']['id'], + 'vlan', 777) + + def test_connect_network_multiple_gateways(self): + with self._network_gateway() as gw_1: + with self._network_gateway() as gw_2: + with self.network() as net_1: + self._gateway_action('connect', + gw_1[self.gw_resource]['id'], + net_1['network']['id'], + 'vlan', 555) + self._gateway_action('connect', + gw_2[self.gw_resource]['id'], + net_1['network']['id'], + 'vlan', 555) + self._gateway_action('disconnect', + gw_1[self.gw_resource]['id'], + net_1['network']['id'], + 'vlan', 555) + self._gateway_action('disconnect', + 
gw_2[self.gw_resource]['id'], + net_1['network']['id'], + 'vlan', 555) + + def test_connect_network_mapping_in_use_returns_409(self): + with self._network_gateway() as gw: + with self.network() as net_1: + self._gateway_action('connect', + gw[self.gw_resource]['id'], + net_1['network']['id'], + 'vlan', 555) + with self.network() as net_2: + self._gateway_action('connect', + gw[self.gw_resource]['id'], + net_2['network']['id'], + 'vlan', 555, + expected_status=exc.HTTPConflict.code) + # Clean up - otherwise delete will fail + self._gateway_action('disconnect', + gw[self.gw_resource]['id'], + net_1['network']['id'], + 'vlan', 555) + + def test_connect_invalid_network_returns_400(self): + with self._network_gateway() as gw: + self._gateway_action('connect', + gw[self.gw_resource]['id'], + 'hohoho', + 'vlan', 555, + expected_status=exc.HTTPBadRequest.code) + + def test_connect_unspecified_network_returns_400(self): + with self._network_gateway() as gw: + self._gateway_action('connect', + gw[self.gw_resource]['id'], + None, + 'vlan', 555, + expected_status=exc.HTTPBadRequest.code) + + def test_disconnect_network_ambiguous_returns_409(self): + with self._network_gateway() as gw: + with self.network() as net_1: + self._gateway_action('connect', + gw[self.gw_resource]['id'], + net_1['network']['id'], + 'vlan', 555) + self._gateway_action('connect', + gw[self.gw_resource]['id'], + net_1['network']['id'], + 'vlan', 777) + # This should raise + self._gateway_action('disconnect', + gw[self.gw_resource]['id'], + net_1['network']['id'], + 'vlan', + expected_status=exc.HTTPConflict.code) + self._gateway_action('disconnect', + gw[self.gw_resource]['id'], + net_1['network']['id'], + 'vlan', 555) + self._gateway_action('disconnect', + gw[self.gw_resource]['id'], + net_1['network']['id'], + 'vlan', 777) + + def test_delete_active_gateway_port_returns_409(self): + with self._network_gateway() as gw: + with self.network() as net_1: + body = self._gateway_action('connect', + 
gw[self.gw_resource]['id'], + net_1['network']['id'], + 'vlan', 555) + # fetch port id and try to delete it + gw_port_id = body['connection_info']['port_id'] + self._delete('ports', gw_port_id, + expected_code=exc.HTTPConflict.code) + body = self._gateway_action('disconnect', + gw[self.gw_resource]['id'], + net_1['network']['id'], + 'vlan', 555) + + def test_delete_network_gateway_active_connections_returns_409(self): + with self._network_gateway() as gw: + with self.network() as net_1: + self._gateway_action('connect', + gw[self.gw_resource]['id'], + net_1['network']['id'], + 'flat') + self._delete(networkgw.NETWORK_GATEWAYS, + gw[self.gw_resource]['id'], + expected_code=exc.HTTPConflict.code) + self._gateway_action('disconnect', + gw[self.gw_resource]['id'], + net_1['network']['id'], + 'flat') + + def test_disconnect_non_existing_connection_returns_404(self): + with self._network_gateway() as gw: + with self.network() as net_1: + self._gateway_action('connect', + gw[self.gw_resource]['id'], + net_1['network']['id'], + 'vlan', 555) + self._gateway_action('disconnect', + gw[self.gw_resource]['id'], + net_1['network']['id'], + 'vlan', 999, + expected_status=exc.HTTPNotFound.code) + self._gateway_action('disconnect', + gw[self.gw_resource]['id'], + net_1['network']['id'], + 'vlan', 555) + + def test_create_gateway_device( + self, expected_status=networkgw_db.STATUS_UNKNOWN): + with self._gateway_device(name='test-dev', + connector_type='stt', + connector_ip='1.1.1.1', + client_certificate='xyz') as dev: + self.assertEqual(dev[self.dev_resource]['name'], 'test-dev') + self.assertEqual(dev[self.dev_resource]['connector_type'], 'stt') + self.assertEqual(dev[self.dev_resource]['connector_ip'], '1.1.1.1') + self.assertEqual(dev[self.dev_resource]['status'], expected_status) + + def test_list_gateway_devices(self): + with contextlib.nested( + self._gateway_device(name='test-dev-1', + connector_type='stt', + connector_ip='1.1.1.1', + client_certificate='xyz'), + 
self._gateway_device(name='test-dev-2', + connector_type='stt', + connector_ip='2.2.2.2', + client_certificate='qwe')) as (dev_1, dev_2): + req = self.new_list_request(networkgw.GATEWAY_DEVICES) + res = self.deserialize('json', req.get_response(self.ext_api)) + devices = res[networkgw.GATEWAY_DEVICES.replace('-', '_')] + self.assertEqual(len(devices), 2) + dev_1 = devices[0] + dev_2 = devices[1] + self.assertEqual(dev_1['name'], 'test-dev-1') + self.assertEqual(dev_2['name'], 'test-dev-2') + + def test_get_gateway_device( + self, expected_status=networkgw_db.STATUS_UNKNOWN): + with self._gateway_device(name='test-dev', + connector_type='stt', + connector_ip='1.1.1.1', + client_certificate='xyz') as dev: + req = self.new_show_request(networkgw.GATEWAY_DEVICES, + dev[self.dev_resource]['id']) + res = self.deserialize('json', req.get_response(self.ext_api)) + self.assertEqual(res[self.dev_resource]['name'], 'test-dev') + self.assertEqual(res[self.dev_resource]['connector_type'], 'stt') + self.assertEqual(res[self.dev_resource]['connector_ip'], '1.1.1.1') + self.assertEqual(res[self.dev_resource]['status'], expected_status) + + def test_update_gateway_device( + self, expected_status=networkgw_db.STATUS_UNKNOWN): + with self._gateway_device(name='test-dev', + connector_type='stt', + connector_ip='1.1.1.1', + client_certificate='xyz') as dev: + self._update_gateway_device('json', dev[self.dev_resource]['id'], + connector_type='stt', + connector_ip='2.2.2.2', + name='test-dev-upd') + req = self.new_show_request(networkgw.GATEWAY_DEVICES, + dev[self.dev_resource]['id']) + res = self.deserialize('json', req.get_response(self.ext_api)) + + self.assertEqual(res[self.dev_resource]['name'], 'test-dev-upd') + self.assertEqual(res[self.dev_resource]['connector_type'], 'stt') + self.assertEqual(res[self.dev_resource]['connector_ip'], '2.2.2.2') + self.assertEqual(res[self.dev_resource]['status'], expected_status) + + def test_delete_gateway_device(self): + with 
self._gateway_device(name='test-dev', + connector_type='stt', + connector_ip='1.1.1.1', + client_certificate='xyz') as dev: + # Nothing to do here - just note the device id + dev_id = dev[self.dev_resource]['id'] + # Verify nothing left on db + session = db_api.get_session() + dev_query = session.query(networkgw_db.NetworkGatewayDevice) + dev_query.filter(networkgw_db.NetworkGatewayDevice.id == dev_id) + self.assertIsNone(dev_query.first()) + + +class TestNetworkGateway(test_nsx_plugin.NsxPluginV2TestCase, + NetworkGatewayDbTestCase): + + def setUp(self, plugin=vmware.PLUGIN_NAME, ext_mgr=None): + cfg.CONF.set_override('api_extensions_path', vmware.NSXEXT_PATH) + # Mock l2gwlib calls for gateway devices since this resource is not + # mocked through the fake NSX API client + create_gw_dev_patcher = mock.patch.object( + l2gwlib, 'create_gateway_device') + update_gw_dev_patcher = mock.patch.object( + l2gwlib, 'update_gateway_device') + delete_gw_dev_patcher = mock.patch.object( + l2gwlib, 'delete_gateway_device') + get_gw_dev_status_patcher = mock.patch.object( + l2gwlib, 'get_gateway_device_status') + get_gw_dev_statuses_patcher = mock.patch.object( + l2gwlib, 'get_gateway_devices_status') + self.mock_create_gw_dev = create_gw_dev_patcher.start() + self.mock_create_gw_dev.return_value = {'uuid': 'callejon'} + self.mock_update_gw_dev = update_gw_dev_patcher.start() + delete_gw_dev_patcher.start() + self.mock_get_gw_dev_status = get_gw_dev_status_patcher.start() + get_gw_dev_statuses = get_gw_dev_statuses_patcher.start() + get_gw_dev_statuses.return_value = {} + + super(TestNetworkGateway, + self).setUp(plugin=plugin, ext_mgr=ext_mgr) + + def test_create_network_gateway_name_exceeds_40_chars(self): + name = 'this_is_a_gateway_whose_name_is_longer_than_40_chars' + with self._network_gateway(name=name) as nw_gw: + # Assert Neutron name is not truncated + self.assertEqual(nw_gw[self.gw_resource]['name'], name) + + def 
test_update_network_gateway_with_name_calls_backend(self): + with mock.patch.object( + nsxlib.l2gateway, 'update_l2_gw_service') as mock_update_gw: + with self._network_gateway(name='cavani') as nw_gw: + nw_gw_id = nw_gw[self.gw_resource]['id'] + self._update(networkgw.NETWORK_GATEWAYS, nw_gw_id, + {self.gw_resource: {'name': 'higuain'}}) + mock_update_gw.assert_called_once_with( + mock.ANY, nw_gw_id, 'higuain') + + def test_update_network_gateway_without_name_does_not_call_backend(self): + with mock.patch.object( + nsxlib.l2gateway, 'update_l2_gw_service') as mock_update_gw: + with self._network_gateway(name='something') as nw_gw: + nw_gw_id = nw_gw[self.gw_resource]['id'] + self._update(networkgw.NETWORK_GATEWAYS, nw_gw_id, + {self.gw_resource: {}}) + self.assertEqual(mock_update_gw.call_count, 0) + + def test_update_network_gateway_name_exceeds_40_chars(self): + new_name = 'this_is_a_gateway_whose_name_is_longer_than_40_chars' + with self._network_gateway(name='something') as nw_gw: + nw_gw_id = nw_gw[self.gw_resource]['id'] + self._update(networkgw.NETWORK_GATEWAYS, nw_gw_id, + {self.gw_resource: {'name': new_name}}) + req = self.new_show_request(networkgw.NETWORK_GATEWAYS, + nw_gw_id) + res = self.deserialize('json', req.get_response(self.ext_api)) + # Assert Neutron name is not truncated + self.assertEqual(new_name, res[self.gw_resource]['name']) + # Assert NSX name is truncated + self.assertEqual( + new_name[:40], + self.fc._fake_gatewayservice_dict[nw_gw_id]['display_name']) + + def test_create_network_gateway_nsx_error_returns_500(self): + def raise_nsx_api_exc(*args, **kwargs): + raise api_exc.NsxApiException + + with mock.patch.object(nsxlib.l2gateway, + 'create_l2_gw_service', + new=raise_nsx_api_exc): + with self._gateway_device() as dev: + res = self._create_network_gateway( + self.fmt, 'xxx', name='yyy', + devices=[{'id': dev[self.dev_resource]['id']}]) + self.assertEqual(500, res.status_int) + + def 
test_create_network_gateway_nsx_error_returns_409(self): + with mock.patch.object(nsxlib.l2gateway, + 'create_l2_gw_service', + side_effect=api_exc.Conflict): + with self._gateway_device() as dev: + res = self._create_network_gateway( + self.fmt, 'xxx', name='yyy', + devices=[{'id': dev[self.dev_resource]['id']}]) + self.assertEqual(409, res.status_int) + + def test_list_network_gateways(self): + with self._network_gateway(name='test-gw-1') as gw1: + with self._network_gateway(name='test_gw_2') as gw2: + req = self.new_list_request(networkgw.NETWORK_GATEWAYS) + res = self.deserialize('json', req.get_response(self.ext_api)) + # Ensure we always get the list in the same order + gateways = sorted( + res[self.gw_resource + 's'], key=lambda k: k['name']) + self.assertEqual(len(gateways), 3) + # We expect the default gateway too + self.assertEqual(gateways[0]['default'], True) + self.assertEqual(gateways[1]['name'], + gw1[self.gw_resource]['name']) + self.assertEqual(gateways[2]['name'], + gw2[self.gw_resource]['name']) + + def test_list_network_gateway_with_multiple_connections(self): + self._test_list_network_gateway_with_multiple_connections( + expected_gateways=2) + + def test_show_network_gateway_nsx_error_returns_404(self): + invalid_id = 'b5afd4a9-eb71-4af7-a082-8fc625a35b61' + req = self.new_show_request(networkgw.NETWORK_GATEWAYS, invalid_id) + res = req.get_response(self.ext_api) + self.assertEqual(exc.HTTPNotFound.code, res.status_int) + + def test_create_gateway_device(self): + self.mock_get_gw_dev_status.return_value = True + super(TestNetworkGateway, self).test_create_gateway_device( + expected_status=networkgw_db.STATUS_ACTIVE) + + def test_create_gateway_device_status_down(self): + self.mock_get_gw_dev_status.return_value = False + super(TestNetworkGateway, self).test_create_gateway_device( + expected_status=networkgw_db.STATUS_DOWN) + + def test_create_gateway_device_invalid_cert_returns_400(self): + self.mock_create_gw_dev.side_effect = ( + 
nsx_exc.InvalidSecurityCertificate) + res = self._create_gateway_device( + 'json', + _uuid(), + connector_type='stt', + connector_ip='1.1.1.1', + client_certificate='invalid_certificate', + name='whatever') + self.assertEqual(res.status_int, 400) + + def test_get_gateway_device(self): + self.mock_get_gw_dev_status.return_value = True + super(TestNetworkGateway, self).test_get_gateway_device( + expected_status=networkgw_db.STATUS_ACTIVE) + + def test_get_gateway_device_status_down(self): + self.mock_get_gw_dev_status.return_value = False + super(TestNetworkGateway, self).test_get_gateway_device( + expected_status=networkgw_db.STATUS_DOWN) + + def test_update_gateway_device(self): + self.mock_get_gw_dev_status.return_value = True + super(TestNetworkGateway, self).test_update_gateway_device( + expected_status=networkgw_db.STATUS_ACTIVE) + + def test_update_gateway_device_status_down(self): + self.mock_get_gw_dev_status.return_value = False + super(TestNetworkGateway, self).test_update_gateway_device( + expected_status=networkgw_db.STATUS_DOWN) + + def test_update_gateway_device_invalid_cert_returns_400(self): + with self._gateway_device( + name='whaterver', + connector_type='stt', + connector_ip='1.1.1.1', + client_certificate='iminvalidbutiitdoesnotmatter') as dev: + self.mock_update_gw_dev.side_effect = ( + nsx_exc.InvalidSecurityCertificate) + res = self._update_gateway_device( + 'json', + dev[self.dev_resource]['id'], + client_certificate='invalid_certificate') + self.assertEqual(res.status_int, 400) + + +class TestNetworkGatewayPlugin(db_base_plugin_v2.NeutronDbPluginV2, + networkgw_db.NetworkGatewayMixin): + """Simple plugin class for testing db support for network gateway ext.""" + + supported_extension_aliases = ["network-gateway"] + + def __init__(self, **args): + super(TestNetworkGatewayPlugin, self).__init__(**args) + extensions.append_api_extensions_path([vmware.NSXEXT_PATH]) + + def delete_port(self, context, id, nw_gw_port_check=True): + if 
nw_gw_port_check: + port = self._get_port(context, id) + self.prevent_network_gateway_port_deletion(context, port) + super(TestNetworkGatewayPlugin, self).delete_port(context, id) diff --git a/neutron/tests/unit/vmware/extensions/test_portsecurity.py b/neutron/tests/unit/vmware/extensions/test_portsecurity.py new file mode 100644 index 000000000..6b07b39c6 --- /dev/null +++ b/neutron/tests/unit/vmware/extensions/test_portsecurity.py @@ -0,0 +1,47 @@ +# Copyright (c) 2014 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import mock + +from neutron.common import test_lib +from neutron.plugins.vmware.common import sync +from neutron.tests.unit import test_extension_portsecurity as psec +from neutron.tests.unit import vmware +from neutron.tests.unit.vmware.apiclient import fake + + +class PortSecurityTestCase(psec.PortSecurityDBTestCase): + + def setUp(self): + test_lib.test_config['config_files'] = [ + vmware.get_fake_conf('nsx.ini.test')] + # mock api client + self.fc = fake.FakeClient(vmware.STUBS_PATH) + self.mock_nsx = mock.patch(vmware.NSXAPI_NAME, autospec=True) + instance = self.mock_nsx.start() + instance.return_value.login.return_value = "the_cookie" + # Avoid runs of the synchronizer looping call + patch_sync = mock.patch.object(sync, '_start_loopingcall') + patch_sync.start() + + instance.return_value.request.side_effect = self.fc.fake_request + super(PortSecurityTestCase, self).setUp(vmware.PLUGIN_NAME) + self.addCleanup(self.fc.reset_all) + self.addCleanup(self.mock_nsx.stop) + self.addCleanup(patch_sync.stop) + + +class TestPortSecurity(PortSecurityTestCase, psec.TestPortSecurity): + pass diff --git a/neutron/tests/unit/vmware/extensions/test_providernet.py b/neutron/tests/unit/vmware/extensions/test_providernet.py new file mode 100644 index 000000000..f6057f145 --- /dev/null +++ b/neutron/tests/unit/vmware/extensions/test_providernet.py @@ -0,0 +1,163 @@ +# Copyright (c) 2014 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from oslo.config import cfg + +from neutron.extensions import multiprovidernet as mpnet +from neutron.extensions import providernet as pnet +from neutron.tests.unit import vmware +from neutron.tests.unit.vmware import test_nsx_plugin + + +class TestProvidernet(test_nsx_plugin.NsxPluginV2TestCase): + + def test_create_provider_network_default_physical_net(self): + data = {'network': {'name': 'net1', + 'admin_state_up': True, + 'tenant_id': 'admin', + pnet.NETWORK_TYPE: 'vlan', + pnet.SEGMENTATION_ID: 411}} + network_req = self.new_create_request('networks', data, self.fmt) + net = self.deserialize(self.fmt, network_req.get_response(self.api)) + self.assertEqual(net['network'][pnet.NETWORK_TYPE], 'vlan') + self.assertEqual(net['network'][pnet.SEGMENTATION_ID], 411) + + def test_create_provider_network(self): + data = {'network': {'name': 'net1', + 'admin_state_up': True, + 'tenant_id': 'admin', + pnet.NETWORK_TYPE: 'vlan', + pnet.SEGMENTATION_ID: 411, + pnet.PHYSICAL_NETWORK: 'physnet1'}} + network_req = self.new_create_request('networks', data, self.fmt) + net = self.deserialize(self.fmt, network_req.get_response(self.api)) + self.assertEqual(net['network'][pnet.NETWORK_TYPE], 'vlan') + self.assertEqual(net['network'][pnet.SEGMENTATION_ID], 411) + self.assertEqual(net['network'][pnet.PHYSICAL_NETWORK], 'physnet1') + + +class TestMultiProviderNetworks(test_nsx_plugin.NsxPluginV2TestCase): + + def setUp(self, plugin=None): + cfg.CONF.set_override('api_extensions_path', vmware.NSXEXT_PATH) + super(TestMultiProviderNetworks, self).setUp() + + def test_create_network_provider(self): + data = {'network': {'name': 'net1', + pnet.NETWORK_TYPE: 'vlan', + pnet.PHYSICAL_NETWORK: 'physnet1', + pnet.SEGMENTATION_ID: 1, + 'tenant_id': 'tenant_one'}} + network_req = self.new_create_request('networks', data) + network = self.deserialize(self.fmt, + network_req.get_response(self.api)) + self.assertEqual(network['network'][pnet.NETWORK_TYPE], 'vlan') + 
self.assertEqual(network['network'][pnet.PHYSICAL_NETWORK], 'physnet1') + self.assertEqual(network['network'][pnet.SEGMENTATION_ID], 1) + self.assertNotIn(mpnet.SEGMENTS, network['network']) + + def test_create_network_provider_flat(self): + data = {'network': {'name': 'net1', + pnet.NETWORK_TYPE: 'flat', + pnet.PHYSICAL_NETWORK: 'physnet1', + 'tenant_id': 'tenant_one'}} + network_req = self.new_create_request('networks', data) + network = self.deserialize(self.fmt, + network_req.get_response(self.api)) + self.assertEqual('flat', network['network'][pnet.NETWORK_TYPE]) + self.assertEqual('physnet1', network['network'][pnet.PHYSICAL_NETWORK]) + self.assertEqual(0, network['network'][pnet.SEGMENTATION_ID]) + self.assertNotIn(mpnet.SEGMENTS, network['network']) + + def test_create_network_single_multiple_provider(self): + data = {'network': {'name': 'net1', + mpnet.SEGMENTS: + [{pnet.NETWORK_TYPE: 'vlan', + pnet.PHYSICAL_NETWORK: 'physnet1', + pnet.SEGMENTATION_ID: 1}], + 'tenant_id': 'tenant_one'}} + net_req = self.new_create_request('networks', data) + network = self.deserialize(self.fmt, net_req.get_response(self.api)) + for provider_field in [pnet.NETWORK_TYPE, pnet.PHYSICAL_NETWORK, + pnet.SEGMENTATION_ID]: + self.assertNotIn(provider_field, network['network']) + tz = network['network'][mpnet.SEGMENTS][0] + self.assertEqual(tz[pnet.NETWORK_TYPE], 'vlan') + self.assertEqual(tz[pnet.PHYSICAL_NETWORK], 'physnet1') + self.assertEqual(tz[pnet.SEGMENTATION_ID], 1) + + # Tests get_network() + net_req = self.new_show_request('networks', network['network']['id']) + network = self.deserialize(self.fmt, net_req.get_response(self.api)) + tz = network['network'][mpnet.SEGMENTS][0] + self.assertEqual(tz[pnet.NETWORK_TYPE], 'vlan') + self.assertEqual(tz[pnet.PHYSICAL_NETWORK], 'physnet1') + self.assertEqual(tz[pnet.SEGMENTATION_ID], 1) + + def test_create_network_multprovider(self): + data = {'network': {'name': 'net1', + mpnet.SEGMENTS: + [{pnet.NETWORK_TYPE: 'vlan', + 
pnet.PHYSICAL_NETWORK: 'physnet1', + pnet.SEGMENTATION_ID: 1}, + {pnet.NETWORK_TYPE: 'stt', + pnet.PHYSICAL_NETWORK: 'physnet1'}], + 'tenant_id': 'tenant_one'}} + network_req = self.new_create_request('networks', data) + network = self.deserialize(self.fmt, + network_req.get_response(self.api)) + tz = network['network'][mpnet.SEGMENTS] + for tz in data['network'][mpnet.SEGMENTS]: + for field in [pnet.NETWORK_TYPE, pnet.PHYSICAL_NETWORK, + pnet.SEGMENTATION_ID]: + self.assertEqual(tz.get(field), tz.get(field)) + + # Tests get_network() + net_req = self.new_show_request('networks', network['network']['id']) + network = self.deserialize(self.fmt, net_req.get_response(self.api)) + tz = network['network'][mpnet.SEGMENTS] + for tz in data['network'][mpnet.SEGMENTS]: + for field in [pnet.NETWORK_TYPE, pnet.PHYSICAL_NETWORK, + pnet.SEGMENTATION_ID]: + self.assertEqual(tz.get(field), tz.get(field)) + + def test_create_network_with_provider_and_multiprovider_fail(self): + data = {'network': {'name': 'net1', + mpnet.SEGMENTS: + [{pnet.NETWORK_TYPE: 'vlan', + pnet.PHYSICAL_NETWORK: 'physnet1', + pnet.SEGMENTATION_ID: 1}], + pnet.NETWORK_TYPE: 'vlan', + pnet.PHYSICAL_NETWORK: 'physnet1', + pnet.SEGMENTATION_ID: 1, + 'tenant_id': 'tenant_one'}} + + network_req = self.new_create_request('networks', data) + res = network_req.get_response(self.api) + self.assertEqual(res.status_int, 400) + + def test_create_network_duplicate_segments(self): + data = {'network': {'name': 'net1', + mpnet.SEGMENTS: + [{pnet.NETWORK_TYPE: 'vlan', + pnet.PHYSICAL_NETWORK: 'physnet1', + pnet.SEGMENTATION_ID: 1}, + {pnet.NETWORK_TYPE: 'vlan', + pnet.PHYSICAL_NETWORK: 'physnet1', + pnet.SEGMENTATION_ID: 1}], + 'tenant_id': 'tenant_one'}} + network_req = self.new_create_request('networks', data) + res = network_req.get_response(self.api) + self.assertEqual(res.status_int, 400) diff --git a/neutron/tests/unit/vmware/extensions/test_qosqueues.py b/neutron/tests/unit/vmware/extensions/test_qosqueues.py new 
file mode 100644 index 000000000..53e82f52f --- /dev/null +++ b/neutron/tests/unit/vmware/extensions/test_qosqueues.py @@ -0,0 +1,273 @@ +# Copyright (c) 2014 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import contextlib + +import mock +from oslo.config import cfg +import webob.exc + +from neutron import context +from neutron.plugins.vmware.dbexts import qos_db +from neutron.plugins.vmware.extensions import qos as ext_qos +from neutron.plugins.vmware import nsxlib +from neutron.tests.unit import test_extensions +from neutron.tests.unit import vmware +from neutron.tests.unit.vmware import test_nsx_plugin + + +class QoSTestExtensionManager(object): + + def get_resources(self): + return ext_qos.Qos.get_resources() + + def get_actions(self): + return [] + + def get_request_extensions(self): + return [] + + +class TestQoSQueue(test_nsx_plugin.NsxPluginV2TestCase): + + def setUp(self, plugin=None): + cfg.CONF.set_override('api_extensions_path', vmware.NSXEXT_PATH) + super(TestQoSQueue, self).setUp() + ext_mgr = QoSTestExtensionManager() + self.ext_api = test_extensions.setup_extensions_middleware(ext_mgr) + + def _create_qos_queue(self, fmt, body, **kwargs): + qos_queue = self.new_create_request('qos-queues', body) + if (kwargs.get('set_context') and 'tenant_id' in kwargs): + # create a specific auth context for this request + qos_queue.environ['neutron.context'] = context.Context( + '', kwargs['tenant_id']) + + return 
qos_queue.get_response(self.ext_api) + + @contextlib.contextmanager + def qos_queue(self, name='foo', min='0', max='10', + qos_marking=None, dscp='0', default=None, no_delete=False): + + body = {'qos_queue': {'tenant_id': 'tenant', + 'name': name, + 'min': min, + 'max': max}} + + if qos_marking: + body['qos_queue']['qos_marking'] = qos_marking + if dscp: + body['qos_queue']['dscp'] = dscp + if default: + body['qos_queue']['default'] = default + res = self._create_qos_queue('json', body) + qos_queue = self.deserialize('json', res) + if res.status_int >= 400: + raise webob.exc.HTTPClientError(code=res.status_int) + + yield qos_queue + + if not no_delete: + self._delete('qos-queues', + qos_queue['qos_queue']['id']) + + def test_create_qos_queue(self): + with self.qos_queue(name='fake_lqueue', min=34, max=44, + qos_marking='untrusted', default=False) as q: + self.assertEqual(q['qos_queue']['name'], 'fake_lqueue') + self.assertEqual(q['qos_queue']['min'], 34) + self.assertEqual(q['qos_queue']['max'], 44) + self.assertEqual(q['qos_queue']['qos_marking'], 'untrusted') + self.assertFalse(q['qos_queue']['default']) + + def test_create_trusted_qos_queue(self): + with mock.patch.object(qos_db.LOG, 'info') as log: + with mock.patch.object(nsxlib, 'do_request', + return_value={"uuid": "fake_queue"}): + with self.qos_queue(name='fake_lqueue', min=34, max=44, + qos_marking='trusted', default=False) as q: + self.assertIsNone(q['qos_queue']['dscp']) + self.assertTrue(log.called) + + def test_create_qos_queue_name_exceeds_40_chars(self): + name = 'this_is_a_queue_whose_name_is_longer_than_40_chars' + with self.qos_queue(name=name) as queue: + # Assert Neutron name is not truncated + self.assertEqual(queue['qos_queue']['name'], name) + + def test_create_qos_queue_default(self): + with self.qos_queue(default=True) as q: + self.assertTrue(q['qos_queue']['default']) + + def test_create_qos_queue_two_default_queues_fail(self): + with self.qos_queue(default=True): + body = {'qos_queue': 
{'tenant_id': 'tenant', + 'name': 'second_default_queue', + 'default': True}} + res = self._create_qos_queue('json', body) + self.assertEqual(res.status_int, 409) + + def test_create_port_with_queue(self): + with self.qos_queue(default=True) as q1: + res = self._create_network('json', 'net1', True, + arg_list=(ext_qos.QUEUE,), + queue_id=q1['qos_queue']['id']) + net1 = self.deserialize('json', res) + self.assertEqual(net1['network'][ext_qos.QUEUE], + q1['qos_queue']['id']) + device_id = "00fff4d0-e4a8-4a3a-8906-4c4cdafb59f1" + with self.port(device_id=device_id, do_delete=False) as p: + self.assertEqual(len(p['port'][ext_qos.QUEUE]), 36) + + def test_create_shared_queue_networks(self): + with self.qos_queue(default=True, no_delete=True) as q1: + res = self._create_network('json', 'net1', True, + arg_list=(ext_qos.QUEUE,), + queue_id=q1['qos_queue']['id']) + net1 = self.deserialize('json', res) + self.assertEqual(net1['network'][ext_qos.QUEUE], + q1['qos_queue']['id']) + res = self._create_network('json', 'net2', True, + arg_list=(ext_qos.QUEUE,), + queue_id=q1['qos_queue']['id']) + net2 = self.deserialize('json', res) + self.assertEqual(net1['network'][ext_qos.QUEUE], + q1['qos_queue']['id']) + device_id = "00fff4d0-e4a8-4a3a-8906-4c4cdafb59f1" + res = self._create_port('json', net1['network']['id'], + device_id=device_id) + port1 = self.deserialize('json', res) + res = self._create_port('json', net2['network']['id'], + device_id=device_id) + port2 = self.deserialize('json', res) + self.assertEqual(port1['port'][ext_qos.QUEUE], + port2['port'][ext_qos.QUEUE]) + + self._delete('ports', port1['port']['id']) + self._delete('ports', port2['port']['id']) + + def test_remove_queue_in_use_fail(self): + with self.qos_queue(no_delete=True) as q1: + res = self._create_network('json', 'net1', True, + arg_list=(ext_qos.QUEUE,), + queue_id=q1['qos_queue']['id']) + net1 = self.deserialize('json', res) + device_id = "00fff4d0-e4a8-4a3a-8906-4c4cdafb59f1" + res = 
self._create_port('json', net1['network']['id'], + device_id=device_id) + port = self.deserialize('json', res) + self._delete('qos-queues', port['port'][ext_qos.QUEUE], 409) + + def test_update_network_new_queue(self): + with self.qos_queue() as q1: + res = self._create_network('json', 'net1', True, + arg_list=(ext_qos.QUEUE,), + queue_id=q1['qos_queue']['id']) + net1 = self.deserialize('json', res) + with self.qos_queue() as new_q: + data = {'network': {ext_qos.QUEUE: new_q['qos_queue']['id']}} + req = self.new_update_request('networks', data, + net1['network']['id']) + res = req.get_response(self.api) + net1 = self.deserialize('json', res) + self.assertEqual(net1['network'][ext_qos.QUEUE], + new_q['qos_queue']['id']) + + def test_update_port_adding_device_id(self): + with self.qos_queue(no_delete=True) as q1: + res = self._create_network('json', 'net1', True, + arg_list=(ext_qos.QUEUE,), + queue_id=q1['qos_queue']['id']) + net1 = self.deserialize('json', res) + device_id = "00fff4d0-e4a8-4a3a-8906-4c4cdafb59f1" + res = self._create_port('json', net1['network']['id']) + port = self.deserialize('json', res) + self.assertIsNone(port['port'][ext_qos.QUEUE]) + + data = {'port': {'device_id': device_id}} + req = self.new_update_request('ports', data, + port['port']['id']) + + res = req.get_response(self.api) + port = self.deserialize('json', res) + self.assertEqual(len(port['port'][ext_qos.QUEUE]), 36) + + def test_get_port_with_qos_not_admin(self): + body = {'qos_queue': {'tenant_id': 'not_admin', + 'name': 'foo', 'min': 20, 'max': 20}} + res = self._create_qos_queue('json', body, tenant_id='not_admin') + q1 = self.deserialize('json', res) + res = self._create_network('json', 'net1', True, + arg_list=(ext_qos.QUEUE, 'tenant_id',), + queue_id=q1['qos_queue']['id'], + tenant_id="not_admin") + net1 = self.deserialize('json', res) + self.assertEqual(len(net1['network'][ext_qos.QUEUE]), 36) + res = self._create_port('json', net1['network']['id'], + tenant_id='not_admin', 
set_context=True) + + port = self.deserialize('json', res) + self.assertNotIn(ext_qos.QUEUE, port['port']) + + def test_dscp_value_out_of_range(self): + body = {'qos_queue': {'tenant_id': 'admin', 'dscp': '64', + 'name': 'foo', 'min': 20, 'max': 20}} + res = self._create_qos_queue('json', body) + self.assertEqual(res.status_int, 400) + + def test_non_admin_cannot_create_queue(self): + body = {'qos_queue': {'tenant_id': 'not_admin', + 'name': 'foo', 'min': 20, 'max': 20}} + res = self._create_qos_queue('json', body, tenant_id='not_admin', + set_context=True) + self.assertEqual(res.status_int, 403) + + def test_update_port_non_admin_does_not_show_queue_id(self): + body = {'qos_queue': {'tenant_id': 'not_admin', + 'name': 'foo', 'min': 20, 'max': 20}} + res = self._create_qos_queue('json', body, tenant_id='not_admin') + q1 = self.deserialize('json', res) + res = self._create_network('json', 'net1', True, + arg_list=(ext_qos.QUEUE,), + tenant_id='not_admin', + queue_id=q1['qos_queue']['id']) + + net1 = self.deserialize('json', res) + res = self._create_port('json', net1['network']['id'], + tenant_id='not_admin', set_context=True) + port = self.deserialize('json', res) + device_id = "00fff4d0-e4a8-4a3a-8906-4c4cdafb59f1" + data = {'port': {'device_id': device_id}} + neutron_context = context.Context('', 'not_admin') + port = self._update('ports', port['port']['id'], data, + neutron_context=neutron_context) + self.assertNotIn(ext_qos.QUEUE, port['port']) + + def test_rxtx_factor(self): + with self.qos_queue(max=10) as q1: + + res = self._create_network('json', 'net1', True, + arg_list=(ext_qos.QUEUE,), + queue_id=q1['qos_queue']['id']) + net1 = self.deserialize('json', res) + res = self._create_port('json', net1['network']['id'], + arg_list=(ext_qos.RXTX_FACTOR,), + rxtx_factor=2, device_id='1') + port = self.deserialize('json', res) + req = self.new_show_request('qos-queues', + port['port'][ext_qos.QUEUE]) + res = req.get_response(self.ext_api) + queue = 
self.deserialize('json', res) + self.assertEqual(queue['qos_queue']['max'], 20) diff --git a/neutron/tests/unit/vmware/nsxlib/__init__.py b/neutron/tests/unit/vmware/nsxlib/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/neutron/tests/unit/vmware/nsxlib/base.py b/neutron/tests/unit/vmware/nsxlib/base.py new file mode 100644 index 000000000..8856c00c3 --- /dev/null +++ b/neutron/tests/unit/vmware/nsxlib/base.py @@ -0,0 +1,88 @@ +# Copyright (c) 2014 VMware, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import mock + +from neutron.plugins.vmware.api_client import client +from neutron.plugins.vmware.api_client import exception +from neutron.plugins.vmware.api_client import version +from neutron.plugins.vmware.common import config # noqa +from neutron.plugins.vmware import nsx_cluster as cluster +from neutron.tests import base +from neutron.tests.unit import test_api_v2 +from neutron.tests.unit import vmware +from neutron.tests.unit.vmware.apiclient import fake + +_uuid = test_api_v2._uuid + + +class NsxlibTestCase(base.BaseTestCase): + + def setUp(self): + self.fc = fake.FakeClient(vmware.STUBS_PATH) + self.mock_nsxapi = mock.patch(vmware.NSXAPI_NAME, autospec=True) + instance = self.mock_nsxapi.start() + instance.return_value.login.return_value = "the_cookie" + fake_version = getattr(self, 'fake_version', "3.0") + instance.return_value.get_version.return_value = ( + version.Version(fake_version)) + + instance.return_value.request.side_effect = self.fc.fake_request + self.fake_cluster = cluster.NSXCluster( + name='fake-cluster', nsx_controllers=['1.1.1.1:999'], + default_tz_uuid=_uuid(), nsx_user='foo', nsx_password='bar') + self.fake_cluster.api_client = client.NsxApiClient( + ('1.1.1.1', '999', True), + self.fake_cluster.nsx_user, self.fake_cluster.nsx_password, + self.fake_cluster.req_timeout, self.fake_cluster.http_timeout, + self.fake_cluster.retries, self.fake_cluster.redirects) + + super(NsxlibTestCase, self).setUp() + self.addCleanup(self.fc.reset_all) + + def _build_tag_dict(self, tags): + # This syntax is needed for python 2.6 compatibility + return dict((t['scope'], t['tag']) for t in tags) + + +class NsxlibNegativeBaseTestCase(base.BaseTestCase): + + def setUp(self): + self.fc = fake.FakeClient(vmware.STUBS_PATH) + self.mock_nsxapi = mock.patch(vmware.NSXAPI_NAME, autospec=True) + instance = self.mock_nsxapi.start() + instance.return_value.login.return_value = "the_cookie" + # Choose 3.0, but the version is irrelevant for the aim of + # these 
tests as calls are throwing up errors anyway + fake_version = getattr(self, 'fake_version', "3.0") + instance.return_value.get_version.return_value = ( + version.Version(fake_version)) + + def _faulty_request(*args, **kwargs): + raise exception.NsxApiException + + instance.return_value.request.side_effect = _faulty_request + self.fake_cluster = cluster.NSXCluster( + name='fake-cluster', nsx_controllers=['1.1.1.1:999'], + default_tz_uuid=_uuid(), nsx_user='foo', nsx_password='bar') + self.fake_cluster.api_client = client.NsxApiClient( + ('1.1.1.1', '999', True), + self.fake_cluster.nsx_user, self.fake_cluster.nsx_password, + self.fake_cluster.req_timeout, self.fake_cluster.http_timeout, + self.fake_cluster.retries, self.fake_cluster.redirects) + + super(NsxlibNegativeBaseTestCase, self).setUp() + self.addCleanup(self.fc.reset_all) diff --git a/neutron/tests/unit/vmware/nsxlib/test_l2gateway.py b/neutron/tests/unit/vmware/nsxlib/test_l2gateway.py new file mode 100644 index 000000000..477233d6f --- /dev/null +++ b/neutron/tests/unit/vmware/nsxlib/test_l2gateway.py @@ -0,0 +1,296 @@ +# Copyright (c) 2014 VMware, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import mock + +from neutron.openstack.common import jsonutils +from neutron.plugins.vmware.api_client import exception +from neutron.plugins.vmware.common import utils as nsx_utils +from neutron.plugins.vmware import nsxlib +from neutron.plugins.vmware.nsxlib import l2gateway as l2gwlib +from neutron.plugins.vmware.nsxlib import switch as switchlib +from neutron.tests.unit import test_api_v2 +from neutron.tests.unit.vmware.nsxlib import base + +_uuid = test_api_v2._uuid + + +class L2GatewayNegativeTestCase(base.NsxlibNegativeBaseTestCase): + + def test_create_l2_gw_service_on_failure(self): + self.assertRaises(exception.NsxApiException, + l2gwlib.create_l2_gw_service, + self.fake_cluster, + 'fake-tenant', + 'fake-gateway', + [{'id': _uuid(), + 'interface_name': 'xxx'}]) + + def test_delete_l2_gw_service_on_failure(self): + self.assertRaises(exception.NsxApiException, + l2gwlib.delete_l2_gw_service, + self.fake_cluster, + 'fake-gateway') + + def test_get_l2_gw_service_on_failure(self): + self.assertRaises(exception.NsxApiException, + l2gwlib.get_l2_gw_service, + self.fake_cluster, + 'fake-gateway') + + def test_update_l2_gw_service_on_failure(self): + self.assertRaises(exception.NsxApiException, + l2gwlib.update_l2_gw_service, + self.fake_cluster, + 'fake-gateway', + 'pluto') + + +class L2GatewayTestCase(base.NsxlibTestCase): + + def _create_gw_service(self, node_uuid, display_name, + tenant_id='fake_tenant'): + return l2gwlib.create_l2_gw_service(self.fake_cluster, + tenant_id, + display_name, + [{'id': node_uuid, + 'interface_name': 'xxx'}]) + + def test_create_l2_gw_service(self): + display_name = 'fake-gateway' + node_uuid = _uuid() + response = self._create_gw_service(node_uuid, display_name) + self.assertEqual(response.get('type'), 'L2GatewayServiceConfig') + self.assertEqual(response.get('display_name'), display_name) + gateways = response.get('gateways', []) + self.assertEqual(len(gateways), 1) + self.assertEqual(gateways[0]['type'], 'L2Gateway') + 
self.assertEqual(gateways[0]['device_id'], 'xxx') + self.assertEqual(gateways[0]['transport_node_uuid'], node_uuid) + + def test_update_l2_gw_service(self): + display_name = 'fake-gateway' + new_display_name = 'still-fake-gateway' + node_uuid = _uuid() + res1 = self._create_gw_service(node_uuid, display_name) + gw_id = res1['uuid'] + res2 = l2gwlib.update_l2_gw_service( + self.fake_cluster, gw_id, new_display_name) + self.assertEqual(res2['display_name'], new_display_name) + + def test_get_l2_gw_service(self): + display_name = 'fake-gateway' + node_uuid = _uuid() + gw_id = self._create_gw_service(node_uuid, display_name)['uuid'] + response = l2gwlib.get_l2_gw_service(self.fake_cluster, gw_id) + self.assertEqual(response.get('type'), 'L2GatewayServiceConfig') + self.assertEqual(response.get('display_name'), display_name) + self.assertEqual(response.get('uuid'), gw_id) + + def test_list_l2_gw_service(self): + gw_ids = [] + for name in ('fake-1', 'fake-2'): + gw_ids.append(self._create_gw_service(_uuid(), name)['uuid']) + results = l2gwlib.get_l2_gw_services(self.fake_cluster) + self.assertEqual(len(results), 2) + self.assertEqual(sorted(gw_ids), sorted([r['uuid'] for r in results])) + + def test_list_l2_gw_service_by_tenant(self): + gw_ids = [self._create_gw_service( + _uuid(), name, tenant_id=name)['uuid'] + for name in ('fake-1', 'fake-2')] + results = l2gwlib.get_l2_gw_services(self.fake_cluster, + tenant_id='fake-1') + self.assertEqual(len(results), 1) + self.assertEqual(results[0]['uuid'], gw_ids[0]) + + def test_delete_l2_gw_service(self): + display_name = 'fake-gateway' + node_uuid = _uuid() + gw_id = self._create_gw_service(node_uuid, display_name)['uuid'] + l2gwlib.delete_l2_gw_service(self.fake_cluster, gw_id) + results = l2gwlib.get_l2_gw_services(self.fake_cluster) + self.assertEqual(len(results), 0) + + def test_plug_l2_gw_port_attachment(self): + tenant_id = 'pippo' + node_uuid = _uuid() + transport_zones_config = [{'zone_uuid': _uuid(), + 
'transport_type': 'stt'}] + lswitch = switchlib.create_lswitch( + self.fake_cluster, _uuid(), tenant_id, + 'fake-switch', transport_zones_config) + gw_id = self._create_gw_service(node_uuid, 'fake-gw')['uuid'] + lport = switchlib.create_lport( + self.fake_cluster, lswitch['uuid'], tenant_id, _uuid(), + 'fake-gw-port', gw_id, True) + l2gwlib.plug_l2_gw_service( + self.fake_cluster, lswitch['uuid'], + lport['uuid'], gw_id) + uri = nsxlib._build_uri_path(switchlib.LSWITCHPORT_RESOURCE, + lport['uuid'], + lswitch['uuid'], + is_attachment=True) + resp_obj = nsxlib.do_request("GET", uri, + cluster=self.fake_cluster) + self.assertIn('LogicalPortAttachment', resp_obj) + self.assertEqual(resp_obj['LogicalPortAttachment']['type'], + 'L2GatewayAttachment') + + def _create_expected_req_body(self, display_name, neutron_id, + connector_type, connector_ip, + client_certificate): + body = { + "display_name": display_name, + "tags": [{"tag": neutron_id, "scope": "q_gw_dev_id"}, + {"tag": 'fake_tenant', "scope": "os_tid"}, + {"tag": nsx_utils.NEUTRON_VERSION, + "scope": "quantum"}], + "transport_connectors": [ + {"transport_zone_uuid": 'fake_tz_uuid', + "ip_address": connector_ip, + "type": '%sConnector' % connector_type}], + "admin_status_enabled": True + } + if client_certificate: + body["credential"] = { + "client_certificate": { + "pem_encoded": client_certificate}, + "type": "SecurityCertificateCredential"} + return body + + def test_create_gw_device(self): + # NOTE(salv-orlando): This unit test mocks backend calls rather than + # leveraging the fake NSX API client + display_name = 'fake-device' + neutron_id = 'whatever' + connector_type = 'stt' + connector_ip = '1.1.1.1' + client_certificate = 'this_should_be_a_certificate' + with mock.patch.object(nsxlib, 'do_request') as request_mock: + expected_req_body = self._create_expected_req_body( + display_name, neutron_id, connector_type.upper(), + connector_ip, client_certificate) + l2gwlib.create_gateway_device( + 
self.fake_cluster, 'fake_tenant', display_name, neutron_id, + 'fake_tz_uuid', connector_type, connector_ip, + client_certificate) + request_mock.assert_called_once_with( + "POST", + "/ws.v1/transport-node", + jsonutils.dumps(expected_req_body), + cluster=self.fake_cluster) + + def test_update_gw_device(self): + # NOTE(salv-orlando): This unit test mocks backend calls rather than + # leveraging the fake NSX API client + display_name = 'fake-device' + neutron_id = 'whatever' + connector_type = 'stt' + connector_ip = '1.1.1.1' + client_certificate = 'this_should_be_a_certificate' + with mock.patch.object(nsxlib, 'do_request') as request_mock: + expected_req_body = self._create_expected_req_body( + display_name, neutron_id, connector_type.upper(), + connector_ip, client_certificate) + l2gwlib.update_gateway_device( + self.fake_cluster, 'whatever', 'fake_tenant', + display_name, neutron_id, + 'fake_tz_uuid', connector_type, connector_ip, + client_certificate) + + request_mock.assert_called_once_with( + "PUT", + "/ws.v1/transport-node/whatever", + jsonutils.dumps(expected_req_body), + cluster=self.fake_cluster) + + def test_update_gw_device_without_certificate(self): + # NOTE(salv-orlando): This unit test mocks backend calls rather than + # leveraging the fake NSX API client + display_name = 'fake-device' + neutron_id = 'whatever' + connector_type = 'stt' + connector_ip = '1.1.1.1' + with mock.patch.object(nsxlib, 'do_request') as request_mock: + expected_req_body = self._create_expected_req_body( + display_name, neutron_id, connector_type.upper(), + connector_ip, None) + l2gwlib.update_gateway_device( + self.fake_cluster, 'whatever', 'fake_tenant', + display_name, neutron_id, + 'fake_tz_uuid', connector_type, connector_ip, + client_certificate=None) + + request_mock.assert_called_once_with( + "PUT", + "/ws.v1/transport-node/whatever", + jsonutils.dumps(expected_req_body), + cluster=self.fake_cluster) + + def test_get_gw_device_status(self): + # NOTE(salv-orlando): This 
unit test mocks backend calls rather than + # leveraging the fake NSX API client + with mock.patch.object(nsxlib, 'do_request') as request_mock: + l2gwlib.get_gateway_device_status(self.fake_cluster, 'whatever') + request_mock.assert_called_once_with( + "GET", + "/ws.v1/transport-node/whatever/status", + cluster=self.fake_cluster) + + def test_get_gw_devices_status(self): + # NOTE(salv-orlando): This unit test mocks backend calls rather than + # leveraging the fake NSX API client + with mock.patch.object(nsxlib, 'do_request') as request_mock: + request_mock.return_value = { + 'results': [], + 'page_cursor': None, + 'result_count': 0} + l2gwlib.get_gateway_devices_status(self.fake_cluster) + request_mock.assert_called_once_with( + "GET", + ("/ws.v1/transport-node?fields=uuid,tags&" + "relations=TransportNodeStatus&" + "_page_length=1000&tag_scope=quantum"), + cluster=self.fake_cluster) + + def test_get_gw_devices_status_filter_by_tenant(self): + # NOTE(salv-orlando): This unit test mocks backend calls rather than + # leveraging the fake NSX API client + with mock.patch.object(nsxlib, 'do_request') as request_mock: + request_mock.return_value = { + 'results': [], + 'page_cursor': None, + 'result_count': 0} + l2gwlib.get_gateway_devices_status(self.fake_cluster, + tenant_id='ssc_napoli') + request_mock.assert_called_once_with( + "GET", + ("/ws.v1/transport-node?fields=uuid,tags&" + "relations=TransportNodeStatus&" + "tag_scope=os_tid&tag=ssc_napoli&" + "_page_length=1000&tag_scope=quantum"), + cluster=self.fake_cluster) + + def test_delete_gw_device(self): + # NOTE(salv-orlando): This unit test mocks backend calls rather than + # leveraging the fake NSX API client + with mock.patch.object(nsxlib, 'do_request') as request_mock: + l2gwlib.delete_gateway_device(self.fake_cluster, 'whatever') + request_mock.assert_called_once_with( + "DELETE", + "/ws.v1/transport-node/whatever", + cluster=self.fake_cluster) diff --git a/neutron/tests/unit/vmware/nsxlib/test_lsn.py 
b/neutron/tests/unit/vmware/nsxlib/test_lsn.py new file mode 100644 index 000000000..41c50b6c3 --- /dev/null +++ b/neutron/tests/unit/vmware/nsxlib/test_lsn.py @@ -0,0 +1,370 @@ +# Copyright 2013 VMware, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import mock + +from neutron.common import exceptions +from neutron.openstack.common import jsonutils as json +from neutron.plugins.vmware.api_client import exception as api_exc +from neutron.plugins.vmware.common import exceptions as nsx_exc +from neutron.plugins.vmware.common import utils +from neutron.plugins.vmware.nsxlib import lsn as lsnlib +from neutron.tests import base + + +class LSNTestCase(base.BaseTestCase): + + def setUp(self): + super(LSNTestCase, self).setUp() + self.mock_request_p = mock.patch( + 'neutron.plugins.vmware.nsxlib.do_request') + self.mock_request = self.mock_request_p.start() + self.cluster = mock.Mock() + self.cluster.default_service_cluster_uuid = 'foo' + + def test_service_cluster_None(self): + self.mock_request.return_value = None + expected = lsnlib.service_cluster_exists(None, None) + self.assertFalse(expected) + + def test_service_cluster_found(self): + self.mock_request.return_value = { + "results": [ + { + "_href": "/ws.v1/service-cluster/foo_uuid", + "display_name": "foo_name", + "uuid": "foo_uuid", + "tags": [], + "_schema": "/ws.v1/schema/ServiceClusterConfig", + "gateways": [] + } + ], + "result_count": 1 + } + expected = lsnlib.service_cluster_exists(None, 'foo_uuid') + 
self.assertTrue(expected) + + def test_service_cluster_not_found(self): + self.mock_request.side_effect = exceptions.NotFound() + expected = lsnlib.service_cluster_exists(None, 'foo_uuid') + self.assertFalse(expected) + + def test_lsn_for_network_create(self): + net_id = "foo_network_id" + tags = utils.get_tags(n_network_id=net_id) + obj = {"edge_cluster_uuid": "foo", "tags": tags} + lsnlib.lsn_for_network_create(self.cluster, net_id) + self.mock_request.assert_called_once_with( + "POST", "/ws.v1/lservices-node", + json.dumps(obj), cluster=self.cluster) + + def test_lsn_for_network_get(self): + net_id = "foo_network_id" + lsn_id = "foo_lsn_id" + self.mock_request.return_value = { + "results": [{"uuid": "foo_lsn_id"}], + "result_count": 1 + } + result = lsnlib.lsn_for_network_get(self.cluster, net_id) + self.assertEqual(lsn_id, result) + self.mock_request.assert_called_once_with( + "GET", + ("/ws.v1/lservices-node?fields=uuid&tag_scope=" + "n_network_id&tag=%s" % net_id), + cluster=self.cluster) + + def test_lsn_for_network_get_none(self): + net_id = "foo_network_id" + self.mock_request.return_value = { + "results": [{"uuid": "foo_lsn_id1"}, {"uuid": "foo_lsn_id2"}], + "result_count": 2 + } + result = lsnlib.lsn_for_network_get(self.cluster, net_id) + self.assertIsNone(result) + + def test_lsn_for_network_get_raise_not_found(self): + net_id = "foo_network_id" + self.mock_request.return_value = { + "results": [], "result_count": 0 + } + self.assertRaises(exceptions.NotFound, + lsnlib.lsn_for_network_get, + self.cluster, net_id) + + def test_lsn_delete(self): + lsn_id = "foo_id" + lsnlib.lsn_delete(self.cluster, lsn_id) + self.mock_request.assert_called_once_with( + "DELETE", + "/ws.v1/lservices-node/%s" % lsn_id, cluster=self.cluster) + + def _test_lsn_port_host_entries_update(self, lsn_type, hosts_data): + lsn_id = 'foo_lsn_id' + lsn_port_id = 'foo_lsn_port_id' + lsnlib.lsn_port_host_entries_update( + self.cluster, lsn_id, lsn_port_id, lsn_type, hosts_data) + 
self.mock_request.assert_called_once_with( + 'PUT', + '/ws.v1/lservices-node/%s/lport/%s/%s' % (lsn_id, + lsn_port_id, + lsn_type), + json.dumps({'hosts': hosts_data}), + cluster=self.cluster) + + def test_lsn_port_dhcp_entries_update(self): + hosts_data = [{"ip_address": "11.22.33.44", + "mac_address": "aa:bb:cc:dd:ee:ff"}, + {"ip_address": "44.33.22.11", + "mac_address": "ff:ee:dd:cc:bb:aa"}] + self._test_lsn_port_host_entries_update("dhcp", hosts_data) + + def test_lsn_port_metadata_entries_update(self): + hosts_data = [{"ip_address": "11.22.33.44", + "device_id": "foo_vm_uuid"}] + self._test_lsn_port_host_entries_update("metadata-proxy", hosts_data) + + def test_lsn_port_create(self): + port_data = { + "ip_address": "1.2.3.0/24", + "mac_address": "aa:bb:cc:dd:ee:ff", + "subnet_id": "foo_subnet_id" + } + port_id = "foo_port_id" + self.mock_request.return_value = {"uuid": port_id} + lsn_id = "foo_lsn_id" + result = lsnlib.lsn_port_create(self.cluster, lsn_id, port_data) + self.assertEqual(result, port_id) + tags = utils.get_tags(n_subnet_id=port_data["subnet_id"], + n_mac_address=port_data["mac_address"]) + port_obj = { + "ip_address": port_data["ip_address"], + "mac_address": port_data["mac_address"], + "type": "LogicalServicesNodePortConfig", + "tags": tags + } + self.mock_request.assert_called_once_with( + "POST", "/ws.v1/lservices-node/%s/lport" % lsn_id, + json.dumps(port_obj), cluster=self.cluster) + + def test_lsn_port_delete(self): + lsn_id = "foo_lsn_id" + lsn_port_id = "foo_port_id" + lsnlib.lsn_port_delete(self.cluster, lsn_id, lsn_port_id) + self.mock_request.assert_called_once_with( + "DELETE", + "/ws.v1/lservices-node/%s/lport/%s" % (lsn_id, lsn_port_id), + cluster=self.cluster) + + def test_lsn_port_get_with_filters(self): + lsn_id = "foo_lsn_id" + port_id = "foo_port_id" + filters = {"tag": "foo_tag", "tag_scope": "foo_scope"} + self.mock_request.return_value = { + "results": [{"uuid": port_id}], + "result_count": 1 + } + result = 
lsnlib._lsn_port_get(self.cluster, lsn_id, filters) + self.assertEqual(result, port_id) + self.mock_request.assert_called_once_with( + "GET", + ("/ws.v1/lservices-node/%s/lport?fields=uuid&tag_scope=%s&" + "tag=%s" % (lsn_id, filters["tag_scope"], filters["tag"])), + cluster=self.cluster) + + def test_lsn_port_get_with_filters_return_none(self): + self.mock_request.return_value = { + "results": [{"uuid": "foo1"}, {"uuid": "foo2"}], + "result_count": 2 + } + result = lsnlib._lsn_port_get(self.cluster, "lsn_id", None) + self.assertIsNone(result) + + def test_lsn_port_get_with_filters_raises_not_found(self): + self.mock_request.return_value = {"results": [], "result_count": 0} + self.assertRaises(exceptions.NotFound, + lsnlib._lsn_port_get, + self.cluster, "lsn_id", None) + + def test_lsn_port_info_get(self): + self.mock_request.return_value = { + "tags": [ + {"scope": "n_mac_address", "tag": "fa:16:3e:27:fd:a0"}, + {"scope": "n_subnet_id", "tag": "foo_subnet_id"}, + ], + "mac_address": "aa:bb:cc:dd:ee:ff", + "ip_address": "0.0.0.0/0", + "uuid": "foo_lsn_port_id" + } + result = lsnlib.lsn_port_info_get( + self.cluster, 'foo_lsn_id', 'foo_lsn_port_id') + self.mock_request.assert_called_once_with( + 'GET', '/ws.v1/lservices-node/foo_lsn_id/lport/foo_lsn_port_id', + cluster=self.cluster) + self.assertIn('subnet_id', result) + self.assertIn('mac_address', result) + + def test_lsn_port_info_get_raise_not_found(self): + self.mock_request.side_effect = exceptions.NotFound + self.assertRaises(exceptions.NotFound, + lsnlib.lsn_port_info_get, + self.cluster, mock.ANY, mock.ANY) + + def test_lsn_port_plug_network(self): + lsn_id = "foo_lsn_id" + lsn_port_id = "foo_lsn_port_id" + lswitch_port_id = "foo_lswitch_port_id" + lsnlib.lsn_port_plug_network( + self.cluster, lsn_id, lsn_port_id, lswitch_port_id) + self.mock_request.assert_called_once_with( + "PUT", + ("/ws.v1/lservices-node/%s/lport/%s/" + "attachment") % (lsn_id, lsn_port_id), + json.dumps({"peer_port_uuid": 
lswitch_port_id, + "type": "PatchAttachment"}), + cluster=self.cluster) + + def test_lsn_port_plug_network_raise_conflict(self): + lsn_id = "foo_lsn_id" + lsn_port_id = "foo_lsn_port_id" + lswitch_port_id = "foo_lswitch_port_id" + self.mock_request.side_effect = api_exc.Conflict + self.assertRaises( + nsx_exc.LsnConfigurationConflict, + lsnlib.lsn_port_plug_network, + self.cluster, lsn_id, lsn_port_id, lswitch_port_id) + + def _test_lsn_port_dhcp_configure( + self, lsn_id, lsn_port_id, is_enabled, opts): + lsnlib.lsn_port_dhcp_configure( + self.cluster, lsn_id, lsn_port_id, is_enabled, opts) + opt_array = [ + {"name": key, "value": val} + for key, val in opts.iteritems() + ] + self.mock_request.assert_has_calls([ + mock.call("PUT", "/ws.v1/lservices-node/%s/dhcp" % lsn_id, + json.dumps({"enabled": is_enabled}), + cluster=self.cluster), + mock.call("PUT", + ("/ws.v1/lservices-node/%s/" + "lport/%s/dhcp") % (lsn_id, lsn_port_id), + json.dumps({"options": opt_array}), + cluster=self.cluster) + ]) + + def test_lsn_port_dhcp_configure_empty_opts(self): + lsn_id = "foo_lsn_id" + lsn_port_id = "foo_lsn_port_id" + is_enabled = False + opts = {} + self._test_lsn_port_dhcp_configure( + lsn_id, lsn_port_id, is_enabled, opts) + + def test_lsn_port_dhcp_configure_with_opts(self): + lsn_id = "foo_lsn_id" + lsn_port_id = "foo_lsn_port_id" + is_enabled = True + opts = {"opt1": "val1", "opt2": "val2"} + self._test_lsn_port_dhcp_configure( + lsn_id, lsn_port_id, is_enabled, opts) + + def _test_lsn_metadata_configure( + self, lsn_id, is_enabled, opts, expected_opts): + lsnlib.lsn_metadata_configure( + self.cluster, lsn_id, is_enabled, opts) + lsn_obj = {"enabled": is_enabled} + lsn_obj.update(expected_opts) + self.mock_request.assert_has_calls([ + mock.call("PUT", + "/ws.v1/lservices-node/%s/metadata-proxy" % lsn_id, + json.dumps(lsn_obj), + cluster=self.cluster), + ]) + + def test_lsn_port_metadata_configure_empty_secret(self): + lsn_id = "foo_lsn_id" + is_enabled = True + opts = { 
+ "metadata_server_ip": "1.2.3.4", + "metadata_server_port": "8775" + } + expected_opts = { + "metadata_server_ip": "1.2.3.4", + "metadata_server_port": "8775", + } + self._test_lsn_metadata_configure( + lsn_id, is_enabled, opts, expected_opts) + + def test_lsn_metadata_configure_with_secret(self): + lsn_id = "foo_lsn_id" + is_enabled = True + opts = { + "metadata_server_ip": "1.2.3.4", + "metadata_server_port": "8775", + "metadata_proxy_shared_secret": "foo_secret" + } + expected_opts = { + "metadata_server_ip": "1.2.3.4", + "metadata_server_port": "8775", + "options": [{ + "name": "metadata_proxy_shared_secret", + "value": "foo_secret" + }] + } + self._test_lsn_metadata_configure( + lsn_id, is_enabled, opts, expected_opts) + + def _test_lsn_port_host_action( + self, lsn_port_action_func, extra_action, action, host): + lsn_id = "foo_lsn_id" + lsn_port_id = "foo_lsn_port_id" + lsn_port_action_func(self.cluster, lsn_id, lsn_port_id, host) + self.mock_request.assert_called_once_with( + "POST", + ("/ws.v1/lservices-node/%s/lport/" + "%s/%s?action=%s") % (lsn_id, lsn_port_id, extra_action, action), + json.dumps(host), cluster=self.cluster) + + def test_lsn_port_dhcp_host_add(self): + host = { + "ip_address": "1.2.3.4", + "mac_address": "aa:bb:cc:dd:ee:ff" + } + self._test_lsn_port_host_action( + lsnlib.lsn_port_dhcp_host_add, "dhcp", "add_host", host) + + def test_lsn_port_dhcp_host_remove(self): + host = { + "ip_address": "1.2.3.4", + "mac_address": "aa:bb:cc:dd:ee:ff" + } + self._test_lsn_port_host_action( + lsnlib.lsn_port_dhcp_host_remove, "dhcp", "remove_host", host) + + def test_lsn_port_metadata_host_add(self): + host = { + "ip_address": "1.2.3.4", + "instance_id": "foo_instance_id" + } + self._test_lsn_port_host_action(lsnlib.lsn_port_metadata_host_add, + "metadata-proxy", "add_host", host) + + def test_lsn_port_metadata_host_remove(self): + host = { + "ip_address": "1.2.3.4", + "instance_id": "foo_instance_id" + } + 
self._test_lsn_port_host_action(lsnlib.lsn_port_metadata_host_remove, + "metadata-proxy", "remove_host", host) diff --git a/neutron/tests/unit/vmware/nsxlib/test_queue.py b/neutron/tests/unit/vmware/nsxlib/test_queue.py new file mode 100644 index 000000000..1d7e2ea56 --- /dev/null +++ b/neutron/tests/unit/vmware/nsxlib/test_queue.py @@ -0,0 +1,69 @@ +# Copyright (c) 2014 VMware, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import mock + +from neutron.common import exceptions +from neutron.plugins.vmware.api_client import exception as api_exc +from neutron.plugins.vmware import nsxlib +from neutron.plugins.vmware.nsxlib import queue as queuelib +from neutron.tests.unit.vmware.nsxlib import base + + +class TestLogicalQueueLib(base.NsxlibTestCase): + + def setUp(self): + super(TestLogicalQueueLib, self).setUp() + self.fake_queue = { + 'name': 'fake_queue', + 'min': 0, 'max': 256, + 'dscp': 0, 'qos_marking': False + } + + def test_create_and_get_lqueue(self): + queue_id = queuelib.create_lqueue( + self.fake_cluster, self.fake_queue) + queue_res = nsxlib.do_request( + 'GET', + nsxlib._build_uri_path('lqueue', resource_id=queue_id), + cluster=self.fake_cluster) + self.assertEqual(queue_id, queue_res['uuid']) + self.assertEqual('fake_queue', queue_res['display_name']) + + def test_create_lqueue_nsx_error_raises(self): + def raise_nsx_exc(*args, **kwargs): + raise api_exc.NsxApiException() + + with mock.patch.object(nsxlib, 'do_request', new=raise_nsx_exc): + 
self.assertRaises( + exceptions.NeutronException, queuelib.create_lqueue, + self.fake_cluster, self.fake_queue) + + def test_delete_lqueue(self): + queue_id = queuelib.create_lqueue( + self.fake_cluster, self.fake_queue) + queuelib.delete_lqueue(self.fake_cluster, queue_id) + self.assertRaises(exceptions.NotFound, + nsxlib.do_request, + 'GET', + nsxlib._build_uri_path( + 'lqueue', resource_id=queue_id), + cluster=self.fake_cluster) + + def test_delete_non_existing_lqueue_raises(self): + self.assertRaises(exceptions.NeutronException, + queuelib.delete_lqueue, + self.fake_cluster, 'whatever') diff --git a/neutron/tests/unit/vmware/nsxlib/test_router.py b/neutron/tests/unit/vmware/nsxlib/test_router.py new file mode 100644 index 000000000..6d4063fe5 --- /dev/null +++ b/neutron/tests/unit/vmware/nsxlib/test_router.py @@ -0,0 +1,922 @@ +# Copyright (c) 2014 VMware, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import mock + +from oslo.config import cfg + +from neutron.common import exceptions +from neutron.openstack.common import uuidutils +from neutron.plugins.vmware.api_client import exception as api_exc +from neutron.plugins.vmware.api_client import version as version_module +from neutron.plugins.vmware.common import exceptions as nsx_exc +from neutron.plugins.vmware.common import utils +from neutron.plugins.vmware import nsxlib +from neutron.plugins.vmware.nsxlib import router as routerlib +from neutron.plugins.vmware.nsxlib import switch as switchlib +from neutron.tests.unit import test_api_v2 +from neutron.tests.unit.vmware.nsxlib import base + +_uuid = test_api_v2._uuid + + +class TestNatRules(base.NsxlibTestCase): + + def _test_create_lrouter_dnat_rule(self, version): + with mock.patch.object(self.fake_cluster.api_client, + 'get_version', + new=lambda: version_module.Version(version)): + tenant_id = 'pippo' + lrouter = routerlib.create_lrouter(self.fake_cluster, + uuidutils.generate_uuid(), + tenant_id, + 'fake_router', + '192.168.0.1') + nat_rule = routerlib.create_lrouter_dnat_rule( + self.fake_cluster, lrouter['uuid'], '10.0.0.99', + match_criteria={'destination_ip_addresses': + '192.168.0.5'}) + uri = nsxlib._build_uri_path(routerlib.LROUTERNAT_RESOURCE, + nat_rule['uuid'], + lrouter['uuid']) + resp_obj = nsxlib.do_request("GET", uri, cluster=self.fake_cluster) + self.assertEqual('DestinationNatRule', resp_obj['type']) + self.assertEqual('192.168.0.5', + resp_obj['match']['destination_ip_addresses']) + + def test_create_lrouter_dnat_rule_v2(self): + self._test_create_lrouter_dnat_rule('2.9') + + def test_create_lrouter_dnat_rule_v31(self): + self._test_create_lrouter_dnat_rule('3.1') + + +class TestExplicitLRouters(base.NsxlibTestCase): + + def setUp(self): + self.fake_version = '3.2' + super(TestExplicitLRouters, self).setUp() + + def _get_lrouter(self, tenant_id, router_name, router_id, relations=None): + schema = 
'/ws.v1/schema/RoutingTableRoutingConfig' + + router = {'display_name': router_name, + 'uuid': router_id, + 'tags': utils.get_tags(os_tid=tenant_id), + 'distributed': False, + 'routing_config': {'type': 'RoutingTableRoutingConfig', + '_schema': schema}, + '_schema': schema, + 'nat_synchronization_enabled': True, + 'replication_mode': 'service', + 'type': 'LogicalRouterConfig', + '_href': '/ws.v1/lrouter/%s' % router_id, } + if relations: + router['_relations'] = relations + return router + + def _get_single_route(self, router_id, route_id='fake_route_id_0', + prefix='0.0.0.0/0', next_hop_ip='1.1.1.1'): + return {'protocol': 'static', + '_href': '/ws.v1/lrouter/%s/rib/%s' % (router_id, route_id), + 'prefix': prefix, + '_schema': '/ws.v1/schema/RoutingTableEntry', + 'next_hop_ip': next_hop_ip, + 'action': 'accept', + 'uuid': route_id} + + def test_prepare_body_with_implicit_routing_config(self): + router_name = 'fake_router_name' + tenant_id = 'fake_tenant_id' + neutron_router_id = 'pipita_higuain' + router_type = 'SingleDefaultRouteImplicitRoutingConfig' + route_config = { + 'default_route_next_hop': {'gateway_ip_address': 'fake_address', + 'type': 'RouterNextHop'}, } + body = routerlib._prepare_lrouter_body(router_name, neutron_router_id, + tenant_id, router_type, + **route_config) + expected = {'display_name': 'fake_router_name', + 'routing_config': { + 'default_route_next_hop': + {'gateway_ip_address': 'fake_address', + 'type': 'RouterNextHop'}, + 'type': 'SingleDefaultRouteImplicitRoutingConfig'}, + 'tags': utils.get_tags(os_tid='fake_tenant_id', + q_router_id='pipita_higuain'), + 'type': 'LogicalRouterConfig', + 'replication_mode': cfg.CONF.NSX.replication_mode} + self.assertEqual(expected, body) + + def test_prepare_body_without_routing_config(self): + router_name = 'fake_router_name' + tenant_id = 'fake_tenant_id' + neutron_router_id = 'marekiaro_hamsik' + router_type = 'RoutingTableRoutingConfig' + body = routerlib._prepare_lrouter_body(router_name, 
neutron_router_id, + tenant_id, router_type) + expected = {'display_name': 'fake_router_name', + 'routing_config': {'type': 'RoutingTableRoutingConfig'}, + 'tags': utils.get_tags(os_tid='fake_tenant_id', + q_router_id='marekiaro_hamsik'), + 'type': 'LogicalRouterConfig', + 'replication_mode': cfg.CONF.NSX.replication_mode} + self.assertEqual(expected, body) + + def test_get_lrouter(self): + tenant_id = 'fake_tenant_id' + router_name = 'fake_router_name' + router_id = 'fake_router_id' + relations = { + 'LogicalRouterStatus': + {'_href': '/ws.v1/lrouter/%s/status' % router_id, + 'lport_admin_up_count': 1, + '_schema': '/ws.v1/schema/LogicalRouterStatus', + 'lport_count': 1, + 'fabric_status': True, + 'type': 'LogicalRouterStatus', + 'lport_link_up_count': 0, }, } + + with mock.patch.object(nsxlib, 'do_request', + return_value=self._get_lrouter(tenant_id, + router_name, + router_id, + relations)): + lrouter = routerlib.get_lrouter(self.fake_cluster, router_id) + self.assertTrue( + lrouter['_relations']['LogicalRouterStatus']['fabric_status']) + + def test_create_lrouter(self): + tenant_id = 'fake_tenant_id' + router_name = 'fake_router_name' + router_id = 'fake_router_id' + nexthop_ip = '10.0.0.1' + with mock.patch.object( + nsxlib, 'do_request', + return_value=self._get_lrouter(tenant_id, + router_name, + router_id)): + lrouter = routerlib.create_lrouter(self.fake_cluster, + uuidutils.generate_uuid(), + tenant_id, + router_name, nexthop_ip) + self.assertEqual(lrouter['routing_config']['type'], + 'RoutingTableRoutingConfig') + self.assertNotIn('default_route_next_hop', + lrouter['routing_config']) + + def test_update_lrouter_with_no_routes(self): + router_id = 'fake_router_id' + new_routes = [{"nexthop": "10.0.0.2", + "destination": "169.254.169.0/30"}, ] + + nsx_routes = [self._get_single_route(router_id)] + with mock.patch.object(routerlib, 'get_explicit_routes_lrouter', + return_value=nsx_routes): + with mock.patch.object(routerlib, 'create_explicit_route_lrouter', 
+ return_value='fake_uuid'): + old_routes = routerlib.update_explicit_routes_lrouter( + self.fake_cluster, router_id, new_routes) + self.assertEqual(old_routes, nsx_routes) + + def test_update_lrouter_with_no_routes_raise_nsx_exception(self): + router_id = 'fake_router_id' + new_routes = [{"nexthop": "10.0.0.2", + "destination": "169.254.169.0/30"}, ] + + nsx_routes = [self._get_single_route(router_id)] + with mock.patch.object(routerlib, 'get_explicit_routes_lrouter', + return_value=nsx_routes): + with mock.patch.object(routerlib, 'create_explicit_route_lrouter', + side_effect=api_exc.NsxApiException): + self.assertRaises(api_exc.NsxApiException, + routerlib.update_explicit_routes_lrouter, + self.fake_cluster, router_id, new_routes) + + def test_update_lrouter_with_routes(self): + router_id = 'fake_router_id' + new_routes = [{"next_hop_ip": "10.0.0.2", + "prefix": "169.254.169.0/30"}, ] + + nsx_routes = [self._get_single_route(router_id), + self._get_single_route(router_id, 'fake_route_id_1', + '0.0.0.1/24', '10.0.0.3'), + self._get_single_route(router_id, 'fake_route_id_2', + '0.0.0.2/24', '10.0.0.4'), ] + + with mock.patch.object(routerlib, 'get_explicit_routes_lrouter', + return_value=nsx_routes): + with mock.patch.object(routerlib, 'delete_explicit_route_lrouter', + return_value=None): + with mock.patch.object(routerlib, + 'create_explicit_route_lrouter', + return_value='fake_uuid'): + old_routes = routerlib.update_explicit_routes_lrouter( + self.fake_cluster, router_id, new_routes) + self.assertEqual(old_routes, nsx_routes) + + def test_update_lrouter_with_routes_raises_nsx_expception(self): + router_id = 'fake_router_id' + new_routes = [{"nexthop": "10.0.0.2", + "destination": "169.254.169.0/30"}, ] + + nsx_routes = [self._get_single_route(router_id), + self._get_single_route(router_id, 'fake_route_id_1', + '0.0.0.1/24', '10.0.0.3'), + self._get_single_route(router_id, 'fake_route_id_2', + '0.0.0.2/24', '10.0.0.4'), ] + + with mock.patch.object(routerlib, 
'get_explicit_routes_lrouter', + return_value=nsx_routes): + with mock.patch.object(routerlib, 'delete_explicit_route_lrouter', + side_effect=api_exc.NsxApiException): + with mock.patch.object( + routerlib, 'create_explicit_route_lrouter', + return_value='fake_uuid'): + self.assertRaises( + api_exc.NsxApiException, + routerlib.update_explicit_routes_lrouter, + self.fake_cluster, router_id, new_routes) + + +class RouterNegativeTestCase(base.NsxlibNegativeBaseTestCase): + + def test_create_lrouter_on_failure(self): + self.assertRaises(api_exc.NsxApiException, + routerlib.create_lrouter, + self.fake_cluster, + uuidutils.generate_uuid(), + 'pluto', + 'fake_router', + 'my_hop') + + def test_delete_lrouter_on_failure(self): + self.assertRaises(api_exc.NsxApiException, + routerlib.delete_lrouter, + self.fake_cluster, + 'fake_router') + + def test_get_lrouter_on_failure(self): + self.assertRaises(api_exc.NsxApiException, + routerlib.get_lrouter, + self.fake_cluster, + 'fake_router') + + def test_update_lrouter_on_failure(self): + self.assertRaises(api_exc.NsxApiException, + routerlib.update_lrouter, + self.fake_cluster, + 'fake_router', + 'pluto', + 'new_hop') + + +class TestLogicalRouters(base.NsxlibTestCase): + + def _verify_lrouter(self, res_lrouter, + expected_uuid, + expected_display_name, + expected_nexthop, + expected_tenant_id, + expected_neutron_id=None, + expected_distributed=None): + self.assertEqual(res_lrouter['uuid'], expected_uuid) + nexthop = (res_lrouter['routing_config'] + ['default_route_next_hop']['gateway_ip_address']) + self.assertEqual(nexthop, expected_nexthop) + router_tags = self._build_tag_dict(res_lrouter['tags']) + self.assertIn('os_tid', router_tags) + self.assertEqual(res_lrouter['display_name'], expected_display_name) + self.assertEqual(expected_tenant_id, router_tags['os_tid']) + if expected_distributed is not None: + self.assertEqual(expected_distributed, + res_lrouter['distributed']) + if expected_neutron_id: + 
self.assertIn('q_router_id', router_tags) + self.assertEqual(expected_neutron_id, router_tags['q_router_id']) + + def test_get_lrouters(self): + lrouter_uuids = [routerlib.create_lrouter( + self.fake_cluster, 'whatever', 'pippo', 'fake-lrouter-%s' % k, + '10.0.0.1')['uuid'] for k in range(3)] + routers = routerlib.get_lrouters(self.fake_cluster, 'pippo') + for router in routers: + self.assertIn(router['uuid'], lrouter_uuids) + + def _create_lrouter(self, version, neutron_id=None, distributed=None): + with mock.patch.object( + self.fake_cluster.api_client, 'get_version', + return_value=version_module.Version(version)): + if not neutron_id: + neutron_id = uuidutils.generate_uuid() + lrouter = routerlib.create_lrouter( + self.fake_cluster, neutron_id, 'pippo', + 'fake-lrouter', '10.0.0.1', distributed=distributed) + return routerlib.get_lrouter(self.fake_cluster, + lrouter['uuid']) + + def test_create_and_get_lrouter_v30(self): + neutron_id = uuidutils.generate_uuid() + res_lrouter = self._create_lrouter('3.0', neutron_id=neutron_id) + self._verify_lrouter(res_lrouter, res_lrouter['uuid'], + 'fake-lrouter', '10.0.0.1', 'pippo', + expected_neutron_id=neutron_id) + + def test_create_and_get_lrouter_v31_centralized(self): + neutron_id = uuidutils.generate_uuid() + res_lrouter = self._create_lrouter('3.1', neutron_id=neutron_id, + distributed=False) + self._verify_lrouter(res_lrouter, res_lrouter['uuid'], + 'fake-lrouter', '10.0.0.1', 'pippo', + expected_neutron_id=neutron_id, + expected_distributed=False) + + def test_create_and_get_lrouter_v31_distributed(self): + neutron_id = uuidutils.generate_uuid() + res_lrouter = self._create_lrouter('3.1', neutron_id=neutron_id, + distributed=True) + self._verify_lrouter(res_lrouter, res_lrouter['uuid'], + 'fake-lrouter', '10.0.0.1', 'pippo', + expected_neutron_id=neutron_id, + expected_distributed=True) + + def test_create_and_get_lrouter_name_exceeds_40chars(self): + neutron_id = uuidutils.generate_uuid() + display_name = '*' * 
50 + lrouter = routerlib.create_lrouter(self.fake_cluster, + neutron_id, + 'pippo', + display_name, + '10.0.0.1') + res_lrouter = routerlib.get_lrouter(self.fake_cluster, + lrouter['uuid']) + self._verify_lrouter(res_lrouter, lrouter['uuid'], + '*' * 40, '10.0.0.1', 'pippo', + expected_neutron_id=neutron_id) + + def _test_version_dependent_update_lrouter(self, version): + def foo(*args, **kwargs): + return version + + foo_func_dict = { + 'update_lrouter': { + 2: {-1: foo}, + 3: {-1: foo, 2: foo} + } + } + + with mock.patch.object(self.fake_cluster.api_client, + 'get_version', + return_value=version_module.Version(version)): + with mock.patch.dict(routerlib.ROUTER_FUNC_DICT, + foo_func_dict, clear=True): + return routerlib.update_lrouter( + self.fake_cluster, 'foo_router_id', 'foo_router_name', + 'foo_nexthop', routes={'foo_destination': 'foo_address'}) + + def test_version_dependent_update_lrouter_old_versions(self): + self.assertRaises(nsx_exc.InvalidVersion, + self._test_version_dependent_update_lrouter, + "2.9") + self.assertRaises(nsx_exc.InvalidVersion, + self._test_version_dependent_update_lrouter, + "3.0") + self.assertRaises(nsx_exc.InvalidVersion, + self._test_version_dependent_update_lrouter, + "3.1") + + def test_version_dependent_update_lrouter_new_versions(self): + self.assertEqual("3.2", + self._test_version_dependent_update_lrouter("3.2")) + self.assertEqual("4.0", + self._test_version_dependent_update_lrouter("4.0")) + self.assertEqual("4.1", + self._test_version_dependent_update_lrouter("4.1")) + + def test_update_lrouter_no_nexthop(self): + neutron_id = uuidutils.generate_uuid() + lrouter = routerlib.create_lrouter(self.fake_cluster, + neutron_id, + 'pippo', + 'fake-lrouter', + '10.0.0.1') + lrouter = routerlib.update_lrouter(self.fake_cluster, + lrouter['uuid'], + 'new_name', + None) + res_lrouter = routerlib.get_lrouter(self.fake_cluster, + lrouter['uuid']) + self._verify_lrouter(res_lrouter, lrouter['uuid'], + 'new_name', '10.0.0.1', 'pippo', + 
expected_neutron_id=neutron_id) + + def test_update_lrouter(self): + neutron_id = uuidutils.generate_uuid() + lrouter = routerlib.create_lrouter(self.fake_cluster, + neutron_id, + 'pippo', + 'fake-lrouter', + '10.0.0.1') + lrouter = routerlib.update_lrouter(self.fake_cluster, + lrouter['uuid'], + 'new_name', + '192.168.0.1') + res_lrouter = routerlib.get_lrouter(self.fake_cluster, + lrouter['uuid']) + self._verify_lrouter(res_lrouter, lrouter['uuid'], + 'new_name', '192.168.0.1', 'pippo', + expected_neutron_id=neutron_id) + + def test_update_nonexistent_lrouter_raises(self): + self.assertRaises(exceptions.NotFound, + routerlib.update_lrouter, + self.fake_cluster, + 'whatever', + 'foo', '9.9.9.9') + + def test_delete_lrouter(self): + lrouter = routerlib.create_lrouter(self.fake_cluster, + uuidutils.generate_uuid(), + 'pippo', + 'fake-lrouter', + '10.0.0.1') + routerlib.delete_lrouter(self.fake_cluster, lrouter['uuid']) + self.assertRaises(exceptions.NotFound, + routerlib.get_lrouter, + self.fake_cluster, + lrouter['uuid']) + + def test_query_lrouter_ports(self): + lrouter = routerlib.create_lrouter(self.fake_cluster, + uuidutils.generate_uuid(), + 'pippo', + 'fake-lrouter', + '10.0.0.1') + router_port_uuids = [routerlib.create_router_lport( + self.fake_cluster, lrouter['uuid'], 'pippo', + 'qp_id_%s' % k, 'port-%s' % k, True, + ['192.168.0.%s' % k], '00:11:22:33:44:55')['uuid'] + for k in range(3)] + ports = routerlib.query_lrouter_lports( + self.fake_cluster, lrouter['uuid']) + self.assertEqual(len(ports), 3) + for res_port in ports: + self.assertIn(res_port['uuid'], router_port_uuids) + + def test_query_lrouter_lports_nonexistent_lrouter_raises(self): + self.assertRaises( + exceptions.NotFound, routerlib.create_router_lport, + self.fake_cluster, 'booo', 'pippo', 'neutron_port_id', + 'name', True, ['192.168.0.1'], '00:11:22:33:44:55') + + def test_create_and_get_lrouter_port(self): + lrouter = routerlib.create_lrouter(self.fake_cluster, + uuidutils.generate_uuid(), 
+ 'pippo', + 'fake-lrouter', + '10.0.0.1') + routerlib.create_router_lport( + self.fake_cluster, lrouter['uuid'], 'pippo', 'neutron_port_id', + 'name', True, ['192.168.0.1'], '00:11:22:33:44:55') + ports = routerlib.query_lrouter_lports( + self.fake_cluster, lrouter['uuid']) + self.assertEqual(len(ports), 1) + res_port = ports[0] + port_tags = self._build_tag_dict(res_port['tags']) + self.assertEqual(['192.168.0.1'], res_port['ip_addresses']) + self.assertIn('os_tid', port_tags) + self.assertIn('q_port_id', port_tags) + self.assertEqual('pippo', port_tags['os_tid']) + self.assertEqual('neutron_port_id', port_tags['q_port_id']) + + def test_create_lrouter_port_nonexistent_router_raises(self): + self.assertRaises( + exceptions.NotFound, routerlib.create_router_lport, + self.fake_cluster, 'booo', 'pippo', 'neutron_port_id', + 'name', True, ['192.168.0.1'], '00:11:22:33:44:55') + + def test_update_lrouter_port(self): + lrouter = routerlib.create_lrouter(self.fake_cluster, + uuidutils.generate_uuid(), + 'pippo', + 'fake-lrouter', + '10.0.0.1') + lrouter_port = routerlib.create_router_lport( + self.fake_cluster, lrouter['uuid'], 'pippo', 'neutron_port_id', + 'name', True, ['192.168.0.1'], '00:11:22:33:44:55') + routerlib.update_router_lport( + self.fake_cluster, lrouter['uuid'], lrouter_port['uuid'], + 'pippo', 'another_port_id', 'name', False, + ['192.168.0.1', '10.10.10.254']) + + ports = routerlib.query_lrouter_lports( + self.fake_cluster, lrouter['uuid']) + self.assertEqual(len(ports), 1) + res_port = ports[0] + port_tags = self._build_tag_dict(res_port['tags']) + self.assertEqual(['192.168.0.1', '10.10.10.254'], + res_port['ip_addresses']) + self.assertEqual('False', res_port['admin_status_enabled']) + self.assertIn('os_tid', port_tags) + self.assertIn('q_port_id', port_tags) + self.assertEqual('pippo', port_tags['os_tid']) + self.assertEqual('another_port_id', port_tags['q_port_id']) + + def test_update_lrouter_port_nonexistent_router_raises(self): + 
self.assertRaises( + exceptions.NotFound, routerlib.update_router_lport, + self.fake_cluster, 'boo-router', 'boo-port', 'pippo', + 'neutron_port_id', 'name', True, ['192.168.0.1']) + + def test_update_lrouter_port_nonexistent_port_raises(self): + lrouter = routerlib.create_lrouter(self.fake_cluster, + uuidutils.generate_uuid(), + 'pippo', + 'fake-lrouter', + '10.0.0.1') + self.assertRaises( + exceptions.NotFound, routerlib.update_router_lport, + self.fake_cluster, lrouter['uuid'], 'boo-port', 'pippo', + 'neutron_port_id', 'name', True, ['192.168.0.1']) + + def test_delete_lrouter_port(self): + lrouter = routerlib.create_lrouter(self.fake_cluster, + uuidutils.generate_uuid(), + 'pippo', + 'fake-lrouter', + '10.0.0.1') + lrouter_port = routerlib.create_router_lport( + self.fake_cluster, lrouter['uuid'], 'pippo', 'x', 'y', True, [], + '00:11:22:33:44:55') + ports = routerlib.query_lrouter_lports( + self.fake_cluster, lrouter['uuid']) + self.assertEqual(len(ports), 1) + routerlib.delete_router_lport(self.fake_cluster, lrouter['uuid'], + lrouter_port['uuid']) + ports = routerlib.query_lrouter_lports( + self.fake_cluster, lrouter['uuid']) + self.assertFalse(len(ports)) + + def test_delete_lrouter_port_nonexistent_router_raises(self): + self.assertRaises(exceptions.NotFound, + routerlib.delete_router_lport, + self.fake_cluster, 'xyz', 'abc') + + def test_delete_lrouter_port_nonexistent_port_raises(self): + lrouter = routerlib.create_lrouter(self.fake_cluster, + uuidutils.generate_uuid(), + 'pippo', + 'fake-lrouter', + '10.0.0.1') + self.assertRaises(exceptions.NotFound, + routerlib.delete_router_lport, + self.fake_cluster, lrouter['uuid'], 'abc') + + def test_delete_peer_lrouter_port(self): + lrouter = routerlib.create_lrouter(self.fake_cluster, + uuidutils.generate_uuid(), + 'pippo', + 'fake-lrouter', + '10.0.0.1') + lrouter_port = routerlib.create_router_lport( + self.fake_cluster, lrouter['uuid'], 'pippo', 'x', 'y', True, [], + '00:11:22:33:44:55') + + def 
fakegetport(*args, **kwargs): + return {'_relations': {'LogicalPortAttachment': + {'peer_port_uuid': lrouter_port['uuid']}}} + # mock get_port + with mock.patch.object(switchlib, 'get_port', new=fakegetport): + routerlib.delete_peer_router_lport(self.fake_cluster, + lrouter_port['uuid'], + 'whatwever', 'whatever') + + def test_update_lrouter_port_ips_add_only(self): + lrouter = routerlib.create_lrouter(self.fake_cluster, + uuidutils.generate_uuid(), + 'pippo', + 'fake-lrouter', + '10.0.0.1') + lrouter_port = routerlib.create_router_lport( + self.fake_cluster, lrouter['uuid'], 'pippo', 'neutron_port_id', + 'name', True, ['192.168.0.1'], '00:11:22:33:44:55') + routerlib.update_lrouter_port_ips( + self.fake_cluster, lrouter['uuid'], lrouter_port['uuid'], + ['10.10.10.254'], []) + ports = routerlib.query_lrouter_lports( + self.fake_cluster, lrouter['uuid']) + self.assertEqual(len(ports), 1) + res_port = ports[0] + self.assertEqual(['10.10.10.254', '192.168.0.1'], + res_port['ip_addresses']) + + def test_update_lrouter_port_ips_remove_only(self): + lrouter = routerlib.create_lrouter(self.fake_cluster, + uuidutils.generate_uuid(), + 'pippo', + 'fake-lrouter', + '10.0.0.1') + lrouter_port = routerlib.create_router_lport( + self.fake_cluster, lrouter['uuid'], 'pippo', 'neutron_port_id', + 'name', True, ['192.168.0.1', '10.10.10.254'], + '00:11:22:33:44:55') + routerlib.update_lrouter_port_ips( + self.fake_cluster, lrouter['uuid'], lrouter_port['uuid'], + [], ['10.10.10.254']) + ports = routerlib.query_lrouter_lports( + self.fake_cluster, lrouter['uuid']) + self.assertEqual(len(ports), 1) + res_port = ports[0] + self.assertEqual(['192.168.0.1'], res_port['ip_addresses']) + + def test_update_lrouter_port_ips_add_and_remove(self): + lrouter = routerlib.create_lrouter(self.fake_cluster, + uuidutils.generate_uuid(), + 'pippo', + 'fake-lrouter', + '10.0.0.1') + lrouter_port = routerlib.create_router_lport( + self.fake_cluster, lrouter['uuid'], 'pippo', 'neutron_port_id', + 
'name', True, ['192.168.0.1'], '00:11:22:33:44:55') + routerlib.update_lrouter_port_ips( + self.fake_cluster, lrouter['uuid'], lrouter_port['uuid'], + ['10.10.10.254'], ['192.168.0.1']) + ports = routerlib.query_lrouter_lports( + self.fake_cluster, lrouter['uuid']) + self.assertEqual(len(ports), 1) + res_port = ports[0] + self.assertEqual(['10.10.10.254'], res_port['ip_addresses']) + + def test_update_lrouter_port_ips_nonexistent_router_raises(self): + self.assertRaises( + nsx_exc.NsxPluginException, routerlib.update_lrouter_port_ips, + self.fake_cluster, 'boo-router', 'boo-port', [], []) + + def test_update_lrouter_port_ips_nsx_exception_raises(self): + lrouter = routerlib.create_lrouter(self.fake_cluster, + uuidutils.generate_uuid(), + 'pippo', + 'fake-lrouter', + '10.0.0.1') + lrouter_port = routerlib.create_router_lport( + self.fake_cluster, lrouter['uuid'], 'pippo', 'neutron_port_id', + 'name', True, ['192.168.0.1'], '00:11:22:33:44:55') + + def raise_nsx_exc(*args, **kwargs): + raise api_exc.NsxApiException() + + with mock.patch.object(nsxlib, 'do_request', new=raise_nsx_exc): + self.assertRaises( + nsx_exc.NsxPluginException, routerlib.update_lrouter_port_ips, + self.fake_cluster, lrouter['uuid'], + lrouter_port['uuid'], [], []) + + def test_plug_lrouter_port_patch_attachment(self): + tenant_id = 'pippo' + transport_zones_config = [{'zone_uuid': _uuid(), + 'transport_type': 'stt'}] + lswitch = switchlib.create_lswitch(self.fake_cluster, + _uuid(), + tenant_id, 'fake-switch', + transport_zones_config) + lport = switchlib.create_lport(self.fake_cluster, lswitch['uuid'], + tenant_id, 'xyz', + 'name', 'device_id', True) + lrouter = routerlib.create_lrouter(self.fake_cluster, + uuidutils.generate_uuid(), + tenant_id, + 'fake-lrouter', + '10.0.0.1') + lrouter_port = routerlib.create_router_lport( + self.fake_cluster, lrouter['uuid'], 'pippo', 'neutron_port_id', + 'name', True, ['192.168.0.1'], '00:11:22:33:44:55:66') + result = 
routerlib.plug_router_port_attachment( + self.fake_cluster, lrouter['uuid'], + lrouter_port['uuid'], + lport['uuid'], 'PatchAttachment') + self.assertEqual(lport['uuid'], + result['LogicalPortAttachment']['peer_port_uuid']) + + def test_plug_lrouter_port_l3_gw_attachment(self): + lrouter = routerlib.create_lrouter(self.fake_cluster, + uuidutils.generate_uuid(), + 'pippo', + 'fake-lrouter', + '10.0.0.1') + lrouter_port = routerlib.create_router_lport( + self.fake_cluster, lrouter['uuid'], 'pippo', 'neutron_port_id', + 'name', True, ['192.168.0.1'], '00:11:22:33:44:55:66') + result = routerlib.plug_router_port_attachment( + self.fake_cluster, lrouter['uuid'], + lrouter_port['uuid'], + 'gw_att', 'L3GatewayAttachment') + self.assertEqual( + 'gw_att', + result['LogicalPortAttachment']['l3_gateway_service_uuid']) + + def test_plug_lrouter_port_l3_gw_attachment_with_vlan(self): + lrouter = routerlib.create_lrouter(self.fake_cluster, + uuidutils.generate_uuid(), + 'pippo', + 'fake-lrouter', + '10.0.0.1') + lrouter_port = routerlib.create_router_lport( + self.fake_cluster, lrouter['uuid'], 'pippo', 'neutron_port_id', + 'name', True, ['192.168.0.1'], '00:11:22:33:44:55') + result = routerlib.plug_router_port_attachment( + self.fake_cluster, lrouter['uuid'], + lrouter_port['uuid'], + 'gw_att', 'L3GatewayAttachment', 123) + self.assertEqual( + 'gw_att', + result['LogicalPortAttachment']['l3_gateway_service_uuid']) + self.assertEqual( + '123', + result['LogicalPortAttachment']['vlan_id']) + + def test_plug_lrouter_port_invalid_attachment_type_raises(self): + lrouter = routerlib.create_lrouter(self.fake_cluster, + uuidutils.generate_uuid(), + 'pippo', + 'fake-lrouter', + '10.0.0.1') + lrouter_port = routerlib.create_router_lport( + self.fake_cluster, lrouter['uuid'], 'pippo', 'neutron_port_id', + 'name', True, ['192.168.0.1'], '00:11:22:33:44:55') + self.assertRaises(nsx_exc.InvalidAttachmentType, + routerlib.plug_router_port_attachment, + self.fake_cluster, lrouter['uuid'], + 
lrouter_port['uuid'], 'gw_att', 'BadType') + + def _test_create_router_snat_rule(self, version): + lrouter = routerlib.create_lrouter(self.fake_cluster, + uuidutils.generate_uuid(), + 'pippo', + 'fake-lrouter', + '10.0.0.1') + with mock.patch.object(self.fake_cluster.api_client, + 'get_version', + new=lambda: version_module.Version(version)): + routerlib.create_lrouter_snat_rule( + self.fake_cluster, lrouter['uuid'], + '10.0.0.2', '10.0.0.2', order=200, + match_criteria={'source_ip_addresses': '192.168.0.24'}) + rules = routerlib.query_nat_rules( + self.fake_cluster, lrouter['uuid']) + self.assertEqual(len(rules), 1) + + def test_create_router_snat_rule_v3(self): + self._test_create_router_snat_rule('3.0') + + def test_create_router_snat_rule_v2(self): + self._test_create_router_snat_rule('2.0') + + def _test_create_router_dnat_rule(self, version, dest_port=None): + lrouter = routerlib.create_lrouter(self.fake_cluster, + uuidutils.generate_uuid(), + 'pippo', + 'fake-lrouter', + '10.0.0.1') + with mock.patch.object(self.fake_cluster.api_client, + 'get_version', + return_value=version_module.Version(version)): + routerlib.create_lrouter_dnat_rule( + self.fake_cluster, lrouter['uuid'], '192.168.0.2', order=200, + dest_port=dest_port, + match_criteria={'destination_ip_addresses': '10.0.0.3'}) + rules = routerlib.query_nat_rules( + self.fake_cluster, lrouter['uuid']) + self.assertEqual(len(rules), 1) + + def test_create_router_dnat_rule_v3(self): + self._test_create_router_dnat_rule('3.0') + + def test_create_router_dnat_rule_v2(self): + self._test_create_router_dnat_rule('2.0') + + def test_create_router_dnat_rule_v2_with_destination_port(self): + self._test_create_router_dnat_rule('2.0', 8080) + + def test_create_router_dnat_rule_v3_with_destination_port(self): + self._test_create_router_dnat_rule('3.0', 8080) + + def test_create_router_snat_rule_invalid_match_keys_raises(self): + # In this case the version does not make a difference + lrouter = 
routerlib.create_lrouter(self.fake_cluster, + uuidutils.generate_uuid(), + 'pippo', + 'fake-lrouter', + '10.0.0.1') + + with mock.patch.object(self.fake_cluster.api_client, + 'get_version', + new=lambda: '2.0'): + self.assertRaises(AttributeError, + routerlib.create_lrouter_snat_rule, + self.fake_cluster, lrouter['uuid'], + '10.0.0.2', '10.0.0.2', order=200, + match_criteria={'foo': 'bar'}) + + def _test_create_router_nosnat_rule(self, version, expected=1): + lrouter = routerlib.create_lrouter(self.fake_cluster, + uuidutils.generate_uuid(), + 'pippo', + 'fake-lrouter', + '10.0.0.1') + with mock.patch.object(self.fake_cluster.api_client, + 'get_version', + new=lambda: version_module.Version(version)): + routerlib.create_lrouter_nosnat_rule( + self.fake_cluster, lrouter['uuid'], + order=100, + match_criteria={'destination_ip_addresses': '192.168.0.0/24'}) + rules = routerlib.query_nat_rules( + self.fake_cluster, lrouter['uuid']) + # NoSNAT rules do not exist in V2 + self.assertEqual(len(rules), expected) + + def test_create_router_nosnat_rule_v2(self): + self._test_create_router_nosnat_rule('2.0', expected=0) + + def test_create_router_nosnat_rule_v3(self): + self._test_create_router_nosnat_rule('3.0') + + def _prepare_nat_rules_for_delete_tests(self): + lrouter = routerlib.create_lrouter(self.fake_cluster, + uuidutils.generate_uuid(), + 'pippo', + 'fake-lrouter', + '10.0.0.1') + # v2 or v3 makes no difference for this test + with mock.patch.object(self.fake_cluster.api_client, + 'get_version', + new=lambda: version_module.Version('2.0')): + routerlib.create_lrouter_snat_rule( + self.fake_cluster, lrouter['uuid'], + '10.0.0.2', '10.0.0.2', order=220, + match_criteria={'source_ip_addresses': '192.168.0.0/24'}) + routerlib.create_lrouter_snat_rule( + self.fake_cluster, lrouter['uuid'], + '10.0.0.3', '10.0.0.3', order=200, + match_criteria={'source_ip_addresses': '192.168.0.2/32'}) + routerlib.create_lrouter_dnat_rule( + self.fake_cluster, lrouter['uuid'], 
'192.168.0.2', order=200, + match_criteria={'destination_ip_addresses': '10.0.0.3'}) + return lrouter + + def test_delete_router_nat_rules_by_match_on_destination_ip(self): + lrouter = self._prepare_nat_rules_for_delete_tests() + rules = routerlib.query_nat_rules(self.fake_cluster, lrouter['uuid']) + self.assertEqual(len(rules), 3) + routerlib.delete_nat_rules_by_match( + self.fake_cluster, lrouter['uuid'], 'DestinationNatRule', 1, 1, + destination_ip_addresses='10.0.0.3') + rules = routerlib.query_nat_rules(self.fake_cluster, lrouter['uuid']) + self.assertEqual(len(rules), 2) + + def test_delete_router_nat_rules_by_match_on_source_ip(self): + lrouter = self._prepare_nat_rules_for_delete_tests() + rules = routerlib.query_nat_rules(self.fake_cluster, lrouter['uuid']) + self.assertEqual(len(rules), 3) + routerlib.delete_nat_rules_by_match( + self.fake_cluster, lrouter['uuid'], 'SourceNatRule', 1, 1, + source_ip_addresses='192.168.0.2/32') + rules = routerlib.query_nat_rules(self.fake_cluster, lrouter['uuid']) + self.assertEqual(len(rules), 2) + + def test_delete_router_nat_rules_by_match_no_match_expected(self): + lrouter = self._prepare_nat_rules_for_delete_tests() + rules = routerlib.query_nat_rules(self.fake_cluster, lrouter['uuid']) + self.assertEqual(len(rules), 3) + routerlib.delete_nat_rules_by_match( + self.fake_cluster, lrouter['uuid'], 'SomeWeirdType', 0) + rules = routerlib.query_nat_rules(self.fake_cluster, lrouter['uuid']) + self.assertEqual(len(rules), 3) + routerlib.delete_nat_rules_by_match( + self.fake_cluster, lrouter['uuid'], 'DestinationNatRule', 0, + destination_ip_addresses='99.99.99.99') + rules = routerlib.query_nat_rules(self.fake_cluster, lrouter['uuid']) + self.assertEqual(len(rules), 3) + + def test_delete_router_nat_rules_by_match_no_match_raises(self): + lrouter = self._prepare_nat_rules_for_delete_tests() + rules = routerlib.query_nat_rules(self.fake_cluster, lrouter['uuid']) + self.assertEqual(len(rules), 3) + self.assertRaises( + 
nsx_exc.NatRuleMismatch, + routerlib.delete_nat_rules_by_match, + self.fake_cluster, lrouter['uuid'], + 'SomeWeirdType', 1, 1) diff --git a/neutron/tests/unit/vmware/nsxlib/test_secgroup.py b/neutron/tests/unit/vmware/nsxlib/test_secgroup.py new file mode 100644 index 000000000..fb2574ff4 --- /dev/null +++ b/neutron/tests/unit/vmware/nsxlib/test_secgroup.py @@ -0,0 +1,140 @@ +# Copyright (c) 2014 VMware, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from neutron.common import exceptions +from neutron.plugins.vmware import nsxlib +from neutron.plugins.vmware.nsxlib import secgroup as secgrouplib +from neutron.tests.unit import test_api_v2 +from neutron.tests.unit.vmware.nsxlib import base + +_uuid = test_api_v2._uuid + + +class SecurityProfileTestCase(base.NsxlibTestCase): + + def test_create_and_get_security_profile(self): + sec_prof = secgrouplib.create_security_profile( + self.fake_cluster, _uuid(), 'pippo', {'name': 'test'}) + sec_prof_res = nsxlib.do_request( + secgrouplib.HTTP_GET, + nsxlib._build_uri_path('security-profile', + resource_id=sec_prof['uuid']), + cluster=self.fake_cluster) + self.assertEqual(sec_prof['uuid'], sec_prof_res['uuid']) + # Check for builtin rules + self.assertEqual(len(sec_prof_res['logical_port_egress_rules']), 1) + self.assertEqual(len(sec_prof_res['logical_port_ingress_rules']), 2) + + def test_create_and_get_default_security_profile(self): + sec_prof = secgrouplib.create_security_profile( + self.fake_cluster, _uuid(), 
'pippo', {'name': 'default'}) + sec_prof_res = nsxlib.do_request( + secgrouplib.HTTP_GET, + nsxlib._build_uri_path('security-profile', + resource_id=sec_prof['uuid']), + cluster=self.fake_cluster) + self.assertEqual(sec_prof['uuid'], sec_prof_res['uuid']) + # Check for builtin rules + self.assertEqual(len(sec_prof_res['logical_port_egress_rules']), 3) + self.assertEqual(len(sec_prof_res['logical_port_ingress_rules']), 2) + + def test_update_security_profile_raise_not_found(self): + self.assertRaises(exceptions.NotFound, + secgrouplib.update_security_profile, + self.fake_cluster, + _uuid(), 'tatore_magno(the great)') + + def test_update_security_profile(self): + tenant_id = 'foo_tenant_uuid' + secgroup_id = 'foo_secgroup_uuid' + old_sec_prof = secgrouplib.create_security_profile( + self.fake_cluster, tenant_id, secgroup_id, + {'name': 'tatore_magno'}) + new_sec_prof = secgrouplib.update_security_profile( + self.fake_cluster, old_sec_prof['uuid'], 'aaron_magno') + self.assertEqual('aaron_magno', new_sec_prof['display_name']) + + def test_update_security_profile_rules(self): + sec_prof = secgrouplib.create_security_profile( + self.fake_cluster, _uuid(), 'pippo', {'name': 'test'}) + ingress_rule = {'ethertype': 'IPv4'} + egress_rule = {'ethertype': 'IPv4', 'profile_uuid': 'xyz'} + new_rules = {'logical_port_egress_rules': [egress_rule], + 'logical_port_ingress_rules': [ingress_rule]} + secgrouplib.update_security_group_rules( + self.fake_cluster, sec_prof['uuid'], new_rules) + sec_prof_res = nsxlib.do_request( + nsxlib.HTTP_GET, + nsxlib._build_uri_path('security-profile', + resource_id=sec_prof['uuid']), + cluster=self.fake_cluster) + self.assertEqual(sec_prof['uuid'], sec_prof_res['uuid']) + # Check for builtin rules + self.assertEqual(len(sec_prof_res['logical_port_egress_rules']), 2) + self.assertIn(egress_rule, + sec_prof_res['logical_port_egress_rules']) + self.assertEqual(len(sec_prof_res['logical_port_ingress_rules']), 1) + self.assertIn(ingress_rule, + 
sec_prof_res['logical_port_ingress_rules']) + + def test_update_security_profile_rules_noingress(self): + sec_prof = secgrouplib.create_security_profile( + self.fake_cluster, _uuid(), 'pippo', {'name': 'test'}) + hidden_ingress_rule = {'ethertype': 'IPv4', + 'ip_prefix': '127.0.0.1/32'} + egress_rule = {'ethertype': 'IPv4', 'profile_uuid': 'xyz'} + new_rules = {'logical_port_egress_rules': [egress_rule], + 'logical_port_ingress_rules': []} + secgrouplib.update_security_group_rules( + self.fake_cluster, sec_prof['uuid'], new_rules) + sec_prof_res = nsxlib.do_request( + nsxlib.HTTP_GET, + nsxlib._build_uri_path('security-profile', + resource_id=sec_prof['uuid']), + cluster=self.fake_cluster) + self.assertEqual(sec_prof['uuid'], sec_prof_res['uuid']) + # Check for builtin rules + self.assertEqual(len(sec_prof_res['logical_port_egress_rules']), 2) + self.assertIn(egress_rule, + sec_prof_res['logical_port_egress_rules']) + self.assertEqual(len(sec_prof_res['logical_port_ingress_rules']), 1) + self.assertIn(hidden_ingress_rule, + sec_prof_res['logical_port_ingress_rules']) + + def test_update_non_existing_securityprofile_raises(self): + self.assertRaises(exceptions.NeutronException, + secgrouplib.update_security_group_rules, + self.fake_cluster, 'whatever', + {'logical_port_egress_rules': [], + 'logical_port_ingress_rules': []}) + + def test_delete_security_profile(self): + sec_prof = secgrouplib.create_security_profile( + self.fake_cluster, _uuid(), 'pippo', {'name': 'test'}) + secgrouplib.delete_security_profile( + self.fake_cluster, sec_prof['uuid']) + self.assertRaises(exceptions.NotFound, + nsxlib.do_request, + nsxlib.HTTP_GET, + nsxlib._build_uri_path( + 'security-profile', + resource_id=sec_prof['uuid']), + cluster=self.fake_cluster) + + def test_delete_non_existing_securityprofile_raises(self): + self.assertRaises(exceptions.NeutronException, + secgrouplib.delete_security_profile, + self.fake_cluster, 'whatever') diff --git 
a/neutron/tests/unit/vmware/nsxlib/test_switch.py b/neutron/tests/unit/vmware/nsxlib/test_switch.py new file mode 100644 index 000000000..db8c5af98 --- /dev/null +++ b/neutron/tests/unit/vmware/nsxlib/test_switch.py @@ -0,0 +1,314 @@ +# Copyright (c) 2014 VMware, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import hashlib +import mock + +from neutron.common import constants +from neutron.common import exceptions +from neutron.plugins.vmware.common import utils +from neutron.plugins.vmware.nsxlib import switch as switchlib +from neutron.tests.unit import test_api_v2 +from neutron.tests.unit.vmware.nsxlib import base + +_uuid = test_api_v2._uuid + + +class LogicalSwitchesTestCase(base.NsxlibTestCase): + + def test_create_and_get_lswitches_single(self): + tenant_id = 'pippo' + transport_zones_config = [{'zone_uuid': _uuid(), + 'transport_type': 'stt'}] + lswitch = switchlib.create_lswitch(self.fake_cluster, + _uuid(), + tenant_id, + 'fake-switch', + transport_zones_config) + res_lswitch = switchlib.get_lswitches(self.fake_cluster, + lswitch['uuid']) + self.assertEqual(len(res_lswitch), 1) + self.assertEqual(res_lswitch[0]['uuid'], + lswitch['uuid']) + + def test_create_and_get_lswitches_single_name_exceeds_40_chars(self): + tenant_id = 'pippo' + transport_zones_config = [{'zone_uuid': _uuid(), + 'transport_type': 'stt'}] + lswitch = switchlib.create_lswitch(self.fake_cluster, + tenant_id, + _uuid(), + '*' * 50, + transport_zones_config) + res_lswitch = 
switchlib.get_lswitches(self.fake_cluster, + lswitch['uuid']) + self.assertEqual(len(res_lswitch), 1) + self.assertEqual(res_lswitch[0]['uuid'], lswitch['uuid']) + self.assertEqual(res_lswitch[0]['display_name'], '*' * 40) + + def test_create_and_get_lswitches_multiple(self): + tenant_id = 'pippo' + transport_zones_config = [{'zone_uuid': _uuid(), + 'transport_type': 'stt'}] + network_id = _uuid() + main_lswitch = switchlib.create_lswitch( + self.fake_cluster, network_id, + tenant_id, 'fake-switch', transport_zones_config, + tags=[{'scope': 'multi_lswitch', 'tag': 'True'}]) + # Create secondary lswitch + second_lswitch = switchlib.create_lswitch( + self.fake_cluster, network_id, + tenant_id, 'fake-switch-2', transport_zones_config) + res_lswitch = switchlib.get_lswitches(self.fake_cluster, + network_id) + self.assertEqual(len(res_lswitch), 2) + switch_uuids = [ls['uuid'] for ls in res_lswitch] + self.assertIn(main_lswitch['uuid'], switch_uuids) + self.assertIn(second_lswitch['uuid'], switch_uuids) + for ls in res_lswitch: + if ls['uuid'] == main_lswitch['uuid']: + main_ls = ls + else: + second_ls = ls + main_ls_tags = self._build_tag_dict(main_ls['tags']) + second_ls_tags = self._build_tag_dict(second_ls['tags']) + self.assertIn('multi_lswitch', main_ls_tags) + self.assertNotIn('multi_lswitch', second_ls_tags) + self.assertIn('quantum_net_id', main_ls_tags) + self.assertIn('quantum_net_id', second_ls_tags) + self.assertEqual(main_ls_tags['quantum_net_id'], + network_id) + self.assertEqual(second_ls_tags['quantum_net_id'], + network_id) + + def _test_update_lswitch(self, tenant_id, name, tags): + transport_zones_config = [{'zone_uuid': _uuid(), + 'transport_type': 'stt'}] + lswitch = switchlib.create_lswitch(self.fake_cluster, + _uuid(), + 'pippo', + 'fake-switch', + transport_zones_config) + switchlib.update_lswitch(self.fake_cluster, lswitch['uuid'], + name, tenant_id=tenant_id, tags=tags) + res_lswitch = switchlib.get_lswitches(self.fake_cluster, + 
lswitch['uuid']) + self.assertEqual(len(res_lswitch), 1) + self.assertEqual(res_lswitch[0]['display_name'], name) + if not tags: + # no need to validate tags + return + switch_tags = self._build_tag_dict(res_lswitch[0]['tags']) + for tag in tags: + self.assertIn(tag['scope'], switch_tags) + self.assertEqual(tag['tag'], switch_tags[tag['scope']]) + + def test_update_lswitch(self): + self._test_update_lswitch(None, 'new-name', + [{'scope': 'new_tag', 'tag': 'xxx'}]) + + def test_update_lswitch_no_tags(self): + self._test_update_lswitch(None, 'new-name', None) + + def test_update_lswitch_tenant_id(self): + self._test_update_lswitch('whatever', 'new-name', None) + + def test_update_non_existing_lswitch_raises(self): + self.assertRaises(exceptions.NetworkNotFound, + switchlib.update_lswitch, + self.fake_cluster, 'whatever', + 'foo', 'bar') + + def test_delete_networks(self): + transport_zones_config = [{'zone_uuid': _uuid(), + 'transport_type': 'stt'}] + lswitch = switchlib.create_lswitch(self.fake_cluster, + _uuid(), + 'pippo', + 'fake-switch', + transport_zones_config) + switchlib.delete_networks(self.fake_cluster, lswitch['uuid'], + [lswitch['uuid']]) + self.assertRaises(exceptions.NotFound, + switchlib.get_lswitches, + self.fake_cluster, + lswitch['uuid']) + + def test_delete_non_existing_lswitch_raises(self): + self.assertRaises(exceptions.NetworkNotFound, + switchlib.delete_networks, + self.fake_cluster, 'whatever', ['whatever']) + + +class LogicalPortsTestCase(base.NsxlibTestCase): + + def _create_switch_and_port(self, tenant_id='pippo', + neutron_port_id='whatever', + name='name', device_id='device_id'): + transport_zones_config = [{'zone_uuid': _uuid(), + 'transport_type': 'stt'}] + lswitch = switchlib.create_lswitch(self.fake_cluster, + _uuid(), tenant_id, 'fake-switch', + transport_zones_config) + lport = switchlib.create_lport(self.fake_cluster, lswitch['uuid'], + tenant_id, neutron_port_id, + name, device_id, True) + return lswitch, lport + + def 
test_create_and_get_port(self): + lswitch, lport = self._create_switch_and_port() + lport_res = switchlib.get_port(self.fake_cluster, + lswitch['uuid'], lport['uuid']) + self.assertEqual(lport['uuid'], lport_res['uuid']) + # Try again with relation + lport_res = switchlib.get_port(self.fake_cluster, + lswitch['uuid'], lport['uuid'], + relations='LogicalPortStatus') + self.assertEqual(lport['uuid'], lport_res['uuid']) + + def test_plug_interface(self): + lswitch, lport = self._create_switch_and_port() + switchlib.plug_vif_interface(self.fake_cluster, lswitch['uuid'], + lport['uuid'], 'VifAttachment', 'fake') + lport_res = switchlib.get_port(self.fake_cluster, + lswitch['uuid'], lport['uuid']) + self.assertEqual(lport['uuid'], lport_res['uuid']) + + def test_get_port_by_tag(self): + lswitch, lport = self._create_switch_and_port() + lport2 = switchlib.get_port_by_neutron_tag(self.fake_cluster, + lswitch['uuid'], + 'whatever') + self.assertIsNotNone(lport2) + self.assertEqual(lport['uuid'], lport2['uuid']) + + def test_get_port_by_tag_not_found_with_switch_id_raises_not_found(self): + tenant_id = 'pippo' + neutron_port_id = 'whatever' + transport_zones_config = [{'zone_uuid': _uuid(), + 'transport_type': 'stt'}] + lswitch = switchlib.create_lswitch( + self.fake_cluster, tenant_id, _uuid(), + 'fake-switch', transport_zones_config) + self.assertRaises(exceptions.NotFound, + switchlib.get_port_by_neutron_tag, + self.fake_cluster, lswitch['uuid'], + neutron_port_id) + + def test_get_port_by_tag_not_find_wildcard_lswitch_returns_none(self): + tenant_id = 'pippo' + neutron_port_id = 'whatever' + transport_zones_config = [{'zone_uuid': _uuid(), + 'transport_type': 'stt'}] + switchlib.create_lswitch( + self.fake_cluster, tenant_id, _uuid(), + 'fake-switch', transport_zones_config) + lport = switchlib.get_port_by_neutron_tag( + self.fake_cluster, '*', neutron_port_id) + self.assertIsNone(lport) + + def test_get_port_status(self): + lswitch, lport = 
self._create_switch_and_port() + status = switchlib.get_port_status( + self.fake_cluster, lswitch['uuid'], lport['uuid']) + self.assertEqual(constants.PORT_STATUS_ACTIVE, status) + + def test_get_port_status_non_existent_raises(self): + self.assertRaises(exceptions.PortNotFoundOnNetwork, + switchlib.get_port_status, + self.fake_cluster, + 'boo', 'boo') + + def test_update_port(self): + lswitch, lport = self._create_switch_and_port() + switchlib.update_port( + self.fake_cluster, lswitch['uuid'], lport['uuid'], + 'neutron_port_id', 'pippo2', 'new_name', 'device_id', False) + lport_res = switchlib.get_port(self.fake_cluster, + lswitch['uuid'], lport['uuid']) + self.assertEqual(lport['uuid'], lport_res['uuid']) + self.assertEqual('new_name', lport_res['display_name']) + self.assertEqual('False', lport_res['admin_status_enabled']) + port_tags = self._build_tag_dict(lport_res['tags']) + self.assertIn('os_tid', port_tags) + self.assertIn('q_port_id', port_tags) + self.assertIn('vm_id', port_tags) + + def test_create_port_device_id_less_than_40_chars(self): + lswitch, lport = self._create_switch_and_port() + lport_res = switchlib.get_port(self.fake_cluster, + lswitch['uuid'], lport['uuid']) + port_tags = self._build_tag_dict(lport_res['tags']) + self.assertEqual('device_id', port_tags['vm_id']) + + def test_create_port_device_id_more_than_40_chars(self): + dev_id = "this_is_a_very_long_device_id_with_lots_of_characters" + lswitch, lport = self._create_switch_and_port(device_id=dev_id) + lport_res = switchlib.get_port(self.fake_cluster, + lswitch['uuid'], lport['uuid']) + port_tags = self._build_tag_dict(lport_res['tags']) + self.assertNotEqual(len(dev_id), len(port_tags['vm_id'])) + + def test_get_ports_with_obsolete_and_new_vm_id_tag(self): + def obsolete(device_id, obfuscate=False): + return hashlib.sha1(device_id).hexdigest() + + with mock.patch.object(utils, 'device_id_to_vm_id', new=obsolete): + dev_id1 = "short-dev-id-1" + _, lport1 = 
self._create_switch_and_port(device_id=dev_id1) + dev_id2 = "short-dev-id-2" + _, lport2 = self._create_switch_and_port(device_id=dev_id2) + + lports = switchlib.get_ports(self.fake_cluster, None, [dev_id1]) + port_tags = self._build_tag_dict(lports['whatever']['tags']) + self.assertNotEqual(dev_id1, port_tags['vm_id']) + + lports = switchlib.get_ports(self.fake_cluster, None, [dev_id2]) + port_tags = self._build_tag_dict(lports['whatever']['tags']) + self.assertEqual(dev_id2, port_tags['vm_id']) + + def test_update_non_existent_port_raises(self): + self.assertRaises(exceptions.PortNotFoundOnNetwork, + switchlib.update_port, self.fake_cluster, + 'boo', 'boo', 'boo', 'boo', 'boo', 'boo', False) + + def test_delete_port(self): + lswitch, lport = self._create_switch_and_port() + switchlib.delete_port(self.fake_cluster, + lswitch['uuid'], lport['uuid']) + self.assertRaises(exceptions.PortNotFoundOnNetwork, + switchlib.get_port, self.fake_cluster, + lswitch['uuid'], lport['uuid']) + + def test_delete_non_existent_port_raises(self): + lswitch = self._create_switch_and_port()[0] + self.assertRaises(exceptions.PortNotFoundOnNetwork, + switchlib.delete_port, self.fake_cluster, + lswitch['uuid'], 'bad_port_uuid') + + def test_query_lswitch_ports(self): + lswitch, lport = self._create_switch_and_port() + switch_port_uuids = [ + switchlib.create_lport( + self.fake_cluster, lswitch['uuid'], 'pippo', 'qportid-%s' % k, + 'port-%s' % k, 'deviceid-%s' % k, True)['uuid'] + for k in range(2)] + switch_port_uuids.append(lport['uuid']) + ports = switchlib.query_lswitch_lports( + self.fake_cluster, lswitch['uuid']) + self.assertEqual(len(ports), 3) + for res_port in ports: + self.assertIn(res_port['uuid'], switch_port_uuids) diff --git a/neutron/tests/unit/vmware/nsxlib/test_versioning.py b/neutron/tests/unit/vmware/nsxlib/test_versioning.py new file mode 100644 index 000000000..a50f94283 --- /dev/null +++ b/neutron/tests/unit/vmware/nsxlib/test_versioning.py @@ -0,0 +1,58 @@ +# 
Copyright (c) 2014 VMware, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from neutron.plugins.vmware.api_client import exception +from neutron.plugins.vmware.api_client import version as version_module +from neutron.plugins.vmware.nsxlib import router as routerlib +from neutron.plugins.vmware.nsxlib import versioning +from neutron.tests import base + + +class TestVersioning(base.BaseTestCase): + + def test_function_handling_missing_minor(self): + version = version_module.Version('2.0') + function = versioning.get_function_by_version( + routerlib.ROUTER_FUNC_DICT, 'create_lrouter', version) + self.assertEqual(routerlib.create_implicit_routing_lrouter, + function) + + def test_function_handling_with_both_major_and_minor(self): + version = version_module.Version('3.2') + function = versioning.get_function_by_version( + routerlib.ROUTER_FUNC_DICT, 'create_lrouter', version) + self.assertEqual(routerlib.create_explicit_routing_lrouter, + function) + + def test_function_handling_with_newer_major(self): + version = version_module.Version('5.2') + function = versioning.get_function_by_version( + routerlib.ROUTER_FUNC_DICT, 'create_lrouter', version) + self.assertEqual(routerlib.create_explicit_routing_lrouter, + function) + + def test_function_handling_with_obsolete_major(self): + version = version_module.Version('1.2') + self.assertRaises(NotImplementedError, + versioning.get_function_by_version, + routerlib.ROUTER_FUNC_DICT, + 'create_lrouter', version) + + 
def test_function_handling_with_unknown_version(self): + self.assertRaises(exception.ServiceUnavailable, + versioning.get_function_by_version, + routerlib.ROUTER_FUNC_DICT, + 'create_lrouter', None) diff --git a/neutron/tests/unit/vmware/test_agent_scheduler.py b/neutron/tests/unit/vmware/test_agent_scheduler.py new file mode 100644 index 000000000..6d1193454 --- /dev/null +++ b/neutron/tests/unit/vmware/test_agent_scheduler.py @@ -0,0 +1,65 @@ +# Copyright (c) 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import mock +from oslo.config import cfg + +from neutron.common import constants +from neutron.common import test_lib +from neutron.plugins.vmware.common import sync +from neutron.plugins.vmware.dhcp_meta import rpc +from neutron.tests.unit.openvswitch import test_agent_scheduler as test_base +from neutron.tests.unit import vmware +from neutron.tests.unit.vmware.apiclient import fake + + +class DhcpAgentNotifierTestCase(test_base.OvsDhcpAgentNotifierTestCase): + plugin_str = vmware.PLUGIN_NAME + + def setUp(self): + test_lib.test_config['config_files'] = [ + vmware.get_fake_conf('nsx.ini.full.test')] + + # mock api client + self.fc = fake.FakeClient(vmware.STUBS_PATH) + self.mock_nsx_api = mock.patch(vmware.NSXAPI_NAME, autospec=True) + instance = self.mock_nsx_api.start() + # Avoid runs of the synchronizer looping call + patch_sync = mock.patch.object(sync, '_start_loopingcall') + patch_sync.start() + + # Emulate tests against NSX 2.x + instance.return_value.get_version.return_value = "2.999" + instance.return_value.request.side_effect = self.fc.fake_request + super(DhcpAgentNotifierTestCase, self).setUp() + self.addCleanup(self.fc.reset_all) + self.addCleanup(patch_sync.stop) + self.addCleanup(self.mock_nsx_api.stop) + + def _test_gateway_subnet_notification(self, gateway='10.0.0.1'): + cfg.CONF.set_override('metadata_mode', 'dhcp_host_route', 'NSX') + hosts = ['hosta'] + with mock.patch.object(rpc.LOG, 'info') as mock_log: + net, subnet, port = self._network_port_create( + hosts, gateway=gateway, owner=constants.DEVICE_OWNER_DHCP) + self.assertEqual(subnet['subnet']['gateway_ip'], gateway) + called = 1 if gateway is None else 0 + self.assertEqual(called, mock_log.call_count) + + def test_gatewayless_subnet_notification(self): + self._test_gateway_subnet_notification(gateway=None) + + def test_subnet_with_gateway_notification(self): + self._test_gateway_subnet_notification() diff --git a/neutron/tests/unit/vmware/test_dhcpmeta.py 
b/neutron/tests/unit/vmware/test_dhcpmeta.py new file mode 100644 index 000000000..23c4a86cf --- /dev/null +++ b/neutron/tests/unit/vmware/test_dhcpmeta.py @@ -0,0 +1,1429 @@ +# Copyright 2013 VMware, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import mock + +from oslo.config import cfg + +from neutron.common import constants as n_consts +from neutron.common import exceptions as n_exc +from neutron import context +from neutron.db import api as db +from neutron.plugins.vmware.api_client import exception +from neutron.plugins.vmware.common import exceptions as p_exc +from neutron.plugins.vmware.dbexts import lsn_db +from neutron.plugins.vmware.dhcp_meta import constants +from neutron.plugins.vmware.dhcp_meta import lsnmanager as lsn_man +from neutron.plugins.vmware.dhcp_meta import migration as mig_man +from neutron.plugins.vmware.dhcp_meta import nsx +from neutron.plugins.vmware.dhcp_meta import rpc +from neutron.tests import base + + +class DhcpMetadataBuilderTestCase(base.BaseTestCase): + + def setUp(self): + super(DhcpMetadataBuilderTestCase, self).setUp() + self.builder = mig_man.DhcpMetadataBuilder(mock.Mock(), mock.Mock()) + self.network_id = 'foo_network_id' + self.subnet_id = 'foo_subnet_id' + self.router_id = 'foo_router_id' + + def test_dhcp_agent_get_all(self): + expected = [] + self.builder.plugin.list_dhcp_agents_hosting_network.return_value = ( + {'agents': expected}) + agents = self.builder.dhcp_agent_get_all(mock.ANY, self.network_id) + 
self.assertEqual(expected, agents) + + def test_dhcp_port_get_all(self): + expected = [] + self.builder.plugin.get_ports.return_value = expected + ports = self.builder.dhcp_port_get_all(mock.ANY, self.network_id) + self.assertEqual(expected, ports) + + def test_router_id_get(self): + port = { + 'device_id': self.router_id, + 'network_id': self.network_id, + 'fixed_ips': [{'subnet_id': self.subnet_id}] + } + subnet = { + 'id': self.subnet_id, + 'network_id': self.network_id + } + self.builder.plugin.get_ports.return_value = [port] + result = self.builder.router_id_get(context, subnet) + self.assertEqual(self.router_id, result) + + def test_router_id_get_none_subnet(self): + self.assertIsNone(self.builder.router_id_get(mock.ANY, None)) + + def test_router_id_get_none_no_router(self): + self.builder.plugin.get_ports.return_value = [] + subnet = {'network_id': self.network_id} + self.assertIsNone(self.builder.router_id_get(mock.ANY, subnet)) + + def test_metadata_deallocate(self): + self.builder.metadata_deallocate( + mock.ANY, self.router_id, self.subnet_id) + self.assertTrue(self.builder.plugin.remove_router_interface.call_count) + + def test_metadata_allocate(self): + self.builder.metadata_allocate( + mock.ANY, self.router_id, self.subnet_id) + self.assertTrue(self.builder.plugin.add_router_interface.call_count) + + def test_dhcp_deallocate(self): + agents = [{'id': 'foo_agent_id'}] + ports = [{'id': 'foo_port_id'}] + self.builder.dhcp_deallocate(mock.ANY, self.network_id, agents, ports) + self.assertTrue( + self.builder.plugin.remove_network_from_dhcp_agent.call_count) + self.assertTrue(self.builder.plugin.delete_port.call_count) + + def _test_dhcp_allocate(self, subnet, expected_notify_count): + with mock.patch.object(mig_man.nsx, 'handle_network_dhcp_access') as f: + self.builder.dhcp_allocate(mock.ANY, self.network_id, subnet) + self.assertTrue(f.call_count) + self.assertEqual(expected_notify_count, + self.builder.notifier.notify.call_count) + + def 
test_dhcp_allocate(self): + subnet = {'network_id': self.network_id, 'id': self.subnet_id} + self._test_dhcp_allocate(subnet, 2) + + def test_dhcp_allocate_none_subnet(self): + self._test_dhcp_allocate(None, 0) + + +class MigrationManagerTestCase(base.BaseTestCase): + + def setUp(self): + super(MigrationManagerTestCase, self).setUp() + self.manager = mig_man.MigrationManager(mock.Mock(), + mock.Mock(), + mock.Mock()) + self.network_id = 'foo_network_id' + self.router_id = 'foo_router_id' + self.subnet_id = 'foo_subnet_id' + self.mock_builder_p = mock.patch.object(self.manager, 'builder') + self.mock_builder = self.mock_builder_p.start() + + def _test_validate(self, lsn_exists=False, ext_net=False, subnets=None): + network = {'router:external': ext_net} + self.manager.manager.lsn_exists.return_value = lsn_exists + self.manager.plugin.get_network.return_value = network + self.manager.plugin.get_subnets.return_value = subnets + result = self.manager.validate(mock.ANY, self.network_id) + if len(subnets): + self.assertEqual(subnets[0], result) + else: + self.assertIsNone(result) + + def test_validate_no_subnets(self): + self._test_validate(subnets=[]) + + def test_validate_with_one_subnet(self): + self._test_validate(subnets=[{'cidr': '0.0.0.0/0'}]) + + def test_validate_raise_conflict_many_subnets(self): + self.assertRaises(p_exc.LsnMigrationConflict, + self._test_validate, + subnets=[{'id': 'sub1'}, {'id': 'sub2'}]) + + def test_validate_raise_conflict_lsn_exists(self): + self.assertRaises(p_exc.LsnMigrationConflict, + self._test_validate, + lsn_exists=True) + + def test_validate_raise_badrequest_external_net(self): + self.assertRaises(n_exc.BadRequest, + self._test_validate, + ext_net=True) + + def test_validate_raise_badrequest_metadata_net(self): + self.assertRaises(n_exc.BadRequest, + self._test_validate, + ext_net=False, + subnets=[{'cidr': rpc.METADATA_SUBNET_CIDR}]) + + def _test_migrate(self, router, subnet, expected_calls): + 
self.mock_builder.router_id_get.return_value = router + self.manager.migrate(mock.ANY, self.network_id, subnet) + # testing the exact the order of calls is important + self.assertEqual(expected_calls, self.mock_builder.mock_calls) + + def test_migrate(self): + subnet = { + 'id': self.subnet_id, + 'network_id': self.network_id + } + call_sequence = [ + mock.call.router_id_get(mock.ANY, subnet), + mock.call.metadata_deallocate( + mock.ANY, self.router_id, self.subnet_id), + mock.call.dhcp_agent_get_all(mock.ANY, self.network_id), + mock.call.dhcp_port_get_all(mock.ANY, self.network_id), + mock.call.dhcp_deallocate( + mock.ANY, self.network_id, mock.ANY, mock.ANY), + mock.call.dhcp_allocate(mock.ANY, self.network_id, subnet), + mock.call.metadata_allocate( + mock.ANY, self.router_id, self.subnet_id) + ] + self._test_migrate(self.router_id, subnet, call_sequence) + + def test_migrate_no_router_uplink(self): + subnet = { + 'id': self.subnet_id, + 'network_id': self.network_id + } + call_sequence = [ + mock.call.router_id_get(mock.ANY, subnet), + mock.call.dhcp_agent_get_all(mock.ANY, self.network_id), + mock.call.dhcp_port_get_all(mock.ANY, self.network_id), + mock.call.dhcp_deallocate( + mock.ANY, self.network_id, mock.ANY, mock.ANY), + mock.call.dhcp_allocate(mock.ANY, self.network_id, subnet), + ] + self._test_migrate(None, subnet, call_sequence) + + def test_migrate_no_subnet(self): + call_sequence = [ + mock.call.router_id_get(mock.ANY, None), + mock.call.dhcp_allocate(mock.ANY, self.network_id, None), + ] + self._test_migrate(None, None, call_sequence) + + def _test_report(self, lsn_attrs, expected): + self.manager.manager.lsn_port_get.return_value = lsn_attrs + report = self.manager.report(mock.ANY, self.network_id, self.subnet_id) + self.assertEqual(expected, report) + + def test_report_for_lsn(self): + self._test_report(('foo_lsn_id', 'foo_lsn_port_id'), + {'ports': ['foo_lsn_port_id'], + 'services': ['foo_lsn_id'], 'type': 'lsn'}) + + def 
test_report_for_lsn_without_lsn_port(self): + self._test_report(('foo_lsn_id', None), + {'ports': [], + 'services': ['foo_lsn_id'], 'type': 'lsn'}) + + def _test_report_for_lsn_without_subnet(self, validated_subnet): + with mock.patch.object(self.manager.plugin, 'get_subnets', + return_value=validated_subnet): + self.manager.manager.lsn_port_get.return_value = ( + ('foo_lsn_id', 'foo_lsn_port_id')) + report = self.manager.report(context, self.network_id) + expected = { + 'ports': ['foo_lsn_port_id'] if validated_subnet else [], + 'services': ['foo_lsn_id'], 'type': 'lsn' + } + self.assertEqual(expected, report) + + def test_report_for_lsn_without_subnet_subnet_found(self): + self._test_report_for_lsn_without_subnet([{'id': self.subnet_id}]) + + def test_report_for_lsn_without_subnet_subnet_not_found(self): + self.manager.manager.lsn_get.return_value = 'foo_lsn_id' + self._test_report_for_lsn_without_subnet(None) + + def test_report_for_dhcp_agent(self): + self.manager.manager.lsn_port_get.return_value = (None, None) + self.mock_builder.dhcp_agent_get_all.return_value = ( + [{'id': 'foo_agent_id'}]) + self.mock_builder.dhcp_port_get_all.return_value = ( + [{'id': 'foo_dhcp_port_id'}]) + result = self.manager.report(mock.ANY, self.network_id, self.subnet_id) + expected = { + 'ports': ['foo_dhcp_port_id'], + 'services': ['foo_agent_id'], + 'type': 'agent' + } + self.assertEqual(expected, result) + + +class LsnManagerTestCase(base.BaseTestCase): + + def setUp(self): + super(LsnManagerTestCase, self).setUp() + self.net_id = 'foo_network_id' + self.sub_id = 'foo_subnet_id' + self.port_id = 'foo_port_id' + self.lsn_id = 'foo_lsn_id' + self.mac = 'aa:bb:cc:dd:ee:ff' + self.switch_id = 'foo_switch_id' + self.lsn_port_id = 'foo_lsn_port_id' + self.tenant_id = 'foo_tenant_id' + self.manager = lsn_man.LsnManager(mock.Mock()) + self.context = context.get_admin_context() + self.mock_lsn_api_p = mock.patch.object(lsn_man, 'lsn_api') + self.mock_lsn_api = 
self.mock_lsn_api_p.start() + self.mock_nsx_utils_p = mock.patch.object(lsn_man, 'nsx_utils') + self.mock_nsx_utils = self.mock_nsx_utils_p.start() + nsx.register_dhcp_opts(cfg) + nsx.register_metadata_opts(cfg) + + def test_lsn_get(self): + self.mock_lsn_api.lsn_for_network_get.return_value = self.lsn_id + expected = self.manager.lsn_get(mock.ANY, self.net_id) + self.mock_lsn_api.lsn_for_network_get.assert_called_once_with( + mock.ANY, self.net_id) + self.assertEqual(expected, self.lsn_id) + + def _test_lsn_get_raise_not_found_with_exc(self, exc): + self.mock_lsn_api.lsn_for_network_get.side_effect = exc + self.assertRaises(p_exc.LsnNotFound, + self.manager.lsn_get, + mock.ANY, self.net_id) + self.mock_lsn_api.lsn_for_network_get.assert_called_once_with( + mock.ANY, self.net_id) + + def test_lsn_get_raise_not_found_with_not_found(self): + self._test_lsn_get_raise_not_found_with_exc(n_exc.NotFound) + + def test_lsn_get_raise_not_found_with_api_error(self): + self._test_lsn_get_raise_not_found_with_exc(exception.NsxApiException) + + def _test_lsn_get_silent_raise_with_exc(self, exc): + self.mock_lsn_api.lsn_for_network_get.side_effect = exc + expected = self.manager.lsn_get( + mock.ANY, self.net_id, raise_on_err=False) + self.mock_lsn_api.lsn_for_network_get.assert_called_once_with( + mock.ANY, self.net_id) + self.assertIsNone(expected) + + def test_lsn_get_silent_raise_with_not_found(self): + self._test_lsn_get_silent_raise_with_exc(n_exc.NotFound) + + def test_lsn_get_silent_raise_with_api_error(self): + self._test_lsn_get_silent_raise_with_exc(exception.NsxApiException) + + def test_lsn_create(self): + self.mock_lsn_api.lsn_for_network_create.return_value = self.lsn_id + self.manager.lsn_create(mock.ANY, self.net_id) + self.mock_lsn_api.lsn_for_network_create.assert_called_once_with( + mock.ANY, self.net_id) + + def test_lsn_create_raise_api_error(self): + self.mock_lsn_api.lsn_for_network_create.side_effect = ( + exception.NsxApiException) + 
self.assertRaises(p_exc.NsxPluginException, + self.manager.lsn_create, + mock.ANY, self.net_id) + self.mock_lsn_api.lsn_for_network_create.assert_called_once_with( + mock.ANY, self.net_id) + + def test_lsn_delete(self): + self.manager.lsn_delete(mock.ANY, self.lsn_id) + self.mock_lsn_api.lsn_delete.assert_called_once_with( + mock.ANY, self.lsn_id) + + def _test_lsn_delete_with_exc(self, exc): + self.mock_lsn_api.lsn_delete.side_effect = exc + self.manager.lsn_delete(mock.ANY, self.lsn_id) + self.mock_lsn_api.lsn_delete.assert_called_once_with( + mock.ANY, self.lsn_id) + + def test_lsn_delete_with_not_found(self): + self._test_lsn_delete_with_exc(n_exc.NotFound) + + def test_lsn_delete_api_exception(self): + self._test_lsn_delete_with_exc(exception.NsxApiException) + + def test_lsn_delete_by_network(self): + self.mock_lsn_api.lsn_for_network_get.return_value = self.lsn_id + with mock.patch.object(self.manager, 'lsn_delete') as f: + self.manager.lsn_delete_by_network(mock.ANY, self.net_id) + self.mock_lsn_api.lsn_for_network_get.assert_called_once_with( + mock.ANY, self.net_id) + f.assert_called_once_with(mock.ANY, self.lsn_id) + + def _test_lsn_delete_by_network_with_exc(self, exc): + self.mock_lsn_api.lsn_for_network_get.side_effect = exc + with mock.patch.object(lsn_man.LOG, 'warn') as l: + self.manager.lsn_delete_by_network(mock.ANY, self.net_id) + self.assertEqual(1, l.call_count) + + def test_lsn_delete_by_network_with_not_found(self): + self._test_lsn_delete_by_network_with_exc(n_exc.NotFound) + + def test_lsn_delete_by_network_with_not_api_error(self): + self._test_lsn_delete_by_network_with_exc(exception.NsxApiException) + + def test_lsn_port_get(self): + self.mock_lsn_api.lsn_port_by_subnet_get.return_value = ( + self.lsn_port_id) + with mock.patch.object( + self.manager, 'lsn_get', return_value=self.lsn_id): + expected = self.manager.lsn_port_get( + mock.ANY, self.net_id, self.sub_id) + self.assertEqual(expected, (self.lsn_id, self.lsn_port_id)) + + def 
test_lsn_port_get_lsn_not_found_on_raise(self): + with mock.patch.object( + self.manager, 'lsn_get', + side_effect=p_exc.LsnNotFound(entity='network', + entity_id=self.net_id)): + self.assertRaises(p_exc.LsnNotFound, + self.manager.lsn_port_get, + mock.ANY, self.net_id, self.sub_id) + + def test_lsn_port_get_lsn_not_found_silent_raise(self): + with mock.patch.object(self.manager, 'lsn_get', return_value=None): + expected = self.manager.lsn_port_get( + mock.ANY, self.net_id, self.sub_id, raise_on_err=False) + self.assertEqual(expected, (None, None)) + + def test_lsn_port_get_port_not_found_on_raise(self): + self.mock_lsn_api.lsn_port_by_subnet_get.side_effect = n_exc.NotFound + with mock.patch.object( + self.manager, 'lsn_get', return_value=self.lsn_id): + self.assertRaises(p_exc.LsnPortNotFound, + self.manager.lsn_port_get, + mock.ANY, self.net_id, self.sub_id) + + def test_lsn_port_get_port_not_found_silent_raise(self): + self.mock_lsn_api.lsn_port_by_subnet_get.side_effect = n_exc.NotFound + with mock.patch.object( + self.manager, 'lsn_get', return_value=self.lsn_id): + expected = self.manager.lsn_port_get( + mock.ANY, self.net_id, self.sub_id, raise_on_err=False) + self.assertEqual(expected, (self.lsn_id, None)) + + def test_lsn_port_create(self): + self.mock_lsn_api.lsn_port_create.return_value = self.lsn_port_id + expected = self.manager.lsn_port_create(mock.ANY, mock.ANY, mock.ANY) + self.assertEqual(expected, self.lsn_port_id) + + def _test_lsn_port_create_with_exc(self, exc, expected): + self.mock_lsn_api.lsn_port_create.side_effect = exc + self.assertRaises(expected, + self.manager.lsn_port_create, + mock.ANY, mock.ANY, mock.ANY) + + def test_lsn_port_create_with_not_found(self): + self._test_lsn_port_create_with_exc(n_exc.NotFound, p_exc.LsnNotFound) + + def test_lsn_port_create_api_exception(self): + self._test_lsn_port_create_with_exc(exception.NsxApiException, + p_exc.NsxPluginException) + + def test_lsn_port_delete(self): + 
self.manager.lsn_port_delete(mock.ANY, mock.ANY, mock.ANY) + self.assertEqual(1, self.mock_lsn_api.lsn_port_delete.call_count) + + def _test_lsn_port_delete_with_exc(self, exc): + self.mock_lsn_api.lsn_port_delete.side_effect = exc + with mock.patch.object(lsn_man.LOG, 'warn') as l: + self.manager.lsn_port_delete(mock.ANY, mock.ANY, mock.ANY) + self.assertEqual(1, self.mock_lsn_api.lsn_port_delete.call_count) + self.assertEqual(1, l.call_count) + + def test_lsn_port_delete_with_not_found(self): + self._test_lsn_port_delete_with_exc(n_exc.NotFound) + + def test_lsn_port_delete_api_exception(self): + self._test_lsn_port_delete_with_exc(exception.NsxApiException) + + def _test_lsn_port_dhcp_setup(self, ret_val, sub): + self.mock_nsx_utils.get_nsx_switch_ids.return_value = [self.switch_id] + self.mock_lsn_api.lsn_port_create.return_value = self.lsn_port_id + with mock.patch.object( + self.manager, 'lsn_get', return_value=self.lsn_id): + with mock.patch.object(lsn_man.switch_api, + 'get_port_by_neutron_tag'): + expected = self.manager.lsn_port_dhcp_setup( + mock.Mock(), mock.ANY, mock.ANY, + mock.ANY, subnet_config=sub) + self.assertEqual( + 1, self.mock_lsn_api.lsn_port_create.call_count) + self.assertEqual( + 1, self.mock_lsn_api.lsn_port_plug_network.call_count) + self.assertEqual(expected, ret_val) + + def test_lsn_port_dhcp_setup(self): + self._test_lsn_port_dhcp_setup((self.lsn_id, self.lsn_port_id), None) + + def test_lsn_port_dhcp_setup_with_config(self): + with mock.patch.object(self.manager, 'lsn_port_dhcp_configure') as f: + self._test_lsn_port_dhcp_setup(None, mock.ANY) + self.assertEqual(1, f.call_count) + + def test_lsn_port_dhcp_setup_with_not_found(self): + self.mock_nsx_utils.get_nsx_switch_ids.return_value = [self.switch_id] + with mock.patch.object(lsn_man.switch_api, + 'get_port_by_neutron_tag') as f: + f.side_effect = n_exc.NotFound + self.assertRaises(p_exc.PortConfigurationError, + self.manager.lsn_port_dhcp_setup, + mock.Mock(), mock.ANY, 
mock.ANY, mock.ANY) + + def test_lsn_port_dhcp_setup_with_conflict(self): + self.mock_lsn_api.lsn_port_plug_network.side_effect = ( + p_exc.LsnConfigurationConflict(lsn_id=self.lsn_id)) + self.mock_nsx_utils.get_nsx_switch_ids.return_value = [self.switch_id] + with mock.patch.object(lsn_man.switch_api, 'get_port_by_neutron_tag'): + with mock.patch.object(self.manager, 'lsn_port_delete') as g: + self.assertRaises(p_exc.PortConfigurationError, + self.manager.lsn_port_dhcp_setup, + mock.Mock(), mock.ANY, mock.ANY, mock.ANY) + self.assertEqual(1, g.call_count) + + def _test_lsn_port_dhcp_configure_with_subnet( + self, expected, dns=None, gw=None, routes=None): + subnet = { + 'enable_dhcp': True, + 'dns_nameservers': dns or [], + 'gateway_ip': gw, + 'host_routes': routes + } + self.manager.lsn_port_dhcp_configure(mock.ANY, self.lsn_id, + self.lsn_port_id, subnet) + self.mock_lsn_api.lsn_port_dhcp_configure.assert_called_once_with( + mock.ANY, self.lsn_id, self.lsn_port_id, subnet['enable_dhcp'], + expected) + + def test_lsn_port_dhcp_configure(self): + expected = { + 'routers': '127.0.0.1', + 'default_lease_time': cfg.CONF.NSX_DHCP.default_lease_time, + 'domain_name': cfg.CONF.NSX_DHCP.domain_name + } + self._test_lsn_port_dhcp_configure_with_subnet( + expected, dns=[], gw='127.0.0.1', routes=[]) + + def test_lsn_port_dhcp_configure_gatewayless(self): + expected = { + 'default_lease_time': cfg.CONF.NSX_DHCP.default_lease_time, + 'domain_name': cfg.CONF.NSX_DHCP.domain_name + } + self._test_lsn_port_dhcp_configure_with_subnet(expected, gw=None) + + def test_lsn_port_dhcp_configure_with_extra_dns_servers(self): + expected = { + 'default_lease_time': cfg.CONF.NSX_DHCP.default_lease_time, + 'domain_name_servers': '8.8.8.8,9.9.9.9', + 'domain_name': cfg.CONF.NSX_DHCP.domain_name + } + self._test_lsn_port_dhcp_configure_with_subnet( + expected, dns=['8.8.8.8', '9.9.9.9']) + + def test_lsn_port_dhcp_configure_with_host_routes(self): + expected = { + 'default_lease_time': 
cfg.CONF.NSX_DHCP.default_lease_time, + 'domain_name': cfg.CONF.NSX_DHCP.domain_name, + 'classless_static_routes': '8.8.8.8,9.9.9.9' + } + self._test_lsn_port_dhcp_configure_with_subnet( + expected, routes=['8.8.8.8', '9.9.9.9']) + + def _test_lsn_metadata_configure(self, is_enabled): + with mock.patch.object(self.manager, 'lsn_port_dispose') as f: + self.manager.plugin.get_subnet.return_value = ( + {'network_id': self.net_id}) + self.manager.lsn_metadata_configure(mock.ANY, + self.sub_id, is_enabled) + expected = { + 'metadata_server_port': 8775, + 'metadata_server_ip': '127.0.0.1', + 'metadata_proxy_shared_secret': '' + } + self.mock_lsn_api.lsn_metadata_configure.assert_called_once_with( + mock.ANY, mock.ANY, is_enabled, expected) + if is_enabled: + self.assertEqual( + 1, self.mock_lsn_api.lsn_port_by_subnet_get.call_count) + else: + self.assertEqual(1, f.call_count) + + def test_lsn_metadata_configure_enabled(self): + self._test_lsn_metadata_configure(True) + + def test_lsn_metadata_configure_disabled(self): + self._test_lsn_metadata_configure(False) + + def test_lsn_metadata_configure_not_found(self): + self.mock_lsn_api.lsn_metadata_configure.side_effect = ( + p_exc.LsnNotFound(entity='lsn', entity_id=self.lsn_id)) + self.manager.plugin.get_subnet.return_value = ( + {'network_id': self.net_id}) + self.assertRaises(p_exc.NsxPluginException, + self.manager.lsn_metadata_configure, + mock.ANY, self.sub_id, True) + + def test_lsn_port_metadata_setup(self): + subnet = { + 'cidr': '0.0.0.0/0', + 'id': self.sub_id, + 'network_id': self.net_id, + 'tenant_id': self.tenant_id + } + expected_data = { + 'subnet_id': subnet['id'], + 'ip_address': subnet['cidr'], + 'mac_address': constants.METADATA_MAC + } + self.mock_nsx_utils.get_nsx_switch_ids.return_value = [self.switch_id] + with mock.patch.object(lsn_man.switch_api, 'create_lport') as f: + with mock.patch.object(self.manager, 'lsn_port_create') as g: + f.return_value = {'uuid': self.port_id} + 
self.manager.lsn_port_metadata_setup( + self.context, self.lsn_id, subnet) + (self.mock_lsn_api.lsn_port_plug_network. + assert_called_once_with(mock.ANY, self.lsn_id, + mock.ANY, self.port_id)) + g.assert_called_once_with( + self.context, self.lsn_id, expected_data) + + def test_lsn_port_metadata_setup_raise_not_found(self): + subnet = { + 'cidr': '0.0.0.0/0', + 'id': self.sub_id, + 'network_id': self.net_id, + 'tenant_id': self.tenant_id + } + self.mock_nsx_utils.get_nsx_switch_ids.return_value = [self.switch_id] + with mock.patch.object(lsn_man.switch_api, 'create_lport') as f: + f.side_effect = n_exc.NotFound + self.assertRaises(p_exc.PortConfigurationError, + self.manager.lsn_port_metadata_setup, + mock.Mock(), self.lsn_id, subnet) + + def test_lsn_port_metadata_setup_raise_conflict(self): + subnet = { + 'cidr': '0.0.0.0/0', + 'id': self.sub_id, + 'network_id': self.net_id, + 'tenant_id': self.tenant_id + } + self.mock_nsx_utils.get_nsx_switch_ids.return_value = [self.switch_id] + with mock.patch.object(lsn_man.switch_api, 'create_lport') as f: + with mock.patch.object(lsn_man.switch_api, 'delete_port') as g: + f.return_value = {'uuid': self.port_id} + self.mock_lsn_api.lsn_port_plug_network.side_effect = ( + p_exc.LsnConfigurationConflict(lsn_id=self.lsn_id)) + self.assertRaises(p_exc.PortConfigurationError, + self.manager.lsn_port_metadata_setup, + mock.Mock(), self.lsn_id, subnet) + self.assertEqual(1, + self.mock_lsn_api.lsn_port_delete.call_count) + self.assertEqual(1, g.call_count) + + def _test_lsn_port_dispose_with_values(self, lsn_id, lsn_port_id, count): + with mock.patch.object(self.manager, + 'lsn_port_get_by_mac', + return_value=(lsn_id, lsn_port_id)): + self.manager.lsn_port_dispose(mock.ANY, self.net_id, self.mac) + self.assertEqual(count, + self.mock_lsn_api.lsn_port_delete.call_count) + + def test_lsn_port_dispose(self): + self._test_lsn_port_dispose_with_values( + self.lsn_id, self.lsn_port_id, 1) + + def test_lsn_port_dispose_meta_mac(self): 
+ self.mac = constants.METADATA_MAC + with mock.patch.object(lsn_man.switch_api, + 'get_port_by_neutron_tag') as f: + with mock.patch.object(lsn_man.switch_api, 'delete_port') as g: + f.return_value = {'uuid': self.port_id} + self._test_lsn_port_dispose_with_values( + self.lsn_id, self.lsn_port_id, 1) + f.assert_called_once_with( + mock.ANY, self.net_id, constants.METADATA_PORT_ID) + g.assert_called_once_with(mock.ANY, self.net_id, self.port_id) + + def test_lsn_port_dispose_lsn_not_found(self): + self._test_lsn_port_dispose_with_values(None, None, 0) + + def test_lsn_port_dispose_lsn_port_not_found(self): + self._test_lsn_port_dispose_with_values(self.lsn_id, None, 0) + + def test_lsn_port_dispose_api_error(self): + self.mock_lsn_api.lsn_port_delete.side_effect = ( + exception.NsxApiException) + with mock.patch.object(lsn_man.LOG, 'warn') as l: + self.manager.lsn_port_dispose(mock.ANY, self.net_id, self.mac) + self.assertEqual(1, l.call_count) + + def test_lsn_port_host_conf(self): + with mock.patch.object(self.manager, + 'lsn_port_get', + return_value=(self.lsn_id, self.lsn_port_id)): + f = mock.Mock() + self.manager._lsn_port_host_conf(mock.ANY, self.net_id, + self.sub_id, mock.ANY, f) + self.assertEqual(1, f.call_count) + + def test_lsn_port_host_conf_lsn_port_not_found(self): + with mock.patch.object( + self.manager, 'lsn_port_get', return_value=(None, None)) as f: + self.manager._lsn_port_host_conf( + mock.ANY, self.net_id, self.sub_id, mock.ANY, mock.Mock()) + self.assertEqual(1, f.call_count) + + def _test_lsn_port_update(self, dhcp=None, meta=None): + self.manager.lsn_port_update( + mock.ANY, self.net_id, self.sub_id, dhcp, meta) + count = 1 if dhcp else 0 + count = count + 1 if meta else count + self.assertEqual(count, (self.mock_lsn_api. 
+ lsn_port_host_entries_update.call_count)) + + def test_lsn_port_update(self): + self._test_lsn_port_update() + + def test_lsn_port_update_dhcp_meta(self): + self._test_lsn_port_update(mock.ANY, mock.ANY) + + def test_lsn_port_update_dhcp_and_nometa(self): + self._test_lsn_port_update(mock.ANY, None) + + def test_lsn_port_update_nodhcp_and_nmeta(self): + self._test_lsn_port_update(None, mock.ANY) + + def test_lsn_port_update_raise_error(self): + self.mock_lsn_api.lsn_port_host_entries_update.side_effect = ( + exception.NsxApiException) + self.assertRaises(p_exc.PortConfigurationError, + self.manager.lsn_port_update, + mock.ANY, mock.ANY, mock.ANY, mock.ANY) + + +class PersistentLsnManagerTestCase(base.BaseTestCase): + + def setUp(self): + super(PersistentLsnManagerTestCase, self).setUp() + self.net_id = 'foo_network_id' + self.sub_id = 'foo_subnet_id' + self.port_id = 'foo_port_id' + self.lsn_id = 'foo_lsn_id' + self.mac = 'aa:bb:cc:dd:ee:ff' + self.lsn_port_id = 'foo_lsn_port_id' + self.tenant_id = 'foo_tenant_id' + db.configure_db() + nsx.register_dhcp_opts(cfg) + nsx.register_metadata_opts(cfg) + lsn_man.register_lsn_opts(cfg) + self.manager = lsn_man.PersistentLsnManager(mock.Mock()) + self.context = context.get_admin_context() + self.mock_lsn_api_p = mock.patch.object(lsn_man, 'lsn_api') + self.mock_lsn_api = self.mock_lsn_api_p.start() + self.addCleanup(db.clear_db) + + def test_lsn_get(self): + lsn_db.lsn_add(self.context, self.net_id, self.lsn_id) + result = self.manager.lsn_get(self.context, self.net_id) + self.assertEqual(self.lsn_id, result) + + def test_lsn_get_raise_not_found(self): + self.assertRaises(p_exc.LsnNotFound, + self.manager.lsn_get, self.context, self.net_id) + + def test_lsn_get_silent_not_found(self): + result = self.manager.lsn_get( + self.context, self.net_id, raise_on_err=False) + self.assertIsNone(result) + + def test_lsn_get_sync_on_missing(self): + cfg.CONF.set_override('sync_on_missing_data', True, 'NSX_LSN') + self.manager = 
lsn_man.PersistentLsnManager(mock.Mock()) + with mock.patch.object(self.manager, 'lsn_save') as f: + self.manager.lsn_get(self.context, self.net_id, raise_on_err=True) + self.assertTrue(self.mock_lsn_api.lsn_for_network_get.call_count) + self.assertTrue(f.call_count) + + def test_lsn_save(self): + self.manager.lsn_save(self.context, self.net_id, self.lsn_id) + result = self.manager.lsn_get(self.context, self.net_id) + self.assertEqual(self.lsn_id, result) + + def test_lsn_create(self): + self.mock_lsn_api.lsn_for_network_create.return_value = self.lsn_id + with mock.patch.object(self.manager, 'lsn_save') as f: + result = self.manager.lsn_create(self.context, self.net_id) + self.assertTrue( + self.mock_lsn_api.lsn_for_network_create.call_count) + self.assertTrue(f.call_count) + self.assertEqual(self.lsn_id, result) + + def test_lsn_create_failure(self): + with mock.patch.object( + self.manager, 'lsn_save', + side_effect=p_exc.NsxPluginException(err_msg='')): + self.assertRaises(p_exc.NsxPluginException, + self.manager.lsn_create, + self.context, self.net_id) + self.assertTrue(self.mock_lsn_api.lsn_delete.call_count) + + def test_lsn_delete(self): + self.mock_lsn_api.lsn_for_network_create.return_value = self.lsn_id + self.manager.lsn_create(self.context, self.net_id) + self.manager.lsn_delete(self.context, self.lsn_id) + self.assertIsNone(self.manager.lsn_get( + self.context, self.net_id, raise_on_err=False)) + + def test_lsn_delete_not_existent(self): + self.manager.lsn_delete(self.context, self.lsn_id) + self.assertTrue(self.mock_lsn_api.lsn_delete.call_count) + + def test_lsn_port_get(self): + lsn_db.lsn_add(self.context, self.net_id, self.lsn_id) + lsn_db.lsn_port_add_for_lsn(self.context, self.lsn_port_id, + self.sub_id, self.mac, self.lsn_id) + res = self.manager.lsn_port_get(self.context, self.net_id, self.sub_id) + self.assertEqual((self.lsn_id, self.lsn_port_id), res) + + def test_lsn_port_get_raise_not_found(self): + 
self.assertRaises(p_exc.LsnPortNotFound, + self.manager.lsn_port_get, + self.context, self.net_id, self.sub_id) + + def test_lsn_port_get_silent_not_found(self): + result = self.manager.lsn_port_get( + self.context, self.net_id, self.sub_id, raise_on_err=False) + self.assertEqual((None, None), result) + + def test_lsn_port_get_sync_on_missing(self): + return + cfg.CONF.set_override('sync_on_missing_data', True, 'NSX_LSN') + self.manager = lsn_man.PersistentLsnManager(mock.Mock()) + self.mock_lsn_api.lsn_for_network_get.return_value = self.lsn_id + self.mock_lsn_api.lsn_port_by_subnet_get.return_value = ( + self.lsn_id, self.lsn_port_id) + with mock.patch.object(self.manager, 'lsn_save') as f: + with mock.patch.object(self.manager, 'lsn_port_save') as g: + self.manager.lsn_port_get( + self.context, self.net_id, self.sub_id) + self.assertTrue( + self.mock_lsn_api.lsn_port_by_subnet_get.call_count) + self.assertTrue( + self.mock_lsn_api.lsn_port_info_get.call_count) + self.assertTrue(f.call_count) + self.assertTrue(g.call_count) + + def test_lsn_port_get_by_mac(self): + lsn_db.lsn_add(self.context, self.net_id, self.lsn_id) + lsn_db.lsn_port_add_for_lsn(self.context, self.lsn_port_id, + self.sub_id, self.mac, self.lsn_id) + res = self.manager.lsn_port_get_by_mac( + self.context, self.net_id, self.mac) + self.assertEqual((self.lsn_id, self.lsn_port_id), res) + + def test_lsn_port_get_by_mac_raise_not_found(self): + self.assertRaises(p_exc.LsnPortNotFound, + self.manager.lsn_port_get_by_mac, + self.context, self.net_id, self.sub_id) + + def test_lsn_port_get_by_mac_silent_not_found(self): + result = self.manager.lsn_port_get_by_mac( + self.context, self.net_id, self.sub_id, raise_on_err=False) + self.assertEqual((None, None), result) + + def test_lsn_port_create(self): + lsn_db.lsn_add(self.context, self.net_id, self.lsn_id) + self.mock_lsn_api.lsn_port_create.return_value = self.lsn_port_id + subnet = {'subnet_id': self.sub_id, 'mac_address': self.mac} + with 
mock.patch.object(self.manager, 'lsn_port_save') as f: + result = self.manager.lsn_port_create( + self.context, self.net_id, subnet) + self.assertTrue( + self.mock_lsn_api.lsn_port_create.call_count) + self.assertTrue(f.call_count) + self.assertEqual(self.lsn_port_id, result) + + def test_lsn_port_create_failure(self): + subnet = {'subnet_id': self.sub_id, 'mac_address': self.mac} + with mock.patch.object( + self.manager, 'lsn_port_save', + side_effect=p_exc.NsxPluginException(err_msg='')): + self.assertRaises(p_exc.NsxPluginException, + self.manager.lsn_port_create, + self.context, self.net_id, subnet) + self.assertTrue(self.mock_lsn_api.lsn_port_delete.call_count) + + def test_lsn_port_delete(self): + lsn_db.lsn_add(self.context, self.net_id, self.lsn_id) + lsn_db.lsn_port_add_for_lsn(self.context, self.lsn_port_id, + self.sub_id, self.mac, self.lsn_id) + self.manager.lsn_port_delete( + self.context, self.lsn_id, self.lsn_port_id) + self.assertEqual((None, None), self.manager.lsn_port_get( + self.context, self.lsn_id, self.sub_id, raise_on_err=False)) + + def test_lsn_port_delete_not_existent(self): + self.manager.lsn_port_delete( + self.context, self.lsn_id, self.lsn_port_id) + self.assertTrue(self.mock_lsn_api.lsn_port_delete.call_count) + + def test_lsn_port_save(self): + self.manager.lsn_save(self.context, self.net_id, self.lsn_id) + self.manager.lsn_port_save(self.context, self.lsn_port_id, + self.sub_id, self.mac, self.lsn_id) + result = self.manager.lsn_port_get( + self.context, self.net_id, self.sub_id, raise_on_err=False) + self.assertEqual((self.lsn_id, self.lsn_port_id), result) + + +class DhcpAgentNotifyAPITestCase(base.BaseTestCase): + + def setUp(self): + super(DhcpAgentNotifyAPITestCase, self).setUp() + self.notifier = nsx.DhcpAgentNotifyAPI(mock.Mock(), mock.Mock()) + self.plugin = self.notifier.plugin + self.lsn_manager = self.notifier.lsn_manager + + def _test_notify_port_update( + self, ports, expected_count, expected_args=None): + port = { + 
'id': 'foo_port_id', + 'network_id': 'foo_network_id', + 'fixed_ips': [{'subnet_id': 'foo_subnet_id'}] + } + self.notifier.plugin.get_ports.return_value = ports + self.notifier.notify(mock.ANY, {'port': port}, 'port.update.end') + self.lsn_manager.lsn_port_update.assert_has_calls(expected_args) + + def test_notify_ports_update_no_ports(self): + self._test_notify_port_update(None, 0, []) + self._test_notify_port_update([], 0, []) + + def test_notify_ports_update_one_port(self): + ports = [{ + 'fixed_ips': [{'subnet_id': 'foo_subnet_id', + 'ip_address': '1.2.3.4'}], + 'device_id': 'foo_device_id', + 'device_owner': 'foo_device_owner', + 'mac_address': 'fa:16:3e:da:1d:46' + }] + call_args = mock.call( + mock.ANY, 'foo_network_id', 'foo_subnet_id', + dhcp=[{'ip_address': '1.2.3.4', + 'mac_address': 'fa:16:3e:da:1d:46'}], + meta=[{'instance_id': 'foo_device_id', + 'ip_address': '1.2.3.4'}]) + self._test_notify_port_update(ports, 1, call_args) + + def test_notify_ports_update_ports_with_empty_device_id(self): + ports = [{ + 'fixed_ips': [{'subnet_id': 'foo_subnet_id', + 'ip_address': '1.2.3.4'}], + 'device_id': '', + 'device_owner': 'foo_device_owner', + 'mac_address': 'fa:16:3e:da:1d:46' + }] + call_args = mock.call( + mock.ANY, 'foo_network_id', 'foo_subnet_id', + dhcp=[{'ip_address': '1.2.3.4', + 'mac_address': 'fa:16:3e:da:1d:46'}], + meta=[] + ) + self._test_notify_port_update(ports, 1, call_args) + + def test_notify_ports_update_ports_with_no_fixed_ips(self): + ports = [{ + 'fixed_ips': [], + 'device_id': 'foo_device_id', + 'device_owner': 'foo_device_owner', + 'mac_address': 'fa:16:3e:da:1d:46' + }] + call_args = mock.call( + mock.ANY, 'foo_network_id', 'foo_subnet_id', dhcp=[], meta=[]) + self._test_notify_port_update(ports, 1, call_args) + + def test_notify_ports_update_ports_with_no_fixed_ips_and_no_device(self): + ports = [{ + 'fixed_ips': [], + 'device_id': '', + 'device_owner': 'foo_device_owner', + 'mac_address': 'fa:16:3e:da:1d:46' + }] + call_args = 
mock.call( + mock.ANY, 'foo_network_id', 'foo_subnet_id', dhcp=[], meta=[]) + self._test_notify_port_update(ports, 0, call_args) + + def test_notify_ports_update_with_special_ports(self): + ports = [{'fixed_ips': [], + 'device_id': '', + 'device_owner': n_consts.DEVICE_OWNER_DHCP, + 'mac_address': 'fa:16:3e:da:1d:46'}, + {'fixed_ips': [{'subnet_id': 'foo_subnet_id', + 'ip_address': '1.2.3.4'}], + 'device_id': 'foo_device_id', + 'device_owner': n_consts.DEVICE_OWNER_ROUTER_GW, + 'mac_address': 'fa:16:3e:da:1d:46'}] + call_args = mock.call( + mock.ANY, 'foo_network_id', 'foo_subnet_id', dhcp=[], meta=[]) + self._test_notify_port_update(ports, 0, call_args) + + def test_notify_ports_update_many_ports(self): + ports = [{'fixed_ips': [], + 'device_id': '', + 'device_owner': 'foo_device_owner', + 'mac_address': 'fa:16:3e:da:1d:46'}, + {'fixed_ips': [{'subnet_id': 'foo_subnet_id', + 'ip_address': '1.2.3.4'}], + 'device_id': 'foo_device_id', + 'device_owner': 'foo_device_owner', + 'mac_address': 'fa:16:3e:da:1d:46'}] + call_args = mock.call( + mock.ANY, 'foo_network_id', 'foo_subnet_id', + dhcp=[{'ip_address': '1.2.3.4', + 'mac_address': 'fa:16:3e:da:1d:46'}], + meta=[{'instance_id': 'foo_device_id', + 'ip_address': '1.2.3.4'}]) + self._test_notify_port_update(ports, 1, call_args) + + def _test_notify_subnet_action(self, action): + with mock.patch.object(self.notifier, '_subnet_%s' % action) as f: + self.notifier._handle_subnet_dhcp_access[action] = f + subnet = {'subnet': mock.ANY} + self.notifier.notify( + mock.ANY, subnet, 'subnet.%s.end' % action) + f.assert_called_once_with(mock.ANY, subnet) + + def test_notify_subnet_create(self): + self._test_notify_subnet_action('create') + + def test_notify_subnet_update(self): + self._test_notify_subnet_action('update') + + def test_notify_subnet_delete(self): + self._test_notify_subnet_action('delete') + + def _test_subnet_create(self, enable_dhcp, exc=None, + exc_obj=None, call_notify=True): + subnet = { + 'id': 
'foo_subnet_id', + 'enable_dhcp': enable_dhcp, + 'network_id': 'foo_network_id', + 'tenant_id': 'foo_tenant_id', + 'cidr': '0.0.0.0/0' + } + if exc: + self.plugin.create_port.side_effect = exc_obj or exc + self.assertRaises(exc, + self.notifier.notify, + mock.ANY, + {'subnet': subnet}, + 'subnet.create.end') + self.plugin.delete_subnet.assert_called_with( + mock.ANY, subnet['id']) + else: + if call_notify: + self.notifier.notify( + mock.ANY, {'subnet': subnet}, 'subnet.create.end') + if enable_dhcp: + dhcp_port = { + 'name': '', + 'admin_state_up': True, + 'network_id': 'foo_network_id', + 'tenant_id': 'foo_tenant_id', + 'device_owner': n_consts.DEVICE_OWNER_DHCP, + 'mac_address': mock.ANY, + 'fixed_ips': [{'subnet_id': 'foo_subnet_id'}], + 'device_id': '' + } + self.plugin.create_port.assert_called_once_with( + mock.ANY, {'port': dhcp_port}) + else: + self.assertEqual(0, self.plugin.create_port.call_count) + + def test_subnet_create_enabled_dhcp(self): + self._test_subnet_create(True) + + def test_subnet_create_disabled_dhcp(self): + self._test_subnet_create(False) + + def test_subnet_create_raise_port_config_error(self): + with mock.patch.object(nsx.db_base_plugin_v2.NeutronDbPluginV2, + 'delete_port') as d: + self._test_subnet_create( + True, + exc=n_exc.Conflict, + exc_obj=p_exc.PortConfigurationError(lsn_id='foo_lsn_id', + net_id='foo_net_id', + port_id='foo_port_id')) + d.assert_called_once_with(self.plugin, mock.ANY, 'foo_port_id') + + def test_subnet_update(self): + subnet = { + 'id': 'foo_subnet_id', + 'network_id': 'foo_network_id', + } + self.lsn_manager.lsn_port_get.return_value = ('foo_lsn_id', + 'foo_lsn_port_id') + self.notifier.notify( + mock.ANY, {'subnet': subnet}, 'subnet.update.end') + self.lsn_manager.lsn_port_dhcp_configure.assert_called_once_with( + mock.ANY, 'foo_lsn_id', 'foo_lsn_port_id', subnet) + + def test_subnet_update_raise_lsn_not_found(self): + subnet = { + 'id': 'foo_subnet_id', + 'network_id': 'foo_network_id', + } + 
self.lsn_manager.lsn_port_get.side_effect = ( + p_exc.LsnNotFound(entity='network', + entity_id=subnet['network_id'])) + self.assertRaises(p_exc.LsnNotFound, + self.notifier.notify, + mock.ANY, {'subnet': subnet}, 'subnet.update.end') + + def _test_subnet_update_lsn_port_not_found(self, dhcp_port): + subnet = { + 'id': 'foo_subnet_id', + 'enable_dhcp': True, + 'network_id': 'foo_network_id', + 'tenant_id': 'foo_tenant_id' + } + self.lsn_manager.lsn_port_get.side_effect = ( + p_exc.LsnPortNotFound(lsn_id='foo_lsn_id', + entity='subnet', + entity_id=subnet['id'])) + self.notifier.plugin.get_ports.return_value = dhcp_port + count = 0 if dhcp_port is None else 1 + with mock.patch.object(nsx, 'handle_port_dhcp_access') as h: + self.notifier.notify( + mock.ANY, {'subnet': subnet}, 'subnet.update.end') + self.assertEqual(count, h.call_count) + if not dhcp_port: + self._test_subnet_create(enable_dhcp=True, + exc=None, call_notify=False) + + def test_subnet_update_lsn_port_not_found_without_dhcp_port(self): + self._test_subnet_update_lsn_port_not_found(None) + + def test_subnet_update_lsn_port_not_found_with_dhcp_port(self): + self._test_subnet_update_lsn_port_not_found([mock.ANY]) + + def _test_subnet_delete(self, ports=None): + subnet = { + 'id': 'foo_subnet_id', + 'network_id': 'foo_network_id', + 'cidr': '0.0.0.0/0' + } + self.plugin.get_ports.return_value = ports + self.notifier.notify(mock.ANY, {'subnet': subnet}, 'subnet.delete.end') + filters = { + 'network_id': [subnet['network_id']], + 'device_owner': [n_consts.DEVICE_OWNER_DHCP] + } + self.plugin.get_ports.assert_called_once_with( + mock.ANY, filters=filters) + if ports: + self.plugin.delete_port.assert_called_once_with( + mock.ANY, ports[0]['id']) + else: + self.assertEqual(0, self.plugin.delete_port.call_count) + + def test_subnet_delete_enabled_dhcp_no_ports(self): + self._test_subnet_delete() + + def test_subnet_delete_enabled_dhcp_with_dhcp_port(self): + self._test_subnet_delete([{'id': 'foo_port_id'}]) + + 
+class DhcpTestCase(base.BaseTestCase): + + def setUp(self): + super(DhcpTestCase, self).setUp() + self.plugin = mock.Mock() + self.plugin.lsn_manager = mock.Mock() + + def test_handle_create_network(self): + network = {'id': 'foo_network_id'} + nsx.handle_network_dhcp_access( + self.plugin, mock.ANY, network, 'create_network') + self.plugin.lsn_manager.lsn_create.assert_called_once_with( + mock.ANY, network['id']) + + def test_handle_create_network_router_external(self): + network = {'id': 'foo_network_id', 'router:external': True} + nsx.handle_network_dhcp_access( + self.plugin, mock.ANY, network, 'create_network') + self.assertFalse(self.plugin.lsn_manager.lsn_create.call_count) + + def test_handle_delete_network(self): + network_id = 'foo_network_id' + self.plugin.lsn_manager.lsn_delete_by_network.return_value = ( + 'foo_lsn_id') + nsx.handle_network_dhcp_access( + self.plugin, mock.ANY, network_id, 'delete_network') + self.plugin.lsn_manager.lsn_delete_by_network.assert_called_once_with( + mock.ANY, 'foo_network_id') + + def _test_handle_create_dhcp_owner_port(self, exc=None): + subnet = { + 'cidr': '0.0.0.0/0', + 'id': 'foo_subnet_id' + } + port = { + 'id': 'foo_port_id', + 'device_owner': n_consts.DEVICE_OWNER_DHCP, + 'mac_address': 'aa:bb:cc:dd:ee:ff', + 'network_id': 'foo_network_id', + 'fixed_ips': [{'subnet_id': subnet['id']}] + } + expected_data = { + 'subnet_id': subnet['id'], + 'ip_address': subnet['cidr'], + 'mac_address': port['mac_address'] + } + self.plugin.get_subnet.return_value = subnet + if exc is None: + nsx.handle_port_dhcp_access( + self.plugin, mock.ANY, port, 'create_port') + (self.plugin.lsn_manager.lsn_port_dhcp_setup. 
+ assert_called_once_with(mock.ANY, port['network_id'], + port['id'], expected_data, subnet)) + else: + self.plugin.lsn_manager.lsn_port_dhcp_setup.side_effect = exc + self.assertRaises(n_exc.NeutronException, + nsx.handle_port_dhcp_access, + self.plugin, mock.ANY, port, 'create_port') + + def test_handle_create_dhcp_owner_port(self): + self._test_handle_create_dhcp_owner_port() + + def test_handle_create_dhcp_owner_port_raise_port_config_error(self): + config_error = p_exc.PortConfigurationError(lsn_id='foo_lsn_id', + net_id='foo_net_id', + port_id='foo_port_id') + self._test_handle_create_dhcp_owner_port(exc=config_error) + + def test_handle_delete_dhcp_owner_port(self): + port = { + 'id': 'foo_port_id', + 'device_owner': n_consts.DEVICE_OWNER_DHCP, + 'network_id': 'foo_network_id', + 'fixed_ips': [], + 'mac_address': 'aa:bb:cc:dd:ee:ff' + } + nsx.handle_port_dhcp_access(self.plugin, mock.ANY, port, 'delete_port') + self.plugin.lsn_manager.lsn_port_dispose.assert_called_once_with( + mock.ANY, port['network_id'], port['mac_address']) + + def _test_handle_user_port(self, action, handler): + port = { + 'id': 'foo_port_id', + 'device_owner': 'foo_device_owner', + 'network_id': 'foo_network_id', + 'mac_address': 'aa:bb:cc:dd:ee:ff', + 'fixed_ips': [{'subnet_id': 'foo_subnet_id', + 'ip_address': '1.2.3.4'}] + } + expected_data = { + 'ip_address': '1.2.3.4', + 'mac_address': 'aa:bb:cc:dd:ee:ff' + } + self.plugin.get_subnet.return_value = {'enable_dhcp': True} + nsx.handle_port_dhcp_access(self.plugin, mock.ANY, port, action) + handler.assert_called_once_with( + mock.ANY, port['network_id'], 'foo_subnet_id', expected_data) + + def test_handle_create_user_port(self): + self._test_handle_user_port( + 'create_port', self.plugin.lsn_manager.lsn_port_dhcp_host_add) + + def test_handle_delete_user_port(self): + self._test_handle_user_port( + 'delete_port', self.plugin.lsn_manager.lsn_port_dhcp_host_remove) + + def _test_handle_user_port_disabled_dhcp(self, action, handler): + 
port = { + 'id': 'foo_port_id', + 'device_owner': 'foo_device_owner', + 'network_id': 'foo_network_id', + 'mac_address': 'aa:bb:cc:dd:ee:ff', + 'fixed_ips': [{'subnet_id': 'foo_subnet_id', + 'ip_address': '1.2.3.4'}] + } + self.plugin.get_subnet.return_value = {'enable_dhcp': False} + nsx.handle_port_dhcp_access(self.plugin, mock.ANY, port, action) + self.assertEqual(0, handler.call_count) + + def test_handle_create_user_port_disabled_dhcp(self): + self._test_handle_user_port_disabled_dhcp( + 'create_port', self.plugin.lsn_manager.lsn_port_dhcp_host_add) + + def test_handle_delete_user_port_disabled_dhcp(self): + self._test_handle_user_port_disabled_dhcp( + 'delete_port', self.plugin.lsn_manager.lsn_port_dhcp_host_remove) + + def _test_handle_user_port_no_fixed_ips(self, action, handler): + port = { + 'id': 'foo_port_id', + 'device_owner': 'foo_device_owner', + 'network_id': 'foo_network_id', + 'fixed_ips': [] + } + nsx.handle_port_dhcp_access(self.plugin, mock.ANY, port, action) + self.assertEqual(0, handler.call_count) + + def test_handle_create_user_port_no_fixed_ips(self): + self._test_handle_user_port_no_fixed_ips( + 'create_port', self.plugin.lsn_manager.lsn_port_dhcp_host_add) + + def test_handle_delete_user_port_no_fixed_ips(self): + self._test_handle_user_port_no_fixed_ips( + 'delete_port', self.plugin.lsn_manager.lsn_port_dhcp_host_remove) + + +class MetadataTestCase(base.BaseTestCase): + + def setUp(self): + super(MetadataTestCase, self).setUp() + self.plugin = mock.Mock() + self.plugin.lsn_manager = mock.Mock() + + def _test_handle_port_metadata_access_special_owners( + self, owner, dev_id='foo_device_id', ips=None): + port = { + 'id': 'foo_port_id', + 'device_owner': owner, + 'device_id': dev_id, + 'fixed_ips': ips or [] + } + nsx.handle_port_metadata_access(self.plugin, mock.ANY, port, mock.ANY) + self.assertFalse( + self.plugin.lsn_manager.lsn_port_meta_host_add.call_count) + self.assertFalse( + 
self.plugin.lsn_manager.lsn_port_meta_host_remove.call_count) + + def test_handle_port_metadata_access_external_network(self): + port = { + 'id': 'foo_port_id', + 'device_owner': 'foo_device_owner', + 'device_id': 'foo_device_id', + 'network_id': 'foo_network_id', + 'fixed_ips': [{'subnet_id': 'foo_subnet'}] + } + self.plugin.get_network.return_value = {'router:external': True} + nsx.handle_port_metadata_access(self.plugin, mock.ANY, port, mock.ANY) + self.assertFalse( + self.plugin.lsn_manager.lsn_port_meta_host_add.call_count) + self.assertFalse( + self.plugin.lsn_manager.lsn_port_meta_host_remove.call_count) + + def test_handle_port_metadata_access_dhcp_port(self): + self._test_handle_port_metadata_access_special_owners( + n_consts.DEVICE_OWNER_DHCP, ips=[{'subnet_id': 'foo_subnet'}]) + + def test_handle_port_metadata_access_router_port(self): + self._test_handle_port_metadata_access_special_owners( + n_consts.DEVICE_OWNER_ROUTER_INTF, ips=[{'subnet_id': 'foo_subnet'}]) + + def test_handle_port_metadata_access_no_device_id(self): + self._test_handle_port_metadata_access_special_owners( + n_consts.DEVICE_OWNER_DHCP, '') + + def test_handle_port_metadata_access_no_fixed_ips(self): + self._test_handle_port_metadata_access_special_owners( + 'foo', 'foo', None) + + def _test_handle_port_metadata_access(self, is_delete, raise_exc=False): + port = { + 'id': 'foo_port_id', + 'device_owner': 'foo_device_id', + 'network_id': 'foo_network_id', + 'device_id': 'foo_device_id', + 'tenant_id': 'foo_tenant_id', + 'fixed_ips': [ + {'subnet_id': 'foo_subnet_id', 'ip_address': '1.2.3.4'} + ] + } + meta = { + 'instance_id': port['device_id'], + 'tenant_id': port['tenant_id'], + 'ip_address': port['fixed_ips'][0]['ip_address'] + } + self.plugin.get_network.return_value = {'router:external': False} + if is_delete: + mock_func = self.plugin.lsn_manager.lsn_port_meta_host_remove + else: + mock_func = self.plugin.lsn_manager.lsn_port_meta_host_add + if raise_exc: + mock_func.side_effect = 
p_exc.PortConfigurationError( + lsn_id='foo_lsn_id', net_id='foo_net_id', port_id=None) + with mock.patch.object(nsx.db_base_plugin_v2.NeutronDbPluginV2, + 'delete_port') as d: + self.assertRaises(p_exc.PortConfigurationError, + nsx.handle_port_metadata_access, + self.plugin, mock.ANY, port, + is_delete=is_delete) + if not is_delete: + d.assert_called_once_with(mock.ANY, mock.ANY, port['id']) + else: + self.assertFalse(d.call_count) + else: + nsx.handle_port_metadata_access( + self.plugin, mock.ANY, port, is_delete=is_delete) + mock_func.assert_called_once_with(mock.ANY, mock.ANY, mock.ANY, meta) + + def test_handle_port_metadata_access_on_delete_true(self): + self._test_handle_port_metadata_access(True) + + def test_handle_port_metadata_access_on_delete_false(self): + self._test_handle_port_metadata_access(False) + + def test_handle_port_metadata_access_on_delete_true_raise(self): + self._test_handle_port_metadata_access(True, raise_exc=True) + + def test_handle_port_metadata_access_on_delete_false_raise(self): + self._test_handle_port_metadata_access(False, raise_exc=True) + + def _test_handle_router_metadata_access( + self, is_port_found, raise_exc=False): + subnet = { + 'id': 'foo_subnet_id', + 'network_id': 'foo_network_id' + } + interface = { + 'subnet_id': subnet['id'], + 'port_id': 'foo_port_id' + } + mock_func = self.plugin.lsn_manager.lsn_metadata_configure + if not is_port_found: + self.plugin.get_port.side_effect = n_exc.NotFound + if raise_exc: + with mock.patch.object(nsx.l3_db.L3_NAT_db_mixin, + 'remove_router_interface') as d: + mock_func.side_effect = p_exc.NsxPluginException(err_msg='') + self.assertRaises(p_exc.NsxPluginException, + nsx.handle_router_metadata_access, + self.plugin, mock.ANY, 'foo_router_id', + interface) + d.assert_called_once_with(mock.ANY, mock.ANY, 'foo_router_id', + interface) + else: + nsx.handle_router_metadata_access( + self.plugin, mock.ANY, 'foo_router_id', interface) + mock_func.assert_called_once_with( + mock.ANY, 
subnet['id'], is_port_found) + + def test_handle_router_metadata_access_add_interface(self): + self._test_handle_router_metadata_access(True) + + def test_handle_router_metadata_access_delete_interface(self): + self._test_handle_router_metadata_access(False) + + def test_handle_router_metadata_access_raise_error_on_add(self): + self._test_handle_router_metadata_access(True, raise_exc=True) + + def test_handle_router_metadata_access_raise_error_on_delete(self): + self._test_handle_router_metadata_access(False, raise_exc=True) diff --git a/neutron/tests/unit/vmware/test_nsx_opts.py b/neutron/tests/unit/vmware/test_nsx_opts.py new file mode 100644 index 000000000..6bdfc3408 --- /dev/null +++ b/neutron/tests/unit/vmware/test_nsx_opts.py @@ -0,0 +1,253 @@ +# Copyright 2013 VMware, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +import fixtures + +import mock +from oslo.config import cfg + +from neutron import manager +from neutron.openstack.common import uuidutils +from neutron.plugins.vmware.api_client import client +from neutron.plugins.vmware.api_client import version +from neutron.plugins.vmware.common import config # noqa +from neutron.plugins.vmware.common import exceptions +from neutron.plugins.vmware.common import sync +from neutron.plugins.vmware import nsx_cluster +from neutron.plugins.vmware.nsxlib import lsn as lsnlib +from neutron.tests import base +from neutron.tests.unit import vmware + +BASE_CONF_PATH = vmware.get_fake_conf('neutron.conf.test') +NSX_INI_PATH = vmware.get_fake_conf('nsx.ini.basic.test') +NSX_INI_FULL_PATH = vmware.get_fake_conf('nsx.ini.full.test') +NSX_INI_AGENTLESS_PATH = vmware.get_fake_conf('nsx.ini.agentless.test') +NSX_INI_COMBINED_PATH = vmware.get_fake_conf('nsx.ini.combined.test') +NVP_INI_DEPR_PATH = vmware.get_fake_conf('nvp.ini.full.test') + + +class NSXClusterTest(base.BaseTestCase): + + cluster_opts = {'default_tz_uuid': uuidutils.generate_uuid(), + 'default_l2_gw_service_uuid': uuidutils.generate_uuid(), + 'default_l3_gw_service_uuid': uuidutils.generate_uuid(), + 'nsx_user': 'foo', + 'nsx_password': 'bar', + 'req_timeout': 45, + 'http_timeout': 25, + 'retries': 7, + 'redirects': 23, + 'default_interface_name': 'baz', + 'nsx_controllers': ['1.1.1.1:443']} + + def test_create_cluster(self): + cluster = nsx_cluster.NSXCluster(**self.cluster_opts) + for (k, v) in self.cluster_opts.iteritems(): + self.assertEqual(v, getattr(cluster, k)) + + def test_create_cluster_default_port(self): + opts = self.cluster_opts.copy() + opts['nsx_controllers'] = ['1.1.1.1'] + cluster = nsx_cluster.NSXCluster(**opts) + for (k, v) in self.cluster_opts.iteritems(): + self.assertEqual(v, getattr(cluster, k)) + + def test_create_cluster_missing_required_attribute_raises(self): + opts = self.cluster_opts.copy() + opts.pop('default_tz_uuid') + 
self.assertRaises(exceptions.InvalidClusterConfiguration, + nsx_cluster.NSXCluster, **opts) + + +class ConfigurationTest(base.BaseTestCase): + + def setUp(self): + super(ConfigurationTest, self).setUp() + self.useFixture(fixtures.MonkeyPatch( + 'neutron.manager.NeutronManager._instance', + None)) + # Avoid runs of the synchronizer looping call + patch_sync = mock.patch.object(sync, '_start_loopingcall') + patch_sync.start() + + def _assert_required_options(self, cluster): + self.assertEqual(cluster.nsx_controllers, ['fake_1:443', 'fake_2:443']) + self.assertEqual(cluster.default_tz_uuid, 'fake_tz_uuid') + self.assertEqual(cluster.nsx_user, 'foo') + self.assertEqual(cluster.nsx_password, 'bar') + + def _assert_extra_options(self, cluster): + self.assertEqual(14, cluster.req_timeout) + self.assertEqual(13, cluster.http_timeout) + self.assertEqual(12, cluster.redirects) + self.assertEqual(11, cluster.retries) + self.assertEqual('whatever', cluster.default_l2_gw_service_uuid) + self.assertEqual('whatever', cluster.default_l3_gw_service_uuid) + self.assertEqual('whatever', cluster.default_interface_name) + + def test_load_plugin_with_full_options(self): + self.config_parse(args=['--config-file', BASE_CONF_PATH, + '--config-file', NSX_INI_FULL_PATH]) + cfg.CONF.set_override('core_plugin', vmware.PLUGIN_NAME) + plugin = manager.NeutronManager().get_plugin() + cluster = plugin.cluster + self._assert_required_options(cluster) + self._assert_extra_options(cluster) + + def test_load_plugin_with_required_options_only(self): + self.config_parse(args=['--config-file', BASE_CONF_PATH, + '--config-file', NSX_INI_PATH]) + cfg.CONF.set_override('core_plugin', vmware.PLUGIN_NAME) + plugin = manager.NeutronManager().get_plugin() + self._assert_required_options(plugin.cluster) + + def test_defaults(self): + self.assertEqual(5000, cfg.CONF.NSX.max_lp_per_bridged_ls) + self.assertEqual(256, cfg.CONF.NSX.max_lp_per_overlay_ls) + self.assertEqual(10, cfg.CONF.NSX.concurrent_connections) + 
self.assertEqual('access_network', cfg.CONF.NSX.metadata_mode) + self.assertEqual('stt', cfg.CONF.NSX.default_transport_type) + self.assertEqual('service', cfg.CONF.NSX.replication_mode) + + self.assertIsNone(cfg.CONF.default_tz_uuid) + self.assertEqual('admin', cfg.CONF.nsx_user) + self.assertEqual('admin', cfg.CONF.nsx_password) + self.assertEqual(30, cfg.CONF.req_timeout) + self.assertEqual(30, cfg.CONF.http_timeout) + self.assertEqual(2, cfg.CONF.retries) + self.assertEqual(2, cfg.CONF.redirects) + self.assertIsNone(cfg.CONF.nsx_controllers) + self.assertIsNone(cfg.CONF.default_l3_gw_service_uuid) + self.assertIsNone(cfg.CONF.default_l2_gw_service_uuid) + self.assertEqual('breth0', cfg.CONF.default_interface_name) + + def test_load_api_extensions(self): + self.config_parse(args=['--config-file', BASE_CONF_PATH, + '--config-file', NSX_INI_FULL_PATH]) + cfg.CONF.set_override('core_plugin', vmware.PLUGIN_NAME) + # Load the configuration, and initialize the plugin + manager.NeutronManager().get_plugin() + self.assertIn('extensions', cfg.CONF.api_extensions_path) + + def test_agentless_extensions(self): + self.config_parse(args=['--config-file', BASE_CONF_PATH, + '--config-file', NSX_INI_AGENTLESS_PATH]) + cfg.CONF.set_override('core_plugin', vmware.PLUGIN_NAME) + self.assertEqual(config.AgentModes.AGENTLESS, + cfg.CONF.NSX.agent_mode) + # The version returned from NSX does not really matter here + with mock.patch.object(client.NsxApiClient, + 'get_version', + return_value=version.Version("9.9")): + with mock.patch.object(lsnlib, + 'service_cluster_exists', + return_value=True): + plugin = manager.NeutronManager().get_plugin() + self.assertNotIn('agent', + plugin.supported_extension_aliases) + self.assertNotIn('dhcp_agent_scheduler', + plugin.supported_extension_aliases) + self.assertNotIn('lsn', + plugin.supported_extension_aliases) + + def test_agentless_extensions_version_fail(self): + self.config_parse(args=['--config-file', BASE_CONF_PATH, + '--config-file', 
NSX_INI_AGENTLESS_PATH]) + cfg.CONF.set_override('core_plugin', vmware.PLUGIN_NAME) + self.assertEqual(config.AgentModes.AGENTLESS, + cfg.CONF.NSX.agent_mode) + with mock.patch.object(client.NsxApiClient, + 'get_version', + return_value=version.Version("3.2")): + self.assertRaises(exceptions.NsxPluginException, + manager.NeutronManager) + + def test_agentless_extensions_unmet_deps_fail(self): + self.config_parse(args=['--config-file', BASE_CONF_PATH, + '--config-file', NSX_INI_AGENTLESS_PATH]) + cfg.CONF.set_override('core_plugin', vmware.PLUGIN_NAME) + self.assertEqual(config.AgentModes.AGENTLESS, + cfg.CONF.NSX.agent_mode) + with mock.patch.object(client.NsxApiClient, + 'get_version', + return_value=version.Version("3.2")): + with mock.patch.object(lsnlib, + 'service_cluster_exists', + return_value=False): + self.assertRaises(exceptions.NsxPluginException, + manager.NeutronManager) + + def test_agent_extensions(self): + self.config_parse(args=['--config-file', BASE_CONF_PATH, + '--config-file', NSX_INI_FULL_PATH]) + cfg.CONF.set_override('core_plugin', vmware.PLUGIN_NAME) + self.assertEqual(config.AgentModes.AGENT, + cfg.CONF.NSX.agent_mode) + plugin = manager.NeutronManager().get_plugin() + self.assertIn('agent', + plugin.supported_extension_aliases) + self.assertIn('dhcp_agent_scheduler', + plugin.supported_extension_aliases) + + def test_combined_extensions(self): + self.config_parse(args=['--config-file', BASE_CONF_PATH, + '--config-file', NSX_INI_COMBINED_PATH]) + cfg.CONF.set_override('core_plugin', vmware.PLUGIN_NAME) + self.assertEqual(config.AgentModes.COMBINED, + cfg.CONF.NSX.agent_mode) + with mock.patch.object(client.NsxApiClient, + 'get_version', + return_value=version.Version("4.2")): + with mock.patch.object(lsnlib, + 'service_cluster_exists', + return_value=True): + plugin = manager.NeutronManager().get_plugin() + self.assertIn('agent', + plugin.supported_extension_aliases) + self.assertIn('dhcp_agent_scheduler', + 
plugin.supported_extension_aliases) + self.assertIn('lsn', + plugin.supported_extension_aliases) + + +class OldNVPConfigurationTest(base.BaseTestCase): + + def setUp(self): + super(OldNVPConfigurationTest, self).setUp() + self.useFixture(fixtures.MonkeyPatch( + 'neutron.manager.NeutronManager._instance', + None)) + # Avoid runs of the synchronizer looping call + patch_sync = mock.patch.object(sync, '_start_loopingcall') + patch_sync.start() + + def _assert_required_options(self, cluster): + self.assertEqual(cluster.nsx_controllers, ['fake_1:443', 'fake_2:443']) + self.assertEqual(cluster.nsx_user, 'foo') + self.assertEqual(cluster.nsx_password, 'bar') + self.assertEqual(cluster.default_tz_uuid, 'fake_tz_uuid') + + def test_load_plugin_with_deprecated_options(self): + self.config_parse(args=['--config-file', BASE_CONF_PATH, + '--config-file', NVP_INI_DEPR_PATH]) + cfg.CONF.set_override('core_plugin', vmware.PLUGIN_NAME) + plugin = manager.NeutronManager().get_plugin() + cluster = plugin.cluster + # Verify old nvp_* params have been fully parsed + self._assert_required_options(cluster) + self.assertEqual(4, cluster.req_timeout) + self.assertEqual(3, cluster.http_timeout) + self.assertEqual(2, cluster.retries) + self.assertEqual(2, cluster.redirects) diff --git a/neutron/tests/unit/vmware/test_nsx_plugin.py b/neutron/tests/unit/vmware/test_nsx_plugin.py new file mode 100644 index 000000000..4b99bd734 --- /dev/null +++ b/neutron/tests/unit/vmware/test_nsx_plugin.py @@ -0,0 +1,1181 @@ +# Copyright (c) 2012 OpenStack Foundation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import contextlib + +import mock +import netaddr +from oslo.config import cfg +from sqlalchemy import exc as sql_exc +import webob.exc + +from neutron.api.v2 import attributes +from neutron.common import constants +from neutron.common import exceptions as ntn_exc +import neutron.common.test_lib as test_lib +from neutron import context +from neutron.extensions import external_net +from neutron.extensions import l3 +from neutron.extensions import l3_ext_gw_mode +from neutron.extensions import portbindings +from neutron.extensions import providernet as pnet +from neutron.extensions import securitygroup as secgrp +from neutron import manager +from neutron.openstack.common.db import exception as db_exc +from neutron.openstack.common import log +from neutron.openstack.common import uuidutils +from neutron.plugins.vmware.api_client import exception as api_exc +from neutron.plugins.vmware.api_client import version as version_module +from neutron.plugins.vmware.common import exceptions as nsx_exc +from neutron.plugins.vmware.common import sync +from neutron.plugins.vmware.common import utils +from neutron.plugins.vmware.dbexts import db as nsx_db +from neutron.plugins.vmware.extensions import distributedrouter as dist_router +from neutron.plugins.vmware import nsxlib +from neutron.tests.unit import _test_extension_portbindings as test_bindings +import neutron.tests.unit.test_db_plugin as test_plugin +import neutron.tests.unit.test_extension_ext_gw_mode as test_ext_gw_mode +import neutron.tests.unit.test_extension_security_group as ext_sg +import 
neutron.tests.unit.test_l3_plugin as test_l3_plugin +from neutron.tests.unit import testlib_api +from neutron.tests.unit import vmware +from neutron.tests.unit.vmware.apiclient import fake + +LOG = log.getLogger(__name__) + + +class NsxPluginV2TestCase(test_plugin.NeutronDbPluginV2TestCase): + + def _create_network(self, fmt, name, admin_state_up, + arg_list=None, providernet_args=None, **kwargs): + data = {'network': {'name': name, + 'admin_state_up': admin_state_up, + 'tenant_id': self._tenant_id}} + # Fix to allow the router:external attribute and any other + # attributes containing a colon to be passed with + # a double underscore instead + kwargs = dict((k.replace('__', ':'), v) for k, v in kwargs.items()) + if external_net.EXTERNAL in kwargs: + arg_list = (external_net.EXTERNAL, ) + (arg_list or ()) + + attrs = kwargs + if providernet_args: + attrs.update(providernet_args) + for arg in (('admin_state_up', 'tenant_id', 'shared') + + (arg_list or ())): + # Arg must be present and not empty + if arg in kwargs and kwargs[arg]: + data['network'][arg] = kwargs[arg] + network_req = self.new_create_request('networks', data, fmt) + if (kwargs.get('set_context') and 'tenant_id' in kwargs): + # create a specific auth context for this request + network_req.environ['neutron.context'] = context.Context( + '', kwargs['tenant_id']) + return network_req.get_response(self.api) + + def setUp(self, + plugin=vmware.PLUGIN_NAME, + ext_mgr=None, + service_plugins=None): + test_lib.test_config['config_files'] = [ + vmware.get_fake_conf('nsx.ini.test')] + # mock api client + self.fc = fake.FakeClient(vmware.STUBS_PATH) + self.mock_nsx = mock.patch(vmware.NSXAPI_NAME, autospec=True) + self.mock_instance = self.mock_nsx.start() + # Avoid runs of the synchronizer looping call + patch_sync = mock.patch.object(sync, '_start_loopingcall') + patch_sync.start() + + # Emulate tests against NSX 2.x + self.mock_instance.return_value.get_version.return_value = ( + version_module.Version("2.9")) 
+ self.mock_instance.return_value.request.side_effect = ( + self.fc.fake_request) + super(NsxPluginV2TestCase, self).setUp(plugin=plugin, + ext_mgr=ext_mgr) + # Newly created port's status is always 'DOWN' till NSX wires them. + self.port_create_status = constants.PORT_STATUS_DOWN + cfg.CONF.set_override('metadata_mode', None, 'NSX') + self.addCleanup(self.fc.reset_all) + + +class TestBasicGet(test_plugin.TestBasicGet, NsxPluginV2TestCase): + pass + + +class TestV2HTTPResponse(test_plugin.TestV2HTTPResponse, NsxPluginV2TestCase): + pass + + +class TestPortsV2(NsxPluginV2TestCase, + test_plugin.TestPortsV2, + test_bindings.PortBindingsTestCase, + test_bindings.PortBindingsHostTestCaseMixin): + + VIF_TYPE = portbindings.VIF_TYPE_OVS + HAS_PORT_FILTER = True + + def test_exhaust_ports_overlay_network(self): + cfg.CONF.set_override('max_lp_per_overlay_ls', 1, group='NSX') + with self.network(name='testnet', + arg_list=(pnet.NETWORK_TYPE, + pnet.PHYSICAL_NETWORK, + pnet.SEGMENTATION_ID)) as net: + with self.subnet(network=net) as sub: + with self.port(subnet=sub): + # creating another port should see an exception + self._create_port('json', net['network']['id'], 400) + + def test_exhaust_ports_bridged_network(self): + cfg.CONF.set_override('max_lp_per_bridged_ls', 1, group="NSX") + providernet_args = {pnet.NETWORK_TYPE: 'flat', + pnet.PHYSICAL_NETWORK: 'tzuuid'} + with self.network(name='testnet', + providernet_args=providernet_args, + arg_list=(pnet.NETWORK_TYPE, + pnet.PHYSICAL_NETWORK, + pnet.SEGMENTATION_ID)) as net: + with self.subnet(network=net) as sub: + with self.port(subnet=sub): + with self.port(subnet=sub): + plugin = manager.NeutronManager.get_plugin() + ls = nsxlib.switch.get_lswitches(plugin.cluster, + net['network']['id']) + self.assertEqual(len(ls), 2) + + def test_update_port_delete_ip(self): + # This test case overrides the default because the nsx plugin + # implements port_security/security groups and it is not allowed + # to remove an ip address 
from a port unless the security group + # is first removed. + with self.subnet() as subnet: + with self.port(subnet=subnet) as port: + data = {'port': {'admin_state_up': False, + 'fixed_ips': [], + secgrp.SECURITYGROUPS: []}} + req = self.new_update_request('ports', + data, port['port']['id']) + res = self.deserialize('json', req.get_response(self.api)) + self.assertEqual(res['port']['admin_state_up'], + data['port']['admin_state_up']) + self.assertEqual(res['port']['fixed_ips'], + data['port']['fixed_ips']) + + def test_create_port_name_exceeds_40_chars(self): + name = 'this_is_a_port_whose_name_is_longer_than_40_chars' + with self.port(name=name) as port: + # Assert the neutron name is not truncated + self.assertEqual(name, port['port']['name']) + + def _verify_no_orphan_left(self, net_id): + # Verify no port exists on net + # ie: cleanup on db was successful + query_params = "network_id=%s" % net_id + self._test_list_resources('port', [], + query_params=query_params) + # Also verify no orphan port was left on nsx + # no port should be there at all + self.assertFalse(self.fc._fake_lswitch_lport_dict) + + def test_create_port_nsx_error_no_orphan_left(self): + with mock.patch.object(nsxlib.switch, 'create_lport', + side_effect=api_exc.NsxApiException): + with self.network() as net: + net_id = net['network']['id'] + self._create_port(self.fmt, net_id, + webob.exc.HTTPInternalServerError.code) + self._verify_no_orphan_left(net_id) + + def test_create_port_neutron_error_no_orphan_left(self): + with mock.patch.object(nsx_db, 'add_neutron_nsx_port_mapping', + side_effect=ntn_exc.NeutronException): + with self.network() as net: + net_id = net['network']['id'] + self._create_port(self.fmt, net_id, + webob.exc.HTTPInternalServerError.code) + self._verify_no_orphan_left(net_id) + + def test_create_port_db_error_no_orphan_left(self): + db_exception = db_exc.DBError( + inner_exception=sql_exc.IntegrityError(mock.ANY, + mock.ANY, + mock.ANY)) + with mock.patch.object(nsx_db, 
'add_neutron_nsx_port_mapping', + side_effect=db_exception): + with self.network() as net: + with self.port(device_owner=constants.DEVICE_OWNER_DHCP): + self._verify_no_orphan_left(net['network']['id']) + + def test_create_port_maintenance_returns_503(self): + with self.network() as net: + with mock.patch.object(nsxlib, 'do_request', + side_effect=nsx_exc.MaintenanceInProgress): + data = {'port': {'network_id': net['network']['id'], + 'admin_state_up': False, + 'fixed_ips': [], + 'tenant_id': self._tenant_id}} + plugin = manager.NeutronManager.get_plugin() + with mock.patch.object(plugin, 'get_network', + return_value=net['network']): + port_req = self.new_create_request('ports', data, self.fmt) + res = port_req.get_response(self.api) + self.assertEqual(webob.exc.HTTPServiceUnavailable.code, + res.status_int) + + +class TestNetworksV2(test_plugin.TestNetworksV2, NsxPluginV2TestCase): + + def _test_create_bridge_network(self, vlan_id=0): + net_type = vlan_id and 'vlan' or 'flat' + name = 'bridge_net' + expected = [('subnets', []), ('name', name), ('admin_state_up', True), + ('status', 'ACTIVE'), ('shared', False), + (pnet.NETWORK_TYPE, net_type), + (pnet.PHYSICAL_NETWORK, 'tzuuid'), + (pnet.SEGMENTATION_ID, vlan_id)] + providernet_args = {pnet.NETWORK_TYPE: net_type, + pnet.PHYSICAL_NETWORK: 'tzuuid'} + if vlan_id: + providernet_args[pnet.SEGMENTATION_ID] = vlan_id + with self.network(name=name, + providernet_args=providernet_args, + arg_list=(pnet.NETWORK_TYPE, + pnet.PHYSICAL_NETWORK, + pnet.SEGMENTATION_ID)) as net: + for k, v in expected: + self.assertEqual(net['network'][k], v) + + def test_create_bridge_network(self): + self._test_create_bridge_network() + + def test_create_bridge_vlan_network(self): + self._test_create_bridge_network(vlan_id=123) + + def test_create_bridge_vlan_network_outofrange_returns_400(self): + with testlib_api.ExpectedException( + webob.exc.HTTPClientError) as ctx_manager: + self._test_create_bridge_network(vlan_id=5000) + 
self.assertEqual(ctx_manager.exception.code, 400) + + def test_list_networks_filter_by_id(self): + # We add this unit test to cover some logic specific to the + # nsx plugin + with contextlib.nested(self.network(name='net1'), + self.network(name='net2')) as (net1, net2): + query_params = 'id=%s' % net1['network']['id'] + self._test_list_resources('network', [net1], + query_params=query_params) + query_params += '&id=%s' % net2['network']['id'] + self._test_list_resources('network', [net1, net2], + query_params=query_params) + + def test_delete_network_after_removing_subet(self): + gateway_ip = '10.0.0.1' + cidr = '10.0.0.0/24' + fmt = 'json' + # Create new network + res = self._create_network(fmt=fmt, name='net', + admin_state_up=True) + network = self.deserialize(fmt, res) + subnet = self._make_subnet(fmt, network, gateway_ip, + cidr, ip_version=4) + req = self.new_delete_request('subnets', subnet['subnet']['id']) + sub_del_res = req.get_response(self.api) + self.assertEqual(sub_del_res.status_int, 204) + req = self.new_delete_request('networks', network['network']['id']) + net_del_res = req.get_response(self.api) + self.assertEqual(net_del_res.status_int, 204) + + def test_list_networks_with_shared(self): + with self.network(name='net1'): + with self.network(name='net2', shared=True): + req = self.new_list_request('networks') + res = self.deserialize('json', req.get_response(self.api)) + self.assertEqual(len(res['networks']), 2) + req_2 = self.new_list_request('networks') + req_2.environ['neutron.context'] = context.Context('', + 'somebody') + res = self.deserialize('json', req_2.get_response(self.api)) + # tenant must see a single network + self.assertEqual(len(res['networks']), 1) + + def test_create_network_name_exceeds_40_chars(self): + name = 'this_is_a_network_whose_name_is_longer_than_40_chars' + with self.network(name=name) as net: + # Assert neutron name is not truncated + self.assertEqual(net['network']['name'], name) + + def 
test_create_network_maintenance_returns_503(self): + data = {'network': {'name': 'foo', + 'admin_state_up': True, + 'tenant_id': self._tenant_id}} + with mock.patch.object(nsxlib, 'do_request', + side_effect=nsx_exc.MaintenanceInProgress): + net_req = self.new_create_request('networks', data, self.fmt) + res = net_req.get_response(self.api) + self.assertEqual(webob.exc.HTTPServiceUnavailable.code, + res.status_int) + + def test_update_network_with_admin_false(self): + data = {'network': {'admin_state_up': False}} + with self.network() as net: + plugin = manager.NeutronManager.get_plugin() + self.assertRaises(NotImplementedError, + plugin.update_network, + context.get_admin_context(), + net['network']['id'], data) + + def test_update_network_with_name_calls_nsx(self): + with mock.patch.object( + nsxlib.switch, 'update_lswitch') as update_lswitch_mock: + # don't worry about deleting this network, do not use + # context manager + ctx = context.get_admin_context() + plugin = manager.NeutronManager.get_plugin() + net = plugin.create_network( + ctx, {'network': {'name': 'xxx', + 'admin_state_up': True, + 'shared': False, + 'port_security_enabled': True}}) + plugin.update_network(ctx, net['id'], + {'network': {'name': 'yyy'}}) + update_lswitch_mock.assert_called_once_with( + mock.ANY, mock.ANY, 'yyy') + + +class SecurityGroupsTestCase(ext_sg.SecurityGroupDBTestCase): + + def setUp(self): + test_lib.test_config['config_files'] = [ + vmware.get_fake_conf('nsx.ini.test')] + # mock nsx api client + self.fc = fake.FakeClient(vmware.STUBS_PATH) + self.mock_nsx = mock.patch(vmware.NSXAPI_NAME, autospec=True) + instance = self.mock_nsx.start() + instance.return_value.login.return_value = "the_cookie" + # Avoid runs of the synchronizer looping call + patch_sync = mock.patch.object(sync, '_start_loopingcall') + patch_sync.start() + + instance.return_value.request.side_effect = self.fc.fake_request + super(SecurityGroupsTestCase, self).setUp(vmware.PLUGIN_NAME) + + +class 
TestSecurityGroup(ext_sg.TestSecurityGroups, SecurityGroupsTestCase): + + def test_create_security_group_name_exceeds_40_chars(self): + name = 'this_is_a_secgroup_whose_name_is_longer_than_40_chars' + with self.security_group(name=name) as sg: + # Assert Neutron name is not truncated + self.assertEqual(sg['security_group']['name'], name) + + def test_create_security_group_rule_bad_input(self): + name = 'foo security group' + description = 'foo description' + with self.security_group(name, description) as sg: + security_group_id = sg['security_group']['id'] + protocol = 200 + min_range = 32 + max_range = 4343 + rule = self._build_security_group_rule( + security_group_id, 'ingress', protocol, + min_range, max_range) + res = self._create_security_group_rule(self.fmt, rule) + self.deserialize(self.fmt, res) + self.assertEqual(res.status_int, 400) + + +class TestL3ExtensionManager(object): + + def get_resources(self): + # Simulate extension of L3 attribute map + # First apply attribute extensions + for key in l3.RESOURCE_ATTRIBUTE_MAP.keys(): + l3.RESOURCE_ATTRIBUTE_MAP[key].update( + l3_ext_gw_mode.EXTENDED_ATTRIBUTES_2_0.get(key, {})) + l3.RESOURCE_ATTRIBUTE_MAP[key].update( + dist_router.EXTENDED_ATTRIBUTES_2_0.get(key, {})) + # Finally add l3 resources to the global attribute map + attributes.RESOURCE_ATTRIBUTE_MAP.update( + l3.RESOURCE_ATTRIBUTE_MAP) + return l3.L3.get_resources() + + def get_actions(self): + return [] + + def get_request_extensions(self): + return [] + + +class TestL3SecGrpExtensionManager(TestL3ExtensionManager): + """A fake extension manager for L3 and Security Group extensions. + + Includes also NSX specific L3 attributes. 
+ """ + + def get_resources(self): + resources = super(TestL3SecGrpExtensionManager, + self).get_resources() + resources.extend(secgrp.Securitygroup.get_resources()) + return resources + + +def backup_l3_attribute_map(): + """Return a backup of the original l3 attribute map.""" + return dict((res, attrs.copy()) for + (res, attrs) in l3.RESOURCE_ATTRIBUTE_MAP.iteritems()) + + +def restore_l3_attribute_map(map_to_restore): + """Ensure changes made by fake ext mgrs are reverted.""" + l3.RESOURCE_ATTRIBUTE_MAP = map_to_restore + + +class L3NatTest(test_l3_plugin.L3BaseForIntTests, NsxPluginV2TestCase): + + def _restore_l3_attribute_map(self): + l3.RESOURCE_ATTRIBUTE_MAP = self._l3_attribute_map_bk + + def setUp(self, plugin=vmware.PLUGIN_NAME, ext_mgr=None, + service_plugins=None): + self._l3_attribute_map_bk = {} + for item in l3.RESOURCE_ATTRIBUTE_MAP: + self._l3_attribute_map_bk[item] = ( + l3.RESOURCE_ATTRIBUTE_MAP[item].copy()) + cfg.CONF.set_override('api_extensions_path', vmware.NSXEXT_PATH) + l3_attribute_map_bk = backup_l3_attribute_map() + self.addCleanup(restore_l3_attribute_map, l3_attribute_map_bk) + ext_mgr = ext_mgr or TestL3ExtensionManager() + super(L3NatTest, self).setUp( + plugin=plugin, ext_mgr=ext_mgr, service_plugins=service_plugins) + plugin_instance = manager.NeutronManager.get_plugin() + self._plugin_name = "%s.%s" % ( + plugin_instance.__module__, + plugin_instance.__class__.__name__) + self._plugin_class = plugin_instance.__class__ + + def _create_l3_ext_network(self, vlan_id=None): + name = 'l3_ext_net' + net_type = utils.NetworkTypes.L3_EXT + providernet_args = {pnet.NETWORK_TYPE: net_type, + pnet.PHYSICAL_NETWORK: 'l3_gw_uuid'} + if vlan_id: + providernet_args[pnet.SEGMENTATION_ID] = vlan_id + return self.network(name=name, + router__external=True, + providernet_args=providernet_args, + arg_list=(pnet.NETWORK_TYPE, + pnet.PHYSICAL_NETWORK, + pnet.SEGMENTATION_ID)) + + +class TestL3NatTestCase(L3NatTest, + test_l3_plugin.L3NatDBIntTestCase, 
+ NsxPluginV2TestCase): + + def _test_create_l3_ext_network(self, vlan_id=0): + name = 'l3_ext_net' + net_type = utils.NetworkTypes.L3_EXT + expected = [('subnets', []), ('name', name), ('admin_state_up', True), + ('status', 'ACTIVE'), ('shared', False), + (external_net.EXTERNAL, True), + (pnet.NETWORK_TYPE, net_type), + (pnet.PHYSICAL_NETWORK, 'l3_gw_uuid'), + (pnet.SEGMENTATION_ID, vlan_id)] + with self._create_l3_ext_network(vlan_id) as net: + for k, v in expected: + self.assertEqual(net['network'][k], v) + + def _nsx_validate_ext_gw(self, router_id, l3_gw_uuid, vlan_id): + """Verify data on fake NSX API client in order to validate + plugin did set them properly + """ + # First find the NSX router ID + ctx = context.get_admin_context() + nsx_router_id = nsx_db.get_nsx_router_id(ctx.session, router_id) + ports = [port for port in self.fc._fake_lrouter_lport_dict.values() + if (port['lr_uuid'] == nsx_router_id and + port['att_type'] == "L3GatewayAttachment")] + self.assertEqual(len(ports), 1) + self.assertEqual(ports[0]['attachment_gwsvc_uuid'], l3_gw_uuid) + self.assertEqual(ports[0].get('vlan_id'), vlan_id) + + def test_create_l3_ext_network_without_vlan(self): + self._test_create_l3_ext_network() + + def _test_router_create_with_gwinfo_and_l3_ext_net(self, vlan_id=None, + validate_ext_gw=True): + with self._create_l3_ext_network(vlan_id) as net: + with self.subnet(network=net) as s: + data = {'router': {'tenant_id': 'whatever'}} + data['router']['name'] = 'router1' + data['router']['external_gateway_info'] = { + 'network_id': s['subnet']['network_id']} + router_req = self.new_create_request('routers', data, + self.fmt) + try: + res = router_req.get_response(self.ext_api) + router = self.deserialize(self.fmt, res) + self.assertEqual( + s['subnet']['network_id'], + (router['router']['external_gateway_info'] + ['network_id'])) + if validate_ext_gw: + self._nsx_validate_ext_gw(router['router']['id'], + 'l3_gw_uuid', vlan_id) + finally: + self._delete('routers', 
router['router']['id']) + + def test_router_create_with_gwinfo_and_l3_ext_net(self): + self._test_router_create_with_gwinfo_and_l3_ext_net() + + def test_router_create_with_gwinfo_and_l3_ext_net_with_vlan(self): + self._test_router_create_with_gwinfo_and_l3_ext_net(444) + + def _test_router_create_with_distributed(self, dist_input, dist_expected, + version='3.1', return_code=201): + self.mock_instance.return_value.get_version.return_value = ( + version_module.Version(version)) + + data = {'tenant_id': 'whatever'} + data['name'] = 'router1' + data['distributed'] = dist_input + router_req = self.new_create_request( + 'routers', {'router': data}, self.fmt) + try: + res = router_req.get_response(self.ext_api) + self.assertEqual(return_code, res.status_int) + if res.status_int == 201: + router = self.deserialize(self.fmt, res) + self.assertIn('distributed', router['router']) + self.assertEqual(dist_expected, + router['router']['distributed']) + finally: + if res.status_int == 201: + self._delete('routers', router['router']['id']) + + def test_router_create_distributed_with_3_1(self): + self._test_router_create_with_distributed(True, True) + + def test_router_create_distributed_with_new_nsx_versions(self): + with mock.patch.object(nsxlib.router, 'create_explicit_route_lrouter'): + self._test_router_create_with_distributed(True, True, '3.2') + self._test_router_create_with_distributed(True, True, '4.0') + self._test_router_create_with_distributed(True, True, '4.1') + + def test_router_create_not_distributed(self): + self._test_router_create_with_distributed(False, False) + + def test_router_create_distributed_unspecified(self): + self._test_router_create_with_distributed(None, False) + + def test_router_create_distributed_returns_400(self): + self._test_router_create_with_distributed(True, None, '3.0', 400) + + def test_router_create_on_obsolete_platform(self): + + def obsolete_response(*args, **kwargs): + response = (nsxlib.router. 
+ _create_implicit_routing_lrouter(*args, **kwargs)) + response.pop('distributed') + return response + + with mock.patch.object( + nsxlib.router, 'create_lrouter', new=obsolete_response): + self._test_router_create_with_distributed(None, False, '2.2') + + def _create_router_with_gw_info_for_test(self, subnet): + data = {'router': {'tenant_id': 'whatever', + 'name': 'router1', + 'external_gateway_info': + {'network_id': subnet['subnet']['network_id']}}} + router_req = self.new_create_request( + 'routers', data, self.fmt) + return router_req.get_response(self.ext_api) + + def test_router_create_nsx_error_returns_500(self, vlan_id=None): + with mock.patch.object(nsxlib.router, + 'create_router_lport', + side_effect=api_exc.NsxApiException): + with self._create_l3_ext_network(vlan_id) as net: + with self.subnet(network=net) as s: + res = self._create_router_with_gw_info_for_test(s) + self.assertEqual( + webob.exc.HTTPInternalServerError.code, + res.status_int) + + def test_router_add_gateway_invalid_network_returns_404(self): + # NOTE(salv-orlando): This unit test has been overriden + # as the nsx plugin support the ext_gw_mode extension + # which mandates a uuid for the external network identifier + with self.router() as r: + self._add_external_gateway_to_router( + r['router']['id'], + uuidutils.generate_uuid(), + expected_code=webob.exc.HTTPNotFound.code) + + def _verify_router_rollback(self): + # Check that nothing is left on DB + # TODO(salv-orlando): Verify whehter this is thread-safe + # w.r.t. 
sqllite and parallel testing + self._test_list_resources('router', []) + # Check that router is not in NSX + self.assertFalse(self.fc._fake_lrouter_dict) + + def test_router_create_with_gw_info_neutron_fail_does_rollback(self): + # Simulate get subnet error while building list of ips with prefix + with mock.patch.object(self._plugin_class, + '_build_ip_address_list', + side_effect=ntn_exc.SubnetNotFound( + subnet_id='xxx')): + with self._create_l3_ext_network() as net: + with self.subnet(network=net) as s: + res = self._create_router_with_gw_info_for_test(s) + self.assertEqual( + webob.exc.HTTPNotFound.code, + res.status_int) + self._verify_router_rollback() + + def test_router_create_with_gw_info_nsx_fail_does_rollback(self): + # Simulate error while fetching nsx router gw port + with mock.patch.object(self._plugin_class, + '_find_router_gw_port', + side_effect=api_exc.NsxApiException): + with self._create_l3_ext_network() as net: + with self.subnet(network=net) as s: + res = self._create_router_with_gw_info_for_test(s) + self.assertEqual( + webob.exc.HTTPInternalServerError.code, + res.status_int) + self._verify_router_rollback() + + def _test_router_update_gateway_on_l3_ext_net(self, vlan_id=None, + validate_ext_gw=True): + with self.router() as r: + with self.subnet() as s1: + with self._create_l3_ext_network(vlan_id) as net: + with self.subnet(network=net) as s2: + self._set_net_external(s1['subnet']['network_id']) + try: + self._add_external_gateway_to_router( + r['router']['id'], + s1['subnet']['network_id']) + body = self._show('routers', r['router']['id']) + net_id = (body['router'] + ['external_gateway_info']['network_id']) + self.assertEqual(net_id, + s1['subnet']['network_id']) + # Plug network with external mapping + self._set_net_external(s2['subnet']['network_id']) + self._add_external_gateway_to_router( + r['router']['id'], + s2['subnet']['network_id']) + body = self._show('routers', r['router']['id']) + net_id = (body['router'] + 
['external_gateway_info']['network_id']) + self.assertEqual(net_id, + s2['subnet']['network_id']) + if validate_ext_gw: + self._nsx_validate_ext_gw( + body['router']['id'], + 'l3_gw_uuid', vlan_id) + finally: + # Cleanup + self._remove_external_gateway_from_router( + r['router']['id'], + s2['subnet']['network_id']) + + def test_router_update_gateway_on_l3_ext_net(self): + self._test_router_update_gateway_on_l3_ext_net() + + def test_router_update_gateway_on_l3_ext_net_with_vlan(self): + self._test_router_update_gateway_on_l3_ext_net(444) + + def test_router_list_by_tenant_id(self): + with contextlib.nested(self.router(tenant_id='custom'), + self.router(), + self.router() + ) as routers: + self._test_list_resources('router', [routers[0]], + query_params="tenant_id=custom") + + def test_create_l3_ext_network_with_vlan(self): + self._test_create_l3_ext_network(666) + + def test_floatingip_with_assoc_fails(self): + self._test_floatingip_with_assoc_fails(self._plugin_name) + + def test_floatingip_with_invalid_create_port(self): + self._test_floatingip_with_invalid_create_port(self._plugin_name) + + def _metadata_setup(self): + cfg.CONF.set_override('metadata_mode', 'access_network', 'NSX') + + def _metadata_teardown(self): + cfg.CONF.set_override('metadata_mode', None, 'NSX') + + def test_create_router_name_exceeds_40_chars(self): + name = 'this_is_a_router_whose_name_is_longer_than_40_chars' + with self.router(name=name) as rtr: + # Assert Neutron name is not truncated + self.assertEqual(rtr['router']['name'], name) + + def test_router_add_interface_subnet_with_metadata_access(self): + self._metadata_setup() + self.test_router_add_interface_subnet() + self._metadata_teardown() + + def test_router_add_interface_port_with_metadata_access(self): + self._metadata_setup() + self.test_router_add_interface_port() + self._metadata_teardown() + + def test_router_add_interface_dupsubnet_returns_400_with_metadata(self): + self._metadata_setup() + 
self.test_router_add_interface_dup_subnet1_returns_400() + self._metadata_teardown() + + def test_router_add_interface_overlapped_cidr_returns_400_with(self): + self._metadata_setup() + self.test_router_add_interface_overlapped_cidr_returns_400() + self._metadata_teardown() + + def test_router_remove_interface_inuse_returns_409_with_metadata(self): + self._metadata_setup() + self.test_router_remove_interface_inuse_returns_409() + self._metadata_teardown() + + def test_router_remove_iface_wrong_sub_returns_400_with_metadata(self): + self._metadata_setup() + self.test_router_remove_interface_wrong_subnet_returns_400() + self._metadata_teardown() + + def test_router_delete_with_metadata_access(self): + self._metadata_setup() + self.test_router_delete() + self._metadata_teardown() + + def test_router_delete_with_port_existed_returns_409_with_metadata(self): + self._metadata_setup() + self.test_router_delete_with_port_existed_returns_409() + self._metadata_teardown() + + def test_metadatata_network_created_with_router_interface_add(self): + self._metadata_setup() + with mock.patch.object(self._plugin_class, 'schedule_network') as f: + with self.router() as r: + with self.subnet() as s: + self._router_interface_action('add', + r['router']['id'], + s['subnet']['id'], + None) + r_ports = self._list('ports')['ports'] + self.assertEqual(len(r_ports), 2) + ips = [] + for port in r_ports: + ips.extend([netaddr.IPAddress(fixed_ip['ip_address']) + for fixed_ip in port['fixed_ips']]) + meta_cidr = netaddr.IPNetwork('169.254.0.0/16') + self.assertTrue(any([ip in meta_cidr for ip in ips])) + # Needed to avoid 409 + self._router_interface_action('remove', + r['router']['id'], + s['subnet']['id'], + None) + # Verify that the metadata network gets scheduled first, so that + # an active dhcp agent can pick it up + expected_meta_net = { + 'status': 'ACTIVE', + 'subnets': [], + 'name': 'meta-%s' % r['router']['id'], + 'admin_state_up': True, + 'tenant_id': '', + 'port_security_enabled': 
False, + 'shared': False, + 'id': mock.ANY + } + f.assert_called_once_with(mock.ANY, expected_meta_net) + self._metadata_teardown() + + def test_metadata_network_create_rollback_on_create_subnet_failure(self): + self._metadata_setup() + with self.router() as r: + with self.subnet() as s: + # Raise a NeutronException (eg: NotFound) + with mock.patch.object(self._plugin_class, + 'create_subnet', + side_effect=ntn_exc.NotFound): + self._router_interface_action( + 'add', r['router']['id'], s['subnet']['id'], None) + # Ensure metadata network was removed + nets = self._list('networks')['networks'] + self.assertEqual(len(nets), 1) + # Needed to avoid 409 + self._router_interface_action('remove', + r['router']['id'], + s['subnet']['id'], + None) + self._metadata_teardown() + + def test_metadata_network_create_rollback_on_add_rtr_iface_failure(self): + self._metadata_setup() + with self.router() as r: + with self.subnet() as s: + # Raise a NeutronException when adding metadata subnet + # to router + # save function being mocked + real_func = self._plugin_class.add_router_interface + plugin_instance = manager.NeutronManager.get_plugin() + + def side_effect(*args): + if args[-1]['subnet_id'] == s['subnet']['id']: + # do the real thing + return real_func(plugin_instance, *args) + # otherwise raise + raise api_exc.NsxApiException() + + with mock.patch.object(self._plugin_class, + 'add_router_interface', + side_effect=side_effect): + self._router_interface_action( + 'add', r['router']['id'], s['subnet']['id'], None) + # Ensure metadata network was removed + nets = self._list('networks')['networks'] + self.assertEqual(len(nets), 1) + # Needed to avoid 409 + self._router_interface_action('remove', + r['router']['id'], + s['subnet']['id'], + None) + self._metadata_teardown() + + def test_metadata_network_removed_with_router_interface_remove(self): + self._metadata_setup() + with self.router() as r: + with self.subnet() as s: + self._router_interface_action('add', 
r['router']['id'], + s['subnet']['id'], None) + subnets = self._list('subnets')['subnets'] + self.assertEqual(len(subnets), 2) + meta_cidr = netaddr.IPNetwork('169.254.0.0/16') + for subnet in subnets: + cidr = netaddr.IPNetwork(subnet['cidr']) + if meta_cidr == cidr or meta_cidr in cidr.supernet(16): + meta_sub_id = subnet['id'] + meta_net_id = subnet['network_id'] + ports = self._list( + 'ports', + query_params='network_id=%s' % meta_net_id)['ports'] + self.assertEqual(len(ports), 1) + meta_port_id = ports[0]['id'] + self._router_interface_action('remove', r['router']['id'], + s['subnet']['id'], None) + self._show('networks', meta_net_id, + webob.exc.HTTPNotFound.code) + self._show('ports', meta_port_id, + webob.exc.HTTPNotFound.code) + self._show('subnets', meta_sub_id, + webob.exc.HTTPNotFound.code) + self._metadata_teardown() + + def test_metadata_network_remove_rollback_on_failure(self): + self._metadata_setup() + with self.router() as r: + with self.subnet() as s: + self._router_interface_action('add', r['router']['id'], + s['subnet']['id'], None) + networks = self._list('networks')['networks'] + for network in networks: + if network['id'] != s['subnet']['network_id']: + meta_net_id = network['id'] + ports = self._list( + 'ports', + query_params='network_id=%s' % meta_net_id)['ports'] + meta_port_id = ports[0]['id'] + # Raise a NeutronException when removing + # metadata subnet from router + # save function being mocked + real_func = self._plugin_class.remove_router_interface + plugin_instance = manager.NeutronManager.get_plugin() + + def side_effect(*args): + if args[-1].get('subnet_id') == s['subnet']['id']: + # do the real thing + return real_func(plugin_instance, *args) + # otherwise raise + raise api_exc.NsxApiException() + + with mock.patch.object(self._plugin_class, + 'remove_router_interface', + side_effect=side_effect): + self._router_interface_action('remove', r['router']['id'], + s['subnet']['id'], None) + # Metadata network and subnet should 
still be there + self._show('networks', meta_net_id, + webob.exc.HTTPOk.code) + self._show('ports', meta_port_id, + webob.exc.HTTPOk.code) + self._metadata_teardown() + + def test_metadata_dhcp_host_route(self): + cfg.CONF.set_override('metadata_mode', 'dhcp_host_route', 'NSX') + subnets = self._list('subnets')['subnets'] + with self.subnet() as s: + with self.port(subnet=s, device_id='1234', + device_owner=constants.DEVICE_OWNER_DHCP): + subnets = self._list('subnets')['subnets'] + self.assertEqual(len(subnets), 1) + self.assertEqual(subnets[0]['host_routes'][0]['nexthop'], + '10.0.0.2') + self.assertEqual(subnets[0]['host_routes'][0]['destination'], + '169.254.169.254/32') + + subnets = self._list('subnets')['subnets'] + # Test that route is deleted after dhcp port is removed + self.assertEqual(len(subnets[0]['host_routes']), 0) + + def test_floatingip_disassociate(self): + with self.port() as p: + private_sub = {'subnet': {'id': + p['port']['fixed_ips'][0]['subnet_id']}} + with self.floatingip_no_assoc(private_sub) as fip: + port_id = p['port']['id'] + body = self._update('floatingips', fip['floatingip']['id'], + {'floatingip': {'port_id': port_id}}) + self.assertEqual(body['floatingip']['port_id'], port_id) + # Disassociate + body = self._update('floatingips', fip['floatingip']['id'], + {'floatingip': {'port_id': None}}) + body = self._show('floatingips', fip['floatingip']['id']) + self.assertIsNone(body['floatingip']['port_id']) + self.assertIsNone(body['floatingip']['fixed_ip_address']) + + def test_create_router_maintenance_returns_503(self): + with self._create_l3_ext_network() as net: + with self.subnet(network=net) as s: + with mock.patch.object( + nsxlib, + 'do_request', + side_effect=nsx_exc.MaintenanceInProgress): + data = {'router': {'tenant_id': 'whatever'}} + data['router']['name'] = 'router1' + data['router']['external_gateway_info'] = { + 'network_id': s['subnet']['network_id']} + router_req = self.new_create_request( + 'routers', data, self.fmt) 
+ res = router_req.get_response(self.ext_api) + self.assertEqual(webob.exc.HTTPServiceUnavailable.code, + res.status_int) + + +class ExtGwModeTestCase(NsxPluginV2TestCase, + test_ext_gw_mode.ExtGwModeIntTestCase): + pass + + +class NeutronNsxOutOfSync(NsxPluginV2TestCase, + test_l3_plugin.L3NatTestCaseMixin, + ext_sg.SecurityGroupsTestCase): + + def setUp(self): + l3_attribute_map_bk = backup_l3_attribute_map() + self.addCleanup(restore_l3_attribute_map, l3_attribute_map_bk) + super(NeutronNsxOutOfSync, self).setUp( + ext_mgr=TestL3SecGrpExtensionManager()) + + def test_delete_network_not_in_nsx(self): + res = self._create_network('json', 'net1', True) + net1 = self.deserialize('json', res) + self.fc._fake_lswitch_dict.clear() + req = self.new_delete_request('networks', net1['network']['id']) + res = req.get_response(self.api) + self.assertEqual(res.status_int, 204) + + def test_show_network_not_in_nsx(self): + res = self._create_network('json', 'net1', True) + net = self.deserialize('json', res) + self.fc._fake_lswitch_dict.clear() + req = self.new_show_request('networks', net['network']['id'], + fields=['id', 'status']) + net = self.deserialize('json', req.get_response(self.api)) + self.assertEqual(net['network']['status'], + constants.NET_STATUS_ERROR) + + def test_delete_port_not_in_nsx(self): + res = self._create_network('json', 'net1', True) + net1 = self.deserialize('json', res) + res = self._create_port('json', net1['network']['id']) + port = self.deserialize('json', res) + self.fc._fake_lswitch_lport_dict.clear() + req = self.new_delete_request('ports', port['port']['id']) + res = req.get_response(self.api) + self.assertEqual(res.status_int, 204) + + def test_show_port_not_in_nsx(self): + res = self._create_network('json', 'net1', True) + net1 = self.deserialize('json', res) + res = self._create_port('json', net1['network']['id']) + port = self.deserialize('json', res) + self.fc._fake_lswitch_lport_dict.clear() + 
self.fc._fake_lswitch_lportstatus_dict.clear() + req = self.new_show_request('ports', port['port']['id'], + fields=['id', 'status']) + net = self.deserialize('json', req.get_response(self.api)) + self.assertEqual(net['port']['status'], + constants.PORT_STATUS_ERROR) + + def test_create_port_on_network_not_in_nsx(self): + res = self._create_network('json', 'net1', True) + net1 = self.deserialize('json', res) + self.fc._fake_lswitch_dict.clear() + res = self._create_port('json', net1['network']['id']) + port = self.deserialize('json', res) + self.assertEqual(port['port']['status'], constants.PORT_STATUS_ERROR) + + def test_update_port_not_in_nsx(self): + res = self._create_network('json', 'net1', True) + net1 = self.deserialize('json', res) + res = self._create_port('json', net1['network']['id']) + port = self.deserialize('json', res) + self.fc._fake_lswitch_lport_dict.clear() + data = {'port': {'name': 'error_port'}} + req = self.new_update_request('ports', data, port['port']['id']) + port = self.deserialize('json', req.get_response(self.api)) + self.assertEqual(port['port']['status'], constants.PORT_STATUS_ERROR) + self.assertEqual(port['port']['name'], 'error_port') + + def test_delete_port_and_network_not_in_nsx(self): + res = self._create_network('json', 'net1', True) + net1 = self.deserialize('json', res) + res = self._create_port('json', net1['network']['id']) + port = self.deserialize('json', res) + self.fc._fake_lswitch_dict.clear() + self.fc._fake_lswitch_lport_dict.clear() + req = self.new_delete_request('ports', port['port']['id']) + res = req.get_response(self.api) + self.assertEqual(res.status_int, 204) + req = self.new_delete_request('networks', net1['network']['id']) + res = req.get_response(self.api) + self.assertEqual(res.status_int, 204) + + def test_delete_router_not_in_nsx(self): + res = self._create_router('json', 'tenant') + router = self.deserialize('json', res) + self.fc._fake_lrouter_dict.clear() + req = self.new_delete_request('routers', 
router['router']['id']) + res = req.get_response(self.ext_api) + self.assertEqual(res.status_int, 204) + + def test_show_router_not_in_nsx(self): + res = self._create_router('json', 'tenant') + router = self.deserialize('json', res) + self.fc._fake_lrouter_dict.clear() + req = self.new_show_request('routers', router['router']['id'], + fields=['id', 'status']) + router = self.deserialize('json', req.get_response(self.ext_api)) + self.assertEqual(router['router']['status'], + constants.NET_STATUS_ERROR) + + def _create_network_and_subnet(self, cidr, external=False): + net_res = self._create_network('json', 'ext_net', True) + net = self.deserialize('json', net_res) + net_id = net['network']['id'] + if external: + self._update('networks', net_id, + {'network': {external_net.EXTERNAL: True}}) + sub_res = self._create_subnet('json', net_id, cidr) + sub = self.deserialize('json', sub_res) + return net_id, sub['subnet']['id'] + + def test_clear_gateway_nat_rule_not_in_nsx(self): + # Create external network and subnet + ext_net_id = self._create_network_and_subnet('1.1.1.0/24', True)[0] + # Create internal network and subnet + int_sub_id = self._create_network_and_subnet('10.0.0.0/24')[1] + res = self._create_router('json', 'tenant') + router = self.deserialize('json', res) + # Add interface to router (needed to generate NAT rule) + req = self.new_action_request( + 'routers', + {'subnet_id': int_sub_id}, + router['router']['id'], + "add_router_interface") + res = req.get_response(self.ext_api) + self.assertEqual(res.status_int, 200) + # Set gateway for router + req = self.new_update_request( + 'routers', + {'router': {'external_gateway_info': + {'network_id': ext_net_id}}}, + router['router']['id']) + res = req.get_response(self.ext_api) + self.assertEqual(res.status_int, 200) + # Delete NAT rule from NSX, clear gateway + # and verify operation still succeeds + self.fc._fake_lrouter_nat_dict.clear() + req = self.new_update_request( + 'routers', + {'router': 
{'external_gateway_info': {}}}, + router['router']['id']) + res = req.get_response(self.ext_api) + self.assertEqual(res.status_int, 200) + + def test_remove_router_interface_not_in_nsx(self): + # Create internal network and subnet + int_sub_id = self._create_network_and_subnet('10.0.0.0/24')[1] + res = self._create_router('json', 'tenant') + router = self.deserialize('json', res) + # Add interface to router (needed to generate NAT rule) + req = self.new_action_request( + 'routers', + {'subnet_id': int_sub_id}, + router['router']['id'], + "add_router_interface") + res = req.get_response(self.ext_api) + self.assertEqual(res.status_int, 200) + self.fc._fake_lrouter_dict.clear() + req = self.new_action_request( + 'routers', + {'subnet_id': int_sub_id}, + router['router']['id'], + "remove_router_interface") + res = req.get_response(self.ext_api) + self.assertEqual(res.status_int, 200) + + def test_update_router_not_in_nsx(self): + res = self._create_router('json', 'tenant') + router = self.deserialize('json', res) + self.fc._fake_lrouter_dict.clear() + req = self.new_update_request( + 'routers', + {'router': {'name': 'goo'}}, + router['router']['id']) + res = req.get_response(self.ext_api) + self.assertEqual(res.status_int, 500) + req = self.new_show_request('routers', router['router']['id']) + router = self.deserialize('json', req.get_response(self.ext_api)) + self.assertEqual(router['router']['status'], + constants.NET_STATUS_ERROR) + + def test_delete_security_group_not_in_nsx(self): + res = self._create_security_group('json', 'name', 'desc') + sec_group = self.deserialize('json', res) + self.fc._fake_securityprofile_dict.clear() + req = self.new_delete_request( + 'security-groups', + sec_group['security_group']['id']) + res = req.get_response(self.ext_api) + self.assertEqual(res.status_int, 204) diff --git a/neutron/tests/unit/vmware/test_nsx_sync.py b/neutron/tests/unit/vmware/test_nsx_sync.py new file mode 100644 index 000000000..67c2fdd44 --- /dev/null +++ 
b/neutron/tests/unit/vmware/test_nsx_sync.py @@ -0,0 +1,712 @@ +# Copyright 2013 VMware, Inc. +# All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +import contextlib +import time + +import mock +from oslo.config import cfg + +from neutron.api.v2 import attributes as attr +from neutron.common import constants +from neutron.common import exceptions as n_exc +from neutron import context +from neutron.extensions import l3 +from neutron.openstack.common import jsonutils as json +from neutron.openstack.common import log +from neutron.plugins.vmware.api_client import client +from neutron.plugins.vmware.api_client import exception as api_exc +from neutron.plugins.vmware.api_client import version +from neutron.plugins.vmware.common import sync +from neutron.plugins.vmware.dbexts import db +from neutron.plugins.vmware import nsx_cluster as cluster +from neutron.plugins.vmware import nsxlib +from neutron.plugins.vmware import plugin +from neutron.tests import base +from neutron.tests.unit import test_api_v2 +from neutron.tests.unit import vmware +from neutron.tests.unit.vmware.apiclient import fake + +LOG = log.getLogger(__name__) + +_uuid = test_api_v2._uuid +LSWITCHES = [{'uuid': _uuid(), 'name': 'ls-1'}, + {'uuid': _uuid(), 'name': 'ls-2'}] +LSWITCHPORTS = [{'uuid': _uuid(), 'name': 'lp-1'}, + {'uuid': _uuid(), 'name': 'lp-2'}] +LROUTERS = [{'uuid': _uuid(), 'name': 'lr-1'}, + {'uuid': _uuid(), 'name': 'lr-2'}] + + +class 
CacheTestCase(base.BaseTestCase): + """Test suite providing coverage for the Cache class.""" + + def setUp(self): + self.nsx_cache = sync.NsxCache() + for lswitch in LSWITCHES: + self.nsx_cache._uuid_dict_mappings[lswitch['uuid']] = ( + self.nsx_cache._lswitches) + self.nsx_cache._lswitches[lswitch['uuid']] = ( + {'data': lswitch, + 'hash': hash(json.dumps(lswitch))}) + for lswitchport in LSWITCHPORTS: + self.nsx_cache._uuid_dict_mappings[lswitchport['uuid']] = ( + self.nsx_cache._lswitchports) + self.nsx_cache._lswitchports[lswitchport['uuid']] = ( + {'data': lswitchport, + 'hash': hash(json.dumps(lswitchport))}) + for lrouter in LROUTERS: + self.nsx_cache._uuid_dict_mappings[lrouter['uuid']] = ( + self.nsx_cache._lrouters) + self.nsx_cache._lrouters[lrouter['uuid']] = ( + {'data': lrouter, + 'hash': hash(json.dumps(lrouter))}) + super(CacheTestCase, self).setUp() + + def test_get_lswitches(self): + ls_uuids = self.nsx_cache.get_lswitches() + self.assertEqual(set(ls_uuids), + set([ls['uuid'] for ls in LSWITCHES])) + + def test_get_lswitchports(self): + lp_uuids = self.nsx_cache.get_lswitchports() + self.assertEqual(set(lp_uuids), + set([lp['uuid'] for lp in LSWITCHPORTS])) + + def test_get_lrouters(self): + lr_uuids = self.nsx_cache.get_lrouters() + self.assertEqual(set(lr_uuids), + set([lr['uuid'] for lr in LROUTERS])) + + def test_get_lswitches_changed_only(self): + ls_uuids = self.nsx_cache.get_lswitches(changed_only=True) + self.assertEqual(0, len(ls_uuids)) + + def test_get_lswitchports_changed_only(self): + lp_uuids = self.nsx_cache.get_lswitchports(changed_only=True) + self.assertEqual(0, len(lp_uuids)) + + def test_get_lrouters_changed_only(self): + lr_uuids = self.nsx_cache.get_lrouters(changed_only=True) + self.assertEqual(0, len(lr_uuids)) + + def _verify_update(self, new_resource, changed=True, hit=True): + cached_resource = self.nsx_cache[new_resource['uuid']] + self.assertEqual(new_resource, cached_resource['data']) + self.assertEqual(hit, 
cached_resource.get('hit', False)) + self.assertEqual(changed, + cached_resource.get('changed', False)) + + def test_update_lswitch_new_item(self): + new_switch_uuid = _uuid() + new_switch = {'uuid': new_switch_uuid, 'name': 'new_switch'} + self.nsx_cache.update_lswitch(new_switch) + self.assertIn(new_switch_uuid, self.nsx_cache._lswitches.keys()) + self._verify_update(new_switch) + + def test_update_lswitch_existing_item(self): + switch = LSWITCHES[0] + switch['name'] = 'new_name' + self.nsx_cache.update_lswitch(switch) + self.assertIn(switch['uuid'], self.nsx_cache._lswitches.keys()) + self._verify_update(switch) + + def test_update_lswitchport_new_item(self): + new_switchport_uuid = _uuid() + new_switchport = {'uuid': new_switchport_uuid, + 'name': 'new_switchport'} + self.nsx_cache.update_lswitchport(new_switchport) + self.assertIn(new_switchport_uuid, + self.nsx_cache._lswitchports.keys()) + self._verify_update(new_switchport) + + def test_update_lswitchport_existing_item(self): + switchport = LSWITCHPORTS[0] + switchport['name'] = 'new_name' + self.nsx_cache.update_lswitchport(switchport) + self.assertIn(switchport['uuid'], + self.nsx_cache._lswitchports.keys()) + self._verify_update(switchport) + + def test_update_lrouter_new_item(self): + new_router_uuid = _uuid() + new_router = {'uuid': new_router_uuid, + 'name': 'new_router'} + self.nsx_cache.update_lrouter(new_router) + self.assertIn(new_router_uuid, + self.nsx_cache._lrouters.keys()) + self._verify_update(new_router) + + def test_update_lrouter_existing_item(self): + router = LROUTERS[0] + router['name'] = 'new_name' + self.nsx_cache.update_lrouter(router) + self.assertIn(router['uuid'], + self.nsx_cache._lrouters.keys()) + self._verify_update(router) + + def test_process_updates_initial(self): + # Clear cache content to simulate first-time filling + self.nsx_cache._lswitches.clear() + self.nsx_cache._lswitchports.clear() + self.nsx_cache._lrouters.clear() + self.nsx_cache.process_updates(LSWITCHES, 
LROUTERS, LSWITCHPORTS) + for resource in LSWITCHES + LROUTERS + LSWITCHPORTS: + self._verify_update(resource) + + def test_process_updates_no_change(self): + self.nsx_cache.process_updates(LSWITCHES, LROUTERS, LSWITCHPORTS) + for resource in LSWITCHES + LROUTERS + LSWITCHPORTS: + self._verify_update(resource, changed=False) + + def test_process_updates_with_changes(self): + LSWITCHES[0]['name'] = 'altered' + self.nsx_cache.process_updates(LSWITCHES, LROUTERS, LSWITCHPORTS) + for resource in LSWITCHES + LROUTERS + LSWITCHPORTS: + changed = (True if resource['uuid'] == LSWITCHES[0]['uuid'] + else False) + self._verify_update(resource, changed=changed) + + def _test_process_updates_with_removals(self): + lswitches = LSWITCHES[:] + lswitch = lswitches.pop() + self.nsx_cache.process_updates(lswitches, LROUTERS, LSWITCHPORTS) + for resource in LSWITCHES + LROUTERS + LSWITCHPORTS: + hit = (False if resource['uuid'] == lswitch['uuid'] + else True) + self._verify_update(resource, changed=False, hit=hit) + return (lswitch, lswitches) + + def test_process_updates_with_removals(self): + self._test_process_updates_with_removals() + + def test_process_updates_cleanup_after_delete(self): + deleted_lswitch, lswitches = self._test_process_updates_with_removals() + self.nsx_cache.process_deletes() + self.nsx_cache.process_updates(lswitches, LROUTERS, LSWITCHPORTS) + self.assertNotIn(deleted_lswitch['uuid'], self.nsx_cache._lswitches) + + def _verify_delete(self, resource, deleted=True, hit=True): + cached_resource = self.nsx_cache[resource['uuid']] + data_field = 'data_bk' if deleted else 'data' + self.assertEqual(resource, cached_resource[data_field]) + self.assertEqual(hit, cached_resource.get('hit', False)) + self.assertEqual(deleted, + cached_resource.get('changed', False)) + + def _set_hit(self, resources, uuid_to_delete=None): + for resource in resources: + if resource['data']['uuid'] != uuid_to_delete: + resource['hit'] = True + + def test_process_deletes_no_change(self): + 
# Mark all resources as hit + self._set_hit(self.nsx_cache._lswitches.values()) + self._set_hit(self.nsx_cache._lswitchports.values()) + self._set_hit(self.nsx_cache._lrouters.values()) + self.nsx_cache.process_deletes() + for resource in LSWITCHES + LROUTERS + LSWITCHPORTS: + self._verify_delete(resource, hit=False, deleted=False) + + def test_process_deletes_with_removals(self): + # Mark all resources but one as hit + uuid_to_delete = LSWITCHPORTS[0]['uuid'] + self._set_hit(self.nsx_cache._lswitches.values(), + uuid_to_delete) + self._set_hit(self.nsx_cache._lswitchports.values(), + uuid_to_delete) + self._set_hit(self.nsx_cache._lrouters.values(), + uuid_to_delete) + self.nsx_cache.process_deletes() + for resource in LSWITCHES + LROUTERS + LSWITCHPORTS: + deleted = resource['uuid'] == uuid_to_delete + self._verify_delete(resource, hit=False, deleted=deleted) + + +class SyncLoopingCallTestCase(base.BaseTestCase): + + def test_looping_calls(self): + # Avoid runs of the synchronization process - just start + # the looping call + with mock.patch.object( + sync.NsxSynchronizer, '_synchronize_state', return_value=0.01): + synchronizer = sync.NsxSynchronizer(mock.ANY, mock.ANY, + 100, 0, 0) + time.sleep(0.03) + # stop looping call before asserting + synchronizer._sync_looping_call.stop() + # Just verify the looping call has been called, trying + # to assess the exact number of calls would be unreliable + self.assertTrue(synchronizer._synchronize_state.call_count) + + +class SyncTestCase(base.BaseTestCase): + + def setUp(self): + # mock api client + self.fc = fake.FakeClient(vmware.STUBS_PATH) + mock_api = mock.patch(vmware.NSXAPI_NAME, autospec=True) + # Avoid runs of the synchronizer looping call + # These unit tests will excplicitly invoke synchronization + patch_sync = mock.patch.object(sync, '_start_loopingcall') + self.mock_api = mock_api.start() + patch_sync.start() + self.mock_api.return_value.login.return_value = "the_cookie" + # Emulate tests against NSX 3.x + 
self.mock_api.return_value.get_version.return_value = ( + version.Version("3.1")) + + self.mock_api.return_value.request.side_effect = self.fc.fake_request + self.fake_cluster = cluster.NSXCluster( + name='fake-cluster', nsx_controllers=['1.1.1.1:999'], + default_tz_uuid=_uuid(), nsx_user='foo', nsx_password='bar') + self.fake_cluster.api_client = client.NsxApiClient( + ('1.1.1.1', '999', True), + self.fake_cluster.nsx_user, self.fake_cluster.nsx_password, + request_timeout=self.fake_cluster.req_timeout, + http_timeout=self.fake_cluster.http_timeout, + retries=self.fake_cluster.retries, + redirects=self.fake_cluster.redirects) + # Instantiate Neutron plugin + # and setup needed config variables + args = ['--config-file', vmware.get_fake_conf('neutron.conf.test'), + '--config-file', vmware.get_fake_conf('nsx.ini.test')] + self.config_parse(args=args) + cfg.CONF.set_override('allow_overlapping_ips', True) + self._plugin = plugin.NsxPlugin() + # Mock neutron manager plugin load functions to speed up tests + mock_nm_get_plugin = mock.patch('neutron.manager.NeutronManager.' 
+ 'get_plugin') + mock_nm_get_service_plugins = mock.patch( + 'neutron.manager.NeutronManager.get_service_plugins') + self.mock_nm_get_plugin = mock_nm_get_plugin.start() + self.mock_nm_get_plugin.return_value = self._plugin + mock_nm_get_service_plugins.start() + super(SyncTestCase, self).setUp() + self.addCleanup(self.fc.reset_all) + + @contextlib.contextmanager + def _populate_data(self, ctx, net_size=2, port_size=2, router_size=2): + + def network(idx): + return {'network': {'name': 'net-%s' % idx, + 'admin_state_up': True, + 'shared': False, + 'port_security_enabled': True, + 'tenant_id': 'foo'}} + + def subnet(idx, net_id): + return {'subnet': + {'cidr': '10.10.%s.0/24' % idx, + 'name': 'sub-%s' % idx, + 'gateway_ip': attr.ATTR_NOT_SPECIFIED, + 'allocation_pools': attr.ATTR_NOT_SPECIFIED, + 'ip_version': 4, + 'dns_nameservers': attr.ATTR_NOT_SPECIFIED, + 'host_routes': attr.ATTR_NOT_SPECIFIED, + 'enable_dhcp': True, + 'network_id': net_id, + 'tenant_id': 'foo'}} + + def port(idx, net_id): + return {'port': {'network_id': net_id, + 'name': 'port-%s' % idx, + 'admin_state_up': True, + 'device_id': 'miao', + 'device_owner': 'bau', + 'fixed_ips': attr.ATTR_NOT_SPECIFIED, + 'mac_address': attr.ATTR_NOT_SPECIFIED, + 'tenant_id': 'foo'}} + + def router(idx): + # Use random uuids as names + return {'router': {'name': 'rtr-%s' % idx, + 'admin_state_up': True, + 'tenant_id': 'foo'}} + + networks = [] + ports = [] + routers = [] + for i in range(net_size): + net = self._plugin.create_network(ctx, network(i)) + networks.append(net) + self._plugin.create_subnet(ctx, subnet(i, net['id'])) + for j in range(port_size): + ports.append(self._plugin.create_port( + ctx, port("%s-%s" % (i, j), net['id']))) + for i in range(router_size): + routers.append(self._plugin.create_router(ctx, router(i))) + # Do not return anything as the user does need the actual + # data created + yield + + # Remove everything + for router in routers: + self._plugin.delete_router(ctx, router['id']) + 
for port in ports: + self._plugin.delete_port(ctx, port['id']) + # This will remove networks and subnets + for network in networks: + self._plugin.delete_network(ctx, network['id']) + + def _get_tag_dict(self, tags): + return dict((tag['scope'], tag['tag']) for tag in tags) + + def _test_sync(self, exp_net_status, + exp_port_status, exp_router_status, + action_callback=None, sp=None): + ls_uuid = self.fc._fake_lswitch_dict.keys()[0] + neutron_net_id = self._get_tag_dict( + self.fc._fake_lswitch_dict[ls_uuid]['tags'])['quantum_net_id'] + lp_uuid = self.fc._fake_lswitch_lport_dict.keys()[0] + neutron_port_id = self._get_tag_dict( + self.fc._fake_lswitch_lport_dict[lp_uuid]['tags'])['q_port_id'] + lr_uuid = self.fc._fake_lrouter_dict.keys()[0] + neutron_rtr_id = self._get_tag_dict( + self.fc._fake_lrouter_dict[lr_uuid]['tags'])['q_router_id'] + if action_callback: + action_callback(ls_uuid, lp_uuid, lr_uuid) + # Make chunk big enough to read everything + if not sp: + sp = sync.SyncParameters(100) + self._plugin._synchronizer._synchronize_state(sp) + # Verify element is in expected status + # TODO(salv-orlando): Verify status for all elements + ctx = context.get_admin_context() + neutron_net = self._plugin.get_network(ctx, neutron_net_id) + neutron_port = self._plugin.get_port(ctx, neutron_port_id) + neutron_rtr = self._plugin.get_router(ctx, neutron_rtr_id) + self.assertEqual(exp_net_status, neutron_net['status']) + self.assertEqual(exp_port_status, neutron_port['status']) + self.assertEqual(exp_router_status, neutron_rtr['status']) + + def _action_callback_status_down(self, ls_uuid, lp_uuid, lr_uuid): + self.fc._fake_lswitch_dict[ls_uuid]['status'] = 'false' + self.fc._fake_lswitch_lport_dict[lp_uuid]['status'] = 'false' + self.fc._fake_lrouter_dict[lr_uuid]['status'] = 'false' + + def test_initial_sync(self): + ctx = context.get_admin_context() + with self._populate_data(ctx): + self._test_sync( + constants.NET_STATUS_ACTIVE, + constants.PORT_STATUS_ACTIVE, + 
constants.NET_STATUS_ACTIVE) + + def test_initial_sync_with_resources_down(self): + ctx = context.get_admin_context() + with self._populate_data(ctx): + self._test_sync( + constants.NET_STATUS_DOWN, constants.PORT_STATUS_DOWN, + constants.NET_STATUS_DOWN, self._action_callback_status_down) + + def test_resync_with_resources_down(self): + ctx = context.get_admin_context() + with self._populate_data(ctx): + sp = sync.SyncParameters(100) + self._plugin._synchronizer._synchronize_state(sp) + # Ensure the synchronizer performs a resync + sp.init_sync_performed = True + self._test_sync( + constants.NET_STATUS_DOWN, constants.PORT_STATUS_DOWN, + constants.NET_STATUS_DOWN, self._action_callback_status_down, + sp=sp) + + def _action_callback_del_resource(self, ls_uuid, lp_uuid, lr_uuid): + del self.fc._fake_lswitch_dict[ls_uuid] + del self.fc._fake_lswitch_lport_dict[lp_uuid] + del self.fc._fake_lrouter_dict[lr_uuid] + + def test_initial_sync_with_resources_removed(self): + ctx = context.get_admin_context() + with self._populate_data(ctx): + self._test_sync( + constants.NET_STATUS_ERROR, constants.PORT_STATUS_ERROR, + constants.NET_STATUS_ERROR, self._action_callback_del_resource) + + def test_resync_with_resources_removed(self): + ctx = context.get_admin_context() + with self._populate_data(ctx): + sp = sync.SyncParameters(100) + self._plugin._synchronizer._synchronize_state(sp) + # Ensure the synchronizer performs a resync + sp.init_sync_performed = True + self._test_sync( + constants.NET_STATUS_ERROR, constants.PORT_STATUS_ERROR, + constants.NET_STATUS_ERROR, self._action_callback_del_resource, + sp=sp) + + def _test_sync_with_chunk_larger_maxpagesize( + self, net_size, port_size, router_size, chunk_size, exp_calls): + ctx = context.get_admin_context() + real_func = nsxlib.get_single_query_page + sp = sync.SyncParameters(chunk_size) + with self._populate_data(ctx, net_size=net_size, + port_size=port_size, + router_size=router_size): + with mock.patch.object(sync, 
'MAX_PAGE_SIZE', 15): + # The following mock is just for counting calls, + # but we will still run the actual function + with mock.patch.object( + nsxlib, 'get_single_query_page', + side_effect=real_func) as mock_get_page: + self._test_sync( + constants.NET_STATUS_ACTIVE, + constants.PORT_STATUS_ACTIVE, + constants.NET_STATUS_ACTIVE, + sp=sp) + # As each resource type does not exceed the maximum page size, + # the method should be called once for each resource type + self.assertEqual(exp_calls, mock_get_page.call_count) + + def test_sync_chunk_larger_maxpagesize_no_multiple_requests(self): + # total resource size = 20 + # total size for each resource does not exceed max page size (15) + self._test_sync_with_chunk_larger_maxpagesize( + net_size=5, port_size=2, router_size=5, + chunk_size=20, exp_calls=3) + + def test_sync_chunk_larger_maxpagesize_triggers_multiple_requests(self): + # total resource size = 48 + # total size for each resource does exceed max page size (15) + self._test_sync_with_chunk_larger_maxpagesize( + net_size=16, port_size=1, router_size=16, + chunk_size=48, exp_calls=6) + + def test_sync_multi_chunk(self): + # The fake NSX API client cannot be used for this test + ctx = context.get_admin_context() + # Generate 4 networks, 1 port per network, and 4 routers + with self._populate_data(ctx, net_size=4, port_size=1, router_size=4): + fake_lswitches = json.loads( + self.fc.handle_get('/ws.v1/lswitch'))['results'] + fake_lrouters = json.loads( + self.fc.handle_get('/ws.v1/lrouter'))['results'] + fake_lswitchports = json.loads( + self.fc.handle_get('/ws.v1/lswitch/*/lport'))['results'] + return_values = [ + # Chunk 0 - lswitches + (fake_lswitches, None, 4), + # Chunk 0 - lrouters + (fake_lrouters[:2], 'xxx', 4), + # Chunk 0 - lports (size only) + ([], 'start', 4), + # Chunk 1 - lrouters (2 more) (lswitches are skipped) + (fake_lrouters[2:], None, None), + # Chunk 1 - lports + (fake_lswitchports, None, 4)] + + def fake_fetch_data(*args, **kwargs): + 
return return_values.pop(0) + + # 2 Chunks, with 6 resources each. + # 1st chunk lswitches and lrouters + # 2nd chunk lrouters and lports + # Mock _fetch_data + with mock.patch.object( + self._plugin._synchronizer, '_fetch_data', + side_effect=fake_fetch_data): + sp = sync.SyncParameters(6) + + def do_chunk(chunk_idx, ls_cursor, lr_cursor, lp_cursor): + self._plugin._synchronizer._synchronize_state(sp) + self.assertEqual(chunk_idx, sp.current_chunk) + self.assertEqual(ls_cursor, sp.ls_cursor) + self.assertEqual(lr_cursor, sp.lr_cursor) + self.assertEqual(lp_cursor, sp.lp_cursor) + + # check 1st chunk + do_chunk(1, None, 'xxx', 'start') + # check 2nd chunk + do_chunk(0, None, None, None) + # Chunk size should have stayed the same + self.assertEqual(sp.chunk_size, 6) + + def test_synchronize_network(self): + ctx = context.get_admin_context() + with self._populate_data(ctx): + # Put a network down to verify synchronization + ls_uuid = self.fc._fake_lswitch_dict.keys()[0] + q_net_id = self._get_tag_dict( + self.fc._fake_lswitch_dict[ls_uuid]['tags'])['quantum_net_id'] + self.fc._fake_lswitch_dict[ls_uuid]['status'] = 'false' + q_net_data = self._plugin._get_network(ctx, q_net_id) + self._plugin._synchronizer.synchronize_network(ctx, q_net_data) + # Reload from db + q_nets = self._plugin.get_networks(ctx) + for q_net in q_nets: + if q_net['id'] == q_net_id: + exp_status = constants.NET_STATUS_DOWN + else: + exp_status = constants.NET_STATUS_ACTIVE + self.assertEqual(exp_status, q_net['status']) + + def test_synchronize_network_not_found_in_db_no_raise(self): + ctx = context.get_admin_context() + with self._populate_data(ctx): + # Put a network down to verify synchronization + ls_uuid = self.fc._fake_lswitch_dict.keys()[0] + q_net_id = self._get_tag_dict( + self.fc._fake_lswitch_dict[ls_uuid]['tags'])['quantum_net_id'] + self.fc._fake_lswitch_dict[ls_uuid]['status'] = 'false' + q_net_data = self._plugin._get_network(ctx, q_net_id) + with mock.patch.object(self._plugin, + 
'_get_network') as _get_network: + _get_network.side_effect = n_exc.NetworkNotFound( + net_id=q_net_data['id']) + self._plugin._synchronizer.synchronize_network(ctx, q_net_data) + + def test_synchronize_network_on_get(self): + cfg.CONF.set_override('always_read_status', True, 'NSX_SYNC') + ctx = context.get_admin_context() + with self._populate_data(ctx): + # Put a network down to verify punctual synchronization + ls_uuid = self.fc._fake_lswitch_dict.keys()[0] + q_net_id = self._get_tag_dict( + self.fc._fake_lswitch_dict[ls_uuid]['tags'])['quantum_net_id'] + self.fc._fake_lswitch_dict[ls_uuid]['status'] = 'false' + q_net_data = self._plugin.get_network(ctx, q_net_id) + self.assertEqual(constants.NET_STATUS_DOWN, q_net_data['status']) + + def test_synchronize_port_not_found_in_db_no_raise(self): + ctx = context.get_admin_context() + with self._populate_data(ctx): + # Put a port down to verify synchronization + lp_uuid = self.fc._fake_lswitch_lport_dict.keys()[0] + lport = self.fc._fake_lswitch_lport_dict[lp_uuid] + q_port_id = self._get_tag_dict(lport['tags'])['q_port_id'] + lport['status'] = 'true' + q_port_data = self._plugin._get_port(ctx, q_port_id) + with mock.patch.object(self._plugin, + '_get_port') as _get_port: + _get_port.side_effect = n_exc.PortNotFound( + port_id=q_port_data['id']) + self._plugin._synchronizer.synchronize_port(ctx, q_port_data) + + def test_synchronize_port(self): + ctx = context.get_admin_context() + with self._populate_data(ctx): + # Put a port down to verify synchronization + lp_uuid = self.fc._fake_lswitch_lport_dict.keys()[0] + lport = self.fc._fake_lswitch_lport_dict[lp_uuid] + q_port_id = self._get_tag_dict(lport['tags'])['q_port_id'] + lport['status'] = 'true' + q_port_data = self._plugin._get_port(ctx, q_port_id) + self._plugin._synchronizer.synchronize_port(ctx, q_port_data) + # Reload from db + q_ports = self._plugin.get_ports(ctx) + for q_port in q_ports: + if q_port['id'] == q_port_id: + exp_status = 
constants.PORT_STATUS_ACTIVE + else: + exp_status = constants.PORT_STATUS_DOWN + self.assertEqual(exp_status, q_port['status']) + + def test_synchronize_port_on_get(self): + cfg.CONF.set_override('always_read_status', True, 'NSX_SYNC') + ctx = context.get_admin_context() + with self._populate_data(ctx): + # Put a port down to verify punctual synchronization + lp_uuid = self.fc._fake_lswitch_lport_dict.keys()[0] + lport = self.fc._fake_lswitch_lport_dict[lp_uuid] + q_port_id = self._get_tag_dict(lport['tags'])['q_port_id'] + lport['status'] = 'false' + q_port_data = self._plugin.get_port(ctx, q_port_id) + self.assertEqual(constants.PORT_STATUS_DOWN, + q_port_data['status']) + + def test_synchronize_routernot_found_in_db_no_raise(self): + ctx = context.get_admin_context() + with self._populate_data(ctx): + # Put a router down to verify synchronization + lr_uuid = self.fc._fake_lrouter_dict.keys()[0] + q_rtr_id = self._get_tag_dict( + self.fc._fake_lrouter_dict[lr_uuid]['tags'])['q_router_id'] + self.fc._fake_lrouter_dict[lr_uuid]['status'] = 'false' + q_rtr_data = self._plugin._get_router(ctx, q_rtr_id) + with mock.patch.object(self._plugin, + '_get_router') as _get_router: + _get_router.side_effect = l3.RouterNotFound( + router_id=q_rtr_data['id']) + self._plugin._synchronizer.synchronize_router(ctx, q_rtr_data) + + def test_synchronize_router(self): + ctx = context.get_admin_context() + with self._populate_data(ctx): + # Put a router down to verify synchronization + lr_uuid = self.fc._fake_lrouter_dict.keys()[0] + q_rtr_id = self._get_tag_dict( + self.fc._fake_lrouter_dict[lr_uuid]['tags'])['q_router_id'] + self.fc._fake_lrouter_dict[lr_uuid]['status'] = 'false' + q_rtr_data = self._plugin._get_router(ctx, q_rtr_id) + self._plugin._synchronizer.synchronize_router(ctx, q_rtr_data) + # Reload from db + q_routers = self._plugin.get_routers(ctx) + for q_rtr in q_routers: + if q_rtr['id'] == q_rtr_id: + exp_status = constants.NET_STATUS_DOWN + else: + exp_status = 
constants.NET_STATUS_ACTIVE + self.assertEqual(exp_status, q_rtr['status']) + + def test_synchronize_router_nsx_mapping_not_found(self): + ctx = context.get_admin_context() + with self._populate_data(ctx): + # Put a router down to verify synchronization + lr_uuid = self.fc._fake_lrouter_dict.keys()[0] + q_rtr_id = self._get_tag_dict( + self.fc._fake_lrouter_dict[lr_uuid]['tags'])['q_router_id'] + self.fc._fake_lrouter_dict[lr_uuid]['status'] = 'false' + q_rtr_data = self._plugin._get_router(ctx, q_rtr_id) + + # delete router mapping from db. + db.delete_neutron_nsx_router_mapping(ctx.session, q_rtr_id) + # pop router from fake nsx client + router_data = self.fc._fake_lrouter_dict.pop(lr_uuid) + + self._plugin._synchronizer.synchronize_router(ctx, q_rtr_data) + # Reload from db + q_routers = self._plugin.get_routers(ctx) + for q_rtr in q_routers: + if q_rtr['id'] == q_rtr_id: + exp_status = constants.NET_STATUS_ERROR + else: + exp_status = constants.NET_STATUS_ACTIVE + self.assertEqual(exp_status, q_rtr['status']) + # put the router database since we don't handle missing + # router data in the fake nsx api_client + self.fc._fake_lrouter_dict[lr_uuid] = router_data + + def test_synchronize_router_on_get(self): + cfg.CONF.set_override('always_read_status', True, 'NSX_SYNC') + ctx = context.get_admin_context() + with self._populate_data(ctx): + # Put a router down to verify punctual synchronization + lr_uuid = self.fc._fake_lrouter_dict.keys()[0] + q_rtr_id = self._get_tag_dict( + self.fc._fake_lrouter_dict[lr_uuid]['tags'])['q_router_id'] + self.fc._fake_lrouter_dict[lr_uuid]['status'] = 'false' + q_rtr_data = self._plugin.get_router(ctx, q_rtr_id) + self.assertEqual(constants.NET_STATUS_DOWN, q_rtr_data['status']) + + def test_sync_nsx_failure_backoff(self): + self.mock_api.return_value.request.side_effect = api_exc.RequestTimeout + # chunk size won't matter here + sp = sync.SyncParameters(999) + for i in range(10): + self.assertEqual( + min(64, 2 ** i), + 
self._plugin._synchronizer._synchronize_state(sp)) diff --git a/neutron/tests/unit/vmware/test_nsx_utils.py b/neutron/tests/unit/vmware/test_nsx_utils.py new file mode 100644 index 000000000..2144154ab --- /dev/null +++ b/neutron/tests/unit/vmware/test_nsx_utils.py @@ -0,0 +1,325 @@ +# Copyright (c) 2013 VMware. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import mock + +from neutron.db import api as db_api +from neutron.openstack.common import uuidutils +from neutron.plugins.vmware.api_client import exception as api_exc +from neutron.plugins.vmware.common import exceptions as nsx_exc +from neutron.plugins.vmware.common import nsx_utils +from neutron.plugins.vmware.common import utils +from neutron.plugins.vmware import nsxlib +from neutron.tests import base +from neutron.tests.unit import vmware +from neutron.tests.unit.vmware.nsxlib import base as nsx_base + + +class NsxUtilsTestCase(base.BaseTestCase): + + def _mock_port_mapping_db_calls(self, ret_value): + # Mock relevant db calls + # This will allow for avoiding setting up the plugin + # for creating db entries + mock.patch(vmware.nsx_method('get_nsx_switch_and_port_id', + module_name='dbexts.db'), + return_value=ret_value).start() + mock.patch(vmware.nsx_method('add_neutron_nsx_port_mapping', + module_name='dbexts.db')).start() + mock.patch(vmware.nsx_method('delete_neutron_nsx_port_mapping', + module_name='dbexts.db')).start() + + def _mock_network_mapping_db_calls(self, ret_value): + # Mock relevant db 
calls + # This will allow for avoiding setting up the plugin + # for creating db entries + mock.patch(vmware.nsx_method('get_nsx_switch_ids', + module_name='dbexts.db'), + return_value=ret_value).start() + mock.patch(vmware.nsx_method('add_neutron_nsx_network_mapping', + module_name='dbexts.db')).start() + + def _mock_router_mapping_db_calls(self, ret_value): + # Mock relevant db calls + # This will allow for avoiding setting up the plugin + # for creating db entries + mock.patch(vmware.nsx_method('get_nsx_router_id', + module_name='dbexts.db'), + return_value=ret_value).start() + mock.patch(vmware.nsx_method('add_neutron_nsx_router_mapping', + module_name='dbexts.db')).start() + + def _verify_get_nsx_switch_and_port_id(self, exp_ls_uuid, exp_lp_uuid): + # The nsxlib and db calls are mocked, therefore the cluster + # and the neutron_port_id parameters can be set to None + ls_uuid, lp_uuid = nsx_utils.get_nsx_switch_and_port_id( + db_api.get_session(), None, None) + self.assertEqual(exp_ls_uuid, ls_uuid) + self.assertEqual(exp_lp_uuid, lp_uuid) + + def _verify_get_nsx_switch_ids(self, exp_ls_uuids): + # The nsxlib and db calls are mocked, therefore the cluster + # and the neutron_router_id parameters can be set to None + ls_uuids = nsx_utils.get_nsx_switch_ids( + db_api.get_session(), None, None) + for ls_uuid in ls_uuids or []: + self.assertIn(ls_uuid, exp_ls_uuids) + exp_ls_uuids.remove(ls_uuid) + self.assertFalse(exp_ls_uuids) + + def _verify_get_nsx_router_id(self, exp_lr_uuid): + # The nsxlib and db calls are mocked, therefore the cluster + # and the neutron_router_id parameters can be set to None + lr_uuid = nsx_utils.get_nsx_router_id(db_api.get_session(), None, None) + self.assertEqual(exp_lr_uuid, lr_uuid) + + def test_get_nsx_switch_and_port_id_from_db_mappings(self): + # This test is representative of the 'standard' case in which both the + # switch and the port mappings were stored in the neutron db + exp_ls_uuid = uuidutils.generate_uuid() + exp_lp_uuid 
= uuidutils.generate_uuid() + ret_value = exp_ls_uuid, exp_lp_uuid + self._mock_port_mapping_db_calls(ret_value) + self._verify_get_nsx_switch_and_port_id(exp_ls_uuid, exp_lp_uuid) + + def test_get_nsx_switch_and_port_id_only_port_db_mapping(self): + # This test is representative of the case in which a port with a nsx + # db mapping in the havana db was upgraded to icehouse + exp_ls_uuid = uuidutils.generate_uuid() + exp_lp_uuid = uuidutils.generate_uuid() + ret_value = None, exp_lp_uuid + self._mock_port_mapping_db_calls(ret_value) + with mock.patch(vmware.nsx_method('query_lswitch_lports', + module_name='nsxlib.switch'), + return_value=[{'uuid': exp_lp_uuid, + '_relations': { + 'LogicalSwitchConfig': { + 'uuid': exp_ls_uuid} + }}]): + self._verify_get_nsx_switch_and_port_id(exp_ls_uuid, exp_lp_uuid) + + def test_get_nsx_switch_and_port_id_no_db_mapping(self): + # This test is representative of the case where db mappings where not + # found for a given port identifier + exp_ls_uuid = uuidutils.generate_uuid() + exp_lp_uuid = uuidutils.generate_uuid() + ret_value = None, None + self._mock_port_mapping_db_calls(ret_value) + with mock.patch(vmware.nsx_method('query_lswitch_lports', + module_name='nsxlib.switch'), + return_value=[{'uuid': exp_lp_uuid, + '_relations': { + 'LogicalSwitchConfig': { + 'uuid': exp_ls_uuid} + }}]): + self._verify_get_nsx_switch_and_port_id(exp_ls_uuid, exp_lp_uuid) + + def test_get_nsx_switch_and_port_id_no_mappings_returns_none(self): + # This test verifies that the function return (None, None) if the + # mappings are not found both in the db and the backend + ret_value = None, None + self._mock_port_mapping_db_calls(ret_value) + with mock.patch(vmware.nsx_method('query_lswitch_lports', + module_name='nsxlib.switch'), + return_value=[]): + self._verify_get_nsx_switch_and_port_id(None, None) + + def test_get_nsx_switch_ids_from_db_mappings(self): + # This test is representative of the 'standard' case in which the + # lswitch mappings were 
stored in the neutron db + exp_ls_uuids = [uuidutils.generate_uuid()] + self._mock_network_mapping_db_calls(exp_ls_uuids) + self._verify_get_nsx_switch_ids(exp_ls_uuids) + + def test_get_nsx_switch_ids_no_db_mapping(self): + # This test is representative of the case where db mappings where not + # found for a given network identifier + exp_ls_uuids = [uuidutils.generate_uuid()] + self._mock_network_mapping_db_calls(None) + with mock.patch(vmware.nsx_method('get_lswitches', + module_name='nsxlib.switch'), + return_value=[{'uuid': uuid} + for uuid in exp_ls_uuids]): + self._verify_get_nsx_switch_ids(exp_ls_uuids) + + def test_get_nsx_switch_ids_no_mapping_returns_None(self): + # This test verifies that the function returns None if the mappings + # are not found both in the db and in the backend + self._mock_network_mapping_db_calls(None) + with mock.patch(vmware.nsx_method('get_lswitches', + module_name='nsxlib.switch'), + return_value=[]): + self._verify_get_nsx_switch_ids(None) + + def test_get_nsx_router_id_from_db_mappings(self): + # This test is representative of the 'standard' case in which the + # router mapping was stored in the neutron db + exp_lr_uuid = uuidutils.generate_uuid() + self._mock_router_mapping_db_calls(exp_lr_uuid) + self._verify_get_nsx_router_id(exp_lr_uuid) + + def test_get_nsx_router_id_no_db_mapping(self): + # This test is representative of the case where db mappings where not + # found for a given port identifier + exp_lr_uuid = uuidutils.generate_uuid() + self._mock_router_mapping_db_calls(None) + with mock.patch(vmware.nsx_method('query_lrouters', + module_name='nsxlib.router'), + return_value=[{'uuid': exp_lr_uuid}]): + self._verify_get_nsx_router_id(exp_lr_uuid) + + def test_get_nsx_router_id_no_mapping_returns_None(self): + # This test verifies that the function returns None if the mapping + # are not found both in the db and in the backend + self._mock_router_mapping_db_calls(None) + with 
mock.patch(vmware.nsx_method('query_lrouters', + module_name='nsxlib.router'), + return_value=[]): + self._verify_get_nsx_router_id(None) + + def test_check_and_truncate_name_with_none(self): + name = None + result = utils.check_and_truncate(name) + self.assertEqual('', result) + + def test_check_and_truncate_name_with_short_name(self): + name = 'foo_port_name' + result = utils.check_and_truncate(name) + self.assertEqual(name, result) + + def test_check_and_truncate_name_long_name(self): + name = 'this_is_a_port_whose_name_is_longer_than_40_chars' + result = utils.check_and_truncate(name) + self.assertEqual(len(result), utils.MAX_DISPLAY_NAME_LEN) + + def test_build_uri_path_plain(self): + result = nsxlib._build_uri_path('RESOURCE') + self.assertEqual("%s/%s" % (nsxlib.URI_PREFIX, 'RESOURCE'), result) + + def test_build_uri_path_with_field(self): + result = nsxlib._build_uri_path('RESOURCE', fields='uuid') + expected = "%s/%s?fields=uuid" % (nsxlib.URI_PREFIX, 'RESOURCE') + self.assertEqual(expected, result) + + def test_build_uri_path_with_filters(self): + filters = {"tag": 'foo', "tag_scope": "scope_foo"} + result = nsxlib._build_uri_path('RESOURCE', filters=filters) + expected = ( + "%s/%s?tag_scope=scope_foo&tag=foo" % + (nsxlib.URI_PREFIX, 'RESOURCE')) + self.assertEqual(expected, result) + + def test_build_uri_path_with_resource_id(self): + res = 'RESOURCE' + res_id = 'resource_id' + result = nsxlib._build_uri_path(res, resource_id=res_id) + expected = "%s/%s/%s" % (nsxlib.URI_PREFIX, res, res_id) + self.assertEqual(expected, result) + + def test_build_uri_path_with_parent_and_resource_id(self): + parent_res = 'RESOURCE_PARENT' + child_res = 'RESOURCE_CHILD' + res = '%s/%s' % (child_res, parent_res) + par_id = 'parent_resource_id' + res_id = 'resource_id' + result = nsxlib._build_uri_path( + res, parent_resource_id=par_id, resource_id=res_id) + expected = ("%s/%s/%s/%s/%s" % + (nsxlib.URI_PREFIX, parent_res, par_id, child_res, res_id)) + 
self.assertEqual(expected, result) + + def test_build_uri_path_with_attachment(self): + parent_res = 'RESOURCE_PARENT' + child_res = 'RESOURCE_CHILD' + res = '%s/%s' % (child_res, parent_res) + par_id = 'parent_resource_id' + res_id = 'resource_id' + result = nsxlib._build_uri_path(res, parent_resource_id=par_id, + resource_id=res_id, is_attachment=True) + expected = ("%s/%s/%s/%s/%s/%s" % + (nsxlib.URI_PREFIX, parent_res, + par_id, child_res, res_id, 'attachment')) + self.assertEqual(expected, result) + + def test_build_uri_path_with_extra_action(self): + parent_res = 'RESOURCE_PARENT' + child_res = 'RESOURCE_CHILD' + res = '%s/%s' % (child_res, parent_res) + par_id = 'parent_resource_id' + res_id = 'resource_id' + result = nsxlib._build_uri_path(res, parent_resource_id=par_id, + resource_id=res_id, extra_action='doh') + expected = ("%s/%s/%s/%s/%s/%s" % + (nsxlib.URI_PREFIX, parent_res, + par_id, child_res, res_id, 'doh')) + self.assertEqual(expected, result) + + def _mock_sec_group_mapping_db_calls(self, ret_value): + mock.patch(vmware.nsx_method('get_nsx_security_group_id', + module_name='dbexts.db'), + return_value=ret_value).start() + mock.patch(vmware.nsx_method('add_neutron_nsx_security_group_mapping', + module_name='dbexts.db')).start() + + def _verify_get_nsx_sec_profile_id(self, exp_sec_prof_uuid): + # The nsxlib and db calls are mocked, therefore the cluster + # and the neutron_id parameters can be set to None + sec_prof_uuid = nsx_utils.get_nsx_security_group_id( + db_api.get_session(), None, None) + self.assertEqual(exp_sec_prof_uuid, sec_prof_uuid) + + def test_get_nsx_sec_profile_id_from_db_mappings(self): + # This test is representative of the 'standard' case in which the + # security group mapping was stored in the neutron db + exp_sec_prof_uuid = uuidutils.generate_uuid() + self._mock_sec_group_mapping_db_calls(exp_sec_prof_uuid) + self._verify_get_nsx_sec_profile_id(exp_sec_prof_uuid) + + def test_get_nsx_sec_profile_id_no_db_mapping(self): + # 
This test is representative of the case where db mappings where not + # found for a given security profile identifier + exp_sec_prof_uuid = uuidutils.generate_uuid() + self._mock_sec_group_mapping_db_calls(None) + with mock.patch(vmware.nsx_method('query_security_profiles', + module_name='nsxlib.secgroup'), + return_value=[{'uuid': exp_sec_prof_uuid}]): + self._verify_get_nsx_sec_profile_id(exp_sec_prof_uuid) + + def test_get_nsx_sec_profile_id_no_mapping_returns_None(self): + # This test verifies that the function returns None if the mapping + # are not found both in the db and in the backend + self._mock_sec_group_mapping_db_calls(None) + with mock.patch(vmware.nsx_method('query_security_profiles', + module_name='nsxlib.secgroup'), + return_value=[]): + self._verify_get_nsx_sec_profile_id(None) + + +class ClusterManagementTestCase(nsx_base.NsxlibTestCase): + + def test_cluster_in_readonly_mode(self): + with mock.patch.object(self.fake_cluster.api_client, + 'request', + side_effect=api_exc.ReadOnlyMode): + self.assertRaises(nsx_exc.MaintenanceInProgress, + nsxlib.do_request, cluster=self.fake_cluster) + + def test_cluster_method_not_implemented(self): + self.assertRaises(api_exc.NsxApiException, + nsxlib.do_request, + nsxlib.HTTP_GET, + nsxlib._build_uri_path('MY_FAKE_RESOURCE', + resource_id='foo'), + cluster=self.fake_cluster) diff --git a/neutron/tests/unit/vmware/vshield/__init__.py b/neutron/tests/unit/vmware/vshield/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/neutron/tests/unit/vmware/vshield/fake_vcns.py b/neutron/tests/unit/vmware/vshield/fake_vcns.py new file mode 100644 index 000000000..2c9aa6162 --- /dev/null +++ b/neutron/tests/unit/vmware/vshield/fake_vcns.py @@ -0,0 +1,600 @@ +# Copyright 2013 VMware, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import copy + +from neutron.openstack.common import jsonutils as json +from neutron.openstack.common import uuidutils +from neutron.plugins.vmware.vshield.common import exceptions + + +class FakeVcns(object): + + errors = { + 303: exceptions.ResourceRedirect, + 400: exceptions.RequestBad, + 403: exceptions.Forbidden, + 404: exceptions.ResourceNotFound, + 415: exceptions.MediaTypeUnsupport, + 503: exceptions.ServiceUnavailable + } + + def __init__(self, unique_router_name=True): + self._jobs = {} + self._job_idx = 0 + self._edges = {} + self._edge_idx = 0 + self._lswitches = {} + self._unique_router_name = unique_router_name + self._fake_nsx_api = None + self.fake_firewall_dict = {} + self.temp_firewall = { + "firewallRules": { + "firewallRules": [] + } + } + self.fake_ipsecvpn_dict = {} + self.temp_ipsecvpn = { + 'featureType': "ipsec_4.0", + 'enabled': True, + 'sites': {'sites': []}} + self._fake_virtualservers_dict = {} + self._fake_pools_dict = {} + self._fake_monitors_dict = {} + self._fake_app_profiles_dict = {} + self._fake_loadbalancer_config = {} + + def set_fake_nsx_api(self, fake_nsx_api): + self._fake_nsx_api = fake_nsx_api + + def _validate_edge_name(self, name): + for edge_id, edge in self._edges.iteritems(): + if edge['name'] == name: + return False + return True + + def deploy_edge(self, request): + if (self._unique_router_name and + not self._validate_edge_name(request['name'])): + header = { + 'status': 400 + } + msg = ('Edge name should be unique for tenant. 
Edge %s ' + 'already exists for default tenant.') % request['name'] + response = { + 'details': msg, + 'errorCode': 10085, + 'rootCauseString': None, + 'moduleName': 'vShield Edge', + 'errorData': None + } + return (header, json.dumps(response)) + + self._job_idx = self._job_idx + 1 + job_id = "jobdata-%d" % self._job_idx + self._edge_idx = self._edge_idx + 1 + edge_id = "edge-%d" % self._edge_idx + self._jobs[job_id] = edge_id + self._edges[edge_id] = { + 'name': request['name'], + 'request': request, + 'nat_rules': None, + 'nat_rule_id': 0 + } + header = { + 'status': 200, + 'location': 'https://host/api/4.0/jobs/%s' % job_id + } + response = '' + return (header, response) + + def get_edge_id(self, job_id): + if job_id not in self._jobs: + raise Exception(_("Job %s does not nexist") % job_id) + + header = { + 'status': 200 + } + response = { + 'edgeId': self._jobs[job_id] + } + return (header, response) + + def get_edge_deploy_status(self, edge_id): + if edge_id not in self._edges: + raise Exception(_("Edge %s does not exist") % edge_id) + header = { + 'status': 200, + } + response = { + 'systemStatus': 'good' + } + return (header, response) + + def delete_edge(self, edge_id): + if edge_id not in self._edges: + raise Exception(_("Edge %s does not exist") % edge_id) + del self._edges[edge_id] + header = { + 'status': 200 + } + response = '' + return (header, response) + + def update_interface(self, edge_id, vnic): + header = { + 'status': 200 + } + response = '' + return (header, response) + + def get_nat_config(self, edge_id): + if edge_id not in self._edges: + raise Exception(_("Edge %s does not exist") % edge_id) + edge = self._edges[edge_id] + rules = edge['nat_rules'] + if rules is None: + rules = { + 'rules': { + 'natRulesDtos': [] + }, + 'version': 1 + } + header = { + 'status': 200 + } + rules['version'] = 1 + return (header, rules) + + def update_nat_config(self, edge_id, nat): + if edge_id not in self._edges: + raise Exception(_("Edge %s does not exist") 
% edge_id) + edge = self._edges[edge_id] + max_rule_id = edge['nat_rule_id'] + rules = copy.deepcopy(nat) + for rule in rules['rules']['natRulesDtos']: + rule_id = rule.get('ruleId', 0) + if rule_id > max_rule_id: + max_rule_id = rule_id + for rule in rules['rules']['natRulesDtos']: + if 'ruleId' not in rule: + max_rule_id = max_rule_id + 1 + rule['ruleId'] = max_rule_id + edge['nat_rules'] = rules + edge['nat_rule_id'] = max_rule_id + header = { + 'status': 200 + } + response = '' + return (header, response) + + def delete_nat_rule(self, edge_id, rule_id): + if edge_id not in self._edges: + raise Exception(_("Edge %s does not exist") % edge_id) + + edge = self._edges[edge_id] + rules = edge['nat_rules'] + rule_to_delete = None + for rule in rules['rules']['natRulesDtos']: + if rule_id == rule['ruleId']: + rule_to_delete = rule + break + if rule_to_delete is None: + raise Exception(_("Rule id %d doest not exist") % rule_id) + + rules['rules']['natRulesDtos'].remove(rule_to_delete) + + header = { + 'status': 200 + } + response = '' + return (header, response) + + def get_edge_status(self, edge_id): + if edge_id not in self._edges: + raise Exception(_("Edge %s does not exist") % edge_id) + + header = { + 'status': 200 + } + response = { + 'edgeStatus': 'GREEN' + } + return (header, response) + + def get_edges(self): + header = { + 'status': 200 + } + edges = [] + for edge_id in self._edges: + edges.append({ + 'id': edge_id, + 'edgeStatus': 'GREEN' + }) + response = { + 'edgePage': { + 'data': edges + } + } + return (header, response) + + def update_routes(self, edge_id, routes): + header = { + 'status': 200 + } + response = '' + return (header, response) + + def create_lswitch(self, lsconfig): + # The lswitch is created via VCNS API so the fake nsx_api will not + # see it. Added to fake nsx_api here. 
+ if self._fake_nsx_api: + lswitch = self._fake_nsx_api._add_lswitch(json.dumps(lsconfig)) + else: + lswitch = lsconfig + lswitch['uuid'] = uuidutils.generate_uuid() + self._lswitches[lswitch['uuid']] = lswitch + header = { + 'status': 200 + } + lswitch['_href'] = '/api/ws.v1/lswitch/%s' % lswitch['uuid'] + return (header, lswitch) + + def delete_lswitch(self, id): + if id not in self._lswitches: + raise Exception(_("Lswitch %s does not exist") % id) + del self._lswitches[id] + if self._fake_nsx_api: + # TODO(fank): fix the hack + del self._fake_nsx_api._fake_lswitch_dict[id] + header = { + 'status': 200 + } + response = '' + return (header, response) + + def update_firewall(self, edge_id, fw_req): + self.fake_firewall_dict[edge_id] = fw_req + rules = self.fake_firewall_dict[edge_id][ + 'firewallRules']['firewallRules'] + index = 10 + for rule in rules: + rule['ruleId'] = index + index += 10 + header = {'status': 204} + response = "" + return self.return_helper(header, response) + + def delete_firewall(self, edge_id): + header = {'status': 404} + if edge_id in self.fake_firewall_dict: + header = {'status': 204} + del self.fake_firewall_dict[edge_id] + response = "" + return self.return_helper(header, response) + + def update_firewall_rule(self, edge_id, vcns_rule_id, fwr_req): + if edge_id not in self.fake_firewall_dict: + raise Exception(_("Edge %s does not exist") % edge_id) + header = {'status': 404} + rules = self.fake_firewall_dict[edge_id][ + 'firewallRules']['firewallRules'] + for rule in rules: + if rule['ruleId'] == int(vcns_rule_id): + header['status'] = 204 + rule.update(fwr_req) + break + response = "" + return self.return_helper(header, response) + + def delete_firewall_rule(self, edge_id, vcns_rule_id): + if edge_id not in self.fake_firewall_dict: + raise Exception(_("Edge %s does not exist") % edge_id) + header = {'status': 404} + rules = self.fake_firewall_dict[edge_id][ + 'firewallRules']['firewallRules'] + for index in range(len(rules)): + if 
rules[index]['ruleId'] == int(vcns_rule_id): + header['status'] = 204 + del rules[index] + break + response = "" + return self.return_helper(header, response) + + def add_firewall_rule_above(self, edge_id, ref_vcns_rule_id, fwr_req): + if edge_id not in self.fake_firewall_dict: + raise Exception(_("Edge %s does not exist") % edge_id) + header = {'status': 404} + rules = self.fake_firewall_dict[edge_id][ + 'firewallRules']['firewallRules'] + pre = 0 + for index in range(len(rules)): + if rules[index]['ruleId'] == int(ref_vcns_rule_id): + rules.insert(index, fwr_req) + rules[index]['ruleId'] = (int(ref_vcns_rule_id) + pre) / 2 + header = { + 'status': 204, + 'location': "https://host/api/4.0/edges/edge_id/firewall" + "/config/rules/%s" % rules[index]['ruleId']} + break + pre = int(rules[index]['ruleId']) + response = "" + return self.return_helper(header, response) + + def add_firewall_rule(self, edge_id, fwr_req): + if edge_id not in self.fake_firewall_dict: + self.fake_firewall_dict[edge_id] = self.temp_firewall + rules = self.fake_firewall_dict[edge_id][ + 'firewallRules']['firewallRules'] + rules.append(fwr_req) + index = len(rules) + rules[index - 1]['ruleId'] = index * 10 + header = { + 'status': 204, + 'location': "https://host/api/4.0/edges/edge_id/firewall" + "/config/rules/%s" % rules[index - 1]['ruleId']} + response = "" + return self.return_helper(header, response) + + def get_firewall(self, edge_id): + if edge_id not in self.fake_firewall_dict: + self.fake_firewall_dict[edge_id] = self.temp_firewall + header = {'status': 204} + response = self.fake_firewall_dict[edge_id] + return self.return_helper(header, response) + + def get_firewall_rule(self, edge_id, vcns_rule_id): + if edge_id not in self.fake_firewall_dict: + raise Exception(_("Edge %s does not exist") % edge_id) + header = {'status': 404} + response = "" + rules = self.fake_firewall_dict[edge_id][ + 'firewallRules']['firewallRules'] + for rule in rules: + if rule['ruleId'] == int(vcns_rule_id): 
+ header['status'] = 204 + response = rule + break + return self.return_helper(header, response) + + def is_name_unique(self, objs_dict, name): + return name not in [obj_dict['name'] + for obj_dict in objs_dict.values()] + + def create_vip(self, edge_id, vip_new): + header = {'status': 403} + response = "" + if not self._fake_virtualservers_dict.get(edge_id): + self._fake_virtualservers_dict[edge_id] = {} + if not self.is_name_unique(self._fake_virtualservers_dict[edge_id], + vip_new['name']): + return self.return_helper(header, response) + vip_vseid = uuidutils.generate_uuid() + self._fake_virtualservers_dict[edge_id][vip_vseid] = vip_new + header = { + 'status': 204, + 'location': "https://host/api/4.0/edges/edge_id" + "/loadbalancer/config/%s" % vip_vseid} + return self.return_helper(header, response) + + def get_vip(self, edge_id, vip_vseid): + header = {'status': 404} + response = "" + if not self._fake_virtualservers_dict.get(edge_id) or ( + not self._fake_virtualservers_dict[edge_id].get(vip_vseid)): + return self.return_helper(header, response) + header = {'status': 204} + response = self._fake_virtualservers_dict[edge_id][vip_vseid] + return self.return_helper(header, response) + + def update_vip(self, edge_id, vip_vseid, vip_new): + header = {'status': 404} + response = "" + if not self._fake_virtualservers_dict.get(edge_id) or ( + not self._fake_virtualservers_dict[edge_id].get(vip_vseid)): + return self.return_helper(header, response) + header = {'status': 204} + self._fake_virtualservers_dict[edge_id][vip_vseid].update( + vip_new) + return self.return_helper(header, response) + + def delete_vip(self, edge_id, vip_vseid): + header = {'status': 404} + response = "" + if not self._fake_virtualservers_dict.get(edge_id) or ( + not self._fake_virtualservers_dict[edge_id].get(vip_vseid)): + return self.return_helper(header, response) + header = {'status': 204} + del self._fake_virtualservers_dict[edge_id][vip_vseid] + return self.return_helper(header, 
response) + + def create_pool(self, edge_id, pool_new): + header = {'status': 403} + response = "" + if not self._fake_pools_dict.get(edge_id): + self._fake_pools_dict[edge_id] = {} + if not self.is_name_unique(self._fake_pools_dict[edge_id], + pool_new['name']): + return self.return_helper(header, response) + pool_vseid = uuidutils.generate_uuid() + self._fake_pools_dict[edge_id][pool_vseid] = pool_new + header = { + 'status': 204, + 'location': "https://host/api/4.0/edges/edge_id" + "/loadbalancer/config/%s" % pool_vseid} + return self.return_helper(header, response) + + def get_pool(self, edge_id, pool_vseid): + header = {'status': 404} + response = "" + if not self._fake_pools_dict.get(edge_id) or ( + not self._fake_pools_dict[edge_id].get(pool_vseid)): + return self.return_helper(header, response) + header = {'status': 204} + response = self._fake_pools_dict[edge_id][pool_vseid] + return self.return_helper(header, response) + + def update_pool(self, edge_id, pool_vseid, pool_new): + header = {'status': 404} + response = "" + if not self._fake_pools_dict.get(edge_id) or ( + not self._fake_pools_dict[edge_id].get(pool_vseid)): + return self.return_helper(header, response) + header = {'status': 204} + self._fake_pools_dict[edge_id][pool_vseid].update( + pool_new) + return self.return_helper(header, response) + + def delete_pool(self, edge_id, pool_vseid): + header = {'status': 404} + response = "" + if not self._fake_pools_dict.get(edge_id) or ( + not self._fake_pools_dict[edge_id].get(pool_vseid)): + return self.return_helper(header, response) + header = {'status': 204} + del self._fake_pools_dict[edge_id][pool_vseid] + return self.return_helper(header, response) + + def create_health_monitor(self, edge_id, monitor_new): + if not self._fake_monitors_dict.get(edge_id): + self._fake_monitors_dict[edge_id] = {} + monitor_vseid = uuidutils.generate_uuid() + self._fake_monitors_dict[edge_id][monitor_vseid] = monitor_new + header = { + 'status': 204, + 'location': 
"https://host/api/4.0/edges/edge_id" + "/loadbalancer/config/%s" % monitor_vseid} + response = "" + return self.return_helper(header, response) + + def get_health_monitor(self, edge_id, monitor_vseid): + header = {'status': 404} + response = "" + if not self._fake_monitors_dict.get(edge_id) or ( + not self._fake_monitors_dict[edge_id].get(monitor_vseid)): + return self.return_helper(header, response) + header = {'status': 204} + response = self._fake_monitors_dict[edge_id][monitor_vseid] + return self.return_helper(header, response) + + def update_health_monitor(self, edge_id, monitor_vseid, monitor_new): + header = {'status': 404} + response = "" + if not self._fake_monitors_dict.get(edge_id) or ( + not self._fake_monitors_dict[edge_id].get(monitor_vseid)): + return self.return_helper(header, response) + header = {'status': 204} + self._fake_monitors_dict[edge_id][monitor_vseid].update( + monitor_new) + return self.return_helper(header, response) + + def delete_health_monitor(self, edge_id, monitor_vseid): + header = {'status': 404} + response = "" + if not self._fake_monitors_dict.get(edge_id) or ( + not self._fake_monitors_dict[edge_id].get(monitor_vseid)): + return self.return_helper(header, response) + header = {'status': 204} + del self._fake_monitors_dict[edge_id][monitor_vseid] + return self.return_helper(header, response) + + def create_app_profile(self, edge_id, app_profile): + if not self._fake_app_profiles_dict.get(edge_id): + self._fake_app_profiles_dict[edge_id] = {} + app_profileid = uuidutils.generate_uuid() + self._fake_app_profiles_dict[edge_id][app_profileid] = app_profile + header = { + 'status': 204, + 'location': "https://host/api/4.0/edges/edge_id" + "/loadbalancer/config/%s" % app_profileid} + response = "" + return self.return_helper(header, response) + + def update_app_profile(self, edge_id, app_profileid, app_profile): + header = {'status': 404} + response = "" + if not self._fake_app_profiles_dict.get(edge_id) or ( + not 
self._fake_app_profiles_dict[edge_id].get(app_profileid)): + return self.return_helper(header, response) + header = {'status': 204} + self._fake_app_profiles_dict[edge_id][app_profileid].update( + app_profile) + return self.return_helper(header, response) + + def delete_app_profile(self, edge_id, app_profileid): + header = {'status': 404} + response = "" + if not self._fake_app_profiles_dict.get(edge_id) or ( + not self._fake_app_profiles_dict[edge_id].get(app_profileid)): + return self.return_helper(header, response) + header = {'status': 204} + del self._fake_app_profiles_dict[edge_id][app_profileid] + return self.return_helper(header, response) + + def get_loadbalancer_config(self, edge_id): + header = {'status': 204} + response = {'config': False} + if self._fake_loadbalancer_config[edge_id]: + response['config'] = self._fake_loadbalancer_config[edge_id] + return self.return_helper(header, response) + + def update_ipsec_config(self, edge_id, ipsec_config): + self.fake_ipsecvpn_dict[edge_id] = ipsec_config + header = {'status': 204} + response = "" + return self.return_helper(header, response) + + def delete_ipsec_config(self, edge_id): + header = {'status': 404} + if edge_id in self.fake_ipsecvpn_dict: + header = {'status': 204} + del self.fake_ipsecvpn_dict[edge_id] + response = "" + return self.return_helper(header, response) + + def get_ipsec_config(self, edge_id): + if edge_id not in self.fake_ipsecvpn_dict: + self.fake_ipsecvpn_dict[edge_id] = self.temp_ipsecvpn + header = {'status': 204} + response = self.fake_ipsecvpn_dict[edge_id] + return self.return_helper(header, response) + + def enable_service_loadbalancer(self, edge_id, config): + header = {'status': 204} + response = "" + self._fake_loadbalancer_config[edge_id] = True + return self.return_helper(header, response) + + def return_helper(self, header, response): + status = int(header['status']) + if 200 <= status <= 300: + return (header, response) + if status in self.errors: + cls = 
self.errors[status] + else: + cls = exceptions.VcnsApiException + raise cls( + status=status, header=header, uri='fake_url', response=response) + + def reset_all(self): + self._jobs.clear() + self._edges.clear() + self._lswitches.clear() + self.fake_firewall_dict = {} + self._fake_virtualservers_dict = {} + self._fake_pools_dict = {} + self._fake_monitors_dict = {} + self._fake_app_profiles_dict = {} + self._fake_loadbalancer_config = {} diff --git a/neutron/tests/unit/vmware/vshield/test_edge_router.py b/neutron/tests/unit/vmware/vshield/test_edge_router.py new file mode 100644 index 000000000..e19de2502 --- /dev/null +++ b/neutron/tests/unit/vmware/vshield/test_edge_router.py @@ -0,0 +1,308 @@ +# Copyright (c) 2013 OpenStack Foundation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import copy + +from eventlet import greenthread +import mock +from oslo.config import cfg + +from neutron.api.v2 import attributes +from neutron import context +from neutron.db import l3_db +from neutron.extensions import l3 +from neutron import manager as n_manager +from neutron.openstack.common import uuidutils +from neutron.plugins.common import constants as service_constants +from neutron.plugins.vmware.common import utils +from neutron.plugins.vmware.plugins import service as nsp +from neutron.tests import base +from neutron.tests.unit import test_l3_plugin +from neutron.tests.unit import vmware +from neutron.tests.unit.vmware import test_nsx_plugin +from neutron.tests.unit.vmware.vshield import fake_vcns + +_uuid = uuidutils.generate_uuid + + +class ServiceRouterTestExtensionManager(object): + + def get_resources(self): + # If l3 resources have been loaded and updated by main API + # router, update the map in the l3 extension so it will load + # the same attributes as the API router + l3_attr_map = copy.deepcopy(l3.RESOURCE_ATTRIBUTE_MAP) + for res in l3.RESOURCE_ATTRIBUTE_MAP.keys(): + attr_info = attributes.RESOURCE_ATTRIBUTE_MAP.get(res) + if attr_info: + l3.RESOURCE_ATTRIBUTE_MAP[res] = attr_info + resources = l3.L3.get_resources() + # restore the original resources once the controllers are created + l3.RESOURCE_ATTRIBUTE_MAP = l3_attr_map + + return resources + + def get_actions(self): + return [] + + def get_request_extensions(self): + return [] + + +class ServiceRouterTest(test_nsx_plugin.L3NatTest, + test_l3_plugin.L3NatTestCaseMixin): + + def vcns_patch(self): + instance = self.mock_vcns.start() + self.vcns_instance = instance + instance.return_value.deploy_edge.side_effect = self.fc2.deploy_edge + instance.return_value.get_edge_id.side_effect = self.fc2.get_edge_id + instance.return_value.get_edge_deploy_status.side_effect = ( + self.fc2.get_edge_deploy_status) + instance.return_value.delete_edge.side_effect = self.fc2.delete_edge + 
instance.return_value.update_interface.side_effect = ( + self.fc2.update_interface) + instance.return_value.get_nat_config.side_effect = ( + self.fc2.get_nat_config) + instance.return_value.update_nat_config.side_effect = ( + self.fc2.update_nat_config) + instance.return_value.delete_nat_rule.side_effect = ( + self.fc2.delete_nat_rule) + instance.return_value.get_edge_status.side_effect = ( + self.fc2.get_edge_status) + instance.return_value.get_edges.side_effect = self.fc2.get_edges + instance.return_value.update_routes.side_effect = ( + self.fc2.update_routes) + instance.return_value.create_lswitch.side_effect = ( + self.fc2.create_lswitch) + instance.return_value.delete_lswitch.side_effect = ( + self.fc2.delete_lswitch) + instance.return_value.get_loadbalancer_config.side_effect = ( + self.fc2.get_loadbalancer_config) + instance.return_value.enable_service_loadbalancer.side_effect = ( + self.fc2.enable_service_loadbalancer) + + def setUp(self, ext_mgr=None, service_plugins=None): + cfg.CONF.set_override('api_extensions_path', vmware.NSXEXT_PATH) + cfg.CONF.set_override('task_status_check_interval', 200, group="vcns") + + # vcns does not support duplicated router name, ignore router name + # validation for unit-test cases + self.fc2 = fake_vcns.FakeVcns(unique_router_name=False) + self.mock_vcns = mock.patch(vmware.VCNS_NAME, autospec=True) + self.vcns_patch() + mock_proxy = mock.patch( + "%s.%s" % (vmware.SERVICE_PLUGIN_NAME, + '_set_create_lswitch_proxy')) + mock_proxy.start() + + ext_mgr = ext_mgr or ServiceRouterTestExtensionManager() + super(ServiceRouterTest, self).setUp( + plugin=vmware.SERVICE_PLUGIN_NAME, + service_plugins=service_plugins, + ext_mgr=ext_mgr) + + self.fc2.set_fake_nsx_api(self.fc) + self.addCleanup(self.fc2.reset_all) + + def tearDown(self): + plugin = n_manager.NeutronManager.get_plugin() + manager = plugin.vcns_driver.task_manager + # wait max ~10 seconds for all tasks to be finished + for i in range(100): + if not 
manager.has_pending_task(): + break + greenthread.sleep(0.1) + if manager.has_pending_task(): + manager.show_pending_tasks() + raise Exception(_("Tasks not completed")) + manager.stop() + # Ensure the manager thread has been stopped + self.assertIsNone(manager._thread) + super(ServiceRouterTest, self).tearDown() + + def _create_router(self, fmt, tenant_id, name=None, + admin_state_up=None, set_context=False, + arg_list=None, **kwargs): + data = {'router': {'tenant_id': tenant_id}} + if name: + data['router']['name'] = name + if admin_state_up: + data['router']['admin_state_up'] = admin_state_up + for arg in (('admin_state_up', 'tenant_id') + (arg_list or ())): + # Arg must be present + if arg in kwargs: + data['router'][arg] = kwargs[arg] + if data['router'].get('service_router') is None: + data['router']['service_router'] = True + router_req = self.new_create_request('routers', data, fmt) + if set_context and tenant_id: + # create a specific auth context for this request + router_req.environ['neutron.context'] = context.Context( + '', tenant_id) + + return router_req.get_response(self.ext_api) + + def _create_and_get_router(self, active_set=True, **kwargs): + """Create advanced service router for services.""" + req = self._create_router(self.fmt, self._tenant_id, **kwargs) + res = self.deserialize(self.fmt, req) + router_id = res['router']['id'] + # manually set router status ACTIVE to pass through the router check, + # else mimic router creation ERROR condition. 
+ status = (service_constants.ACTIVE if active_set + else service_constants.ERROR) + self.plugin._resource_set_status( + context.get_admin_context(), + l3_db.Router, + router_id, + status) + return router_id + + +class ServiceRouterTestCase(ServiceRouterTest, + test_nsx_plugin.TestL3NatTestCase): + + def test_router_create(self): + name = 'router1' + tenant_id = _uuid() + expected_value = [('name', name), ('tenant_id', tenant_id), + ('admin_state_up', True), + ('external_gateway_info', None), + ('service_router', True)] + with self.router(name=name, admin_state_up=True, + tenant_id=tenant_id) as router: + expected_value_1 = expected_value + [('status', 'PENDING_CREATE')] + for k, v in expected_value_1: + self.assertEqual(router['router'][k], v) + + # wait max ~10 seconds for router status update + for i in range(20): + greenthread.sleep(0.5) + res = self._show('routers', router['router']['id']) + if res['router']['status'] == 'ACTIVE': + break + expected_value_2 = expected_value + [('status', 'ACTIVE')] + for k, v in expected_value_2: + self.assertEqual(res['router'][k], v) + + # check an integration lswitch is created + lswitch_name = "%s-ls" % name + for lswitch_id, lswitch in self.fc2._lswitches.iteritems(): + if lswitch['display_name'] == lswitch_name: + break + else: + self.fail("Integration lswitch not found") + + # check an integration lswitch is deleted + lswitch_name = "%s-ls" % name + for lswitch_id, lswitch in self.fc2._lswitches.iteritems(): + if lswitch['display_name'] == lswitch_name: + self.fail("Integration switch is not deleted") + + def test_router_delete_after_plugin_restart(self): + name = 'router1' + tenant_id = _uuid() + with self.router(name=name, admin_state_up=True, + tenant_id=tenant_id): + # clear router type cache to mimic plugin restart + plugin = n_manager.NeutronManager.get_plugin() + plugin._router_type = {} + + # check an integration lswitch is deleted + lswitch_name = "%s-ls" % name + for lswitch_id, lswitch in 
self.fc2._lswitches.iteritems(): + if lswitch['display_name'] == lswitch_name: + self.fail("Integration switch is not deleted") + + def test_router_show(self): + name = 'router1' + tenant_id = _uuid() + expected_value = [('name', name), ('tenant_id', tenant_id), + ('admin_state_up', True), + ('status', 'PENDING_CREATE'), + ('external_gateway_info', None), + ('service_router', True)] + with self.router(name='router1', admin_state_up=True, + tenant_id=tenant_id) as router: + res = self._show('routers', router['router']['id']) + for k, v in expected_value: + self.assertEqual(res['router'][k], v) + + def _test_router_create_with_gwinfo_and_l3_ext_net(self, vlan_id=None): + super(ServiceRouterTestCase, + self)._test_router_create_with_gwinfo_and_l3_ext_net( + vlan_id, validate_ext_gw=False) + + def _test_router_update_gateway_on_l3_ext_net(self, vlan_id=None): + super(ServiceRouterTestCase, + self)._test_router_update_gateway_on_l3_ext_net( + vlan_id, validate_ext_gw=False) + + +class TestProxyCreateLswitch(base.BaseTestCase): + def setUp(self): + super(TestProxyCreateLswitch, self).setUp() + self.tenant_id = "foo_tenant" + self.display_name = "foo_network" + self.tz_config = [ + {'zone_uuid': 'foo_zone', + 'transport_type': 'stt'} + ] + self.tags = utils.get_tags(quantum_net_id='foo_id', + os_tid=self.tenant_id) + self.cluster = None + + def test_create_lswitch_with_basic_args(self): + result = nsp._process_base_create_lswitch_args(self.cluster, + 'foo_id', + self.tenant_id, + self.display_name, + self.tz_config) + self.assertEqual(self.display_name, result[0]) + self.assertEqual(self.tz_config, result[1]) + self.assertEqual(sorted(self.tags), sorted(result[2])) + + def test_create_lswitch_with_shared_as_kwarg(self): + result = nsp._process_base_create_lswitch_args(self.cluster, + 'foo_id', + self.tenant_id, + self.display_name, + self.tz_config, + shared=True) + expected = self.tags + [{'scope': 'shared', 'tag': 'true'}] + self.assertEqual(sorted(expected), 
sorted(result[2])) + + def test_create_lswitch_with_shared_as_arg(self): + result = nsp._process_base_create_lswitch_args(self.cluster, + 'foo_id', + self.tenant_id, + self.display_name, + self.tz_config, + True) + additional_tags = [{'scope': 'shared', 'tag': 'true'}] + expected = self.tags + additional_tags + self.assertEqual(sorted(expected), sorted(result[2])) + + def test_create_lswitch_with_additional_tags(self): + more_tags = [{'scope': 'foo_scope', 'tag': 'foo_tag'}] + result = nsp._process_base_create_lswitch_args(self.cluster, + 'foo_id', + self.tenant_id, + self.display_name, + self.tz_config, + tags=more_tags) + expected = self.tags + more_tags + self.assertEqual(sorted(expected), sorted(result[2])) diff --git a/neutron/tests/unit/vmware/vshield/test_firewall_driver.py b/neutron/tests/unit/vmware/vshield/test_firewall_driver.py new file mode 100644 index 000000000..0002ab97d --- /dev/null +++ b/neutron/tests/unit/vmware/vshield/test_firewall_driver.py @@ -0,0 +1,375 @@ +# Copyright 2013 VMware, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +import contextlib +import mock +import webob.exc + +from neutron import context +from neutron.db.firewall import firewall_db +from neutron.openstack.common import uuidutils +from neutron.plugins.vmware.vshield.common import exceptions as vcns_exc +from neutron.plugins.vmware.vshield import vcns_driver +from neutron.tests.unit.db.firewall import test_db_firewall +from neutron.tests.unit import vmware +from neutron.tests.unit.vmware.vshield import fake_vcns + + +_uuid = uuidutils.generate_uuid + +VSE_ID = 'edge-1' +ROUTER_ID = '42f95450-5cc9-44e4-a744-1320e592a9d5' + +VCNS_CONFIG_FILE = vmware.get_fake_conf("vcns.ini.test") + + +class VcnsDriverTestCase(test_db_firewall.FirewallPluginDbTestCase, + firewall_db.Firewall_db_mixin): + + def vcns_firewall_patch(self): + instance = self.mock_vcns.start() + instance.return_value.update_firewall.side_effect = ( + self.fc2.update_firewall) + instance.return_value.delete_firewall.side_effect = ( + self.fc2.delete_firewall) + instance.return_value.update_firewall_rule.side_effect = ( + self.fc2.update_firewall_rule) + instance.return_value.delete_firewall_rule.side_effect = ( + self.fc2.delete_firewall_rule) + instance.return_value.add_firewall_rule_above.side_effect = ( + self.fc2.add_firewall_rule_above) + instance.return_value.add_firewall_rule.side_effect = ( + self.fc2.add_firewall_rule) + instance.return_value.get_firewall.side_effect = ( + self.fc2.get_firewall) + instance.return_value.get_firewall_rule.side_effect = ( + self.fc2.get_firewall_rule) + + def setUp(self): + + self.config_parse(args=['--config-file', VCNS_CONFIG_FILE]) + # mock vcns + self.fc2 = fake_vcns.FakeVcns(unique_router_name=False) + self.mock_vcns = mock.patch(vmware.VCNS_NAME, autospec=True) + self.vcns_firewall_patch() + + self.driver = vcns_driver.VcnsDriver(mock.Mock()) + + super(VcnsDriverTestCase, self).setUp() + self.addCleanup(self.fc2.reset_all) + self.addCleanup(self.mock_vcns.stop) + + self.tenant_id = _uuid() + self.subnet_id = 
_uuid() + + +class TestEdgeFwDriver(VcnsDriverTestCase): + + def _make_firewall_dict_with_rules(self, context, firewall_id): + fw = self.get_firewall(context, firewall_id) + fw_policy_id = fw['firewall_policy_id'] + if fw_policy_id: + firewall_policy_db = self._get_firewall_policy( + context, fw_policy_id) + fw['firewall_rule_list'] = [ + self._make_firewall_rule_dict(fw_rule_db) + for fw_rule_db in firewall_policy_db['firewall_rules'] + ] + + return fw + + def _compare_firewall_rule_lists(self, firewall_policy_id, + list1, list2): + for r1, r2 in zip(list1, list2): + rule = r1['firewall_rule'] + rule['firewall_policy_id'] = firewall_policy_id + for k in rule: + self.assertEqual(rule[k], r2[k]) + + def test_create_and_get_firewall(self): + ctx = context.get_admin_context() + name = 'firewall' + with contextlib.nested(self.firewall_rule(name='fwr1', + no_delete=True), + self.firewall_rule(name='fwr2', + no_delete=True), + self.firewall_rule(name='fwr3', + no_delete=True)) as fr: + fw_rule_ids = [r['firewall_rule']['id'] for r in fr] + with self.firewall_policy(firewall_rules=fw_rule_ids, + no_delete=True) as fwp: + fwp_id = fwp['firewall_policy']['id'] + with self.firewall(name=name, + firewall_policy_id=fwp_id) as firewall: + fw_create = firewall['firewall'] + fw_expect = self._make_firewall_dict_with_rules( + ctx, fw_create['id']) + self.driver.update_firewall(ctx, VSE_ID, fw_expect) + fw_get = self.driver.get_firewall(ctx, VSE_ID) + self._compare_firewall_rule_lists( + fwp_id, fw_get['firewall_rule_list'], + fw_expect['firewall_rule_list']) + + def test_update_firewall_with_rules(self): + ctx = context.get_admin_context() + name = 'new_firewall' + with contextlib.nested(self.firewall_rule(name='fwr1', + no_delete=True), + self.firewall_rule(name='fwr2', + no_delete=True), + self.firewall_rule(name='fwr3', + no_delete=True)) as fr: + fw_rule_ids = [r['firewall_rule']['id'] for r in fr] + with self.firewall_policy(firewall_rules=fw_rule_ids, + no_delete=True) as 
fwp: + fwp_id = fwp['firewall_policy']['id'] + with self.firewall(name=name, + firewall_policy_id=fwp_id) as firewall: + fw_create = firewall['firewall'] + fw_create = self._make_firewall_dict_with_rules( + ctx, fw_create['id']) + self.driver.update_firewall(ctx, VSE_ID, fw_create) + + data = {'firewall_rule': {'name': name, + 'source_port': '10:20', + 'destination_port': '30:40'}} + self.new_update_request('firewall_rules', data, + fr[0]['firewall_rule']['id']) + fw_expect = self._make_firewall_dict_with_rules( + ctx, fw_create['id']) + self.driver.update_firewall(ctx, VSE_ID, fw_expect) + + fw_get = self.driver.get_firewall( + ctx, VSE_ID) + self._compare_firewall_rule_lists( + fwp_id, fw_get['firewall_rule_list'], + fw_expect['firewall_rule_list']) + + def test_delete_firewall(self): + ctx = context.get_admin_context() + name = 'firewall' + with contextlib.nested(self.firewall_rule(name='fwr1', + no_delete=True), + self.firewall_rule(name='fwr2', + no_delete=True), + self.firewall_rule(name='fwr3', + no_delete=True)) as fr: + fw_rule_ids = [r['firewall_rule']['id'] for r in fr] + with self.firewall_policy(firewall_rules=fw_rule_ids, + no_delete=True) as fwp: + fwp_id = fwp['firewall_policy']['id'] + with self.firewall(name=name, + firewall_policy_id=fwp_id) as firewall: + fw_create = firewall['firewall'] + fw_expect = self._make_firewall_dict_with_rules( + ctx, fw_create['id']) + self.driver.update_firewall(ctx, VSE_ID, fw_expect) + self.driver.delete_firewall(ctx, VSE_ID) + fw_get = self.driver.get_firewall( + ctx, VSE_ID) + self.assertFalse(fw_get['firewall_rule_list']) + + def test_update_firewall_rule(self): + ctx = context.get_admin_context() + name = 'new_firewall' + with contextlib.nested(self.firewall_rule(name='fwr1', + no_delete=True)) as fr: + fw_rule_ids = [r['firewall_rule']['id'] for r in fr] + with self.firewall_policy(firewall_rules=fw_rule_ids, + no_delete=True) as fwp: + fwp_id = fwp['firewall_policy']['id'] + with self.firewall(name=name, + 
firewall_policy_id=fwp_id) as firewall: + fw_create = firewall['firewall'] + fw_create = self._make_firewall_dict_with_rules( + ctx, fw_create['id']) + self.driver.update_firewall(ctx, VSE_ID, fw_create) + + data = {'firewall_rule': {'name': name, + 'source_port': '10:20', + 'destination_port': '30:40'}} + req = self.new_update_request( + 'firewall_rules', data, + fr[0]['firewall_rule']['id']) + res = self.deserialize(self.fmt, + req.get_response(self.ext_api)) + rule_expect = res['firewall_rule'] + rule_expect['edge_id'] = VSE_ID + self.driver.update_firewall_rule( + ctx, rule_expect['id'], VSE_ID, rule_expect) + rule_get = self.driver.get_firewall_rule( + ctx, rule_expect['id'], VSE_ID) + for k, v in rule_get['firewall_rule'].items(): + self.assertEqual(rule_expect[k], v) + + def test_delete_firewall_rule(self): + ctx = context.get_admin_context() + name = 'new_firewall' + with contextlib.nested(self.firewall_rule(name='fwr1', + no_delete=True), + self.firewall_rule(name='fwr2', + no_delete=True)) as fr: + fw_rule_ids = [r['firewall_rule']['id'] for r in fr] + with self.firewall_policy(firewall_rules=fw_rule_ids, + no_delete=True) as fwp: + fwp_id = fwp['firewall_policy']['id'] + with self.firewall(name=name, + firewall_policy_id=fwp_id) as firewall: + fw_create = firewall['firewall'] + fw_create = self._make_firewall_dict_with_rules( + ctx, fw_create['id']) + self.driver.update_firewall(ctx, VSE_ID, fw_create) + + fr[0]['firewall_rule']['edge_id'] = VSE_ID + self.driver.delete_firewall_rule( + ctx, fr[0]['firewall_rule']['id'], + VSE_ID) + self.assertRaises(vcns_exc.VcnsNotFound, + self.driver.get_firewall_rule, + ctx, fr[0]['firewall_rule']['id'], + VSE_ID) + + def test_insert_rule(self): + ctx = context.get_admin_context() + with self.firewall_policy() as fwp: + fwp_id = fwp['firewall_policy']['id'] + with self.firewall(firewall_policy_id=fwp_id) as firewall: + fw_create = firewall['firewall'] + fw_create = self._make_firewall_dict_with_rules( + ctx, 
fw_create['id']) + self.driver.update_firewall(ctx, VSE_ID, fw_create) + with contextlib.nested(self.firewall_rule(name='fwr0', + no_delete=True), + self.firewall_rule(name='fwr1', + no_delete=True), + self.firewall_rule(name='fwr2', + no_delete=True), + self.firewall_rule(name='fwr3', + no_delete=True), + self.firewall_rule(name='fwr4', + no_delete=True), + self.firewall_rule(name='fwr5', + no_delete=True), + self.firewall_rule( + name='fwr6', + no_delete=True)) as fwr: + # test insert when rule list is empty + fwr0_id = fwr[0]['firewall_rule']['id'] + self._rule_action('insert', fwp_id, fwr0_id, + insert_before=None, + insert_after=None, + expected_code=webob.exc.HTTPOk.code) + fw_update = self._make_firewall_dict_with_rules( + ctx, fw_create['id']) + self.driver.update_firewall(ctx, VSE_ID, fw_update) + # test insert at top of list above existing rule + fwr1_id = fwr[1]['firewall_rule']['id'] + self._rule_action('insert', fwp_id, fwr1_id, + insert_before=fwr0_id, + insert_after=None, + expected_code=webob.exc.HTTPOk.code) + + fw_expect = self._make_firewall_dict_with_rules( + ctx, fw_create['id']) + + rule_info = {'firewall_rule_id': fwr1_id, + 'insert_before': fwr0_id, + 'insert_after': None} + rule = fwr[1]['firewall_rule'] + self.driver.insert_rule(ctx, rule_info, VSE_ID, rule) + fw_get = self.driver.get_firewall( + ctx, VSE_ID) + self._compare_firewall_rule_lists( + fwp_id, fw_get['firewall_rule_list'], + fw_expect['firewall_rule_list']) + # test insert at bottom of list + fwr2_id = fwr[2]['firewall_rule']['id'] + self._rule_action('insert', fwp_id, fwr2_id, + insert_before=None, + insert_after=fwr0_id, + expected_code=webob.exc.HTTPOk.code) + fw_expect = self._make_firewall_dict_with_rules( + ctx, fw_create['id']) + + rule_info = {'firewall_rule_id': fwr2_id, + 'insert_before': None, + 'insert_after': fwr0_id} + rule = fwr[2]['firewall_rule'] + self.driver.insert_rule(ctx, rule_info, VSE_ID, rule) + fw_get = self.driver.get_firewall( + ctx, VSE_ID) + 
self._compare_firewall_rule_lists( + fwp_id, fw_get['firewall_rule_list'], + fw_expect['firewall_rule_list']) + # test insert in the middle of the list using + # insert_before + fwr3_id = fwr[3]['firewall_rule']['id'] + self._rule_action('insert', fwp_id, fwr3_id, + insert_before=fwr2_id, + insert_after=None, + expected_code=webob.exc.HTTPOk.code) + fw_expect = self._make_firewall_dict_with_rules( + ctx, fw_create['id']) + + rule_info = {'firewall_rule_id': fwr3_id, + 'insert_before': fwr2_id, + 'insert_after': None} + rule = fwr[3]['firewall_rule'] + self.driver.insert_rule(ctx, rule_info, VSE_ID, rule) + fw_get = self.driver.get_firewall( + ctx, VSE_ID) + self._compare_firewall_rule_lists( + fwp_id, fw_get['firewall_rule_list'], + fw_expect['firewall_rule_list']) + # test insert in the middle of the list using + # insert_after + fwr4_id = fwr[4]['firewall_rule']['id'] + self._rule_action('insert', fwp_id, fwr4_id, + insert_before=None, + insert_after=fwr3_id, + expected_code=webob.exc.HTTPOk.code) + fw_expect = self._make_firewall_dict_with_rules( + ctx, fw_create['id']) + + rule_info = {'firewall_rule_id': fwr4_id, + 'insert_before': None, + 'insert_after': fwr3_id} + rule = fwr[4]['firewall_rule'] + self.driver.insert_rule(ctx, rule_info, VSE_ID, rule) + fw_get = self.driver.get_firewall( + ctx, VSE_ID) + self._compare_firewall_rule_lists( + fwp_id, fw_get['firewall_rule_list'], + fw_expect['firewall_rule_list']) + # test insert when both insert_before and + # insert_after are set + fwr5_id = fwr[5]['firewall_rule']['id'] + self._rule_action('insert', fwp_id, fwr5_id, + insert_before=fwr4_id, + insert_after=fwr4_id, + expected_code=webob.exc.HTTPOk.code) + fw_expect = self._make_firewall_dict_with_rules( + ctx, fw_create['id']) + + rule_info = {'firewall_rule_id': fwr5_id, + 'insert_before': fwr4_id, + 'insert_after': fwr4_id} + rule = fwr[5]['firewall_rule'] + self.driver.insert_rule(ctx, rule_info, VSE_ID, rule) + fw_get = self.driver.get_firewall( + ctx, 
VSE_ID) + self._compare_firewall_rule_lists( + fwp_id, fw_get['firewall_rule_list'], + fw_expect['firewall_rule_list']) diff --git a/neutron/tests/unit/vmware/vshield/test_fwaas_plugin.py b/neutron/tests/unit/vmware/vshield/test_fwaas_plugin.py new file mode 100644 index 000000000..282e4a80f --- /dev/null +++ b/neutron/tests/unit/vmware/vshield/test_fwaas_plugin.py @@ -0,0 +1,697 @@ +# Copyright 2013 VMware, Inc +# All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +import contextlib +import copy +import webob.exc + +from neutron.api.v2 import attributes +from neutron import context +from neutron.extensions import firewall +from neutron import manager +from neutron.openstack.common import uuidutils +from neutron.plugins.common import constants as const +from neutron.tests.unit.db.firewall import test_db_firewall +from neutron.tests.unit.vmware.vshield import test_edge_router + +_uuid = uuidutils.generate_uuid + +FW_PLUGIN_CLASS = "neutron.plugins.vmware.plugin.NsxServicePlugin" + + +class FirewallTestExtensionManager( + test_edge_router.ServiceRouterTestExtensionManager): + + def get_resources(self): + # If l3 resources have been loaded and updated by main API + # router, update the map in the l3 extension so it will load + # the same attributes as the API router + resources = super(FirewallTestExtensionManager, self).get_resources() + firewall_attr_map = copy.deepcopy(firewall.RESOURCE_ATTRIBUTE_MAP) + for res in 
firewall.RESOURCE_ATTRIBUTE_MAP.keys(): + attr_info = attributes.RESOURCE_ATTRIBUTE_MAP.get(res) + if attr_info: + firewall.RESOURCE_ATTRIBUTE_MAP[res] = attr_info + fw_resources = firewall.Firewall.get_resources() + # restore the original resources once the controllers are created + firewall.RESOURCE_ATTRIBUTE_MAP = firewall_attr_map + + resources.extend(fw_resources) + + return resources + + def get_actions(self): + return [] + + def get_request_extensions(self): + return [] + + +class FirewallPluginTestCase(test_db_firewall.FirewallPluginDbTestCase, + test_edge_router.ServiceRouterTest): + + def vcns_firewall_patch(self): + self.vcns_instance.return_value.update_firewall.side_effect = ( + self.fc2.update_firewall) + self.vcns_instance.return_value.delete_firewall.side_effect = ( + self.fc2.delete_firewall) + self.vcns_instance.return_value.update_firewall_rule.side_effect = ( + self.fc2.update_firewall_rule) + self.vcns_instance.return_value.delete_firewall_rule.side_effect = ( + self.fc2.delete_firewall_rule) + self.vcns_instance.return_value.add_firewall_rule_above.side_effect = ( + self.fc2.add_firewall_rule_above) + self.vcns_instance.return_value.add_firewall_rule.side_effect = ( + self.fc2.add_firewall_rule) + self.vcns_instance.return_value.get_firewall.side_effect = ( + self.fc2.get_firewall) + self.vcns_instance.return_value.get_firewall_rule.side_effect = ( + self.fc2.get_firewall_rule) + + def setUp(self): + # Save the global RESOURCE_ATTRIBUTE_MAP + self.saved_attr_map = {} + for resource, attrs in attributes.RESOURCE_ATTRIBUTE_MAP.iteritems(): + self.saved_attr_map[resource] = attrs.copy() + + super(FirewallPluginTestCase, self).setUp( + ext_mgr=FirewallTestExtensionManager(), + fw_plugin=FW_PLUGIN_CLASS) + self.vcns_firewall_patch() + self.plugin = manager.NeutronManager.get_plugin() + + def tearDown(self): + super(FirewallPluginTestCase, self).tearDown() + # Restore the global RESOURCE_ATTRIBUTE_MAP + attributes.RESOURCE_ATTRIBUTE_MAP = 
self.saved_attr_map + self.ext_api = None + self.plugin = None + + def _create_firewall(self, fmt, name, description, firewall_policy_id, + admin_state_up=True, expected_res_status=None, + **kwargs): + data = {'firewall': {'name': name, + 'description': description, + 'firewall_policy_id': firewall_policy_id, + 'router_id': kwargs.get('router_id'), + 'admin_state_up': admin_state_up, + 'tenant_id': self._tenant_id}} + + firewall_req = self.new_create_request('firewalls', data, fmt) + firewall_res = firewall_req.get_response(self.ext_api) + if expected_res_status: + self.assertEqual(firewall_res.status_int, expected_res_status) + + return firewall_res + + def test_create_firewall(self): + name = "new_fw" + attrs = self._get_test_firewall_attrs(name) + + with self.firewall_policy() as fwp: + fwp_id = fwp['firewall_policy']['id'] + attrs['firewall_policy_id'] = fwp_id + attrs['router_id'] = self._create_and_get_router() + with self.firewall(name=name, + firewall_policy_id=fwp_id, + router_id=attrs['router_id'], + admin_state_up= + test_db_firewall.ADMIN_STATE_UP, + expected_res_status=201) as fw: + attrs = self._replace_firewall_status( + attrs, const.PENDING_CREATE, const.ACTIVE) + for k, v in attrs.iteritems(): + self.assertEqual(fw['firewall'][k], v) + + def test_create_firewall_without_policy(self, **kwargs): + name = "new_fw" + attrs = self._get_test_firewall_attrs(name) + if 'router_id' in kwargs: + attrs['router_id'] = kwargs.pop('router_id') + else: + attrs['router_id'] = self._create_and_get_router() + + with self.firewall(name=name, + router_id=attrs['router_id'], + admin_state_up= + test_db_firewall.ADMIN_STATE_UP, + **kwargs) as fw: + attrs = self._replace_firewall_status( + attrs, const.PENDING_CREATE, const.ACTIVE) + for k, v in attrs.iteritems(): + self.assertEqual(fw['firewall'][k], v) + + def test_create_firewall_with_invalid_router(self): + name = "new_fw" + attrs = self._get_test_firewall_attrs(name) + attrs['router_id'] = 
self._create_and_get_router() + self.assertRaises(webob.exc.HTTPClientError, + self.test_create_firewall_without_policy, + router_id=None) + self.assertRaises(webob.exc.HTTPClientError, + self.test_create_firewall_without_policy, + router_id='invalid_id') + + router_id = self._create_and_get_router( + arg_list=('service_router',), service_router=False) + self.assertRaises(webob.exc.HTTPClientError, + self.test_create_firewall_without_policy, + router_id=router_id) + + router_id = self._create_and_get_router(active_set=False) + self.assertRaises(webob.exc.HTTPClientError, + self.test_create_firewall_without_policy, + router_id=router_id) + + def test_update_firewall(self): + name = "new_fw" + attrs = self._get_test_firewall_attrs(name) + attrs['router_id'] = self._create_and_get_router() + + with self.firewall_policy() as fwp: + fwp_id = fwp['firewall_policy']['id'] + attrs['firewall_policy_id'] = fwp_id + with self.firewall( + firewall_policy_id=fwp_id, router_id=attrs['router_id'], + admin_state_up=test_db_firewall.ADMIN_STATE_UP) as fw: + fw_id = fw['firewall']['id'] + new_data = {'firewall': {'name': name}} + req = self.new_update_request('firewalls', new_data, fw_id) + res = req.get_response(self.ext_api) + self.assertEqual(res.status_int, 200) + res_json = self.deserialize( + self.fmt, res) + attrs = self._replace_firewall_status( + attrs, const.PENDING_CREATE, const.ACTIVE) + for k, v in attrs.iteritems(): + self.assertEqual(res_json['firewall'][k], v) + + def test_delete_firewall(self): + ctx = context.get_admin_context() + with self.firewall_policy() as fwp: + fwp_id = fwp['firewall_policy']['id'] + with self.firewall( + firewall_policy_id=fwp_id, + router_id=self._create_and_get_router(), + admin_state_up=test_db_firewall.ADMIN_STATE_UP, + no_delete=True) as fw: + fw_id = fw['firewall']['id'] + with ctx.session.begin(subtransactions=True): + req = self.new_delete_request('firewalls', fw_id) + res = req.get_response(self.ext_api) + 
self.assertEqual(res.status_int, 204) + self.assertRaises( + firewall.FirewallNotFound, + self.plugin.get_firewall, ctx, fw_id) + + def test_delete_router_in_use_by_fwservice(self): + router_id = self._create_and_get_router() + with self.firewall_policy() as fwp: + fwp_id = fwp['firewall_policy']['id'] + with self.firewall(name='fw', + firewall_policy_id=fwp_id, + router_id=router_id, + admin_state_up= + test_db_firewall.ADMIN_STATE_UP, + expected_res_status=201): + self._delete('routers', router_id, + expected_code=webob.exc.HTTPConflict.code) + + def test_show_firewall(self): + name = "firewall1" + attrs = self._get_test_firewall_attrs(name) + attrs['router_id'] = self._create_and_get_router() + + with self.firewall_policy() as fwp: + fwp_id = fwp['firewall_policy']['id'] + attrs['firewall_policy_id'] = fwp_id + with self.firewall( + name=name, + firewall_policy_id=fwp_id, router_id=attrs['router_id'], + admin_state_up=test_db_firewall.ADMIN_STATE_UP) as firewall: + + req = self.new_show_request('firewalls', + firewall['firewall']['id'], + fmt=self.fmt) + res = self.deserialize(self.fmt, + req.get_response(self.ext_api)) + attrs = self._replace_firewall_status( + attrs, const.PENDING_CREATE, const.ACTIVE) + for k, v in attrs.iteritems(): + self.assertEqual(res['firewall'][k], v) + + def test_list_firewalls(self): + keys_list = [] + for i in range(3): + keys_list.append({'name': "fw" + str(i), + 'router_id': self._create_and_get_router(), + 'admin_state_up': True, + 'status': "ACTIVE"}) + + with contextlib.nested( + self.firewall( + name='fw0', router_id=keys_list[0]['router_id'], + admin_state_up=True, description='fw'), + self.firewall( + name='fw1', router_id=keys_list[1]['router_id'], + admin_state_up=True, description='fw'), + self.firewall( + name='fw2', router_id=keys_list[2]['router_id'], + admin_state_up=True, description='fw'), + ) as (fw1, fw2, fw3): + self._test_list_resources( + 'firewall', (fw1, fw2, fw3), + query_params='description=fw') + + req = 
self.new_list_request('firewalls') + res = self.deserialize( + self.fmt, req.get_response(self.ext_api)) + self.assertEqual(len(res['firewalls']), 3) + for index in range(len(res['firewalls'])): + for k, v in keys_list[index].items(): + self.assertEqual(res['firewalls'][index][k], v) + + def test_create_firewall_with_rules(self): + ctx = context.get_admin_context() + with contextlib.nested(self.firewall_rule(name='fwr1'), + self.firewall_rule(name='fwr2'), + self.firewall_rule(name='fwr3')) as fr: + with self.firewall_policy() as fwp: + fwp_id = fwp['firewall_policy']['id'] + fw_rule_ids = [r['firewall_rule']['id'] for r in fr] + data = {'firewall_policy': + {'firewall_rules': fw_rule_ids}} + req = self.new_update_request( + 'firewall_policies', data, fwp_id) + req.get_response(self.ext_api) + attrs = self._get_test_firewall_attrs() + attrs['firewall_policy_id'] = fwp_id + with self.firewall( + firewall_policy_id=fwp_id, + router_id=self._create_and_get_router(), + admin_state_up=test_db_firewall.ADMIN_STATE_UP) as fw: + rule_list = ( + self.plugin._make_firewall_rule_list_by_policy_id( + ctx, fw['firewall']['firewall_policy_id'])) + self._compare_firewall_rule_lists( + fwp_id, fr, rule_list) + + def test_update_firewall_policy_with_no_firewall(self): + name = "new_firewall_policy1" + attrs = self._get_test_firewall_policy_attrs(name) + + with self.firewall_policy(shared=test_db_firewall.SHARED, + firewall_rules=None, + audited=test_db_firewall.AUDITED) as fwp: + data = {'firewall_policy': {'name': name}} + req = self.new_update_request('firewall_policies', data, + fwp['firewall_policy']['id']) + res = self.deserialize(self.fmt, req.get_response(self.ext_api)) + for k, v in attrs.iteritems(): + self.assertEqual(res['firewall_policy'][k], v) + + def test_update_firewall_policy_with_firewall(self): + name = "new_firewall_policy1" + attrs = self._get_test_firewall_policy_attrs(name) + + with self.firewall_policy(shared=test_db_firewall.SHARED, + firewall_rules=None, + 
audited=test_db_firewall.AUDITED) as fwp: + fwp_id = fwp['firewall_policy']['id'] + with self.firewall(firewall_policy_id=fwp_id, + router_id=self._create_and_get_router(), + admin_state_up= + test_db_firewall.ADMIN_STATE_UP): + data = {'firewall_policy': {'name': name}} + req = self.new_update_request( + 'firewall_policies', data, fwp['firewall_policy']['id']) + res = self.deserialize( + self.fmt, req.get_response(self.ext_api)) + for k, v in attrs.iteritems(): + self.assertEqual(res['firewall_policy'][k], v) + + def test_update_firewall_rule_with_no_firewall(self): + name = "new_firewall_rule1" + attrs = self._get_test_firewall_rule_attrs(name) + + attrs['source_port'] = '10:20' + attrs['destination_port'] = '30:40' + with self.firewall_rule() as fwr: + data = {'firewall_rule': {'name': name, + 'source_port': '10:20', + 'destination_port': '30:40'}} + req = self.new_update_request( + 'firewall_rules', data, fwr['firewall_rule']['id']) + res = self.deserialize( + self.fmt, req.get_response(self.ext_api)) + for k, v in attrs.iteritems(): + self.assertEqual(res['firewall_rule'][k], v) + + attrs['source_port'] = '10000' + attrs['destination_port'] = '80' + with self.firewall_rule() as fwr: + data = {'firewall_rule': {'name': name, + 'source_port': 10000, + 'destination_port': 80}} + req = self.new_update_request('firewall_rules', data, + fwr['firewall_rule']['id']) + res = self.deserialize(self.fmt, req.get_response(self.ext_api)) + for k, v in attrs.iteritems(): + self.assertEqual(res['firewall_rule'][k], v) + + attrs['source_port'] = None + attrs['destination_port'] = None + with self.firewall_rule() as fwr: + data = {'firewall_rule': {'name': name, + 'source_port': None, + 'destination_port': None}} + req = self.new_update_request( + 'firewall_rules', data, fwr['firewall_rule']['id']) + res = self.deserialize( + self.fmt, req.get_response(self.ext_api)) + for k, v in attrs.iteritems(): + self.assertEqual(res['firewall_rule'][k], v) + + def 
test_update_firewall_rule_with_firewall(self): + name = "new_firewall_rule1" + attrs = self._get_test_firewall_rule_attrs(name) + with self.firewall_rule() as fwr: + with self.firewall_policy() as fwp: + fwp_id = fwp['firewall_policy']['id'] + attrs['firewall_policy_id'] = fwp_id + with self.firewall(firewall_policy_id=fwp_id, + router_id=self._create_and_get_router(), + admin_state_up= + test_db_firewall.ADMIN_STATE_UP): + fwr_id = fwr['firewall_rule']['id'] + data = {'firewall_policy': {'firewall_rules': [fwr_id]}} + req = self.new_update_request( + 'firewall_policies', data, + fwp['firewall_policy']['id']) + req.get_response(self.ext_api) + data = {'firewall_rule': {'name': name}} + req = self.new_update_request( + 'firewall_rules', data, + fwr['firewall_rule']['id']) + res = self.deserialize( + self.fmt, req.get_response(self.ext_api)) + attrs['firewall_policy_id'] = fwp_id + for k, v in attrs.iteritems(): + self.assertEqual(res['firewall_rule'][k], v) + + def test_insert_rule_with_no_firewall(self): + attrs = self._get_test_firewall_policy_attrs() + attrs['audited'] = False + attrs['firewall_list'] = [] + with contextlib.nested(self.firewall_rule(name='fwr0'), + self.firewall_rule(name='fwr1'), + self.firewall_rule(name='fwr2'), + self.firewall_rule(name='fwr3'), + self.firewall_rule(name='fwr4'), + self.firewall_rule(name='fwr5'), + self.firewall_rule(name='fwr6')) as fwr: + with self.firewall_policy() as fwp: + fwp_id = fwp['firewall_policy']['id'] + attrs['id'] = fwp_id + # test insert when rule list is empty + fwr0_id = fwr[0]['firewall_rule']['id'] + attrs['firewall_rules'].insert(0, fwr0_id) + self._rule_action('insert', fwp_id, fwr0_id, + insert_before=None, + insert_after=None, + expected_code=webob.exc.HTTPOk.code, + expected_body=attrs) + # test insert at top of rule list, insert_before and + # insert_after not provided + fwr1_id = fwr[1]['firewall_rule']['id'] + attrs['firewall_rules'].insert(0, fwr1_id) + insert_data = {'firewall_rule_id': fwr1_id} 
+ self._rule_action('insert', fwp_id, fwr0_id, + expected_code=webob.exc.HTTPOk.code, + expected_body=attrs, body_data=insert_data) + # test insert at top of list above existing rule + fwr2_id = fwr[2]['firewall_rule']['id'] + attrs['firewall_rules'].insert(0, fwr2_id) + self._rule_action('insert', fwp_id, fwr2_id, + insert_before=fwr1_id, + insert_after=None, + expected_code=webob.exc.HTTPOk.code, + expected_body=attrs) + # test insert at bottom of list + fwr3_id = fwr[3]['firewall_rule']['id'] + attrs['firewall_rules'].append(fwr3_id) + self._rule_action('insert', fwp_id, fwr3_id, + insert_before=None, + insert_after=fwr0_id, + expected_code=webob.exc.HTTPOk.code, + expected_body=attrs) + # test insert in the middle of the list using + # insert_before + fwr4_id = fwr[4]['firewall_rule']['id'] + attrs['firewall_rules'].insert(1, fwr4_id) + self._rule_action('insert', fwp_id, fwr4_id, + insert_before=fwr1_id, + insert_after=None, + expected_code=webob.exc.HTTPOk.code, + expected_body=attrs) + # test insert in the middle of the list using + # insert_after + fwr5_id = fwr[5]['firewall_rule']['id'] + attrs['firewall_rules'].insert(1, fwr5_id) + self._rule_action('insert', fwp_id, fwr5_id, + insert_before=None, + insert_after=fwr2_id, + expected_code=webob.exc.HTTPOk.code, + expected_body=attrs) + # test insert when both insert_before and + # insert_after are set + fwr6_id = fwr[6]['firewall_rule']['id'] + attrs['firewall_rules'].insert(1, fwr6_id) + self._rule_action('insert', fwp_id, fwr6_id, + insert_before=fwr5_id, + insert_after=fwr5_id, + expected_code=webob.exc.HTTPOk.code, + expected_body=attrs) + + def test_insert_rule_with_firewall(self): + attrs = self._get_test_firewall_policy_attrs() + attrs['audited'] = False + attrs['firewall_list'] = [] + with contextlib.nested(self.firewall_rule(name='fwr0'), + self.firewall_rule(name='fwr1'), + self.firewall_rule(name='fwr2'), + self.firewall_rule(name='fwr3'), + self.firewall_rule(name='fwr4'), + 
self.firewall_rule(name='fwr5'), + self.firewall_rule(name='fwr6')) as fwr: + with self.firewall_policy() as fwp: + fwp_id = fwp['firewall_policy']['id'] + attrs['id'] = fwp_id + with self.firewall(router_id=self._create_and_get_router(), + firewall_policy_id=fwp_id) as fw: + # test insert when rule list is empty + fwr0_id = fwr[0]['firewall_rule']['id'] + attrs['firewall_rules'].insert(0, fwr0_id) + attrs['firewall_list'].insert(0, fw['firewall']['id']) + self._rule_action('insert', fwp_id, fwr0_id, + insert_before=None, + insert_after=None, + expected_code=webob.exc.HTTPOk.code, + expected_body=attrs) + # test insert at top of rule list, insert_before and + # insert_after not provided + fwr1_id = fwr[1]['firewall_rule']['id'] + attrs['firewall_rules'].insert(0, fwr1_id) + insert_data = {'firewall_rule_id': fwr1_id} + self._rule_action( + 'insert', fwp_id, fwr0_id, + expected_code=webob.exc.HTTPOk.code, + expected_body=attrs, body_data=insert_data) + # test insert at top of list above existing rule + fwr2_id = fwr[2]['firewall_rule']['id'] + attrs['firewall_rules'].insert(0, fwr2_id) + self._rule_action('insert', fwp_id, fwr2_id, + insert_before=fwr1_id, + insert_after=None, + expected_code=webob.exc.HTTPOk.code, + expected_body=attrs) + # test insert at bottom of list + fwr3_id = fwr[3]['firewall_rule']['id'] + attrs['firewall_rules'].append(fwr3_id) + self._rule_action('insert', fwp_id, fwr3_id, + insert_before=None, + insert_after=fwr0_id, + expected_code=webob.exc.HTTPOk.code, + expected_body=attrs) + # test insert in the middle of the list using + # insert_before + fwr4_id = fwr[4]['firewall_rule']['id'] + attrs['firewall_rules'].insert(1, fwr4_id) + self._rule_action('insert', fwp_id, fwr4_id, + insert_before=fwr1_id, + insert_after=None, + expected_code=webob.exc.HTTPOk.code, + expected_body=attrs) + # test insert in the middle of the list using + # insert_after + fwr5_id = fwr[5]['firewall_rule']['id'] + attrs['firewall_rules'].insert(1, fwr5_id) + 
self._rule_action('insert', fwp_id, fwr5_id, + insert_before=None, + insert_after=fwr2_id, + expected_code=webob.exc.HTTPOk.code, + expected_body=attrs) + # test insert when both insert_before and + # insert_after are set + fwr6_id = fwr[6]['firewall_rule']['id'] + attrs['firewall_rules'].insert(1, fwr6_id) + self._rule_action('insert', fwp_id, fwr6_id, + insert_before=fwr5_id, + insert_after=fwr5_id, + expected_code=webob.exc.HTTPOk.code, + expected_body=attrs) + + def test_remove_rule_with_no_firewall(self): + attrs = self._get_test_firewall_policy_attrs() + attrs['audited'] = False + attrs['firewall_list'] = [] + with self.firewall_policy() as fwp: + fwp_id = fwp['firewall_policy']['id'] + attrs['id'] = fwp_id + with contextlib.nested(self.firewall_rule(name='fwr1'), + self.firewall_rule(name='fwr2'), + self.firewall_rule(name='fwr3')) as fr1: + fw_rule_ids = [r['firewall_rule']['id'] for r in fr1] + attrs['firewall_rules'] = fw_rule_ids[:] + data = {'firewall_policy': + {'firewall_rules': fw_rule_ids}} + req = self.new_update_request('firewall_policies', data, + fwp_id) + req.get_response(self.ext_api) + # test removing a rule from a policy that does not exist + self._rule_action('remove', '123', fw_rule_ids[1], + expected_code=webob.exc.HTTPNotFound.code, + expected_body=None) + # test removing a rule in the middle of the list + attrs['firewall_rules'].remove(fw_rule_ids[1]) + self._rule_action('remove', fwp_id, fw_rule_ids[1], + expected_body=attrs) + # test removing a rule at the top of the list + attrs['firewall_rules'].remove(fw_rule_ids[0]) + self._rule_action('remove', fwp_id, fw_rule_ids[0], + expected_body=attrs) + # test removing remaining rule in the list + attrs['firewall_rules'].remove(fw_rule_ids[2]) + self._rule_action('remove', fwp_id, fw_rule_ids[2], + expected_body=attrs) + # test removing rule that is not associated with the policy + self._rule_action('remove', fwp_id, fw_rule_ids[2], + expected_code=webob.exc.HTTPBadRequest.code, + 
expected_body=None) + + def test_remove_rule_with_firewall(self): + attrs = self._get_test_firewall_policy_attrs() + attrs['audited'] = False + attrs['firewall_list'] = [] + with self.firewall_policy() as fwp: + fwp_id = fwp['firewall_policy']['id'] + attrs['id'] = fwp_id + with self.firewall(router_id=self._create_and_get_router(), + firewall_policy_id=fwp_id) as fw: + attrs['firewall_list'].insert(0, fw['firewall']['id']) + with contextlib.nested(self.firewall_rule(name='fwr1'), + self.firewall_rule(name='fwr2'), + self.firewall_rule(name='fwr3')) as fr1: + fw_rule_ids = [r['firewall_rule']['id'] for r in fr1] + attrs['firewall_rules'] = fw_rule_ids[:] + data = {'firewall_policy': + {'firewall_rules': fw_rule_ids}} + req = self.new_update_request( + 'firewall_policies', data, fwp_id) + req.get_response(self.ext_api) + # test removing a rule from a policy that does not exist + self._rule_action( + 'remove', '123', + fw_rule_ids[1], + expected_code=webob.exc.HTTPNotFound.code, + expected_body=None) + # test removing a rule in the middle of the list + attrs['firewall_rules'].remove(fw_rule_ids[1]) + self._rule_action('remove', fwp_id, fw_rule_ids[1], + expected_body=attrs) + # test removing a rule at the top of the list + attrs['firewall_rules'].remove(fw_rule_ids[0]) + self._rule_action('remove', fwp_id, fw_rule_ids[0], + expected_body=attrs) + # test removing remaining rule in the list + attrs['firewall_rules'].remove(fw_rule_ids[2]) + self._rule_action('remove', fwp_id, fw_rule_ids[2], + expected_body=attrs) + # test removing rule that is not + #associated with the policy + self._rule_action( + 'remove', fwp_id, fw_rule_ids[2], + expected_code=webob.exc.HTTPBadRequest.code, + expected_body=None) + + def test_remove_rule_with_firewalls(self): + attrs = self._get_test_firewall_policy_attrs() + attrs['audited'] = False + attrs['firewall_list'] = [] + with self.firewall_policy() as fwp: + fwp_id = fwp['firewall_policy']['id'] + attrs['id'] = fwp_id + with 
contextlib.nested( + self.firewall(router_id=self._create_and_get_router(), + firewall_policy_id=fwp_id), + self.firewall(router_id=self._create_and_get_router(), + firewall_policy_id=fwp_id)) as (fw1, fw2): + attrs['firewall_list'].insert(0, fw1['firewall']['id']) + attrs['firewall_list'].insert(1, fw2['firewall']['id']) + with contextlib.nested(self.firewall_rule(name='fwr1'), + self.firewall_rule(name='fwr2'), + self.firewall_rule(name='fwr3')) as fr1: + fw_rule_ids = [r['firewall_rule']['id'] for r in fr1] + attrs['firewall_rules'] = fw_rule_ids[:] + data = {'firewall_policy': + {'firewall_rules': fw_rule_ids}} + req = self.new_update_request( + 'firewall_policies', data, fwp_id) + req.get_response(self.ext_api) + # test removing a rule from a policy that does not exist + self._rule_action( + 'remove', '123', + fw_rule_ids[1], + expected_code=webob.exc.HTTPNotFound.code, + expected_body=None) + # test removing a rule in the middle of the list + attrs['firewall_rules'].remove(fw_rule_ids[1]) + self._rule_action('remove', fwp_id, fw_rule_ids[1], + expected_body=attrs) + # test removing a rule at the top of the list + attrs['firewall_rules'].remove(fw_rule_ids[0]) + self._rule_action('remove', fwp_id, fw_rule_ids[0], + expected_body=attrs) + # test removing remaining rule in the list + attrs['firewall_rules'].remove(fw_rule_ids[2]) + self._rule_action('remove', fwp_id, fw_rule_ids[2], + expected_body=attrs) + # test removing rule that is not + #associated with the policy + self._rule_action( + 'remove', fwp_id, fw_rule_ids[2], + expected_code=webob.exc.HTTPBadRequest.code, + expected_body=None) diff --git a/neutron/tests/unit/vmware/vshield/test_lbaas_plugin.py b/neutron/tests/unit/vmware/vshield/test_lbaas_plugin.py new file mode 100644 index 000000000..fdb0be434 --- /dev/null +++ b/neutron/tests/unit/vmware/vshield/test_lbaas_plugin.py @@ -0,0 +1,532 @@ +# Copyright 2013 VMware, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# 
not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +import contextlib + +import testtools +from webob import exc as web_exc + +from neutron.api.v2 import attributes +from neutron import context +from neutron.extensions import loadbalancer as lb +from neutron import manager +from neutron.openstack.common import uuidutils +from neutron.tests.unit.db.loadbalancer import test_db_loadbalancer +from neutron.tests.unit.vmware.vshield import test_edge_router + +_uuid = uuidutils.generate_uuid + +LBAAS_PLUGIN_CLASS = "neutron.plugins.vmware.plugin.NsxServicePlugin" + + +class LoadBalancerTestExtensionManager( + test_edge_router.ServiceRouterTestExtensionManager): + + def get_resources(self): + # If l3 resources have been loaded and updated by main API + # router, update the map in the l3 extension so it will load + # the same attributes as the API router + resources = super(LoadBalancerTestExtensionManager, + self).get_resources() + lb_attr_map = lb.RESOURCE_ATTRIBUTE_MAP.copy() + for res in lb.RESOURCE_ATTRIBUTE_MAP.keys(): + attr_info = attributes.RESOURCE_ATTRIBUTE_MAP.get(res) + if attr_info: + lb.RESOURCE_ATTRIBUTE_MAP[res] = attr_info + lb_resources = lb.Loadbalancer.get_resources() + # restore the original resources once the controllers are created + lb.RESOURCE_ATTRIBUTE_MAP = lb_attr_map + resources.extend(lb_resources) + return resources + + +class TestLoadbalancerPlugin( + test_db_loadbalancer.LoadBalancerPluginDbTestCase, + test_edge_router.ServiceRouterTest): + + def vcns_loadbalancer_patch(self): + instance = self.vcns_instance + 
instance.return_value.create_vip.side_effect = ( + self.fc2.create_vip) + instance.return_value.get_vip.side_effect = ( + self.fc2.get_vip) + instance.return_value.update_vip.side_effect = ( + self.fc2.update_vip) + instance.return_value.delete_vip.side_effect = ( + self.fc2.delete_vip) + instance.return_value.create_pool.side_effect = ( + self.fc2.create_pool) + instance.return_value.get_pool.side_effect = ( + self.fc2.get_pool) + instance.return_value.update_pool.side_effect = ( + self.fc2.update_pool) + instance.return_value.delete_pool.side_effect = ( + self.fc2.delete_pool) + instance.return_value.create_health_monitor.side_effect = ( + self.fc2.create_health_monitor) + instance.return_value.get_health_monitor.side_effect = ( + self.fc2.get_health_monitor) + instance.return_value.update_health_monitor.side_effect = ( + self.fc2.update_health_monitor) + instance.return_value.delete_health_monitor.side_effect = ( + self.fc2.delete_health_monitor) + instance.return_value.create_app_profile.side_effect = ( + self.fc2.create_app_profile) + instance.return_value.update_app_profile.side_effect = ( + self.fc2.update_app_profile) + instance.return_value.delete_app_profile.side_effect = ( + self.fc2.delete_app_profile) + + def setUp(self): + # Save the global RESOURCE_ATTRIBUTE_MAP + self.saved_attr_map = {} + for resource, attrs in attributes.RESOURCE_ATTRIBUTE_MAP.iteritems(): + self.saved_attr_map[resource] = attrs.copy() + + super(TestLoadbalancerPlugin, self).setUp( + ext_mgr=LoadBalancerTestExtensionManager(), + lb_plugin=LBAAS_PLUGIN_CLASS) + self.vcns_loadbalancer_patch() + self.plugin = manager.NeutronManager.get_plugin() + + def tearDown(self): + super(TestLoadbalancerPlugin, self).tearDown() + # Restore the global RESOURCE_ATTRIBUTE_MAP + attributes.RESOURCE_ATTRIBUTE_MAP = self.saved_attr_map + self.ext_api = None + self.plugin = None + + def _get_vip_optional_args(self): + args = super(TestLoadbalancerPlugin, self)._get_vip_optional_args() + return args + 
('router_id',) + + def test_update_healthmonitor(self): + keys = [('type', "TCP"), + ('tenant_id', self._tenant_id), + ('delay', 20), + ('timeout', 20), + ('max_retries', 2), + ('admin_state_up', False)] + + with contextlib.nested( + self.subnet(), + self.health_monitor(), + self.pool() + ) as (subnet, health_mon, pool): + net_id = subnet['subnet']['network_id'] + self._set_net_external(net_id) + with self.vip( + router_id=self._create_and_get_router(), + pool=pool, subnet=subnet): + self.plugin.create_pool_health_monitor( + context.get_admin_context(), + health_mon, pool['pool']['id'] + ) + data = {'health_monitor': {'delay': 20, + 'timeout': 20, + 'max_retries': 2, + 'admin_state_up': False}} + req = self.new_update_request( + "health_monitors", + data, + health_mon['health_monitor']['id']) + res = self.deserialize( + self.fmt, req.get_response(self.ext_api)) + for k, v in keys: + self.assertEqual(res['health_monitor'][k], v) + + def test_create_vip(self, **kwargs): + expected = { + 'name': 'vip1', + 'description': '', + 'protocol_port': 80, + 'protocol': 'HTTP', + 'connection_limit': -1, + 'admin_state_up': True, + 'status': 'ACTIVE', + 'tenant_id': self._tenant_id, + } + if 'router_id' in kwargs: + expected['router_id'] = kwargs.pop('router_id') + else: + expected['router_id'] = self._create_and_get_router() + + expected.update(kwargs) + + name = expected['name'] + + with contextlib.nested( + self.subnet(), + self.health_monitor(), + self.pool() + ) as (subnet, monitor, pool): + net_id = subnet['subnet']['network_id'] + self._set_net_external(net_id) + expected['pool_id'] = pool['pool']['id'] + self.plugin.create_pool_health_monitor( + context.get_admin_context(), + monitor, pool['pool']['id'] + ) + with self.vip( + router_id=expected['router_id'], name=name, + pool=pool, subnet=subnet, **kwargs) as vip: + for k in ('id', 'address', 'port_id', 'pool_id'): + self.assertTrue(vip['vip'].get(k, None)) + self.assertEqual( + dict((k, v) + for k, v in 
vip['vip'].items() if k in expected), + expected + ) + + def test_create_vip_with_invalid_router(self): + self.assertRaises( + web_exc.HTTPClientError, + self.test_create_vip, router_id=None) + self.assertRaises( + web_exc.HTTPClientError, + self.test_create_vip, router_id='invalid_router_id') + router_id = self._create_and_get_router( + arg_list=('service_router',), service_router=False) + self.assertRaises( + web_exc.HTTPClientError, + self.test_create_vip, router_id=router_id) + router_id = self._create_and_get_router(active_set=False) + self.assertRaises( + web_exc.HTTPClientError, + self.test_create_vip, router_id=router_id) + + def test_create_vip_with_session_persistence(self): + self.test_create_vip(session_persistence={'type': 'HTTP_COOKIE'}) + + def test_create_vip_with_invalid_persistence_method(self): + with testtools.ExpectedException(web_exc.HTTPClientError): + self.test_create_vip( + protocol='TCP', + session_persistence={'type': 'HTTP_COOKIE'}) + + def test_create_vips_with_same_names(self): + new_router_id = self._create_and_get_router() + with self.subnet() as subnet: + net_id = subnet['subnet']['network_id'] + self._set_net_external(net_id) + with contextlib.nested( + self.vip( + name='vip', + router_id=new_router_id, + subnet=subnet, protocol_port=80), + self.vip( + name='vip', + router_id=new_router_id, + subnet=subnet, protocol_port=81), + self.vip( + name='vip', + router_id=new_router_id, + subnet=subnet, protocol_port=82), + ) as (vip1, vip2, vip3): + req = self.new_list_request('vips') + res = self.deserialize( + self.fmt, req.get_response(self.ext_api)) + for index in range(len(res['vips'])): + self.assertEqual(res['vips'][index]['name'], 'vip') + + def test_update_vip(self): + name = 'new_vip' + router_id = self._create_and_get_router() + keys = [('router_id', router_id), + ('name', name), + ('address', "10.0.0.2"), + ('protocol_port', 80), + ('connection_limit', 100), + ('admin_state_up', False), + ('status', 'ACTIVE')] + + with 
contextlib.nested( + self.subnet(), + self.health_monitor(), + self.pool() + ) as (subnet, monitor, pool): + net_id = subnet['subnet']['network_id'] + self._set_net_external(net_id) + self.plugin.create_pool_health_monitor( + context.get_admin_context(), + monitor, pool['pool']['id'] + ) + with self.vip( + router_id=router_id, name=name, + pool=pool, subnet=subnet) as vip: + keys.append(('subnet_id', vip['vip']['subnet_id'])) + data = {'vip': {'name': name, + 'connection_limit': 100, + 'session_persistence': + {'type': "APP_COOKIE", + 'cookie_name': "jesssionId"}, + 'admin_state_up': False}} + req = self.new_update_request( + 'vips', data, vip['vip']['id']) + res = self.deserialize(self.fmt, + req.get_response(self.ext_api)) + for k, v in keys: + self.assertEqual(res['vip'][k], v) + + def test_delete_vip(self): + with contextlib.nested( + self.subnet(), + self.health_monitor(), + self.pool() + ) as (subnet, monitor, pool): + net_id = subnet['subnet']['network_id'] + self._set_net_external(net_id) + self.plugin.create_pool_health_monitor( + context.get_admin_context(), + monitor, pool['pool']['id'] + ) + with self.vip( + router_id=self._create_and_get_router(), + pool=pool, subnet=subnet, no_delete=True) as vip: + req = self.new_delete_request('vips', vip['vip']['id']) + res = req.get_response(self.ext_api) + self.assertEqual(res.status_int, 204) + + def test_delete_router_in_use_by_lbservice(self): + router_id = self._create_and_get_router() + with contextlib.nested( + self.subnet(), + self.health_monitor(), + self.pool() + ) as (subnet, monitor, pool): + net_id = subnet['subnet']['network_id'] + self._set_net_external(net_id) + self.plugin.create_pool_health_monitor( + context.get_admin_context(), + monitor, pool['pool']['id'] + ) + with self.vip( + router_id=router_id, + pool=pool, subnet=subnet): + self._delete('routers', router_id, + expected_code=web_exc.HTTPConflict.code) + + def test_show_vip(self): + router_id = self._create_and_get_router() + name = 
"vip_show" + keys = [('name', name), + ('protocol_port', 80), + ('protocol', 'HTTP'), + ('connection_limit', -1), + ('admin_state_up', True), + ('status', 'ACTIVE'), + ('router_id', router_id)] + + with contextlib.nested( + self.subnet(), + self.health_monitor(), + self.pool() + ) as (subnet, monitor, pool): + net_id = subnet['subnet']['network_id'] + self._set_net_external(net_id) + self.plugin.create_pool_health_monitor( + context.get_admin_context(), + monitor, pool['pool']['id'] + ) + with self.vip( + router_id=router_id, name=name, + pool=pool, subnet=subnet) as vip: + req = self.new_show_request('vips', + vip['vip']['id']) + res = self.deserialize( + self.fmt, req.get_response(self.ext_api)) + for k, v in keys: + self.assertEqual(res['vip'][k], v) + + def test_list_vips(self): + keys_list = [] + for i in range(3): + keys_list.append({'name': "vip" + str(i), + 'router_id': self._create_and_get_router(), + 'protocol_port': 80 + i, + 'protocol': "HTTP", + 'status': "ACTIVE", + 'admin_state_up': True}) + + with self.subnet() as subnet: + net_id = subnet['subnet']['network_id'] + self._set_net_external(net_id) + with contextlib.nested( + self.vip( + router_id=keys_list[0]['router_id'], name='vip0', + subnet=subnet, protocol_port=80), + self.vip( + router_id=keys_list[1]['router_id'], name='vip1', + subnet=subnet, protocol_port=81), + self.vip( + router_id=keys_list[2]['router_id'], name='vip2', + subnet=subnet, protocol_port=82), + ) as (vip1, vip2, vip3): + self._test_list_with_sort( + 'vip', + (vip1, vip3, vip2), + [('protocol_port', 'asc'), ('name', 'desc')] + ) + req = self.new_list_request('vips') + res = self.deserialize( + self.fmt, req.get_response(self.ext_api)) + self.assertEqual(len(res['vips']), 3) + for index in range(len(res['vips'])): + for k, v in keys_list[index].items(): + self.assertEqual(res['vips'][index][k], v) + + def test_update_pool(self): + data = {'pool': {'name': "new_pool", + 'admin_state_up': False}} + with contextlib.nested( + 
self.subnet(), + self.health_monitor(), + self.pool() + ) as (subnet, monitor, pool): + net_id = subnet['subnet']['network_id'] + self._set_net_external(net_id) + self.plugin.create_pool_health_monitor( + context.get_admin_context(), + monitor, pool['pool']['id'] + ) + with self.vip( + router_id=self._create_and_get_router(), + pool=pool, subnet=subnet): + req = self.new_update_request( + 'pools', data, pool['pool']['id']) + res = self.deserialize(self.fmt, + req.get_response(self.ext_api)) + for k, v in data['pool'].items(): + self.assertEqual(res['pool'][k], v) + + def test_create_member(self): + router_id = self._create_and_get_router() + with contextlib.nested( + self.subnet(), + self.health_monitor(), + self.pool() + ) as (subnet, monitor, pool): + pool_id = pool['pool']['id'] + net_id = subnet['subnet']['network_id'] + self._set_net_external(net_id) + self.plugin.create_pool_health_monitor( + context.get_admin_context(), + monitor, pool['pool']['id'] + ) + with self.vip( + router_id=router_id, + pool=pool, subnet=subnet): + with contextlib.nested( + self.member(address='192.168.1.100', + protocol_port=80, + pool_id=pool_id), + self.member(router_id=router_id, + address='192.168.1.101', + protocol_port=80, + pool_id=pool_id)) as (member1, member2): + req = self.new_show_request('pools', + pool_id, + fmt=self.fmt) + pool_update = self.deserialize( + self.fmt, + req.get_response(self.ext_api) + ) + self.assertIn(member1['member']['id'], + pool_update['pool']['members']) + self.assertIn(member2['member']['id'], + pool_update['pool']['members']) + + def _show_pool(self, pool_id): + req = self.new_show_request('pools', pool_id, fmt=self.fmt) + res = req.get_response(self.ext_api) + self.assertEqual(web_exc.HTTPOk.code, res.status_int) + return self.deserialize(self.fmt, res) + + def test_update_member(self): + with contextlib.nested( + self.subnet(), + self.health_monitor(), + self.pool(name="pool1"), + self.pool(name="pool2") + ) as (subnet, monitor, pool1, 
pool2): + net_id = subnet['subnet']['network_id'] + self._set_net_external(net_id) + self.plugin.create_pool_health_monitor( + context.get_admin_context(), + monitor, pool1['pool']['id'] + ) + self.plugin.create_pool_health_monitor( + context.get_admin_context(), + monitor, pool2['pool']['id'] + ) + with self.vip( + router_id=self._create_and_get_router(), + pool=pool1, subnet=subnet): + keys = [('address', "192.168.1.100"), + ('tenant_id', self._tenant_id), + ('protocol_port', 80), + ('weight', 10), + ('pool_id', pool2['pool']['id']), + ('admin_state_up', False), + ('status', 'ACTIVE')] + with self.member( + pool_id=pool1['pool']['id']) as member: + + pool1_update = self._show_pool(pool1['pool']['id']) + self.assertEqual(len(pool1_update['pool']['members']), 1) + pool2_update = self._show_pool(pool2['pool']['id']) + self.assertEqual(len(pool1_update['pool']['members']), 1) + self.assertFalse(pool2_update['pool']['members']) + + data = {'member': {'pool_id': pool2['pool']['id'], + 'weight': 10, + 'admin_state_up': False}} + req = self.new_update_request('members', + data, + member['member']['id']) + raw_res = req.get_response(self.ext_api) + self.assertEqual(web_exc.HTTPOk.code, raw_res.status_int) + res = self.deserialize(self.fmt, raw_res) + for k, v in keys: + self.assertEqual(res['member'][k], v) + pool1_update = self._show_pool(pool1['pool']['id']) + pool2_update = self._show_pool(pool2['pool']['id']) + self.assertEqual(len(pool2_update['pool']['members']), 1) + self.assertFalse(pool1_update['pool']['members']) + + def test_delete_member(self): + with contextlib.nested( + self.subnet(), + self.health_monitor(), + self.pool() + ) as (subnet, monitor, pool): + pool_id = pool['pool']['id'] + net_id = subnet['subnet']['network_id'] + self._set_net_external(net_id) + self.plugin.create_pool_health_monitor( + context.get_admin_context(), + monitor, pool['pool']['id'] + ) + with self.vip( + router_id=self._create_and_get_router(), + pool=pool, subnet=subnet): + with 
self.member(pool_id=pool_id, + no_delete=True) as member: + req = self.new_delete_request('members', + member['member']['id']) + res = req.get_response(self.ext_api) + self.assertEqual(res.status_int, 204) + pool_update = self._show_pool(pool['pool']['id']) + self.assertFalse(pool_update['pool']['members']) diff --git a/neutron/tests/unit/vmware/vshield/test_loadbalancer_driver.py b/neutron/tests/unit/vmware/vshield/test_loadbalancer_driver.py new file mode 100644 index 000000000..27027f38c --- /dev/null +++ b/neutron/tests/unit/vmware/vshield/test_loadbalancer_driver.py @@ -0,0 +1,340 @@ +# Copyright 2013 VMware, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# +# @author: linb, VMware + +import mock + +from neutron import context +from neutron.openstack.common import uuidutils +from neutron.plugins.vmware.dbexts import vcns_db +from neutron.plugins.vmware.vshield.common import exceptions as vcns_exc +from neutron.plugins.vmware.vshield import vcns_driver +from neutron.services.loadbalancer import constants as lb_constants +from neutron.tests.unit.db.loadbalancer import test_db_loadbalancer +from neutron.tests.unit import vmware +from neutron.tests.unit.vmware.vshield import fake_vcns + +_uuid = uuidutils.generate_uuid + +VSE_ID = 'edge-1' +POOL_MAP_INFO = { + 'pool_id': None, + 'edge_id': VSE_ID, + 'pool_vseid': 'pool-1'} + +VCNS_CONFIG_FILE = vmware.get_fake_conf("vcns.ini.test") + + +class VcnsDriverTestCase(test_db_loadbalancer.LoadBalancerPluginDbTestCase): + + def vcns_loadbalancer_patch(self): + instance = self.mock_vcns.start() + instance.return_value.create_vip.side_effect = ( + self.fc2.create_vip) + instance.return_value.get_vip.side_effect = ( + self.fc2.get_vip) + instance.return_value.update_vip.side_effect = ( + self.fc2.update_vip) + instance.return_value.delete_vip.side_effect = ( + self.fc2.delete_vip) + instance.return_value.create_pool.side_effect = ( + self.fc2.create_pool) + instance.return_value.get_pool.side_effect = ( + self.fc2.get_pool) + instance.return_value.update_pool.side_effect = ( + self.fc2.update_pool) + instance.return_value.delete_pool.side_effect = ( + self.fc2.delete_pool) + instance.return_value.create_health_monitor.side_effect = ( + self.fc2.create_health_monitor) + instance.return_value.get_health_monitor.side_effect = ( + self.fc2.get_health_monitor) + instance.return_value.update_health_monitor.side_effect = ( + self.fc2.update_health_monitor) + instance.return_value.delete_health_monitor.side_effect = ( + self.fc2.delete_health_monitor) + instance.return_value.create_app_profile.side_effect = ( + self.fc2.create_app_profile) + 
instance.return_value.update_app_profile.side_effect = ( + self.fc2.update_app_profile) + instance.return_value.delete_app_profile.side_effect = ( + self.fc2.delete_app_profile) + self.pool_id = None + self.vip_id = None + + def setUp(self): + + self.config_parse(args=['--config-file', VCNS_CONFIG_FILE]) + # mock vcns + self.fc2 = fake_vcns.FakeVcns(unique_router_name=False) + self.mock_vcns = mock.patch(vmware.VCNS_NAME, autospec=True) + self.vcns_loadbalancer_patch() + + self.driver = vcns_driver.VcnsDriver(mock.Mock()) + + super(VcnsDriverTestCase, self).setUp() + self.addCleanup(self.fc2.reset_all) + self.addCleanup(self.mock_vcns.stop) + + def tearDown(self): + super(VcnsDriverTestCase, self).tearDown() + + +class TestEdgeLbDriver(VcnsDriverTestCase): + + def test_create_and_get_vip(self): + ctx = context.get_admin_context() + with self.pool(no_delete=True) as pool: + self.pool_id = pool['pool']['id'] + POOL_MAP_INFO['pool_id'] = pool['pool']['id'] + vcns_db.add_vcns_edge_pool_binding(ctx.session, POOL_MAP_INFO) + with self.vip(pool=pool) as res: + vip_create = res['vip'] + self.driver.create_vip(ctx, VSE_ID, vip_create) + vip_get = self.driver.get_vip(ctx, vip_create['id']) + for k, v in vip_get.iteritems(): + self.assertEqual(vip_create[k], v) + + def test_create_two_vips_with_same_name(self): + ctx = context.get_admin_context() + with self.pool(no_delete=True) as pool: + self.pool_id = pool['pool']['id'] + POOL_MAP_INFO['pool_id'] = pool['pool']['id'] + vcns_db.add_vcns_edge_pool_binding(ctx.session, POOL_MAP_INFO) + with self.vip(pool=pool) as res: + vip_create = res['vip'] + self.driver.create_vip(ctx, VSE_ID, vip_create) + self.assertRaises(vcns_exc.Forbidden, + self.driver.create_vip, + ctx, VSE_ID, vip_create) + + def test_convert_app_profile(self): + app_profile_name = 'app_profile_name' + sess_persist1 = {'type': "SOURCE_IP"} + sess_persist2 = {'type': "HTTP_COOKIE"} + sess_persist3 = {'type': "APP_COOKIE", + 'cookie_name': "app_cookie_name"} + # 
protocol is HTTP and type is SOURCE_IP + expect_vcns_app_profile1 = { + 'insertXForwardedFor': False, + 'name': app_profile_name, + 'serverSslEnabled': False, + 'sslPassthrough': False, + 'template': lb_constants.PROTOCOL_HTTP, + 'persistence': {'method': 'sourceip'}} + vcns_app_profile = self.driver._convert_app_profile( + app_profile_name, sess_persist1, lb_constants.PROTOCOL_HTTP) + for k, v in expect_vcns_app_profile1.iteritems(): + self.assertEqual(vcns_app_profile[k], v) + # protocol is HTTP and type is HTTP_COOKIE and APP_COOKIE + expect_vcns_app_profile2 = { + 'insertXForwardedFor': False, + 'name': app_profile_name, + 'serverSslEnabled': False, + 'sslPassthrough': False, + 'template': lb_constants.PROTOCOL_HTTP, + 'persistence': {'method': 'cookie', + 'cookieName': 'default_cookie_name', + 'cookieMode': 'insert'}} + vcns_app_profile = self.driver._convert_app_profile( + app_profile_name, sess_persist2, lb_constants.PROTOCOL_HTTP) + for k, v in expect_vcns_app_profile2.iteritems(): + self.assertEqual(vcns_app_profile[k], v) + expect_vcns_app_profile3 = { + 'insertXForwardedFor': False, + 'name': app_profile_name, + 'serverSslEnabled': False, + 'sslPassthrough': False, + 'template': lb_constants.PROTOCOL_HTTP, + 'persistence': {'method': 'cookie', + 'cookieName': sess_persist3['cookie_name'], + 'cookieMode': 'app'}} + vcns_app_profile = self.driver._convert_app_profile( + app_profile_name, sess_persist3, lb_constants.PROTOCOL_HTTP) + for k, v in expect_vcns_app_profile3.iteritems(): + self.assertEqual(vcns_app_profile[k], v) + # protocol is HTTPS and type is SOURCE_IP + expect_vcns_app_profile1 = { + 'insertXForwardedFor': False, + 'name': app_profile_name, + 'serverSslEnabled': False, + 'sslPassthrough': True, + 'template': lb_constants.PROTOCOL_HTTPS, + 'persistence': {'method': 'sourceip'}} + vcns_app_profile = self.driver._convert_app_profile( + app_profile_name, sess_persist1, lb_constants.PROTOCOL_HTTPS) + for k, v in 
expect_vcns_app_profile1.iteritems(): + self.assertEqual(vcns_app_profile[k], v) + # protocol is HTTPS, and type isn't SOURCE_IP + self.assertRaises(vcns_exc.VcnsBadRequest, + self.driver._convert_app_profile, + app_profile_name, + sess_persist2, lb_constants.PROTOCOL_HTTPS) + self.assertRaises(vcns_exc.VcnsBadRequest, + self.driver._convert_app_profile, + app_profile_name, + sess_persist3, lb_constants.PROTOCOL_HTTPS) + # protocol is TCP and type is SOURCE_IP + expect_vcns_app_profile1 = { + 'insertXForwardedFor': False, + 'name': app_profile_name, + 'serverSslEnabled': False, + 'sslPassthrough': False, + 'template': lb_constants.PROTOCOL_TCP, + 'persistence': {'method': 'sourceip'}} + vcns_app_profile = self.driver._convert_app_profile( + app_profile_name, sess_persist1, lb_constants.PROTOCOL_TCP) + for k, v in expect_vcns_app_profile1.iteritems(): + self.assertEqual(vcns_app_profile[k], v) + # protocol is TCP, and type isn't SOURCE_IP + self.assertRaises(vcns_exc.VcnsBadRequest, + self.driver._convert_app_profile, + app_profile_name, + sess_persist2, lb_constants.PROTOCOL_TCP) + self.assertRaises(vcns_exc.VcnsBadRequest, + self.driver._convert_app_profile, + app_profile_name, + sess_persist3, lb_constants.PROTOCOL_TCP) + + def test_update_vip(self): + ctx = context.get_admin_context() + with self.pool(no_delete=True) as pool: + self.pool_id = pool['pool']['id'] + POOL_MAP_INFO['pool_id'] = pool['pool']['id'] + vcns_db.add_vcns_edge_pool_binding(ctx.session, POOL_MAP_INFO) + with self.vip(pool=pool) as res: + vip_create = res['vip'] + self.driver.create_vip(ctx, VSE_ID, vip_create) + vip_update = {'id': vip_create['id'], + 'pool_id': pool['pool']['id'], + 'name': 'update_name', + 'description': 'description', + 'address': 'update_address', + 'port_id': 'update_port_id', + 'protocol_port': 'protocol_port', + 'protocol': 'update_protocol'} + self.driver.update_vip(ctx, vip_update) + vip_get = self.driver.get_vip(ctx, vip_create['id']) + for k, v in 
vip_get.iteritems(): + if k in vip_update: + self.assertEqual(vip_update[k], v) + + def test_delete_vip(self): + ctx = context.get_admin_context() + with self.pool(no_delete=True) as pool: + self.pool_id = pool['pool']['id'] + POOL_MAP_INFO['pool_id'] = pool['pool']['id'] + vcns_db.add_vcns_edge_pool_binding(ctx.session, POOL_MAP_INFO) + with self.vip(pool=pool) as res: + vip_create = res['vip'] + self.driver.create_vip(ctx, VSE_ID, vip_create) + self.driver.delete_vip(ctx, vip_create['id']) + self.assertRaises(vcns_exc.VcnsNotFound, + self.driver.get_vip, + ctx, + vip_create['id']) + + #Test Pool Operation + def test_create_and_get_pool(self): + ctx = context.get_admin_context() + with self.pool(no_delete=True) as p: + self.pool_id = p['pool']['id'] + pool_create = p['pool'] + self.driver.create_pool(ctx, VSE_ID, pool_create, []) + pool_get = self.driver.get_pool(ctx, pool_create['id'], VSE_ID) + for k, v in pool_get.iteritems(): + self.assertEqual(pool_create[k], v) + + def test_create_two_pools_with_same_name(self): + ctx = context.get_admin_context() + with self.pool(no_delete=True) as p: + self.pool_id = p['pool']['id'] + pool_create = p['pool'] + self.driver.create_pool(ctx, VSE_ID, pool_create, []) + self.assertRaises(vcns_exc.Forbidden, + self.driver.create_pool, + ctx, VSE_ID, pool_create, []) + + def test_update_pool(self): + ctx = context.get_admin_context() + with self.pool(no_delete=True) as p: + self.pool_id = p['pool']['id'] + pool_create = p['pool'] + self.driver.create_pool(ctx, VSE_ID, pool_create, []) + pool_update = {'id': pool_create['id'], + 'lb_method': 'lb_method', + 'name': 'update_name', + 'members': [], + 'health_monitors': []} + self.driver.update_pool(ctx, VSE_ID, pool_update, []) + pool_get = self.driver.get_pool(ctx, pool_create['id'], VSE_ID) + for k, v in pool_get.iteritems(): + if k in pool_update: + self.assertEqual(pool_update[k], v) + + def test_delete_pool(self): + ctx = context.get_admin_context() + with 
self.pool(no_delete=True) as p: + self.pool_id = p['pool']['id'] + pool_create = p['pool'] + self.driver.create_pool(ctx, VSE_ID, pool_create, []) + self.driver.delete_pool(ctx, pool_create['id'], VSE_ID) + self.assertRaises(vcns_exc.VcnsNotFound, + self.driver.get_pool, + ctx, + pool_create['id'], + VSE_ID) + + def test_create_and_get_monitor(self): + ctx = context.get_admin_context() + with self.health_monitor(no_delete=True) as m: + monitor_create = m['health_monitor'] + self.driver.create_health_monitor(ctx, VSE_ID, monitor_create) + monitor_get = self.driver.get_health_monitor( + ctx, monitor_create['id'], VSE_ID) + for k, v in monitor_get.iteritems(): + self.assertEqual(monitor_create[k], v) + + def test_update_health_monitor(self): + ctx = context.get_admin_context() + with self.health_monitor(no_delete=True) as m: + monitor_create = m['health_monitor'] + self.driver.create_health_monitor( + ctx, VSE_ID, monitor_create) + monitor_update = {'id': monitor_create['id'], + 'delay': 'new_delay', + 'timeout': "new_timeout", + 'type': 'type', + 'max_retries': "max_retries"} + self.driver.update_health_monitor( + ctx, VSE_ID, monitor_create, monitor_update) + monitor_get = self.driver.get_health_monitor( + ctx, monitor_create['id'], VSE_ID) + for k, v in monitor_get.iteritems(): + if k in monitor_update: + self.assertEqual(monitor_update[k], v) + + def test_delete_health_monitor(self): + ctx = context.get_admin_context() + with self.health_monitor(no_delete=True) as m: + monitor_create = m['health_monitor'] + self.driver.create_health_monitor(ctx, VSE_ID, monitor_create) + self.driver.delete_health_monitor( + ctx, monitor_create['id'], VSE_ID) + self.assertRaises(vcns_exc.VcnsNotFound, + self.driver.get_health_monitor, + ctx, + monitor_create['id'], + VSE_ID) diff --git a/neutron/tests/unit/vmware/vshield/test_vcns_driver.py b/neutron/tests/unit/vmware/vshield/test_vcns_driver.py new file mode 100644 index 000000000..c0451715f --- /dev/null +++ 
b/neutron/tests/unit/vmware/vshield/test_vcns_driver.py @@ -0,0 +1,587 @@ +# Copyright 2013 OpenStack Foundation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from eventlet import greenthread +import mock + +from neutron.plugins.vmware.vshield.common import constants as vcns_const +from neutron.plugins.vmware.vshield.tasks import constants as ts_const +from neutron.plugins.vmware.vshield.tasks import tasks as ts +from neutron.plugins.vmware.vshield import vcns_driver +from neutron.tests import base +from neutron.tests.unit import vmware +from neutron.tests.unit.vmware.vshield import fake_vcns + +VCNS_CONFIG_FILE = vmware.get_fake_conf("vcns.ini.test") + +ts.TaskManager.set_default_interval(100) + + +class VcnsDriverTaskManagerTestCase(base.BaseTestCase): + + def setUp(self): + super(VcnsDriverTaskManagerTestCase, self).setUp() + self.manager = ts.TaskManager() + self.manager.start(100) + + def tearDown(self): + self.manager.stop() + # Task manager should not leave running threads around + # if _thread is None it means it was killed in stop() + self.assertIsNone(self.manager._thread) + super(VcnsDriverTaskManagerTestCase, self).tearDown() + + def _test_task_manager_task_process_state(self, sync_exec=False): + def _task_failed(task, reason): + task.userdata['result'] = False + task.userdata['error'] = reason + + def _check_state(task, exp_state): + if not task.userdata.get('result', True): + return False + + state = task.userdata['state'] + if state != 
exp_state: + msg = "state %d expect %d" % ( + state, exp_state) + _task_failed(task, msg) + return False + + task.userdata['state'] = state + 1 + return True + + def _exec(task): + if not _check_state(task, 1): + return ts_const.TaskStatus.ERROR + + if task.userdata['sync_exec']: + return ts_const.TaskStatus.COMPLETED + else: + return ts_const.TaskStatus.PENDING + + def _status(task): + if task.userdata['sync_exec']: + _task_failed(task, "_status callback triggered") + + state = task.userdata['state'] + if state == 3: + _check_state(task, 3) + return ts_const.TaskStatus.PENDING + else: + _check_state(task, 4) + return ts_const.TaskStatus.COMPLETED + + def _result(task): + if task.userdata['sync_exec']: + exp_state = 3 + else: + exp_state = 5 + + _check_state(task, exp_state) + + def _start_monitor(task): + _check_state(task, 0) + + def _executed_monitor(task): + _check_state(task, 2) + + def _result_monitor(task): + if task.userdata['sync_exec']: + exp_state = 4 + else: + exp_state = 6 + + if _check_state(task, exp_state): + task.userdata['result'] = True + else: + task.userdata['result'] = False + + userdata = { + 'state': 0, + 'sync_exec': sync_exec + } + task = ts.Task('name', 'res', _exec, _status, _result, userdata) + task.add_start_monitor(_start_monitor) + task.add_executed_monitor(_executed_monitor) + task.add_result_monitor(_result_monitor) + + self.manager.add(task) + + task.wait(ts_const.TaskState.RESULT) + + self.assertTrue(userdata['result']) + + def test_task_manager_task_sync_exec_process_state(self): + self._test_task_manager_task_process_state(sync_exec=True) + + def test_task_manager_task_async_exec_process_state(self): + self._test_task_manager_task_process_state(sync_exec=False) + + def test_task_manager_task_ordered_process(self): + def _task_failed(task, reason): + task.userdata['result'] = False + task.userdata['error'] = reason + + def _exec(task): + task.userdata['executed'] = True + return ts_const.TaskStatus.PENDING + + def _status(task): 
+ return ts_const.TaskStatus.COMPLETED + + def _result(task): + next_task = task.userdata.get('next') + if next_task: + if next_task.userdata.get('executed'): + _task_failed(next_task, "executed premature") + if task.userdata.get('result', True): + task.userdata['result'] = True + + tasks = [] + prev = None + last_task = None + for i in range(5): + name = "name-%d" % i + task = ts.Task(name, 'res', _exec, _status, _result, {}) + tasks.append(task) + if prev: + prev.userdata['next'] = task + prev = task + last_task = task + + for task in tasks: + self.manager.add(task) + + last_task.wait(ts_const.TaskState.RESULT) + + for task in tasks: + self.assertTrue(task.userdata['result']) + + def test_task_manager_task_parallel_process(self): + tasks = [] + + def _exec(task): + task.userdata['executed'] = True + return ts_const.TaskStatus.PENDING + + def _status(task): + for t in tasks: + if not t.userdata.get('executed'): + t.userdata['result'] = False + return ts_const.TaskStatus.COMPLETED + + def _result(task): + if (task.userdata.get('result') is None and + task.status == ts_const.TaskStatus.COMPLETED): + task.userdata['result'] = True + else: + task.userdata['result'] = False + + for i in range(5): + name = "name-%d" % i + res = 'resource-%d' % i + task = ts.Task(name, res, _exec, _status, _result, {}) + tasks.append(task) + self.manager.add(task) + + for task in tasks: + task.wait(ts_const.TaskState.RESULT) + self.assertTrue(task.userdata['result']) + + def _test_task_manager_stop(self, exec_wait=False, result_wait=False, + stop_wait=0): + def _exec(task): + if exec_wait: + greenthread.sleep(0.01) + return ts_const.TaskStatus.PENDING + + def _status(task): + greenthread.sleep(0.01) + return ts_const.TaskStatus.PENDING + + def _result(task): + if result_wait: + greenthread.sleep(0) + pass + + manager = ts.TaskManager().start(100) + manager.stop() + # Task manager should not leave running threads around + # if _thread is None it means it was killed in stop() + 
self.assertIsNone(manager._thread) + manager.start(100) + + alltasks = {} + for i in range(100): + res = 'res-%d' % i + tasks = [] + for i in range(100): + task = ts.Task('name', res, _exec, _status, _result) + manager.add(task) + tasks.append(task) + alltasks[res] = tasks + + greenthread.sleep(stop_wait) + manager.stop() + # Task manager should not leave running threads around + # if _thread is None it means it was killed in stop() + self.assertIsNone(manager._thread) + + for res, tasks in alltasks.iteritems(): + for task in tasks: + self.assertEqual(task.status, ts_const.TaskStatus.ABORT) + + def test_task_manager_stop_1(self): + self._test_task_manager_stop(True, True, 0) + + def test_task_manager_stop_2(self): + self._test_task_manager_stop(True, True, 1) + + def test_task_manager_stop_3(self): + self._test_task_manager_stop(False, False, 0) + + def test_task_manager_stop_4(self): + self._test_task_manager_stop(False, False, 1) + + def test_task_pending_task(self): + def _exec(task): + task.userdata['executing'] = True + while not task.userdata['tested']: + greenthread.sleep(0) + task.userdata['executing'] = False + return ts_const.TaskStatus.COMPLETED + + userdata = { + 'executing': False, + 'tested': False + } + manager = ts.TaskManager().start(100) + task = ts.Task('name', 'res', _exec, userdata=userdata) + manager.add(task) + + while not userdata['executing']: + greenthread.sleep(0) + self.assertTrue(manager.has_pending_task()) + + userdata['tested'] = True + while userdata['executing']: + greenthread.sleep(0) + self.assertFalse(manager.has_pending_task()) + + +class VcnsDriverTestCase(base.BaseTestCase): + + def vcns_patch(self): + instance = self.mock_vcns.start() + instance.return_value.deploy_edge.side_effect = self.fc.deploy_edge + instance.return_value.get_edge_id.side_effect = self.fc.get_edge_id + instance.return_value.get_edge_deploy_status.side_effect = ( + self.fc.get_edge_deploy_status) + instance.return_value.delete_edge.side_effect = 
self.fc.delete_edge + instance.return_value.update_interface.side_effect = ( + self.fc.update_interface) + instance.return_value.get_nat_config.side_effect = ( + self.fc.get_nat_config) + instance.return_value.update_nat_config.side_effect = ( + self.fc.update_nat_config) + instance.return_value.delete_nat_rule.side_effect = ( + self.fc.delete_nat_rule) + instance.return_value.get_edge_status.side_effect = ( + self.fc.get_edge_status) + instance.return_value.get_edges.side_effect = self.fc.get_edges + instance.return_value.update_routes.side_effect = ( + self.fc.update_routes) + instance.return_value.create_lswitch.side_effect = ( + self.fc.create_lswitch) + instance.return_value.delete_lswitch.side_effect = ( + self.fc.delete_lswitch) + + def setUp(self): + super(VcnsDriverTestCase, self).setUp() + + self.config_parse(args=['--config-file', VCNS_CONFIG_FILE]) + + self.fc = fake_vcns.FakeVcns() + self.mock_vcns = mock.patch(vmware.VCNS_NAME, autospec=True) + self.vcns_patch() + + self.addCleanup(self.fc.reset_all) + + self.vcns_driver = vcns_driver.VcnsDriver(self) + + self.edge_id = None + self.result = None + + def tearDown(self): + self.vcns_driver.task_manager.stop() + # Task manager should not leave running threads around + # if _thread is None it means it was killed in stop() + self.assertIsNone(self.vcns_driver.task_manager._thread) + super(VcnsDriverTestCase, self).tearDown() + + def _deploy_edge(self): + task = self.vcns_driver.deploy_edge( + 'router-id', 'myedge', 'internal-network', {}, wait_for_exec=True) + self.assertEqual(self.edge_id, 'edge-1') + task.wait(ts_const.TaskState.RESULT) + return task + + def edge_deploy_started(self, task): + self.edge_id = task.userdata['edge_id'] + + def edge_deploy_result(self, task): + if task.status == ts_const.TaskStatus.COMPLETED: + task.userdata['jobdata']['edge_deploy_result'] = True + + def edge_delete_result(self, task): + if task.status == ts_const.TaskStatus.COMPLETED: + 
task.userdata['jobdata']['edge_delete_result'] = True + + def snat_create_result(self, task): + if task.status == ts_const.TaskStatus.COMPLETED: + task.userdata['jobdata']['snat_create_result'] = True + + def snat_delete_result(self, task): + if task.status == ts_const.TaskStatus.COMPLETED: + task.userdata['jobdata']['snat_delete_result'] = True + + def dnat_create_result(self, task): + if task.status == ts_const.TaskStatus.COMPLETED: + task.userdata['jobdata']['dnat_create_result'] = True + + def dnat_delete_result(self, task): + if task.status == ts_const.TaskStatus.COMPLETED: + task.userdata['jobdata']['dnat_delete_result'] = True + + def nat_update_result(self, task): + if task.status == ts_const.TaskStatus.COMPLETED: + task.userdata['jobdata']['nat_update_result'] = True + + def routes_update_result(self, task): + if task.status == ts_const.TaskStatus.COMPLETED: + task.userdata['jobdata']['routes_update_result'] = True + + def interface_update_result(self, task): + if task.status == ts_const.TaskStatus.COMPLETED: + task.userdata['jobdata']['interface_update_result'] = True + + def test_deploy_edge(self): + jobdata = {} + task = self.vcns_driver.deploy_edge( + 'router-id', 'myedge', 'internal-network', jobdata=jobdata, + wait_for_exec=True) + self.assertEqual(self.edge_id, 'edge-1') + task.wait(ts_const.TaskState.RESULT) + self.assertEqual(task.status, ts_const.TaskStatus.COMPLETED) + self.assertTrue(jobdata.get('edge_deploy_result')) + + def test_deploy_edge_fail(self): + task1 = self.vcns_driver.deploy_edge( + 'router-1', 'myedge', 'internal-network', {}, wait_for_exec=True) + task2 = self.vcns_driver.deploy_edge( + 'router-2', 'myedge', 'internal-network', {}, wait_for_exec=True) + task1.wait(ts_const.TaskState.RESULT) + task2.wait(ts_const.TaskState.RESULT) + self.assertEqual(task2.status, ts_const.TaskStatus.ERROR) + + def test_get_edge_status(self): + self._deploy_edge() + status = self.vcns_driver.get_edge_status(self.edge_id) + self.assertEqual(status, 
vcns_const.RouterStatus.ROUTER_STATUS_ACTIVE) + + def test_get_edges(self): + self._deploy_edge() + edges = self.vcns_driver.get_edges_statuses() + found = False + for edge_id, status in edges.iteritems(): + if edge_id == self.edge_id: + found = True + break + self.assertTrue(found) + + def _create_nat_rule(self, edge_id, action, org, translated): + jobdata = {} + if action == 'snat': + task = self.vcns_driver.create_snat_rule( + 'router-id', edge_id, org, translated, jobdata=jobdata) + key = 'snat_create_result' + else: + task = self.vcns_driver.create_dnat_rule( + 'router-id', edge_id, org, translated, jobdata=jobdata) + key = 'dnat_create_result' + task.wait(ts_const.TaskState.RESULT) + self.assertTrue(jobdata.get(key)) + + def _delete_nat_rule(self, edge_id, action, addr): + jobdata = {} + if action == 'snat': + task = self.vcns_driver.delete_snat_rule( + 'router-id', edge_id, addr, jobdata=jobdata) + key = 'snat_delete_result' + else: + task = self.vcns_driver.delete_dnat_rule( + 'router-id', edge_id, addr, jobdata=jobdata) + key = 'dnat_delete_result' + task.wait(ts_const.TaskState.RESULT) + self.assertTrue(jobdata.get(key)) + + def _test_create_nat_rule(self, action): + self._deploy_edge() + addr = '192.168.1.1' + translated = '10.0.0.1' + self._create_nat_rule(self.edge_id, action, addr, translated) + + natcfg = self.vcns_driver.get_nat_config(self.edge_id) + for rule in natcfg['rules']['natRulesDtos']: + if (rule['originalAddress'] == addr and + rule['translatedAddress'] == translated and + rule['action'] == action): + break + else: + self.assertTrue(False) + + def _test_delete_nat_rule(self, action): + self._deploy_edge() + addr = '192.168.1.1' + translated = '10.0.0.1' + self._create_nat_rule(self.edge_id, action, addr, translated) + if action == 'snat': + self._delete_nat_rule(self.edge_id, action, addr) + else: + self._delete_nat_rule(self.edge_id, action, translated) + natcfg = self.vcns_driver.get_nat_config(self.edge_id) + for rule in 
natcfg['rules']['natRulesDtos']: + if (rule['originalAddress'] == addr and + rule['translatedAddress'] == translated and + rule['action'] == action): + self.assertTrue(False) + break + + def test_create_snat_rule(self): + self._test_create_nat_rule('snat') + + def test_delete_snat_rule(self): + self._test_delete_nat_rule('snat') + + def test_create_dnat_rule(self): + self._test_create_nat_rule('dnat') + + def test_delete_dnat_rule(self): + self._test_delete_nat_rule('dnat') + + def test_update_nat_rules(self): + self._deploy_edge() + jobdata = {} + snats = [{ + 'src': '192.168.1.0/24', + 'translated': '10.0.0.1' + }, { + 'src': '192.168.2.0/24', + 'translated': '10.0.0.2' + }, { + 'src': '192.168.3.0/24', + 'translated': '10.0.0.3' + } + ] + dnats = [{ + 'dst': '100.0.0.4', + 'translated': '192.168.1.1' + }, { + 'dst': '100.0.0.5', + 'translated': '192.168.2.1' + } + ] + task = self.vcns_driver.update_nat_rules( + 'router-id', self.edge_id, snats, dnats, jobdata=jobdata) + task.wait(ts_const.TaskState.RESULT) + self.assertTrue(jobdata.get('nat_update_result')) + + natcfg = self.vcns_driver.get_nat_config(self.edge_id) + rules = natcfg['rules']['natRulesDtos'] + self.assertEqual(len(rules), 2 * len(dnats) + len(snats)) + self.natEquals(rules[0], dnats[0]) + self.natEquals(rules[1], self.snat_for_dnat(dnats[0])) + self.natEquals(rules[2], dnats[1]) + self.natEquals(rules[3], self.snat_for_dnat(dnats[1])) + self.natEquals(rules[4], snats[0]) + self.natEquals(rules[5], snats[1]) + self.natEquals(rules[6], snats[2]) + + def snat_for_dnat(self, dnat): + return { + 'src': dnat['translated'], + 'translated': dnat['dst'] + } + + def natEquals(self, rule, exp): + addr = exp.get('src') + if not addr: + addr = exp.get('dst') + + self.assertEqual(rule['originalAddress'], addr) + self.assertEqual(rule['translatedAddress'], exp['translated']) + + def test_update_routes(self): + self._deploy_edge() + jobdata = {} + routes = [{ + 'cidr': '192.168.1.0/24', + 'nexthop': '169.254.2.1' 
+ }, { + 'cidr': '192.168.2.0/24', + 'nexthop': '169.254.2.1' + }, { + 'cidr': '192.168.3.0/24', + 'nexthop': '169.254.2.1' + } + ] + task = self.vcns_driver.update_routes( + 'router-id', self.edge_id, '10.0.0.1', routes, jobdata=jobdata) + task.wait(ts_const.TaskState.RESULT) + self.assertTrue(jobdata.get('routes_update_result')) + + def test_update_interface(self): + self._deploy_edge() + jobdata = {} + task = self.vcns_driver.update_interface( + 'router-id', self.edge_id, vcns_const.EXTERNAL_VNIC_INDEX, + 'network-id', address='100.0.0.3', netmask='255.255.255.0', + jobdata=jobdata) + task.wait(ts_const.TaskState.RESULT) + self.assertTrue(jobdata.get('interface_update_result')) + + def test_delete_edge(self): + self._deploy_edge() + jobdata = {} + task = self.vcns_driver.delete_edge( + 'router-id', self.edge_id, jobdata=jobdata) + task.wait(ts_const.TaskState.RESULT) + self.assertTrue(jobdata.get('edge_delete_result')) + + def test_create_lswitch(self): + tz_config = [{ + 'transport_zone_uuid': 'tz-uuid' + }] + lswitch = self.vcns_driver.create_lswitch('lswitch', tz_config) + self.assertEqual(lswitch['display_name'], 'lswitch') + self.assertEqual(lswitch['type'], 'LogicalSwitchConfig') + self.assertIn('uuid', lswitch) + + def test_delete_lswitch(self): + tz_config = { + 'transport_zone_uuid': 'tz-uuid' + } + lswitch = self.vcns_driver.create_lswitch('lswitch', tz_config) + self.vcns_driver.delete_lswitch(lswitch['uuid']) diff --git a/neutron/tests/unit/vmware/vshield/test_vpnaas_plugin.py b/neutron/tests/unit/vmware/vshield/test_vpnaas_plugin.py new file mode 100644 index 000000000..7ef1a5445 --- /dev/null +++ b/neutron/tests/unit/vmware/vshield/test_vpnaas_plugin.py @@ -0,0 +1,417 @@ +# Copyright 2014 VMware, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import contextlib +import copy + +import webob.exc + +from neutron.api.v2 import attributes +from neutron.db.vpn import vpn_db +from neutron.extensions import vpnaas +from neutron import manager +from neutron.openstack.common import uuidutils +from neutron.tests.unit.db.vpn import test_db_vpnaas +from neutron.tests.unit.vmware.vshield import test_edge_router + +_uuid = uuidutils.generate_uuid + + +class VPNTestExtensionManager( + test_edge_router.ServiceRouterTestExtensionManager): + + def get_resources(self): + # If l3 resources have been loaded and updated by main API + # router, update the map in the l3 extension so it will load + # the same attributes as the API router + resources = super(VPNTestExtensionManager, self).get_resources() + vpn_attr_map = copy.deepcopy(vpnaas.RESOURCE_ATTRIBUTE_MAP) + for res in vpnaas.RESOURCE_ATTRIBUTE_MAP.keys(): + attr_info = attributes.RESOURCE_ATTRIBUTE_MAP.get(res) + if attr_info: + vpnaas.RESOURCE_ATTRIBUTE_MAP[res] = attr_info + vpn_resources = vpnaas.Vpnaas.get_resources() + # restore the original resources once the controllers are created + vpnaas.RESOURCE_ATTRIBUTE_MAP = vpn_attr_map + resources.extend(vpn_resources) + return resources + + +class TestVpnPlugin(test_db_vpnaas.VPNTestMixin, + test_edge_router.ServiceRouterTest): + + def vcns_vpn_patch(self): + instance = self.vcns_instance + instance.return_value.update_ipsec_config.side_effect = ( + self.fc2.update_ipsec_config) + instance.return_value.get_ipsec_config.side_effect = ( + self.fc2.get_ipsec_config) + 
instance.return_value.delete_ipsec_config.side_effect = ( + self.fc2.delete_ipsec_config) + + def setUp(self): + # Save the global RESOURCE_ATTRIBUTE_MAP + self.saved_attr_map = {} + for resource, attrs in attributes.RESOURCE_ATTRIBUTE_MAP.items(): + self.saved_attr_map[resource] = attrs.copy() + + super(TestVpnPlugin, self).setUp(ext_mgr=VPNTestExtensionManager()) + self.vcns_vpn_patch() + self.plugin = manager.NeutronManager.get_plugin() + self.router_id = None + + def tearDown(self): + super(TestVpnPlugin, self).tearDown() + # Restore the global RESOURCE_ATTRIBUTE_MAP + attributes.RESOURCE_ATTRIBUTE_MAP = self.saved_attr_map + self.ext_api = None + self.plugin = None + + @contextlib.contextmanager + def router(self, vlan_id=None, do_delete=True, **kwargs): + with self._create_l3_ext_network(vlan_id) as net: + with self.subnet(cidr='100.0.0.0/24', network=net) as s: + router_id = self._create_and_get_router(**kwargs) + self._add_external_gateway_to_router( + router_id, s['subnet']['network_id']) + router = self._show('routers', router_id) + yield router + if do_delete: + self._remove_external_gateway_from_router( + router_id, s['subnet']['network_id']) + self._delete('routers', router_id) + + def test_create_vpnservice(self, **extras): + """Test case to create a vpnservice.""" + description = 'my-vpn-service' + expected = {'name': 'vpnservice1', + 'description': 'my-vpn-service', + 'admin_state_up': True, + 'status': 'ACTIVE', + 'tenant_id': self._tenant_id, } + + expected.update(extras) + with self.subnet(cidr='10.2.0.0/24') as subnet: + with self.router() as router: + expected['router_id'] = router['router']['id'] + expected['subnet_id'] = subnet['subnet']['id'] + name = expected['name'] + with self.vpnservice(name=name, + subnet=subnet, + router=router, + description=description, + **extras) as vpnservice: + self.assertEqual(dict((k, v) for k, v in + vpnservice['vpnservice'].items() + if k in expected), + expected) + + def 
test_create_vpnservices_with_same_router(self, **extras): + """Test case to create two vpnservices with same router.""" + with self.subnet(cidr='10.2.0.0/24') as subnet: + with self.router() as router: + with self.vpnservice(name='vpnservice1', + subnet=subnet, + router=router): + res = self._create_vpnservice( + 'json', 'vpnservice2', True, + router_id=(router['router']['id']), + subnet_id=(subnet['subnet']['id'])) + self.assertEqual( + res.status_int, webob.exc.HTTPConflict.code) + + def test_create_vpnservice_with_invalid_router(self): + """Test case to create vpnservices with invalid router.""" + with self.subnet(cidr='10.2.0.0/24') as subnet: + with contextlib.nested( + self.router(arg_list=('service_router',), + service_router=False), + self.router(active_set=False)) as (r1, r2): + res = self._create_vpnservice( + 'json', 'vpnservice', True, + router_id='invalid_id', + subnet_id=(subnet['subnet']['id'])) + self.assertEqual( + res.status_int, webob.exc.HTTPBadRequest.code) + res = self._create_vpnservice( + 'json', 'vpnservice', True, + router_id=r1['router']['id'], + subnet_id=(subnet['subnet']['id'])) + self.assertEqual( + res.status_int, webob.exc.HTTPBadRequest.code) + res = self._create_vpnservice( + 'json', 'vpnservice', True, + router_id=r2['router']['id'], + subnet_id=(subnet['subnet']['id'])) + self.assertEqual( + res.status_int, + webob.exc.HTTPServiceUnavailable.code) + + def test_update_vpnservice(self): + """Test case to update a vpnservice.""" + name = 'new_vpnservice1' + expected = [('name', name)] + with contextlib.nested( + self.subnet(cidr='10.2.0.0/24'), + self.router()) as (subnet, router): + with self.vpnservice(name=name, + subnet=subnet, + router=router) as vpnservice: + expected.append(('subnet_id', + vpnservice['vpnservice']['subnet_id'])) + expected.append(('router_id', + vpnservice['vpnservice']['router_id'])) + data = {'vpnservice': {'name': name, + 'admin_state_up': False}} + expected.append(('admin_state_up', False)) + 
self._set_active(vpn_db.VPNService, + vpnservice['vpnservice']['id']) + req = self.new_update_request( + 'vpnservices', + data, + vpnservice['vpnservice']['id']) + res = self.deserialize(self.fmt, + req.get_response(self.ext_api)) + for k, v in expected: + self.assertEqual(res['vpnservice'][k], v) + + def test_delete_vpnservice(self): + """Test case to delete a vpnservice.""" + with self.subnet(cidr='10.2.0.0/24') as subnet: + with self.router() as router: + with self.vpnservice(name='vpnservice', + subnet=subnet, + router=router, + no_delete=True) as vpnservice: + req = self.new_delete_request( + 'vpnservices', vpnservice['vpnservice']['id']) + res = req.get_response(self.ext_api) + self.assertEqual(res.status_int, 204) + + def test_delete_router_in_use_by_vpnservice(self): + """Test delete router in use by vpn service.""" + with self.subnet(cidr='10.2.0.0/24') as subnet: + with self.router() as router: + with self.vpnservice(subnet=subnet, + router=router): + self._delete('routers', router['router']['id'], + expected_code=webob.exc.HTTPConflict.code) + + def _test_create_ipsec_site_connection(self, key_overrides=None, + ike_key_overrides=None, + ipsec_key_overrides=None, + setup_overrides=None, + expected_status_int=200): + """Create ipsec_site_connection and check results.""" + params = {'ikename': 'ikepolicy1', + 'ipsecname': 'ipsecpolicy1', + 'vpnsname': 'vpnservice1', + 'subnet_cidr': '10.2.0.0/24', + 'subnet_version': 4} + if setup_overrides: + params.update(setup_overrides) + expected = {'name': 'connection1', + 'description': 'my-ipsec-connection', + 'peer_address': '192.168.1.10', + 'peer_id': '192.168.1.10', + 'peer_cidrs': ['192.168.2.0/24', '192.168.3.0/24'], + 'initiator': 'bi-directional', + 'mtu': 1500, + 'tenant_id': self._tenant_id, + 'psk': 'abcd', + 'status': 'ACTIVE', + 'admin_state_up': True} + if key_overrides: + expected.update(key_overrides) + + ike_expected = {'name': params['ikename'], + 'auth_algorithm': 'sha1', + 'encryption_algorithm': 
'aes-128', + 'ike_version': 'v1', + 'pfs': 'group5'} + if ike_key_overrides: + ike_expected.update(ike_key_overrides) + + ipsec_expected = {'name': params['ipsecname'], + 'auth_algorithm': 'sha1', + 'encryption_algorithm': 'aes-128', + 'pfs': 'group5'} + if ipsec_key_overrides: + ipsec_expected.update(ipsec_key_overrides) + + dpd = {'action': 'hold', + 'interval': 40, + 'timeout': 120} + with contextlib.nested( + self.ikepolicy(self.fmt, ike_expected['name'], + ike_expected['auth_algorithm'], + ike_expected['encryption_algorithm'], + ike_version=ike_expected['ike_version'], + pfs=ike_expected['pfs']), + self.ipsecpolicy(self.fmt, ipsec_expected['name'], + ipsec_expected['auth_algorithm'], + ipsec_expected['encryption_algorithm'], + pfs=ipsec_expected['pfs']), + self.subnet(cidr=params['subnet_cidr'], + ip_version=params['subnet_version']), + self.router()) as ( + ikepolicy, ipsecpolicy, subnet, router): + with self.vpnservice(name=params['vpnsname'], subnet=subnet, + router=router) as vpnservice1: + expected['ikepolicy_id'] = ikepolicy['ikepolicy']['id'] + expected['ipsecpolicy_id'] = ( + ipsecpolicy['ipsecpolicy']['id'] + ) + expected['vpnservice_id'] = ( + vpnservice1['vpnservice']['id'] + ) + try: + with self.ipsec_site_connection( + self.fmt, + expected['name'], + expected['peer_address'], + expected['peer_id'], + expected['peer_cidrs'], + expected['mtu'], + expected['psk'], + expected['initiator'], + dpd['action'], + dpd['interval'], + dpd['timeout'], + vpnservice1, + ikepolicy, + ipsecpolicy, + expected['admin_state_up'], + description=expected['description'] + ) as ipsec_site_connection: + if expected_status_int != 200: + self.fail("Expected failure on create") + self._check_ipsec_site_connection( + ipsec_site_connection['ipsec_site_connection'], + expected, + dpd) + except webob.exc.HTTPClientError as ce: + self.assertEqual(ce.code, expected_status_int) + + def test_create_ipsec_site_connection(self, **extras): + """Test case to create an 
ipsec_site_connection.""" + self._test_create_ipsec_site_connection(key_overrides=extras) + + def test_create_ipsec_site_connection_invalid_ikepolicy(self): + self._test_create_ipsec_site_connection( + ike_key_overrides={'ike_version': 'v2'}, + expected_status_int=400) + + def test_create_ipsec_site_connection_invalid_ipsecpolicy(self): + self._test_create_ipsec_site_connection( + ipsec_key_overrides={'encryption_algorithm': 'aes-192'}, + expected_status_int=400) + self._test_create_ipsec_site_connection( + ipsec_key_overrides={'pfs': 'group14'}, + expected_status_int=400) + + def _test_update_ipsec_site_connection(self, + update={'name': 'new name'}, + overrides=None, + expected_status_int=200): + """Creates and then updates ipsec_site_connection.""" + expected = {'name': 'new_ipsec_site_connection', + 'ikename': 'ikepolicy1', + 'ipsecname': 'ipsecpolicy1', + 'vpnsname': 'vpnservice1', + 'description': 'my-ipsec-connection', + 'peer_address': '192.168.1.10', + 'peer_id': '192.168.1.10', + 'peer_cidrs': ['192.168.2.0/24', '192.168.3.0/24'], + 'initiator': 'bi-directional', + 'mtu': 1500, + 'tenant_id': self._tenant_id, + 'psk': 'abcd', + 'status': 'ACTIVE', + 'admin_state_up': True, + 'action': 'hold', + 'interval': 40, + 'timeout': 120, + 'subnet_cidr': '10.2.0.0/24', + 'subnet_version': 4, + 'make_active': True} + if overrides: + expected.update(overrides) + + with contextlib.nested( + self.ikepolicy(name=expected['ikename']), + self.ipsecpolicy(name=expected['ipsecname']), + self.subnet(cidr=expected['subnet_cidr'], + ip_version=expected['subnet_version']), + self.router() + ) as (ikepolicy, ipsecpolicy, subnet, router): + with self.vpnservice(name=expected['vpnsname'], subnet=subnet, + router=router) as vpnservice1: + expected['vpnservice_id'] = vpnservice1['vpnservice']['id'] + expected['ikepolicy_id'] = ikepolicy['ikepolicy']['id'] + expected['ipsecpolicy_id'] = ipsecpolicy['ipsecpolicy']['id'] + with self.ipsec_site_connection( + self.fmt, + 
expected['name'], + expected['peer_address'], + expected['peer_id'], + expected['peer_cidrs'], + expected['mtu'], + expected['psk'], + expected['initiator'], + expected['action'], + expected['interval'], + expected['timeout'], + vpnservice1, + ikepolicy, + ipsecpolicy, + expected['admin_state_up'], + description=expected['description'] + ) as ipsec_site_connection: + data = {'ipsec_site_connection': update} + if expected.get('make_active'): + self._set_active( + vpn_db.IPsecSiteConnection, + (ipsec_site_connection['ipsec_site_connection'] + ['id'])) + req = self.new_update_request( + 'ipsec-site-connections', + data, + ipsec_site_connection['ipsec_site_connection']['id']) + res = req.get_response(self.ext_api) + self.assertEqual(expected_status_int, res.status_int) + if expected_status_int == 200: + res_dict = self.deserialize(self.fmt, res) + for k, v in update.items(): + self.assertEqual( + res_dict['ipsec_site_connection'][k], v) + + def test_update_ipsec_site_connection(self): + """Test case for valid updates to IPSec site connection.""" + dpd = {'action': 'hold', + 'interval': 40, + 'timeout': 120} + self._test_update_ipsec_site_connection(update={'dpd': dpd}) + self._test_update_ipsec_site_connection(update={'mtu': 2000}) + + def test_delete_ipsec_site_connection(self): + """Test case to delete a ipsec_site_connection.""" + with self.ipsec_site_connection( + no_delete=True) as ipsec_site_connection: + req = self.new_delete_request( + 'ipsec-site-connections', + ipsec_site_connection['ipsec_site_connection']['id'] + ) + res = req.get_response(self.ext_api) + self.assertEqual(res.status_int, 204) diff --git a/neutron/tests/var/ca.crt b/neutron/tests/var/ca.crt new file mode 100644 index 000000000..9d66ca627 --- /dev/null +++ b/neutron/tests/var/ca.crt @@ -0,0 +1,35 @@ +-----BEGIN CERTIFICATE----- +MIIGDDCCA/SgAwIBAgIJAPSvwQYk4qI4MA0GCSqGSIb3DQEBBQUAMGExCzAJBgNV +BAYTAkFVMRMwEQYDVQQIEwpTb21lLVN0YXRlMRUwEwYDVQQKEwxPcGVuc3RhY2sg 
+Q0ExEjAQBgNVBAsTCUdsYW5jZSBDQTESMBAGA1UEAxMJR2xhbmNlIENBMB4XDTEy +MDIwOTE3MTAwMloXDTIyMDIwNjE3MTAwMlowYTELMAkGA1UEBhMCQVUxEzARBgNV +BAgTClNvbWUtU3RhdGUxFTATBgNVBAoTDE9wZW5zdGFjayBDQTESMBAGA1UECxMJ +R2xhbmNlIENBMRIwEAYDVQQDEwlHbGFuY2UgQ0EwggIiMA0GCSqGSIb3DQEBAQUA +A4ICDwAwggIKAoICAQDmf+fapWfzy1Uylus0KGalw4X/5xZ+ltPVOr+IdCPbstvi +RTC5g+O+TvXeOP32V/cnSY4ho/+f2q730za+ZA/cgWO252rcm3Q7KTJn3PoqzJvX +/l3EXe3/TCrbzgZ7lW3QLTCTEE2eEzwYG3wfDTOyoBq+F6ct6ADh+86gmpbIRfYI +N+ixB0hVyz9427PTof97fL7qxxkjAayB28OfwHrkEBl7iblNhUC0RoH+/H9r5GEl +GnWiebxfNrONEHug6PHgiaGq7/Dj+u9bwr7J3/NoS84I08ajMnhlPZxZ8bS/O8If +ceWGZv7clPozyhABT/otDfgVcNH1UdZ4zLlQwc1MuPYN7CwxrElxc8Quf94ttGjb +tfGTl4RTXkDofYdG1qBWW962PsGl2tWmbYDXV0q5JhV/IwbrE1X9f+OksJQne1/+ +dZDxMhdf2Q1V0P9hZZICu4+YhmTMs5Mc9myKVnzp4NYdX5fXoB/uNYph+G7xG5IK +WLSODKhr1wFGTTcuaa8LhOH5UREVenGDJuc6DdgX9a9PzyJGIi2ngQ03TJIkCiU/ +4J/r/vsm81ezDiYZSp2j5JbME+ixW0GBLTUWpOIxUSHgUFwH5f7lQwbXWBOgwXQk +BwpZTmdQx09MfalhBtWeu4/6BnOCOj7e/4+4J0eVxXST0AmVyv8YjJ2nz1F9oQID +AQABo4HGMIHDMB0GA1UdDgQWBBTk7Krj4bEsTjHXaWEtI2GZ5ACQyTCBkwYDVR0j +BIGLMIGIgBTk7Krj4bEsTjHXaWEtI2GZ5ACQyaFlpGMwYTELMAkGA1UEBhMCQVUx +EzARBgNVBAgTClNvbWUtU3RhdGUxFTATBgNVBAoTDE9wZW5zdGFjayBDQTESMBAG +A1UECxMJR2xhbmNlIENBMRIwEAYDVQQDEwlHbGFuY2UgQ0GCCQD0r8EGJOKiODAM +BgNVHRMEBTADAQH/MA0GCSqGSIb3DQEBBQUAA4ICAQA8Zrss/MiwFHGmDlercE0h +UvzA54n/EvKP9nP3jHM2qW/VPfKdnFw99nEPFLhb+lN553vdjOpCYFm+sW0Z5Mi4 +qsFkk4AmXIIEFOPt6zKxMioLYDQ9Sw/BUv6EZGeANWr/bhmaE+dMcKJt5le/0jJm +2ahsVB9fbFu9jBFeYb7Ba/x2aLkEGMxaDLla+6EQhj148fTnS1wjmX9G2cNzJvj/ ++C2EfKJIuDJDqw2oS2FGVpP37FA2Bz2vga0QatNneLkGKCFI3ZTenBznoN+fmurX +TL3eJE4IFNrANCcdfMpdyLAtXz4KpjcehqpZMu70er3d30zbi1l0Ajz4dU+WKz/a +NQES+vMkT2wqjXHVTjrNwodxw3oLK/EuTgwoxIHJuplx5E5Wrdx9g7Gl1PBIJL8V +xiOYS5N7CakyALvdhP7cPubA2+TPAjNInxiAcmhdASS/Vrmpvrkat6XhGn8h9liv +ysDOpMQmYQkmgZBpW8yBKK7JABGGsJADJ3E6J5MMWBX2RR4kFoqVGAzdOU3oyaTy +I0kz5sfuahaWpdYJVlkO+esc0CRXw8fLDYivabK2tOgUEWeZsZGZ9uK6aV1VxTAY +9Guu3BJ4Rv/KP/hk7mP8rIeCwotV66/2H8nq72ImQhzSVyWcxbFf2rJiFQJ3BFwA 
+WoRMgEwjGJWqzhJZUYpUAQ== +-----END CERTIFICATE----- diff --git a/neutron/tests/var/certandkey.pem b/neutron/tests/var/certandkey.pem new file mode 100644 index 000000000..a5baf3ae7 --- /dev/null +++ b/neutron/tests/var/certandkey.pem @@ -0,0 +1,81 @@ +-----BEGIN CERTIFICATE----- +MIIFLjCCAxYCAQEwDQYJKoZIhvcNAQEFBQAwYTELMAkGA1UEBhMCQVUxEzARBgNV +BAgTClNvbWUtU3RhdGUxFTATBgNVBAoTDE9wZW5zdGFjayBDQTESMBAGA1UECxMJ +R2xhbmNlIENBMRIwEAYDVQQDEwlHbGFuY2UgQ0EwHhcNMTIwMjA5MTcxMDUzWhcN +MjIwMjA2MTcxMDUzWjBZMQswCQYDVQQGEwJBVTETMBEGA1UECBMKU29tZS1TdGF0 +ZTESMBAGA1UEChMJT3BlbnN0YWNrMQ8wDQYDVQQLEwZHbGFuY2UxEDAOBgNVBAMT +BzAuMC4wLjAwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDXpUkQN6pu +avo+gz3o1K4krVdPl1m7NjNJDyD/+ZH0EGNcEN7iag1qPE7JsjqGPNZsQK1dMoXb +Sz+OSi9qvNeJnBcfwUx5qTAtwyAb9AxGkwuMafIU+lWbsclo+dPGsja01ywbXTCZ +bF32iqnpOMYhfxWUdoQYiBkhxxhW9eMPKLS/KkP8/bx+Vaa2XJiAebqkd9nrksAA +BeGc9mlafYBEmiChPdJEPw+1ePA4QVq9aPepDsqAKtGN8JLpmoC3BdxQQTbbwL3Q +8fTXK4tCNUaVk4AbDy/McFq6y0ocQoBPJjihOY35mWG/OLtcI99yPOpWGnps/5aG +/64DDJ2D67Fnaj6gKHV+6TXFO8KZxlnxtgtiZDJBZkneTBt9ArSOv+l6NBsumRz0 +iEJ4o4H1S2TSMnprAvX7WnGtc6Xi9gXahYcDHEelwwYzqAiTBv6hxSp4MZ2dNXa+ +KzOitC7ZbV2qsg0au0wjfE/oSQ3NvsvUr8nOmfutJTvHRAwbC1v4G/tuAsO7O0w2 +0u2B3u+pG06m5+rnEqp+rB9hmukRYTfgEFRRsVIvpFl/cwvPXKRcX03UIMx+lLr9 +Ft+ep7YooBhY3wY2kwCxD4lRYNmbwsCIVywZt40f/4ad98TkufR9NhsfycxGeqbr +mTMFlZ8TTlmP82iohekKCOvoyEuTIWL2+wIDAQABMA0GCSqGSIb3DQEBBQUAA4IC +AQBMUBgV0R+Qltf4Du7u/8IFmGAoKR/mktB7R1gRRAqsvecUt7kIwBexGdavGg1y +0pU0+lgUZjJ20N1SlPD8gkNHfXE1fL6fmMjWz4dtYJjzRVhpufHPeBW4tl8DgHPN +rBGAYQ+drDSXaEjiPQifuzKx8WS+DGA3ki4co5mPjVnVH1xvLIdFsk89z3b3YD1k +yCJ/a9K36x6Z/c67JK7s6MWtrdRF9+MVnRKJ2PK4xznd1kBz16V+RA466wBDdARY +vFbtkafbEqOb96QTonIZB7+fAldKDPZYnwPqasreLmaGOaM8sxtlPYAJ5bjDONbc +AaXG8BMRQyO4FyH237otDKlxPyHOFV66BaffF5S8OlwIMiZoIvq+IcTZOdtDUSW2 +KHNLfe5QEDZdKjWCBrfqAfvNuG13m03WqfmcMHl3o/KiPJlx8l9Z4QEzZ9xcyQGL +cncgeHM9wJtzi2cD/rTDNFsx/gxvoyutRmno7I3NRbKmpsXF4StZioU3USRspB07 +hYXOVnG3pS+PjVby7ThT3gvFHSocguOsxClx1epdUJAmJUbmM7NmOp5WVBVtMtC2 
+Su4NG/xJciXitKzw+btb7C7RjO6OEqv/1X/oBDzKBWQAwxUC+lqmnM7W6oqWJFEM +YfTLnrjs7Hj6ThMGcEnfvc46dWK3dz0RjsQzUxugPuEkLA== +-----END CERTIFICATE----- +-----BEGIN RSA PRIVATE KEY----- +MIIJKAIBAAKCAgEA16VJEDeqbmr6PoM96NSuJK1XT5dZuzYzSQ8g//mR9BBjXBDe +4moNajxOybI6hjzWbECtXTKF20s/jkovarzXiZwXH8FMeakwLcMgG/QMRpMLjGny +FPpVm7HJaPnTxrI2tNcsG10wmWxd9oqp6TjGIX8VlHaEGIgZIccYVvXjDyi0vypD +/P28flWmtlyYgHm6pHfZ65LAAAXhnPZpWn2ARJogoT3SRD8PtXjwOEFavWj3qQ7K +gCrRjfCS6ZqAtwXcUEE228C90PH01yuLQjVGlZOAGw8vzHBaustKHEKATyY4oTmN ++Zlhvzi7XCPfcjzqVhp6bP+Whv+uAwydg+uxZ2o+oCh1fuk1xTvCmcZZ8bYLYmQy +QWZJ3kwbfQK0jr/pejQbLpkc9IhCeKOB9Utk0jJ6awL1+1pxrXOl4vYF2oWHAxxH +pcMGM6gIkwb+ocUqeDGdnTV2viszorQu2W1dqrINGrtMI3xP6EkNzb7L1K/Jzpn7 +rSU7x0QMGwtb+Bv7bgLDuztMNtLtgd7vqRtOpufq5xKqfqwfYZrpEWE34BBUUbFS +L6RZf3MLz1ykXF9N1CDMfpS6/Rbfnqe2KKAYWN8GNpMAsQ+JUWDZm8LAiFcsGbeN +H/+GnffE5Ln0fTYbH8nMRnqm65kzBZWfE05Zj/NoqIXpCgjr6MhLkyFi9vsCAwEA +AQKCAgAA96baQcWr9SLmQOR4NOwLEhQAMWefpWCZhU3amB4FgEVR1mmJjnw868RW +t0v36jH0Dl44us9K6o2Ab+jCi9JTtbWM2Osk6JNkwSlVtsSPVH2KxbbmTTExH50N +sYE3tPj12rlB7isXpRrOzlRwzWZmJBHOtrFlAsdKFYCQc03vdXlKGkBv1BuSXYP/ +8W5ltSYXMspxehkOZvhaIejbFREMPbzDvGlDER1a7Q320qQ7kUr7ISvbY1XJUzj1 +f1HwgEA6w/AhED5Jv6wfgvx+8Yo9hYnflTPbsO1XRS4x7kJxGHTMlFuEsSF1ICYH +Bcos0wUiGcBO2N6uAFuhe98BBn+nOwAPZYWwGkmVuK2psm2mXAHx94GT/XqgK/1r +VWGSoOV7Fhjauc2Nv8/vJU18DXT3OY5hc4iXVeEBkuZwRb/NVUtnFoHxVO/Mp5Fh +/W5KZaLWVrLghzvSQ/KUIM0k4lfKDZpY9ZpOdNgWDyZY8tNrXumUZZimzWdXZ9vR +dBssmd8qEKs1AHGFnMDt56IjLGou6j0qnWsLdR1e/WEFsYzGXLVHCv6vXRNkbjqh +WFw5nA+2Dw1YAsy+YkTfgx2pOe+exM/wxsVPa7tG9oZ374dywUi1k6VoHw5dkmJw +1hbXqSLZtx2N51G+SpGmNAV4vLUF0y3dy2wnrzFkFT4uxh1w8QKCAQEA+h6LwHTK +hgcJx6CQQ6zYRqXo4wdvMooY1FcqJOq7LvJUA2CX5OOLs8qN1TyFrOCuAUTurOrM +ABlQ0FpsIaP8TOGz72dHe2eLB+dD6Bqjn10sEFMn54zWd/w9ympQrO9jb5X3ViTh +sCcdYyXVS9Hz8nzbbIF+DaKlxF2Hh71uRDxXpMPxRcGbOIuKZXUj6RkTIulzqT6o +uawlegWxch05QSgzq/1ASxtjTzo4iuDCAii3N45xqxnB+fV9NXEt4R2oOGquBRPJ +LxKcOnaQKBD0YNX4muTq+zPlv/kOb8/ys2WGWDUrNkpyJXqhTve4KONjqM7+iL/U 
+4WdJuiCjonzk/QKCAQEA3Lc+kNq35FNLxMcnCVcUgkmiCWZ4dyGZZPdqjOPww1+n +bbudGPzY1nxOvE60dZM4or/tm6qlXYfb2UU3+OOJrK9s297EQybZ8DTZu2GHyitc +NSFV3Gl4cgvKdbieGKkk9X2dV9xSNesNvX9lJEnQxuwHDTeo8ubLHtV88Ml1xokn +7W+IFiyEuUIL4e5/fadbrI3EwMrbCF4+9VcfABx4PTNMzdc8LsncCMXE+jFX8AWp +TsT2JezTe5o2WpvBoKMAYhJQNQiaWATn00pDVY/70H1vK3ljomAa1IUdOr/AhAF7 +3jL0MYMgXSHzXZOKAtc7yf+QfFWF1Ls8+sen1clJVwKCAQEAp59rB0r+Iz56RmgL +5t7ifs5XujbURemY5E2aN+18DuVmenD0uvfoO1DnJt4NtCNLWhxpXEdq+jH9H/VJ +fG4a+ydT4IC1vjVRTrWlo9qeh4H4suQX3S1c2kKY4pvHf25blH/Lp9bFzbkZD8Ze +IRcOxxb4MsrBwL+dGnGYD9dbG63ZCtoqSxaKQSX7VS1hKKmeUopj8ivFBdIht5oz +JogBQ/J+Vqg9u1gagRFCrYgdXTcOOtRix0lW336vL+6u0ax/fXe5MjvlW3+8Zc3p +pIBgVrlvh9ccx8crFTIDg9m4DJRgqaLQV+0ifI2np3WK3RQvSQWYPetZ7sm69ltD +bvUGvQKCAQAz5CEhjUqOs8asjOXwnDiGKSmfbCgGWi/mPQUf+rcwN9z1P5a/uTKB +utgIDbj/q401Nkp2vrgCNV7KxitSqKxFnTjKuKUL5KZ4gvRtyZBTR751/1BgcauP +pJYE91K0GZBG5zGG5pWtd4XTd5Af5/rdycAeq2ddNEWtCiRFuBeohbaNbBtimzTZ +GV4R0DDJKf+zoeEQMqEsZnwG0mTHceoS+WylOGU92teQeG7HI7K5C5uymTwFzpgq +ByegRd5QFgKRDB0vWsZuyzh1xI/wHdnmOpdYcUGre0zTijhFB7ALWQ32P6SJv3ps +av78kSNxZ4j3BM7DbJf6W8sKasZazOghAoIBAHekpBcLq9gRv2+NfLYxWN2sTZVB +1ldwioG7rWvk5YQR2akukecI3NRjtC5gG2vverawG852Y4+oLfgRMHxgp0qNStwX +juTykzPkCwZn8AyR+avC3mkrtJyM3IigcYOu4/UoaRDFa0xvCC1EfumpnKXIpHag +miSQZf2sVbgqb3/LWvHIg/ceOP9oGJve87/HVfQtBoLaIe5RXCWkqB7mcI/exvTS +8ShaW6v2Fe5Bzdvawj7sbsVYRWe93Aq2tmIgSX320D2RVepb6mjD4nr0IUaM3Yed +TFT7e2ikWXyDLLgVkDTU4Qe8fr3ZKGfanCIDzvgNw6H1gRi+2WQgOmjilMQ= +-----END RSA PRIVATE KEY----- diff --git a/neutron/tests/var/certificate.crt b/neutron/tests/var/certificate.crt new file mode 100644 index 000000000..3c1aa6363 --- /dev/null +++ b/neutron/tests/var/certificate.crt @@ -0,0 +1,30 @@ +-----BEGIN CERTIFICATE----- +MIIFLjCCAxYCAQEwDQYJKoZIhvcNAQEFBQAwYTELMAkGA1UEBhMCQVUxEzARBgNV +BAgTClNvbWUtU3RhdGUxFTATBgNVBAoTDE9wZW5zdGFjayBDQTESMBAGA1UECxMJ +R2xhbmNlIENBMRIwEAYDVQQDEwlHbGFuY2UgQ0EwHhcNMTIwMjA5MTcxMDUzWhcN +MjIwMjA2MTcxMDUzWjBZMQswCQYDVQQGEwJBVTETMBEGA1UECBMKU29tZS1TdGF0 
+ZTESMBAGA1UEChMJT3BlbnN0YWNrMQ8wDQYDVQQLEwZHbGFuY2UxEDAOBgNVBAMT +BzAuMC4wLjAwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDXpUkQN6pu +avo+gz3o1K4krVdPl1m7NjNJDyD/+ZH0EGNcEN7iag1qPE7JsjqGPNZsQK1dMoXb +Sz+OSi9qvNeJnBcfwUx5qTAtwyAb9AxGkwuMafIU+lWbsclo+dPGsja01ywbXTCZ +bF32iqnpOMYhfxWUdoQYiBkhxxhW9eMPKLS/KkP8/bx+Vaa2XJiAebqkd9nrksAA +BeGc9mlafYBEmiChPdJEPw+1ePA4QVq9aPepDsqAKtGN8JLpmoC3BdxQQTbbwL3Q +8fTXK4tCNUaVk4AbDy/McFq6y0ocQoBPJjihOY35mWG/OLtcI99yPOpWGnps/5aG +/64DDJ2D67Fnaj6gKHV+6TXFO8KZxlnxtgtiZDJBZkneTBt9ArSOv+l6NBsumRz0 +iEJ4o4H1S2TSMnprAvX7WnGtc6Xi9gXahYcDHEelwwYzqAiTBv6hxSp4MZ2dNXa+ +KzOitC7ZbV2qsg0au0wjfE/oSQ3NvsvUr8nOmfutJTvHRAwbC1v4G/tuAsO7O0w2 +0u2B3u+pG06m5+rnEqp+rB9hmukRYTfgEFRRsVIvpFl/cwvPXKRcX03UIMx+lLr9 +Ft+ep7YooBhY3wY2kwCxD4lRYNmbwsCIVywZt40f/4ad98TkufR9NhsfycxGeqbr +mTMFlZ8TTlmP82iohekKCOvoyEuTIWL2+wIDAQABMA0GCSqGSIb3DQEBBQUAA4IC +AQBMUBgV0R+Qltf4Du7u/8IFmGAoKR/mktB7R1gRRAqsvecUt7kIwBexGdavGg1y +0pU0+lgUZjJ20N1SlPD8gkNHfXE1fL6fmMjWz4dtYJjzRVhpufHPeBW4tl8DgHPN +rBGAYQ+drDSXaEjiPQifuzKx8WS+DGA3ki4co5mPjVnVH1xvLIdFsk89z3b3YD1k +yCJ/a9K36x6Z/c67JK7s6MWtrdRF9+MVnRKJ2PK4xznd1kBz16V+RA466wBDdARY +vFbtkafbEqOb96QTonIZB7+fAldKDPZYnwPqasreLmaGOaM8sxtlPYAJ5bjDONbc +AaXG8BMRQyO4FyH237otDKlxPyHOFV66BaffF5S8OlwIMiZoIvq+IcTZOdtDUSW2 +KHNLfe5QEDZdKjWCBrfqAfvNuG13m03WqfmcMHl3o/KiPJlx8l9Z4QEzZ9xcyQGL +cncgeHM9wJtzi2cD/rTDNFsx/gxvoyutRmno7I3NRbKmpsXF4StZioU3USRspB07 +hYXOVnG3pS+PjVby7ThT3gvFHSocguOsxClx1epdUJAmJUbmM7NmOp5WVBVtMtC2 +Su4NG/xJciXitKzw+btb7C7RjO6OEqv/1X/oBDzKBWQAwxUC+lqmnM7W6oqWJFEM +YfTLnrjs7Hj6ThMGcEnfvc46dWK3dz0RjsQzUxugPuEkLA== +-----END CERTIFICATE----- diff --git a/neutron/tests/var/privatekey.key b/neutron/tests/var/privatekey.key new file mode 100644 index 000000000..b63df3d29 --- /dev/null +++ b/neutron/tests/var/privatekey.key @@ -0,0 +1,51 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIJKAIBAAKCAgEA16VJEDeqbmr6PoM96NSuJK1XT5dZuzYzSQ8g//mR9BBjXBDe +4moNajxOybI6hjzWbECtXTKF20s/jkovarzXiZwXH8FMeakwLcMgG/QMRpMLjGny 
+FPpVm7HJaPnTxrI2tNcsG10wmWxd9oqp6TjGIX8VlHaEGIgZIccYVvXjDyi0vypD +/P28flWmtlyYgHm6pHfZ65LAAAXhnPZpWn2ARJogoT3SRD8PtXjwOEFavWj3qQ7K +gCrRjfCS6ZqAtwXcUEE228C90PH01yuLQjVGlZOAGw8vzHBaustKHEKATyY4oTmN ++Zlhvzi7XCPfcjzqVhp6bP+Whv+uAwydg+uxZ2o+oCh1fuk1xTvCmcZZ8bYLYmQy +QWZJ3kwbfQK0jr/pejQbLpkc9IhCeKOB9Utk0jJ6awL1+1pxrXOl4vYF2oWHAxxH +pcMGM6gIkwb+ocUqeDGdnTV2viszorQu2W1dqrINGrtMI3xP6EkNzb7L1K/Jzpn7 +rSU7x0QMGwtb+Bv7bgLDuztMNtLtgd7vqRtOpufq5xKqfqwfYZrpEWE34BBUUbFS +L6RZf3MLz1ykXF9N1CDMfpS6/Rbfnqe2KKAYWN8GNpMAsQ+JUWDZm8LAiFcsGbeN +H/+GnffE5Ln0fTYbH8nMRnqm65kzBZWfE05Zj/NoqIXpCgjr6MhLkyFi9vsCAwEA +AQKCAgAA96baQcWr9SLmQOR4NOwLEhQAMWefpWCZhU3amB4FgEVR1mmJjnw868RW +t0v36jH0Dl44us9K6o2Ab+jCi9JTtbWM2Osk6JNkwSlVtsSPVH2KxbbmTTExH50N +sYE3tPj12rlB7isXpRrOzlRwzWZmJBHOtrFlAsdKFYCQc03vdXlKGkBv1BuSXYP/ +8W5ltSYXMspxehkOZvhaIejbFREMPbzDvGlDER1a7Q320qQ7kUr7ISvbY1XJUzj1 +f1HwgEA6w/AhED5Jv6wfgvx+8Yo9hYnflTPbsO1XRS4x7kJxGHTMlFuEsSF1ICYH +Bcos0wUiGcBO2N6uAFuhe98BBn+nOwAPZYWwGkmVuK2psm2mXAHx94GT/XqgK/1r +VWGSoOV7Fhjauc2Nv8/vJU18DXT3OY5hc4iXVeEBkuZwRb/NVUtnFoHxVO/Mp5Fh +/W5KZaLWVrLghzvSQ/KUIM0k4lfKDZpY9ZpOdNgWDyZY8tNrXumUZZimzWdXZ9vR +dBssmd8qEKs1AHGFnMDt56IjLGou6j0qnWsLdR1e/WEFsYzGXLVHCv6vXRNkbjqh +WFw5nA+2Dw1YAsy+YkTfgx2pOe+exM/wxsVPa7tG9oZ374dywUi1k6VoHw5dkmJw +1hbXqSLZtx2N51G+SpGmNAV4vLUF0y3dy2wnrzFkFT4uxh1w8QKCAQEA+h6LwHTK +hgcJx6CQQ6zYRqXo4wdvMooY1FcqJOq7LvJUA2CX5OOLs8qN1TyFrOCuAUTurOrM +ABlQ0FpsIaP8TOGz72dHe2eLB+dD6Bqjn10sEFMn54zWd/w9ympQrO9jb5X3ViTh +sCcdYyXVS9Hz8nzbbIF+DaKlxF2Hh71uRDxXpMPxRcGbOIuKZXUj6RkTIulzqT6o +uawlegWxch05QSgzq/1ASxtjTzo4iuDCAii3N45xqxnB+fV9NXEt4R2oOGquBRPJ +LxKcOnaQKBD0YNX4muTq+zPlv/kOb8/ys2WGWDUrNkpyJXqhTve4KONjqM7+iL/U +4WdJuiCjonzk/QKCAQEA3Lc+kNq35FNLxMcnCVcUgkmiCWZ4dyGZZPdqjOPww1+n +bbudGPzY1nxOvE60dZM4or/tm6qlXYfb2UU3+OOJrK9s297EQybZ8DTZu2GHyitc +NSFV3Gl4cgvKdbieGKkk9X2dV9xSNesNvX9lJEnQxuwHDTeo8ubLHtV88Ml1xokn +7W+IFiyEuUIL4e5/fadbrI3EwMrbCF4+9VcfABx4PTNMzdc8LsncCMXE+jFX8AWp +TsT2JezTe5o2WpvBoKMAYhJQNQiaWATn00pDVY/70H1vK3ljomAa1IUdOr/AhAF7 
+3jL0MYMgXSHzXZOKAtc7yf+QfFWF1Ls8+sen1clJVwKCAQEAp59rB0r+Iz56RmgL +5t7ifs5XujbURemY5E2aN+18DuVmenD0uvfoO1DnJt4NtCNLWhxpXEdq+jH9H/VJ +fG4a+ydT4IC1vjVRTrWlo9qeh4H4suQX3S1c2kKY4pvHf25blH/Lp9bFzbkZD8Ze +IRcOxxb4MsrBwL+dGnGYD9dbG63ZCtoqSxaKQSX7VS1hKKmeUopj8ivFBdIht5oz +JogBQ/J+Vqg9u1gagRFCrYgdXTcOOtRix0lW336vL+6u0ax/fXe5MjvlW3+8Zc3p +pIBgVrlvh9ccx8crFTIDg9m4DJRgqaLQV+0ifI2np3WK3RQvSQWYPetZ7sm69ltD +bvUGvQKCAQAz5CEhjUqOs8asjOXwnDiGKSmfbCgGWi/mPQUf+rcwN9z1P5a/uTKB +utgIDbj/q401Nkp2vrgCNV7KxitSqKxFnTjKuKUL5KZ4gvRtyZBTR751/1BgcauP +pJYE91K0GZBG5zGG5pWtd4XTd5Af5/rdycAeq2ddNEWtCiRFuBeohbaNbBtimzTZ +GV4R0DDJKf+zoeEQMqEsZnwG0mTHceoS+WylOGU92teQeG7HI7K5C5uymTwFzpgq +ByegRd5QFgKRDB0vWsZuyzh1xI/wHdnmOpdYcUGre0zTijhFB7ALWQ32P6SJv3ps +av78kSNxZ4j3BM7DbJf6W8sKasZazOghAoIBAHekpBcLq9gRv2+NfLYxWN2sTZVB +1ldwioG7rWvk5YQR2akukecI3NRjtC5gG2vverawG852Y4+oLfgRMHxgp0qNStwX +juTykzPkCwZn8AyR+avC3mkrtJyM3IigcYOu4/UoaRDFa0xvCC1EfumpnKXIpHag +miSQZf2sVbgqb3/LWvHIg/ceOP9oGJve87/HVfQtBoLaIe5RXCWkqB7mcI/exvTS +8ShaW6v2Fe5Bzdvawj7sbsVYRWe93Aq2tmIgSX320D2RVepb6mjD4nr0IUaM3Yed +TFT7e2ikWXyDLLgVkDTU4Qe8fr3ZKGfanCIDzvgNw6H1gRi+2WQgOmjilMQ= +-----END RSA PRIVATE KEY----- diff --git a/neutron/version.py b/neutron/version.py new file mode 100644 index 000000000..181926489 --- /dev/null +++ b/neutron/version.py @@ -0,0 +1,19 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import pbr.version + +version_info = pbr.version.VersionInfo('neutron') diff --git a/neutron/wsgi.py b/neutron/wsgi.py new file mode 100644 index 000000000..9242530d5 --- /dev/null +++ b/neutron/wsgi.py @@ -0,0 +1,1303 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Utility methods for working with WSGI servers +""" +from __future__ import print_function + +import errno +import os +import socket +import ssl +import sys +import time +from xml.etree import ElementTree as etree +from xml.parsers import expat + +import eventlet.wsgi +eventlet.patcher.monkey_patch(all=False, socket=True, thread=True) +from oslo.config import cfg +import routes.middleware +import webob.dec +import webob.exc + +from neutron.common import constants +from neutron.common import exceptions as exception +from neutron import context +from neutron.db import api +from neutron.openstack.common import excutils +from neutron.openstack.common import gettextutils +from neutron.openstack.common import jsonutils +from neutron.openstack.common import log as logging +from neutron.openstack.common import service as common_service +from neutron.openstack.common import systemd + +socket_opts = [ + cfg.IntOpt('backlog', + default=4096, + help=_("Number of backlog requests to configure " + "the socket with")), + cfg.IntOpt('tcp_keepidle', + default=600, + help=_("Sets the value of 
class WorkerService(object):
    """A single API worker process, driven by a ProcessLauncher."""

    def __init__(self, service, application):
        self._server = None
        self._application = application
        self._service = service

    def start(self):
        # The parent may have forked us just now; disposing of any SQL
        # connections inherited from it avoids 500 errors later, when
        # those stale connections are discovered to be broken.
        api.get_engine().pool.dispose()
        runner = self._service._run
        self._server = self._service.pool.spawn(
            runner, self._application, self._service._socket)

    def wait(self):
        self._service.pool.waitall()

    def stop(self):
        server = self._server
        if not isinstance(server, eventlet.greenthread.GreenThread):
            return
        server.kill()
        self._server = None
class Server(object):
    """Server class to manage multiple WSGI sockets and applications."""

    def __init__(self, name, threads=1000):
        # Raise the default from 8192 to accommodate large tokens
        eventlet.wsgi.MAX_HEADER_LINE = CONF.max_header_line
        self.pool = eventlet.GreenPool(threads)
        self.name = name
        self._launcher = None
        self._server = None

    def _get_socket(self, host, port, backlog):
        """Bind and return a listening socket, optionally SSL-wrapped.

        Retries on EADDRINUSE for up to CONF.retry_until_window seconds;
        exits the process if the address cannot be resolved at all.
        """
        bind_addr = (host, port)
        # TODO(dims): eventlet's green dns/socket module does not actually
        # support IPv6 in getaddrinfo(). We need to get around this in the
        # future or monitor upstream for a fix
        try:
            info = socket.getaddrinfo(bind_addr[0],
                                      bind_addr[1],
                                      socket.AF_UNSPEC,
                                      socket.SOCK_STREAM)[0]
            family = info[0]
            bind_addr = info[-1]
        except Exception:
            LOG.exception(_("Unable to listen on %(host)s:%(port)s"),
                          {'host': host, 'port': port})
            sys.exit(1)

        # Validate the configured SSL files up front so a misconfiguration
        # fails loudly before we start the bind/retry loop.
        if CONF.use_ssl:
            if not os.path.exists(CONF.ssl_cert_file):
                raise RuntimeError(_("Unable to find ssl_cert_file "
                                     ": %s") % CONF.ssl_cert_file)

            # ssl_key_file is optional because the key may be embedded in the
            # certificate file
            if CONF.ssl_key_file and not os.path.exists(CONF.ssl_key_file):
                raise RuntimeError(_("Unable to find "
                                     "ssl_key_file : %s") % CONF.ssl_key_file)

            # ssl_ca_file is optional
            if CONF.ssl_ca_file and not os.path.exists(CONF.ssl_ca_file):
                raise RuntimeError(_("Unable to find ssl_ca_file "
                                     ": %s") % CONF.ssl_ca_file)

        def wrap_ssl(sock):
            # Client certificates are only required when a CA file is
            # configured; otherwise the server accepts any client.
            ssl_kwargs = {
                'server_side': True,
                'certfile': CONF.ssl_cert_file,
                'keyfile': CONF.ssl_key_file,
                'cert_reqs': ssl.CERT_NONE,
            }

            if CONF.ssl_ca_file:
                ssl_kwargs['ca_certs'] = CONF.ssl_ca_file
                ssl_kwargs['cert_reqs'] = ssl.CERT_REQUIRED

            return ssl.wrap_socket(sock, **ssl_kwargs)

        sock = None
        retry_until = time.time() + CONF.retry_until_window
        while not sock and time.time() < retry_until:
            try:
                sock = eventlet.listen(bind_addr,
                                       backlog=backlog,
                                       family=family)
                if CONF.use_ssl:
                    sock = wrap_ssl(sock)

            except socket.error as err:
                # Only EADDRINUSE is retried; any other socket error
                # is re-raised by save_and_reraise_exception.
                with excutils.save_and_reraise_exception() as ctxt:
                    if err.errno == errno.EADDRINUSE:
                        ctxt.reraise = False
                        eventlet.sleep(0.1)
        if not sock:
            raise RuntimeError(_("Could not bind to %(host)s:%(port)s "
                                 "after trying for %(time)d seconds") %
                               {'host': host,
                                'port': port,
                                'time': CONF.retry_until_window})
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        # sockets can hang around forever without keepalive
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)

        # This option isn't available in the OS X version of eventlet
        if hasattr(socket, 'TCP_KEEPIDLE'):
            sock.setsockopt(socket.IPPROTO_TCP,
                            socket.TCP_KEEPIDLE,
                            CONF.tcp_keepidle)

        return sock

    def start(self, application, port, host='0.0.0.0', workers=0):
        """Run a WSGI server with the given application.

        :param application: the WSGI application to serve
        :param port: TCP port to listen on
        :param host: bind address (all interfaces by default)
        :param workers: number of child processes; < 1 serves in-process
        """
        self._host = host
        self._port = port
        backlog = CONF.backlog

        self._socket = self._get_socket(self._host,
                                        self._port,
                                        backlog=backlog)
        if workers < 1:
            # For the case where only one process is required.
            self._server = self.pool.spawn(self._run, application,
                                           self._socket)
            systemd.notify_once()
        else:
            # Minimize the cost of checking for child exit by extending the
            # wait interval past the default of 0.01s.
            self._launcher = common_service.ProcessLauncher(wait_interval=1.0)
            self._server = WorkerService(self, application)
            self._launcher.launch_service(self._server, workers=workers)

    @property
    def host(self):
        # Report the actually-bound address once listening (may differ
        # from the requested one, e.g. port 0).
        return self._socket.getsockname()[0] if self._socket else self._host

    @property
    def port(self):
        return self._socket.getsockname()[1] if self._socket else self._port

    def stop(self):
        if self._launcher:
            # The process launcher does not support stop or kill.
            self._launcher.running = False
        else:
            self._server.kill()

    def wait(self):
        """Wait until all servers have completed running."""
        try:
            if self._launcher:
                self._launcher.wait()
            else:
                self.pool.waitall()
        except KeyboardInterrupt:
            pass

    def _run(self, application, socket):
        """Start a WSGI server in a new green thread."""
        eventlet.wsgi.server(socket, application, custom_pool=self.pool,
                             log=logging.WritableLogger(LOG))
+ + """ + def _factory(app): + return cls(app, **local_config) + return _factory + + def __init__(self, application): + self.application = application + + def process_request(self, req): + """Called on each request. + + If this returns None, the next application down the stack will be + executed. If it returns a response then that response will be returned + and execution will stop here. + + """ + return None + + def process_response(self, response): + """Do whatever you'd like to the response.""" + return response + + @webob.dec.wsgify + def __call__(self, req): + response = self.process_request(req) + if response: + return response + response = req.get_response(self.application) + return self.process_response(response) + + +class Request(webob.Request): + + def best_match_content_type(self): + """Determine the most acceptable content-type. + + Based on: + 1) URI extension (.json/.xml) + 2) Content-type header + 3) Accept* headers + """ + # First lookup http request path + parts = self.path.rsplit('.', 1) + if len(parts) > 1: + _format = parts[1] + if _format in ['json', 'xml']: + return 'application/{0}'.format(_format) + + #Then look up content header + type_from_header = self.get_content_type() + if type_from_header: + return type_from_header + ctypes = ['application/json', 'application/xml'] + + #Finally search in Accept-* headers + bm = self.accept.best_match(ctypes) + return bm or 'application/json' + + def get_content_type(self): + allowed_types = ("application/xml", "application/json") + if "Content-Type" not in self.headers: + LOG.debug(_("Missing Content-Type")) + return None + _type = self.content_type + if _type in allowed_types: + return _type + return None + + def best_match_language(self): + """Determines best available locale from the Accept-Language header. + + :returns: the best language match or None if the 'Accept-Language' + header was not available in the request. 
+ """ + if not self.accept_language: + return None + all_languages = gettextutils.get_available_languages('neutron') + return self.accept_language.best_match(all_languages) + + @property + def context(self): + if 'neutron.context' not in self.environ: + self.environ['neutron.context'] = context.get_admin_context() + return self.environ['neutron.context'] + + +class ActionDispatcher(object): + """Maps method name to local methods through action name.""" + + def dispatch(self, *args, **kwargs): + """Find and call local method.""" + action = kwargs.pop('action', 'default') + action_method = getattr(self, str(action), self.default) + return action_method(*args, **kwargs) + + def default(self, data): + raise NotImplementedError() + + +class DictSerializer(ActionDispatcher): + """Default request body serialization.""" + + def serialize(self, data, action='default'): + return self.dispatch(data, action=action) + + def default(self, data): + return "" + + +class JSONDictSerializer(DictSerializer): + """Default JSON request body serialization.""" + + def default(self, data): + def sanitizer(obj): + return unicode(obj) + return jsonutils.dumps(data, default=sanitizer) + + +class XMLDictSerializer(DictSerializer): + + def __init__(self, metadata=None, xmlns=None): + """Object initialization. + + :param metadata: information needed to deserialize xml into + a dictionary. + :param xmlns: XML namespace to include with serialized xml + """ + super(XMLDictSerializer, self).__init__() + self.metadata = metadata or {} + if not xmlns: + xmlns = self.metadata.get('xmlns') + if not xmlns: + xmlns = constants.XML_NS_V20 + self.xmlns = xmlns + + def default(self, data): + """Return data as XML string. + + :param data: expect data to contain a single key as XML root, or + contain another '*_links' key as atom links. Other + case will use 'VIRTUAL_ROOT_KEY' as XML root. 
+ """ + try: + links = None + has_atom = False + if data is None: + root_key = constants.VIRTUAL_ROOT_KEY + root_value = None + else: + link_keys = [k for k in data.iterkeys() or [] + if k.endswith('_links')] + if link_keys: + links = data.pop(link_keys[0], None) + has_atom = True + root_key = (len(data) == 1 and + data.keys()[0] or constants.VIRTUAL_ROOT_KEY) + root_value = data.get(root_key, data) + doc = etree.Element("_temp_root") + used_prefixes = [] + self._to_xml_node(doc, self.metadata, root_key, + root_value, used_prefixes) + if links: + self._create_link_nodes(list(doc)[0], links) + return self.to_xml_string(list(doc)[0], used_prefixes, has_atom) + except AttributeError as e: + LOG.exception(str(e)) + return '' + + def __call__(self, data): + # Provides a migration path to a cleaner WSGI layer, this + # "default" stuff and extreme extensibility isn't being used + # like originally intended + return self.default(data) + + def to_xml_string(self, node, used_prefixes, has_atom=False): + self._add_xmlns(node, used_prefixes, has_atom) + return etree.tostring(node, encoding='UTF-8') + + #NOTE (ameade): the has_atom should be removed after all of the + # xml serializers and view builders have been updated to the current + # spec that required all responses include the xmlns:atom, the has_atom + # flag is to prevent current tests from breaking + def _add_xmlns(self, node, used_prefixes, has_atom=False): + node.set('xmlns', self.xmlns) + node.set(constants.TYPE_XMLNS, self.xmlns) + if has_atom: + node.set(constants.ATOM_XMLNS, constants.ATOM_NAMESPACE) + node.set(constants.XSI_NIL_ATTR, constants.XSI_NAMESPACE) + ext_ns = self.metadata.get(constants.EXT_NS, {}) + ext_ns_bc = self.metadata.get(constants.EXT_NS_COMP, {}) + for prefix in used_prefixes: + if prefix in ext_ns: + node.set('xmlns:' + prefix, ext_ns[prefix]) + if prefix in ext_ns_bc: + node.set('xmlns:' + prefix, ext_ns_bc[prefix]) + + def _to_xml_node(self, parent, metadata, nodename, data, 
used_prefixes): + """Recursive method to convert data members to XML nodes.""" + result = etree.SubElement(parent, nodename) + if ":" in nodename: + used_prefixes.append(nodename.split(":", 1)[0]) + #TODO(bcwaldon): accomplish this without a type-check + if isinstance(data, list): + if not data: + result.set( + constants.TYPE_ATTR, + constants.TYPE_LIST) + return result + singular = metadata.get('plurals', {}).get(nodename, None) + if singular is None: + if nodename.endswith('s'): + singular = nodename[:-1] + else: + singular = 'item' + for item in data: + self._to_xml_node(result, metadata, singular, item, + used_prefixes) + #TODO(bcwaldon): accomplish this without a type-check + elif isinstance(data, dict): + if not data: + result.set( + constants.TYPE_ATTR, + constants.TYPE_DICT) + return result + attrs = metadata.get('attributes', {}).get(nodename, {}) + for k, v in data.items(): + if k in attrs: + result.set(k, str(v)) + else: + self._to_xml_node(result, metadata, k, v, + used_prefixes) + elif data is None: + result.set(constants.XSI_ATTR, 'true') + else: + if isinstance(data, bool): + result.set( + constants.TYPE_ATTR, + constants.TYPE_BOOL) + elif isinstance(data, int): + result.set( + constants.TYPE_ATTR, + constants.TYPE_INT) + elif isinstance(data, long): + result.set( + constants.TYPE_ATTR, + constants.TYPE_LONG) + elif isinstance(data, float): + result.set( + constants.TYPE_ATTR, + constants.TYPE_FLOAT) + LOG.debug(_("Data %(data)s type is %(type)s"), + {'data': data, + 'type': type(data)}) + if isinstance(data, str): + result.text = unicode(data, 'utf-8') + else: + result.text = unicode(data) + return result + + def _create_link_nodes(self, xml_doc, links): + for link in links: + link_node = etree.SubElement(xml_doc, 'atom:link') + link_node.set('rel', link['rel']) + link_node.set('href', link['href']) + + +class ResponseHeaderSerializer(ActionDispatcher): + """Default response headers serialization.""" + + def serialize(self, response, data, action): 
class ResponseHeaderSerializer(ActionDispatcher):
    """Populate response status/headers, dispatched per action."""

    def serialize(self, response, data, action):
        self.dispatch(response, data, action=action)

    def default(self, response, data):
        response.status_int = 200


class ResponseSerializer(object):
    """Encode the necessary pieces into a response object."""

    def __init__(self, body_serializers=None, headers_serializer=None):
        serializers = {
            'application/json': JSONDictSerializer(),
            'application/xml': XMLDictSerializer(),
        }
        serializers.update(body_serializers or {})
        self.body_serializers = serializers
        self.headers_serializer = (headers_serializer or
                                   ResponseHeaderSerializer())

    def serialize(self, response_data, content_type, action='default'):
        """Serialize a dict into a webob.Response.

        :param response_data: dict produced by the Controller
        :param content_type: expected mimetype of serialized response body
        """
        response = webob.Response()
        self.serialize_headers(response, response_data, action)
        self.serialize_body(response, response_data, content_type, action)
        return response

    def serialize_headers(self, response, data, action):
        self.headers_serializer.serialize(response, data, action)

    def serialize_body(self, response, data, content_type, action):
        response.headers['Content-Type'] = content_type
        if data is None:
            return
        serializer = self.get_body_serializer(content_type)
        response.body = serializer.serialize(data, action)

    def get_body_serializer(self, content_type):
        try:
            return self.body_serializers[content_type]
        except (KeyError, TypeError):
            raise exception.InvalidContentType(content_type=content_type)


class TextDeserializer(ActionDispatcher):
    """Base request-body deserializer; unknown actions yield an empty dict."""

    def deserialize(self, datastring, action='default'):
        return self.dispatch(datastring, action=action)

    def default(self, datastring):
        return {}


class JSONDeserializer(TextDeserializer):
    """Deserialize JSON request bodies."""

    def _from_json(self, datastring):
        try:
            return jsonutils.loads(datastring)
        except ValueError:
            raise exception.MalformedRequestBody(
                reason=_("Cannot understand JSON"))

    def default(self, datastring):
        return {'body': self._from_json(datastring)}
class ProtectedXMLParser(etree.XMLParser):
    # Rejects any inline DTD to block XML entity-expansion attacks
    # ("billion laughs") and external-entity resolution.
    def __init__(self, *args, **kwargs):
        etree.XMLParser.__init__(self, *args, **kwargs)
        # Hook the underlying expat parser's doctype handler.
        self._parser.StartDoctypeDeclHandler = self.start_doctype_decl

    def start_doctype_decl(self, name, sysid, pubid, internal):
        raise ValueError(_("Inline DTD forbidden"))

    def doctype(self, name, pubid, system):
        raise ValueError(_("Inline DTD forbidden"))


class XMLDeserializer(TextDeserializer):

    def __init__(self, metadata=None):
        """Object initialization.

        :param metadata: information needed to deserialize xml into
                         a dictionary.
        """
        super(XMLDeserializer, self).__init__()
        self.metadata = metadata or {}
        xmlns = self.metadata.get('xmlns')
        if not xmlns:
            # Default to the Neutron v2.0 namespace.
            xmlns = constants.XML_NS_V20
        self.xmlns = xmlns

    def _get_key(self, tag):
        """Strip a Clark-notation '{ns}tag' down to a dict key.

        Tags in the default namespace lose the namespace; tags in a known
        extension namespace get a 'prefix:' form; anything else is
        returned unchanged.
        """
        tags = tag.split("}", 1)
        if len(tags) == 2:
            ns = tags[0][1:]
            bare_tag = tags[1]
            ext_ns = self.metadata.get(constants.EXT_NS, {})
            if ns == self.xmlns:
                return bare_tag
            for prefix, _ns in ext_ns.items():
                if ns == _ns:
                    return prefix + ":" + bare_tag
            ext_ns_bc = self.metadata.get(constants.EXT_NS_COMP, {})
            for prefix, _ns in ext_ns_bc.items():
                if ns == _ns:
                    return prefix + ":" + bare_tag
        else:
            return tag

    def _get_links(self, root_tag, node):
        """Collect atom links under *node* as {'<root>_links': [...]}."""
        link_nodes = node.findall(constants.ATOM_LINK_NOTATION)
        root_tag = self._get_key(node.tag)
        link_key = "%s_links" % root_tag
        link_list = []
        for link in link_nodes:
            link_list.append({'rel': link.get('rel'),
                              'href': link.get('href')})
            # Remove link node in order to avoid link node process as
            # an item in _from_xml_node
            node.remove(link)
        return link_list and {link_key: link_list} or {}

    def _parseXML(self, text):
        # Use the DTD-rejecting parser rather than etree's default.
        parser = ProtectedXMLParser()
        parser.feed(text)
        return parser.close()

    def _from_xml(self, datastring):
        """Parse an XML document into the equivalent dict structure.

        Raises MalformedRequestBody when the document cannot be parsed.
        """
        if datastring is None:
            return None
        plurals = set(self.metadata.get('plurals', {}))
        try:
            node = self._parseXML(datastring)
            root_tag = self._get_key(node.tag)
            # Deserialize link node was needed by unit test for verifying
            # the request's response
            links = self._get_links(root_tag, node)
            result = self._from_xml_node(node, plurals)
            # root_tag = constants.VIRTUAL_ROOT_KEY and links is not None
            # is not possible because of the way data are serialized.
            if root_tag == constants.VIRTUAL_ROOT_KEY:
                return result
            return dict({root_tag: result}, **links)
        except Exception as e:
            with excutils.save_and_reraise_exception():
                parseError = False
                # Python2.7
                if (hasattr(etree, 'ParseError') and
                    isinstance(e, getattr(etree, 'ParseError'))):
                    parseError = True
                # Python2.6
                elif isinstance(e, expat.ExpatError):
                    parseError = True
                if parseError:
                    msg = _("Cannot understand XML")
                    raise exception.MalformedRequestBody(reason=msg)

    def _from_xml_node(self, node, listnames):
        """Convert a minidom node to a simple Python type.

        :param listnames: list of XML node names whose subnodes should
                          be considered list items.
        """
        # xsi:nil and the serializer's type attribute drive the reverse
        # mapping written by XMLDictSerializer._to_xml_node.
        attrNil = node.get(str(etree.QName(constants.XSI_NAMESPACE, "nil")))
        attrType = node.get(str(etree.QName(
            self.metadata.get('xmlns'), "type")))
        if (attrNil and attrNil.lower() == 'true'):
            return None
        elif not len(node) and not node.text:
            # Empty element: type attribute disambiguates {} vs [] vs ''.
            if (attrType and attrType == constants.TYPE_DICT):
                return {}
            elif (attrType and attrType == constants.TYPE_LIST):
                return []
            else:
                return ''
        elif (len(node) == 0 and node.text):
            converters = {constants.TYPE_BOOL:
                          lambda x: x.lower() == 'true',
                          constants.TYPE_INT:
                          lambda x: int(x),
                          constants.TYPE_LONG:
                          lambda x: long(x),
                          constants.TYPE_FLOAT:
                          lambda x: float(x)}
            if attrType and attrType in converters:
                return converters[attrType](node.text)
            else:
                return node.text
        elif self._get_key(node.tag) in listnames:
            return [self._from_xml_node(n, listnames) for n in node]
        else:
            result = dict()
            for attr in node.keys():
                # Skip namespace declarations and serializer bookkeeping.
                if (attr == 'xmlns' or
                    attr.startswith('xmlns:') or
                    attr == constants.XSI_ATTR or
                    attr == constants.TYPE_ATTR):
                    continue
                result[self._get_key(attr)] = node.get(attr)
            children = list(node)
            for child in children:
                result[self._get_key(child.tag)] = self._from_xml_node(
                    child, listnames)
            return result

    def default(self, datastring):
        return {'body': self._from_xml(datastring)}

    def __call__(self, datastring):
        # Adding a migration path to allow us to remove unnecessary classes
        return self.default(datastring)


class RequestHeadersDeserializer(ActionDispatcher):
    """Default request headers deserializer."""

    def deserialize(self, request, action):
        return self.dispatch(request, action=action)

    def default(self, request):
        return {}
class RequestDeserializer(object):
    """Break up a Request object into more useful pieces."""

    def __init__(self, body_deserializers=None, headers_deserializer=None):
        deserializers = {
            'application/json': JSONDeserializer(),
            'application/xml': XMLDeserializer(),
        }
        deserializers.update(body_deserializers or {})
        self.body_deserializers = deserializers
        self.headers_deserializer = (headers_deserializer or
                                     RequestHeadersDeserializer())

    def deserialize(self, request):
        """Extract necessary pieces of the request.

        :param request: Request object
        :returns: tuple of (action name, keyword arguments for the
                  controller, expected response content type)
        """
        action_args = self.get_action_args(request.environ)
        action = action_args.pop('action', None)
        action_args.update(self.deserialize_headers(request, action))
        action_args.update(self.deserialize_body(request, action))
        accept = self.get_expected_content_type(request)
        return (action, action_args, accept)

    def deserialize_headers(self, request, action):
        return self.headers_deserializer.deserialize(request, action)

    def deserialize_body(self, request, action):
        """Deserialize the request body, or {} when there is nothing to do."""
        try:
            content_type = request.best_match_content_type()
        except exception.InvalidContentType:
            LOG.debug(_("Unrecognized Content-Type provided in request"))
            return {}

        if content_type is None:
            LOG.debug(_("No Content-Type provided in request"))
            return {}

        if not request.body:
            LOG.debug(_("Empty body provided in request"))
            return {}

        try:
            deserializer = self.get_body_deserializer(content_type)
        except exception.InvalidContentType:
            with excutils.save_and_reraise_exception():
                LOG.debug(_("Unable to deserialize body as provided "
                            "Content-Type"))

        return deserializer.deserialize(request.body, action)

    def get_body_deserializer(self, content_type):
        try:
            return self.body_deserializers[content_type]
        except (KeyError, TypeError):
            raise exception.InvalidContentType(content_type=content_type)

    def get_expected_content_type(self, request):
        return request.best_match_content_type()

    def get_action_args(self, request_environment):
        """Parse dictionary created by routes library."""
        try:
            args = request_environment['wsgiorg.routing_args'][1].copy()
        except Exception:
            return {}
        # Routing bookkeeping is not a controller argument.
        for routing_key in ('controller', 'format'):
            args.pop(routing_key, None)
        return args
class Application(object):
    """Base WSGI application wrapper. Subclasses need to implement __call__."""

    @classmethod
    def factory(cls, global_config, **local_config):
        """Paste app-factory entry point.

        Values under the ``[app:NAME]`` section of the paste config
        arrive here as ``local_config`` and are forwarded to
        ``__init__`` as keyword arguments, e.g.::

            [app:wadl]
            latest_version = 1.3
            paste.app_factory = nova.api.fancy_api:Wadl.factory

        results in ``Wadl(latest_version='1.3')``.
        """
        return cls(**local_config)

    def __call__(self, environ, start_response):
        r"""Subclasses must override this with a real WSGI callable.

        Typical implementations decorate with
        ``@webob.dec.wsgify(RequestClass=Request)`` and return one of:
        a plain string body, a ``webob.exc`` HTTP exception, a
        ``webob.Response``, another WSGI app to run next, or
        ``req.get_response(self.application)``. Alternatively set
        ``req.response`` and return None. See
        http://pythonpaste.org/webob/modules/dec.html for details.
        """
        raise NotImplementedError(_('You must implement __call__'))


class Debug(Middleware):
    """Middleware for debugging.

    Helper class that can be inserted into any WSGI application chain
    to get information about the request and response.
    """

    @webob.dec.wsgify
    def __call__(self, req):
        banner = "*" * 40
        print(banner + " REQUEST ENVIRON")
        for name, val in req.environ.items():
            print(name, "=", val)
        print()

        resp = req.get_response(self.application)

        print(banner + " RESPONSE HEADERS")
        for (name, val) in resp.headers.iteritems():
            print(name, "=", val)
        print()

        resp.app_iter = self.print_generator(resp.app_iter)
        return resp

    @staticmethod
    def print_generator(app_iter):
        """Echo each body chunk to stdout while passing it through."""
        print(("*" * 40) + " BODY")
        for chunk in app_iter:
            sys.stdout.write(chunk)
            sys.stdout.flush()
            yield chunk
        print()
class Router(object):
    """WSGI middleware that maps incoming requests to WSGI apps."""

    @classmethod
    def factory(cls, global_config, **local_config):
        """Return an instance of the WSGI Router class."""
        return cls()

    def __init__(self, mapper):
        """Create a router for the given routes.Mapper.

        Each route in `mapper` must specify a 'controller', which is a
        WSGI app to call. Usually an 'action' is given too, with a
        wsgi.Controller routing the request to that action method.

        Examples::

            mapper = routes.Mapper()
            sc = ServerController()

            # Explicit mapping of one route to a controller+action
            mapper.connect(None, "/svrlist", controller=sc, action="list")

            # Actions are all implicitly defined
            mapper.resource("network", "networks", controller=nc)

            # Pointing to an arbitrary WSGI app. The {path_info:.*}
            # parameter hands the target app just that part of the URL.
            mapper.connect(None, "/v1.0/{path_info:.*}",
                           controller=BlogApp())
        """
        self.map = mapper
        self._router = routes.middleware.RoutesMiddleware(self._dispatch,
                                                          self.map)

    @webob.dec.wsgify
    def __call__(self, req):
        """Route the incoming request to a controller based on self.map.

        If no match, return a 404.
        """
        return self._router

    @staticmethod
    @webob.dec.wsgify(RequestClass=Request)
    def _dispatch(req):
        """Dispatch a Request.

        Called by self._router after matching the incoming request to a
        route and putting the information into req.environ. Returns the
        routed WSGI app's response, or a localized 404.
        """
        matched = req.environ['wsgiorg.routing_args'][1]
        if not matched:
            language = req.best_match_language()
            msg = gettextutils.translate(
                _('The resource could not be found.'), language)
            return webob.exc.HTTPNotFound(explanation=msg)
        return matched['controller']
class Resource(Application):
    """WSGI app that handles (de)serialization and controller dispatch.

    WSGI app that reads routing information supplied by RoutesMiddleware
    and calls the requested action method upon its controller. All
    controller action methods must accept a 'req' argument, which is the
    incoming wsgi.Request. If the operation is a PUT or POST, the controller
    method must also accept a 'body' argument (the deserialized request body).
    They may raise a webob.exc exception or return a dict, which will be
    serialized by requested content type.

    """

    def __init__(self, controller, fault_body_function,
                 deserializer=None, serializer=None):
        """Object initialization.

        :param controller: object that implement methods created by routes lib
        :param deserializer: object that can deserialize a webob request
                             into necessary pieces
        :param serializer: object that can serialize the output of a
                           controller into a webob response
        :param fault_body_function: a function that will build the response
                                    body for HTTP errors raised by operations
                                    on this resource object

        """
        self.controller = controller
        self.deserializer = deserializer or RequestDeserializer()
        self.serializer = serializer or ResponseSerializer()
        self._fault_body_function = fault_body_function
        # use serializer's xmlns for populating Fault generator xmlns
        xml_serializer = self.serializer.body_serializers['application/xml']
        if hasattr(xml_serializer, 'xmlns'):
            self._xmlns = xml_serializer.xmlns

    @webob.dec.wsgify(RequestClass=Request)
    def __call__(self, request):
        """WSGI method that controls (de)serialization and method dispatch."""

        LOG.info(_("%(method)s %(url)s"), {"method": request.method,
                                           "url": request.url})

        # Deserialization failures map to 400s before dispatch is attempted.
        try:
            action, args, accept = self.deserializer.deserialize(request)
        except exception.InvalidContentType:
            msg = _("Unsupported Content-Type")
            LOG.exception(_("InvalidContentType: %s"), msg)
            return Fault(webob.exc.HTTPBadRequest(explanation=msg),
                         self._xmlns)
        except exception.MalformedRequestBody:
            msg = _("Malformed request body")
            LOG.exception(_("MalformedRequestBody: %s"), msg)
            return Fault(webob.exc.HTTPBadRequest(explanation=msg),
                         self._xmlns)

        try:
            action_result = self.dispatch(request, action, args)
        except webob.exc.HTTPException as ex:
            LOG.info(_("HTTP exception thrown: %s"), unicode(ex))
            action_result = Fault(ex,
                                  self._xmlns,
                                  self._fault_body_function)
        except Exception:
            LOG.exception(_("Internal error"))
            # Do not include the traceback to avoid returning it to clients.
            action_result = Fault(webob.exc.HTTPServerError(),
                                  self._xmlns,
                                  self._fault_body_function)

        # Dicts (and None) are serialized; anything else (e.g. a Fault)
        # is already a response and passes through untouched.
        if isinstance(action_result, dict) or action_result is None:
            response = self.serializer.serialize(action_result,
                                                 accept,
                                                 action=action)
        else:
            response = action_result

        try:
            msg_dict = dict(url=request.url, status=response.status_int)
            msg = _("%(url)s returned with HTTP %(status)d") % msg_dict
        except AttributeError as e:
            msg_dict = dict(url=request.url, exception=e)
            msg = _("%(url)s returned a fault: %(exception)s") % msg_dict

        LOG.info(msg)

        return response

    def dispatch(self, request, action, action_args):
        """Find action-specific method on controller and call it."""

        controller_method = getattr(self.controller, action)
        try:
            #NOTE(salvatore-orlando): the controller method must have
            # an argument whose name is 'request'
            return controller_method(request=request, **action_args)
        except TypeError as exc:
            LOG.exception(exc)
            return Fault(webob.exc.HTTPBadRequest(),
                         self._xmlns)
+ action_result = Fault(webob.exc.HTTPServerError(), + self._xmlns, + self._fault_body_function) + + if isinstance(action_result, dict) or action_result is None: + response = self.serializer.serialize(action_result, + accept, + action=action) + else: + response = action_result + + try: + msg_dict = dict(url=request.url, status=response.status_int) + msg = _("%(url)s returned with HTTP %(status)d") % msg_dict + except AttributeError as e: + msg_dict = dict(url=request.url, exception=e) + msg = _("%(url)s returned a fault: %(exception)s") % msg_dict + + LOG.info(msg) + + return response + + def dispatch(self, request, action, action_args): + """Find action-spefic method on controller and call it.""" + + controller_method = getattr(self.controller, action) + try: + #NOTE(salvatore-orlando): the controller method must have + # an argument whose name is 'request' + return controller_method(request=request, **action_args) + except TypeError as exc: + LOG.exception(exc) + return Fault(webob.exc.HTTPBadRequest(), + self._xmlns) + + +def _default_body_function(wrapped_exc): + code = wrapped_exc.status_int + fault_data = { + 'Error': { + 'code': code, + 'message': wrapped_exc.explanation}} + # 'code' is an attribute on the fault tag itself + metadata = {'attributes': {'Error': 'code'}} + return fault_data, metadata + + +class Fault(webob.exc.HTTPException): + """Generates an HTTP response from a webob HTTP exception.""" + + def __init__(self, exception, xmlns=None, body_function=None): + """Creates a Fault for the given webob.exc.exception.""" + self.wrapped_exc = exception + self.status_int = self.wrapped_exc.status_int + self._xmlns = xmlns + self._body_function = body_function or _default_body_function + + @webob.dec.wsgify(RequestClass=Request) + def __call__(self, req): + """Generate a WSGI response based on the exception passed to ctor.""" + # Replace the body with fault details. 
+ fault_data, metadata = self._body_function(self.wrapped_exc) + xml_serializer = XMLDictSerializer(metadata, self._xmlns) + content_type = req.best_match_content_type() + serializer = { + 'application/xml': xml_serializer, + 'application/json': JSONDictSerializer(), + }[content_type] + + self.wrapped_exc.body = serializer.serialize(fault_data) + self.wrapped_exc.content_type = content_type + return self.wrapped_exc + + +# NOTE(salvatore-orlando): this class will go once the +# extension API framework is updated +class Controller(object): + """WSGI app that dispatched to methods. + + WSGI app that reads routing information supplied by RoutesMiddleware + and calls the requested action method upon itself. All action methods + must, in addition to their normal parameters, accept a 'req' argument + which is the incoming wsgi.Request. They raise a webob.exc exception, + or return a dict which will be serialized by requested content type. + + """ + + @webob.dec.wsgify(RequestClass=Request) + def __call__(self, req): + """Call the method specified in req.environ by RoutesMiddleware.""" + arg_dict = req.environ['wsgiorg.routing_args'][1] + action = arg_dict['action'] + method = getattr(self, action) + del arg_dict['controller'] + del arg_dict['action'] + if 'format' in arg_dict: + del arg_dict['format'] + arg_dict['request'] = req + result = method(**arg_dict) + + if isinstance(result, dict) or result is None: + if result is None: + status = 204 + content_type = '' + body = None + else: + status = 200 + content_type = req.best_match_content_type() + default_xmlns = self.get_default_xmlns(req) + body = self._serialize(result, content_type, default_xmlns) + + response = webob.Response(status=status, + content_type=content_type, + body=body) + msg_dict = dict(url=req.url, status=response.status_int) + msg = _("%(url)s returned with HTTP %(status)d") % msg_dict + LOG.debug(msg) + return response + else: + return result + + def _serialize(self, data, content_type, 
default_xmlns): + """Serialize the given dict to the provided content_type. + + Uses self._serialization_metadata if it exists, which is a dict mapping + MIME types to information needed to serialize to that type. + + """ + _metadata = getattr(type(self), '_serialization_metadata', {}) + + serializer = Serializer(_metadata, default_xmlns) + try: + return serializer.serialize(data, content_type) + except exception.InvalidContentType: + msg = _('The requested content type %s is invalid.') % content_type + raise webob.exc.HTTPNotAcceptable(msg) + + def _deserialize(self, data, content_type): + """Deserialize the request body to the specified content type. + + Uses self._serialization_metadata if it exists, which is a dict mapping + MIME types to information needed to serialize to that type. + + """ + _metadata = getattr(type(self), '_serialization_metadata', {}) + serializer = Serializer(_metadata) + return serializer.deserialize(data, content_type)['body'] + + def get_default_xmlns(self, req): + """Provide the XML namespace to use if none is otherwise specified.""" + return None + + +# NOTE(salvatore-orlando): this class will go once the +# extension API framework is updated +class Serializer(object): + """Serializes and deserializes dictionaries to certain MIME types.""" + + def __init__(self, metadata=None, default_xmlns=None): + """Create a serializer based on the given WSGI environment. + + 'metadata' is an optional dict mapping MIME types to information + needed to serialize a dictionary to that type.
+ + """ + self.metadata = metadata or {} + self.default_xmlns = default_xmlns + + def _get_serialize_handler(self, content_type): + handlers = { + 'application/json': JSONDictSerializer(), + 'application/xml': XMLDictSerializer(self.metadata), + } + + try: + return handlers[content_type] + except Exception: + raise exception.InvalidContentType(content_type=content_type) + + def serialize(self, data, content_type): + """Serialize a dictionary into the specified content type.""" + return self._get_serialize_handler(content_type).serialize(data) + + def deserialize(self, datastring, content_type): + """Deserialize a string to a dictionary. + + The string must be in the format of a supported MIME type. + + """ + try: + return self.get_deserialize_handler(content_type).deserialize( + datastring) + except Exception: + raise webob.exc.HTTPBadRequest(_("Could not deserialize data")) + + def get_deserialize_handler(self, content_type): + handlers = { + 'application/json': JSONDeserializer(), + 'application/xml': XMLDeserializer(self.metadata), + } + + try: + return handlers[content_type] + except Exception: + raise exception.InvalidContentType(content_type=content_type) diff --git a/openstack-common.conf b/openstack-common.conf new file mode 100644 index 000000000..f6f6fdaf5 --- /dev/null +++ b/openstack-common.conf @@ -0,0 +1,35 @@ +[DEFAULT] +# The list of modules to copy from oslo-incubator.git +module=cache +module=context +module=db +module=db.sqlalchemy +module=eventlet_backdoor +module=excutils +module=fileutils +module=fixture +module=gettextutils +module=importutils +module=install_venv_common +module=jsonutils +module=local +module=lockutils +module=log +module=log_handler +module=loopingcall +module=middleware +module=network_utils +module=periodic_task +module=policy +module=processutils +module=service +module=sslutils +module=strutils +module=systemd +module=threadgroup +module=timeutils +module=uuidutils +module=versionutils + +# The base module to hold the 
copy of openstack.common +base=neutron diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 000000000..f34177ab2 --- /dev/null +++ b/requirements.txt @@ -0,0 +1,29 @@ +pbr>=0.6,!=0.7,<1.0 + +Paste +PasteDeploy>=1.5.0 +Routes>=1.12.3 +anyjson>=0.3.3 +argparse +Babel>=1.3 +eventlet>=0.13.0 +greenlet>=0.3.2 +httplib2>=0.7.5 +requests>=1.1 +iso8601>=0.1.9 +jsonrpclib +Jinja2 +kombu>=2.4.8 +netaddr>=0.7.6 +python-neutronclient>=2.3.4,<3 +SQLAlchemy>=0.7.8,<=0.9.99 +WebOb>=1.2.3 +python-keystoneclient>=0.9.0 +alembic>=0.4.1 +six>=1.7.0 +stevedore>=0.14 +oslo.config>=1.2.1 +oslo.messaging>=1.3.0 +oslo.rootwrap + +python-novaclient>=2.17.0 diff --git a/run_tests.sh b/run_tests.sh new file mode 100755 index 000000000..8f0dbd3d7 --- /dev/null +++ b/run_tests.sh @@ -0,0 +1,226 @@ +#!/bin/bash + +set -eu + +function usage { + echo "Usage: $0 [OPTION]..." + echo "Run Neutron's test suite(s)" + echo "" + echo " -V, --virtual-env Always use virtualenv. Install automatically if not present" + echo " -N, --no-virtual-env Don't use virtualenv. Run tests in local environment" + echo " -s, --no-site-packages Isolate the virtualenv from the global Python environment" + echo " -r, --recreate-db Recreate the test database (deprecated, as this is now the default)." + echo " -n, --no-recreate-db Don't recreate the test database." + echo " -f, --force Force a clean re-build of the virtual environment. Useful when dependencies have been added." + echo " -u, --update Update the virtual environment with any newer package versions" + echo " -p, --pep8 Just run PEP8 and HACKING compliance check" + echo " -P, --no-pep8 Don't run static code checks" + echo " -c, --coverage Generate coverage report" + echo " -d, --debug Run tests with testtools instead of testr. This allows you to use the debugger." 
+ echo " -h, --help Print this usage message" + echo " --virtual-env-path Location of the virtualenv directory" + echo " Default: \$(pwd)" + echo " --virtual-env-name Name of the virtualenv directory" + echo " Default: .venv" + echo " --tools-path Location of the tools directory" + echo " Default: \$(pwd)" + echo "" + echo "Note: with no options specified, the script will try to run the tests in a virtual environment," + echo " If no virtualenv is found, the script will ask if you would like to create one. If you " + echo " prefer to run tests NOT in a virtual environment, simply pass the -N option." + exit +} + +function process_options { + i=1 + while [ $i -le $# ]; do + case "${!i}" in + -h|--help) usage;; + -V|--virtual-env) always_venv=1; never_venv=0;; + -N|--no-virtual-env) always_venv=0; never_venv=1;; + -s|--no-site-packages) no_site_packages=1;; + -r|--recreate-db) recreate_db=1;; + -n|--no-recreate-db) recreate_db=0;; + -f|--force) force=1;; + -u|--update) update=1;; + -p|--pep8) just_pep8=1;; + -P|--no-pep8) no_pep8=1;; + -c|--coverage) coverage=1;; + -d|--debug) debug=1;; + --virtual-env-path) + (( i++ )) + venv_path=${!i} + ;; + --virtual-env-name) + (( i++ )) + venv_dir=${!i} + ;; + --tools-path) + (( i++ )) + tools_path=${!i} + ;; + -*) testropts="$testropts ${!i}";; + *) testrargs="$testrargs ${!i}" + esac + (( i++ )) + done +} + +tool_path=${tools_path:-$(pwd)} +venv_path=${venv_path:-$(pwd)} +venv_dir=${venv_name:-.venv} +with_venv=tools/with_venv.sh +always_venv=0 +never_venv=0 +force=0 +no_site_packages=0 +installvenvopts= +testrargs= +testropts= +wrapper="" +just_pep8=0 +no_pep8=0 +coverage=0 +debug=0 +recreate_db=1 +update=0 + +LANG=en_US.UTF-8 +LANGUAGE=en_US:en +LC_ALL=C + +process_options $@ +# Make our paths available to other scripts we call +export venv_path +export venv_dir +export venv_name +export tools_dir +export venv=${venv_path}/${venv_dir} + +if [ $no_site_packages -eq 1 ]; then + installvenvopts="--no-site-packages" +fi + + 
+function run_tests { + # Cleanup *pyc + ${wrapper} find . -type f -name "*.pyc" -delete + + if [ $debug -eq 1 ]; then + if [ "$testropts" = "" ] && [ "$testrargs" = "" ]; then + # Default to running all tests if specific test is not + # provided. + testrargs="discover ./neutron/tests" + fi + ${wrapper} python -m testtools.run $testropts $testrargs + + # Short circuit because all of the testr and coverage stuff + # below does not make sense when running testtools.run for + # debugging purposes. + return $? + fi + + if [ $coverage -eq 1 ]; then + TESTRTESTS="$TESTRTESTS --coverage" + else + TESTRTESTS="$TESTRTESTS --slowest" + fi + + # Just run the test suites in current environment + set +e + testrargs=`echo "$testrargs" | sed -e's/^\s*\(.*\)\s*$/\1/'` + TESTRTESTS="$TESTRTESTS --testr-args='--subunit $testropts $testrargs'" + OS_TEST_PATH=`echo $testrargs|grep -o 'neutron\.tests[^[:space:]:]*\+'|tr . /` + if [ -d "$OS_TEST_PATH" ]; then + wrapper="OS_TEST_PATH=$OS_TEST_PATH $wrapper" + elif [ -d "$(dirname $OS_TEST_PATH)" ]; then + wrapper="OS_TEST_PATH=$(dirname $OS_TEST_PATH) $wrapper" + fi + echo "Running \`${wrapper} $TESTRTESTS\`" + bash -c "${wrapper} $TESTRTESTS | ${wrapper} subunit2pyunit" + RESULT=$? + set -e + + copy_subunit_log + + if [ $coverage -eq 1 ]; then + echo "Generating coverage report in covhtml/" + # Don't compute coverage for common code, which is tested elsewhere + ${wrapper} coverage combine + ${wrapper} coverage html --include='neutron/*' --omit='neutron/openstack/common/*' -d covhtml -i + fi + + return $RESULT +} + +function copy_subunit_log { + LOGNAME=`cat .testrepository/next-stream` + LOGNAME=$(($LOGNAME - 1)) + LOGNAME=".testrepository/${LOGNAME}" + cp $LOGNAME subunit.log +} + +function run_pep8 { + echo "Running flake8 ..." 
+ + ${wrapper} flake8 +} + + +TESTRTESTS="python -m neutron.openstack.common.lockutils python setup.py testr" + +if [ $never_venv -eq 0 ] +then + # Remove the virtual environment if --force used + if [ $force -eq 1 ]; then + echo "Cleaning virtualenv..." + rm -rf ${venv} + fi + if [ $update -eq 1 ]; then + echo "Updating virtualenv..." + python tools/install_venv.py $installvenvopts + fi + if [ -e ${venv} ]; then + wrapper="${with_venv}" + else + if [ $always_venv -eq 1 ]; then + # Automatically install the virtualenv + python tools/install_venv.py $installvenvopts + wrapper="${with_venv}" + else + echo -e "No virtual environment found...create one? (Y/n) \c" + read use_ve + if [ "x$use_ve" = "xY" -o "x$use_ve" = "x" -o "x$use_ve" = "xy" ]; then + # Install the virtualenv and run the test suite in it + python tools/install_venv.py $installvenvopts + wrapper=${with_venv} + fi + fi + fi +fi + +# Delete old coverage data from previous runs +if [ $coverage -eq 1 ]; then + ${wrapper} coverage erase +fi + +if [ $just_pep8 -eq 1 ]; then + run_pep8 + exit +fi + +if [ $recreate_db -eq 1 ]; then + rm -f tests.sqlite +fi + +run_tests + +# NOTE(sirp): we only want to run pep8 when we're running the full-test suite, +# not when we're running tests individually. To handle this, we need to +# distinguish between options (testropts), which begin with a '-', and +# arguments (testrargs). 
+if [ -z "$testrargs" ]; then + if [ $no_pep8 -eq 0 ]; then + run_pep8 + fi +fi diff --git a/setup.cfg b/setup.cfg new file mode 100644 index 000000000..0eaaaed0f --- /dev/null +++ b/setup.cfg @@ -0,0 +1,201 @@ +[metadata] +name = neutron +version = 2014.2 +summary = OpenStack Networking +description-file = + README.rst +author = OpenStack +author-email = openstack-dev@lists.openstack.org +home-page = http://www.openstack.org/ +classifier = + Environment :: OpenStack + Intended Audience :: Information Technology + Intended Audience :: System Administrators + License :: OSI Approved :: Apache Software License + Operating System :: POSIX :: Linux + Programming Language :: Python + Programming Language :: Python :: 2 + Programming Language :: Python :: 2.7 + Programming Language :: Python :: 2.6 + +[files] +packages = + neutron +data_files = + etc/neutron = + etc/api-paste.ini + etc/dhcp_agent.ini + etc/fwaas_driver.ini + etc/l3_agent.ini + etc/lbaas_agent.ini + etc/metadata_agent.ini + etc/metering_agent.ini + etc/policy.json + etc/neutron.conf + etc/rootwrap.conf + etc/vpn_agent.ini + etc/neutron/rootwrap.d = + etc/neutron/rootwrap.d/debug.filters + etc/neutron/rootwrap.d/dhcp.filters + etc/neutron/rootwrap.d/iptables-firewall.filters + etc/neutron/rootwrap.d/l3.filters + etc/neutron/rootwrap.d/lbaas-haproxy.filters + etc/neutron/rootwrap.d/linuxbridge-plugin.filters + etc/neutron/rootwrap.d/nec-plugin.filters + etc/neutron/rootwrap.d/openvswitch-plugin.filters + etc/neutron/rootwrap.d/ryu-plugin.filters + etc/neutron/rootwrap.d/vpnaas.filters + etc/init.d = etc/init.d/neutron-server + etc/neutron/plugins/bigswitch = + etc/neutron/plugins/bigswitch/restproxy.ini + etc/neutron/plugins/bigswitch/ssl/ca_certs/README + etc/neutron/plugins/bigswitch/ssl/host_certs/README + etc/neutron/plugins/brocade = etc/neutron/plugins/brocade/brocade.ini + etc/neutron/plugins/cisco = + etc/neutron/plugins/cisco/cisco_plugins.ini + etc/neutron/plugins/cisco/cisco_vpn_agent.ini + 
etc/neutron/plugins/embrane = etc/neutron/plugins/embrane/heleos_conf.ini + etc/neutron/plugins/hyperv = etc/neutron/plugins/hyperv/hyperv_neutron_plugin.ini + etc/neutron/plugins/ibm = etc/neutron/plugins/ibm/sdnve_neutron_plugin.ini + etc/neutron/plugins/linuxbridge = etc/neutron/plugins/linuxbridge/linuxbridge_conf.ini + etc/neutron/plugins/metaplugin = etc/neutron/plugins/metaplugin/metaplugin.ini + etc/neutron/plugins/midonet = etc/neutron/plugins/midonet/midonet.ini + etc/neutron/plugins/ml2 = + etc/neutron/plugins/bigswitch/restproxy.ini + etc/neutron/plugins/ml2/ml2_conf.ini + etc/neutron/plugins/ml2/ml2_conf_arista.ini + etc/neutron/plugins/ml2/ml2_conf_brocade.ini + etc/neutron/plugins/ml2/ml2_conf_cisco.ini + etc/neutron/plugins/ml2/ml2_conf_mlnx.ini + etc/neutron/plugins/ml2/ml2_conf_ncs.ini + etc/neutron/plugins/ml2/ml2_conf_odl.ini + etc/neutron/plugins/ml2/ml2_conf_ofa.ini + etc/neutron/plugins/ml2/ml2_conf_fslsdn.ini + etc/neutron/plugins/mlnx = etc/neutron/plugins/mlnx/mlnx_conf.ini + etc/neutron/plugins/nec = etc/neutron/plugins/nec/nec.ini + etc/neutron/plugins/nuage = etc/neutron/plugins/nuage/nuage_plugin.ini + etc/neutron/plugins/oneconvergence = etc/neutron/plugins/oneconvergence/nvsdplugin.ini + etc/neutron/plugins/openvswitch = etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini + etc/neutron/plugins/plumgrid = etc/neutron/plugins/plumgrid/plumgrid.ini + etc/neutron/plugins/ryu = etc/neutron/plugins/ryu/ryu.ini + etc/neutron/plugins/vmware = etc/neutron/plugins/vmware/nsx.ini +scripts = + bin/neutron-rootwrap + bin/neutron-rootwrap-xen-dom0 + +[global] +setup-hooks = + pbr.hooks.setup_hook + neutron.hooks.setup_hook + +[entry_points] +console_scripts = + neutron-check-nsx-config = neutron.plugins.vmware.check_nsx_config:main + neutron-db-manage = neutron.db.migration.cli:main + neutron-debug = neutron.debug.shell:main + neutron-dhcp-agent = neutron.agent.dhcp_agent:main + neutron-hyperv-agent = 
neutron.plugins.hyperv.agent.hyperv_neutron_agent:main + neutron-ibm-agent = neutron.plugins.ibm.agent.sdnve_neutron_agent:main + neutron-l3-agent = neutron.agent.l3_agent:main + neutron-lbaas-agent = neutron.services.loadbalancer.agent.agent:main + neutron-linuxbridge-agent = neutron.plugins.linuxbridge.agent.linuxbridge_neutron_agent:main + neutron-metadata-agent = neutron.agent.metadata.agent:main + neutron-mlnx-agent = neutron.plugins.mlnx.agent.eswitch_neutron_agent:main + neutron-nec-agent = neutron.plugins.nec.agent.nec_neutron_agent:main + neutron-netns-cleanup = neutron.agent.netns_cleanup_util:main + neutron-ns-metadata-proxy = neutron.agent.metadata.namespace_proxy:main + neutron-nsx-manage = neutron.plugins.vmware.shell:main + neutron-nvsd-agent = neutron.plugins.oneconvergence.agent.nvsd_neutron_agent:main + neutron-openvswitch-agent = neutron.plugins.openvswitch.agent.ovs_neutron_agent:main + neutron-ovs-cleanup = neutron.agent.ovs_cleanup_util:main + neutron-restproxy-agent = neutron.plugins.bigswitch.agent.restproxy_agent:main + neutron-ryu-agent = neutron.plugins.ryu.agent.ryu_neutron_agent:main + neutron-server = neutron.server:main + neutron-rootwrap = oslo.rootwrap.cmd:main + neutron-usage-audit = neutron.cmd.usage_audit:main + neutron-vpn-agent = neutron.services.vpn.agent:main + neutron-metering-agent = neutron.services.metering.agents.metering_agent:main + neutron-ofagent-agent = ryu.cmd.ofa_neutron_agent:main + neutron-sanity-check = neutron.cmd.sanity_check:main +neutron.core_plugins = + bigswitch = neutron.plugins.bigswitch.plugin:NeutronRestProxyV2 + brocade = neutron.plugins.brocade.NeutronPlugin:BrocadePluginV2 + cisco = neutron.plugins.cisco.network_plugin:PluginV2 + embrane = neutron.plugins.embrane.plugins.embrane_ovs_plugin:EmbraneOvsPlugin + hyperv = neutron.plugins.hyperv.hyperv_neutron_plugin:HyperVNeutronPlugin + ibm = neutron.plugins.ibm.sdnve_neutron_plugin:SdnvePluginV2 + linuxbridge = 
neutron.plugins.linuxbridge.lb_neutron_plugin:LinuxBridgePluginV2 + midonet = neutron.plugins.midonet.plugin:MidonetPluginV2 + ml2 = neutron.plugins.ml2.plugin:Ml2Plugin + mlnx = neutron.plugins.mlnx.mlnx_plugin:MellanoxEswitchPlugin + nec = neutron.plugins.nec.nec_plugin:NECPluginV2 + nuage = neutron.plugins.nuage.plugin:NuagePlugin + metaplugin = neutron.plugins.metaplugin.meta_neutron_plugin:MetaPluginV2 + oneconvergence = neutron.plugins.oneconvergence.plugin:OneConvergencePluginV2 + openvswitch = neutron.plugins.openvswitch.ovs_neutron_plugin:OVSNeutronPluginV2 + plumgrid = neutron.plugins.plumgrid.plumgrid_plugin.plumgrid_plugin:NeutronPluginPLUMgridV2 + ryu = neutron.plugins.ryu.ryu_neutron_plugin:RyuNeutronPluginV2 + vmware = neutron.plugins.vmware.plugin:NsxPlugin +neutron.service_plugins = + dummy = neutron.tests.unit.dummy_plugin:DummyServicePlugin + router = neutron.services.l3_router.l3_router_plugin:L3RouterPlugin + firewall = neutron.services.firewall.fwaas_plugin:FirewallPlugin + lbaas = neutron.services.loadbalancer.plugin:LoadBalancerPlugin + vpnaas = neutron.services.vpn.plugin:VPNDriverPlugin + metering = neutron.services.metering.metering_plugin:MeteringPlugin +neutron.ml2.type_drivers = + flat = neutron.plugins.ml2.drivers.type_flat:FlatTypeDriver + local = neutron.plugins.ml2.drivers.type_local:LocalTypeDriver + vlan = neutron.plugins.ml2.drivers.type_vlan:VlanTypeDriver + gre = neutron.plugins.ml2.drivers.type_gre:GreTypeDriver + vxlan = neutron.plugins.ml2.drivers.type_vxlan:VxlanTypeDriver +neutron.ml2.mechanism_drivers = + opendaylight = neutron.plugins.ml2.drivers.mechanism_odl:OpenDaylightMechanismDriver + logger = neutron.tests.unit.ml2.drivers.mechanism_logger:LoggerMechanismDriver + test = neutron.tests.unit.ml2.drivers.mechanism_test:TestMechanismDriver + bulkless = neutron.tests.unit.ml2.drivers.mechanism_bulkless:BulklessMechanismDriver + linuxbridge = neutron.plugins.ml2.drivers.mech_linuxbridge:LinuxbridgeMechanismDriver + 
openvswitch = neutron.plugins.ml2.drivers.mech_openvswitch:OpenvswitchMechanismDriver + hyperv = neutron.plugins.ml2.drivers.mech_hyperv:HypervMechanismDriver + ncs = neutron.plugins.ml2.drivers.mechanism_ncs:NCSMechanismDriver + arista = neutron.plugins.ml2.drivers.mech_arista.mechanism_arista:AristaDriver + cisco_nexus = neutron.plugins.ml2.drivers.cisco.nexus.mech_cisco_nexus:CiscoNexusMechanismDriver + cisco_apic = neutron.plugins.ml2.drivers.cisco.apic.mechanism_apic:APICMechanismDriver + l2population = neutron.plugins.ml2.drivers.l2pop.mech_driver:L2populationMechanismDriver + bigswitch = neutron.plugins.ml2.drivers.mech_bigswitch.driver:BigSwitchMechanismDriver + ofagent = neutron.plugins.ml2.drivers.mech_ofagent:OfagentMechanismDriver + mlnx = neutron.plugins.ml2.drivers.mlnx.mech_mlnx:MlnxMechanismDriver + brocade = neutron.plugins.ml2.drivers.brocade.mechanism_brocade:BrocadeMechanism + fslsdn = neutron.plugins.ml2.drivers.mechanism_fslsdn:FslsdnMechanismDriver +neutron.openstack.common.cache.backends = + memory = neutron.openstack.common.cache._backends.memory:MemoryBackend +# These are for backwards compat with Icehouse notification_driver configuration values +oslo.messaging.notify.drivers = + neutron.openstack.common.notifier.log_notifier = oslo.messaging.notify._impl_log:LogDriver + neutron.openstack.common.notifier.no_op_notifier = oslo.messaging.notify._impl_noop:NoOpDriver + neutron.openstack.common.notifier.rpc_notifier2 = oslo.messaging.notify._impl_messaging:MessagingV2Driver + neutron.openstack.common.notifier.rpc_notifier = oslo.messaging.notify._impl_messaging:MessagingDriver + neutron.openstack.common.notifier.test_notifier = oslo.messaging.notify._impl_test:TestDriver + + +[build_sphinx] +all_files = 1 +build-dir = doc/build +source-dir = doc/source + +[extract_messages] +keywords = _ gettext ngettext l_ lazy_gettext +mapping_file = babel.cfg +output_file = neutron/locale/neutron.pot + +[compile_catalog] +directory = neutron/locale +domain 
= neutron + +[update_catalog] +domain = neutron +output_dir = neutron/locale +input_file = neutron/locale/neutron.pot + +[wheel] +universal = 1 diff --git a/setup.py b/setup.py new file mode 100644 index 000000000..736375744 --- /dev/null +++ b/setup.py @@ -0,0 +1,30 @@ +#!/usr/bin/env python +# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT +import setuptools + +# In python < 2.7.4, a lazy loading of package `pbr` will break +# setuptools if some other modules registered functions in `atexit`. 
+# solution from: http://bugs.python.org/issue15881#msg170215 +try: + import multiprocessing # noqa +except ImportError: + pass + +setuptools.setup( + setup_requires=['pbr'], + pbr=True) diff --git a/test-requirements.txt b/test-requirements.txt new file mode 100644 index 000000000..e4e4ab4f9 --- /dev/null +++ b/test-requirements.txt @@ -0,0 +1,18 @@ +hacking>=0.9.1,<0.10 + +cliff>=1.4.3 +coverage>=3.6 +discover +fixtures>=0.3.14 +mock>=1.0 +python-subunit>=0.0.18 +ordereddict +sphinx>=1.1.2,!=1.2.0,<1.3 +oslosphinx +testrepository>=0.0.18 +testtools>=0.9.34 +WebTest>=2.0 +# Packages for the Cisco Plugin +############################### +configobj +############################### diff --git a/tools/check_i18n.py b/tools/check_i18n.py new file mode 100644 index 000000000..f9b31ebda --- /dev/null +++ b/tools/check_i18n.py @@ -0,0 +1,155 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+from __future__ import print_function + +import compiler +import imp +import os.path +import sys + + +def is_localized(node): + """Check message wrapped by _()""" + if isinstance(node.parent, compiler.ast.CallFunc): + if isinstance(node.parent.node, compiler.ast.Name): + if node.parent.node.name == '_': + return True + return False + + +class ASTWalker(compiler.visitor.ASTVisitor): + + def default(self, node, *args): + for child in node.getChildNodes(): + child.parent = node + compiler.visitor.ASTVisitor.default(self, node, *args) + + +class Visitor(object): + + def __init__(self, filename, i18n_msg_predicates, + msg_format_checkers, debug): + self.filename = filename + self.debug = debug + self.error = 0 + self.i18n_msg_predicates = i18n_msg_predicates + self.msg_format_checkers = msg_format_checkers + with open(filename) as f: + self.lines = f.readlines() + + def visitConst(self, node): + if not isinstance(node.value, str): + return + + if is_localized(node): + for (checker, msg) in self.msg_format_checkers: + if checker(node): + print('%s:%d %s: %s Error: %s' % + (self.filename, node.lineno, + self.lines[node.lineno - 1][:-1], + checker.__name__, msg), + file=sys.stderr) + self.error = 1 + return + if debug: + print('%s:%d %s: %s' % + (self.filename, node.lineno, + self.lines[node.lineno - 1][:-1], + "Pass")) + else: + for (predicate, action, msg) in self.i18n_msg_predicates: + if predicate(node): + if action == 'skip': + if debug: + print('%s:%d %s: %s' % + (self.filename, node.lineno, + self.lines[node.lineno - 1][:-1], + "Pass")) + return + elif action == 'error': + print('%s:%d %s: %s Error: %s' % + (self.filename, node.lineno, + self.lines[node.lineno - 1][:-1], + predicate.__name__, msg), + file=sys.stderr) + self.error = 1 + return + elif action == 'warn': + print('%s:%d %s: %s' % + (self.filename, node.lineno, + self.lines[node.lineno - 1][:-1], + "Warn: %s" % msg)) + return + print('Predicate with wrong action!', file=sys.stderr) + + +def 
is_file_in_black_list(black_list, f): + for f in black_list: + if os.path.abspath(input_file).startswith( + os.path.abspath(f)): + return True + return False + + +def check_i18n(input_file, i18n_msg_predicates, msg_format_checkers, debug): + input_mod = compiler.parseFile(input_file) + v = compiler.visitor.walk(input_mod, + Visitor(input_file, + i18n_msg_predicates, + msg_format_checkers, + debug), + ASTWalker()) + return v.error + + +if __name__ == '__main__': + input_path = sys.argv[1] + cfg_path = sys.argv[2] + try: + cfg_mod = imp.load_source('', cfg_path) + except Exception: + print("Load cfg module failed", file=sys.stderr) + sys.exit(1) + + i18n_msg_predicates = cfg_mod.i18n_msg_predicates + msg_format_checkers = cfg_mod.msg_format_checkers + black_list = cfg_mod.file_black_list + + debug = False + if len(sys.argv) > 3: + if sys.argv[3] == '-d': + debug = True + + if os.path.isfile(input_path): + sys.exit(check_i18n(input_path, + i18n_msg_predicates, + msg_format_checkers, + debug)) + + error = 0 + for dirpath, dirs, files in os.walk(input_path): + for f in files: + if not f.endswith('.py'): + continue + input_file = os.path.join(dirpath, f) + if is_file_in_black_list(black_list, input_file): + continue + if check_i18n(input_file, + i18n_msg_predicates, + msg_format_checkers, + debug): + error = 1 + sys.exit(error) diff --git a/tools/check_i18n_test_case.txt b/tools/check_i18n_test_case.txt new file mode 100644 index 000000000..3d1391d94 --- /dev/null +++ b/tools/check_i18n_test_case.txt @@ -0,0 +1,67 @@ +# test-case for check_i18n.py +# python check_i18n.py check_i18n.txt -d + +# message format checking +# capital checking +msg = _("hello world, error") +msg = _("hello world_var, error") +msg = _('file_list xyz, pass') +msg = _("Hello world, pass") + +# format specifier checking +msg = _("Hello %s world %d, error") +msg = _("Hello %s world, pass") +msg = _("Hello %(var1)s world %(var2)s, pass") + +# message has been localized +# is_localized +msg = _("Hello 
world, pass") +msg = _("Hello world, pass") % var +LOG.debug(_('Hello world, pass')) +LOG.info(_('Hello world, pass')) +raise x.y.Exception(_('Hello world, pass')) +raise Exception(_('Hello world, pass')) + +# message need be localized +# is_log_callfunc +LOG.debug('hello world, error') +LOG.debug('hello world, error' % xyz) +sys.append('hello world, warn') + +# is_log_i18n_msg_with_mod +LOG.debug(_('Hello world, error') % xyz) + +# default warn +msg = 'hello world, warn' +msg = 'hello world, warn' % var + +# message needn't be localized +# skip only one word +msg = '' +msg = "hello,pass" + +# skip dict +msg = {'hello world, pass': 1} + +# skip list +msg = ["hello world, pass"] + +# skip subscript +msg['hello world, pass'] + +# skip xml marker +msg = ", pass" + +# skip sql statement +msg = "SELECT * FROM xyz WHERE hello=1, pass" +msg = "select * from xyz, pass" + +# skip add statement +msg = 'hello world' + e + 'world hello, pass' + +# skip doc string +""" +Hello world, pass +""" +class Msg: + pass diff --git a/tools/clean.sh b/tools/clean.sh new file mode 100755 index 000000000..27bc219f9 --- /dev/null +++ b/tools/clean.sh @@ -0,0 +1,5 @@ +#!/bin/bash +rm -rf ./*.deb ./*.tar.gz ./*.dsc ./*.changes +rm -rf */*.deb +rm -rf ./plugins/**/build/ ./plugins/**/dist +rm -rf ./plugins/**/lib/neutron_*_plugin.egg-info ./plugins/neutron-* diff --git a/tools/i18n_cfg.py b/tools/i18n_cfg.py new file mode 100644 index 000000000..5ad1a514d --- /dev/null +++ b/tools/i18n_cfg.py @@ -0,0 +1,97 @@ +import compiler +import re + + +def is_log_callfunc(n): + """LOG.xxx('hello %s' % xyz) and LOG('hello')""" + if isinstance(n.parent, compiler.ast.Mod): + n = n.parent + if isinstance(n.parent, compiler.ast.CallFunc): + if isinstance(n.parent.node, compiler.ast.Getattr): + if isinstance(n.parent.node.getChildNodes()[0], + compiler.ast.Name): + if n.parent.node.getChildNodes()[0].name == 'LOG': + return True + return False + + +def is_log_i18n_msg_with_mod(n): + """LOG.xxx("Hello %s" % xyz) 
should be LOG.xxx("Hello %s", xyz)""" + if not isinstance(n.parent.parent, compiler.ast.Mod): + return False + n = n.parent.parent + if isinstance(n.parent, compiler.ast.CallFunc): + if isinstance(n.parent.node, compiler.ast.Getattr): + if isinstance(n.parent.node.getChildNodes()[0], + compiler.ast.Name): + if n.parent.node.getChildNodes()[0].name == 'LOG': + return True + return False + + +def is_wrong_i18n_format(n): + """Check _('hello %s' % xyz)""" + if isinstance(n.parent, compiler.ast.Mod): + n = n.parent + if isinstance(n.parent, compiler.ast.CallFunc): + if isinstance(n.parent.node, compiler.ast.Name): + if n.parent.node.name == '_': + return True + return False + + +""" +Used for check message need be localized or not. +(predicate_func, action, message) +""" +i18n_msg_predicates = [ + # Skip ['hello world', 1] + (lambda n: isinstance(n.parent, compiler.ast.List), 'skip', ''), + # Skip {'hellow world', 1} + (lambda n: isinstance(n.parent, compiler.ast.Dict), 'skip', ''), + # Skip msg['hello world'] + (lambda n: isinstance(n.parent, compiler.ast.Subscript), 'skip', ''), + # Skip doc string + (lambda n: isinstance(n.parent, compiler.ast.Discard), 'skip', ''), + # Skip msg = "hello", in normal, message should more than one word + (lambda n: len(n.value.strip().split(' ')) <= 1, 'skip', ''), + # Skip msg = 'hello world' + vars + 'world hello' + (lambda n: isinstance(n.parent, compiler.ast.Add), 'skip', ''), + # Skip xml markers msg = "" + (lambda n: len(re.compile("").findall(n.value)) > 0, 'skip', ''), + # Skip sql statement + (lambda n: len( + re.compile("^SELECT.*FROM", flags=re.I).findall(n.value)) > 0, + 'skip', ''), + # LOG.xxx() + (is_log_callfunc, 'error', 'Message must be localized'), + # _('hello %s' % xyz) should be _('hello %s') % xyz + (is_wrong_i18n_format, 'error', + ("Message format was wrong, _('hello %s' % xyz) " + "should be _('hello %s') % xyz")), + # default + (lambda n: True, 'warn', 'Message might need localized') +] + + +""" +Used for 
checking message format. (checker_func, message) +""" +msg_format_checkers = [ + # If message contain more than on format specifier, it should use + # mapping key + (lambda n: len(re.compile("%[bcdeEfFgGnosxX]").findall(n.value)) > 1, + "The message shouldn't contain more than one format specifier"), + # Check capital + (lambda n: n.value.split(' ')[0].count('_') == 0 and + n.value[0].isalpha() and + n.value[0].islower(), + "First letter must be capital"), + (is_log_i18n_msg_with_mod, + 'LOG.xxx("Hello %s" % xyz) should be LOG.xxx("Hello %s", xyz)') +] + + +file_black_list = ["./neutron/tests/unit", + "./neutron/openstack", + "./neutron/plugins/bigswitch/tests"] diff --git a/tools/install_venv.py b/tools/install_venv.py new file mode 100644 index 000000000..df06fa9c6 --- /dev/null +++ b/tools/install_venv.py @@ -0,0 +1,74 @@ +#!/usr/bin/env python +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Copyright 2010 OpenStack Foundation. +# Copyright 2013 IBM Corp. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Installation script for Neutron's development virtualenv +""" +from __future__ import print_function + +import os +import sys + +import install_venv_common as install_venv + + +def print_help(): + help = """ + Neutron development environment setup is complete. 
+ + Neutron development uses virtualenv to track and manage Python dependencies + while in development and testing. + + To activate the Neutron virtualenv for the extent of your current shell + session you can run: + + $ source .venv/bin/activate + + Or, if you prefer, you can run commands in the virtualenv on a case by case + basis by running: + + $ tools/with_venv.sh + + Also, make test will automatically use the virtualenv. + """ + print(help) + + +def main(argv): + root = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) + venv = os.path.join(root, '.venv') + pip_requires = os.path.join(root, 'requirements.txt') + test_requires = os.path.join(root, 'test-requirements.txt') + py_version = "python%s.%s" % (sys.version_info[0], sys.version_info[1]) + project = 'Neutron' + install = install_venv.InstallVenv(root, venv, pip_requires, test_requires, + py_version, project) + options = install.parse_args(argv) + install.check_python_version() + install.check_dependencies() + install.create_virtualenv(no_site_packages=options.no_site_packages) + install.install_dependencies() + print_help() + + +if __name__ == '__main__': + main(sys.argv) diff --git a/tools/install_venv_common.py b/tools/install_venv_common.py new file mode 100644 index 000000000..b5ec5092f --- /dev/null +++ b/tools/install_venv_common.py @@ -0,0 +1,174 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 OpenStack Foundation +# Copyright 2013 IBM Corp. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Provides methods needed by installation script for OpenStack development +virtual environments. + +Since this script is used to bootstrap a virtualenv from the system's Python +environment, it should be kept strictly compatible with Python 2.6. + +Synced in from openstack-common +""" + +from __future__ import print_function + +import optparse +import os +import subprocess +import sys + + +class InstallVenv(object): + + def __init__(self, root, venv, requirements, + test_requirements, py_version, + project): + self.root = root + self.venv = venv + self.requirements = requirements + self.test_requirements = test_requirements + self.py_version = py_version + self.project = project + + def die(self, message, *args): + print(message % args, file=sys.stderr) + sys.exit(1) + + def check_python_version(self): + if sys.version_info < (2, 6): + self.die("Need Python Version >= 2.6") + + def run_command_with_code(self, cmd, redirect_output=True, + check_exit_code=True): + """Runs a command in an out-of-process shell. + + Returns the output of that command. Working directory is self.root. 
+ """ + if redirect_output: + stdout = subprocess.PIPE + else: + stdout = None + + proc = subprocess.Popen(cmd, cwd=self.root, stdout=stdout) + output = proc.communicate()[0] + if check_exit_code and proc.returncode != 0: + self.die('Command "%s" failed.\n%s', ' '.join(cmd), output) + return (output, proc.returncode) + + def run_command(self, cmd, redirect_output=True, check_exit_code=True): + return self.run_command_with_code(cmd, redirect_output, + check_exit_code)[0] + + def get_distro(self): + if (os.path.exists('/etc/fedora-release') or + os.path.exists('/etc/redhat-release')): + return Fedora( + self.root, self.venv, self.requirements, + self.test_requirements, self.py_version, self.project) + else: + return Distro( + self.root, self.venv, self.requirements, + self.test_requirements, self.py_version, self.project) + + def check_dependencies(self): + self.get_distro().install_virtualenv() + + def create_virtualenv(self, no_site_packages=True): + """Creates the virtual environment and installs PIP. + + Creates the virtual environment and installs PIP only into the + virtual environment. + """ + if not os.path.isdir(self.venv): + print('Creating venv...', end=' ') + if no_site_packages: + self.run_command(['virtualenv', '-q', '--no-site-packages', + self.venv]) + else: + self.run_command(['virtualenv', '-q', self.venv]) + print('done.') + else: + print("venv already exists...") + pass + + def pip_install(self, *args): + self.run_command(['tools/with_venv.sh', + 'pip', 'install', '--upgrade'] + list(args), + redirect_output=False) + + def install_dependencies(self): + print('Installing dependencies with pip (this can take a while)...') + + # First things first, make sure our venv has the latest pip and + # setuptools. 
+ self.pip_install('pip>=1.3') + self.pip_install('setuptools') + + self.pip_install('-r', self.requirements) + self.pip_install('-r', self.test_requirements) + + def parse_args(self, argv): + """Parses command-line arguments.""" + parser = optparse.OptionParser() + parser.add_option('-n', '--no-site-packages', + action='store_true', + help="Do not inherit packages from global Python " + "install") + return parser.parse_args(argv[1:])[0] + + +class Distro(InstallVenv): + + def check_cmd(self, cmd): + return bool(self.run_command(['which', cmd], + check_exit_code=False).strip()) + + def install_virtualenv(self): + if self.check_cmd('virtualenv'): + return + + if self.check_cmd('easy_install'): + print('Installing virtualenv via easy_install...', end=' ') + if self.run_command(['easy_install', 'virtualenv']): + print('Succeeded') + return + else: + print('Failed') + + self.die('ERROR: virtualenv not found.\n\n%s development' + ' requires virtualenv, please install it using your' + ' favorite package management tool' % self.project) + + +class Fedora(Distro): + """This covers all Fedora-based distributions. + + Includes: Fedora, RHEL, CentOS, Scientific Linux + """ + + def check_pkg(self, pkg): + return self.run_command_with_code(['rpm', '-q', pkg], + check_exit_code=False)[1] == 0 + + def install_virtualenv(self): + if self.check_cmd('virtualenv'): + return + + if not self.check_pkg('python-virtualenv'): + self.die("Please install 'python-virtualenv'.") + + super(Fedora, self).install_virtualenv() diff --git a/tools/with_venv.sh b/tools/with_venv.sh new file mode 100755 index 000000000..63f5b9837 --- /dev/null +++ b/tools/with_venv.sh @@ -0,0 +1,21 @@ +#!/bin/bash +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +TOOLS=`dirname $0` +VENV=$TOOLS/../.venv +source $VENV/bin/activate && "$@" diff --git a/tox.ini b/tox.ini new file mode 100644 index 000000000..223ed2e2f --- /dev/null +++ b/tox.ini @@ -0,0 +1,73 @@ +[tox] +# TODO(yamahata): enable tests of py26, py27, py33. +# Those unit tests are temporalily disabled until its stabilization +#envlist = py26,py27,py33,pep8 +envlist = pep8 +minversion = 1.6 +skipsdist = True + +[testenv] +setenv = VIRTUAL_ENV={envdir} +usedevelop = True +install_command = pip install -U {opts} {packages} +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt + setuptools_git>=0.4 +commands = + python -m neutron.openstack.common.lockutils python setup.py testr --slowest --testr-args='{posargs}' + +[testenv:functional] +setenv = OS_TEST_PATH=./neutron/tests/functional +commands = + python setup.py testr --slowest --testr-args='{posargs}' + +[tox:jenkins] +sitepackages = True +downloadcache = ~/cache/pip + +[testenv:pep8] +commands = + flake8 + neutron-db-manage check_migration + bash -c "find neutron -type f -regex '.*\.pot?' 
-print0|xargs -0 -n 1 msgfmt --check-format -o /dev/null" +whitelist_externals = bash + +[testenv:i18n] +commands = python ./tools/check_i18n.py ./neutron ./tools/i18n_cfg.py + +[testenv:cover] +commands = + python -m neutron.openstack.common.lockutils python setup.py testr --coverage --testr-args='{posargs}' + +[testenv:venv] +commands = {posargs} + +[flake8] +# E125 continuation line does not distinguish itself from next logical line +# E126 continuation line over-indented for hanging indent +# E128 continuation line under-indented for visual indent +# E129 visually indented line with same indent as next logical line +# E251 unexpected spaces around keyword / parameter equals +# E265 block comment should start with ‘# ‘ +# E713 test for membership should be ‘not in’ +# F402 import module shadowed by loop variable +# F811 redefinition of unused variable +# F812 list comprehension redefines name from line +# H104 file contains nothing but comments +# H237 module is removed in Python 3 +# H302 import only modules +# H304 no relative imports +# H305 imports not grouped correctly +# H307 like imports should be grouped together +# H401 docstring should not start with a space +# H402 one line docstring needs punctuation +# H405 multi line docstring summary not separated with an empty line +# H904 Wrap long lines in parentheses instead of a backslash +# TODO(marun) H404 multi line docstring should start with a summary +ignore = E125,E126,E128,E129,E251,E265,E713,F402,F811,F812,H104,H237,H302,H304,H305,H307,H401,H402,H404,H405,H904 +show-source = true +builtins = _ +exclude = .venv,.git,.tox,dist,doc,*openstack/common*,*lib/python*,*egg,build,tools,.ropeproject + +[hacking] +local-check-factory = neutron.hacking.checks.factory